VirtualBox

source: vbox/trunk/src/recompiler/exec.c @ 35004

Last change on this file since 35004 was 33656, checked in by vboxsync, 14 years ago

*: rebrand Sun (L)GPL disclaimers

  • Property svn:eol-style set to native
File size: 114.2 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include "config.h"
31#ifndef VBOX
32#ifdef _WIN32
33#include <windows.h>
34#else
35#include <sys/types.h>
36#include <sys/mman.h>
37#endif
38#include <stdlib.h>
39#include <stdio.h>
40#include <stdarg.h>
41#include <string.h>
42#include <errno.h>
43#include <unistd.h>
44#include <inttypes.h>
45#else /* VBOX */
46# include <stdlib.h>
47# include <stdio.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
60//#define DEBUG_TB_INVALIDATE
61//#define DEBUG_FLUSH
62//#define DEBUG_TLB
63//#define DEBUG_UNASSIGNED
64
65/* make various TB consistency checks */
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74#define SMC_BITMAP_USE_THRESHOLD 10
75
76#define MMAP_AREA_START 0x00000000
77#define MMAP_AREA_END 0xa8000000
78
79#if defined(TARGET_SPARC64)
80#define TARGET_PHYS_ADDR_SPACE_BITS 41
81#elif defined(TARGET_SPARC)
82#define TARGET_PHYS_ADDR_SPACE_BITS 36
83#elif defined(TARGET_ALPHA)
84#define TARGET_PHYS_ADDR_SPACE_BITS 42
85#define TARGET_VIRT_ADDR_SPACE_BITS 42
86#elif defined(TARGET_PPC64)
87#define TARGET_PHYS_ADDR_SPACE_BITS 42
88#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
89#define TARGET_PHYS_ADDR_SPACE_BITS 42
90#elif defined(TARGET_I386) && !defined(USE_KQEMU)
91#define TARGET_PHYS_ADDR_SPACE_BITS 36
92#else
93/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
94#define TARGET_PHYS_ADDR_SPACE_BITS 32
95#endif
96
97static TranslationBlock *tbs;
98int code_gen_max_blocks;
99TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
100static int nb_tbs;
101/* any access to the tbs or the page table must use this lock */
102spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
103
104#ifndef VBOX
105#if defined(__arm__) || defined(__sparc_v9__)
106/* The prologue must be reachable with a direct jump. ARM and Sparc64
107 have limited branch ranges (possibly also PPC), so place it in a
108 section close to the code segment. */
109#define code_gen_section \
110 __attribute__((__section__(".gen_code"))) \
111 __attribute__((aligned (32)))
112#else
113#define code_gen_section \
114 __attribute__((aligned (32)))
115#endif
116uint8_t code_gen_prologue[1024] code_gen_section;
117
118#else /* VBOX */
119extern uint8_t* code_gen_prologue;
120#endif /* VBOX */
121
122static uint8_t *code_gen_buffer;
123static unsigned long code_gen_buffer_size;
124/* threshold to flush the translated code buffer */
125static unsigned long code_gen_buffer_max_size;
126uint8_t *code_gen_ptr;
127
128#ifndef VBOX
129#if !defined(CONFIG_USER_ONLY)
130ram_addr_t phys_ram_size;
131int phys_ram_fd;
132uint8_t *phys_ram_base;
133uint8_t *phys_ram_dirty;
134static int in_migration;
135static ram_addr_t phys_ram_alloc_offset = 0;
136#endif
137#else /* VBOX */
138RTGCPHYS phys_ram_size;
139/* we have memory ranges (the high PC-BIOS mapping) which
140 cause some pages to fall outside the dirty map here. */
141RTGCPHYS phys_ram_dirty_size;
142#endif /* VBOX */
143#if !defined(VBOX)
144uint8_t *phys_ram_base;
145#endif
146uint8_t *phys_ram_dirty;
147
148CPUState *first_cpu;
149/* current CPU in the current thread. It is only valid inside
150 cpu_exec() */
151CPUState *cpu_single_env;
152/* 0 = Do not count executed instructions.
153 1 = Precise instruction counting.
154 2 = Adaptive rate instruction counting. */
155int use_icount = 0;
156/* Current instruction counter. While executing translated code this may
157 include some instructions that have not yet been executed. */
158int64_t qemu_icount;
159
160typedef struct PageDesc {
161 /* list of TBs intersecting this ram page */
162 TranslationBlock *first_tb;
163 /* in order to optimize self-modifying code handling, we count the number
164 of code write accesses to a given page before switching to a bitmap */
165 unsigned int code_write_count;
166 uint8_t *code_bitmap;
167#if defined(CONFIG_USER_ONLY)
168 unsigned long flags;
169#endif
170} PageDesc;
171
172typedef struct PhysPageDesc {
173 /* offset in host memory of the page + io_index in the low 12 bits */
174 ram_addr_t phys_offset;
175} PhysPageDesc;
176
177#define L2_BITS 10
178#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
179/* XXX: this is a temporary hack for the alpha target.
180 * In the future, this is to be replaced by a multi-level table
181 * to actually be able to handle the complete 64-bit address space.
182 */
183#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
184#else
185#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
186#endif
187#ifdef VBOX
188#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
189#endif
190
191#ifdef VBOX
192#define L0_SIZE (1 << L0_BITS)
193#endif
194#define L1_SIZE (1 << L1_BITS)
195#define L2_SIZE (1 << L2_BITS)
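/* Added note (illustrative): with L2_BITS = 10 and the usual 4 KiB target
   page (TARGET_PAGE_BITS = 12), a 32-bit target gets L1_BITS = 10, i.e. a
   1024 x 1024 two-level array of PageDesc covering the full 4 GiB space.
   The VBox build puts an extra level of L0_BITS = TARGET_PHYS_ADDR_SPACE_BITS
   - 32 on top, and a page index then splits as
       i0 = index >> (L1_BITS + L2_BITS)           l0 map slot
       i1 = (index >> L2_BITS) & (L1_SIZE - 1)     l1 map slot
       i2 = index & (L2_SIZE - 1)                  entry inside the l2 leaf
   which is exactly the decomposition used by page_l1_map() and
   phys_page_find_alloc() below. */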
196
197static void io_mem_init(void);
198
199unsigned long qemu_real_host_page_size;
200unsigned long qemu_host_page_bits;
201unsigned long qemu_host_page_size;
202unsigned long qemu_host_page_mask;
203
204/* XXX: for system emulation, it could just be an array */
205#ifndef VBOX
206static PageDesc *l1_map[L1_SIZE];
207static PhysPageDesc **l1_phys_map;
208#else
209static unsigned l0_map_max_used = 0;
210static PageDesc **l0_map[L0_SIZE];
211static void **l0_phys_map[L0_SIZE];
212#endif
213
214#if !defined(CONFIG_USER_ONLY)
215static void io_mem_init(void);
216
217/* io memory support */
218CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
220void *io_mem_opaque[IO_MEM_NB_ENTRIES];
221static int io_mem_nb;
222static int io_mem_watch;
223#endif
224
225#ifndef VBOX
226/* log support */
227static const char *logfilename = "/tmp/qemu.log";
228#endif /* !VBOX */
229FILE *logfile;
230int loglevel;
231#ifndef VBOX
232static int log_append = 0;
233#endif
234
235/* statistics */
236#ifndef VBOX
237static int tlb_flush_count;
238static int tb_flush_count;
239static int tb_phys_invalidate_count;
240#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
241uint32_t tlb_flush_count;
242uint32_t tb_flush_count;
243uint32_t tb_phys_invalidate_count;
244#endif /* VBOX */
245
246#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
247typedef struct subpage_t {
248 target_phys_addr_t base;
249 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
250 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
251 void *opaque[TARGET_PAGE_SIZE][2][4];
252} subpage_t;
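/* Added note: a subpage_t splits a single target page between several I/O
   handlers; the tables are indexed by the byte offset of the access inside
   the page (SUBPAGE_IDX(addr)) and by an access-size slot. */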
253
254
255#ifndef VBOX
256#ifdef _WIN32
257static void map_exec(void *addr, long size)
258{
259 DWORD old_protect;
260 VirtualProtect(addr, size,
261 PAGE_EXECUTE_READWRITE, &old_protect);
262
263}
264#else
265static void map_exec(void *addr, long size)
266{
267 unsigned long start, end, page_size;
268
269 page_size = getpagesize();
270 start = (unsigned long)addr;
271 start &= ~(page_size - 1);
272
273 end = (unsigned long)addr + size;
274 end += page_size - 1;
275 end &= ~(page_size - 1);
276
277 mprotect((void *)start, end - start,
278 PROT_READ | PROT_WRITE | PROT_EXEC);
279}
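/* Added example (illustrative): mprotect() works on whole host pages, so the
   requested range is widened first. With page_size = 0x1000, addr = 0x12345
   and size = 0x10, start rounds down to 0x12000 and end rounds up to 0x13000,
   so exactly that one page becomes read/write/execute. */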
280#endif
281#else // VBOX
282static void map_exec(void *addr, long size)
283{
284 RTMemProtect(addr, size,
285 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
286}
287#endif
288
289static void page_init(void)
290{
291 /* NOTE: we can always suppose that qemu_host_page_size >=
292 TARGET_PAGE_SIZE */
293#ifdef VBOX
294 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
295 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
296 qemu_real_host_page_size = PAGE_SIZE;
297#else /* !VBOX */
298#ifdef _WIN32
299 {
300 SYSTEM_INFO system_info;
301 DWORD old_protect;
302
303 GetSystemInfo(&system_info);
304 qemu_real_host_page_size = system_info.dwPageSize;
305 }
306#else
307 qemu_real_host_page_size = getpagesize();
308#endif
309#endif /* !VBOX */
310
311 if (qemu_host_page_size == 0)
312 qemu_host_page_size = qemu_real_host_page_size;
313 if (qemu_host_page_size < TARGET_PAGE_SIZE)
314 qemu_host_page_size = TARGET_PAGE_SIZE;
315 qemu_host_page_bits = 0;
316#ifndef VBOX
317 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
318#else
319 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
320#endif
321 qemu_host_page_bits++;
322 qemu_host_page_mask = ~(qemu_host_page_size - 1);
323#ifndef VBOX
324 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
325 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
326#endif
327#ifdef VBOX
328 /* We use other means to set the reserved bit on our pages */
329#else
330#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
331 {
332 long long startaddr, endaddr;
333 FILE *f;
334 int n;
335
336 mmap_lock();
337 last_brk = (unsigned long)sbrk(0);
338 f = fopen("/proc/self/maps", "r");
339 if (f) {
340 do {
341 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
342 if (n == 2) {
343 startaddr = MIN(startaddr,
344 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
345 endaddr = MIN(endaddr,
346 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
347 page_set_flags(startaddr & TARGET_PAGE_MASK,
348 TARGET_PAGE_ALIGN(endaddr),
349 PAGE_RESERVED);
350 }
351 } while (!feof(f));
352 fclose(f);
353 }
354 mmap_unlock();
355 }
356#endif
357#endif
358}
359
360#ifndef VBOX
361static inline PageDesc **page_l1_map(target_ulong index)
362#else
363DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
364#endif
365{
366#ifndef VBOX
367#if TARGET_LONG_BITS > 32
368 /* Host memory outside guest VM. For 32-bit targets we have already
369 excluded high addresses. */
370 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
371 return NULL;
372#endif
373 return &l1_map[index >> L2_BITS];
374#else /* VBOX */
375 PageDesc **l1_map;
376 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
377 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
378 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
379 NULL);
380 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
381 if (RT_UNLIKELY(!l1_map))
382 {
383 unsigned i0 = index >> (L1_BITS + L2_BITS);
384 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
385 if (RT_UNLIKELY(!l1_map))
386 return NULL;
387 if (i0 >= l0_map_max_used)
388 l0_map_max_used = i0 + 1;
389 }
390 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
391#endif /* VBOX */
392}
393
394#ifndef VBOX
395static inline PageDesc *page_find_alloc(target_ulong index)
396#else
397DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
398#endif
399{
400 PageDesc **lp, *p;
401 lp = page_l1_map(index);
402 if (!lp)
403 return NULL;
404
405 p = *lp;
406 if (!p) {
407 /* allocate if not found */
408#if defined(CONFIG_USER_ONLY)
409 unsigned long addr;
410 size_t len = sizeof(PageDesc) * L2_SIZE;
411 /* Don't use qemu_malloc because it may recurse. */
412 p = mmap(0, len, PROT_READ | PROT_WRITE,
413 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
414 *lp = p;
415 addr = h2g(p);
416 if (addr == (target_ulong)addr) {
417 page_set_flags(addr & TARGET_PAGE_MASK,
418 TARGET_PAGE_ALIGN(addr + len),
419 PAGE_RESERVED);
420 }
421#else
422 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
423 *lp = p;
424#endif
425 }
426 return p + (index & (L2_SIZE - 1));
427}
428
429#ifndef VBOX
430static inline PageDesc *page_find(target_ulong index)
431#else
432DECLINLINE(PageDesc *) page_find(target_ulong index)
433#endif
434{
435 PageDesc **lp, *p;
436 lp = page_l1_map(index);
437 if (!lp)
438 return NULL;
439
440 p = *lp;
441 if (!p)
442 return 0;
443 return p + (index & (L2_SIZE - 1));
444}
445
446static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
447{
448 void **lp, **p;
449 PhysPageDesc *pd;
450
451#ifndef VBOX
452 p = (void **)l1_phys_map;
453#if TARGET_PHYS_ADDR_SPACE_BITS > 32
454
455#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
456#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
457#endif
458 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
459 p = *lp;
460 if (!p) {
461 /* allocate if not found */
462 if (!alloc)
463 return NULL;
464 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
465 memset(p, 0, sizeof(void *) * L1_SIZE);
466 *lp = p;
467 }
468#endif
469#else /* VBOX */
470 /* level 0 lookup and lazy allocation of level 1 map. */
471 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
472 return NULL;
473 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
474 if (RT_UNLIKELY(!p)) {
475 if (!alloc)
476 return NULL;
477 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
478 memset(p, 0, sizeof(void **) * L1_SIZE);
479 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
480 }
481
482 /* level 1 lookup and lazy allocation of level 2 map. */
483#endif /* VBOX */
484 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
485 pd = *lp;
486 if (!pd) {
487 int i;
488 /* allocate if not found */
489 if (!alloc)
490 return NULL;
491 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
492 *lp = pd;
493 for (i = 0; i < L2_SIZE; i++)
494 pd[i].phys_offset = IO_MEM_UNASSIGNED;
495 }
496 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
497}
498
499#ifndef VBOX
500static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
501#else
502DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
503#endif
504{
505 return phys_page_find_alloc(index, 0);
506}
507
508#if !defined(CONFIG_USER_ONLY)
509static void tlb_protect_code(ram_addr_t ram_addr);
510static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
511 target_ulong vaddr);
512#define mmap_lock() do { } while(0)
513#define mmap_unlock() do { } while(0)
514#endif
515
516#ifdef VBOX
517/*
518 * We don't need such a huge codegen buffer, as we execute most of the code
519 * in raw or hwacc mode
520 */
521#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
522#else
523#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
524#endif
525
526#if defined(CONFIG_USER_ONLY)
527/* Currently it is not recommended to allocate big chunks of data in
528 user mode. This will change when a dedicated libc is used */
529#define USE_STATIC_CODE_GEN_BUFFER
530#endif
531
532/* VBox allocates the codegen buffer dynamically */
533#ifndef VBOX
534#ifdef USE_STATIC_CODE_GEN_BUFFER
535static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
536#endif
537#endif
538
539static void code_gen_alloc(unsigned long tb_size)
540{
541#ifdef USE_STATIC_CODE_GEN_BUFFER
542 code_gen_buffer = static_code_gen_buffer;
543 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
544 map_exec(code_gen_buffer, code_gen_buffer_size);
545#else
546#ifdef VBOX
547 /* We cannot use phys_ram_size here, as it's still 0 at this point;
548 * it only gets initialized once the RAM registration callback
549 * (REMR3NotifyPhysRamRegister()) has been called.
550 */
551 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
552#else
553 code_gen_buffer_size = tb_size;
554 if (code_gen_buffer_size == 0) {
555#if defined(CONFIG_USER_ONLY)
556 /* in user mode, phys_ram_size is not meaningful */
557 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
558#else
559 /* XXX: needs adjustments */
560 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
561#endif
562
563 }
564 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
565 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
566#endif /* VBOX */
567
568 /* The code gen buffer location may have constraints depending on
569 the host cpu and OS */
570#ifdef VBOX
571 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
572
573 if (!code_gen_buffer) {
574 LogRel(("REM: failed allocate codegen buffer %lld\n",
575 code_gen_buffer_size));
576 return;
577 }
578#else //!VBOX
579#if defined(__linux__)
580 {
581 int flags;
582 void *start = NULL;
583
584 flags = MAP_PRIVATE | MAP_ANONYMOUS;
585#if defined(__x86_64__)
586 flags |= MAP_32BIT;
587 /* Cannot map more than that */
588 if (code_gen_buffer_size > (800 * 1024 * 1024))
589 code_gen_buffer_size = (800 * 1024 * 1024);
590#elif defined(__sparc_v9__)
591 // Map the buffer below 2G, so we can use direct calls and branches
592 flags |= MAP_FIXED;
593 start = (void *) 0x60000000UL;
594 if (code_gen_buffer_size > (512 * 1024 * 1024))
595 code_gen_buffer_size = (512 * 1024 * 1024);
596#endif
597 code_gen_buffer = mmap(start, code_gen_buffer_size,
598 PROT_WRITE | PROT_READ | PROT_EXEC,
599 flags, -1, 0);
600 if (code_gen_buffer == MAP_FAILED) {
601 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
602 exit(1);
603 }
604 }
605#elif defined(__FreeBSD__)
606 {
607 int flags;
608 void *addr = NULL;
609 flags = MAP_PRIVATE | MAP_ANONYMOUS;
610#if defined(__x86_64__)
611 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
612 * 0x40000000 is free */
613 flags |= MAP_FIXED;
614 addr = (void *)0x40000000;
615 /* Cannot map more than that */
616 if (code_gen_buffer_size > (800 * 1024 * 1024))
617 code_gen_buffer_size = (800 * 1024 * 1024);
618#endif
619 code_gen_buffer = mmap(addr, code_gen_buffer_size,
620 PROT_WRITE | PROT_READ | PROT_EXEC,
621 flags, -1, 0);
622 if (code_gen_buffer == MAP_FAILED) {
623 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
624 exit(1);
625 }
626 }
627#else
628 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
629 if (!code_gen_buffer) {
630 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
631 exit(1);
632 }
633 map_exec(code_gen_buffer, code_gen_buffer_size);
634#endif
635 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
636#endif /* !VBOX */
637#endif /* !USE_STATIC_CODE_GEN_BUFFER */
638#ifndef VBOX
639 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
640#else
641 map_exec(code_gen_prologue, _1K);
642#endif
643
644 code_gen_buffer_max_size = code_gen_buffer_size -
645 code_gen_max_block_size();
646 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
647 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
648}
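/* Added note: code_gen_buffer_max_size is the flush threshold mentioned near
   the top of the file; it keeps code_gen_max_block_size() bytes of headroom
   so that a translation block started just below the threshold can never run
   past the end of the buffer, while code_gen_max_blocks sizes the tbs[] array
   from the average block size. */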
649
650/* Must be called before using the QEMU cpus. 'tb_size' is the size
651 (in bytes) allocated to the translation buffer. Zero means default
652 size. */
653void cpu_exec_init_all(unsigned long tb_size)
654{
655 cpu_gen_init();
656 code_gen_alloc(tb_size);
657 code_gen_ptr = code_gen_buffer;
658 page_init();
659#if !defined(CONFIG_USER_ONLY)
660 io_mem_init();
661#endif
662}
663
664#ifndef VBOX
665#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
666
667#define CPU_COMMON_SAVE_VERSION 1
668
669static void cpu_common_save(QEMUFile *f, void *opaque)
670{
671 CPUState *env = opaque;
672
673 qemu_put_be32s(f, &env->halted);
674 qemu_put_be32s(f, &env->interrupt_request);
675}
676
677static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
678{
679 CPUState *env = opaque;
680
681 if (version_id != CPU_COMMON_SAVE_VERSION)
682 return -EINVAL;
683
684 qemu_get_be32s(f, &env->halted);
685 qemu_get_be32s(f, &env->interrupt_request);
686 tlb_flush(env, 1);
687
688 return 0;
689}
690#endif
691#endif //!VBOX
692
693void cpu_exec_init(CPUState *env)
694{
695 CPUState **penv;
696 int cpu_index;
697
698 env->next_cpu = NULL;
699 penv = &first_cpu;
700 cpu_index = 0;
701 while (*penv != NULL) {
702 penv = (CPUState **)&(*penv)->next_cpu;
703 cpu_index++;
704 }
705 env->cpu_index = cpu_index;
706 env->nb_watchpoints = 0;
707 *penv = env;
708#ifndef VBOX
709#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
710 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
711 cpu_common_save, cpu_common_load, env);
712 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
713 cpu_save, cpu_load, env);
714#endif
715#endif // !VBOX
716}
717
718#ifndef VBOX
719static inline void invalidate_page_bitmap(PageDesc *p)
720#else
721DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
722#endif
723{
724 if (p->code_bitmap) {
725 qemu_free(p->code_bitmap);
726 p->code_bitmap = NULL;
727 }
728 p->code_write_count = 0;
729}
730
731/* set to NULL all the 'first_tb' fields in all PageDescs */
732static void page_flush_tb(void)
733{
734 int i, j;
735 PageDesc *p;
736#ifdef VBOX
737 int k;
738#endif
739
740#ifdef VBOX
741 k = l0_map_max_used;
742 while (k-- > 0) {
743 PageDesc **l1_map = l0_map[k];
744 if (l1_map) {
745#endif
746 for(i = 0; i < L1_SIZE; i++) {
747 p = l1_map[i];
748 if (p) {
749 for(j = 0; j < L2_SIZE; j++) {
750 p->first_tb = NULL;
751 invalidate_page_bitmap(p);
752 p++;
753 }
754 }
755 }
756#ifdef VBOX
757 }
758 }
759#endif
760}
761
762/* flush all the translation blocks */
763/* XXX: tb_flush is currently not thread safe */
764void tb_flush(CPUState *env1)
765{
766 CPUState *env;
767#ifdef VBOX
768 STAM_PROFILE_START(&env1->StatTbFlush, a);
769#endif
770#if defined(DEBUG_FLUSH)
771 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
772 (unsigned long)(code_gen_ptr - code_gen_buffer),
773 nb_tbs, nb_tbs > 0 ?
774 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
775#endif
776 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
777 cpu_abort(env1, "Internal error: code buffer overflow\n");
778
779 nb_tbs = 0;
780
781 for(env = first_cpu; env != NULL; env = env->next_cpu) {
782 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
783 }
784
785 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
786 page_flush_tb();
787
788 code_gen_ptr = code_gen_buffer;
789 /* XXX: flush processor icache at this point if cache flush is
790 expensive */
791 tb_flush_count++;
792#ifdef VBOX
793 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
794#endif
795}
796
797#ifdef DEBUG_TB_CHECK
798static void tb_invalidate_check(target_ulong address)
799{
800 TranslationBlock *tb;
801 int i;
802 address &= TARGET_PAGE_MASK;
803 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
804 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
805 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
806 address >= tb->pc + tb->size)) {
807 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
808 address, (long)tb->pc, tb->size);
809 }
810 }
811 }
812}
813
814/* verify that all the pages have correct rights for code */
815static void tb_page_check(void)
816{
817 TranslationBlock *tb;
818 int i, flags1, flags2;
819
820 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
821 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
822 flags1 = page_get_flags(tb->pc);
823 flags2 = page_get_flags(tb->pc + tb->size - 1);
824 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
825 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
826 (long)tb->pc, tb->size, flags1, flags2);
827 }
828 }
829 }
830}
831
832static void tb_jmp_check(TranslationBlock *tb)
833{
834 TranslationBlock *tb1;
835 unsigned int n1;
836
837 /* walk the circular jump list of this TB */
838 tb1 = tb->jmp_first;
839 for(;;) {
840 n1 = (long)tb1 & 3;
841 tb1 = (TranslationBlock *)((long)tb1 & ~3);
842 if (n1 == 2)
843 break;
844 tb1 = tb1->jmp_next[n1];
845 }
846 /* check end of list */
847 if (tb1 != tb) {
848 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
849 }
850}
851#endif // DEBUG_TB_CHECK
852
853/* invalidate one TB */
854#ifndef VBOX
855static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
856 int next_offset)
857#else
858DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
859 int next_offset)
860#endif
861{
862 TranslationBlock *tb1;
863 for(;;) {
864 tb1 = *ptb;
865 if (tb1 == tb) {
866 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
867 break;
868 }
869 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
870 }
871}
872
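/* Added note: the TB lists below keep a 2-bit tag in the low bits of each
   pointer ((long)tb & 3). Tags 0 and 1 select which page_next[]/jmp_next[]
   slot of the pointed-to TB continues the chain, and tag 2 marks the head of
   the circular jump list (the owning TB itself; see the '(long)tb | n' and
   '(long)tb | 2' assignments further down), hence the recurring '& 3' and
   '& ~3' masking in the list walkers. */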
873#ifndef VBOX
874static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
875#else
876DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
877#endif
878{
879 TranslationBlock *tb1;
880 unsigned int n1;
881
882 for(;;) {
883 tb1 = *ptb;
884 n1 = (long)tb1 & 3;
885 tb1 = (TranslationBlock *)((long)tb1 & ~3);
886 if (tb1 == tb) {
887 *ptb = tb1->page_next[n1];
888 break;
889 }
890 ptb = &tb1->page_next[n1];
891 }
892}
893
894#ifndef VBOX
895static inline void tb_jmp_remove(TranslationBlock *tb, int n)
896#else
897DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
898#endif
899{
900 TranslationBlock *tb1, **ptb;
901 unsigned int n1;
902
903 ptb = &tb->jmp_next[n];
904 tb1 = *ptb;
905 if (tb1) {
906 /* find tb(n) in circular list */
907 for(;;) {
908 tb1 = *ptb;
909 n1 = (long)tb1 & 3;
910 tb1 = (TranslationBlock *)((long)tb1 & ~3);
911 if (n1 == n && tb1 == tb)
912 break;
913 if (n1 == 2) {
914 ptb = &tb1->jmp_first;
915 } else {
916 ptb = &tb1->jmp_next[n1];
917 }
918 }
919 /* now we can suppress tb(n) from the list */
920 *ptb = tb->jmp_next[n];
921
922 tb->jmp_next[n] = NULL;
923 }
924}
925
926/* reset the jump entry 'n' of a TB so that it is not chained to
927 another TB */
928#ifndef VBOX
929static inline void tb_reset_jump(TranslationBlock *tb, int n)
930#else
931DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
932#endif
933{
934 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
935}
936
937void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
938{
939 CPUState *env;
940 PageDesc *p;
941 unsigned int h, n1;
942 target_phys_addr_t phys_pc;
943 TranslationBlock *tb1, *tb2;
944
945 /* remove the TB from the hash list */
946 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
947 h = tb_phys_hash_func(phys_pc);
948 tb_remove(&tb_phys_hash[h], tb,
949 offsetof(TranslationBlock, phys_hash_next));
950
951 /* remove the TB from the page list */
952 if (tb->page_addr[0] != page_addr) {
953 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
954 tb_page_remove(&p->first_tb, tb);
955 invalidate_page_bitmap(p);
956 }
957 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
958 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
959 tb_page_remove(&p->first_tb, tb);
960 invalidate_page_bitmap(p);
961 }
962
963 tb_invalidated_flag = 1;
964
965 /* remove the TB from the hash list */
966 h = tb_jmp_cache_hash_func(tb->pc);
967 for(env = first_cpu; env != NULL; env = env->next_cpu) {
968 if (env->tb_jmp_cache[h] == tb)
969 env->tb_jmp_cache[h] = NULL;
970 }
971
972 /* suppress this TB from the two jump lists */
973 tb_jmp_remove(tb, 0);
974 tb_jmp_remove(tb, 1);
975
976 /* suppress any remaining jumps to this TB */
977 tb1 = tb->jmp_first;
978 for(;;) {
979 n1 = (long)tb1 & 3;
980 if (n1 == 2)
981 break;
982 tb1 = (TranslationBlock *)((long)tb1 & ~3);
983 tb2 = tb1->jmp_next[n1];
984 tb_reset_jump(tb1, n1);
985 tb1->jmp_next[n1] = NULL;
986 tb1 = tb2;
987 }
988 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
989
990 tb_phys_invalidate_count++;
991}
992
993
994#ifdef VBOX
995void tb_invalidate_virt(CPUState *env, uint32_t eip)
996{
997# if 1
998 tb_flush(env);
999# else
1000 uint8_t *cs_base, *pc;
1001 unsigned int flags, h, phys_pc;
1002 TranslationBlock *tb, **ptb;
1003
1004 flags = env->hflags;
1005 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1006 cs_base = env->segs[R_CS].base;
1007 pc = cs_base + eip;
1008
1009 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
1010 flags);
1011
1012 if(tb)
1013 {
1014# ifdef DEBUG
1015 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1016# endif
1017 tb_invalidate(tb);
1018 //Note: this will leak TBs, but the whole cache will be flushed
1019 // when it happens too often
1020 tb->pc = 0;
1021 tb->cs_base = 0;
1022 tb->flags = 0;
1023 }
1024# endif
1025}
1026
1027# ifdef VBOX_STRICT
1028/**
1029 * Gets the page offset.
1030 */
1031unsigned long get_phys_page_offset(target_ulong addr)
1032{
1033 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1034 return p ? p->phys_offset : 0;
1035}
1036# endif /* VBOX_STRICT */
1037#endif /* VBOX */
1038
1039#ifndef VBOX
1040static inline void set_bits(uint8_t *tab, int start, int len)
1041#else
1042DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
1043#endif
1044{
1045 int end, mask, end1;
1046
1047 end = start + len;
1048 tab += start >> 3;
1049 mask = 0xff << (start & 7);
1050 if ((start & ~7) == (end & ~7)) {
1051 if (start < end) {
1052 mask &= ~(0xff << (end & 7));
1053 *tab |= mask;
1054 }
1055 } else {
1056 *tab++ |= mask;
1057 start = (start + 8) & ~7;
1058 end1 = end & ~7;
1059 while (start < end1) {
1060 *tab++ = 0xff;
1061 start += 8;
1062 }
1063 if (start < end) {
1064 mask = ~(0xff << (end & 7));
1065 *tab |= mask;
1066 }
1067 }
1068}
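/* Added example (illustrative): set_bits(tab, 3, 7) marks bits 3..9, i.e.
   tab[0] |= 0xf8 (bits 3-7) and tab[1] |= 0x03 (bits 8-9). build_page_bitmap()
   below uses this to record, one bit per guest byte, which bytes of the page
   are covered by translated code. */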
1069
1070static void build_page_bitmap(PageDesc *p)
1071{
1072 int n, tb_start, tb_end;
1073 TranslationBlock *tb;
1074
1075 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1076 if (!p->code_bitmap)
1077 return;
1078 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1079
1080 tb = p->first_tb;
1081 while (tb != NULL) {
1082 n = (long)tb & 3;
1083 tb = (TranslationBlock *)((long)tb & ~3);
1084 /* NOTE: this is subtle as a TB may span two physical pages */
1085 if (n == 0) {
1086 /* NOTE: tb_end may be after the end of the page, but
1087 it is not a problem */
1088 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1089 tb_end = tb_start + tb->size;
1090 if (tb_end > TARGET_PAGE_SIZE)
1091 tb_end = TARGET_PAGE_SIZE;
1092 } else {
1093 tb_start = 0;
1094 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1095 }
1096 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1097 tb = tb->page_next[n];
1098 }
1099}
1100
1101TranslationBlock *tb_gen_code(CPUState *env,
1102 target_ulong pc, target_ulong cs_base,
1103 int flags, int cflags)
1104{
1105 TranslationBlock *tb;
1106 uint8_t *tc_ptr;
1107 target_ulong phys_pc, phys_page2, virt_page2;
1108 int code_gen_size;
1109
1110 phys_pc = get_phys_addr_code(env, pc);
1111 tb = tb_alloc(pc);
1112 if (!tb) {
1113 /* flush must be done */
1114 tb_flush(env);
1115 /* cannot fail at this point */
1116 tb = tb_alloc(pc);
1117 /* Don't forget to invalidate previous TB info. */
1118 tb_invalidated_flag = 1;
1119 }
1120 tc_ptr = code_gen_ptr;
1121 tb->tc_ptr = tc_ptr;
1122 tb->cs_base = cs_base;
1123 tb->flags = flags;
1124 tb->cflags = cflags;
1125 cpu_gen_code(env, tb, &code_gen_size);
1126 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1127
1128 /* check next page if needed */
1129 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1130 phys_page2 = -1;
1131 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1132 phys_page2 = get_phys_addr_code(env, virt_page2);
1133 }
1134 tb_link_phys(tb, phys_pc, phys_page2);
1135 return tb;
1136}
1137
1138/* invalidate all TBs which intersect with the target physical address
1139 range [start;end[. NOTE: start and end must refer to
1140 the same physical page. 'is_cpu_write_access' should be true if called
1141 from a real cpu write access: the virtual CPU will exit the current
1142 TB if code is modified inside this TB. */
1143void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1144 int is_cpu_write_access)
1145{
1146 int n, current_tb_modified, current_tb_not_found, current_flags;
1147 CPUState *env = cpu_single_env;
1148 PageDesc *p;
1149 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1150 target_ulong tb_start, tb_end;
1151 target_ulong current_pc, current_cs_base;
1152
1153 p = page_find(start >> TARGET_PAGE_BITS);
1154 if (!p)
1155 return;
1156 if (!p->code_bitmap &&
1157 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1158 is_cpu_write_access) {
1159 /* build code bitmap */
1160 build_page_bitmap(p);
1161 }
1162
1163 /* we remove all the TBs in the range [start, end[ */
1164 /* XXX: see if in some cases it could be faster to invalidate all the code */
1165 current_tb_not_found = is_cpu_write_access;
1166 current_tb_modified = 0;
1167 current_tb = NULL; /* avoid warning */
1168 current_pc = 0; /* avoid warning */
1169 current_cs_base = 0; /* avoid warning */
1170 current_flags = 0; /* avoid warning */
1171 tb = p->first_tb;
1172 while (tb != NULL) {
1173 n = (long)tb & 3;
1174 tb = (TranslationBlock *)((long)tb & ~3);
1175 tb_next = tb->page_next[n];
1176 /* NOTE: this is subtle as a TB may span two physical pages */
1177 if (n == 0) {
1178 /* NOTE: tb_end may be after the end of the page, but
1179 it is not a problem */
1180 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1181 tb_end = tb_start + tb->size;
1182 } else {
1183 tb_start = tb->page_addr[1];
1184 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1185 }
1186 if (!(tb_end <= start || tb_start >= end)) {
1187#ifdef TARGET_HAS_PRECISE_SMC
1188 if (current_tb_not_found) {
1189 current_tb_not_found = 0;
1190 current_tb = NULL;
1191 if (env->mem_io_pc) {
1192 /* now we have a real cpu fault */
1193 current_tb = tb_find_pc(env->mem_io_pc);
1194 }
1195 }
1196 if (current_tb == tb &&
1197 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1198 /* If we are modifying the current TB, we must stop
1199 its execution. We could be more precise by checking
1200 that the modification is after the current PC, but it
1201 would require a specialized function to partially
1202 restore the CPU state */
1203
1204 current_tb_modified = 1;
1205 cpu_restore_state(current_tb, env,
1206 env->mem_io_pc, NULL);
1207#if defined(TARGET_I386)
1208 current_flags = env->hflags;
1209 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1210 current_cs_base = (target_ulong)env->segs[R_CS].base;
1211 current_pc = current_cs_base + env->eip;
1212#else
1213#error unsupported CPU
1214#endif
1215 }
1216#endif /* TARGET_HAS_PRECISE_SMC */
1217 /* we need to do that to handle the case where a signal
1218 occurs while doing tb_phys_invalidate() */
1219 saved_tb = NULL;
1220 if (env) {
1221 saved_tb = env->current_tb;
1222 env->current_tb = NULL;
1223 }
1224 tb_phys_invalidate(tb, -1);
1225 if (env) {
1226 env->current_tb = saved_tb;
1227 if (env->interrupt_request && env->current_tb)
1228 cpu_interrupt(env, env->interrupt_request);
1229 }
1230 }
1231 tb = tb_next;
1232 }
1233#if !defined(CONFIG_USER_ONLY)
1234 /* if no code remaining, no need to continue to use slow writes */
1235 if (!p->first_tb) {
1236 invalidate_page_bitmap(p);
1237 if (is_cpu_write_access) {
1238 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1239 }
1240 }
1241#endif
1242#ifdef TARGET_HAS_PRECISE_SMC
1243 if (current_tb_modified) {
1244 /* we generate a block containing just the instruction
1245 modifying the memory. It will ensure that it cannot modify
1246 itself */
1247 env->current_tb = NULL;
1248 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1249 cpu_resume_from_signal(env, NULL);
1250 }
1251#endif
1252}
1253
1254
1255/* len must be <= 8 and start must be a multiple of len */
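/* Added example (illustrative): for a 4-byte write at page offset 0x123 the
   code below reads code_bitmap[0x24] (0x123 >> 3), shifts it right by 3
   (0x123 & 7) and tests the low 4 bits, which correspond exactly to page
   bytes 0x123..0x126; only if one of them is set is the slow
   tb_invalidate_phys_page_range() path taken. */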
1256#ifndef VBOX
1257static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1258#else
1259DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1260#endif
1261{
1262 PageDesc *p;
1263 int offset, b;
1264#if 0
1265 if (1) {
1266 if (loglevel) {
1267 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1268 cpu_single_env->mem_io_vaddr, len,
1269 cpu_single_env->eip,
1270 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1271 }
1272 }
1273#endif
1274 p = page_find(start >> TARGET_PAGE_BITS);
1275 if (!p)
1276 return;
1277 if (p->code_bitmap) {
1278 offset = start & ~TARGET_PAGE_MASK;
1279 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1280 if (b & ((1 << len) - 1))
1281 goto do_invalidate;
1282 } else {
1283 do_invalidate:
1284 tb_invalidate_phys_page_range(start, start + len, 1);
1285 }
1286}
1287
1288
1289#if !defined(CONFIG_SOFTMMU)
1290static void tb_invalidate_phys_page(target_phys_addr_t addr,
1291 unsigned long pc, void *puc)
1292{
1293 int n, current_flags, current_tb_modified;
1294 target_ulong current_pc, current_cs_base;
1295 PageDesc *p;
1296 TranslationBlock *tb, *current_tb;
1297#ifdef TARGET_HAS_PRECISE_SMC
1298 CPUState *env = cpu_single_env;
1299#endif
1300
1301 addr &= TARGET_PAGE_MASK;
1302 p = page_find(addr >> TARGET_PAGE_BITS);
1303 if (!p)
1304 return;
1305 tb = p->first_tb;
1306 current_tb_modified = 0;
1307 current_tb = NULL;
1308 current_pc = 0; /* avoid warning */
1309 current_cs_base = 0; /* avoid warning */
1310 current_flags = 0; /* avoid warning */
1311#ifdef TARGET_HAS_PRECISE_SMC
1312 if (tb && pc != 0) {
1313 current_tb = tb_find_pc(pc);
1314 }
1315#endif
1316 while (tb != NULL) {
1317 n = (long)tb & 3;
1318 tb = (TranslationBlock *)((long)tb & ~3);
1319#ifdef TARGET_HAS_PRECISE_SMC
1320 if (current_tb == tb &&
1321 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1322 /* If we are modifying the current TB, we must stop
1323 its execution. We could be more precise by checking
1324 that the modification is after the current PC, but it
1325 would require a specialized function to partially
1326 restore the CPU state */
1327
1328 current_tb_modified = 1;
1329 cpu_restore_state(current_tb, env, pc, puc);
1330#if defined(TARGET_I386)
1331 current_flags = env->hflags;
1332 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1333 current_cs_base = (target_ulong)env->segs[R_CS].base;
1334 current_pc = current_cs_base + env->eip;
1335#else
1336#error unsupported CPU
1337#endif
1338 }
1339#endif /* TARGET_HAS_PRECISE_SMC */
1340 tb_phys_invalidate(tb, addr);
1341 tb = tb->page_next[n];
1342 }
1343 p->first_tb = NULL;
1344#ifdef TARGET_HAS_PRECISE_SMC
1345 if (current_tb_modified) {
1346 /* we generate a block containing just the instruction
1347 modifying the memory. It will ensure that it cannot modify
1348 itself */
1349 env->current_tb = NULL;
1350 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1351 cpu_resume_from_signal(env, puc);
1352 }
1353#endif
1354}
1355#endif
1356
1357/* add the tb in the target page and protect it if necessary */
1358#ifndef VBOX
1359static inline void tb_alloc_page(TranslationBlock *tb,
1360 unsigned int n, target_ulong page_addr)
1361#else
1362DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1363 unsigned int n, target_ulong page_addr)
1364#endif
1365{
1366 PageDesc *p;
1367 TranslationBlock *last_first_tb;
1368
1369 tb->page_addr[n] = page_addr;
1370 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1371 tb->page_next[n] = p->first_tb;
1372 last_first_tb = p->first_tb;
1373 p->first_tb = (TranslationBlock *)((long)tb | n);
1374 invalidate_page_bitmap(p);
1375
1376#if defined(TARGET_HAS_SMC) || 1
1377
1378#if defined(CONFIG_USER_ONLY)
1379 if (p->flags & PAGE_WRITE) {
1380 target_ulong addr;
1381 PageDesc *p2;
1382 int prot;
1383
1384 /* force the host page as non-writable (writes will have a
1385 page fault + mprotect overhead) */
1386 page_addr &= qemu_host_page_mask;
1387 prot = 0;
1388 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1389 addr += TARGET_PAGE_SIZE) {
1390
1391 p2 = page_find (addr >> TARGET_PAGE_BITS);
1392 if (!p2)
1393 continue;
1394 prot |= p2->flags;
1395 p2->flags &= ~PAGE_WRITE;
1396 page_get_flags(addr);
1397 }
1398 mprotect(g2h(page_addr), qemu_host_page_size,
1399 (prot & PAGE_BITS) & ~PAGE_WRITE);
1400#ifdef DEBUG_TB_INVALIDATE
1401 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1402 page_addr);
1403#endif
1404 }
1405#else
1406 /* if some code is already present, then the pages are already
1407 protected. So we handle the case where only the first TB is
1408 allocated in a physical page */
1409 if (!last_first_tb) {
1410 tlb_protect_code(page_addr);
1411 }
1412#endif
1413
1414#endif /* TARGET_HAS_SMC */
1415}
1416
1417/* Allocate a new translation block. Flush the translation buffer if
1418 too many translation blocks or too much generated code. */
1419TranslationBlock *tb_alloc(target_ulong pc)
1420{
1421 TranslationBlock *tb;
1422
1423 if (nb_tbs >= code_gen_max_blocks ||
1424#ifndef VBOX
1425 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1426#else
1427 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1428#endif
1429 return NULL;
1430 tb = &tbs[nb_tbs++];
1431 tb->pc = pc;
1432 tb->cflags = 0;
1433 return tb;
1434}
1435
1436void tb_free(TranslationBlock *tb)
1437{
1438 /* In practice this is mostly used for single-use temporary TBs.
1439 Ignore the hard cases and just back up if this TB happens to
1440 be the last one generated. */
1441 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1442 code_gen_ptr = tb->tc_ptr;
1443 nb_tbs--;
1444 }
1445}
1446
1447/* add a new TB and link it to the physical page tables. phys_page2 is
1448 (-1) to indicate that only one page contains the TB. */
1449void tb_link_phys(TranslationBlock *tb,
1450 target_ulong phys_pc, target_ulong phys_page2)
1451{
1452 unsigned int h;
1453 TranslationBlock **ptb;
1454
1455 /* Grab the mmap lock to stop another thread invalidating this TB
1456 before we are done. */
1457 mmap_lock();
1458 /* add in the physical hash table */
1459 h = tb_phys_hash_func(phys_pc);
1460 ptb = &tb_phys_hash[h];
1461 tb->phys_hash_next = *ptb;
1462 *ptb = tb;
1463
1464 /* add in the page list */
1465 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1466 if (phys_page2 != -1)
1467 tb_alloc_page(tb, 1, phys_page2);
1468 else
1469 tb->page_addr[1] = -1;
1470
1471 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1472 tb->jmp_next[0] = NULL;
1473 tb->jmp_next[1] = NULL;
1474
1475 /* init original jump addresses */
1476 if (tb->tb_next_offset[0] != 0xffff)
1477 tb_reset_jump(tb, 0);
1478 if (tb->tb_next_offset[1] != 0xffff)
1479 tb_reset_jump(tb, 1);
1480
1481#ifdef DEBUG_TB_CHECK
1482 tb_page_check();
1483#endif
1484 mmap_unlock();
1485}
1486
1487/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1488 tb[1].tc_ptr. Return NULL if not found */
1489TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1490{
1491 int m_min, m_max, m;
1492 unsigned long v;
1493 TranslationBlock *tb;
1494
1495 if (nb_tbs <= 0)
1496 return NULL;
1497 if (tc_ptr < (unsigned long)code_gen_buffer ||
1498 tc_ptr >= (unsigned long)code_gen_ptr)
1499 return NULL;
1500 /* binary search (cf Knuth) */
1501 m_min = 0;
1502 m_max = nb_tbs - 1;
1503 while (m_min <= m_max) {
1504 m = (m_min + m_max) >> 1;
1505 tb = &tbs[m];
1506 v = (unsigned long)tb->tc_ptr;
1507 if (v == tc_ptr)
1508 return tb;
1509 else if (tc_ptr < v) {
1510 m_max = m - 1;
1511 } else {
1512 m_min = m + 1;
1513 }
1514 }
1515 return &tbs[m_max];
1516}
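/* Added note: tbs[] is filled in generation order, so the tc_ptr values are
   ascending; when the search above does not hit an exact match it ends with
   m_max indexing the last TB whose tc_ptr is <= tc_ptr, i.e. the block whose
   generated code contains tc_ptr. */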
1517
1518static void tb_reset_jump_recursive(TranslationBlock *tb);
1519
1520#ifndef VBOX
1521static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1522#else
1523DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1524#endif
1525{
1526 TranslationBlock *tb1, *tb_next, **ptb;
1527 unsigned int n1;
1528
1529 tb1 = tb->jmp_next[n];
1530 if (tb1 != NULL) {
1531 /* find head of list */
1532 for(;;) {
1533 n1 = (long)tb1 & 3;
1534 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1535 if (n1 == 2)
1536 break;
1537 tb1 = tb1->jmp_next[n1];
1538 }
1539 /* we are now sure that tb jumps to tb1 */
1540 tb_next = tb1;
1541
1542 /* remove tb from the jmp_first list */
1543 ptb = &tb_next->jmp_first;
1544 for(;;) {
1545 tb1 = *ptb;
1546 n1 = (long)tb1 & 3;
1547 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1548 if (n1 == n && tb1 == tb)
1549 break;
1550 ptb = &tb1->jmp_next[n1];
1551 }
1552 *ptb = tb->jmp_next[n];
1553 tb->jmp_next[n] = NULL;
1554
1555 /* suppress the jump to next tb in generated code */
1556 tb_reset_jump(tb, n);
1557
1558 /* suppress jumps in the tb on which we could have jumped */
1559 tb_reset_jump_recursive(tb_next);
1560 }
1561}
1562
1563static void tb_reset_jump_recursive(TranslationBlock *tb)
1564{
1565 tb_reset_jump_recursive2(tb, 0);
1566 tb_reset_jump_recursive2(tb, 1);
1567}
1568
1569#if defined(TARGET_HAS_ICE)
1570static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1571{
1572 target_ulong addr, pd;
1573 ram_addr_t ram_addr;
1574 PhysPageDesc *p;
1575
1576 addr = cpu_get_phys_page_debug(env, pc);
1577 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1578 if (!p) {
1579 pd = IO_MEM_UNASSIGNED;
1580 } else {
1581 pd = p->phys_offset;
1582 }
1583 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1584 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1585}
1586#endif
1587
1588/* Add a watchpoint. */
1589int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1590{
1591 int i;
1592
1593 for (i = 0; i < env->nb_watchpoints; i++) {
1594 if (addr == env->watchpoint[i].vaddr)
1595 return 0;
1596 }
1597 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1598 return -1;
1599
1600 i = env->nb_watchpoints++;
1601 env->watchpoint[i].vaddr = addr;
1602 env->watchpoint[i].type = type;
1603 tlb_flush_page(env, addr);
1604 /* FIXME: This flush is needed because of the hack to make memory ops
1605 terminate the TB. It can be removed once the proper IO trap and
1606 re-execute bits are in. */
1607 tb_flush(env);
1608 return i;
1609}
1610
1611/* Remove a watchpoint. */
1612int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1613{
1614 int i;
1615
1616 for (i = 0; i < env->nb_watchpoints; i++) {
1617 if (addr == env->watchpoint[i].vaddr) {
1618 env->nb_watchpoints--;
1619 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1620 tlb_flush_page(env, addr);
1621 return 0;
1622 }
1623 }
1624 return -1;
1625}
1626
1627/* Remove all watchpoints. */
1628void cpu_watchpoint_remove_all(CPUState *env) {
1629 int i;
1630
1631 for (i = 0; i < env->nb_watchpoints; i++) {
1632 tlb_flush_page(env, env->watchpoint[i].vaddr);
1633 }
1634 env->nb_watchpoints = 0;
1635}
1636
1637/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1638 breakpoint is reached */
1639int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1640{
1641#if defined(TARGET_HAS_ICE)
1642 int i;
1643
1644 for(i = 0; i < env->nb_breakpoints; i++) {
1645 if (env->breakpoints[i] == pc)
1646 return 0;
1647 }
1648
1649 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1650 return -1;
1651 env->breakpoints[env->nb_breakpoints++] = pc;
1652
1653 breakpoint_invalidate(env, pc);
1654 return 0;
1655#else
1656 return -1;
1657#endif
1658}
1659
1660/* remove all breakpoints */
1661void cpu_breakpoint_remove_all(CPUState *env) {
1662#if defined(TARGET_HAS_ICE)
1663 int i;
1664 for(i = 0; i < env->nb_breakpoints; i++) {
1665 breakpoint_invalidate(env, env->breakpoints[i]);
1666 }
1667 env->nb_breakpoints = 0;
1668#endif
1669}
1670
1671/* remove a breakpoint */
1672int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1673{
1674#if defined(TARGET_HAS_ICE)
1675 int i;
1676 for(i = 0; i < env->nb_breakpoints; i++) {
1677 if (env->breakpoints[i] == pc)
1678 goto found;
1679 }
1680 return -1;
1681 found:
1682 env->nb_breakpoints--;
1683 if (i < env->nb_breakpoints)
1684 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1685
1686 breakpoint_invalidate(env, pc);
1687 return 0;
1688#else
1689 return -1;
1690#endif
1691}
1692
1693/* enable or disable single step mode. EXCP_DEBUG is returned by the
1694 CPU loop after each instruction */
1695void cpu_single_step(CPUState *env, int enabled)
1696{
1697#if defined(TARGET_HAS_ICE)
1698 if (env->singlestep_enabled != enabled) {
1699 env->singlestep_enabled = enabled;
1700 /* must flush all the translated code to avoid inconsistencies */
1701 /* XXX: only flush what is necessary */
1702 tb_flush(env);
1703 }
1704#endif
1705}
1706
1707#ifndef VBOX
1708/* enable or disable low-level logging */
1709void cpu_set_log(int log_flags)
1710{
1711 loglevel = log_flags;
1712 if (loglevel && !logfile) {
1713 logfile = fopen(logfilename, "w");
1714 if (!logfile) {
1715 perror(logfilename);
1716 _exit(1);
1717 }
1718#if !defined(CONFIG_SOFTMMU)
1719 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1720 {
1721 static uint8_t logfile_buf[4096];
1722 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1723 }
1724#else
1725 setvbuf(logfile, NULL, _IOLBF, 0);
1726#endif
1727 }
1728}
1729
1730void cpu_set_log_filename(const char *filename)
1731{
1732 logfilename = strdup(filename);
1733}
1734#endif /* !VBOX */
1735
1736/* mask must never be zero, except for A20 change call */
1737void cpu_interrupt(CPUState *env, int mask)
1738{
1739#if !defined(USE_NPTL)
1740 TranslationBlock *tb;
1741 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1742#endif
1743 int old_mask;
1744
1745 old_mask = env->interrupt_request;
1746#ifdef VBOX
1747 VM_ASSERT_EMT(env->pVM);
1748 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1749#else /* !VBOX */
1750 /* FIXME: This is probably not threadsafe. A different thread could
1751 be in the middle of a read-modify-write operation. */
1752 env->interrupt_request |= mask;
1753#endif /* !VBOX */
1754#if defined(USE_NPTL)
1755 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1756 problem and hope the cpu will stop of its own accord. For userspace
1757 emulation this often isn't actually as bad as it sounds. Often
1758 signals are used primarily to interrupt blocking syscalls. */
1759#else
1760 if (use_icount) {
1761 env->icount_decr.u16.high = 0xffff;
1762#ifndef CONFIG_USER_ONLY
1763 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1764 an async event happened and we need to process it. */
1765 if (!can_do_io(env)
1766 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1767 cpu_abort(env, "Raised interrupt while not in I/O function");
1768 }
1769#endif
1770 } else {
1771 tb = env->current_tb;
1772 /* if the cpu is currently executing code, we must unlink it and
1773 all the potentially executing TBs */
1774 if (tb && !testandset(&interrupt_lock)) {
1775 env->current_tb = NULL;
1776 tb_reset_jump_recursive(tb);
1777 resetlock(&interrupt_lock);
1778 }
1779 }
1780#endif
1781}
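/* Added note (assumption about the icount machinery): storing 0xffff into
   icount_decr.u16.high makes the per-TB instruction counter appear negative
   at its next check, so the running TB stops and control returns to cpu_exec()
   where interrupt_request is examined; the non-icount path achieves the same
   by unlinking the currently executing TB chain above. */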
1782
1783void cpu_reset_interrupt(CPUState *env, int mask)
1784{
1785#ifdef VBOX
1786 /*
1787 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1788 * for future changes!
1789 */
1790 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1791#else /* !VBOX */
1792 env->interrupt_request &= ~mask;
1793#endif /* !VBOX */
1794}
1795
1796#ifndef VBOX
1797CPULogItem cpu_log_items[] = {
1798 { CPU_LOG_TB_OUT_ASM, "out_asm",
1799 "show generated host assembly code for each compiled TB" },
1800 { CPU_LOG_TB_IN_ASM, "in_asm",
1801 "show target assembly code for each compiled TB" },
1802 { CPU_LOG_TB_OP, "op",
1803 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1804#ifdef TARGET_I386
1805 { CPU_LOG_TB_OP_OPT, "op_opt",
1806 "show micro ops after optimization for each compiled TB" },
1807#endif
1808 { CPU_LOG_INT, "int",
1809 "show interrupts/exceptions in short format" },
1810 { CPU_LOG_EXEC, "exec",
1811 "show trace before each executed TB (lots of logs)" },
1812 { CPU_LOG_TB_CPU, "cpu",
1813 "show CPU state before bloc translation" },
1814#ifdef TARGET_I386
1815 { CPU_LOG_PCALL, "pcall",
1816 "show protected mode far calls/returns/exceptions" },
1817#endif
1818#ifdef DEBUG_IOPORT
1819 { CPU_LOG_IOPORT, "ioport",
1820 "show all i/o ports accesses" },
1821#endif
1822 { 0, NULL, NULL },
1823};
1824
1825static int cmp1(const char *s1, int n, const char *s2)
1826{
1827 if (strlen(s2) != n)
1828 return 0;
1829 return memcmp(s1, s2, n) == 0;
1830}
1831
1832/* takes a comma-separated list of log masks. Returns 0 on error. */
1833int cpu_str_to_log_mask(const char *str)
1834{
1835 CPULogItem *item;
1836 int mask;
1837 const char *p, *p1;
1838
1839 p = str;
1840 mask = 0;
1841 for(;;) {
1842 p1 = strchr(p, ',');
1843 if (!p1)
1844 p1 = p + strlen(p);
1845 if(cmp1(p,p1-p,"all")) {
1846 for(item = cpu_log_items; item->mask != 0; item++) {
1847 mask |= item->mask;
1848 }
1849 } else {
1850 for(item = cpu_log_items; item->mask != 0; item++) {
1851 if (cmp1(p, p1 - p, item->name))
1852 goto found;
1853 }
1854 return 0;
1855 }
1856 found:
1857 mask |= item->mask;
1858 if (*p1 != ',')
1859 break;
1860 p = p1 + 1;
1861 }
1862 return mask;
1863}
1864#endif /* !VBOX */
1865
1866#ifndef VBOX /* VBOX: we have our own routine. */
1867void cpu_abort(CPUState *env, const char *fmt, ...)
1868{
1869 va_list ap;
1870
1871 va_start(ap, fmt);
1872 fprintf(stderr, "qemu: fatal: ");
1873 vfprintf(stderr, fmt, ap);
1874 fprintf(stderr, "\n");
1875#ifdef TARGET_I386
1876 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1877#else
1878 cpu_dump_state(env, stderr, fprintf, 0);
1879#endif
1880 va_end(ap);
1881 abort();
1882}
1883#endif /* !VBOX */
1884
1885#ifndef VBOX
1886CPUState *cpu_copy(CPUState *env)
1887{
1888 CPUState *new_env = cpu_init(env->cpu_model_str);
1889 /* preserve chaining and index */
1890 CPUState *next_cpu = new_env->next_cpu;
1891 int cpu_index = new_env->cpu_index;
1892 memcpy(new_env, env, sizeof(CPUState));
1893 new_env->next_cpu = next_cpu;
1894 new_env->cpu_index = cpu_index;
1895 return new_env;
1896}
1897#endif
1898
1899#if !defined(CONFIG_USER_ONLY)
1900
1901#ifndef VBOX
1902static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1903#else
1904DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1905#endif
1906{
1907 unsigned int i;
1908
1909 /* Discard jump cache entries for any tb which might potentially
1910 overlap the flushed page. */
1911 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1912 memset (&env->tb_jmp_cache[i], 0,
1913 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1914
1915 i = tb_jmp_cache_hash_page(addr);
1916 memset (&env->tb_jmp_cache[i], 0,
1917 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1918
1919#ifdef VBOX
1920 /* inform raw mode about TLB page flush */
1921 remR3FlushPage(env, addr);
1922#endif /* VBOX */
1923}
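/* Added note: two stretches of tb_jmp_cache are cleared because a TB spans at
   most two pages; a block that starts in the page preceding 'addr' may extend
   into the flushed page, hence the extra pass at addr - TARGET_PAGE_SIZE. */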
1924
1925/* NOTE: if flush_global is true, also flush global entries (not
1926 implemented yet) */
1927void tlb_flush(CPUState *env, int flush_global)
1928{
1929 int i;
1930#if defined(DEBUG_TLB)
1931 printf("tlb_flush:\n");
1932#endif
1933 /* must reset current TB so that interrupts cannot modify the
1934 links while we are modifying them */
1935 env->current_tb = NULL;
1936
1937 for(i = 0; i < CPU_TLB_SIZE; i++) {
1938 env->tlb_table[0][i].addr_read = -1;
1939 env->tlb_table[0][i].addr_write = -1;
1940 env->tlb_table[0][i].addr_code = -1;
1941 env->tlb_table[1][i].addr_read = -1;
1942 env->tlb_table[1][i].addr_write = -1;
1943 env->tlb_table[1][i].addr_code = -1;
1944#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1945 env->phys_addends[0][i] = -1;
1946 env->phys_addends[1][i] = -1;
1947#endif
1948#if (NB_MMU_MODES >= 3)
1949 env->tlb_table[2][i].addr_read = -1;
1950 env->tlb_table[2][i].addr_write = -1;
1951 env->tlb_table[2][i].addr_code = -1;
1952#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1953 env->phys_addends[2][i] = -1;
1954#endif
1955#if (NB_MMU_MODES == 4)
1956 env->tlb_table[3][i].addr_read = -1;
1957 env->tlb_table[3][i].addr_write = -1;
1958 env->tlb_table[3][i].addr_code = -1;
1959#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1960 env->phys_addends[3][i] = -1;
1961#endif
1962#endif
1963#endif
1964 }
1965
1966 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1967
1968#ifdef VBOX
1969 /* inform raw mode about TLB flush */
1970 remR3FlushTLB(env, flush_global);
1971#endif
1972#ifdef USE_KQEMU
1973 if (env->kqemu_enabled) {
1974 kqemu_flush(env, flush_global);
1975 }
1976#endif
1977 tlb_flush_count++;
1978}
1979
1980#ifndef VBOX
1981static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1982#else
1983DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1984#endif
1985{
1986 if (addr == (tlb_entry->addr_read &
1987 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1988 addr == (tlb_entry->addr_write &
1989 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1990 addr == (tlb_entry->addr_code &
1991 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1992 tlb_entry->addr_read = -1;
1993 tlb_entry->addr_write = -1;
1994 tlb_entry->addr_code = -1;
1995 }
1996}
1997
1998void tlb_flush_page(CPUState *env, target_ulong addr)
1999{
2000 int i;
2001
2002#if defined(DEBUG_TLB)
2003 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2004#endif
2005 /* must reset current TB so that interrupts cannot modify the
2006 links while we are modifying them */
2007 env->current_tb = NULL;
2008
2009 addr &= TARGET_PAGE_MASK;
2010 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2011 tlb_flush_entry(&env->tlb_table[0][i], addr);
2012 tlb_flush_entry(&env->tlb_table[1][i], addr);
2013#if (NB_MMU_MODES >= 3)
2014 tlb_flush_entry(&env->tlb_table[2][i], addr);
2015#if (NB_MMU_MODES == 4)
2016 tlb_flush_entry(&env->tlb_table[3][i], addr);
2017#endif
2018#endif
2019
2020 tlb_flush_jmp_cache(env, addr);
2021
2022#ifdef USE_KQEMU
2023 if (env->kqemu_enabled) {
2024 kqemu_flush_page(env, addr);
2025 }
2026#endif
2027}
2028
2029/* update the TLBs so that writes to code in the virtual page 'addr'
2030 can be detected */
2031static void tlb_protect_code(ram_addr_t ram_addr)
2032{
2033 cpu_physical_memory_reset_dirty(ram_addr,
2034 ram_addr + TARGET_PAGE_SIZE,
2035 CODE_DIRTY_FLAG);
2036#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2037 /** @todo Retest this? This function has changed... */
2038 remR3ProtectCode(cpu_single_env, ram_addr);
2039#endif
2040}
2041
2042/* update the TLB so that writes in physical page 'ram_addr' are no longer
2043 tested for self-modifying code */
2044static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2045 target_ulong vaddr)
2046{
2047#ifdef VBOX
2048 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2049#endif
2050 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2051}
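/*
 * Illustrative sketch (not from the original QEMU/VBox sources): a minimal,
 * self-contained model of the CODE_DIRTY_FLAG scheme used by
 * tlb_protect_code()/tlb_unprotect_code_phys() above. One status byte per
 * page; clearing the flag forces writes through a slow handler, which flushes
 * the (imaginary) translations and sets the flag again. All SKETCH_* names
 * are invented for this sketch only.
 */
#if 0 /* illustration only */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_PAGE_BITS   12
#define SKETCH_PAGE_SIZE   (1u << SKETCH_PAGE_BITS)
#define SKETCH_NB_PAGES    16
#define SKETCH_CODE_DIRTY  0x02               /* stands in for CODE_DIRTY_FLAG */

static uint8_t sketch_dirty[SKETCH_NB_PAGES];
static uint8_t sketch_ram[SKETCH_NB_PAGES * SKETCH_PAGE_SIZE];

/* analogue of tlb_protect_code(): arm write detection for one page */
static void sketch_protect_code(uint32_t ram_addr)
{
    sketch_dirty[ram_addr >> SKETCH_PAGE_BITS] &= ~SKETCH_CODE_DIRTY;
}

/* analogue of the not-dirty write handler */
static void sketch_write_byte(uint32_t ram_addr, uint8_t val)
{
    uint32_t page = ram_addr >> SKETCH_PAGE_BITS;
    if (!(sketch_dirty[page] & SKETCH_CODE_DIRTY)) {
        printf("write hit protected code page %u -> invalidate its TBs\n", page);
        sketch_dirty[page] |= SKETCH_CODE_DIRTY;   /* tlb_unprotect_code_phys() */
    }
    sketch_ram[ram_addr] = val;
}

int main(void)
{
    memset(sketch_dirty, 0xff, sizeof(sketch_dirty));  /* everything dirty */
    sketch_protect_code(3 * SKETCH_PAGE_SIZE);
    sketch_write_byte(3 * SKETCH_PAGE_SIZE + 8, 0x90); /* first write: detected */
    sketch_write_byte(3 * SKETCH_PAGE_SIZE + 9, 0x90); /* page is dirty again */
    return 0;
}
#endif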
2052
2053#ifndef VBOX
2054static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2055 unsigned long start, unsigned long length)
2056#else
2057DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2058 unsigned long start, unsigned long length)
2059#endif
2060{
2061 unsigned long addr;
2062
2063#ifdef VBOX
2064 if (start & 3)
2065 return;
2066#endif
2067 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2068 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2069 if ((addr - start) < length) {
2070 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
2071 }
2072 }
2073}
2074
2075void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2076 int dirty_flags)
2077{
2078 CPUState *env;
2079 unsigned long length, start1;
2080 int i, mask, len;
2081 uint8_t *p;
2082
2083 start &= TARGET_PAGE_MASK;
2084 end = TARGET_PAGE_ALIGN(end);
2085
2086 length = end - start;
2087 if (length == 0)
2088 return;
2089 len = length >> TARGET_PAGE_BITS;
2090#ifdef USE_KQEMU
2091 /* XXX: should not depend on cpu context */
2092 env = first_cpu;
2093 if (env->kqemu_enabled) {
2094 ram_addr_t addr;
2095 addr = start;
2096 for(i = 0; i < len; i++) {
2097 kqemu_set_notdirty(env, addr);
2098 addr += TARGET_PAGE_SIZE;
2099 }
2100 }
2101#endif
2102 mask = ~dirty_flags;
2103 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2104#ifdef VBOX
2105 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2106#endif
2107 for(i = 0; i < len; i++)
2108 p[i] &= mask;
2109
2110 /* we modify the TLB cache so that the dirty bit will be set again
2111 when accessing the range */
2112#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2113 start1 = start;
2114#elif !defined(VBOX)
2115 start1 = start + (unsigned long)phys_ram_base;
2116#else
2117 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2118#endif
2119 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2120 for(i = 0; i < CPU_TLB_SIZE; i++)
2121 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2122 for(i = 0; i < CPU_TLB_SIZE; i++)
2123 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2124#if (NB_MMU_MODES >= 3)
2125 for(i = 0; i < CPU_TLB_SIZE; i++)
2126 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2127#if (NB_MMU_MODES == 4)
2128 for(i = 0; i < CPU_TLB_SIZE; i++)
2129 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2130#endif
2131#endif
2132 }
2133}
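/*
 * Illustrative sketch (not from the original sources): the page-range
 * arithmetic used by cpu_physical_memory_reset_dirty() above, with made-up
 * addresses. The start is rounded down and the end rounded up to page
 * boundaries before the per-page dirty bytes are masked.
 */
#if 0 /* illustration only */
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_BITS 12
#define SK_PAGE_SIZE (1u << SK_PAGE_BITS)
#define SK_PAGE_MASK (~(SK_PAGE_SIZE - 1))
#define SK_PAGE_ALIGN(a) (((a) + SK_PAGE_SIZE - 1) & SK_PAGE_MASK)

int main(void)
{
    uint32_t start = 0x12345, end = 0x15001;
    uint32_t s   = start & SK_PAGE_MASK;       /* 0x12000 */
    uint32_t e   = SK_PAGE_ALIGN(end);         /* 0x16000 */
    uint32_t len = (e - s) >> SK_PAGE_BITS;    /* 4 pages to clear */

    printf("pages 0x%x..0x%x (%u pages)\n",
           s >> SK_PAGE_BITS, (e >> SK_PAGE_BITS) - 1, len);
    return 0;
}
#endif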
2134
2135#ifndef VBOX
2136int cpu_physical_memory_set_dirty_tracking(int enable)
2137{
2138 in_migration = enable;
2139 return 0;
2140}
2141
2142int cpu_physical_memory_get_dirty_tracking(void)
2143{
2144 return in_migration;
2145}
2146#endif
2147
2148#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2149DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2150#else
2151static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2152#endif
2153{
2154 ram_addr_t ram_addr;
2155
2156 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2157 /* RAM case */
2158#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2159 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2160#elif !defined(VBOX)
2161 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2162 tlb_entry->addend - (unsigned long)phys_ram_base;
2163#else
2164 Assert(phys_addend != -1);
2165 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2166#endif
2167 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2168 tlb_entry->addr_write |= TLB_NOTDIRTY;
2169 }
2170 }
2171}
2172
2173/* update the TLB according to the current state of the dirty bits */
2174void cpu_tlb_update_dirty(CPUState *env)
2175{
2176 int i;
2177#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2178 for(i = 0; i < CPU_TLB_SIZE; i++)
2179 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2180 for(i = 0; i < CPU_TLB_SIZE; i++)
2181 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2182#if (NB_MMU_MODES >= 3)
2183 for(i = 0; i < CPU_TLB_SIZE; i++)
2184 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2185#if (NB_MMU_MODES == 4)
2186 for(i = 0; i < CPU_TLB_SIZE; i++)
2187 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2188#endif
2189#endif
2190#else /* VBOX */
2191 for(i = 0; i < CPU_TLB_SIZE; i++)
2192 tlb_update_dirty(&env->tlb_table[0][i]);
2193 for(i = 0; i < CPU_TLB_SIZE; i++)
2194 tlb_update_dirty(&env->tlb_table[1][i]);
2195#if (NB_MMU_MODES >= 3)
2196 for(i = 0; i < CPU_TLB_SIZE; i++)
2197 tlb_update_dirty(&env->tlb_table[2][i]);
2198#if (NB_MMU_MODES == 4)
2199 for(i = 0; i < CPU_TLB_SIZE; i++)
2200 tlb_update_dirty(&env->tlb_table[3][i]);
2201#endif
2202#endif
2203#endif /* VBOX */
2204}
2205
2206#ifndef VBOX
2207static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2208#else
2209DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2210#endif
2211{
2212 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2213 tlb_entry->addr_write = vaddr;
2214}
2215
2216
2217/* update the TLB corresponding to virtual page vaddr and phys addr
2218 addr so that it is no longer dirty */
2219#ifndef VBOX
2220static inline void tlb_set_dirty(CPUState *env,
2221 unsigned long addr, target_ulong vaddr)
2222#else
2223DECLINLINE(void) tlb_set_dirty(CPUState *env,
2224 unsigned long addr, target_ulong vaddr)
2225#endif
2226{
2227 int i;
2228
2229 addr &= TARGET_PAGE_MASK;
2230 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2231 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2232 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2233#if (NB_MMU_MODES >= 3)
2234 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2235#if (NB_MMU_MODES == 4)
2236 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2237#endif
2238#endif
2239}
2240
2241/* add a new TLB entry. At most one entry for a given virtual address
2242 is permitted. Return 0 if OK or 2 if the page could not be mapped
2243 (can only happen in non SOFTMMU mode for I/O pages or pages
2244 conflicting with the host address space). */
2245int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2246 target_phys_addr_t paddr, int prot,
2247 int mmu_idx, int is_softmmu)
2248{
2249 PhysPageDesc *p;
2250 unsigned long pd;
2251 unsigned int index;
2252 target_ulong address;
2253 target_ulong code_address;
2254 target_phys_addr_t addend;
2255 int ret;
2256 CPUTLBEntry *te;
2257 int i;
2258 target_phys_addr_t iotlb;
2259#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2260 int read_mods = 0, write_mods = 0, code_mods = 0;
2261#endif
2262
2263 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2264 if (!p) {
2265 pd = IO_MEM_UNASSIGNED;
2266 } else {
2267 pd = p->phys_offset;
2268 }
2269#if defined(DEBUG_TLB)
2270 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2271 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2272#endif
2273
2274 ret = 0;
2275 address = vaddr;
2276 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2277 /* IO memory case (romd handled later) */
2278 address |= TLB_MMIO;
2279 }
2280#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2281 addend = pd & TARGET_PAGE_MASK;
2282#elif !defined(VBOX)
2283 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2284#else
2285 /** @todo this is racing the phys_page_find call above since it may register
2286 * a new chunk of memory... */
2287 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2288 pd & TARGET_PAGE_MASK,
2289 !!(prot & PAGE_WRITE));
2290#endif
2291
2292 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2293 /* Normal RAM. */
2294 iotlb = pd & TARGET_PAGE_MASK;
2295 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2296 iotlb |= IO_MEM_NOTDIRTY;
2297 else
2298 iotlb |= IO_MEM_ROM;
2299 } else {
2300 /* IO handlers are currently passed a physical address.
2301 It would be nice to pass an offset from the base address
2302 of that region. This would avoid having to special case RAM,
2303 and avoid full address decoding in every device.
2304 We can't use the high bits of pd for this because
2305 IO_MEM_ROMD uses these as a ram address. */
2306 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2307 }
2308
2309 code_address = address;
2310
2311#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2312 if (addend & 0x3)
2313 {
2314 if (addend & 0x2)
2315 {
2316 /* catch write */
2317 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2318 write_mods |= TLB_MMIO;
2319 }
2320 else if (addend & 0x1)
2321 {
2322 /* catch all */
2323 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2324 {
2325 read_mods |= TLB_MMIO;
2326 write_mods |= TLB_MMIO;
2327 code_mods |= TLB_MMIO;
2328 }
2329 }
2330 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2331 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2332 addend &= ~(target_ulong)0x3;
2333 }
2334#endif
2335
2336 /* Make accesses to pages with watchpoints go via the
2337 watchpoint trap routines. */
2338 for (i = 0; i < env->nb_watchpoints; i++) {
2339 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2340 iotlb = io_mem_watch + paddr;
2341 /* TODO: The memory case can be optimized by not trapping
2342 reads of pages with a write breakpoint. */
2343 address |= TLB_MMIO;
2344 }
2345 }
2346
2347 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2348 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2349 te = &env->tlb_table[mmu_idx][index];
2350 te->addend = addend - vaddr;
2351 if (prot & PAGE_READ) {
2352 te->addr_read = address;
2353 } else {
2354 te->addr_read = -1;
2355 }
2356
2357 if (prot & PAGE_EXEC) {
2358 te->addr_code = code_address;
2359 } else {
2360 te->addr_code = -1;
2361 }
2362 if (prot & PAGE_WRITE) {
2363 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2364 (pd & IO_MEM_ROMD)) {
2365 /* Write access calls the I/O callback. */
2366 te->addr_write = address | TLB_MMIO;
2367 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2368 !cpu_physical_memory_is_dirty(pd)) {
2369 te->addr_write = address | TLB_NOTDIRTY;
2370 } else {
2371 te->addr_write = address;
2372 }
2373 } else {
2374 te->addr_write = -1;
2375 }
2376
2377#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2378 if (prot & PAGE_READ)
2379 te->addr_read |= read_mods;
2380 if (prot & PAGE_EXEC)
2381 te->addr_code |= code_mods;
2382 if (prot & PAGE_WRITE)
2383 te->addr_write |= write_mods;
2384
2385 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2386#endif
2387
2388#ifdef VBOX
2389 /* inform raw mode about TLB page change */
2390 remR3FlushPage(env, vaddr);
2391#endif
2392 return ret;
2393}
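/*
 * Standalone illustrative sketch (not from the original sources) of the
 * direct-mapped softmmu TLB that tlb_set_page_exec() fills in: the entry
 * index is a hash of the virtual address, and 'addend' is chosen so that
 * host = vaddr + addend for any access within the page. Types and names are
 * simplified and invented for this sketch.
 */
#if 0 /* illustration only */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_PAGE_BITS 12
#define SK_PAGE_SIZE (1u << SK_PAGE_BITS)
#define SK_PAGE_MASK (~(uintptr_t)(SK_PAGE_SIZE - 1))
#define SK_TLB_SIZE  256

typedef struct {
    uintptr_t addr_read;   /* page-aligned vaddr, or all-ones if invalid */
    uintptr_t addend;      /* host_page - vaddr_page */
} SketchTLBEntry;

static SketchTLBEntry sk_tlb[SK_TLB_SIZE];

static void sk_tlb_fill(uintptr_t vaddr, uint8_t *host_page)
{
    unsigned idx = (vaddr >> SK_PAGE_BITS) & (SK_TLB_SIZE - 1);
    sk_tlb[idx].addr_read = vaddr & SK_PAGE_MASK;
    sk_tlb[idx].addend = (uintptr_t)host_page - (vaddr & SK_PAGE_MASK);
}

static uint8_t sk_ldub(uintptr_t vaddr)
{
    unsigned idx = (vaddr >> SK_PAGE_BITS) & (SK_TLB_SIZE - 1);
    if (sk_tlb[idx].addr_read == (vaddr & SK_PAGE_MASK))
        return *(uint8_t *)(vaddr + sk_tlb[idx].addend);   /* fast hit */
    /* a real implementation would invoke the target MMU / tlb_fill() here */
    return 0xff;
}

int main(void)
{
    static uint8_t page[SK_PAGE_SIZE];
    memset(sk_tlb, 0xff, sizeof(sk_tlb));       /* mark all entries invalid */
    page[0x42] = 0xab;
    sk_tlb_fill(0x7f001000, page);
    printf("0x%02x\n", sk_ldub(0x7f001042));    /* prints 0xab */
    return 0;
}
#endif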
2394#if 0
2395/* called from signal handler: invalidate the code and unprotect the
2396 page. Return TRUE if the fault was successfully handled. */
2397int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2398{
2399#if !defined(CONFIG_SOFTMMU)
2400 VirtPageDesc *vp;
2401
2402#if defined(DEBUG_TLB)
2403 printf("page_unprotect: addr=0x%08x\n", addr);
2404#endif
2405 addr &= TARGET_PAGE_MASK;
2406
2407 /* if it is not mapped, no need to worry here */
2408 if (addr >= MMAP_AREA_END)
2409 return 0;
2410 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2411 if (!vp)
2412 return 0;
2413 /* NOTE: in this case, validate_tag is _not_ tested as it
2414 validates only the code TLB */
2415 if (vp->valid_tag != virt_valid_tag)
2416 return 0;
2417 if (!(vp->prot & PAGE_WRITE))
2418 return 0;
2419#if defined(DEBUG_TLB)
2420 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2421 addr, vp->phys_addr, vp->prot);
2422#endif
2423 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2424 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2425 (unsigned long)addr, vp->prot);
2426 /* set the dirty bit */
2427 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2428 /* flush the code inside */
2429 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2430 return 1;
2431#elif defined(VBOX)
2432 addr &= TARGET_PAGE_MASK;
2433
2434 /* if it is not mapped, no need to worry here */
2435 if (addr >= MMAP_AREA_END)
2436 return 0;
2437 return 1;
2438#else
2439 return 0;
2440#endif
2441}
2442#endif /* 0 */
2443
2444#else
2445
2446void tlb_flush(CPUState *env, int flush_global)
2447{
2448}
2449
2450void tlb_flush_page(CPUState *env, target_ulong addr)
2451{
2452}
2453
2454int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2455 target_phys_addr_t paddr, int prot,
2456 int mmu_idx, int is_softmmu)
2457{
2458 return 0;
2459}
2460
2461#ifndef VBOX
2462/* dump memory mappings */
2463void page_dump(FILE *f)
2464{
2465 unsigned long start, end;
2466 int i, j, prot, prot1;
2467 PageDesc *p;
2468
2469 fprintf(f, "%-8s %-8s %-8s %s\n",
2470 "start", "end", "size", "prot");
2471 start = -1;
2472 end = -1;
2473 prot = 0;
2474 for(i = 0; i <= L1_SIZE; i++) {
2475 if (i < L1_SIZE)
2476 p = l1_map[i];
2477 else
2478 p = NULL;
2479 for(j = 0;j < L2_SIZE; j++) {
2480 if (!p)
2481 prot1 = 0;
2482 else
2483 prot1 = p[j].flags;
2484 if (prot1 != prot) {
2485 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2486 if (start != -1) {
2487 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2488 start, end, end - start,
2489 prot & PAGE_READ ? 'r' : '-',
2490 prot & PAGE_WRITE ? 'w' : '-',
2491 prot & PAGE_EXEC ? 'x' : '-');
2492 }
2493 if (prot1 != 0)
2494 start = end;
2495 else
2496 start = -1;
2497 prot = prot1;
2498 }
2499 if (!p)
2500 break;
2501 }
2502 }
2503}
2504#endif /* !VBOX */
2505
2506int page_get_flags(target_ulong address)
2507{
2508 PageDesc *p;
2509
2510 p = page_find(address >> TARGET_PAGE_BITS);
2511 if (!p)
2512 return 0;
2513 return p->flags;
2514}
2515
2516/* modify the flags of a page and invalidate the code if
2517 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2518 depending on PAGE_WRITE */
2519void page_set_flags(target_ulong start, target_ulong end, int flags)
2520{
2521 PageDesc *p;
2522 target_ulong addr;
2523
2524 start = start & TARGET_PAGE_MASK;
2525 end = TARGET_PAGE_ALIGN(end);
2526 if (flags & PAGE_WRITE)
2527 flags |= PAGE_WRITE_ORG;
2528#ifdef VBOX
2529 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2530#endif
2531 spin_lock(&tb_lock);
2532 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2533 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2534 /* if the write protection is set, then we invalidate the code
2535 inside */
2536 if (!(p->flags & PAGE_WRITE) &&
2537 (flags & PAGE_WRITE) &&
2538 p->first_tb) {
2539 tb_invalidate_phys_page(addr, 0, NULL);
2540 }
2541 p->flags = flags;
2542 }
2543 spin_unlock(&tb_lock);
2544}
2545
2546int page_check_range(target_ulong start, target_ulong len, int flags)
2547{
2548 PageDesc *p;
2549 target_ulong end;
2550 target_ulong addr;
2551
2552 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2553 start = start & TARGET_PAGE_MASK;
2554
2555 if( end < start )
2556 /* we've wrapped around */
2557 return -1;
2558 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2559 p = page_find(addr >> TARGET_PAGE_BITS);
2560 if( !p )
2561 return -1;
2562 if( !(p->flags & PAGE_VALID) )
2563 return -1;
2564
2565 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2566 return -1;
2567 if (flags & PAGE_WRITE) {
2568 if (!(p->flags & PAGE_WRITE_ORG))
2569 return -1;
2570 /* unprotect the page if it was put read-only because it
2571 contains translated code */
2572 if (!(p->flags & PAGE_WRITE)) {
2573 if (!page_unprotect(addr, 0, NULL))
2574 return -1;
2575 }
2576 return 0;
2577 }
2578 }
2579 return 0;
2580}
2581
2582/* called from signal handler: invalidate the code and unprotect the
2583 page. Return TRUE if the fault was successfully handled. */
2584int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2585{
2586 unsigned int page_index, prot, pindex;
2587 PageDesc *p, *p1;
2588 target_ulong host_start, host_end, addr;
2589
2590 /* Technically this isn't safe inside a signal handler. However we
2591 know this only ever happens in a synchronous SEGV handler, so in
2592 practice it seems to be ok. */
2593 mmap_lock();
2594
2595 host_start = address & qemu_host_page_mask;
2596 page_index = host_start >> TARGET_PAGE_BITS;
2597 p1 = page_find(page_index);
2598 if (!p1) {
2599 mmap_unlock();
2600 return 0;
2601 }
2602 host_end = host_start + qemu_host_page_size;
2603 p = p1;
2604 prot = 0;
2605 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2606 prot |= p->flags;
2607 p++;
2608 }
2609 /* if the page was really writable, then we change its
2610 protection back to writable */
2611 if (prot & PAGE_WRITE_ORG) {
2612 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2613 if (!(p1[pindex].flags & PAGE_WRITE)) {
2614 mprotect((void *)g2h(host_start), qemu_host_page_size,
2615 (prot & PAGE_BITS) | PAGE_WRITE);
2616 p1[pindex].flags |= PAGE_WRITE;
2617 /* and since the content will be modified, we must invalidate
2618 the corresponding translated code. */
2619 tb_invalidate_phys_page(address, pc, puc);
2620#ifdef DEBUG_TB_CHECK
2621 tb_invalidate_check(address);
2622#endif
2623 mmap_unlock();
2624 return 1;
2625 }
2626 }
2627 mmap_unlock();
2628 return 0;
2629}
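/*
 * Illustrative sketch (not from the original sources): the host-side
 * mprotect() mechanics that page_unprotect() relies on in user-mode
 * emulation. Pages holding translated code are made read-only; after the
 * SEGV handler has invalidated the affected TBs, write access is restored.
 * The signal handling itself is omitted here.
 */
#if 0 /* illustration only */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    uint8_t *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    p[0] = 0x90;                                    /* "guest code" byte */
    mprotect(p, pagesz, PROT_READ);                 /* protect translated code */
    /* a guest store to this page would now fault; the handler calls
       page_unprotect(), which invalidates the TBs and then effectively does: */
    mprotect(p, pagesz, PROT_READ | PROT_WRITE);
    p[0] = 0xcc;                                    /* writes work again */
    printf("%02x\n", p[0]);
    munmap(p, pagesz);
    return 0;
}
#endif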
2630
2631static inline void tlb_set_dirty(CPUState *env,
2632 unsigned long addr, target_ulong vaddr)
2633{
2634}
2635#endif /* defined(CONFIG_USER_ONLY) */
2636
2637#if !defined(CONFIG_USER_ONLY)
2638static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2639 ram_addr_t memory);
2640static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2641 ram_addr_t orig_memory);
2642#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2643 need_subpage) \
2644 do { \
2645 if (addr > start_addr) \
2646 start_addr2 = 0; \
2647 else { \
2648 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2649 if (start_addr2 > 0) \
2650 need_subpage = 1; \
2651 } \
2652 \
2653 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2654 end_addr2 = TARGET_PAGE_SIZE - 1; \
2655 else { \
2656 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2657 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2658 need_subpage = 1; \
2659 } \
2660 } while (0)
2661
2662
2663/* register physical memory. 'size' must be a multiple of the target
2664 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2665 io memory page */
2666void cpu_register_physical_memory(target_phys_addr_t start_addr,
2667 unsigned long size,
2668 unsigned long phys_offset)
2669{
2670 target_phys_addr_t addr, end_addr;
2671 PhysPageDesc *p;
2672 CPUState *env;
2673 ram_addr_t orig_size = size;
2674 void *subpage;
2675
2676#ifdef USE_KQEMU
2677 /* XXX: should not depend on cpu context */
2678 env = first_cpu;
2679 if (env->kqemu_enabled) {
2680 kqemu_set_phys_mem(start_addr, size, phys_offset);
2681 }
2682#endif
2683 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2684 end_addr = start_addr + (target_phys_addr_t)size;
2685 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2686 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2687 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2688 ram_addr_t orig_memory = p->phys_offset;
2689 target_phys_addr_t start_addr2, end_addr2;
2690 int need_subpage = 0;
2691
2692 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2693 need_subpage);
2694 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2695 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2696 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2697 &p->phys_offset, orig_memory);
2698 } else {
2699 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2700 >> IO_MEM_SHIFT];
2701 }
2702 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2703 } else {
2704 p->phys_offset = phys_offset;
2705 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2706 (phys_offset & IO_MEM_ROMD))
2707 phys_offset += TARGET_PAGE_SIZE;
2708 }
2709 } else {
2710 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2711 p->phys_offset = phys_offset;
2712 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2713 (phys_offset & IO_MEM_ROMD))
2714 phys_offset += TARGET_PAGE_SIZE;
2715 else {
2716 target_phys_addr_t start_addr2, end_addr2;
2717 int need_subpage = 0;
2718
2719 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2720 end_addr2, need_subpage);
2721
2722 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2723 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2724 &p->phys_offset, IO_MEM_UNASSIGNED);
2725 subpage_register(subpage, start_addr2, end_addr2,
2726 phys_offset);
2727 }
2728 }
2729 }
2730 }
2731 /* since each CPU stores ram addresses in its TLB cache, we must
2732 reset the modified entries */
2733 /* XXX: slow ! */
2734 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2735 tlb_flush(env, 1);
2736 }
2737}
2738
2739/* XXX: temporary until new memory mapping API */
2740uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2741{
2742 PhysPageDesc *p;
2743
2744 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2745 if (!p)
2746 return IO_MEM_UNASSIGNED;
2747 return p->phys_offset;
2748}
2749
2750#ifndef VBOX
2751/* XXX: better than nothing */
2752ram_addr_t qemu_ram_alloc(ram_addr_t size)
2753{
2754 ram_addr_t addr;
2755 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2756 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2757 (uint64_t)size, (uint64_t)phys_ram_size);
2758 abort();
2759 }
2760 addr = phys_ram_alloc_offset;
2761 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2762 return addr;
2763}
2764
2765void qemu_ram_free(ram_addr_t addr)
2766{
2767}
2768#endif
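/*
 * Hypothetical usage sketch (illustration only, not from this file): how
 * board/setup code of this QEMU generation typically pairs qemu_ram_alloc()
 * with cpu_register_physical_memory() to give a guest-physical range RAM
 * backing (IO_MEM_RAM lives in the low bits of phys_offset). The size and
 * address below are made up.
 */
#if 0 /* illustration only */
static void sketch_setup_ram(void)
{
    ram_addr_t ram_off = qemu_ram_alloc(16 * 1024 * 1024);   /* 16 MB */

    /* map it at guest-physical 0; its pages become "normal RAM" in the TLB,
       so stores go through the dirty-tracking path seen above */
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 ram_off | IO_MEM_RAM);
}
#endif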
2769
2770
2771static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2772{
2773#ifdef DEBUG_UNASSIGNED
2774 printf("Unassigned mem read 0x%08x\n", (int)addr);
2775#endif
2776#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2777 do_unassigned_access(addr, 0, 0, 0, 1);
2778#endif
2779 return 0;
2780}
2781
2782static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2783{
2784#ifdef DEBUG_UNASSIGNED
2785 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2786#endif
2787#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2788 do_unassigned_access(addr, 0, 0, 0, 2);
2789#endif
2790 return 0;
2791}
2792
2793static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2794{
2795#ifdef DEBUG_UNASSIGNED
2796 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2797#endif
2798#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2799 do_unassigned_access(addr, 0, 0, 0, 4);
2800#endif
2801 return 0;
2802}
2803
2804static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2805{
2806#ifdef DEBUG_UNASSIGNED
2807 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2808#endif
2809}
2810
2811static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2812{
2813#ifdef DEBUG_UNASSIGNED
2814 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2815#endif
2816#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2817 do_unassigned_access(addr, 1, 0, 0, 2);
2818#endif
2819}
2820
2821static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2822{
2823#ifdef DEBUG_UNASSIGNED
2824 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2825#endif
2826#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2827 do_unassigned_access(addr, 1, 0, 0, 4);
2828#endif
2829}
2830static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2831 unassigned_mem_readb,
2832 unassigned_mem_readw,
2833 unassigned_mem_readl,
2834};
2835
2836static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2837 unassigned_mem_writeb,
2838 unassigned_mem_writew,
2839 unassigned_mem_writel,
2840};
2841
2842static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2843{
2844 unsigned long ram_addr;
2845 int dirty_flags;
2846#if defined(VBOX)
2847 ram_addr = addr;
2848#else
2849 ram_addr = addr - (unsigned long)phys_ram_base;
2850#endif
2851#ifdef VBOX
2852 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2853 dirty_flags = 0xff;
2854 else
2855#endif /* VBOX */
2856 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2857 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2858#if !defined(CONFIG_USER_ONLY)
2859 tb_invalidate_phys_page_fast(ram_addr, 1);
2860# ifdef VBOX
2861 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2862 dirty_flags = 0xff;
2863 else
2864# endif /* VBOX */
2865 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2866#endif
2867 }
2868#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2869 remR3PhysWriteU8(addr, val);
2870#else
2871 stb_p((uint8_t *)(long)addr, val);
2872#endif
2873#ifdef USE_KQEMU
2874 if (cpu_single_env->kqemu_enabled &&
2875 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2876 kqemu_modify_page(cpu_single_env, ram_addr);
2877#endif
2878 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2879#ifdef VBOX
2880 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2881#endif /* VBOX */
2882 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2883 /* we remove the notdirty callback only if the code has been
2884 flushed */
2885 if (dirty_flags == 0xff)
2886 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2887}
2888
2889static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2890{
2891 unsigned long ram_addr;
2892 int dirty_flags;
2893#if defined(VBOX)
2894 ram_addr = addr;
2895#else
2896 ram_addr = addr - (unsigned long)phys_ram_base;
2897#endif
2898#ifdef VBOX
2899 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2900 dirty_flags = 0xff;
2901 else
2902#endif /* VBOX */
2903 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2904 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2905#if !defined(CONFIG_USER_ONLY)
2906 tb_invalidate_phys_page_fast(ram_addr, 2);
2907# ifdef VBOX
2908 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2909 dirty_flags = 0xff;
2910 else
2911# endif /* VBOX */
2912 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2913#endif
2914 }
2915#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2916 remR3PhysWriteU16(addr, val);
2917#else
2918 stw_p((uint8_t *)(long)addr, val);
2919#endif
2920
2921#ifdef USE_KQEMU
2922 if (cpu_single_env->kqemu_enabled &&
2923 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2924 kqemu_modify_page(cpu_single_env, ram_addr);
2925#endif
2926 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2927#ifdef VBOX
2928 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2929#endif
2930 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2931 /* we remove the notdirty callback only if the code has been
2932 flushed */
2933 if (dirty_flags == 0xff)
2934 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2935}
2936
2937static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2938{
2939 unsigned long ram_addr;
2940 int dirty_flags;
2941#if defined(VBOX)
2942 ram_addr = addr;
2943#else
2944 ram_addr = addr - (unsigned long)phys_ram_base;
2945#endif
2946#ifdef VBOX
2947 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2948 dirty_flags = 0xff;
2949 else
2950#endif /* VBOX */
2951 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2952 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2953#if !defined(CONFIG_USER_ONLY)
2954 tb_invalidate_phys_page_fast(ram_addr, 4);
2955# ifdef VBOX
2956 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2957 dirty_flags = 0xff;
2958 else
2959# endif /* VBOX */
2960 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2961#endif
2962 }
2963#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2964 remR3PhysWriteU32(addr, val);
2965#else
2966 stl_p((uint8_t *)(long)addr, val);
2967#endif
2968#ifdef USE_KQEMU
2969 if (cpu_single_env->kqemu_enabled &&
2970 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2971 kqemu_modify_page(cpu_single_env, ram_addr);
2972#endif
2973 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2974#ifdef VBOX
2975 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2976#endif
2977 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2978 /* we remove the notdirty callback only if the code has been
2979 flushed */
2980 if (dirty_flags == 0xff)
2981 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2982}
2983
2984static CPUReadMemoryFunc *error_mem_read[3] = {
2985 NULL, /* never used */
2986 NULL, /* never used */
2987 NULL, /* never used */
2988};
2989
2990static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2991 notdirty_mem_writeb,
2992 notdirty_mem_writew,
2993 notdirty_mem_writel,
2994};
2995
2996
2997/* Generate a debug exception if a watchpoint has been hit. */
2998static void check_watchpoint(int offset, int flags)
2999{
3000 CPUState *env = cpu_single_env;
3001 target_ulong vaddr;
3002 int i;
3003
3004 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3005 for (i = 0; i < env->nb_watchpoints; i++) {
3006 if (vaddr == env->watchpoint[i].vaddr
3007 && (env->watchpoint[i].type & flags)) {
3008 env->watchpoint_hit = i + 1;
3009 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3010 break;
3011 }
3012 }
3013}
3014
3015/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3016 so these check for a hit then pass through to the normal out-of-line
3017 phys routines. */
3018static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3019{
3020 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3021 return ldub_phys(addr);
3022}
3023
3024static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3025{
3026 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3027 return lduw_phys(addr);
3028}
3029
3030static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3031{
3032 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3033 return ldl_phys(addr);
3034}
3035
3036static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3037 uint32_t val)
3038{
3039 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3040 stb_phys(addr, val);
3041}
3042
3043static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3044 uint32_t val)
3045{
3046 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3047 stw_phys(addr, val);
3048}
3049
3050static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3051 uint32_t val)
3052{
3053 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3054 stl_phys(addr, val);
3055}
3056
3057static CPUReadMemoryFunc *watch_mem_read[3] = {
3058 watch_mem_readb,
3059 watch_mem_readw,
3060 watch_mem_readl,
3061};
3062
3063static CPUWriteMemoryFunc *watch_mem_write[3] = {
3064 watch_mem_writeb,
3065 watch_mem_writew,
3066 watch_mem_writel,
3067};
3068
3069static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3070 unsigned int len)
3071{
3072 uint32_t ret;
3073 unsigned int idx;
3074
3075 idx = SUBPAGE_IDX(addr - mmio->base);
3076#if defined(DEBUG_SUBPAGE)
3077 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3078 mmio, len, addr, idx);
3079#endif
3080 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3081
3082 return ret;
3083}
3084
3085static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3086 uint32_t value, unsigned int len)
3087{
3088 unsigned int idx;
3089
3090 idx = SUBPAGE_IDX(addr - mmio->base);
3091#if defined(DEBUG_SUBPAGE)
3092 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3093 mmio, len, addr, idx, value);
3094#endif
3095 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3096}
3097
3098static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3099{
3100#if defined(DEBUG_SUBPAGE)
3101 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3102#endif
3103
3104 return subpage_readlen(opaque, addr, 0);
3105}
3106
3107static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3108 uint32_t value)
3109{
3110#if defined(DEBUG_SUBPAGE)
3111 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3112#endif
3113 subpage_writelen(opaque, addr, value, 0);
3114}
3115
3116static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3117{
3118#if defined(DEBUG_SUBPAGE)
3119 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3120#endif
3121
3122 return subpage_readlen(opaque, addr, 1);
3123}
3124
3125static void subpage_writew (void *opaque, target_phys_addr_t addr,
3126 uint32_t value)
3127{
3128#if defined(DEBUG_SUBPAGE)
3129 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3130#endif
3131 subpage_writelen(opaque, addr, value, 1);
3132}
3133
3134static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3135{
3136#if defined(DEBUG_SUBPAGE)
3137 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3138#endif
3139
3140 return subpage_readlen(opaque, addr, 2);
3141}
3142
3143static void subpage_writel (void *opaque,
3144 target_phys_addr_t addr, uint32_t value)
3145{
3146#if defined(DEBUG_SUBPAGE)
3147 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3148#endif
3149 subpage_writelen(opaque, addr, value, 2);
3150}
3151
3152static CPUReadMemoryFunc *subpage_read[] = {
3153 &subpage_readb,
3154 &subpage_readw,
3155 &subpage_readl,
3156};
3157
3158static CPUWriteMemoryFunc *subpage_write[] = {
3159 &subpage_writeb,
3160 &subpage_writew,
3161 &subpage_writel,
3162};
3163
3164static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3165 ram_addr_t memory)
3166{
3167 int idx, eidx;
3168 unsigned int i;
3169
3170 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3171 return -1;
3172 idx = SUBPAGE_IDX(start);
3173 eidx = SUBPAGE_IDX(end);
3174#if defined(DEBUG_SUBPAGE)
3175 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3176 mmio, start, end, idx, eidx, memory);
3177#endif
3178 memory >>= IO_MEM_SHIFT;
3179 for (; idx <= eidx; idx++) {
3180 for (i = 0; i < 4; i++) {
3181 if (io_mem_read[memory][i]) {
3182 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3183 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3184 }
3185 if (io_mem_write[memory][i]) {
3186 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3187 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3188 }
3189 }
3190 }
3191
3192 return 0;
3193}
3194
3195static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3196 ram_addr_t orig_memory)
3197{
3198 subpage_t *mmio;
3199 int subpage_memory;
3200
3201 mmio = qemu_mallocz(sizeof(subpage_t));
3202 if (mmio != NULL) {
3203 mmio->base = base;
3204 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3205#if defined(DEBUG_SUBPAGE)
3206 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3207 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3208#endif
3209 *phys = subpage_memory | IO_MEM_SUBPAGE;
3210 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3211 }
3212
3213 return mmio;
3214}
3215
3216static void io_mem_init(void)
3217{
3218 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3219 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3220 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3221 io_mem_nb = 5;
3222
3223 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3224 watch_mem_write, NULL);
3225
3226#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3227 /* alloc dirty bits array */
3228 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3229 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3230#endif /* !VBOX */
3231}
3232
3233/* mem_read and mem_write are arrays of functions containing the
3234 function to access byte (index 0), word (index 1) and dword (index
3235 2). Functions can be omitted with a NULL function pointer. The
3236 registered functions may be modified dynamically later.
3237 If io_index is non-zero, the corresponding io zone is
3238 modified. If it is zero, a new io zone is allocated. The return
3239 value can be used with cpu_register_physical_memory(). (-1) is
3240 returned on error. */
3241int cpu_register_io_memory(int io_index,
3242 CPUReadMemoryFunc **mem_read,
3243 CPUWriteMemoryFunc **mem_write,
3244 void *opaque)
3245{
3246 int i, subwidth = 0;
3247
3248 if (io_index <= 0) {
3249 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3250 return -1;
3251 io_index = io_mem_nb++;
3252 } else {
3253 if (io_index >= IO_MEM_NB_ENTRIES)
3254 return -1;
3255 }
3256
3257 for(i = 0;i < 3; i++) {
3258 if (!mem_read[i] || !mem_write[i])
3259 subwidth = IO_MEM_SUBWIDTH;
3260 io_mem_read[io_index][i] = mem_read[i];
3261 io_mem_write[io_index][i] = mem_write[i];
3262 }
3263 io_mem_opaque[io_index] = opaque;
3264 return (io_index << IO_MEM_SHIFT) | subwidth;
3265}
3266
3267CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3268{
3269 return io_mem_write[io_index >> IO_MEM_SHIFT];
3270}
3271
3272CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3273{
3274 return io_mem_read[io_index >> IO_MEM_SHIFT];
3275}
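/*
 * Hypothetical example (illustration only, not from this file): the callback
 * tables a device model hands to cpu_register_io_memory(). The device below
 * is made up and only implements 32-bit accesses, so the returned token also
 * carries IO_MEM_SUBWIDTH; mapping it afterwards uses
 * cpu_register_physical_memory().
 */
#if 0 /* illustration only */
static uint32_t sketch_dev_reg;

static uint32_t sketch_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return sketch_dev_reg;                     /* same value at any offset */
}

static void sketch_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    sketch_dev_reg = val;
}

static CPUReadMemoryFunc *sketch_dev_read[3] = {
    NULL, NULL, sketch_dev_readl,              /* no byte/word handlers */
};
static CPUWriteMemoryFunc *sketch_dev_write[3] = {
    NULL, NULL, sketch_dev_writel,
};

static void sketch_dev_map(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(0, sketch_dev_read, sketch_dev_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif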
3276#endif /* !defined(CONFIG_USER_ONLY) */
3277
3278/* physical memory access (slow version, mainly for debug) */
3279#if defined(CONFIG_USER_ONLY)
3280void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3281 int len, int is_write)
3282{
3283 int l, flags;
3284 target_ulong page;
3285 void * p;
3286
3287 while (len > 0) {
3288 page = addr & TARGET_PAGE_MASK;
3289 l = (page + TARGET_PAGE_SIZE) - addr;
3290 if (l > len)
3291 l = len;
3292 flags = page_get_flags(page);
3293 if (!(flags & PAGE_VALID))
3294 return;
3295 if (is_write) {
3296 if (!(flags & PAGE_WRITE))
3297 return;
3298 /* XXX: this code should not depend on lock_user */
3299 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3300 /* FIXME - should this return an error rather than just fail? */
3301 return;
3302 memcpy(p, buf, l);
3303 unlock_user(p, addr, l);
3304 } else {
3305 if (!(flags & PAGE_READ))
3306 return;
3307 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3308 /* FIXME - should this return an error rather than just fail? */
3309 return;
3310 memcpy(buf, p, l);
3311 unlock_user(p, addr, 0);
3312 }
3313 len -= l;
3314 buf += l;
3315 addr += l;
3316 }
3317}
3318
3319#else
3320void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3321 int len, int is_write)
3322{
3323 int l, io_index;
3324 uint8_t *ptr;
3325 uint32_t val;
3326 target_phys_addr_t page;
3327 unsigned long pd;
3328 PhysPageDesc *p;
3329
3330 while (len > 0) {
3331 page = addr & TARGET_PAGE_MASK;
3332 l = (page + TARGET_PAGE_SIZE) - addr;
3333 if (l > len)
3334 l = len;
3335 p = phys_page_find(page >> TARGET_PAGE_BITS);
3336 if (!p) {
3337 pd = IO_MEM_UNASSIGNED;
3338 } else {
3339 pd = p->phys_offset;
3340 }
3341
3342 if (is_write) {
3343 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3344 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3345 /* XXX: could force cpu_single_env to NULL to avoid
3346 potential bugs */
3347 if (l >= 4 && ((addr & 3) == 0)) {
3348 /* 32 bit write access */
3349#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3350 val = ldl_p(buf);
3351#else
3352 val = *(const uint32_t *)buf;
3353#endif
3354 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3355 l = 4;
3356 } else if (l >= 2 && ((addr & 1) == 0)) {
3357 /* 16 bit write access */
3358#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3359 val = lduw_p(buf);
3360#else
3361 val = *(const uint16_t *)buf;
3362#endif
3363 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3364 l = 2;
3365 } else {
3366 /* 8 bit write access */
3367#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3368 val = ldub_p(buf);
3369#else
3370 val = *(const uint8_t *)buf;
3371#endif
3372 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3373 l = 1;
3374 }
3375 } else {
3376 unsigned long addr1;
3377 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3378 /* RAM case */
3379#ifdef VBOX
3380 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3381#else
3382 ptr = phys_ram_base + addr1;
3383 memcpy(ptr, buf, l);
3384#endif
3385 if (!cpu_physical_memory_is_dirty(addr1)) {
3386 /* invalidate code */
3387 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3388 /* set dirty bit */
3389#ifdef VBOX
3390 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3391#endif
3392 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3393 (0xff & ~CODE_DIRTY_FLAG);
3394 }
3395 }
3396 } else {
3397 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3398 !(pd & IO_MEM_ROMD)) {
3399 /* I/O case */
3400 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3401 if (l >= 4 && ((addr & 3) == 0)) {
3402 /* 32 bit read access */
3403 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3404#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3405 stl_p(buf, val);
3406#else
3407 *(uint32_t *)buf = val;
3408#endif
3409 l = 4;
3410 } else if (l >= 2 && ((addr & 1) == 0)) {
3411 /* 16 bit read access */
3412 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3413#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3414 stw_p(buf, val);
3415#else
3416 *(uint16_t *)buf = val;
3417#endif
3418 l = 2;
3419 } else {
3420 /* 8 bit read access */
3421 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3422#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3423 stb_p(buf, val);
3424#else
3425 *(uint8_t *)buf = val;
3426#endif
3427 l = 1;
3428 }
3429 } else {
3430 /* RAM case */
3431#ifdef VBOX
3432 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3433#else
3434 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3435 (addr & ~TARGET_PAGE_MASK);
3436 memcpy(buf, ptr, l);
3437#endif
3438 }
3439 }
3440 len -= l;
3441 buf += l;
3442 addr += l;
3443 }
3444}
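/*
 * Hypothetical usage sketch (illustration only): reading and writing guest-
 * physical memory through the slow path above. cpu_physical_memory_read()/
 * cpu_physical_memory_write() are the usual wrappers around
 * cpu_physical_memory_rw(); the address used here is made up.
 */
#if 0 /* illustration only */
static void sketch_poke_guest_ram(void)
{
    uint32_t tmp;
    uint8_t buf[16];

    cpu_physical_memory_read(0x1000, buf, sizeof(buf));   /* bulk copy */
    tmp = ldl_phys(0x1000);                                /* aligned dword */
    stl_phys(0x1000, tmp + 1);                             /* marks page dirty */
}
#endif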
3445
3446#ifndef VBOX
3447/* used for ROM loading : can write in RAM and ROM */
3448void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3449 const uint8_t *buf, int len)
3450{
3451 int l;
3452 uint8_t *ptr;
3453 target_phys_addr_t page;
3454 unsigned long pd;
3455 PhysPageDesc *p;
3456
3457 while (len > 0) {
3458 page = addr & TARGET_PAGE_MASK;
3459 l = (page + TARGET_PAGE_SIZE) - addr;
3460 if (l > len)
3461 l = len;
3462 p = phys_page_find(page >> TARGET_PAGE_BITS);
3463 if (!p) {
3464 pd = IO_MEM_UNASSIGNED;
3465 } else {
3466 pd = p->phys_offset;
3467 }
3468
3469 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3470 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3471 !(pd & IO_MEM_ROMD)) {
3472 /* do nothing */
3473 } else {
3474 unsigned long addr1;
3475 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3476 /* ROM/RAM case */
3477 ptr = phys_ram_base + addr1;
3478 memcpy(ptr, buf, l);
3479 }
3480 len -= l;
3481 buf += l;
3482 addr += l;
3483 }
3484}
3485#endif /* !VBOX */
3486
3487
3488/* warning: addr must be aligned */
3489uint32_t ldl_phys(target_phys_addr_t addr)
3490{
3491 int io_index;
3492 uint8_t *ptr;
3493 uint32_t val;
3494 unsigned long pd;
3495 PhysPageDesc *p;
3496
3497 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3498 if (!p) {
3499 pd = IO_MEM_UNASSIGNED;
3500 } else {
3501 pd = p->phys_offset;
3502 }
3503
3504 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3505 !(pd & IO_MEM_ROMD)) {
3506 /* I/O case */
3507 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3508 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3509 } else {
3510 /* RAM case */
3511#ifndef VBOX
3512 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3513 (addr & ~TARGET_PAGE_MASK);
3514 val = ldl_p(ptr);
3515#else
3516 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3517#endif
3518 }
3519 return val;
3520}
3521
3522/* warning: addr must be aligned */
3523uint64_t ldq_phys(target_phys_addr_t addr)
3524{
3525 int io_index;
3526 uint8_t *ptr;
3527 uint64_t val;
3528 unsigned long pd;
3529 PhysPageDesc *p;
3530
3531 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3532 if (!p) {
3533 pd = IO_MEM_UNASSIGNED;
3534 } else {
3535 pd = p->phys_offset;
3536 }
3537
3538 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3539 !(pd & IO_MEM_ROMD)) {
3540 /* I/O case */
3541 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3542#ifdef TARGET_WORDS_BIGENDIAN
3543 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3544 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3545#else
3546 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3547 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3548#endif
3549 } else {
3550 /* RAM case */
3551#ifndef VBOX
3552 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3553 (addr & ~TARGET_PAGE_MASK);
3554 val = ldq_p(ptr);
3555#else
3556 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3557#endif
3558 }
3559 return val;
3560}
3561
3562/* XXX: optimize */
3563uint32_t ldub_phys(target_phys_addr_t addr)
3564{
3565 uint8_t val;
3566 cpu_physical_memory_read(addr, &val, 1);
3567 return val;
3568}
3569
3570/* XXX: optimize */
3571uint32_t lduw_phys(target_phys_addr_t addr)
3572{
3573 uint16_t val;
3574 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3575 return tswap16(val);
3576}
3577
3578/* warning: addr must be aligned. The ram page is not marked as dirty
3579 and the code inside is not invalidated. It is useful if the dirty
3580 bits are used to track modified PTEs */
3581void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3582{
3583 int io_index;
3584 uint8_t *ptr;
3585 unsigned long pd;
3586 PhysPageDesc *p;
3587
3588 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3589 if (!p) {
3590 pd = IO_MEM_UNASSIGNED;
3591 } else {
3592 pd = p->phys_offset;
3593 }
3594
3595 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3596 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3597 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3598 } else {
3599#ifndef VBOX
3600 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3601 ptr = phys_ram_base + addr1;
3602 stl_p(ptr, val);
3603#else
3604 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3605#endif
3606#ifndef VBOX
3607 if (unlikely(in_migration)) {
3608 if (!cpu_physical_memory_is_dirty(addr1)) {
3609 /* invalidate code */
3610 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3611 /* set dirty bit */
3612 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3613 (0xff & ~CODE_DIRTY_FLAG);
3614 }
3615 }
3616#endif
3617 }
3618}
3619
3620void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3621{
3622 int io_index;
3623 uint8_t *ptr;
3624 unsigned long pd;
3625 PhysPageDesc *p;
3626
3627 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3628 if (!p) {
3629 pd = IO_MEM_UNASSIGNED;
3630 } else {
3631 pd = p->phys_offset;
3632 }
3633
3634 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3635 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3636#ifdef TARGET_WORDS_BIGENDIAN
3637 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3638 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3639#else
3640 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3641 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3642#endif
3643 } else {
3644#ifndef VBOX
3645 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3646 (addr & ~TARGET_PAGE_MASK);
3647 stq_p(ptr, val);
3648#else
3649 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3650#endif
3651 }
3652}
3653
3654
3655/* warning: addr must be aligned */
3656void stl_phys(target_phys_addr_t addr, uint32_t val)
3657{
3658 int io_index;
3659 uint8_t *ptr;
3660 unsigned long pd;
3661 PhysPageDesc *p;
3662
3663 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3664 if (!p) {
3665 pd = IO_MEM_UNASSIGNED;
3666 } else {
3667 pd = p->phys_offset;
3668 }
3669
3670 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3671 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3672 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3673 } else {
3674 unsigned long addr1;
3675 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3676 /* RAM case */
3677#ifndef VBOX
3678 ptr = phys_ram_base + addr1;
3679 stl_p(ptr, val);
3680#else
3681 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3682#endif
3683 if (!cpu_physical_memory_is_dirty(addr1)) {
3684 /* invalidate code */
3685 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3686 /* set dirty bit */
3687#ifdef VBOX
3688 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3689#endif
3690 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3691 (0xff & ~CODE_DIRTY_FLAG);
3692 }
3693 }
3694}
3695
3696/* XXX: optimize */
3697void stb_phys(target_phys_addr_t addr, uint32_t val)
3698{
3699 uint8_t v = val;
3700 cpu_physical_memory_write(addr, &v, 1);
3701}
3702
3703/* XXX: optimize */
3704void stw_phys(target_phys_addr_t addr, uint32_t val)
3705{
3706 uint16_t v = tswap16(val);
3707 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3708}
3709
3710/* XXX: optimize */
3711void stq_phys(target_phys_addr_t addr, uint64_t val)
3712{
3713 val = tswap64(val);
3714 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3715}
3716
3717#endif
3718
3719/* virtual memory access for debug */
3720int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3721 uint8_t *buf, int len, int is_write)
3722{
3723 int l;
3724 target_ulong page, phys_addr;
3725
3726 while (len > 0) {
3727 page = addr & TARGET_PAGE_MASK;
3728 phys_addr = cpu_get_phys_page_debug(env, page);
3729 /* if no physical page mapped, return an error */
3730 if (phys_addr == -1)
3731 return -1;
3732 l = (page + TARGET_PAGE_SIZE) - addr;
3733 if (l > len)
3734 l = len;
3735 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3736 buf, l, is_write);
3737 len -= l;
3738 buf += l;
3739 addr += l;
3740 }
3741 return 0;
3742}
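/*
 * Hypothetical usage sketch (illustration only): the kind of helper a
 * gdb-stub style debugger builds on top of cpu_memory_rw_debug() to read
 * guest-virtual memory. It fails with -1 if any page in the range is
 * unmapped; env and the address are assumed to come from the caller.
 */
#if 0 /* illustration only */
static int sketch_dump_guest_insn(CPUState *env, target_ulong pc)
{
    uint8_t insn[16];

    if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0)
        return -1;            /* page not mapped */
    /* ... disassemble/print insn here ... */
    return 0;
}
#endif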
3743
3744/* in deterministic execution mode, instructions doing device I/Os
3745 must be at the end of the TB */
3746void cpu_io_recompile(CPUState *env, void *retaddr)
3747{
3748 TranslationBlock *tb;
3749 uint32_t n, cflags;
3750 target_ulong pc, cs_base;
3751 uint64_t flags;
3752
3753 tb = tb_find_pc((unsigned long)retaddr);
3754 if (!tb) {
3755 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3756 retaddr);
3757 }
3758 n = env->icount_decr.u16.low + tb->icount;
3759 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3760 /* Calculate how many instructions had been executed before the fault
3761 occurred. */
3762 n = n - env->icount_decr.u16.low;
3763 /* Generate a new TB ending on the I/O insn. */
3764 n++;
3765 /* On MIPS and SH, delay slot instructions can only be restarted if
3766 they were already the first instruction in the TB. If this is not
3767 the first instruction in a TB then re-execute the preceding
3768 branch. */
3769#if defined(TARGET_MIPS)
3770 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3771 env->active_tc.PC -= 4;
3772 env->icount_decr.u16.low++;
3773 env->hflags &= ~MIPS_HFLAG_BMASK;
3774 }
3775#elif defined(TARGET_SH4)
3776 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3777 && n > 1) {
3778 env->pc -= 2;
3779 env->icount_decr.u16.low++;
3780 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3781 }
3782#endif
3783 /* This should never happen. */
3784 if (n > CF_COUNT_MASK)
3785 cpu_abort(env, "TB too big during recompile");
3786
3787 cflags = n | CF_LAST_IO;
3788 pc = tb->pc;
3789 cs_base = tb->cs_base;
3790 flags = tb->flags;
3791 tb_phys_invalidate(tb, -1);
3792 /* FIXME: In theory this could raise an exception. In practice
3793 we have already translated the block once so it's probably ok. */
3794 tb_gen_code(env, pc, cs_base, flags, cflags);
3795 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3796 the first in the TB) then we end up generating a whole new TB and
3797 repeating the fault, which is horribly inefficient.
3798 Better would be to execute just this insn uncached, or generate a
3799 second new TB. */
3800 cpu_resume_from_signal(env, NULL);
3801}
3802
3803#ifndef VBOX
3804void dump_exec_info(FILE *f,
3805 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3806{
3807 int i, target_code_size, max_target_code_size;
3808 int direct_jmp_count, direct_jmp2_count, cross_page;
3809 TranslationBlock *tb;
3810
3811 target_code_size = 0;
3812 max_target_code_size = 0;
3813 cross_page = 0;
3814 direct_jmp_count = 0;
3815 direct_jmp2_count = 0;
3816 for(i = 0; i < nb_tbs; i++) {
3817 tb = &tbs[i];
3818 target_code_size += tb->size;
3819 if (tb->size > max_target_code_size)
3820 max_target_code_size = tb->size;
3821 if (tb->page_addr[1] != -1)
3822 cross_page++;
3823 if (tb->tb_next_offset[0] != 0xffff) {
3824 direct_jmp_count++;
3825 if (tb->tb_next_offset[1] != 0xffff) {
3826 direct_jmp2_count++;
3827 }
3828 }
3829 }
3830 /* XXX: avoid using doubles ? */
3831 cpu_fprintf(f, "Translation buffer state:\n");
3832 cpu_fprintf(f, "gen code size %ld/%ld\n",
3833 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3834 cpu_fprintf(f, "TB count %d/%d\n",
3835 nb_tbs, code_gen_max_blocks);
3836 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3837 nb_tbs ? target_code_size / nb_tbs : 0,
3838 max_target_code_size);
3839 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3840 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3841 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3842 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3843 cross_page,
3844 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3845 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3846 direct_jmp_count,
3847 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3848 direct_jmp2_count,
3849 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3850 cpu_fprintf(f, "\nStatistics:\n");
3851 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3852 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3853 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3854 tcg_dump_info(f, cpu_fprintf);
3855}
3856#endif /* !VBOX */
3857
3858#if !defined(CONFIG_USER_ONLY)
3859
3860#define MMUSUFFIX _cmmu
3861#define GETPC() NULL
3862#define env cpu_single_env
3863#define SOFTMMU_CODE_ACCESS
3864
3865#define SHIFT 0
3866#include "softmmu_template.h"
3867
3868#define SHIFT 1
3869#include "softmmu_template.h"
3870
3871#define SHIFT 2
3872#include "softmmu_template.h"
3873
3874#define SHIFT 3
3875#include "softmmu_template.h"
3876
3877#undef env
3878
3879#endif