VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@ 13462

Last change on this file since 13462 was 13440, checked in by vboxsync, 16 years ago

further MSVC stuff, almost there

  • Property svn:eol-style set to native
File size: 93.2 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118ALIGNED_MEMBER(uint8_t, code_gen_prologue[1024], 32);
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140uint32_t phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self modifying code, we count the number
163 of lookups we do to a given page to use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64 bits address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
185#endif
186
187#define L1_SIZE (1 << L1_BITS)
188#define L2_SIZE (1 << L2_BITS)
189
190static void io_mem_init(void);
191
192unsigned long qemu_real_host_page_size;
193unsigned long qemu_host_page_bits;
194unsigned long qemu_host_page_size;
195unsigned long qemu_host_page_mask;
196
197/* XXX: for system emulation, it could just be an array */
198static PageDesc *l1_map[L1_SIZE];
199static PhysPageDesc **l1_phys_map;
200
201#if !defined(CONFIG_USER_ONLY)
202static void io_mem_init(void);
203
204/* io memory support */
205CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
206CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
207void *io_mem_opaque[IO_MEM_NB_ENTRIES];
208static int io_mem_nb;
209static int io_mem_watch;
210#endif
211
212#ifndef VBOX
213/* log support */
214static const char *logfilename = "/tmp/qemu.log";
215#endif /* !VBOX */
216FILE *logfile;
217int loglevel;
218#ifndef VBOX
219static int log_append = 0;
220#endif
221
222/* statistics */
223static int tlb_flush_count;
224static int tb_flush_count;
225#ifndef VBOX
226static int tb_phys_invalidate_count;
227#endif /* !VBOX */
228
229#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
230typedef struct subpage_t {
231 target_phys_addr_t base;
232 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
233 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
234 void *opaque[TARGET_PAGE_SIZE][2][4];
235} subpage_t;
236
237
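/* Make the host memory range [addr, addr+size) readable, writable and
   executable: VirtualProtect() on Windows, mprotect() on other hosts,
   RTMemProtect() in the VBox build. Used for the code generation buffer
   and the prologue. */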
238#ifndef VBOX
239#ifdef _WIN32
240static void map_exec(void *addr, long size)
241{
242 DWORD old_protect;
243 VirtualProtect(addr, size,
244 PAGE_EXECUTE_READWRITE, &old_protect);
245
246}
247#else
248static void map_exec(void *addr, long size)
249{
250 unsigned long start, end, page_size;
251
252 page_size = getpagesize();
253 start = (unsigned long)addr;
254 start &= ~(page_size - 1);
255
256 end = (unsigned long)addr + size;
257 end += page_size - 1;
258 end &= ~(page_size - 1);
259
260 mprotect((void *)start, end - start,
261 PROT_READ | PROT_WRITE | PROT_EXEC);
262}
263#endif
264#else // VBOX
265static void map_exec(void *addr, long size)
266{
267 RTMemProtect(addr, size,
268 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
269}
270#endif
271
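/* One-time host page setup: determine the real host page size (PAGE_SIZE in
   the VBox build), derive qemu_host_page_size/bits/mask (never smaller than
   TARGET_PAGE_SIZE), and allocate the zeroed l1_phys_map table. Non-VBox
   user-mode builds additionally mark existing host mappings found in
   /proc/self/maps as PAGE_RESERVED. */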
272static void page_init(void)
273{
274 /* NOTE: we can always suppose that qemu_host_page_size >=
275 TARGET_PAGE_SIZE */
276#ifdef VBOX
277 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
278 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
279 qemu_real_host_page_size = PAGE_SIZE;
280#else /* !VBOX */
281#ifdef _WIN32
282 {
283 SYSTEM_INFO system_info;
284 DWORD old_protect;
285
286 GetSystemInfo(&system_info);
287 qemu_real_host_page_size = system_info.dwPageSize;
288 }
289#else
290 qemu_real_host_page_size = getpagesize();
291#endif
292#endif /* !VBOX */
293
294 if (qemu_host_page_size == 0)
295 qemu_host_page_size = qemu_real_host_page_size;
296 if (qemu_host_page_size < TARGET_PAGE_SIZE)
297 qemu_host_page_size = TARGET_PAGE_SIZE;
298 qemu_host_page_bits = 0;
299#ifndef VBOX
300 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
301#else
302 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
303#endif
304 qemu_host_page_bits++;
305 qemu_host_page_mask = ~(qemu_host_page_size - 1);
306 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
307 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
308#ifdef VBOX
309 /* We use other means to set reserved bit on our pages */
310#else
311#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
312 {
313 long long startaddr, endaddr;
314 FILE *f;
315 int n;
316
317 mmap_lock();
318 last_brk = (unsigned long)sbrk(0);
319 f = fopen("/proc/self/maps", "r");
320 if (f) {
321 do {
322 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
323 if (n == 2) {
324 startaddr = MIN(startaddr,
325 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
326 endaddr = MIN(endaddr,
327 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
328 page_set_flags(startaddr & TARGET_PAGE_MASK,
329 TARGET_PAGE_ALIGN(endaddr),
330 PAGE_RESERVED);
331 }
332 } while (!feof(f));
333 fclose(f);
334 }
335 mmap_unlock();
336 }
337#endif
338#endif
339}
340
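/* Two-level lookup of the PageDesc for a target page index: the upper bits
   select an l1_map slot, and each slot points to an array of L2_SIZE
   PageDescs. page_find_alloc() allocates the second level on demand (plain
   mmap in user-mode builds so it cannot recurse into qemu_malloc);
   page_find() returns NULL when the page has never been touched. */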
341#ifndef VBOX
342static inline PageDesc **page_l1_map(target_ulong index)
343#else
344DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
345#endif
346{
347#if TARGET_LONG_BITS > 32
348 /* Host memory outside guest VM. For 32-bit targets we have already
349 excluded high addresses. */
350 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
351 return NULL;
352#endif
353 return &l1_map[index >> L2_BITS];
354}
355
356#ifndef VBOX
357static inline PageDesc *page_find_alloc(target_ulong index)
358#else
359DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
360#endif
361{
362 PageDesc **lp, *p;
363 lp = page_l1_map(index);
364 if (!lp)
365 return NULL;
366
367 p = *lp;
368 if (!p) {
369 /* allocate if not found */
370#if defined(CONFIG_USER_ONLY)
371 unsigned long addr;
372 size_t len = sizeof(PageDesc) * L2_SIZE;
373 /* Don't use qemu_malloc because it may recurse. */
374 p = mmap(0, len, PROT_READ | PROT_WRITE,
375 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
376 *lp = p;
377 addr = h2g(p);
378 if (addr == (target_ulong)addr) {
379 page_set_flags(addr & TARGET_PAGE_MASK,
380 TARGET_PAGE_ALIGN(addr + len),
381 PAGE_RESERVED);
382 }
383#else
384 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
385 *lp = p;
386#endif
387 }
388 return p + (index & (L2_SIZE - 1));
389}
390
391#ifndef VBOX
392static inline PageDesc *page_find(target_ulong index)
393#else
394DECLINLINE(PageDesc *) page_find(target_ulong index)
395#endif
396{
397 PageDesc **lp, *p;
398 lp = page_l1_map(index);
399 if (!lp)
400 return NULL;
401
402 p = *lp;
403 if (!p)
404 return 0;
405 return p + (index & (L2_SIZE - 1));
406}
407
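/* Look up the PhysPageDesc for a physical page index, optionally allocating
   the intermediate tables. Newly created leaf entries default to
   IO_MEM_UNASSIGNED; with more than 32 physical address bits an extra
   indirection level is used. In the VBox build without the new phys code, a
   page still marked IO_MEM_RAM_MISSING triggers remR3GrowDynRange() to
   materialize the dynamic RAM chunk before the descriptor is returned. */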
408static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
409{
410 void **lp, **p;
411 PhysPageDesc *pd;
412
413 p = (void **)l1_phys_map;
414#if TARGET_PHYS_ADDR_SPACE_BITS > 32
415
416#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
417#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
418#endif
419 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
420 p = *lp;
421 if (!p) {
422 /* allocate if not found */
423 if (!alloc)
424 return NULL;
425 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
426 memset(p, 0, sizeof(void *) * L1_SIZE);
427 *lp = p;
428 }
429#endif
430 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
431 pd = *lp;
432 if (!pd) {
433 int i;
434 /* allocate if not found */
435 if (!alloc)
436 return NULL;
437 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
438 *lp = pd;
439 for (i = 0; i < L2_SIZE; i++)
440 pd[i].phys_offset = IO_MEM_UNASSIGNED;
441 }
442#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
443 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
444 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
445 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
446 return pd;
447#else
448 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
449#endif
450}
451
452#ifndef VBOX
453static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
454#else
455DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
456#endif
457{
458 return phys_page_find_alloc(index, 0);
459}
460
461#if !defined(CONFIG_USER_ONLY)
462static void tlb_protect_code(ram_addr_t ram_addr);
463static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
464 target_ulong vaddr);
465#define mmap_lock() do { } while(0)
466#define mmap_unlock() do { } while(0)
467#endif
468
469#ifdef VBOX
470/** @todo nike: isn't 32M too much ? */
471#endif
472#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
473
474#if defined(CONFIG_USER_ONLY)
475/* Currently it is not recommended to allocate big chunks of data in
476 user mode. It will change when a dedicated libc is used */
477#define USE_STATIC_CODE_GEN_BUFFER
478#endif
479
480/* VBox allocates codegen buffer dynamically */
481#ifndef VBOX
482#ifdef USE_STATIC_CODE_GEN_BUFFER
483static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
484#endif
485#endif
486
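/* Allocate the translated-code buffer. User-mode builds use a static buffer;
   the VBox build uses RTMemExecAlloc(); otherwise the buffer is mmap()ed
   with host-specific constraints (MAP_32BIT on Linux/x86_64, fixed low
   addresses on sparc64 and FreeBSD/amd64) so generated calls and branches
   stay in range. Finally the prologue is made executable and the TB array
   is sized from CODE_GEN_AVG_BLOCK_SIZE. */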
487static void code_gen_alloc(unsigned long tb_size)
488{
489#ifdef USE_STATIC_CODE_GEN_BUFFER
490 code_gen_buffer = static_code_gen_buffer;
491 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
492 map_exec(code_gen_buffer, code_gen_buffer_size);
493#else
494 code_gen_buffer_size = tb_size;
495 if (code_gen_buffer_size == 0) {
496#if defined(CONFIG_USER_ONLY)
497 /* in user mode, phys_ram_size is not meaningful */
498 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
499#else
500 /* XXX: needs adjustments */
501 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
502#endif
503 }
504 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
505 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
506 /* The code gen buffer location may have constraints depending on
507 the host cpu and OS */
508#ifdef VBOX
509 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
510
511 if (!code_gen_buffer) {
512 LogRel(("REM: failed to allocate codegen buffer %lld\n",
513 code_gen_buffer_size));
514 return;
515 }
516#else //!VBOX
517#if defined(__linux__)
518 {
519 int flags;
520 void *start = NULL;
521
522 flags = MAP_PRIVATE | MAP_ANONYMOUS;
523#if defined(__x86_64__)
524 flags |= MAP_32BIT;
525 /* Cannot map more than that */
526 if (code_gen_buffer_size > (800 * 1024 * 1024))
527 code_gen_buffer_size = (800 * 1024 * 1024);
528#elif defined(__sparc_v9__)
529 // Map the buffer below 2G, so we can use direct calls and branches
530 flags |= MAP_FIXED;
531 start = (void *) 0x60000000UL;
532 if (code_gen_buffer_size > (512 * 1024 * 1024))
533 code_gen_buffer_size = (512 * 1024 * 1024);
534#endif
535 code_gen_buffer = mmap(start, code_gen_buffer_size,
536 PROT_WRITE | PROT_READ | PROT_EXEC,
537 flags, -1, 0);
538 if (code_gen_buffer == MAP_FAILED) {
539 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
540 exit(1);
541 }
542 }
543#elif defined(__FreeBSD__)
544 {
545 int flags;
546 void *addr = NULL;
547 flags = MAP_PRIVATE | MAP_ANONYMOUS;
548#if defined(__x86_64__)
549 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
550 * 0x40000000 is free */
551 flags |= MAP_FIXED;
552 addr = (void *)0x40000000;
553 /* Cannot map more than that */
554 if (code_gen_buffer_size > (800 * 1024 * 1024))
555 code_gen_buffer_size = (800 * 1024 * 1024);
556#endif
557 code_gen_buffer = mmap(addr, code_gen_buffer_size,
558 PROT_WRITE | PROT_READ | PROT_EXEC,
559 flags, -1, 0);
560 if (code_gen_buffer == MAP_FAILED) {
561 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
562 exit(1);
563 }
564 }
565#else
566 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
567 if (!code_gen_buffer) {
568 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
569 exit(1);
570 }
571 map_exec(code_gen_buffer, code_gen_buffer_size);
572#endif
573#endif // VBOX
574#endif /* !USE_STATIC_CODE_GEN_BUFFER */
575 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
576 code_gen_buffer_max_size = code_gen_buffer_size -
577 code_gen_max_block_size();
578 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
579 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
580}
581
582/* Must be called before using the QEMU cpus. 'tb_size' is the size
583 (in bytes) allocated to the translation buffer. Zero means default
584 size. */
585void cpu_exec_init_all(unsigned long tb_size)
586{
587 cpu_gen_init();
588 code_gen_alloc(tb_size);
589 code_gen_ptr = code_gen_buffer;
590 page_init();
591#if !defined(CONFIG_USER_ONLY)
592 io_mem_init();
593#endif
594}
595
596#ifndef VBOX
597#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
598
599#define CPU_COMMON_SAVE_VERSION 1
600
601static void cpu_common_save(QEMUFile *f, void *opaque)
602{
603 CPUState *env = opaque;
604
605 qemu_put_be32s(f, &env->halted);
606 qemu_put_be32s(f, &env->interrupt_request);
607}
608
609static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
610{
611 CPUState *env = opaque;
612
613 if (version_id != CPU_COMMON_SAVE_VERSION)
614 return -EINVAL;
615
616 qemu_get_be32s(f, &env->halted);
617 qemu_get_be32s(f, &env->interrupt_request);
618 tlb_flush(env, 1);
619
620 return 0;
621}
622#endif
623#endif //!VBOX
624
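/* Register a CPU: append it to the first_cpu list, give it the next
   sequential cpu_index and clear its watchpoint count. Non-VBox system
   builds also register the savevm handlers here. */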
625void cpu_exec_init(CPUState *env)
626{
627 CPUState **penv;
628 int cpu_index;
629
630 env->next_cpu = NULL;
631 penv = &first_cpu;
632 cpu_index = 0;
633 while (*penv != NULL) {
634 penv = (CPUState **)&(*penv)->next_cpu;
635 cpu_index++;
636 }
637 env->cpu_index = cpu_index;
638 env->nb_watchpoints = 0;
639 *penv = env;
640#ifndef VBOX
641#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
642 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
643 cpu_common_save, cpu_common_load, env);
644 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
645 cpu_save, cpu_load, env);
646#endif
647#endif // !VBOX
648}
649
650#ifndef VBOX
651static inline void invalidate_page_bitmap(PageDesc *p)
652#else
653DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
654#endif
655{
656 if (p->code_bitmap) {
657 qemu_free(p->code_bitmap);
658 p->code_bitmap = NULL;
659 }
660 p->code_write_count = 0;
661}
662
663/* set to NULL all the 'first_tb' fields in all PageDescs */
664static void page_flush_tb(void)
665{
666 int i, j;
667 PageDesc *p;
668
669 for(i = 0; i < L1_SIZE; i++) {
670 p = l1_map[i];
671 if (p) {
672 for(j = 0; j < L2_SIZE; j++) {
673 p->first_tb = NULL;
674 invalidate_page_bitmap(p);
675 p++;
676 }
677 }
678 }
679}
680
681/* flush all the translation blocks */
682/* XXX: tb_flush is currently not thread safe */
683void tb_flush(CPUState *env1)
684{
685 CPUState *env;
686#if defined(DEBUG_FLUSH)
687 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
688 (unsigned long)(code_gen_ptr - code_gen_buffer),
689 nb_tbs, nb_tbs > 0 ?
690 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
691#endif
692 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
693 cpu_abort(env1, "Internal error: code buffer overflow\n");
694
695 nb_tbs = 0;
696
697 for(env = first_cpu; env != NULL; env = env->next_cpu) {
698 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
699 }
700
701 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
702 page_flush_tb();
703
704 code_gen_ptr = code_gen_buffer;
705 /* XXX: flush processor icache at this point if cache flush is
706 expensive */
707 tb_flush_count++;
708}
709
710#ifdef DEBUG_TB_CHECK
711static void tb_invalidate_check(target_ulong address)
712{
713 TranslationBlock *tb;
714 int i;
715 address &= TARGET_PAGE_MASK;
716 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
717 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
718 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
719 address >= tb->pc + tb->size)) {
720 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
721 address, (long)tb->pc, tb->size);
722 }
723 }
724 }
725}
726
727/* verify that all the pages have correct rights for code */
728static void tb_page_check(void)
729{
730 TranslationBlock *tb;
731 int i, flags1, flags2;
732
733 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
734 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
735 flags1 = page_get_flags(tb->pc);
736 flags2 = page_get_flags(tb->pc + tb->size - 1);
737 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
738 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
739 (long)tb->pc, tb->size, flags1, flags2);
740 }
741 }
742 }
743}
744
745static void tb_jmp_check(TranslationBlock *tb)
746{
747 TranslationBlock *tb1;
748 unsigned int n1;
749
750 /* suppress any remaining jumps to this TB */
751 tb1 = tb->jmp_first;
752 for(;;) {
753 n1 = (long)tb1 & 3;
754 tb1 = (TranslationBlock *)((long)tb1 & ~3);
755 if (n1 == 2)
756 break;
757 tb1 = tb1->jmp_next[n1];
758 }
759 /* check end of list */
760 if (tb1 != tb) {
761 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
762 }
763}
764#endif // DEBUG_TB_CHECK
765
766/* invalidate one TB */
767#ifndef VBOX
768static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
769 int next_offset)
770#else
771DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
772 int next_offset)
773#endif
774{
775 TranslationBlock *tb1;
776 for(;;) {
777 tb1 = *ptb;
778 if (tb1 == tb) {
779 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
780 break;
781 }
782 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
783 }
784}
785
786#ifndef VBOX
787static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
788#else
789DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
790#endif
791{
792 TranslationBlock *tb1;
793 unsigned int n1;
794
795 for(;;) {
796 tb1 = *ptb;
797 n1 = (long)tb1 & 3;
798 tb1 = (TranslationBlock *)((long)tb1 & ~3);
799 if (tb1 == tb) {
800 *ptb = tb1->page_next[n1];
801 break;
802 }
803 ptb = &tb1->page_next[n1];
804 }
805}
806
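/* Unlink jump 'n' of 'tb' from the circular list of incoming jumps kept by
   its destination TB. List pointers carry a tag in the low two bits:
   0/1 name the jmp_next slot of the pointing TB, 2 marks the list head
   (the destination's jmp_first). */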
807#ifndef VBOX
808static inline void tb_jmp_remove(TranslationBlock *tb, int n)
809#else
810DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
811#endif
812{
813 TranslationBlock *tb1, **ptb;
814 unsigned int n1;
815
816 ptb = &tb->jmp_next[n];
817 tb1 = *ptb;
818 if (tb1) {
819 /* find tb(n) in circular list */
820 for(;;) {
821 tb1 = *ptb;
822 n1 = (long)tb1 & 3;
823 tb1 = (TranslationBlock *)((long)tb1 & ~3);
824 if (n1 == n && tb1 == tb)
825 break;
826 if (n1 == 2) {
827 ptb = &tb1->jmp_first;
828 } else {
829 ptb = &tb1->jmp_next[n1];
830 }
831 }
832 /* now we can suppress tb(n) from the list */
833 *ptb = tb->jmp_next[n];
834
835 tb->jmp_next[n] = NULL;
836 }
837}
838
839/* reset the jump entry 'n' of a TB so that it is not chained to
840 another TB */
841#ifndef VBOX
842static inline void tb_reset_jump(TranslationBlock *tb, int n)
843#else
844DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
845#endif
846{
847 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
848}
849
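/* Remove a TB completely: drop it from the physical hash bucket, from the
   page list(s) of the one or two pages it covers, and from every CPU's
   tb_jmp_cache; then unlink its two outgoing jumps and redirect any
   remaining incoming jumps so callers fall back to the exec loop. */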
850void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
851{
852 CPUState *env;
853 PageDesc *p;
854 unsigned int h, n1;
855 target_phys_addr_t phys_pc;
856 TranslationBlock *tb1, *tb2;
857
858 /* remove the TB from the hash list */
859 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
860 h = tb_phys_hash_func(phys_pc);
861 tb_remove(&tb_phys_hash[h], tb,
862 offsetof(TranslationBlock, phys_hash_next));
863
864 /* remove the TB from the page list */
865 if (tb->page_addr[0] != page_addr) {
866 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
867 tb_page_remove(&p->first_tb, tb);
868 invalidate_page_bitmap(p);
869 }
870 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
871 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
872 tb_page_remove(&p->first_tb, tb);
873 invalidate_page_bitmap(p);
874 }
875
876 tb_invalidated_flag = 1;
877
878 /* remove the TB from the hash list */
879 h = tb_jmp_cache_hash_func(tb->pc);
880 for(env = first_cpu; env != NULL; env = env->next_cpu) {
881 if (env->tb_jmp_cache[h] == tb)
882 env->tb_jmp_cache[h] = NULL;
883 }
884
885 /* suppress this TB from the two jump lists */
886 tb_jmp_remove(tb, 0);
887 tb_jmp_remove(tb, 1);
888
889 /* suppress any remaining jumps to this TB */
890 tb1 = tb->jmp_first;
891 for(;;) {
892 n1 = (long)tb1 & 3;
893 if (n1 == 2)
894 break;
895 tb1 = (TranslationBlock *)((long)tb1 & ~3);
896 tb2 = tb1->jmp_next[n1];
897 tb_reset_jump(tb1, n1);
898 tb1->jmp_next[n1] = NULL;
899 tb1 = tb2;
900 }
901 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
902
903#ifndef VBOX
904 tb_phys_invalidate_count++;
905#endif
906}
907
908
909#ifdef VBOX
910void tb_invalidate_virt(CPUState *env, uint32_t eip)
911{
912# if 1
913 tb_flush(env);
914# else
915 uint8_t *cs_base, *pc;
916 unsigned int flags, h, phys_pc;
917 TranslationBlock *tb, **ptb;
918
919 flags = env->hflags;
920 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
921 cs_base = env->segs[R_CS].base;
922 pc = cs_base + eip;
923
924 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
925 flags);
926
927 if(tb)
928 {
929# ifdef DEBUG
930 printf("invalidating TB (%08X) at %08X\n", tb, eip);
931# endif
932 tb_invalidate(tb);
933 //Note: this will leak TBs, but the whole cache will be flushed
934 // when it happens too often
935 tb->pc = 0;
936 tb->cs_base = 0;
937 tb->flags = 0;
938 }
939# endif
940}
941
942# ifdef VBOX_STRICT
943/**
944 * Gets the page offset.
945 */
946unsigned long get_phys_page_offset(target_ulong addr)
947{
948 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
949 return p ? p->phys_offset : 0;
950}
951# endif /* VBOX_STRICT */
952#endif /* VBOX */
953
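/* Set bits [start, start+len) in the byte-wise bitmap 'tab'
   (bit i lives in tab[i >> 3], position i & 7). */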
954#ifndef VBOX
955static inline void set_bits(uint8_t *tab, int start, int len)
956#else
957DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
958#endif
959{
960 int end, mask, end1;
961
962 end = start + len;
963 tab += start >> 3;
964 mask = 0xff << (start & 7);
965 if ((start & ~7) == (end & ~7)) {
966 if (start < end) {
967 mask &= ~(0xff << (end & 7));
968 *tab |= mask;
969 }
970 } else {
971 *tab++ |= mask;
972 start = (start + 8) & ~7;
973 end1 = end & ~7;
974 while (start < end1) {
975 *tab++ = 0xff;
976 start += 8;
977 }
978 if (start < end) {
979 mask = ~(0xff << (end & 7));
980 *tab |= mask;
981 }
982 }
983}
984
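/* Build the per-page code bitmap: one bit per byte of the page, set for
   every byte covered by a TB linked on this page. It filters the
   self-modifying-code checks once code_write_count reaches
   SMC_BITMAP_USE_THRESHOLD. */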
985static void build_page_bitmap(PageDesc *p)
986{
987 int n, tb_start, tb_end;
988 TranslationBlock *tb;
989
990 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
991 if (!p->code_bitmap)
992 return;
993 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
994
995 tb = p->first_tb;
996 while (tb != NULL) {
997 n = (long)tb & 3;
998 tb = (TranslationBlock *)((long)tb & ~3);
999 /* NOTE: this is subtle as a TB may span two physical pages */
1000 if (n == 0) {
1001 /* NOTE: tb_end may be after the end of the page, but
1002 it is not a problem */
1003 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1004 tb_end = tb_start + tb->size;
1005 if (tb_end > TARGET_PAGE_SIZE)
1006 tb_end = TARGET_PAGE_SIZE;
1007 } else {
1008 tb_start = 0;
1009 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1010 }
1011 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1012 tb = tb->page_next[n];
1013 }
1014}
1015
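/* Translate one guest block at (pc, cs_base, flags). If no TB slot or code
   buffer space is left, the whole cache is flushed and the allocation is
   retried (which cannot fail). The TB is then linked into the physical
   page table(s); phys_page2 stays -1 unless the block crosses a page
   boundary. */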
1016TranslationBlock *tb_gen_code(CPUState *env,
1017 target_ulong pc, target_ulong cs_base,
1018 int flags, int cflags)
1019{
1020 TranslationBlock *tb;
1021 uint8_t *tc_ptr;
1022 target_ulong phys_pc, phys_page2, virt_page2;
1023 int code_gen_size;
1024
1025 phys_pc = get_phys_addr_code(env, pc);
1026 tb = tb_alloc(pc);
1027 if (!tb) {
1028 /* flush must be done */
1029 tb_flush(env);
1030 /* cannot fail at this point */
1031 tb = tb_alloc(pc);
1032 /* Don't forget to invalidate previous TB info. */
1033 tb_invalidated_flag = 1;
1034 }
1035 tc_ptr = code_gen_ptr;
1036 tb->tc_ptr = tc_ptr;
1037 tb->cs_base = cs_base;
1038 tb->flags = flags;
1039 tb->cflags = cflags;
1040 cpu_gen_code(env, tb, &code_gen_size);
1041 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1042
1043 /* check next page if needed */
1044 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1045 phys_page2 = -1;
1046 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1047 phys_page2 = get_phys_addr_code(env, virt_page2);
1048 }
1049 tb_link_phys(tb, phys_pc, phys_page2);
1050 return tb;
1051}
1052
1053/* invalidate all TBs which intersect with the target physical page
1054 starting in range [start;end[. NOTE: start and end must refer to
1055 the same physical page. 'is_cpu_write_access' should be true if called
1056 from a real cpu write access: the virtual CPU will exit the current
1057 TB if code is modified inside this TB. */
1058void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1059 int is_cpu_write_access)
1060{
1061 int n, current_tb_modified, current_tb_not_found, current_flags;
1062 CPUState *env = cpu_single_env;
1063 PageDesc *p;
1064 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1065 target_ulong tb_start, tb_end;
1066 target_ulong current_pc, current_cs_base;
1067
1068 p = page_find(start >> TARGET_PAGE_BITS);
1069 if (!p)
1070 return;
1071 if (!p->code_bitmap &&
1072 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1073 is_cpu_write_access) {
1074 /* build code bitmap */
1075 build_page_bitmap(p);
1076 }
1077
1078 /* we remove all the TBs in the range [start, end[ */
1079 /* XXX: see if in some cases it could be faster to invalidate all the code */
1080 current_tb_not_found = is_cpu_write_access;
1081 current_tb_modified = 0;
1082 current_tb = NULL; /* avoid warning */
1083 current_pc = 0; /* avoid warning */
1084 current_cs_base = 0; /* avoid warning */
1085 current_flags = 0; /* avoid warning */
1086 tb = p->first_tb;
1087 while (tb != NULL) {
1088 n = (long)tb & 3;
1089 tb = (TranslationBlock *)((long)tb & ~3);
1090 tb_next = tb->page_next[n];
1091 /* NOTE: this is subtle as a TB may span two physical pages */
1092 if (n == 0) {
1093 /* NOTE: tb_end may be after the end of the page, but
1094 it is not a problem */
1095 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1096 tb_end = tb_start + tb->size;
1097 } else {
1098 tb_start = tb->page_addr[1];
1099 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1100 }
1101 if (!(tb_end <= start || tb_start >= end)) {
1102#ifdef TARGET_HAS_PRECISE_SMC
1103 if (current_tb_not_found) {
1104 current_tb_not_found = 0;
1105 current_tb = NULL;
1106 if (env->mem_io_pc) {
1107 /* now we have a real cpu fault */
1108 current_tb = tb_find_pc(env->mem_io_pc);
1109 }
1110 }
1111 if (current_tb == tb &&
1112 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1113 /* If we are modifying the current TB, we must stop
1114 its execution. We could be more precise by checking
1115 that the modification is after the current PC, but it
1116 would require a specialized function to partially
1117 restore the CPU state */
1118
1119 current_tb_modified = 1;
1120 cpu_restore_state(current_tb, env,
1121 env->mem_io_pc, NULL);
1122#if defined(TARGET_I386)
1123 current_flags = env->hflags;
1124 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1125 current_cs_base = (target_ulong)env->segs[R_CS].base;
1126 current_pc = current_cs_base + env->eip;
1127#else
1128#error unsupported CPU
1129#endif
1130 }
1131#endif /* TARGET_HAS_PRECISE_SMC */
1132 /* we need to do that to handle the case where a signal
1133 occurs while doing tb_phys_invalidate() */
1134 saved_tb = NULL;
1135 if (env) {
1136 saved_tb = env->current_tb;
1137 env->current_tb = NULL;
1138 }
1139 tb_phys_invalidate(tb, -1);
1140 if (env) {
1141 env->current_tb = saved_tb;
1142 if (env->interrupt_request && env->current_tb)
1143 cpu_interrupt(env, env->interrupt_request);
1144 }
1145 }
1146 tb = tb_next;
1147 }
1148#if !defined(CONFIG_USER_ONLY)
1149 /* if no code remaining, no need to continue to use slow writes */
1150 if (!p->first_tb) {
1151 invalidate_page_bitmap(p);
1152 if (is_cpu_write_access) {
1153 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1154 }
1155 }
1156#endif
1157#ifdef TARGET_HAS_PRECISE_SMC
1158 if (current_tb_modified) {
1159 /* we generate a block containing just the instruction
1160 modifying the memory. It will ensure that it cannot modify
1161 itself */
1162 env->current_tb = NULL;
1163 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1164 cpu_resume_from_signal(env, NULL);
1165 }
1166#endif
1167}
1168
1169
1170/* len must be <= 8 and start must be a multiple of len */
1171#ifndef VBOX
1172static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1173#else
1174DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1175#endif
1176{
1177 PageDesc *p;
1178 int offset, b;
1179#if 0
1180 if (1) {
1181 if (loglevel) {
1182 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1183 cpu_single_env->mem_io_vaddr, len,
1184 cpu_single_env->eip,
1185 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1186 }
1187 }
1188#endif
1189 p = page_find(start >> TARGET_PAGE_BITS);
1190 if (!p)
1191 return;
1192 if (p->code_bitmap) {
1193 offset = start & ~TARGET_PAGE_MASK;
1194 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1195 if (b & ((1 << len) - 1))
1196 goto do_invalidate;
1197 } else {
1198 do_invalidate:
1199 tb_invalidate_phys_page_range(start, start + len, 1);
1200 }
1201}
1202
1203
1204#if !defined(CONFIG_SOFTMMU)
1205static void tb_invalidate_phys_page(target_phys_addr_t addr,
1206 unsigned long pc, void *puc)
1207{
1208 int n, current_flags, current_tb_modified;
1209 target_ulong current_pc, current_cs_base;
1210 PageDesc *p;
1211 TranslationBlock *tb, *current_tb;
1212#ifdef TARGET_HAS_PRECISE_SMC
1213 CPUState *env = cpu_single_env;
1214#endif
1215
1216 addr &= TARGET_PAGE_MASK;
1217 p = page_find(addr >> TARGET_PAGE_BITS);
1218 if (!p)
1219 return;
1220 tb = p->first_tb;
1221 current_tb_modified = 0;
1222 current_tb = NULL;
1223 current_pc = 0; /* avoid warning */
1224 current_cs_base = 0; /* avoid warning */
1225 current_flags = 0; /* avoid warning */
1226#ifdef TARGET_HAS_PRECISE_SMC
1227 if (tb && pc != 0) {
1228 current_tb = tb_find_pc(pc);
1229 }
1230#endif
1231 while (tb != NULL) {
1232 n = (long)tb & 3;
1233 tb = (TranslationBlock *)((long)tb & ~3);
1234#ifdef TARGET_HAS_PRECISE_SMC
1235 if (current_tb == tb &&
1236 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1237 /* If we are modifying the current TB, we must stop
1238 its execution. We could be more precise by checking
1239 that the modification is after the current PC, but it
1240 would require a specialized function to partially
1241 restore the CPU state */
1242
1243 current_tb_modified = 1;
1244 cpu_restore_state(current_tb, env, pc, puc);
1245#if defined(TARGET_I386)
1246 current_flags = env->hflags;
1247 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1248 current_cs_base = (target_ulong)env->segs[R_CS].base;
1249 current_pc = current_cs_base + env->eip;
1250#else
1251#error unsupported CPU
1252#endif
1253 }
1254#endif /* TARGET_HAS_PRECISE_SMC */
1255 tb_phys_invalidate(tb, addr);
1256 tb = tb->page_next[n];
1257 }
1258 p->first_tb = NULL;
1259#ifdef TARGET_HAS_PRECISE_SMC
1260 if (current_tb_modified) {
1261 /* we generate a block containing just the instruction
1262 modifying the memory. It will ensure that it cannot modify
1263 itself */
1264 env->current_tb = NULL;
1265 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1266 cpu_resume_from_signal(env, puc);
1267 }
1268#endif
1269}
1270#endif
1271
1272/* add the tb in the target page and protect it if necessary */
1273#ifndef VBOX
1274static inline void tb_alloc_page(TranslationBlock *tb,
1275 unsigned int n, target_ulong page_addr)
1276#else
1277DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1278 unsigned int n, target_ulong page_addr)
1279#endif
1280{
1281 PageDesc *p;
1282 TranslationBlock *last_first_tb;
1283
1284 tb->page_addr[n] = page_addr;
1285 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1286 tb->page_next[n] = p->first_tb;
1287 last_first_tb = p->first_tb;
1288 p->first_tb = (TranslationBlock *)((long)tb | n);
1289 invalidate_page_bitmap(p);
1290
1291#if defined(TARGET_HAS_SMC) || 1
1292
1293#if defined(CONFIG_USER_ONLY)
1294 if (p->flags & PAGE_WRITE) {
1295 target_ulong addr;
1296 PageDesc *p2;
1297 int prot;
1298
1299 /* force the host page as non writable (writes will have a
1300 page fault + mprotect overhead) */
1301 page_addr &= qemu_host_page_mask;
1302 prot = 0;
1303 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1304 addr += TARGET_PAGE_SIZE) {
1305
1306 p2 = page_find (addr >> TARGET_PAGE_BITS);
1307 if (!p2)
1308 continue;
1309 prot |= p2->flags;
1310 p2->flags &= ~PAGE_WRITE;
1311 page_get_flags(addr);
1312 }
1313 mprotect(g2h(page_addr), qemu_host_page_size,
1314 (prot & PAGE_BITS) & ~PAGE_WRITE);
1315#ifdef DEBUG_TB_INVALIDATE
1316 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1317 page_addr);
1318#endif
1319 }
1320#else
1321 /* if some code is already present, then the pages are already
1322 protected. So we handle the case where only the first TB is
1323 allocated in a physical page */
1324 if (!last_first_tb) {
1325 tlb_protect_code(page_addr);
1326 }
1327#endif
1328
1329#endif /* TARGET_HAS_SMC */
1330}
1331
1332/* Allocate a new translation block. Flush the translation buffer if
1333 too many translation blocks or too much generated code. */
1334TranslationBlock *tb_alloc(target_ulong pc)
1335{
1336 TranslationBlock *tb;
1337
1338 if (nb_tbs >= code_gen_max_blocks ||
1339#ifndef VBOX
1340 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1341#else
1342 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1343#endif
1344 return NULL;
1345 tb = &tbs[nb_tbs++];
1346 tb->pc = pc;
1347 tb->cflags = 0;
1348 return tb;
1349}
1350
1351void tb_free(TranslationBlock *tb)
1352{
1353 /* In practice this is mostly used for single use temporary TBs.
1354 Ignore the hard cases and just back up if this TB happens to
1355 be the last one generated. */
1356 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1357 code_gen_ptr = tb->tc_ptr;
1358 nb_tbs--;
1359 }
1360}
1361
1362/* add a new TB and link it to the physical page tables. phys_page2 is
1363 (-1) to indicate that only one page contains the TB. */
1364void tb_link_phys(TranslationBlock *tb,
1365 target_ulong phys_pc, target_ulong phys_page2)
1366{
1367 unsigned int h;
1368 TranslationBlock **ptb;
1369
1370 /* Grab the mmap lock to stop another thread invalidating this TB
1371 before we are done. */
1372 mmap_lock();
1373 /* add in the physical hash table */
1374 h = tb_phys_hash_func(phys_pc);
1375 ptb = &tb_phys_hash[h];
1376 tb->phys_hash_next = *ptb;
1377 *ptb = tb;
1378
1379 /* add in the page list */
1380 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1381 if (phys_page2 != -1)
1382 tb_alloc_page(tb, 1, phys_page2);
1383 else
1384 tb->page_addr[1] = -1;
1385
1386 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1387 tb->jmp_next[0] = NULL;
1388 tb->jmp_next[1] = NULL;
1389
1390 /* init original jump addresses */
1391 if (tb->tb_next_offset[0] != 0xffff)
1392 tb_reset_jump(tb, 0);
1393 if (tb->tb_next_offset[1] != 0xffff)
1394 tb_reset_jump(tb, 1);
1395
1396#ifdef DEBUG_TB_CHECK
1397 tb_page_check();
1398#endif
1399 mmap_unlock();
1400}
1401
1402/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1403 tb[1].tc_ptr. Return NULL if not found */
1404TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1405{
1406 int m_min, m_max, m;
1407 unsigned long v;
1408 TranslationBlock *tb;
1409
1410 if (nb_tbs <= 0)
1411 return NULL;
1412 if (tc_ptr < (unsigned long)code_gen_buffer ||
1413 tc_ptr >= (unsigned long)code_gen_ptr)
1414 return NULL;
1415 /* binary search (cf Knuth) */
1416 m_min = 0;
1417 m_max = nb_tbs - 1;
1418 while (m_min <= m_max) {
1419 m = (m_min + m_max) >> 1;
1420 tb = &tbs[m];
1421 v = (unsigned long)tb->tc_ptr;
1422 if (v == tc_ptr)
1423 return tb;
1424 else if (tc_ptr < v) {
1425 m_max = m - 1;
1426 } else {
1427 m_min = m + 1;
1428 }
1429 }
1430 return &tbs[m_max];
1431}
1432
1433static void tb_reset_jump_recursive(TranslationBlock *tb);
1434
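/* Break the chain for jump 'n' of 'tb': locate the destination TB (the
   list entry tagged 2), remove 'tb' from the destination's incoming-jump
   list, patch the generated jump back to its unchained target, and recurse
   into the destination so whole chains are unlinked. */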
1435#ifndef VBOX
1436static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1437#else
1438DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1439#endif
1440{
1441 TranslationBlock *tb1, *tb_next, **ptb;
1442 unsigned int n1;
1443
1444 tb1 = tb->jmp_next[n];
1445 if (tb1 != NULL) {
1446 /* find head of list */
1447 for(;;) {
1448 n1 = (long)tb1 & 3;
1449 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1450 if (n1 == 2)
1451 break;
1452 tb1 = tb1->jmp_next[n1];
1453 }
1454 /* we are now sure that tb jumps to tb1 */
1455 tb_next = tb1;
1456
1457 /* remove tb from the jmp_first list */
1458 ptb = &tb_next->jmp_first;
1459 for(;;) {
1460 tb1 = *ptb;
1461 n1 = (long)tb1 & 3;
1462 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1463 if (n1 == n && tb1 == tb)
1464 break;
1465 ptb = &tb1->jmp_next[n1];
1466 }
1467 *ptb = tb->jmp_next[n];
1468 tb->jmp_next[n] = NULL;
1469
1470 /* suppress the jump to next tb in generated code */
1471 tb_reset_jump(tb, n);
1472
1473 /* suppress jumps in the tb on which we could have jumped */
1474 tb_reset_jump_recursive(tb_next);
1475 }
1476}
1477
1478static void tb_reset_jump_recursive(TranslationBlock *tb)
1479{
1480 tb_reset_jump_recursive2(tb, 0);
1481 tb_reset_jump_recursive2(tb, 1);
1482}
1483
1484#if defined(TARGET_HAS_ICE)
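/* Invalidate any TB containing the breakpoint address: translate 'pc'
   through cpu_get_phys_page_debug() and the physical page table, then
   invalidate the single byte at the resulting ram address so any block
   covering it is retranslated with the new breakpoint state. */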
1485static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1486{
1487 target_ulong addr, pd;
1488 ram_addr_t ram_addr;
1489 PhysPageDesc *p;
1490
1491 addr = cpu_get_phys_page_debug(env, pc);
1492 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1493 if (!p) {
1494 pd = IO_MEM_UNASSIGNED;
1495 } else {
1496 pd = p->phys_offset;
1497 }
1498 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1499 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1500}
1501#endif
1502
1503/* Add a watchpoint. */
1504int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1505{
1506 int i;
1507
1508 for (i = 0; i < env->nb_watchpoints; i++) {
1509 if (addr == env->watchpoint[i].vaddr)
1510 return 0;
1511 }
1512 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1513 return -1;
1514
1515 i = env->nb_watchpoints++;
1516 env->watchpoint[i].vaddr = addr;
1517 env->watchpoint[i].type = type;
1518 tlb_flush_page(env, addr);
1519 /* FIXME: This flush is needed because of the hack to make memory ops
1520 terminate the TB. It can be removed once the proper IO trap and
1521 re-execute bits are in. */
1522 tb_flush(env);
1523 return i;
1524}
1525
1526/* Remove a watchpoint. */
1527int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1528{
1529 int i;
1530
1531 for (i = 0; i < env->nb_watchpoints; i++) {
1532 if (addr == env->watchpoint[i].vaddr) {
1533 env->nb_watchpoints--;
1534 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1535 tlb_flush_page(env, addr);
1536 return 0;
1537 }
1538 }
1539 return -1;
1540}
1541
1542/* Remove all watchpoints. */
1543void cpu_watchpoint_remove_all(CPUState *env) {
1544 int i;
1545
1546 for (i = 0; i < env->nb_watchpoints; i++) {
1547 tlb_flush_page(env, env->watchpoint[i].vaddr);
1548 }
1549 env->nb_watchpoints = 0;
1550}
1551
1552/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1553 breakpoint is reached */
1554int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1555{
1556#if defined(TARGET_HAS_ICE)
1557 int i;
1558
1559 for(i = 0; i < env->nb_breakpoints; i++) {
1560 if (env->breakpoints[i] == pc)
1561 return 0;
1562 }
1563
1564 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1565 return -1;
1566 env->breakpoints[env->nb_breakpoints++] = pc;
1567
1568 breakpoint_invalidate(env, pc);
1569 return 0;
1570#else
1571 return -1;
1572#endif
1573}
1574
1575/* remove all breakpoints */
1576void cpu_breakpoint_remove_all(CPUState *env) {
1577#if defined(TARGET_HAS_ICE)
1578 int i;
1579 for(i = 0; i < env->nb_breakpoints; i++) {
1580 breakpoint_invalidate(env, env->breakpoints[i]);
1581 }
1582 env->nb_breakpoints = 0;
1583#endif
1584}
1585
1586/* remove a breakpoint */
1587int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1588{
1589#if defined(TARGET_HAS_ICE)
1590 int i;
1591 for(i = 0; i < env->nb_breakpoints; i++) {
1592 if (env->breakpoints[i] == pc)
1593 goto found;
1594 }
1595 return -1;
1596 found:
1597 env->nb_breakpoints--;
1598 if (i < env->nb_breakpoints)
1599 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1600
1601 breakpoint_invalidate(env, pc);
1602 return 0;
1603#else
1604 return -1;
1605#endif
1606}
1607
1608/* enable or disable single step mode. EXCP_DEBUG is returned by the
1609 CPU loop after each instruction */
1610void cpu_single_step(CPUState *env, int enabled)
1611{
1612#if defined(TARGET_HAS_ICE)
1613 if (env->singlestep_enabled != enabled) {
1614 env->singlestep_enabled = enabled;
1615 /* must flush all the translated code to avoid inconsistencies */
1616 /* XXX: only flush what is necessary */
1617 tb_flush(env);
1618 }
1619#endif
1620}
1621
1622#ifndef VBOX
1623/* enable or disable low-level logging */
1624void cpu_set_log(int log_flags)
1625{
1626 loglevel = log_flags;
1627 if (loglevel && !logfile) {
1628 logfile = fopen(logfilename, "w");
1629 if (!logfile) {
1630 perror(logfilename);
1631 _exit(1);
1632 }
1633#if !defined(CONFIG_SOFTMMU)
1634 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1635 {
1636 static uint8_t logfile_buf[4096];
1637 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1638 }
1639#else
1640 setvbuf(logfile, NULL, _IOLBF, 0);
1641#endif
1642 }
1643}
1644
1645void cpu_set_log_filename(const char *filename)
1646{
1647 logfilename = strdup(filename);
1648}
1649#endif /* !VBOX */
1650
1651/* mask must never be zero, except for A20 change call */
1652void cpu_interrupt(CPUState *env, int mask)
1653{
1654#if !defined(USE_NPTL)
1655 TranslationBlock *tb;
1656 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1657#endif
1658 int old_mask;
1659
1660 old_mask = env->interrupt_request;
1661#ifdef VBOX
1662 VM_ASSERT_EMT(env->pVM);
1663 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1664#else /* !VBOX */
1665 /* FIXME: This is probably not threadsafe. A different thread could
1666 be in the middle of a read-modify-write operation. */
1667 env->interrupt_request |= mask;
1668#endif /* !VBOX */
1669#if defined(USE_NPTL)
1670 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1671 problem and hope the cpu will stop of its own accord. For userspace
1672 emulation this often isn't actually as bad as it sounds. Often
1673 signals are used primarily to interrupt blocking syscalls. */
1674#else
1675 if (use_icount) {
1676 env->icount_decr.u16.high = 0xffff;
1677#ifndef CONFIG_USER_ONLY
1678 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1679 an async event happened and we need to process it. */
1680 if (!can_do_io(env)
1681 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1682 cpu_abort(env, "Raised interrupt while not in I/O function");
1683 }
1684#endif
1685 } else {
1686 tb = env->current_tb;
1687 /* if the cpu is currently executing code, we must unlink it and
1688 all the potentially executing TB */
1689 if (tb && !testandset(&interrupt_lock)) {
1690 env->current_tb = NULL;
1691 tb_reset_jump_recursive(tb);
1692 resetlock(&interrupt_lock);
1693 }
1694 }
1695#endif
1696}
1697
1698void cpu_reset_interrupt(CPUState *env, int mask)
1699{
1700#ifdef VBOX
1701 /*
1702 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1703 * for future changes!
1704 */
1705 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1706#else /* !VBOX */
1707 env->interrupt_request &= ~mask;
1708#endif /* !VBOX */
1709}
1710
1711#ifndef VBOX
1712CPULogItem cpu_log_items[] = {
1713 { CPU_LOG_TB_OUT_ASM, "out_asm",
1714 "show generated host assembly code for each compiled TB" },
1715 { CPU_LOG_TB_IN_ASM, "in_asm",
1716 "show target assembly code for each compiled TB" },
1717 { CPU_LOG_TB_OP, "op",
1718 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1719#ifdef TARGET_I386
1720 { CPU_LOG_TB_OP_OPT, "op_opt",
1721 "show micro ops after optimization for each compiled TB" },
1722#endif
1723 { CPU_LOG_INT, "int",
1724 "show interrupts/exceptions in short format" },
1725 { CPU_LOG_EXEC, "exec",
1726 "show trace before each executed TB (lots of logs)" },
1727 { CPU_LOG_TB_CPU, "cpu",
1728 "show CPU state before bloc translation" },
1729#ifdef TARGET_I386
1730 { CPU_LOG_PCALL, "pcall",
1731 "show protected mode far calls/returns/exceptions" },
1732#endif
1733#ifdef DEBUG_IOPORT
1734 { CPU_LOG_IOPORT, "ioport",
1735 "show all i/o ports accesses" },
1736#endif
1737 { 0, NULL, NULL },
1738};
1739
1740static int cmp1(const char *s1, int n, const char *s2)
1741{
1742 if (strlen(s2) != n)
1743 return 0;
1744 return memcmp(s1, s2, n) == 0;
1745}
1746
1747/* takes a comma separated list of log masks. Return 0 if error. */
1748int cpu_str_to_log_mask(const char *str)
1749{
1750 CPULogItem *item;
1751 int mask;
1752 const char *p, *p1;
1753
1754 p = str;
1755 mask = 0;
1756 for(;;) {
1757 p1 = strchr(p, ',');
1758 if (!p1)
1759 p1 = p + strlen(p);
1760 if(cmp1(p,p1-p,"all")) {
1761 for(item = cpu_log_items; item->mask != 0; item++) {
1762 mask |= item->mask;
1763 }
1764 } else {
1765 for(item = cpu_log_items; item->mask != 0; item++) {
1766 if (cmp1(p, p1 - p, item->name))
1767 goto found;
1768 }
1769 return 0;
1770 }
1771 found:
1772 mask |= item->mask;
1773 if (*p1 != ',')
1774 break;
1775 p = p1 + 1;
1776 }
1777 return mask;
1778}
1779#endif /* !VBOX */
1780
1781#ifndef VBOX /* VBOX: we have our own routine. */
1782void cpu_abort(CPUState *env, const char *fmt, ...)
1783{
1784 va_list ap;
1785
1786 va_start(ap, fmt);
1787 fprintf(stderr, "qemu: fatal: ");
1788 vfprintf(stderr, fmt, ap);
1789 fprintf(stderr, "\n");
1790#ifdef TARGET_I386
1791 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1792#else
1793 cpu_dump_state(env, stderr, fprintf, 0);
1794#endif
1795 va_end(ap);
1796 abort();
1797}
1798#endif /* !VBOX */
1799
1800#ifndef VBOX
1801CPUState *cpu_copy(CPUState *env)
1802{
1803 CPUState *new_env = cpu_init(env->cpu_model_str);
1804 /* preserve chaining and index */
1805 CPUState *next_cpu = new_env->next_cpu;
1806 int cpu_index = new_env->cpu_index;
1807 memcpy(new_env, env, sizeof(CPUState));
1808 new_env->next_cpu = next_cpu;
1809 new_env->cpu_index = cpu_index;
1810 return new_env;
1811}
1812#endif
1813
1814#if !defined(CONFIG_USER_ONLY)
1815
1816#ifndef VBOX
1817static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1818#else
1819DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1820#endif
1821{
1822 unsigned int i;
1823
1824 /* Discard jump cache entries for any tb which might potentially
1825 overlap the flushed page. */
1826 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1827 memset (&env->tb_jmp_cache[i], 0,
1828 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1829
1830 i = tb_jmp_cache_hash_page(addr);
1831 memset (&env->tb_jmp_cache[i], 0,
1832 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1833
1834#ifdef VBOX
1835 /* inform raw mode about TLB page flush */
1836 remR3FlushPage(env, addr);
1837#endif /* VBOX */
1838}
1839
1840/* NOTE: if flush_global is true, also flush global entries (not
1841 implemented yet) */
1842void tlb_flush(CPUState *env, int flush_global)
1843{
1844 int i;
1845
1846#if defined(DEBUG_TLB)
1847 printf("tlb_flush:\n");
1848#endif
1849 /* must reset current TB so that interrupts cannot modify the
1850 links while we are modifying them */
1851 env->current_tb = NULL;
1852
1853 for(i = 0; i < CPU_TLB_SIZE; i++) {
1854 env->tlb_table[0][i].addr_read = -1;
1855 env->tlb_table[0][i].addr_write = -1;
1856 env->tlb_table[0][i].addr_code = -1;
1857 env->tlb_table[1][i].addr_read = -1;
1858 env->tlb_table[1][i].addr_write = -1;
1859 env->tlb_table[1][i].addr_code = -1;
1860#if (NB_MMU_MODES >= 3)
1861 env->tlb_table[2][i].addr_read = -1;
1862 env->tlb_table[2][i].addr_write = -1;
1863 env->tlb_table[2][i].addr_code = -1;
1864#if (NB_MMU_MODES == 4)
1865 env->tlb_table[3][i].addr_read = -1;
1866 env->tlb_table[3][i].addr_write = -1;
1867 env->tlb_table[3][i].addr_code = -1;
1868#endif
1869#endif
1870 }
1871
1872 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1873
1874#ifdef VBOX
1875 /* inform raw mode about TLB flush */
1876 remR3FlushTLB(env, flush_global);
1877#endif
1878#ifdef USE_KQEMU
1879 if (env->kqemu_enabled) {
1880 kqemu_flush(env, flush_global);
1881 }
1882#endif
1883 tlb_flush_count++;
1884}
1885
1886#ifndef VBOX
1887static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1888#else
1889DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1890#endif
1891{
1892 if (addr == (tlb_entry->addr_read &
1893 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1894 addr == (tlb_entry->addr_write &
1895 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1896 addr == (tlb_entry->addr_code &
1897 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1898 tlb_entry->addr_read = -1;
1899 tlb_entry->addr_write = -1;
1900 tlb_entry->addr_code = -1;
1901 }
1902}
1903
1904void tlb_flush_page(CPUState *env, target_ulong addr)
1905{
1906 int i;
1907
1908#if defined(DEBUG_TLB)
1909 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1910#endif
1911 /* must reset current TB so that interrupts cannot modify the
1912 links while we are modifying them */
1913 env->current_tb = NULL;
1914
1915 addr &= TARGET_PAGE_MASK;
1916 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1917 tlb_flush_entry(&env->tlb_table[0][i], addr);
1918 tlb_flush_entry(&env->tlb_table[1][i], addr);
1919#if (NB_MMU_MODES >= 3)
1920 tlb_flush_entry(&env->tlb_table[2][i], addr);
1921#if (NB_MMU_MODES == 4)
1922 tlb_flush_entry(&env->tlb_table[3][i], addr);
1923#endif
1924#endif
1925
1926 tlb_flush_jmp_cache(env, addr);
1927
1928#ifdef USE_KQEMU
1929 if (env->kqemu_enabled) {
1930 kqemu_flush_page(env, addr);
1931 }
1932#endif
1933}
1934
1935/* update the TLBs so that writes to code in the virtual page 'addr'
1936 can be detected */
1937static void tlb_protect_code(ram_addr_t ram_addr)
1938{
1939 cpu_physical_memory_reset_dirty(ram_addr,
1940 ram_addr + TARGET_PAGE_SIZE,
1941 CODE_DIRTY_FLAG);
1942#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1943 /** @todo Retest this? This function has changed... */
1944 remR3ProtectCode(cpu_single_env, ram_addr);
1945#endif
1946}
1947
1948/* update the TLB so that writes in physical page 'phys_addr' are no longer
1949 tested for self modifying code */
1950static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1951 target_ulong vaddr)
1952{
1953#ifdef VBOX
1954 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1955#endif
1956 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1957}
1958
1959#ifndef VBOX
1960static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1961 unsigned long start, unsigned long length)
1962#else
1963DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1964 unsigned long start, unsigned long length)
1965#endif
1966{
1967 unsigned long addr;
1968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1970 if ((addr - start) < length) {
1971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1972 }
1973 }
1974}
1975
1976void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1977 int dirty_flags)
1978{
1979 CPUState *env;
1980 unsigned long length, start1;
1981 int i, mask, len;
1982 uint8_t *p;
1983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
1990 len = length >> TARGET_PAGE_BITS;
1991#ifdef USE_KQEMU
1992 /* XXX: should not depend on cpu context */
1993 env = first_cpu;
1994 if (env->kqemu_enabled) {
1995 ram_addr_t addr;
1996 addr = start;
1997 for(i = 0; i < len; i++) {
1998 kqemu_set_notdirty(env, addr);
1999 addr += TARGET_PAGE_SIZE;
2000 }
2001 }
2002#endif
2003 mask = ~dirty_flags;
2004 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2005#ifdef VBOX
2006 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2007#endif
2008 for(i = 0; i < len; i++)
2009 p[i] &= mask;
2010
2011 /* we modify the TLB cache so that the dirty bit will be set again
2012 when accessing the range */
2013#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2014 start1 = start;
2015#elif !defined(VBOX)
2016 start1 = start + (unsigned long)phys_ram_base;
2017#else
2018 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
2019#endif
2020 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2021 for(i = 0; i < CPU_TLB_SIZE; i++)
2022 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2023 for(i = 0; i < CPU_TLB_SIZE; i++)
2024 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2025#if (NB_MMU_MODES >= 3)
2026 for(i = 0; i < CPU_TLB_SIZE; i++)
2027 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2028#if (NB_MMU_MODES == 4)
2029 for(i = 0; i < CPU_TLB_SIZE; i++)
2030 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2031#endif
2032#endif
2033 }
2034}
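/* Illustrative sketch (not part of the original source): consumers of the
 * dirty bitmap typically scan a physical range, act on pages that are still
 * dirty, then clear the relevant flag so that later guest writes are trapped
 * again through the notdirty handlers.  The function name and the choice of
 * CODE_DIRTY_FLAG below are only for illustration.
 */
#if 0 /* illustrative example, not compiled */
static void example_sync_dirty_range(ram_addr_t start, ram_addr_t end)
{
    ram_addr_t addr;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(addr)) {
            /* ... copy or re-process the page here ... */
        }
    }
    cpu_physical_memory_reset_dirty(start, end, CODE_DIRTY_FLAG);
}
#endif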
2035
2036#ifndef VBOX
2037int cpu_physical_memory_set_dirty_tracking(int enable)
2038{
2039 in_migration = enable;
2040 return 0;
2041}
2042
2043int cpu_physical_memory_get_dirty_tracking(void)
2044{
2045 return in_migration;
2046}
2047#endif
2048
2049#ifndef VBOX
2050static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2051#else
2052DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry)
2053#endif
2054{
2055 ram_addr_t ram_addr;
2056
2057 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2058 /* RAM case */
2059#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2060 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2061#elif !defined(VBOX)
2062 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2063 tlb_entry->addend - (unsigned long)phys_ram_base;
2064#else
2065 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
2066#endif
2067 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2068 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
2069 }
2070 }
2071}
2072
2073/* update the TLB according to the current state of the dirty bits */
2074void cpu_tlb_update_dirty(CPUState *env)
2075{
2076 int i;
2077 for(i = 0; i < CPU_TLB_SIZE; i++)
2078 tlb_update_dirty(&env->tlb_table[0][i]);
2079 for(i = 0; i < CPU_TLB_SIZE; i++)
2080 tlb_update_dirty(&env->tlb_table[1][i]);
2081#if (NB_MMU_MODES >= 3)
2082 for(i = 0; i < CPU_TLB_SIZE; i++)
2083 tlb_update_dirty(&env->tlb_table[2][i]);
2084#if (NB_MMU_MODES == 4)
2085 for(i = 0; i < CPU_TLB_SIZE; i++)
2086 tlb_update_dirty(&env->tlb_table[3][i]);
2087#endif
2088#endif
2089}
2090
2091#ifndef VBOX
2092static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2093#else
2094DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2095#endif
2096{
2097 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2098 tlb_entry->addr_write = vaddr;
2099}
2100
2101
2102/* update the TLB corresponding to virtual page vaddr and phys addr
2103 addr so that it is no longer dirty */
2104#ifndef VBOX
2105static inline void tlb_set_dirty(CPUState *env,
2106 unsigned long addr, target_ulong vaddr)
2107#else
2108DECLINLINE(void) tlb_set_dirty(CPUState *env,
2109 unsigned long addr, target_ulong vaddr)
2110#endif
2111{
2112 int i;
2113
2114 addr &= TARGET_PAGE_MASK;
2115 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2116 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2117 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2118#if (NB_MMU_MODES >= 3)
2119 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2120#if (NB_MMU_MODES == 4)
2121 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2122#endif
2123#endif
2124}
2125
2126/* add a new TLB entry. At most one entry for a given virtual address
2127 is permitted. Return 0 if OK or 2 if the page could not be mapped
2128 (can only happen in non SOFTMMU mode for I/O pages or pages
2129 conflicting with the host address space). */
2130int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2131 target_phys_addr_t paddr, int prot,
2132 int mmu_idx, int is_softmmu)
2133{
2134 PhysPageDesc *p;
2135 unsigned long pd;
2136 unsigned int index;
2137 target_ulong address;
2138 target_ulong code_address;
2139 target_phys_addr_t addend;
2140 int ret;
2141 CPUTLBEntry *te;
2142 int i;
2143 target_phys_addr_t iotlb;
2144
2145 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2146 if (!p) {
2147 pd = IO_MEM_UNASSIGNED;
2148 } else {
2149 pd = p->phys_offset;
2150 }
2151#if defined(DEBUG_TLB)
2152 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2153 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2154#endif
2155
2156 ret = 0;
2157 address = vaddr;
2158 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2159 /* IO memory case (romd handled later) */
2160 address |= TLB_MMIO;
2161 }
2162#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2163 addend = pd & TARGET_PAGE_MASK;
2164#elif !defined(VBOX)
2165 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2166#else
2167 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
2168#endif
2169 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2170 /* Normal RAM. */
2171 iotlb = pd & TARGET_PAGE_MASK;
2172 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2173 iotlb |= IO_MEM_NOTDIRTY;
2174 else
2175 iotlb |= IO_MEM_ROM;
2176 } else {
2177 /* IO handlers are currently passed a physical address.
2178 It would be nice to pass an offset from the base address
2179 of that region. This would avoid having to special case RAM,
2180 and avoid full address decoding in every device.
2181 We can't use the high bits of pd for this because
2182 IO_MEM_ROMD uses these as a ram address. */
2183 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2184 }
2185
2186 code_address = address;
2187 /* Make accesses to pages with watchpoints go via the
2188 watchpoint trap routines. */
2189 for (i = 0; i < env->nb_watchpoints; i++) {
2190 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2191 iotlb = io_mem_watch + paddr;
2192 /* TODO: The memory case can be optimized by not trapping
2193 reads of pages with a write breakpoint. */
2194 address |= TLB_MMIO;
2195 }
2196 }
2197
2198 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2199 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2200 te = &env->tlb_table[mmu_idx][index];
2201 te->addend = addend - vaddr;
2202 if (prot & PAGE_READ) {
2203 te->addr_read = address;
2204 } else {
2205 te->addr_read = -1;
2206 }
2207
2208 if (prot & PAGE_EXEC) {
2209 te->addr_code = code_address;
2210 } else {
2211 te->addr_code = -1;
2212 }
2213 if (prot & PAGE_WRITE) {
2214 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2215 (pd & IO_MEM_ROMD)) {
2216 /* Write access calls the I/O callback. */
2217 te->addr_write = address | TLB_MMIO;
2218 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2219 !cpu_physical_memory_is_dirty(pd)) {
2220 te->addr_write = address | TLB_NOTDIRTY;
2221 } else {
2222 te->addr_write = address;
2223 }
2224 } else {
2225 te->addr_write = -1;
2226 }
2227#ifdef VBOX
2228 /* inform raw mode about TLB page change */
2229 remR3FlushPage(env, vaddr);
2230#endif
2231 return ret;
2232}
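/* Illustrative sketch (not part of the original source): the per-target MMU
 * fault handlers normally end up here once a guest page walk has produced a
 * physical address and protection bits.  'walk_paddr', 'walk_prot' and the
 * wrapper name are hypothetical results of such a walk.
 */
#if 0 /* illustrative example, not compiled */
static int example_install_mapping(CPUState *env, target_ulong vaddr,
                                   target_phys_addr_t walk_paddr,
                                   int walk_prot, int mmu_idx)
{
    /* both addresses must be page-aligned; walk_prot is a
       PAGE_READ/PAGE_WRITE/PAGE_EXEC mask */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             walk_paddr & TARGET_PAGE_MASK,
                             walk_prot, mmu_idx, 1 /* is_softmmu */);
}
#endif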
2233
2234/* called from signal handler: invalidate the code and unprotect the
2235 page. Return TRUE if the fault was successfully handled. */
2236int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2237{
2238#if !defined(CONFIG_SOFTMMU)
2239 VirtPageDesc *vp;
2240
2241#if defined(DEBUG_TLB)
2242 printf("page_unprotect: addr=0x%08x\n", addr);
2243#endif
2244 addr &= TARGET_PAGE_MASK;
2245
2246 /* if it is not mapped, no need to worry here */
2247 if (addr >= MMAP_AREA_END)
2248 return 0;
2249 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2250 if (!vp)
2251 return 0;
2252 /* NOTE: in this case, validate_tag is _not_ tested as it
2253 validates only the code TLB */
2254 if (vp->valid_tag != virt_valid_tag)
2255 return 0;
2256 if (!(vp->prot & PAGE_WRITE))
2257 return 0;
2258#if defined(DEBUG_TLB)
2259 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2260 addr, vp->phys_addr, vp->prot);
2261#endif
2262 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2263 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2264 (unsigned long)addr, vp->prot);
2265 /* set the dirty bit */
2266 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2267 /* flush the code inside */
2268 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2269 return 1;
2270#elif defined(VBOX)
2271 addr &= TARGET_PAGE_MASK;
2272
2273 /* if it is not mapped, no need to worry here */
2274 if (addr >= MMAP_AREA_END)
2275 return 0;
2276 return 1;
2277#else
2278 return 0;
2279#endif
2280}
2281
2282#else
2283
2284void tlb_flush(CPUState *env, int flush_global)
2285{
2286}
2287
2288void tlb_flush_page(CPUState *env, target_ulong addr)
2289{
2290}
2291
2292int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2293 target_phys_addr_t paddr, int prot,
2294 int is_user, int is_softmmu)
2295{
2296 return 0;
2297}
2298
2299#ifndef VBOX
2300/* dump memory mappings */
2301void page_dump(FILE *f)
2302{
2303 unsigned long start, end;
2304 int i, j, prot, prot1;
2305 PageDesc *p;
2306
2307 fprintf(f, "%-8s %-8s %-8s %s\n",
2308 "start", "end", "size", "prot");
2309 start = -1;
2310 end = -1;
2311 prot = 0;
2312 for(i = 0; i <= L1_SIZE; i++) {
2313 if (i < L1_SIZE)
2314 p = l1_map[i];
2315 else
2316 p = NULL;
2317 for(j = 0;j < L2_SIZE; j++) {
2318 if (!p)
2319 prot1 = 0;
2320 else
2321 prot1 = p[j].flags;
2322 if (prot1 != prot) {
2323 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2324 if (start != -1) {
2325 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2326 start, end, end - start,
2327 prot & PAGE_READ ? 'r' : '-',
2328 prot & PAGE_WRITE ? 'w' : '-',
2329 prot & PAGE_EXEC ? 'x' : '-');
2330 }
2331 if (prot1 != 0)
2332 start = end;
2333 else
2334 start = -1;
2335 prot = prot1;
2336 }
2337 if (!p)
2338 break;
2339 }
2340 }
2341}
2342#endif /* !VBOX */
2343
2344int page_get_flags(target_ulong address)
2345{
2346 PageDesc *p;
2347
2348 p = page_find(address >> TARGET_PAGE_BITS);
2349 if (!p)
2350 return 0;
2351 return p->flags;
2352}
2353
2354/* modify the flags of a page and invalidate the code if
2355 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2356 depending on PAGE_WRITE */
2357void page_set_flags(target_ulong start, target_ulong end, int flags)
2358{
2359 PageDesc *p;
2360 target_ulong addr;
2361
2362 start = start & TARGET_PAGE_MASK;
2363 end = TARGET_PAGE_ALIGN(end);
2364 if (flags & PAGE_WRITE)
2365 flags |= PAGE_WRITE_ORG;
2366#ifdef VBOX
2367 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2368#endif
2369 spin_lock(&tb_lock);
2370 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2371 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2372 /* if the write protection is set, then we invalidate the code
2373 inside */
2374 if (!(p->flags & PAGE_WRITE) &&
2375 (flags & PAGE_WRITE) &&
2376 p->first_tb) {
2377 tb_invalidate_phys_page(addr, 0, NULL);
2378 }
2379 p->flags = flags;
2380 }
2381 spin_unlock(&tb_lock);
2382}
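/* Illustrative sketch (not part of the original source): in user-mode
 * emulation an emulated mprotect() would record the new guest protection
 * like this, so that the PAGE_WRITE_ORG bookkeeping above can re-enable
 * writes later from page_unprotect().  The wrapper name is hypothetical.
 */
#if 0 /* illustrative example, not compiled */
static void example_track_mprotect(target_ulong start, target_ulong len, int prot_flags)
{
    /* prot_flags is a PAGE_READ/PAGE_WRITE/PAGE_EXEC mask;
       PAGE_VALID marks the range as mapped */
    page_set_flags(start, start + len, prot_flags | PAGE_VALID);
}
#endif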
2383
2384/* called from signal handler: invalidate the code and unprotect the
2385 page. Return TRUE if the fault was successfully handled. */
2386int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2387{
2388 unsigned int page_index, prot, pindex;
2389 PageDesc *p, *p1;
2390 target_ulong host_start, host_end, addr;
2391
2392 host_start = address & qemu_host_page_mask;
2393 page_index = host_start >> TARGET_PAGE_BITS;
2394 p1 = page_find(page_index);
2395 if (!p1)
2396 return 0;
2397 host_end = host_start + qemu_host_page_size;
2398 p = p1;
2399 prot = 0;
2400 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2401 prot |= p->flags;
2402 p++;
2403 }
2404 /* if the page was really writable, then we change its
2405 protection back to writable */
2406 if (prot & PAGE_WRITE_ORG) {
2407 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2408 if (!(p1[pindex].flags & PAGE_WRITE)) {
2409 mprotect((void *)g2h(host_start), qemu_host_page_size,
2410 (prot & PAGE_BITS) | PAGE_WRITE);
2411 p1[pindex].flags |= PAGE_WRITE;
2412 /* and since the content will be modified, we must invalidate
2413 the corresponding translated code. */
2414 tb_invalidate_phys_page(address, pc, puc);
2415#ifdef DEBUG_TB_CHECK
2416 tb_invalidate_check(address);
2417#endif
2418 return 1;
2419 }
2420 }
2421 return 0;
2422}
2423
2424/* call this function when system calls directly modify a memory area */
2425/* ??? This should be redundant now that we have lock_user. */
2426void page_unprotect_range(target_ulong data, target_ulong data_size)
2427{
2428 target_ulong start, end, addr;
2429
2430 start = data;
2431 end = start + data_size;
2432 start &= TARGET_PAGE_MASK;
2433 end = TARGET_PAGE_ALIGN(end);
2434 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2435 page_unprotect(addr, 0, NULL);
2436 }
2437}
2438
2439static inline void tlb_set_dirty(CPUState *env,
2440 unsigned long addr, target_ulong vaddr)
2441{
2442}
2443#endif /* defined(CONFIG_USER_ONLY) */
2444
2445/* register physical memory. 'size' must be a multiple of the target
2446 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2447 io memory page */
2448void cpu_register_physical_memory(target_phys_addr_t start_addr,
2449 unsigned long size,
2450 unsigned long phys_offset)
2451{
2452 target_phys_addr_t addr, end_addr;
2453 PhysPageDesc *p;
2454 CPUState *env;
2455
2456 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2457 end_addr = start_addr + size;
2458 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2459 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2460 p->phys_offset = phys_offset;
2461#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2462 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2463 (phys_offset & IO_MEM_ROMD))
2464#else
2465 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2466 || (phys_offset & IO_MEM_ROMD)
2467 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2468#endif
2469
2470 phys_offset += TARGET_PAGE_SIZE;
2471 }
2472
2473 /* since each CPU stores ram addresses in its TLB cache, we must
2474 reset the modified entries */
2475 /* XXX: slow ! */
2476 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2477 tlb_flush(env, 1);
2478 }
2479}
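/* Illustrative sketch (not part of the original source): plain RAM is
 * registered by passing a page-aligned offset into the RAM block as
 * 'phys_offset' (low bits clear, i.e. IO_MEM_RAM), while MMIO regions pass
 * the handle returned by cpu_register_io_memory().  The sizes and guest
 * addresses below are made up for the example.
 */
#if 0 /* illustrative example, not compiled */
static void example_register_regions(int mmio_handle)
{
    /* 16 MB of RAM at guest physical 0, backed by RAM offset 0 (IO_MEM_RAM) */
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024, 0);
    /* one MMIO page at guest physical 0xf0000000, served by the handlers
       registered under 'mmio_handle' */
    cpu_register_physical_memory(0xf0000000, TARGET_PAGE_SIZE, mmio_handle);
}
#endif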
2480
2481/* XXX: temporary until new memory mapping API */
2482uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2483{
2484 PhysPageDesc *p;
2485
2486 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2487 if (!p)
2488 return IO_MEM_UNASSIGNED;
2489 return p->phys_offset;
2490}
2491
2492static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2493{
2494#ifdef DEBUG_UNASSIGNED
2495 printf("Unassigned mem read 0x%08x\n", (int)addr);
2496#endif
2497 return 0;
2498}
2499
2500static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2501{
2502#ifdef DEBUG_UNASSIGNED
2503 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2504#endif
2505}
2506
2507static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2508 unassigned_mem_readb,
2509 unassigned_mem_readb,
2510 unassigned_mem_readb,
2511};
2512
2513static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2514 unassigned_mem_writeb,
2515 unassigned_mem_writeb,
2516 unassigned_mem_writeb,
2517};
2518
2519static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2520{
2521 unsigned long ram_addr;
2522 int dirty_flags;
2523#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2524 ram_addr = addr;
2525#elif !defined(VBOX)
2526 ram_addr = addr - (unsigned long)phys_ram_base;
2527#else
2528 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2529#endif
2530#ifdef VBOX
2531 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2532 dirty_flags = 0xff;
2533 else
2534#endif /* VBOX */
2535 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2536 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2537#if !defined(CONFIG_USER_ONLY)
2538 tb_invalidate_phys_page_fast(ram_addr, 1);
2539# ifdef VBOX
2540 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2541 dirty_flags = 0xff;
2542 else
2543# endif /* VBOX */
2544 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2545#endif
2546 }
2547 stb_p((uint8_t *)(long)addr, val);
2548#ifdef USE_KQEMU
2549 if (cpu_single_env->kqemu_enabled &&
2550 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2551 kqemu_modify_page(cpu_single_env, ram_addr);
2552#endif
2553 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2554#ifdef VBOX
2555 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2556#endif /* VBOX */
2557 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2558 /* we remove the notdirty callback only if the code has been
2559 flushed */
2560 if (dirty_flags == 0xff)
2561 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2562}
2563
2564static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2565{
2566 unsigned long ram_addr;
2567 int dirty_flags;
2568#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2569 ram_addr = addr;
2570#elif !defined(VBOX)
2571 ram_addr = addr - (unsigned long)phys_ram_base;
2572#else
2573 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2574#endif
2575#ifdef VBOX
2576 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2577 dirty_flags = 0xff;
2578 else
2579#endif /* VBOX */
2580 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2581 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2582#if !defined(CONFIG_USER_ONLY)
2583 tb_invalidate_phys_page_fast(ram_addr, 2);
2584# ifdef VBOX
2585 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2586 dirty_flags = 0xff;
2587 else
2588# endif /* VBOX */
2589 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2590#endif
2591 }
2592 stw_p((uint8_t *)(long)addr, val);
2593#ifdef USE_KQEMU
2594 if (cpu_single_env->kqemu_enabled &&
2595 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2596 kqemu_modify_page(cpu_single_env, ram_addr);
2597#endif
2598 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2599#ifdef VBOX
2600 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2601#endif
2602 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2603 /* we remove the notdirty callback only if the code has been
2604 flushed */
2605 if (dirty_flags == 0xff)
2606 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2607}
2608
2609static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2610{
2611 unsigned long ram_addr;
2612 int dirty_flags;
2613#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2614 ram_addr = addr;
2615#elif !defined(VBOX)
2616 ram_addr = addr - (unsigned long)phys_ram_base;
2617#else
2618 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2619#endif
2620#ifdef VBOX
2621 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2622 dirty_flags = 0xff;
2623 else
2624#endif /* VBOX */
2625 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2626 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2627#if !defined(CONFIG_USER_ONLY)
2628 tb_invalidate_phys_page_fast(ram_addr, 4);
2629# ifdef VBOX
2630 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2631 dirty_flags = 0xff;
2632 else
2633# endif /* VBOX */
2634 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2635#endif
2636 }
2637 stl_p((uint8_t *)(long)addr, val);
2638#ifdef USE_KQEMU
2639 if (cpu_single_env->kqemu_enabled &&
2640 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2641 kqemu_modify_page(cpu_single_env, ram_addr);
2642#endif
2643 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2644#ifdef VBOX
2645 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2646#endif
2647 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2648 /* we remove the notdirty callback only if the code has been
2649 flushed */
2650 if (dirty_flags == 0xff)
2651 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2652}
2653
2654static CPUReadMemoryFunc *error_mem_read[3] = {
2655 NULL, /* never used */
2656 NULL, /* never used */
2657 NULL, /* never used */
2658};
2659
2660static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2661 notdirty_mem_writeb,
2662 notdirty_mem_writew,
2663 notdirty_mem_writel,
2664};
2665
2666static void io_mem_init(void)
2667{
2668 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2669 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2670 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2671#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
2672 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2673 io_mem_nb = 6;
2674#else
2675 io_mem_nb = 5;
2676#endif
2677
2678#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2679 /* alloc dirty bits array */
2680 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2681 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2682#endif /* !VBOX */
2683}
2684
2685/* mem_read and mem_write are arrays of functions containing the
2686 function to access byte (index 0), word (index 1) and dword (index
2687 2). All functions must be supplied. If io_index is non-zero, the
2688 corresponding io zone is modified. If it is zero, a new io zone is
2689 allocated. The return value can be used with
2690 cpu_register_physical_memory(). (-1) is returned on error. */
2691int cpu_register_io_memory(int io_index,
2692 CPUReadMemoryFunc **mem_read,
2693 CPUWriteMemoryFunc **mem_write,
2694 void *opaque)
2695{
2696 int i;
2697
2698 if (io_index <= 0) {
2699 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2700 return -1;
2701 io_index = io_mem_nb++;
2702 } else {
2703 if (io_index >= IO_MEM_NB_ENTRIES)
2704 return -1;
2705 }
2706
2707 for(i = 0;i < 3; i++) {
2708 io_mem_read[io_index][i] = mem_read[i];
2709 io_mem_write[io_index][i] = mem_write[i];
2710 }
2711 io_mem_opaque[io_index] = opaque;
2712 return io_index << IO_MEM_SHIFT;
2713}
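/* Illustrative sketch (not part of the original source): a device supplies
 * one callback per access size (byte, word, long), registers the tables,
 * and then maps the returned handle with cpu_register_physical_memory().
 * The device callbacks and the 'example_dev_*' names are hypothetical.
 */
#if 0 /* illustrative example, not compiled */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode 'addr' and return register contents */
    return 0;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would decode 'addr' and update its state with 'val' */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readl, example_dev_readl, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writel, example_dev_writel, example_dev_writel,
};

static void example_dev_map(target_phys_addr_t base, void *opaque)
{
    /* io_index 0 requests allocation of a new I/O zone */
    int handle = cpu_register_io_memory(0, example_dev_read, example_dev_write, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, handle);
}
#endif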
2714
2715CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2716{
2717 return io_mem_write[io_index >> IO_MEM_SHIFT];
2718}
2719
2720CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2721{
2722 return io_mem_read[io_index >> IO_MEM_SHIFT];
2723}
2724
2725/* physical memory access (slow version, mainly for debug) */
2726#if defined(CONFIG_USER_ONLY)
2727void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2728 int len, int is_write)
2729{
2730 int l, flags;
2731 target_ulong page;
2732 void * p;
2733
2734 while (len > 0) {
2735 page = addr & TARGET_PAGE_MASK;
2736 l = (page + TARGET_PAGE_SIZE) - addr;
2737 if (l > len)
2738 l = len;
2739 flags = page_get_flags(page);
2740 if (!(flags & PAGE_VALID))
2741 return;
2742 if (is_write) {
2743 if (!(flags & PAGE_WRITE))
2744 return;
2745 p = lock_user(addr, len, 0);
2746 memcpy(p, buf, len);
2747 unlock_user(p, addr, len);
2748 } else {
2749 if (!(flags & PAGE_READ))
2750 return;
2751 p = lock_user(addr, len, 1);
2752 memcpy(buf, p, len);
2753 unlock_user(p, addr, 0);
2754 }
2755 len -= l;
2756 buf += l;
2757 addr += l;
2758 }
2759}
2760
2761#else
2762void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2763 int len, int is_write)
2764{
2765 int l, io_index;
2766 uint8_t *ptr;
2767 uint32_t val;
2768 target_phys_addr_t page;
2769 unsigned long pd;
2770 PhysPageDesc *p;
2771
2772 while (len > 0) {
2773 page = addr & TARGET_PAGE_MASK;
2774 l = (page + TARGET_PAGE_SIZE) - addr;
2775 if (l > len)
2776 l = len;
2777 p = phys_page_find(page >> TARGET_PAGE_BITS);
2778 if (!p) {
2779 pd = IO_MEM_UNASSIGNED;
2780 } else {
2781 pd = p->phys_offset;
2782 }
2783
2784 if (is_write) {
2785 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2786 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2787 /* XXX: could force cpu_single_env to NULL to avoid
2788 potential bugs */
2789 if (l >= 4 && ((addr & 3) == 0)) {
2790 /* 32 bit write access */
2791#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2792 val = ldl_p(buf);
2793#else
2794 val = *(const uint32_t *)buf;
2795#endif
2796 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2797 l = 4;
2798 } else if (l >= 2 && ((addr & 1) == 0)) {
2799 /* 16 bit write access */
2800#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2801 val = lduw_p(buf);
2802#else
2803 val = *(const uint16_t *)buf;
2804#endif
2805 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2806 l = 2;
2807 } else {
2808 /* 8 bit write access */
2809#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2810 val = ldub_p(buf);
2811#else
2812 val = *(const uint8_t *)buf;
2813#endif
2814 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2815 l = 1;
2816 }
2817 } else {
2818 unsigned long addr1;
2819 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2820 /* RAM case */
2821#ifdef VBOX
2822 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
2823#else
2824 ptr = phys_ram_base + addr1;
2825 memcpy(ptr, buf, l);
2826#endif
2827 if (!cpu_physical_memory_is_dirty(addr1)) {
2828 /* invalidate code */
2829 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2830 /* set dirty bit */
2831#ifdef VBOX
2832 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2833#endif
2834 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2835 (0xff & ~CODE_DIRTY_FLAG);
2836 }
2837 }
2838 } else {
2839 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2840 !(pd & IO_MEM_ROMD)) {
2841 /* I/O case */
2842 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2843 if (l >= 4 && ((addr & 3) == 0)) {
2844 /* 32 bit read access */
2845 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2846#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2847 stl_p(buf, val);
2848#else
2849 *(uint32_t *)buf = val;
2850#endif
2851 l = 4;
2852 } else if (l >= 2 && ((addr & 1) == 0)) {
2853 /* 16 bit read access */
2854 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2855#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2856 stw_p(buf, val);
2857#else
2858 *(uint16_t *)buf = val;
2859#endif
2860 l = 2;
2861 } else {
2862 /* 8 bit read access */
2863 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2864#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2865 stb_p(buf, val);
2866#else
2867 *(uint8_t *)buf = val;
2868#endif
2869 l = 1;
2870 }
2871 } else {
2872 /* RAM case */
2873#ifdef VBOX
2874 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
2875#else
2876 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2877 (addr & ~TARGET_PAGE_MASK);
2878 memcpy(buf, ptr, l);
2879#endif
2880 }
2881 }
2882 len -= l;
2883 buf += l;
2884 addr += l;
2885 }
2886}
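/* Illustrative sketch (not part of the original source): device emulation
 * code usually reaches this function through the cpu_physical_memory_read()
 * and cpu_physical_memory_write() wrappers for DMA-style transfers.  The
 * buffer size and guest address below are hypothetical.
 */
#if 0 /* illustrative example, not compiled */
static void example_dma_roundtrip(target_phys_addr_t guest_addr)
{
    uint8_t buf[512];
    cpu_physical_memory_read(guest_addr, buf, sizeof(buf));  /* device reads guest RAM */
    /* ... process buf ... */
    cpu_physical_memory_write(guest_addr, buf, sizeof(buf)); /* device writes it back */
}
#endif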
2887
2888#ifndef VBOX
2889/* used for ROM loading : can write in RAM and ROM */
2890void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2891 const uint8_t *buf, int len)
2892{
2893 int l;
2894 uint8_t *ptr;
2895 target_phys_addr_t page;
2896 unsigned long pd;
2897 PhysPageDesc *p;
2898
2899 while (len > 0) {
2900 page = addr & TARGET_PAGE_MASK;
2901 l = (page + TARGET_PAGE_SIZE) - addr;
2902 if (l > len)
2903 l = len;
2904 p = phys_page_find(page >> TARGET_PAGE_BITS);
2905 if (!p) {
2906 pd = IO_MEM_UNASSIGNED;
2907 } else {
2908 pd = p->phys_offset;
2909 }
2910
2911 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2912 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2913 !(pd & IO_MEM_ROMD)) {
2914 /* do nothing */
2915 } else {
2916 unsigned long addr1;
2917 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2918 /* ROM/RAM case */
2919 ptr = phys_ram_base + addr1;
2920 memcpy(ptr, buf, l);
2921 }
2922 len -= l;
2923 buf += l;
2924 addr += l;
2925 }
2926}
2927#endif /* !VBOX */
2928
2929
2930/* warning: addr must be aligned */
2931uint32_t ldl_phys(target_phys_addr_t addr)
2932{
2933 int io_index;
2934 uint8_t *ptr;
2935 uint32_t val;
2936 unsigned long pd;
2937 PhysPageDesc *p;
2938
2939 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2940 if (!p) {
2941 pd = IO_MEM_UNASSIGNED;
2942 } else {
2943 pd = p->phys_offset;
2944 }
2945
2946 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2947 !(pd & IO_MEM_ROMD)) {
2948 /* I/O case */
2949 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2950 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2951 } else {
2952 /* RAM case */
2953#ifndef VBOX
2954 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2955 (addr & ~TARGET_PAGE_MASK);
2956 val = ldl_p(ptr);
2957#else
2958 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2959#endif
2960 }
2961 return val;
2962}
2963
2964/* warning: addr must be aligned */
2965uint64_t ldq_phys(target_phys_addr_t addr)
2966{
2967 int io_index;
2968 uint8_t *ptr;
2969 uint64_t val;
2970 unsigned long pd;
2971 PhysPageDesc *p;
2972
2973 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2974 if (!p) {
2975 pd = IO_MEM_UNASSIGNED;
2976 } else {
2977 pd = p->phys_offset;
2978 }
2979
2980 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2981 !(pd & IO_MEM_ROMD)) {
2982 /* I/O case */
2983 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2984#ifdef TARGET_WORDS_BIGENDIAN
2985 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2986 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2987#else
2988 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2989 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2990#endif
2991 } else {
2992 /* RAM case */
2993#ifndef VBOX
2994 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2995 (addr & ~TARGET_PAGE_MASK);
2996 val = ldq_p(ptr);
2997#else
2998 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2999#endif
3000 }
3001 return val;
3002}
3003
3004/* XXX: optimize */
3005uint32_t ldub_phys(target_phys_addr_t addr)
3006{
3007 uint8_t val;
3008 cpu_physical_memory_read(addr, &val, 1);
3009 return val;
3010}
3011
3012/* XXX: optimize */
3013uint32_t lduw_phys(target_phys_addr_t addr)
3014{
3015 uint16_t val;
3016 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3017 return tswap16(val);
3018}
3019
3020/* warning: addr must be aligned. The ram page is not marked as dirty
3021 and the code inside is not invalidated. It is useful if the dirty
3022 bits are used to track modified PTEs */
3023void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3024{
3025 int io_index;
3026 uint8_t *ptr;
3027 unsigned long pd;
3028 PhysPageDesc *p;
3029
3030 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3031 if (!p) {
3032 pd = IO_MEM_UNASSIGNED;
3033 } else {
3034 pd = p->phys_offset;
3035 }
3036
3037 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3038 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3039 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3040 } else {
3041#ifndef VBOX
3042 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3043 (addr & ~TARGET_PAGE_MASK);
3044 stl_p(ptr, val);
3045#else
3046 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3047#endif
3048 }
3049}
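/* Illustrative sketch (not part of the original source): as the comment
 * above says, this helper suits things like setting accessed/dirty bits in
 * a guest page table entry without marking the page dirty or invalidating
 * translated code.  'pte_paddr' and the x86-style 0x20 accessed bit are
 * only an example.
 */
#if 0 /* illustrative example, not compiled */
static void example_set_pte_accessed(target_phys_addr_t pte_paddr)
{
    uint32_t pte = ldl_phys(pte_paddr);
    stl_phys_notdirty(pte_paddr, pte | 0x20 /* accessed bit, example value */);
}
#endif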
3050
3051/* warning: addr must be aligned */
3052void stl_phys(target_phys_addr_t addr, uint32_t val)
3053{
3054 int io_index;
3055 uint8_t *ptr;
3056 unsigned long pd;
3057 PhysPageDesc *p;
3058
3059 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3060 if (!p) {
3061 pd = IO_MEM_UNASSIGNED;
3062 } else {
3063 pd = p->phys_offset;
3064 }
3065
3066 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3067 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3068 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3069 } else {
3070 unsigned long addr1;
3071 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3072 /* RAM case */
3073#ifndef VBOX
3074 ptr = phys_ram_base + addr1;
3075 stl_p(ptr, val);
3076#else
3077 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3078#endif
3079 if (!cpu_physical_memory_is_dirty(addr1)) {
3080 /* invalidate code */
3081 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3082 /* set dirty bit */
3083#ifdef VBOX
3084 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3085#endif
3086 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3087 (0xff & ~CODE_DIRTY_FLAG);
3088 }
3089 }
3090}
3091
3092/* XXX: optimize */
3093void stb_phys(target_phys_addr_t addr, uint32_t val)
3094{
3095 uint8_t v = val;
3096 cpu_physical_memory_write(addr, &v, 1);
3097}
3098
3099/* XXX: optimize */
3100void stw_phys(target_phys_addr_t addr, uint32_t val)
3101{
3102 uint16_t v = tswap16(val);
3103 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3104}
3105
3106/* XXX: optimize */
3107void stq_phys(target_phys_addr_t addr, uint64_t val)
3108{
3109 val = tswap64(val);
3110 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3111}
3112
3113#endif
3114
3115#ifndef VBOX
3116/* virtual memory access for debug */
3117int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3118 uint8_t *buf, int len, int is_write)
3119{
3120 int l;
3121 target_ulong page, phys_addr;
3122
3123 while (len > 0) {
3124 page = addr & TARGET_PAGE_MASK;
3125 phys_addr = cpu_get_phys_page_debug(env, page);
3126 /* if no physical page mapped, return an error */
3127 if (phys_addr == -1)
3128 return -1;
3129 l = (page + TARGET_PAGE_SIZE) - addr;
3130 if (l > len)
3131 l = len;
3132 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3133 buf, l, is_write);
3134 len -= l;
3135 buf += l;
3136 addr += l;
3137 }
3138 return 0;
3139}
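/* Illustrative sketch (not part of the original source): this is the entry
 * point a debugger stub would use to read guest virtual memory; the loop
 * above already splits the transfer into page-sized chunks.  The wrapper
 * name is hypothetical.
 */
#if 0 /* illustrative example, not compiled */
static int example_debug_read(CPUState *env, target_ulong guest_vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, guest_vaddr, buf, len, 0 /* is_write=0, read */);
}
#endif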
3140
3141void dump_exec_info(FILE *f,
3142 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3143{
3144 int i, target_code_size, max_target_code_size;
3145 int direct_jmp_count, direct_jmp2_count, cross_page;
3146 TranslationBlock *tb;
3147
3148 target_code_size = 0;
3149 max_target_code_size = 0;
3150 cross_page = 0;
3151 direct_jmp_count = 0;
3152 direct_jmp2_count = 0;
3153 for(i = 0; i < nb_tbs; i++) {
3154 tb = &tbs[i];
3155 target_code_size += tb->size;
3156 if (tb->size > max_target_code_size)
3157 max_target_code_size = tb->size;
3158 if (tb->page_addr[1] != -1)
3159 cross_page++;
3160 if (tb->tb_next_offset[0] != 0xffff) {
3161 direct_jmp_count++;
3162 if (tb->tb_next_offset[1] != 0xffff) {
3163 direct_jmp2_count++;
3164 }
3165 }
3166 }
3167 /* XXX: avoid using doubles ? */
3168 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3169 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3170 nb_tbs ? target_code_size / nb_tbs : 0,
3171 max_target_code_size);
3172 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3173 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3174 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3175 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3176 cross_page,
3177 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3178 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3179 direct_jmp_count,
3180 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3181 direct_jmp2_count,
3182 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3183 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3184 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3185 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3186}
3187#endif /* !VBOX */
3188
3189#if !defined(CONFIG_USER_ONLY)
3190
3191#define MMUSUFFIX _cmmu
3192#define GETPC() NULL
3193#define env cpu_single_env
3194#define SOFTMMU_CODE_ACCESS
3195
3196#define SHIFT 0
3197#include "softmmu_template.h"
3198
3199#define SHIFT 1
3200#include "softmmu_template.h"
3201
3202#define SHIFT 2
3203#include "softmmu_template.h"
3204
3205#define SHIFT 3
3206#include "softmmu_template.h"
3207
3208#undef env
3209
3210#endif