VirtualBox

source: vbox/trunk/src/recompiler/new/exec.c@ 88

Last change on this file since 88 was 1, checked in by vboxsync, 55 years ago

import

  • Property svn:eol-style set to native
File size: 75.9 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifndef VBOX
22#ifdef _WIN32
23#include <windows.h>
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35#else /* VBOX */
36# include <stdlib.h>
37# include <stdio.h>
38# include <inttypes.h>
39# include <iprt/alloc.h>
40# include <iprt/string.h>
41# include <iprt/param.h>
42#endif /* VBOX */
43
44#include "cpu.h"
45#include "exec-all.h"
46#if defined(CONFIG_USER_ONLY)
47#include <qemu.h>
48#endif
49
50//#define DEBUG_TB_INVALIDATE
51//#define DEBUG_FLUSH
52//#define DEBUG_TLB
53//#define DEBUG_UNASSIGNED
54
55/* make various TB consistency checks */
56//#define DEBUG_TB_CHECK
57//#define DEBUG_TLB_CHECK
58
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
64/* threshold to flush the translated code buffer */
65#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
66
67#define SMC_BITMAP_USE_THRESHOLD 10
68
69#define MMAP_AREA_START 0x00000000
70#define MMAP_AREA_END 0xa8000000
71
72#if defined(TARGET_SPARC64)
73#define TARGET_PHYS_ADDR_SPACE_BITS 41
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
76#else
77/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78#define TARGET_PHYS_ADDR_SPACE_BITS 32
79#endif
80
81TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
82TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
83int nb_tbs;
84/* any access to the tbs or the page table must use this lock */
85spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86
87uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
88#if defined(__MINGW32__)
89 __attribute__((aligned (16)));
90#else
91 __attribute__((aligned (32)));
92#endif
93uint8_t *code_gen_ptr;
94
95int phys_ram_size;
96#ifndef VBOX
97int phys_ram_fd;
98#endif /* !VBOX */
99uint8_t *phys_ram_base;
100uint8_t *phys_ram_dirty;
101#ifdef VBOX
102/* we have memory ranges (the high PC-BIOS mapping) which
103 cause some pages to fall outside the dirty map here. */
104uint32_t phys_ram_dirty_size;
105#endif/* VBOX */
106
107CPUState *first_cpu;
108/* current CPU in the current thread. It is only valid inside
109 cpu_exec() */
110CPUState *cpu_single_env;
111
112typedef struct PageDesc {
113 /* list of TBs intersecting this ram page */
114 TranslationBlock *first_tb;
115 /* in order to optimize self modifying code, we count the number
116 of lookups we do to a given page to use a bitmap */
117 unsigned int code_write_count;
118 uint8_t *code_bitmap;
119#if defined(CONFIG_USER_ONLY)
120 unsigned long flags;
121#endif
122} PageDesc;
123
124typedef struct PhysPageDesc {
125 /* offset in host memory of the page + io_index in the low 12 bits */
126 uint32_t phys_offset;
127} PhysPageDesc;
128
129#define L2_BITS 10
130#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
131
132#define L1_SIZE (1 << L1_BITS)
133#define L2_SIZE (1 << L2_BITS)
134
135static void io_mem_init(void);
136
137unsigned long qemu_real_host_page_size;
138unsigned long qemu_host_page_bits;
139unsigned long qemu_host_page_size;
140unsigned long qemu_host_page_mask;
141
142/* XXX: for system emulation, it could just be an array */
143static PageDesc *l1_map[L1_SIZE];
144PhysPageDesc **l1_phys_map;
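/* l1_map is a two-level table: the top L1_BITS of a target page index select
   an l1_map[] slot, which points to an array of L2_SIZE PageDesc entries
   indexed by the low L2_BITS; together with TARGET_PAGE_BITS this covers the
   full 32-bit target virtual address space. l1_phys_map uses the same layout
   for guest physical pages (see phys_page_find_alloc below). */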
145
146/* io memory support */
147CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
148CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
149void *io_mem_opaque[IO_MEM_NB_ENTRIES];
150static int io_mem_nb;
151
152#ifndef VBOX
153/* log support */
154char *logfilename = "/tmp/qemu.log";
155#endif /* !VBOX */
156FILE *logfile;
157int loglevel;
158
159/* statistics */
160static int tlb_flush_count;
161static int tb_flush_count;
162#ifndef VBOX
163static int tb_phys_invalidate_count;
164#endif /* !VBOX */
165
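/* page_init() makes the statically allocated code_gen_buffer executable
   (RTMemProtect under VBOX, VirtualProtect on Win32, mprotect elsewhere),
   derives qemu_host_page_size/bits/mask from the real host page size and
   allocates the zeroed top level of l1_phys_map. */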
166static void page_init(void)
167{
168 /* NOTE: we can always suppose that qemu_host_page_size >=
169 TARGET_PAGE_SIZE */
170#ifdef VBOX
171 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
172 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
173 qemu_real_host_page_size = PAGE_SIZE;
174#else /* !VBOX */
175#ifdef _WIN32
176 {
177 SYSTEM_INFO system_info;
178 DWORD old_protect;
179
180 GetSystemInfo(&system_info);
181 qemu_real_host_page_size = system_info.dwPageSize;
182
183 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
184 PAGE_EXECUTE_READWRITE, &old_protect);
185 }
186#else
187 qemu_real_host_page_size = getpagesize();
188 {
189 unsigned long start, end;
190
191 start = (unsigned long)code_gen_buffer;
192 start &= ~(qemu_real_host_page_size - 1);
193
194 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
195 end += qemu_real_host_page_size - 1;
196 end &= ~(qemu_real_host_page_size - 1);
197
198 mprotect((void *)start, end - start,
199 PROT_READ | PROT_WRITE | PROT_EXEC);
200 }
201#endif
202#endif /* !VBOX */
203
204 if (qemu_host_page_size == 0)
205 qemu_host_page_size = qemu_real_host_page_size;
206 if (qemu_host_page_size < TARGET_PAGE_SIZE)
207 qemu_host_page_size = TARGET_PAGE_SIZE;
208 qemu_host_page_bits = 0;
209 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
210 qemu_host_page_bits++;
211 qemu_host_page_mask = ~(qemu_host_page_size - 1);
212 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
213 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
214}
215
216static inline PageDesc *page_find_alloc(unsigned int index)
217{
218 PageDesc **lp, *p;
219
220 lp = &l1_map[index >> L2_BITS];
221 p = *lp;
222 if (!p) {
223 /* allocate if not found */
224 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
225 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
226 *lp = p;
227 }
228 return p + (index & (L2_SIZE - 1));
229}
230
231static inline PageDesc *page_find(unsigned int index)
232{
233 PageDesc *p;
234
235 p = l1_map[index >> L2_BITS];
236 if (!p)
237 return 0;
238 return p + (index & (L2_SIZE - 1));
239}
240
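/* Lookup/creation of the PhysPageDesc for a guest physical page. When
   TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra top level is inserted in front of
   the usual two levels; entries of a freshly allocated L2 table default to
   IO_MEM_UNASSIGNED. */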
241static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
242{
243 void **lp, **p;
244 PhysPageDesc *pd;
245
246 p = (void **)l1_phys_map;
247#if TARGET_PHYS_ADDR_SPACE_BITS > 32
248
249#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
250#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
251#endif
252 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
253 p = *lp;
254 if (!p) {
255 /* allocate if not found */
256 if (!alloc)
257 return NULL;
258 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
259 memset(p, 0, sizeof(void *) * L1_SIZE);
260 *lp = p;
261 }
262#endif
263 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
264 pd = *lp;
265 if (!pd) {
266 int i;
267 /* allocate if not found */
268 if (!alloc)
269 return NULL;
270 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
271 *lp = pd;
272 for (i = 0; i < L2_SIZE; i++)
273 pd[i].phys_offset = IO_MEM_UNASSIGNED;
274 }
275 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
276}
277
278static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
279{
280 return phys_page_find_alloc(index, 0);
281}
282
283#if !defined(CONFIG_USER_ONLY)
284static void tlb_protect_code(ram_addr_t ram_addr);
285static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
286 target_ulong vaddr);
287#endif
288
289void cpu_exec_init(CPUState *env)
290{
291 CPUState **penv;
292 int cpu_index;
293
294 if (!code_gen_ptr) {
295 code_gen_ptr = code_gen_buffer;
296 page_init();
297 io_mem_init();
298 }
299 env->next_cpu = NULL;
300 penv = &first_cpu;
301 cpu_index = 0;
302 while (*penv != NULL) {
303 penv = (CPUState **)&(*penv)->next_cpu;
304 cpu_index++;
305 }
306 env->cpu_index = cpu_index;
307 *penv = env;
308}
309
310static inline void invalidate_page_bitmap(PageDesc *p)
311{
312 if (p->code_bitmap) {
313 qemu_free(p->code_bitmap);
314 p->code_bitmap = NULL;
315 }
316 p->code_write_count = 0;
317}
318
319/* set to NULL all the 'first_tb' fields in all PageDescs */
320static void page_flush_tb(void)
321{
322 int i, j;
323 PageDesc *p;
324
325 for(i = 0; i < L1_SIZE; i++) {
326 p = l1_map[i];
327 if (p) {
328 for(j = 0; j < L2_SIZE; j++) {
329 p->first_tb = NULL;
330 invalidate_page_bitmap(p);
331 p++;
332 }
333 }
334 }
335}
336
337/* flush all the translation blocks */
338/* XXX: tb_flush is currently not thread safe */
339void tb_flush(CPUState *env1)
340{
341 CPUState *env;
342#if defined(DEBUG_FLUSH)
343 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
344 code_gen_ptr - code_gen_buffer,
345 nb_tbs,
346 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
347#endif
348 nb_tbs = 0;
349
350 for(env = first_cpu; env != NULL; env = env->next_cpu) {
351 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
352 }
353
354 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
355 page_flush_tb();
356
357 code_gen_ptr = code_gen_buffer;
358 /* XXX: flush processor icache at this point if cache flush is
359 expensive */
360 tb_flush_count++;
361}
362
363#ifdef DEBUG_TB_CHECK
364
365static void tb_invalidate_check(unsigned long address)
366{
367 TranslationBlock *tb;
368 int i;
369 address &= TARGET_PAGE_MASK;
370 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
371 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
372 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
373 address >= tb->pc + tb->size)) {
374 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
375 address, (long)tb->pc, tb->size);
376 }
377 }
378 }
379}
380
381/* verify that all the pages have correct rights for code */
382static void tb_page_check(void)
383{
384 TranslationBlock *tb;
385 int i, flags1, flags2;
386
387 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
388 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
389 flags1 = page_get_flags(tb->pc);
390 flags2 = page_get_flags(tb->pc + tb->size - 1);
391 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
392 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
393 (long)tb->pc, tb->size, flags1, flags2);
394 }
395 }
396 }
397}
398
399void tb_jmp_check(TranslationBlock *tb)
400{
401 TranslationBlock *tb1;
402 unsigned int n1;
403
404 /* suppress any remaining jumps to this TB */
405 tb1 = tb->jmp_first;
406 for(;;) {
407 n1 = (long)tb1 & 3;
408 tb1 = (TranslationBlock *)((long)tb1 & ~3);
409 if (n1 == 2)
410 break;
411 tb1 = tb1->jmp_next[n1];
412 }
413 /* check end of list */
414 if (tb1 != tb) {
415 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
416 }
417}
418
419#endif
420
421/* invalidate one TB */
422static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
423 int next_offset)
424{
425 TranslationBlock *tb1;
426 for(;;) {
427 tb1 = *ptb;
428 if (tb1 == tb) {
429 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
430 break;
431 }
432 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
433 }
434}
435
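/* The per-page and per-jump TB lists store extra state in the low two bits of
   each pointer: 0 or 1 is the page/jump slot index of the next element, and
   the value 2 marks the head of the circular jump list (jmp_first pointing
   back at the TB itself). This is why the list walkers below mask with
   '& 3' / '& ~3'. */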
436static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
437{
438 TranslationBlock *tb1;
439 unsigned int n1;
440
441 for(;;) {
442 tb1 = *ptb;
443 n1 = (long)tb1 & 3;
444 tb1 = (TranslationBlock *)((long)tb1 & ~3);
445 if (tb1 == tb) {
446 *ptb = tb1->page_next[n1];
447 break;
448 }
449 ptb = &tb1->page_next[n1];
450 }
451}
452
453static inline void tb_jmp_remove(TranslationBlock *tb, int n)
454{
455 TranslationBlock *tb1, **ptb;
456 unsigned int n1;
457
458 ptb = &tb->jmp_next[n];
459 tb1 = *ptb;
460 if (tb1) {
461 /* find tb(n) in circular list */
462 for(;;) {
463 tb1 = *ptb;
464 n1 = (long)tb1 & 3;
465 tb1 = (TranslationBlock *)((long)tb1 & ~3);
466 if (n1 == n && tb1 == tb)
467 break;
468 if (n1 == 2) {
469 ptb = &tb1->jmp_first;
470 } else {
471 ptb = &tb1->jmp_next[n1];
472 }
473 }
474 /* now we can suppress tb(n) from the list */
475 *ptb = tb->jmp_next[n];
476
477 tb->jmp_next[n] = NULL;
478 }
479}
480
481/* reset the jump entry 'n' of a TB so that it is not chained to
482 another TB */
483static inline void tb_reset_jump(TranslationBlock *tb, int n)
484{
485 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
486}
487
488static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
489{
490 CPUState *env;
491 PageDesc *p;
492 unsigned int h, n1;
493 target_ulong phys_pc;
494 TranslationBlock *tb1, *tb2;
495
496 /* remove the TB from the hash list */
497 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
498 h = tb_phys_hash_func(phys_pc);
499 tb_remove(&tb_phys_hash[h], tb,
500 offsetof(TranslationBlock, phys_hash_next));
501
502 /* remove the TB from the page list */
503 if (tb->page_addr[0] != page_addr) {
504 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
505 tb_page_remove(&p->first_tb, tb);
506 invalidate_page_bitmap(p);
507 }
508 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
509 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
510 tb_page_remove(&p->first_tb, tb);
511 invalidate_page_bitmap(p);
512 }
513
514 tb_invalidated_flag = 1;
515
516 /* remove the TB from the hash list */
517 h = tb_jmp_cache_hash_func(tb->pc);
518 for(env = first_cpu; env != NULL; env = env->next_cpu) {
519 if (env->tb_jmp_cache[h] == tb)
520 env->tb_jmp_cache[h] = NULL;
521 }
522
523 /* suppress this TB from the two jump lists */
524 tb_jmp_remove(tb, 0);
525 tb_jmp_remove(tb, 1);
526
527 /* suppress any remaining jumps to this TB */
528 tb1 = tb->jmp_first;
529 for(;;) {
530 n1 = (long)tb1 & 3;
531 if (n1 == 2)
532 break;
533 tb1 = (TranslationBlock *)((long)tb1 & ~3);
534 tb2 = tb1->jmp_next[n1];
535 tb_reset_jump(tb1, n1);
536 tb1->jmp_next[n1] = NULL;
537 tb1 = tb2;
538 }
539 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
540
541#ifndef VBOX
542 tb_phys_invalidate_count++;
543#endif /* !VBOX */
544}
545
546#ifdef VBOX
547void tb_invalidate_virt(CPUState *env, uint32_t eip)
548{
549# if 1
550 tb_flush(env);
551# else
552 uint8_t *cs_base, *pc;
553 unsigned int flags, h, phys_pc;
554 TranslationBlock *tb, **ptb;
555
556 flags = env->hflags;
557 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
558 cs_base = env->segs[R_CS].base;
559 pc = cs_base + eip;
560
561 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
562 flags);
563
564 if(tb)
565 {
566# ifdef DEBUG
567 printf("invalidating TB (%08X) at %08X\n", tb, eip);
568# endif
569 tb_invalidate(tb);
570 //Note: this will leak TBs, but the whole cache will be flushed
571 // when it happens too often
572 tb->pc = 0;
573 tb->cs_base = 0;
574 tb->flags = 0;
575 }
576# endif
577}
578
579# ifdef VBOX_STRICT
580/**
581 * Gets the page offset.
582 */
583unsigned long get_phys_page_offset(target_ulong addr)
584{
585 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
586 return p ? p->phys_offset : 0;
587}
588# endif /* VBOX_STRICT */
589#endif /* VBOX */
590
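/* Set bits [start, start + len) in the bitmap 'tab': the partial first and
   last bytes are handled with masks, full bytes in between are set to 0xff. */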
591static inline void set_bits(uint8_t *tab, int start, int len)
592{
593 int end, mask, end1;
594
595 end = start + len;
596 tab += start >> 3;
597 mask = 0xff << (start & 7);
598 if ((start & ~7) == (end & ~7)) {
599 if (start < end) {
600 mask &= ~(0xff << (end & 7));
601 *tab |= mask;
602 }
603 } else {
604 *tab++ |= mask;
605 start = (start + 8) & ~7;
606 end1 = end & ~7;
607 while (start < end1) {
608 *tab++ = 0xff;
609 start += 8;
610 }
611 if (start < end) {
612 mask = ~(0xff << (end & 7));
613 *tab |= mask;
614 }
615 }
616}
617
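/* The code bitmap has one bit per byte of the target page; a set bit means
   that byte is covered by at least one translated block. It lets
   tb_invalidate_phys_page_fast() skip the costly invalidation path for guest
   writes that do not touch translated code. */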
618static void build_page_bitmap(PageDesc *p)
619{
620 int n, tb_start, tb_end;
621 TranslationBlock *tb;
622
623 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
624 if (!p->code_bitmap)
625 return;
626 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
627
628 tb = p->first_tb;
629 while (tb != NULL) {
630 n = (long)tb & 3;
631 tb = (TranslationBlock *)((long)tb & ~3);
632 /* NOTE: this is subtle as a TB may span two physical pages */
633 if (n == 0) {
634 /* NOTE: tb_end may be after the end of the page, but
635 it is not a problem */
636 tb_start = tb->pc & ~TARGET_PAGE_MASK;
637 tb_end = tb_start + tb->size;
638 if (tb_end > TARGET_PAGE_SIZE)
639 tb_end = TARGET_PAGE_SIZE;
640 } else {
641 tb_start = 0;
642 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
643 }
644 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
645 tb = tb->page_next[n];
646 }
647}
648
649#ifdef TARGET_HAS_PRECISE_SMC
650
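/* Translate one block starting at 'pc': allocate a TB (flushing the whole
   cache if it is full), generate host code into code_gen_buffer and link the
   TB to the physical page(s) it covers. */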
651static void tb_gen_code(CPUState *env,
652 target_ulong pc, target_ulong cs_base, int flags,
653 int cflags)
654{
655 TranslationBlock *tb;
656 uint8_t *tc_ptr;
657 target_ulong phys_pc, phys_page2, virt_page2;
658 int code_gen_size;
659
660 phys_pc = get_phys_addr_code(env, pc);
661 tb = tb_alloc(pc);
662 if (!tb) {
663 /* flush must be done */
664 tb_flush(env);
665 /* cannot fail at this point */
666 tb = tb_alloc(pc);
667 }
668 tc_ptr = code_gen_ptr;
669 tb->tc_ptr = tc_ptr;
670 tb->cs_base = cs_base;
671 tb->flags = flags;
672 tb->cflags = cflags;
673 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
674 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
675
676 /* check next page if needed */
677 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
678 phys_page2 = -1;
679 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
680 phys_page2 = get_phys_addr_code(env, virt_page2);
681 }
682 tb_link_phys(tb, phys_pc, phys_page2);
683}
684#endif
685
686/* invalidate all TBs which intersect with the target physical page
687 starting in range [start;end[. NOTE: start and end must refer to
688 the same physical page. 'is_cpu_write_access' should be true if called
689 from a real cpu write access: the virtual CPU will exit the current
690 TB if code is modified inside this TB. */
691void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
692 int is_cpu_write_access)
693{
694 int n, current_tb_modified, current_tb_not_found, current_flags;
695 CPUState *env = cpu_single_env;
696 PageDesc *p;
697 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
698 target_ulong tb_start, tb_end;
699 target_ulong current_pc, current_cs_base;
700
701 p = page_find(start >> TARGET_PAGE_BITS);
702 if (!p)
703 return;
704 if (!p->code_bitmap &&
705 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
706 is_cpu_write_access) {
707 /* build code bitmap */
708 build_page_bitmap(p);
709 }
710
711 /* we remove all the TBs in the range [start, end[ */
712 /* XXX: see if in some cases it could be faster to invalidate all the code */
713 current_tb_not_found = is_cpu_write_access;
714 current_tb_modified = 0;
715 current_tb = NULL; /* avoid warning */
716 current_pc = 0; /* avoid warning */
717 current_cs_base = 0; /* avoid warning */
718 current_flags = 0; /* avoid warning */
719 tb = p->first_tb;
720 while (tb != NULL) {
721 n = (long)tb & 3;
722 tb = (TranslationBlock *)((long)tb & ~3);
723 tb_next = tb->page_next[n];
724 /* NOTE: this is subtle as a TB may span two physical pages */
725 if (n == 0) {
726 /* NOTE: tb_end may be after the end of the page, but
727 it is not a problem */
728 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
729 tb_end = tb_start + tb->size;
730 } else {
731 tb_start = tb->page_addr[1];
732 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
733 }
734 if (!(tb_end <= start || tb_start >= end)) {
735#ifdef TARGET_HAS_PRECISE_SMC
736 if (current_tb_not_found) {
737 current_tb_not_found = 0;
738 current_tb = NULL;
739 if (env->mem_write_pc) {
740 /* now we have a real cpu fault */
741 current_tb = tb_find_pc(env->mem_write_pc);
742 }
743 }
744 if (current_tb == tb &&
745 !(current_tb->cflags & CF_SINGLE_INSN)) {
746 /* If we are modifying the current TB, we must stop
747 its execution. We could be more precise by checking
748 that the modification is after the current PC, but it
749 would require a specialized function to partially
750 restore the CPU state */
751
752 current_tb_modified = 1;
753 cpu_restore_state(current_tb, env,
754 env->mem_write_pc, NULL);
755#if defined(TARGET_I386)
756 current_flags = env->hflags;
757 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
758 current_cs_base = (target_ulong)env->segs[R_CS].base;
759 current_pc = current_cs_base + env->eip;
760#else
761#error unsupported CPU
762#endif
763 }
764#endif /* TARGET_HAS_PRECISE_SMC */
765 /* we need to do that to handle the case where a signal
766 occurs while doing tb_phys_invalidate() */
767 saved_tb = NULL;
768 if (env) {
769 saved_tb = env->current_tb;
770 env->current_tb = NULL;
771 }
772 tb_phys_invalidate(tb, -1);
773 if (env) {
774 env->current_tb = saved_tb;
775 if (env->interrupt_request && env->current_tb)
776 cpu_interrupt(env, env->interrupt_request);
777 }
778 }
779 tb = tb_next;
780 }
781#if !defined(CONFIG_USER_ONLY)
782 /* if no code remaining, no need to continue to use slow writes */
783 if (!p->first_tb) {
784 invalidate_page_bitmap(p);
785 if (is_cpu_write_access) {
786 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
787 }
788 }
789#endif
790#ifdef TARGET_HAS_PRECISE_SMC
791 if (current_tb_modified) {
792 /* we generate a block containing just the instruction
793 modifying the memory. It will ensure that it cannot modify
794 itself */
795 env->current_tb = NULL;
796 tb_gen_code(env, current_pc, current_cs_base, current_flags,
797 CF_SINGLE_INSN);
798 cpu_resume_from_signal(env, NULL);
799 }
800#endif
801}
802
803/* len must be <= 8 and start must be a multiple of len */
804static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
805{
806 PageDesc *p;
807 int offset, b;
808#if 0
809 if (1) {
810 if (loglevel) {
811 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
812 cpu_single_env->mem_write_vaddr, len,
813 cpu_single_env->eip,
814 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
815 }
816 }
817#endif
818 p = page_find(start >> TARGET_PAGE_BITS);
819 if (!p)
820 return;
821 if (p->code_bitmap) {
822 offset = start & ~TARGET_PAGE_MASK;
823 b = p->code_bitmap[offset >> 3] >> (offset & 7);
824 if (b & ((1 << len) - 1))
825 goto do_invalidate;
826 } else {
827 do_invalidate:
828 tb_invalidate_phys_page_range(start, start + len, 1);
829 }
830}
831
832#if !defined(CONFIG_SOFTMMU)
833static void tb_invalidate_phys_page(target_ulong addr,
834 unsigned long pc, void *puc)
835{
836 int n, current_flags, current_tb_modified;
837 target_ulong current_pc, current_cs_base;
838 PageDesc *p;
839 TranslationBlock *tb, *current_tb;
840#ifdef TARGET_HAS_PRECISE_SMC
841 CPUState *env = cpu_single_env;
842#endif
843
844 addr &= TARGET_PAGE_MASK;
845 p = page_find(addr >> TARGET_PAGE_BITS);
846 if (!p)
847 return;
848 tb = p->first_tb;
849 current_tb_modified = 0;
850 current_tb = NULL;
851 current_pc = 0; /* avoid warning */
852 current_cs_base = 0; /* avoid warning */
853 current_flags = 0; /* avoid warning */
854#ifdef TARGET_HAS_PRECISE_SMC
855 if (tb && pc != 0) {
856 current_tb = tb_find_pc(pc);
857 }
858#endif
859 while (tb != NULL) {
860 n = (long)tb & 3;
861 tb = (TranslationBlock *)((long)tb & ~3);
862#ifdef TARGET_HAS_PRECISE_SMC
863 if (current_tb == tb &&
864 !(current_tb->cflags & CF_SINGLE_INSN)) {
865 /* If we are modifying the current TB, we must stop
866 its execution. We could be more precise by checking
867 that the modification is after the current PC, but it
868 would require a specialized function to partially
869 restore the CPU state */
870
871 current_tb_modified = 1;
872 cpu_restore_state(current_tb, env, pc, puc);
873#if defined(TARGET_I386)
874 current_flags = env->hflags;
875 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
876 current_cs_base = (target_ulong)env->segs[R_CS].base;
877 current_pc = current_cs_base + env->eip;
878#else
879#error unsupported CPU
880#endif
881 }
882#endif /* TARGET_HAS_PRECISE_SMC */
883 tb_phys_invalidate(tb, addr);
884 tb = tb->page_next[n];
885 }
886 p->first_tb = NULL;
887#ifdef TARGET_HAS_PRECISE_SMC
888 if (current_tb_modified) {
889 /* we generate a block containing just the instruction
890 modifying the memory. It will ensure that it cannot modify
891 itself */
892 env->current_tb = NULL;
893 tb_gen_code(env, current_pc, current_cs_base, current_flags,
894 CF_SINGLE_INSN);
895 cpu_resume_from_signal(env, puc);
896 }
897#endif
898}
899#endif
900
901/* add the tb in the target page and protect it if necessary */
902static inline void tb_alloc_page(TranslationBlock *tb,
903 unsigned int n, target_ulong page_addr)
904{
905 PageDesc *p;
906 TranslationBlock *last_first_tb;
907
908 tb->page_addr[n] = page_addr;
909 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
910 tb->page_next[n] = p->first_tb;
911 last_first_tb = p->first_tb;
912 p->first_tb = (TranslationBlock *)((long)tb | n);
913 invalidate_page_bitmap(p);
914
915#if defined(TARGET_HAS_SMC) || 1
916
917#if defined(CONFIG_USER_ONLY)
918 if (p->flags & PAGE_WRITE) {
919 target_ulong addr;
920 PageDesc *p2;
921 int prot;
922
923 /* force the host page as non writable (writes will have a
924 page fault + mprotect overhead) */
925 page_addr &= qemu_host_page_mask;
926 prot = 0;
927 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
928 addr += TARGET_PAGE_SIZE) {
929
930 p2 = page_find (addr >> TARGET_PAGE_BITS);
931 if (!p2)
932 continue;
933 prot |= p2->flags;
934 p2->flags &= ~PAGE_WRITE;
935 page_get_flags(addr);
936 }
937 mprotect(g2h(page_addr), qemu_host_page_size,
938 (prot & PAGE_BITS) & ~PAGE_WRITE);
939#ifdef DEBUG_TB_INVALIDATE
940 printf("protecting code page: 0x%08lx\n",
941 page_addr);
942#endif
943 }
944#else
945 /* if some code is already present, then the pages are already
946 protected. So we handle the case where only the first TB is
947 allocated in a physical page */
948 if (!last_first_tb) {
949 tlb_protect_code(page_addr);
950 }
951#endif
952
953#endif /* TARGET_HAS_SMC */
954}
955
956/* Allocate a new translation block. Flush the translation buffer if
957 too many translation blocks or too much generated code. */
958TranslationBlock *tb_alloc(target_ulong pc)
959{
960 TranslationBlock *tb;
961
962 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
963 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
964 return NULL;
965 tb = &tbs[nb_tbs++];
966 tb->pc = pc;
967 tb->cflags = 0;
968 return tb;
969}
970
971/* add a new TB and link it to the physical page tables. phys_page2 is
972 (-1) to indicate that only one page contains the TB. */
973void tb_link_phys(TranslationBlock *tb,
974 target_ulong phys_pc, target_ulong phys_page2)
975{
976 unsigned int h;
977 TranslationBlock **ptb;
978
979 /* add in the physical hash table */
980 h = tb_phys_hash_func(phys_pc);
981 ptb = &tb_phys_hash[h];
982 tb->phys_hash_next = *ptb;
983 *ptb = tb;
984
985 /* add in the page list */
986 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
987 if (phys_page2 != -1)
988 tb_alloc_page(tb, 1, phys_page2);
989 else
990 tb->page_addr[1] = -1;
991
992 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
993 tb->jmp_next[0] = NULL;
994 tb->jmp_next[1] = NULL;
995#ifdef USE_CODE_COPY
996 tb->cflags &= ~CF_FP_USED;
997 if (tb->cflags & CF_TB_FP_USED)
998 tb->cflags |= CF_FP_USED;
999#endif
1000
1001 /* init original jump addresses */
1002 if (tb->tb_next_offset[0] != 0xffff)
1003 tb_reset_jump(tb, 0);
1004 if (tb->tb_next_offset[1] != 0xffff)
1005 tb_reset_jump(tb, 1);
1006
1007#ifdef DEBUG_TB_CHECK
1008 tb_page_check();
1009#endif
1010}
1011
1012/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1013 tb[1].tc_ptr. Return NULL if not found */
1014TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1015{
1016 int m_min, m_max, m;
1017 unsigned long v;
1018 TranslationBlock *tb;
1019
1020 if (nb_tbs <= 0)
1021 return NULL;
1022 if (tc_ptr < (unsigned long)code_gen_buffer ||
1023 tc_ptr >= (unsigned long)code_gen_ptr)
1024 return NULL;
1025 /* binary search (cf Knuth) */
1026 m_min = 0;
1027 m_max = nb_tbs - 1;
1028 while (m_min <= m_max) {
1029 m = (m_min + m_max) >> 1;
1030 tb = &tbs[m];
1031 v = (unsigned long)tb->tc_ptr;
1032 if (v == tc_ptr)
1033 return tb;
1034 else if (tc_ptr < v) {
1035 m_max = m - 1;
1036 } else {
1037 m_min = m + 1;
1038 }
1039 }
1040 return &tbs[m_max];
1041}
1042
1043static void tb_reset_jump_recursive(TranslationBlock *tb);
1044
1045static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1046{
1047 TranslationBlock *tb1, *tb_next, **ptb;
1048 unsigned int n1;
1049
1050 tb1 = tb->jmp_next[n];
1051 if (tb1 != NULL) {
1052 /* find head of list */
1053 for(;;) {
1054 n1 = (long)tb1 & 3;
1055 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1056 if (n1 == 2)
1057 break;
1058 tb1 = tb1->jmp_next[n1];
1059 }
1060 /* we are now sure that tb jumps to tb1 */
1061 tb_next = tb1;
1062
1063 /* remove tb from the jmp_first list */
1064 ptb = &tb_next->jmp_first;
1065 for(;;) {
1066 tb1 = *ptb;
1067 n1 = (long)tb1 & 3;
1068 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1069 if (n1 == n && tb1 == tb)
1070 break;
1071 ptb = &tb1->jmp_next[n1];
1072 }
1073 *ptb = tb->jmp_next[n];
1074 tb->jmp_next[n] = NULL;
1075
1076 /* suppress the jump to next tb in generated code */
1077 tb_reset_jump(tb, n);
1078
1079 /* suppress jumps in the tb on which we could have jumped */
1080 tb_reset_jump_recursive(tb_next);
1081 }
1082}
1083
1084static void tb_reset_jump_recursive(TranslationBlock *tb)
1085{
1086 tb_reset_jump_recursive2(tb, 0);
1087 tb_reset_jump_recursive2(tb, 1);
1088}
1089
1090#if defined(TARGET_HAS_ICE)
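/* Resolve the breakpoint PC to its ram address through the physical page
   table and invalidate any TB covering that byte so the affected code is
   retranslated. */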
1091static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1092{
1093 target_ulong addr, pd;
1094 ram_addr_t ram_addr;
1095 PhysPageDesc *p;
1096
1097 addr = cpu_get_phys_page_debug(env, pc);
1098 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1099 if (!p) {
1100 pd = IO_MEM_UNASSIGNED;
1101 } else {
1102 pd = p->phys_offset;
1103 }
1104 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1105 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1106}
1107#endif
1108
1109/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1110 breakpoint is reached */
1111int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1112{
1113#if defined(TARGET_HAS_ICE)
1114 int i;
1115
1116 for(i = 0; i < env->nb_breakpoints; i++) {
1117 if (env->breakpoints[i] == pc)
1118 return 0;
1119 }
1120
1121 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1122 return -1;
1123 env->breakpoints[env->nb_breakpoints++] = pc;
1124
1125 breakpoint_invalidate(env, pc);
1126 return 0;
1127#else
1128 return -1;
1129#endif
1130}
1131
1132/* remove a breakpoint */
1133int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1134{
1135#if defined(TARGET_HAS_ICE)
1136 int i;
1137 for(i = 0; i < env->nb_breakpoints; i++) {
1138 if (env->breakpoints[i] == pc)
1139 goto found;
1140 }
1141 return -1;
1142 found:
1143 env->nb_breakpoints--;
1144 if (i < env->nb_breakpoints)
1145 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1146
1147 breakpoint_invalidate(env, pc);
1148 return 0;
1149#else
1150 return -1;
1151#endif
1152}
1153
1154/* enable or disable single step mode. EXCP_DEBUG is returned by the
1155 CPU loop after each instruction */
1156void cpu_single_step(CPUState *env, int enabled)
1157{
1158#if defined(TARGET_HAS_ICE)
1159 if (env->singlestep_enabled != enabled) {
1160 env->singlestep_enabled = enabled;
1161 /* must flush all the translated code to avoid inconsistencies */
1162 /* XXX: only flush what is necessary */
1163 tb_flush(env);
1164 }
1165#endif
1166}
1167
1168#ifndef VBOX
1169/* enable or disable low levels log */
1170void cpu_set_log(int log_flags)
1171{
1172 loglevel = log_flags;
1173 if (loglevel && !logfile) {
1174 logfile = fopen(logfilename, "w");
1175 if (!logfile) {
1176 perror(logfilename);
1177 _exit(1);
1178 }
1179#if !defined(CONFIG_SOFTMMU)
1180 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1181 {
1182 static uint8_t logfile_buf[4096];
1183 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1184 }
1185#else
1186 setvbuf(logfile, NULL, _IOLBF, 0);
1187#endif
1188 }
1189}
1190
1191void cpu_set_log_filename(const char *filename)
1192{
1193 logfilename = strdup(filename);
1194}
1195#endif /* !VBOX */
1196
1197/* mask must never be zero, except for A20 change call */
1198void cpu_interrupt(CPUState *env, int mask)
1199{
1200 TranslationBlock *tb;
1201 static int interrupt_lock;
1202
1203#ifdef VBOX
1204 VM_ASSERT_EMT(env->pVM);
1205 ASMAtomicOrS32(&env->interrupt_request, mask);
1206#else /* !VBOX */
1207 env->interrupt_request |= mask;
1208#endif /* !VBOX */
1209 /* if the cpu is currently executing code, we must unlink it and
1210 all the potentially executing TB */
1211 tb = env->current_tb;
1212 if (tb && !testandset(&interrupt_lock)) {
1213 env->current_tb = NULL;
1214 tb_reset_jump_recursive(tb);
1215 interrupt_lock = 0;
1216 }
1217}
1218
1219void cpu_reset_interrupt(CPUState *env, int mask)
1220{
1221#ifdef VBOX
1222 /*
1223 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1224 * for future changes!
1225 */
1226 ASMAtomicAndS32(&env->interrupt_request, ~mask);
1227#else /* !VBOX */
1228 env->interrupt_request &= ~mask;
1229#endif /* !VBOX */
1230}
1231
1232#ifndef VBOX
1233CPULogItem cpu_log_items[] = {
1234 { CPU_LOG_TB_OUT_ASM, "out_asm",
1235 "show generated host assembly code for each compiled TB" },
1236 { CPU_LOG_TB_IN_ASM, "in_asm",
1237 "show target assembly code for each compiled TB" },
1238 { CPU_LOG_TB_OP, "op",
1239 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1240#ifdef TARGET_I386
1241 { CPU_LOG_TB_OP_OPT, "op_opt",
1242 "show micro ops after optimization for each compiled TB" },
1243#endif
1244 { CPU_LOG_INT, "int",
1245 "show interrupts/exceptions in short format" },
1246 { CPU_LOG_EXEC, "exec",
1247 "show trace before each executed TB (lots of logs)" },
1248 { CPU_LOG_TB_CPU, "cpu",
1249 "show CPU state before bloc translation" },
1250#ifdef TARGET_I386
1251 { CPU_LOG_PCALL, "pcall",
1252 "show protected mode far calls/returns/exceptions" },
1253#endif
1254#ifdef DEBUG_IOPORT
1255 { CPU_LOG_IOPORT, "ioport",
1256 "show all i/o ports accesses" },
1257#endif
1258 { 0, NULL, NULL },
1259};
1260
1261static int cmp1(const char *s1, int n, const char *s2)
1262{
1263 if (strlen(s2) != n)
1264 return 0;
1265 return memcmp(s1, s2, n) == 0;
1266}
1267
1268/* takes a comma separated list of log masks. Return 0 if error. */
1269int cpu_str_to_log_mask(const char *str)
1270{
1271 CPULogItem *item;
1272 int mask;
1273 const char *p, *p1;
1274
1275 p = str;
1276 mask = 0;
1277 for(;;) {
1278 p1 = strchr(p, ',');
1279 if (!p1)
1280 p1 = p + strlen(p);
1281 if(cmp1(p,p1-p,"all")) {
1282 for(item = cpu_log_items; item->mask != 0; item++) {
1283 mask |= item->mask;
1284 }
1285 } else {
1286 for(item = cpu_log_items; item->mask != 0; item++) {
1287 if (cmp1(p, p1 - p, item->name))
1288 goto found;
1289 }
1290 return 0;
1291 }
1292 found:
1293 mask |= item->mask;
1294 if (*p1 != ',')
1295 break;
1296 p = p1 + 1;
1297 }
1298 return mask;
1299}
1300#endif /* !VBOX */
1301
1302#if !defined(VBOX) /* VBOX: we have our own routine. */
1303void cpu_abort(CPUState *env, const char *fmt, ...)
1304{
1305 va_list ap;
1306
1307 va_start(ap, fmt);
1308 fprintf(stderr, "qemu: fatal: ");
1309 vfprintf(stderr, fmt, ap);
1310 fprintf(stderr, "\n");
1311#ifdef TARGET_I386
1312 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1313#else
1314 cpu_dump_state(env, stderr, fprintf, 0);
1315#endif
1316 va_end(ap);
1317 abort();
1318}
1319#endif /* !VBOX */
1320
1321#if !defined(CONFIG_USER_ONLY)
1322
1323/* NOTE: if flush_global is true, also flush global entries (not
1324 implemented yet) */
1325void tlb_flush(CPUState *env, int flush_global)
1326{
1327 int i;
1328
1329#if defined(DEBUG_TLB)
1330 printf("tlb_flush:\n");
1331#endif
1332 /* must reset current TB so that interrupts cannot modify the
1333 links while we are modifying them */
1334 env->current_tb = NULL;
1335
1336 for(i = 0; i < CPU_TLB_SIZE; i++) {
1337 env->tlb_table[0][i].addr_read = -1;
1338 env->tlb_table[0][i].addr_write = -1;
1339 env->tlb_table[0][i].addr_code = -1;
1340 env->tlb_table[1][i].addr_read = -1;
1341 env->tlb_table[1][i].addr_write = -1;
1342 env->tlb_table[1][i].addr_code = -1;
1343 }
1344
1345 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1346
1347#if !defined(CONFIG_SOFTMMU)
1348 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1349#endif
1350#ifdef VBOX
1351 /* inform raw mode about TLB flush */
1352 remR3FlushTLB(env, flush_global);
1353#endif
1354#ifdef USE_KQEMU
1355 if (env->kqemu_enabled) {
1356 kqemu_flush(env, flush_global);
1357 }
1358#endif
1359 tlb_flush_count++;
1360}
1361
1362static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1363{
1364 if (addr == (tlb_entry->addr_read &
1365 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1366 addr == (tlb_entry->addr_write &
1367 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1368 addr == (tlb_entry->addr_code &
1369 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1370 tlb_entry->addr_read = -1;
1371 tlb_entry->addr_write = -1;
1372 tlb_entry->addr_code = -1;
1373 }
1374}
1375
1376void tlb_flush_page(CPUState *env, target_ulong addr)
1377{
1378 int i;
1379 TranslationBlock *tb;
1380
1381#if defined(DEBUG_TLB)
1382 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1383#endif
1384 /* must reset current TB so that interrupts cannot modify the
1385 links while we are modifying them */
1386 env->current_tb = NULL;
1387
1388 addr &= TARGET_PAGE_MASK;
1389 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1390 tlb_flush_entry(&env->tlb_table[0][i], addr);
1391 tlb_flush_entry(&env->tlb_table[1][i], addr);
1392
1393 /* Discard jump cache entries for any tb which might potentially
1394 overlap the flushed page. */
1395 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1396 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1397
1398 i = tb_jmp_cache_hash_page(addr);
1399 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1400
1401#if !defined(CONFIG_SOFTMMU)
1402 if (addr < MMAP_AREA_END)
1403 munmap((void *)addr, TARGET_PAGE_SIZE);
1404#endif
1405#ifdef VBOX
1406 /* inform raw mode about TLB page flush */
1407 remR3FlushPage(env, addr);
1408#endif /* VBOX */
1409#ifdef USE_KQEMU
1410 if (env->kqemu_enabled) {
1411 kqemu_flush_page(env, addr);
1412 }
1413#endif
1414}
1415
1416/* update the TLBs so that writes to code in the virtual page 'addr'
1417 can be detected */
1418static void tlb_protect_code(ram_addr_t ram_addr)
1419{
1420 cpu_physical_memory_reset_dirty(ram_addr,
1421 ram_addr + TARGET_PAGE_SIZE,
1422 CODE_DIRTY_FLAG);
1423#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1424 /** @todo Retest this? This function has changed... */
1425 remR3ProtectCode(cpu_single_env, ram_addr);
1426#endif
1427}
1428
1429/* update the TLB so that writes in physical page 'phys_addr' are no longer
1430 tested for self modifying code */
1431static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1432 target_ulong vaddr)
1433{
1434#ifdef VBOX
1435 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1436#endif
1437 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1438}
1439
1440static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1441 unsigned long start, unsigned long length)
1442{
1443 unsigned long addr;
1444 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1445 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1446 if ((addr - start) < length) {
1447 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1448 }
1449 }
1450}
1451
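/* Clear the given dirty flag bits for every page in [start, end) and switch
   the matching RAM write TLB entries of all CPUs back to IO_MEM_NOTDIRTY, so
   that the next write to the range goes through the slow path and re-marks
   the page dirty. */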
1452void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1453 int dirty_flags)
1454{
1455 CPUState *env;
1456 unsigned long length, start1;
1457 int i, mask, len;
1458 uint8_t *p;
1459
1460 start &= TARGET_PAGE_MASK;
1461 end = TARGET_PAGE_ALIGN(end);
1462
1463 length = end - start;
1464 if (length == 0)
1465 return;
1466 len = length >> TARGET_PAGE_BITS;
1467#ifdef USE_KQEMU
1468 /* XXX: should not depend on cpu context */
1469 env = first_cpu;
1470 if (env->kqemu_enabled) {
1471 ram_addr_t addr;
1472 addr = start;
1473 for(i = 0; i < len; i++) {
1474 kqemu_set_notdirty(env, addr);
1475 addr += TARGET_PAGE_SIZE;
1476 }
1477 }
1478#endif
1479 mask = ~dirty_flags;
1480 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1481#ifdef VBOX
1482 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1483#endif
1484 for(i = 0; i < len; i++)
1485 p[i] &= mask;
1486
1487 /* we modify the TLB cache so that the dirty bit will be set again
1488 when accessing the range */
1489 start1 = start + (unsigned long)phys_ram_base;
1490 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1491 for(i = 0; i < CPU_TLB_SIZE; i++)
1492 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1493 for(i = 0; i < CPU_TLB_SIZE; i++)
1494 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1495 }
1496
1497#if !defined(CONFIG_SOFTMMU)
1498#ifdef VBOX /**@todo remove this check */
1499# error "We shouldn't get here..."
1500#endif
1501 /* XXX: this is expensive */
1502 {
1503 VirtPageDesc *p;
1504 int j;
1505 target_ulong addr;
1506
1507 for(i = 0; i < L1_SIZE; i++) {
1508 p = l1_virt_map[i];
1509 if (p) {
1510 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1511 for(j = 0; j < L2_SIZE; j++) {
1512 if (p->valid_tag == virt_valid_tag &&
1513 p->phys_addr >= start && p->phys_addr < end &&
1514 (p->prot & PROT_WRITE)) {
1515 if (addr < MMAP_AREA_END) {
1516 mprotect((void *)addr, TARGET_PAGE_SIZE,
1517 p->prot & ~PROT_WRITE);
1518 }
1519 }
1520 addr += TARGET_PAGE_SIZE;
1521 p++;
1522 }
1523 }
1524 }
1525 }
1526#endif
1527}
1528
1529static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1530{
1531 ram_addr_t ram_addr;
1532
1533 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1534 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1535 tlb_entry->addend - (unsigned long)phys_ram_base;
1536 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1537 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1538 }
1539 }
1540}
1541
1542/* update the TLB according to the current state of the dirty bits */
1543void cpu_tlb_update_dirty(CPUState *env)
1544{
1545 int i;
1546 for(i = 0; i < CPU_TLB_SIZE; i++)
1547 tlb_update_dirty(&env->tlb_table[0][i]);
1548 for(i = 0; i < CPU_TLB_SIZE; i++)
1549 tlb_update_dirty(&env->tlb_table[1][i]);
1550}
1551
1552static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1553 unsigned long start)
1554{
1555 unsigned long addr;
1556 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1557 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1558 if (addr == start) {
1559 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1560 }
1561 }
1562}
1563
1564/* update the TLB corresponding to virtual page vaddr and phys addr
1565 addr so that it is no longer dirty */
1566static inline void tlb_set_dirty(CPUState *env,
1567 unsigned long addr, target_ulong vaddr)
1568{
1569 int i;
1570
1571 addr &= TARGET_PAGE_MASK;
1572 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1573 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1574 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1575}
1576
1577/* add a new TLB entry. At most one entry for a given virtual address
1578 is permitted. Return 0 if OK or 2 if the page could not be mapped
1579 (can only happen in non SOFTMMU mode for I/O pages or pages
1580 conflicting with the host address space). */
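/* For RAM pages the TLB 'addend' is the difference between the host address
   of the page and the guest virtual address, so the fast path computes the
   host pointer as vaddr + addend; for I/O pages the io_index is kept in the
   low bits of the addr_read/addr_write/addr_code fields instead. */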
1581int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1582 target_phys_addr_t paddr, int prot,
1583 int is_user, int is_softmmu)
1584{
1585 PhysPageDesc *p;
1586 unsigned long pd;
1587 unsigned int index;
1588 target_ulong address;
1589 target_phys_addr_t addend;
1590 int ret;
1591 CPUTLBEntry *te;
1592
1593 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1594 if (!p) {
1595 pd = IO_MEM_UNASSIGNED;
1596 } else {
1597 pd = p->phys_offset;
1598 }
1599#if defined(DEBUG_TLB)
1600 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1601 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1602#endif
1603
1604 ret = 0;
1605#if !defined(CONFIG_SOFTMMU)
1606 if (is_softmmu)
1607#endif
1608 {
1609 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1610 /* IO memory case */
1611 address = vaddr | pd;
1612 addend = paddr;
1613 } else {
1614 /* standard memory */
1615 address = vaddr;
1616 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1617 }
1618
1619 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1620 addend -= vaddr;
1621 te = &env->tlb_table[is_user][index];
1622 te->addend = addend;
1623 if (prot & PAGE_READ) {
1624 te->addr_read = address;
1625 } else {
1626 te->addr_read = -1;
1627 }
1628 if (prot & PAGE_EXEC) {
1629 te->addr_code = address;
1630 } else {
1631 te->addr_code = -1;
1632 }
1633 if (prot & PAGE_WRITE) {
1634 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1635 (pd & IO_MEM_ROMD)) {
1636 /* write access calls the I/O callback */
1637 te->addr_write = vaddr |
1638 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1639 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1640 !cpu_physical_memory_is_dirty(pd)) {
1641 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1642 } else {
1643 te->addr_write = address;
1644 }
1645 } else {
1646 te->addr_write = -1;
1647 }
1648#ifdef VBOX
1649 /* inform raw mode about TLB page change */
1650 /** @todo double check and fix this interface. OLD: remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user); */
1651 remR3SetPage(env, te, te, prot, is_user);
1652#endif
1653 }
1654#if !defined(CONFIG_SOFTMMU)
1655 else {
1656 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1657 /* IO access: no mapping is done as it will be handled by the
1658 soft MMU */
1659 if (!(env->hflags & HF_SOFTMMU_MASK))
1660 ret = 2;
1661 } else {
1662 void *map_addr;
1663
1664 if (vaddr >= MMAP_AREA_END) {
1665 ret = 2;
1666 } else {
1667 if (prot & PROT_WRITE) {
1668 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1669#if defined(TARGET_HAS_SMC) || 1
1670 first_tb ||
1671#endif
1672 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1673 !cpu_physical_memory_is_dirty(pd))) {
1674 /* ROM: we do as if code was inside */
1675 /* if code is present, we only map as read only and save the
1676 original mapping */
1677 VirtPageDesc *vp;
1678
1679 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1680 vp->phys_addr = pd;
1681 vp->prot = prot;
1682 vp->valid_tag = virt_valid_tag;
1683 prot &= ~PAGE_WRITE;
1684 }
1685 }
1686 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1687 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1688 if (map_addr == MAP_FAILED) {
1689 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1690 paddr, vaddr);
1691 }
1692 }
1693 }
1694 }
1695#endif
1696 return ret;
1697}
1698
1699/* called from signal handler: invalidate the code and unprotect the
1700 page. Return TRUE if the fault was successfully handled. */
1701int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1702{
1703#if !defined(CONFIG_SOFTMMU)
1704 VirtPageDesc *vp;
1705
1706#if defined(DEBUG_TLB)
1707 printf("page_unprotect: addr=0x%08x\n", addr);
1708#endif
1709 addr &= TARGET_PAGE_MASK;
1710
1711 /* if it is not mapped, no need to worry here */
1712 if (addr >= MMAP_AREA_END)
1713 return 0;
1714 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1715 if (!vp)
1716 return 0;
1717 /* NOTE: in this case, validate_tag is _not_ tested as it
1718 validates only the code TLB */
1719 if (vp->valid_tag != virt_valid_tag)
1720 return 0;
1721 if (!(vp->prot & PAGE_WRITE))
1722 return 0;
1723#if defined(DEBUG_TLB)
1724 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1725 addr, vp->phys_addr, vp->prot);
1726#endif
1727 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1728 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1729 (unsigned long)addr, vp->prot);
1730 /* set the dirty bit */
1731#ifdef VBOX
1732 if (RT_LIKELY((vp->phys_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1733#endif
1734 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1735 /* flush the code inside */
1736 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1737 return 1;
1738#elif defined(VBOX)
1739 addr &= TARGET_PAGE_MASK;
1740
1741 /* if it is not mapped, no need to worry here */
1742 if (addr >= MMAP_AREA_END)
1743 return 0;
1744 return 1;
1745#else
1746 return 0;
1747#endif
1748}
1749
1750#else
1751
1752void tlb_flush(CPUState *env, int flush_global)
1753{
1754}
1755
1756void tlb_flush_page(CPUState *env, target_ulong addr)
1757{
1758}
1759
1760int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1761 target_phys_addr_t paddr, int prot,
1762 int is_user, int is_softmmu)
1763{
1764 return 0;
1765}
1766
1767#ifndef VBOX
1768/* dump memory mappings */
1769void page_dump(FILE *f)
1770{
1771 unsigned long start, end;
1772 int i, j, prot, prot1;
1773 PageDesc *p;
1774
1775 fprintf(f, "%-8s %-8s %-8s %s\n",
1776 "start", "end", "size", "prot");
1777 start = -1;
1778 end = -1;
1779 prot = 0;
1780 for(i = 0; i <= L1_SIZE; i++) {
1781 if (i < L1_SIZE)
1782 p = l1_map[i];
1783 else
1784 p = NULL;
1785 for(j = 0;j < L2_SIZE; j++) {
1786 if (!p)
1787 prot1 = 0;
1788 else
1789 prot1 = p[j].flags;
1790 if (prot1 != prot) {
1791 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1792 if (start != -1) {
1793 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1794 start, end, end - start,
1795 prot & PAGE_READ ? 'r' : '-',
1796 prot & PAGE_WRITE ? 'w' : '-',
1797 prot & PAGE_EXEC ? 'x' : '-');
1798 }
1799 if (prot1 != 0)
1800 start = end;
1801 else
1802 start = -1;
1803 prot = prot1;
1804 }
1805 if (!p)
1806 break;
1807 }
1808 }
1809}
1810#endif /* !VBOX */
1811
1812int page_get_flags(target_ulong address)
1813{
1814 PageDesc *p;
1815
1816 p = page_find(address >> TARGET_PAGE_BITS);
1817 if (!p)
1818 return 0;
1819 return p->flags;
1820}
1821
1822/* modify the flags of a page and invalidate the code if
1823 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1824 depending on PAGE_WRITE */
1825void page_set_flags(target_ulong start, target_ulong end, int flags)
1826{
1827 PageDesc *p;
1828 target_ulong addr;
1829
1830 start = start & TARGET_PAGE_MASK;
1831 end = TARGET_PAGE_ALIGN(end);
1832 if (flags & PAGE_WRITE)
1833 flags |= PAGE_WRITE_ORG;
1834#if defined(VBOX)
1835 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
1836#endif
1837 spin_lock(&tb_lock);
1838 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1839 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1840 /* if the write protection is set, then we invalidate the code
1841 inside */
1842 if (!(p->flags & PAGE_WRITE) &&
1843 (flags & PAGE_WRITE) &&
1844 p->first_tb) {
1845 tb_invalidate_phys_page(addr, 0, NULL);
1846 }
1847 p->flags = flags;
1848 }
1849 spin_unlock(&tb_lock);
1850}
1851
1852/* called from signal handler: invalidate the code and unprotect the
1853 page. Return TRUE if the fault was successfully handled. */
1854int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1855{
1856 unsigned int page_index, prot, pindex;
1857 PageDesc *p, *p1;
1858 target_ulong host_start, host_end, addr;
1859
1860 host_start = address & qemu_host_page_mask;
1861 page_index = host_start >> TARGET_PAGE_BITS;
1862 p1 = page_find(page_index);
1863 if (!p1)
1864 return 0;
1865 host_end = host_start + qemu_host_page_size;
1866 p = p1;
1867 prot = 0;
1868 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1869 prot |= p->flags;
1870 p++;
1871 }
1872 /* if the page was really writable, then we change its
1873 protection back to writable */
1874 if (prot & PAGE_WRITE_ORG) {
1875 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1876 if (!(p1[pindex].flags & PAGE_WRITE)) {
1877 mprotect((void *)g2h(host_start), qemu_host_page_size,
1878 (prot & PAGE_BITS) | PAGE_WRITE);
1879 p1[pindex].flags |= PAGE_WRITE;
1880 /* and since the content will be modified, we must invalidate
1881 the corresponding translated code. */
1882 tb_invalidate_phys_page(address, pc, puc);
1883#ifdef DEBUG_TB_CHECK
1884 tb_invalidate_check(address);
1885#endif
1886 return 1;
1887 }
1888 }
1889 return 0;
1890}
1891
1892/* call this function when system calls directly modify a memory area */
1893/* ??? This should be redundant now that we have lock_user. */
1894void page_unprotect_range(target_ulong data, target_ulong data_size)
1895{
1896 target_ulong start, end, addr;
1897
1898 start = data;
1899 end = start + data_size;
1900 start &= TARGET_PAGE_MASK;
1901 end = TARGET_PAGE_ALIGN(end);
1902 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1903 page_unprotect(addr, 0, NULL);
1904 }
1905}
1906
1907static inline void tlb_set_dirty(CPUState *env,
1908 unsigned long addr, target_ulong vaddr)
1909{
1910}
1911#endif /* defined(CONFIG_USER_ONLY) */
1912
1913/* register physical memory. 'size' must be a multiple of the target
1914 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1915 io memory page */
1916void cpu_register_physical_memory(target_phys_addr_t start_addr,
1917 unsigned long size,
1918 unsigned long phys_offset)
1919{
1920 target_phys_addr_t addr, end_addr;
1921 PhysPageDesc *p;
1922 CPUState *env;
1923
1924 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1925 end_addr = start_addr + size;
1926 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1927 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1928 p->phys_offset = phys_offset;
1929 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1930 (phys_offset & IO_MEM_ROMD))
1931 phys_offset += TARGET_PAGE_SIZE;
1932 }
1933
1934 /* since each CPU stores ram addresses in its TLB cache, we must
1935 reset the modified entries */
1936 /* XXX: slow ! */
1937 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1938 tlb_flush(env, 1);
1939 }
1940}
1941
1942/* XXX: temporary until new memory mapping API */
1943uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1944{
1945 PhysPageDesc *p;
1946
1947 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1948 if (!p)
1949 return IO_MEM_UNASSIGNED;
1950 return p->phys_offset;
1951}
1952
1953static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1954{
1955#ifdef DEBUG_UNASSIGNED
1956 printf("Unassigned mem read 0x%08x\n", (int)addr);
1957#endif
1958 return 0;
1959}
1960
1961static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1962{
1963#ifdef DEBUG_UNASSIGNED
1964 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1965#endif
1966}
1967
1968static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1969 unassigned_mem_readb,
1970 unassigned_mem_readb,
1971 unassigned_mem_readb,
1972};
1973
1974static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1975 unassigned_mem_writeb,
1976 unassigned_mem_writeb,
1977 unassigned_mem_writeb,
1978};
1979
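/* The notdirty handlers are installed as the write path for RAM pages that
   are not marked fully dirty (in particular pages containing translated code,
   whose CODE_DIRTY_FLAG is cleared by tlb_protect_code()). On each write they
   invalidate any translated code in the written bytes, perform the store,
   update the dirty flags and, once the page is fully dirty again (0xff),
   switch the TLB entry back to the direct RAM path via tlb_set_dirty(). */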
1980static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1981{
1982 unsigned long ram_addr;
1983 int dirty_flags;
1984 ram_addr = addr - (unsigned long)phys_ram_base;
1985#ifdef VBOX
1986 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
1987 dirty_flags = 0xff;
1988 else
1989#endif /* VBOX */
1990 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1991 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1992#if !defined(CONFIG_USER_ONLY)
1993 tb_invalidate_phys_page_fast(ram_addr, 1);
1994# ifdef VBOX
1995 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
1996 dirty_flags = 0xff;
1997 else
1998# endif /* VBOX */
1999 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2000#endif
2001 }
2002 stb_p((uint8_t *)(long)addr, val);
2003#ifdef USE_KQEMU
2004 if (cpu_single_env->kqemu_enabled &&
2005 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2006 kqemu_modify_page(cpu_single_env, ram_addr);
2007#endif
2008 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2009#ifdef VBOX
2010 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2011#endif /* VBOX */
2012 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2013 /* we remove the notdirty callback only if the code has been
2014 flushed */
2015 if (dirty_flags == 0xff)
2016 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2017}
2018
2019static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2020{
2021 unsigned long ram_addr;
2022 int dirty_flags;
2023 ram_addr = addr - (unsigned long)phys_ram_base;
2024#ifdef VBOX
2025 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2026 dirty_flags = 0xff;
2027 else
2028#endif /* VBOX */
2029 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2030 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2031#if !defined(CONFIG_USER_ONLY)
2032 tb_invalidate_phys_page_fast(ram_addr, 2);
2033# ifdef VBOX
2034 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2035 dirty_flags = 0xff;
2036 else
2037# endif /* VBOX */
2038 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2039#endif
2040 }
2041 stw_p((uint8_t *)(long)addr, val);
2042#ifdef USE_KQEMU
2043 if (cpu_single_env->kqemu_enabled &&
2044 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2045 kqemu_modify_page(cpu_single_env, ram_addr);
2046#endif
2047 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2048#ifdef VBOX
2049 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2050#endif
2051 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2052 /* we remove the notdirty callback only if the code has been
2053 flushed */
2054 if (dirty_flags == 0xff)
2055 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2056}
2057
2058static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2059{
2060 unsigned long ram_addr;
2061 int dirty_flags;
2062 ram_addr = addr - (unsigned long)phys_ram_base;
2063#ifdef VBOX
2064 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2065 dirty_flags = 0xff;
2066 else
2067#endif /* VBOX */
2068 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2069 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2070#if !defined(CONFIG_USER_ONLY)
2071 tb_invalidate_phys_page_fast(ram_addr, 4);
2072# ifdef VBOX
2073 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2074 dirty_flags = 0xff;
2075 else
2076# endif /* VBOX */
2077 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2078#endif
2079 }
2080 stl_p((uint8_t *)(long)addr, val);
2081#ifdef USE_KQEMU
2082 if (cpu_single_env->kqemu_enabled &&
2083 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2084 kqemu_modify_page(cpu_single_env, ram_addr);
2085#endif
2086 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2087#ifdef VBOX
2088 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2089#endif
2090 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2091 /* we remove the notdirty callback only if the code has been
2092 flushed */
2093 if (dirty_flags == 0xff)
2094 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2095}
2096
2097static CPUReadMemoryFunc *error_mem_read[3] = {
2098 NULL, /* never used */
2099 NULL, /* never used */
2100 NULL, /* never used */
2101};
2102
2103static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2104 notdirty_mem_writeb,
2105 notdirty_mem_writew,
2106 notdirty_mem_writel,
2107};
2108
2109static void io_mem_init(void)
2110{
2111 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2112 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2113 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2114 io_mem_nb = 5;
2115
2116#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2117 /* alloc dirty bits array */
2118 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2119 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2120#endif /* !VBOX */
2121}
2122
2123/* mem_read and mem_write are arrays of functions containing the
2124 functions to access byte (index 0), word (index 1) and dword (index
2125 2). All functions must be supplied. If io_index is positive, the
2126 corresponding io zone is modified. If it is zero or negative, a new io
2127 zone is allocated. The return value can be used with
2128 cpu_register_physical_memory(). (-1) is returned on error. */
2129int cpu_register_io_memory(int io_index,
2130 CPUReadMemoryFunc **mem_read,
2131 CPUWriteMemoryFunc **mem_write,
2132 void *opaque)
2133{
2134 int i;
2135
2136 if (io_index <= 0) {
2137 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2138 return -1;
2139 io_index = io_mem_nb++;
2140 } else {
2141 if (io_index >= IO_MEM_NB_ENTRIES)
2142 return -1;
2143 }
2144
2145 for(i = 0;i < 3; i++) {
2146 io_mem_read[io_index][i] = mem_read[i];
2147 io_mem_write[io_index][i] = mem_write[i];
2148 }
2149 io_mem_opaque[io_index] = opaque;
2150 return io_index << IO_MEM_SHIFT;
2151}
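/* Illustrative sketch, not part of the original source: how a device model
   might hook MMIO callbacks with cpu_register_io_memory() and map them with
   cpu_register_physical_memory().  The callback names, base address and
   opaque pointer are made-up placeholders. */
#if 0
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0; /* return the register value at this offset */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* update device state for this offset */
}

static CPUReadMemoryFunc *example_dev_read_fn[3] = {
    example_dev_read, example_dev_read, example_dev_read,   /* b, w, l */
};
static CPUWriteMemoryFunc *example_dev_write_fn[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static void example_dev_map(void *opaque)
{
    /* io_index 0 allocates a new io zone; -1 signals an error */
    int io = cpu_register_io_memory(0, example_dev_read_fn, example_dev_write_fn, opaque);
    if (io >= 0)
        cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}
#endif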
2152
2153CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2154{
2155 return io_mem_write[io_index >> IO_MEM_SHIFT];
2156}
2157
2158CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2159{
2160 return io_mem_read[io_index >> IO_MEM_SHIFT];
2161}
2162
2163/* physical memory access (slow version, mainly for debug) */
2164#if defined(CONFIG_USER_ONLY)
2165void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2166 int len, int is_write)
2167{
2168 int l, flags;
2169 target_ulong page;
2170 void * p;
2171
2172 while (len > 0) {
2173 page = addr & TARGET_PAGE_MASK;
2174 l = (page + TARGET_PAGE_SIZE) - addr;
2175 if (l > len)
2176 l = len;
2177 flags = page_get_flags(page);
2178 if (!(flags & PAGE_VALID))
2179 return;
2180 if (is_write) {
2181 if (!(flags & PAGE_WRITE))
2182 return;
2183 p = lock_user(addr, len, 0);
2184 memcpy(p, buf, len);
2185 unlock_user(p, addr, len);
2186 } else {
2187 if (!(flags & PAGE_READ))
2188 return;
2189 p = lock_user(addr, len, 1);
2190 memcpy(buf, p, len);
2191 unlock_user(p, addr, 0);
2192 }
2193 len -= l;
2194 buf += l;
2195 addr += l;
2196 }
2197}
2198
2199#else
2200void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2201 int len, int is_write)
2202{
2203 int l, io_index;
2204 uint8_t *ptr;
2205 uint32_t val;
2206 target_phys_addr_t page;
2207 unsigned long pd;
2208 PhysPageDesc *p;
2209
2210 while (len > 0) {
2211 page = addr & TARGET_PAGE_MASK;
2212 l = (page + TARGET_PAGE_SIZE) - addr;
2213 if (l > len)
2214 l = len;
2215 p = phys_page_find(page >> TARGET_PAGE_BITS);
2216 if (!p) {
2217 pd = IO_MEM_UNASSIGNED;
2218 } else {
2219 pd = p->phys_offset;
2220 }
2221
2222 if (is_write) {
2223 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2224 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2225 /* XXX: could force cpu_single_env to NULL to avoid
2226 potential bugs */
2227 if (l >= 4 && ((addr & 3) == 0)) {
2228 /* 32 bit write access */
2229 val = ldl_p(buf);
2230 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2231 l = 4;
2232 } else if (l >= 2 && ((addr & 1) == 0)) {
2233 /* 16 bit write access */
2234 val = lduw_p(buf);
2235 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2236 l = 2;
2237 } else {
2238 /* 8 bit write access */
2239 val = ldub_p(buf);
2240 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2241 l = 1;
2242 }
2243 } else {
2244 unsigned long addr1;
2245 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2246 /* RAM case */
2247 ptr = phys_ram_base + addr1;
2248#ifdef VBOX
2249 remR3PhysWrite(ptr, buf, l);
2250#else
2251 memcpy(ptr, buf, l);
2252#endif
2253 if (!cpu_physical_memory_is_dirty(addr1)) {
2254 /* invalidate code */
2255 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2256 /* set dirty bit */
2257#ifdef VBOX
2258 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2259#endif
2260 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2261 (0xff & ~CODE_DIRTY_FLAG);
2262 }
2263 }
2264 } else {
2265 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2266 !(pd & IO_MEM_ROMD)) {
2267 /* I/O case */
2268 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2269 if (l >= 4 && ((addr & 3) == 0)) {
2270 /* 32 bit read access */
2271 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2272 stl_p(buf, val);
2273 l = 4;
2274 } else if (l >= 2 && ((addr & 1) == 0)) {
2275 /* 16 bit read access */
2276 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2277 stw_p(buf, val);
2278 l = 2;
2279 } else {
2280 /* 8 bit read access */
2281 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2282 stb_p(buf, val);
2283 l = 1;
2284 }
2285 } else {
2286 /* RAM case */
2287 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2288 (addr & ~TARGET_PAGE_MASK);
2289#ifdef VBOX
2290 remR3PhysRead(ptr, buf, l);
2291#else
2292 memcpy(buf, ptr, l);
2293#endif
2294 }
2295 }
2296 len -= l;
2297 buf += l;
2298 addr += l;
2299 }
2300}
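/* Illustrative sketch, not part of the original source: copying between two
   guest physical ranges through the cpu_physical_memory_read/write helpers
   used elsewhere in this file (thin wrappers over cpu_physical_memory_rw
   with is_write 0/1). */
#if 0
static void example_phys_copy(target_phys_addr_t dst, target_phys_addr_t src,
                              int len)
{
    uint8_t tmp[64];
    int l;

    while (len > 0) {
        l = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, l);
        cpu_physical_memory_write(dst, tmp, l);
        src += l;
        dst += l;
        len -= l;
    }
}
#endif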
2301
2302/* used for ROM loading: can write to RAM and ROM */
2303void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2304 const uint8_t *buf, int len)
2305{
2306 int l;
2307 uint8_t *ptr;
2308 target_phys_addr_t page;
2309 unsigned long pd;
2310 PhysPageDesc *p;
2311
2312 while (len > 0) {
2313 page = addr & TARGET_PAGE_MASK;
2314 l = (page + TARGET_PAGE_SIZE) - addr;
2315 if (l > len)
2316 l = len;
2317 p = phys_page_find(page >> TARGET_PAGE_BITS);
2318 if (!p) {
2319 pd = IO_MEM_UNASSIGNED;
2320 } else {
2321 pd = p->phys_offset;
2322 }
2323
2324 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2325 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2326 !(pd & IO_MEM_ROMD)) {
2327 /* do nothing */
2328 } else {
2329 unsigned long addr1;
2330 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2331 /* ROM/RAM case */
2332 ptr = phys_ram_base + addr1;
2333 memcpy(ptr, buf, l);
2334 }
2335 len -= l;
2336 buf += l;
2337 addr += l;
2338 }
2339}
2340
2341
2342/* warning: addr must be aligned */
2343uint32_t ldl_phys(target_phys_addr_t addr)
2344{
2345 int io_index;
2346 uint8_t *ptr;
2347 uint32_t val;
2348 unsigned long pd;
2349 PhysPageDesc *p;
2350
2351 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2352 if (!p) {
2353 pd = IO_MEM_UNASSIGNED;
2354 } else {
2355 pd = p->phys_offset;
2356 }
2357
2358 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2359 !(pd & IO_MEM_ROMD)) {
2360 /* I/O case */
2361 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2362 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2363 } else {
2364 /* RAM case */
2365 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2366 (addr & ~TARGET_PAGE_MASK);
2367 val = ldl_p(ptr);
2368 }
2369 return val;
2370}
2371
2372/* warning: addr must be aligned */
2373uint64_t ldq_phys(target_phys_addr_t addr)
2374{
2375 int io_index;
2376 uint8_t *ptr;
2377 uint64_t val;
2378 unsigned long pd;
2379 PhysPageDesc *p;
2380
2381 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2382 if (!p) {
2383 pd = IO_MEM_UNASSIGNED;
2384 } else {
2385 pd = p->phys_offset;
2386 }
2387
2388 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2389 !(pd & IO_MEM_ROMD)) {
2390 /* I/O case */
2391 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2392#ifdef TARGET_WORDS_BIGENDIAN
2393 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2394 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2395#else
2396 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2397 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2398#endif
2399 } else {
2400 /* RAM case */
2401 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2402 (addr & ~TARGET_PAGE_MASK);
2403 val = ldq_p(ptr);
2404 }
2405 return val;
2406}
2407
2408/* XXX: optimize */
2409uint32_t ldub_phys(target_phys_addr_t addr)
2410{
2411 uint8_t val;
2412 cpu_physical_memory_read(addr, &val, 1);
2413 return val;
2414}
2415
2416/* XXX: optimize */
2417uint32_t lduw_phys(target_phys_addr_t addr)
2418{
2419 uint16_t val;
2420 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2421 return tswap16(val);
2422}
2423
2424/* warning: addr must be aligned. The ram page is not marked as dirty
2425 and the code inside is not invalidated. It is useful if the dirty
2426 bits are used to track modified PTEs */
2427void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2428{
2429 int io_index;
2430 uint8_t *ptr;
2431 unsigned long pd;
2432 PhysPageDesc *p;
2433
2434 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2435 if (!p) {
2436 pd = IO_MEM_UNASSIGNED;
2437 } else {
2438 pd = p->phys_offset;
2439 }
2440
2441 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2442 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2443 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2444 } else {
2445 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2446 (addr & ~TARGET_PAGE_MASK);
2447 stl_p(ptr, val);
2448 }
2449}
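/* Illustrative sketch, not part of the original source: updating a guest
   PTE in place with stl_phys_notdirty() so the dirty bitmap keeps tracking
   guest modifications only.  'pte_addr' and the bit mask are made-up
   placeholders. */
#if 0
static void example_set_pte_bit(target_phys_addr_t pte_addr, uint32_t bit)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | bit);
}
#endif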
2450
2451/* warning: addr must be aligned */
2452void stl_phys(target_phys_addr_t addr, uint32_t val)
2453{
2454 int io_index;
2455 uint8_t *ptr;
2456 unsigned long pd;
2457 PhysPageDesc *p;
2458
2459 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2460 if (!p) {
2461 pd = IO_MEM_UNASSIGNED;
2462 } else {
2463 pd = p->phys_offset;
2464 }
2465
2466 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2467 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2468 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2469 } else {
2470 unsigned long addr1;
2471 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2472 /* RAM case */
2473 ptr = phys_ram_base + addr1;
2474 stl_p(ptr, val);
2475 if (!cpu_physical_memory_is_dirty(addr1)) {
2476 /* invalidate code */
2477 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2478 /* set dirty bit */
2479#ifdef VBOX
2480 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2481#endif
2482 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2483 (0xff & ~CODE_DIRTY_FLAG);
2484 }
2485 }
2486}
2487
2488/* XXX: optimize */
2489void stb_phys(target_phys_addr_t addr, uint32_t val)
2490{
2491 uint8_t v = val;
2492 cpu_physical_memory_write(addr, &v, 1);
2493}
2494
2495/* XXX: optimize */
2496void stw_phys(target_phys_addr_t addr, uint32_t val)
2497{
2498 uint16_t v = tswap16(val);
2499 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2500}
2501
2502/* XXX: optimize */
2503void stq_phys(target_phys_addr_t addr, uint64_t val)
2504{
2505 val = tswap64(val);
2506 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2507}
2508
2509#endif
2510
2511/* virtual memory access for debug */
2512int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2513 uint8_t *buf, int len, int is_write)
2514{
2515 int l;
2516 target_ulong page, phys_addr;
2517
2518 while (len > 0) {
2519 page = addr & TARGET_PAGE_MASK;
2520 phys_addr = cpu_get_phys_page_debug(env, page);
2521 /* if no physical page mapped, return an error */
2522 if (phys_addr == -1)
2523 return -1;
2524 l = (page + TARGET_PAGE_SIZE) - addr;
2525 if (l > len)
2526 l = len;
2527 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2528 buf, l, is_write);
2529 len -= l;
2530 buf += l;
2531 addr += l;
2532 }
2533 return 0;
2534}
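/* Illustrative sketch, not part of the original source: a debugger-style
   read of guest-virtual memory via cpu_memory_rw_debug(), which resolves
   each page through cpu_get_phys_page_debug(). */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   void *buf, int len)
{
    /* returns 0 on success, -1 if a page is not mapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)buf, len, 0);
}
#endif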
2535
2536#ifndef VBOX
2537void dump_exec_info(FILE *f,
2538 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2539{
2540 int i, target_code_size, max_target_code_size;
2541 int direct_jmp_count, direct_jmp2_count, cross_page;
2542 TranslationBlock *tb;
2543
2544 target_code_size = 0;
2545 max_target_code_size = 0;
2546 cross_page = 0;
2547 direct_jmp_count = 0;
2548 direct_jmp2_count = 0;
2549 for(i = 0; i < nb_tbs; i++) {
2550 tb = &tbs[i];
2551 target_code_size += tb->size;
2552 if (tb->size > max_target_code_size)
2553 max_target_code_size = tb->size;
2554 if (tb->page_addr[1] != -1)
2555 cross_page++;
2556 if (tb->tb_next_offset[0] != 0xffff) {
2557 direct_jmp_count++;
2558 if (tb->tb_next_offset[1] != 0xffff) {
2559 direct_jmp2_count++;
2560 }
2561 }
2562 }
2563 /* XXX: avoid using doubles ? */
2564 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2565 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2566 nb_tbs ? target_code_size / nb_tbs : 0,
2567 max_target_code_size);
2568 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2569 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2570 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2571 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2572 cross_page,
2573 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2574 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2575 direct_jmp_count,
2576 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2577 direct_jmp2_count,
2578 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2579 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2580 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2581 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2582}
2583#endif /* !VBOX */
2584
2585#if !defined(CONFIG_USER_ONLY)
2586
2587#define MMUSUFFIX _cmmu
2588#define GETPC() NULL
2589#define env cpu_single_env
2590#define SOFTMMU_CODE_ACCESS
2591
2592#define SHIFT 0
2593#include "softmmu_template.h"
2594
2595#define SHIFT 1
2596#include "softmmu_template.h"
2597
2598#define SHIFT 2
2599#include "softmmu_template.h"
2600
2601#define SHIFT 3
2602#include "softmmu_template.h"
2603
2604#undef env
2605
2606#endif