VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 2422

Last change on this file since 2422 was 2422, checked in by vboxsync, 18 years ago

Removed the old recompiler code.

  • Property svn:eol-style set to native
File size: 79.3 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifndef VBOX
22#ifdef _WIN32
23#include <windows.h>
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35#else /* VBOX */
36# include <stdlib.h>
37# include <stdio.h>
38# include <inttypes.h>
39# include <iprt/alloc.h>
40# include <iprt/string.h>
41# include <iprt/param.h>
42# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
43#endif /* VBOX */
44
45#include "cpu.h"
46#include "exec-all.h"
47#if defined(CONFIG_USER_ONLY)
48#include <qemu.h>
49#endif
50
51//#define DEBUG_TB_INVALIDATE
52//#define DEBUG_FLUSH
53//#define DEBUG_TLB
54//#define DEBUG_UNASSIGNED
55
56/* make various TB consistency checks */
57//#define DEBUG_TB_CHECK
58//#define DEBUG_TLB_CHECK
59
60#if !defined(CONFIG_USER_ONLY)
61/* TB consistency checks only implemented for usermode emulation. */
62#undef DEBUG_TB_CHECK
63#endif
64
65/* threshold to flush the translated code buffer */
66#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
67
68#define SMC_BITMAP_USE_THRESHOLD 10
69
70#define MMAP_AREA_START 0x00000000
71#define MMAP_AREA_END 0xa8000000
72
73#if defined(TARGET_SPARC64)
74#define TARGET_PHYS_ADDR_SPACE_BITS 41
75#elif defined(TARGET_PPC64)
76#define TARGET_PHYS_ADDR_SPACE_BITS 42
77#else
78/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
79#define TARGET_PHYS_ADDR_SPACE_BITS 32
80#endif
81
82TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
83TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84int nb_tbs;
85/* any access to the tbs or the page table must use this lock */
86spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87
88uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
89#if defined(__MINGW32__)
90 __attribute__((aligned (16)));
91#else
92 __attribute__((aligned (32)));
93#endif
94uint8_t *code_gen_ptr;
95
96#ifndef VBOX
97int phys_ram_size;
98int phys_ram_fd;
99int phys_ram_size;
100#else /* VBOX */
101RTGCPHYS phys_ram_size;
102/* we have memory ranges (the high PC-BIOS mapping) which
103 cause some pages to fall outside the dirty map here. */
104uint32_t phys_ram_dirty_size;
105#endif /* VBOX */
106#if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB))
107uint8_t *phys_ram_base;
108#endif
109uint8_t *phys_ram_dirty;
110
111CPUState *first_cpu;
112/* current CPU in the current thread. It is only valid inside
113 cpu_exec() */
114CPUState *cpu_single_env;
115
116typedef struct PageDesc {
117 /* list of TBs intersecting this ram page */
118 TranslationBlock *first_tb;
119 /* in order to optimize self modifying code, we count the number
120 of lookups we do to a given page to use a bitmap */
121 unsigned int code_write_count;
122 uint8_t *code_bitmap;
123#if defined(CONFIG_USER_ONLY)
124 unsigned long flags;
125#endif
126} PageDesc;
127
128typedef struct PhysPageDesc {
129 /* offset in host memory of the page + io_index in the low 12 bits */
130 uint32_t phys_offset;
131} PhysPageDesc;
132
133#define L2_BITS 10
134#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
135
136#define L1_SIZE (1 << L1_BITS)
137#define L2_SIZE (1 << L2_BITS)
138
139static void io_mem_init(void);
140
141unsigned long qemu_real_host_page_size;
142unsigned long qemu_host_page_bits;
143unsigned long qemu_host_page_size;
144unsigned long qemu_host_page_mask;
145
146/* XXX: for system emulation, it could just be an array */
147static PageDesc *l1_map[L1_SIZE];
148PhysPageDesc **l1_phys_map;
149
150/* io memory support */
151CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
152CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
153void *io_mem_opaque[IO_MEM_NB_ENTRIES];
154static int io_mem_nb;
155
156#ifndef VBOX
157/* log support */
158char *logfilename = "/tmp/qemu.log";
159#endif /* !VBOX */
160FILE *logfile;
161int loglevel;
162
163/* statistics */
164static int tlb_flush_count;
165static int tb_flush_count;
166#ifndef VBOX
167static int tb_phys_invalidate_count;
168#endif /* !VBOX */
169
170static void page_init(void)
171{
172 /* NOTE: we can always suppose that qemu_host_page_size >=
173 TARGET_PAGE_SIZE */
174#ifdef VBOX
175 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
176 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
177 qemu_real_host_page_size = PAGE_SIZE;
178#else /* !VBOX */
179#ifdef _WIN32
180 {
181 SYSTEM_INFO system_info;
182 DWORD old_protect;
183
184 GetSystemInfo(&system_info);
185 qemu_real_host_page_size = system_info.dwPageSize;
186
187 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
188 PAGE_EXECUTE_READWRITE, &old_protect);
189 }
190#else
191 qemu_real_host_page_size = getpagesize();
192 {
193 unsigned long start, end;
194
195 start = (unsigned long)code_gen_buffer;
196 start &= ~(qemu_real_host_page_size - 1);
197
198 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
199 end += qemu_real_host_page_size - 1;
200 end &= ~(qemu_real_host_page_size - 1);
201
202 mprotect((void *)start, end - start,
203 PROT_READ | PROT_WRITE | PROT_EXEC);
204 }
205#endif
206#endif /* !VBOX */
207
208 if (qemu_host_page_size == 0)
209 qemu_host_page_size = qemu_real_host_page_size;
210 if (qemu_host_page_size < TARGET_PAGE_SIZE)
211 qemu_host_page_size = TARGET_PAGE_SIZE;
212 qemu_host_page_bits = 0;
213 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
214 qemu_host_page_bits++;
215 qemu_host_page_mask = ~(qemu_host_page_size - 1);
216 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
217 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
218}
219
220static inline PageDesc *page_find_alloc(unsigned int index)
221{
222 PageDesc **lp, *p;
223
224 lp = &l1_map[index >> L2_BITS];
225 p = *lp;
226 if (!p) {
227 /* allocate if not found */
228 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
229 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
230 *lp = p;
231 }
232 return p + (index & (L2_SIZE - 1));
233}
234
235static inline PageDesc *page_find(unsigned int index)
236{
237 PageDesc *p;
238
239 p = l1_map[index >> L2_BITS];
240 if (!p)
241 return 0;
242 return p + (index & (L2_SIZE - 1));
243}
244
245static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
246{
247 void **lp, **p;
248 PhysPageDesc *pd;
249
250 p = (void **)l1_phys_map;
251#if TARGET_PHYS_ADDR_SPACE_BITS > 32
252
253#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
254#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
255#endif
256 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
257 p = *lp;
258 if (!p) {
259 /* allocate if not found */
260 if (!alloc)
261 return NULL;
262 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
263 memset(p, 0, sizeof(void *) * L1_SIZE);
264 *lp = p;
265 }
266#endif
267 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
268 pd = *lp;
269 if (!pd) {
270 int i;
271 /* allocate if not found */
272 if (!alloc)
273 return NULL;
274 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
275 *lp = pd;
276 for (i = 0; i < L2_SIZE; i++)
277 pd[i].phys_offset = IO_MEM_UNASSIGNED;
278 }
279#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
280 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
281 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
282 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
283 return pd;
284#else
285 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
286#endif
287}
288
289static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
290{
291 return phys_page_find_alloc(index, 0);
292}
293
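/* Illustrative worked example, assuming TARGET_PAGE_BITS == 12 and a 32-bit
   physical address space (so L1_BITS == L2_BITS == 10): for the guest
   physical address 0x12345678, phys_page_find_alloc() computes

       index    = 0x12345678 >> TARGET_PAGE_BITS     = 0x12345
       l1_index = (index >> L2_BITS) & (L1_SIZE - 1) = 0x48
       l2_index = index & (L2_SIZE - 1)              = 0x345

   and returns ((PhysPageDesc *)l1_phys_map[l1_index]) + l2_index. Each
   second-level table covers L2_SIZE pages (4 MB of guest physical memory)
   and is only allocated once a page in that window is registered. */
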
294#if !defined(CONFIG_USER_ONLY)
295static void tlb_protect_code(ram_addr_t ram_addr);
296static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
297 target_ulong vaddr);
298#endif
299
300void cpu_exec_init(CPUState *env)
301{
302 CPUState **penv;
303 int cpu_index;
304
305 if (!code_gen_ptr) {
306 code_gen_ptr = code_gen_buffer;
307 page_init();
308 io_mem_init();
309 }
310 env->next_cpu = NULL;
311 penv = &first_cpu;
312 cpu_index = 0;
313 while (*penv != NULL) {
314 penv = (CPUState **)&(*penv)->next_cpu;
315 cpu_index++;
316 }
317 env->cpu_index = cpu_index;
318 *penv = env;
319}
320
321static inline void invalidate_page_bitmap(PageDesc *p)
322{
323 if (p->code_bitmap) {
324 qemu_free(p->code_bitmap);
325 p->code_bitmap = NULL;
326 }
327 p->code_write_count = 0;
328}
329
330/* set to NULL all the 'first_tb' fields in all PageDescs */
331static void page_flush_tb(void)
332{
333 int i, j;
334 PageDesc *p;
335
336 for(i = 0; i < L1_SIZE; i++) {
337 p = l1_map[i];
338 if (p) {
339 for(j = 0; j < L2_SIZE; j++) {
340 p->first_tb = NULL;
341 invalidate_page_bitmap(p);
342 p++;
343 }
344 }
345 }
346}
347
348/* flush all the translation blocks */
349/* XXX: tb_flush is currently not thread safe */
350void tb_flush(CPUState *env1)
351{
352 CPUState *env;
353#if defined(DEBUG_FLUSH)
354 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
355 code_gen_ptr - code_gen_buffer,
356 nb_tbs,
357 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
358#endif
359 nb_tbs = 0;
360
361 for(env = first_cpu; env != NULL; env = env->next_cpu) {
362 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
363 }
364
365 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
366 page_flush_tb();
367
368 code_gen_ptr = code_gen_buffer;
369 /* XXX: flush processor icache at this point if cache flush is
370 expensive */
371 tb_flush_count++;
372}
373
374#ifdef DEBUG_TB_CHECK
375
376static void tb_invalidate_check(unsigned long address)
377{
378 TranslationBlock *tb;
379 int i;
380 address &= TARGET_PAGE_MASK;
381 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
382 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
383 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
384 address >= tb->pc + tb->size)) {
385 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
386 address, (long)tb->pc, tb->size);
387 }
388 }
389 }
390}
391
392/* verify that all the pages have correct rights for code */
393static void tb_page_check(void)
394{
395 TranslationBlock *tb;
396 int i, flags1, flags2;
397
398 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
399 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
400 flags1 = page_get_flags(tb->pc);
401 flags2 = page_get_flags(tb->pc + tb->size - 1);
402 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
403 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
404 (long)tb->pc, tb->size, flags1, flags2);
405 }
406 }
407 }
408}
409
410void tb_jmp_check(TranslationBlock *tb)
411{
412 TranslationBlock *tb1;
413 unsigned int n1;
414
415 /* suppress any remaining jumps to this TB */
416 tb1 = tb->jmp_first;
417 for(;;) {
418 n1 = (long)tb1 & 3;
419 tb1 = (TranslationBlock *)((long)tb1 & ~3);
420 if (n1 == 2)
421 break;
422 tb1 = tb1->jmp_next[n1];
423 }
424 /* check end of list */
425 if (tb1 != tb) {
426 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
427 }
428}
429
430#endif
431
432/* invalidate one TB */
433static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
434 int next_offset)
435{
436 TranslationBlock *tb1;
437 for(;;) {
438 tb1 = *ptb;
439 if (tb1 == tb) {
440 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
441 break;
442 }
443 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
444 }
445}
446
447static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
448{
449 TranslationBlock *tb1;
450 unsigned int n1;
451
452 for(;;) {
453 tb1 = *ptb;
454 n1 = (long)tb1 & 3;
455 tb1 = (TranslationBlock *)((long)tb1 & ~3);
456 if (tb1 == tb) {
457 *ptb = tb1->page_next[n1];
458 break;
459 }
460 ptb = &tb1->page_next[n1];
461 }
462}
463
464static inline void tb_jmp_remove(TranslationBlock *tb, int n)
465{
466 TranslationBlock *tb1, **ptb;
467 unsigned int n1;
468
469 ptb = &tb->jmp_next[n];
470 tb1 = *ptb;
471 if (tb1) {
472 /* find tb(n) in circular list */
473 for(;;) {
474 tb1 = *ptb;
475 n1 = (long)tb1 & 3;
476 tb1 = (TranslationBlock *)((long)tb1 & ~3);
477 if (n1 == n && tb1 == tb)
478 break;
479 if (n1 == 2) {
480 ptb = &tb1->jmp_first;
481 } else {
482 ptb = &tb1->jmp_next[n1];
483 }
484 }
485 /* now we can suppress tb(n) from the list */
486 *ptb = tb->jmp_next[n];
487
488 tb->jmp_next[n] = NULL;
489 }
490}
491
492/* reset the jump entry 'n' of a TB so that it is not chained to
493 another TB */
494static inline void tb_reset_jump(TranslationBlock *tb, int n)
495{
496 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
497}
498
499static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
500{
501 CPUState *env;
502 PageDesc *p;
503 unsigned int h, n1;
504 target_ulong phys_pc;
505 TranslationBlock *tb1, *tb2;
506
507 /* remove the TB from the hash list */
508 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
509 h = tb_phys_hash_func(phys_pc);
510 tb_remove(&tb_phys_hash[h], tb,
511 offsetof(TranslationBlock, phys_hash_next));
512
513 /* remove the TB from the page list */
514 if (tb->page_addr[0] != page_addr) {
515 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
516 tb_page_remove(&p->first_tb, tb);
517 invalidate_page_bitmap(p);
518 }
519 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
520 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
521 tb_page_remove(&p->first_tb, tb);
522 invalidate_page_bitmap(p);
523 }
524
525 tb_invalidated_flag = 1;
526
527 /* remove the TB from the hash list */
528 h = tb_jmp_cache_hash_func(tb->pc);
529 for(env = first_cpu; env != NULL; env = env->next_cpu) {
530 if (env->tb_jmp_cache[h] == tb)
531 env->tb_jmp_cache[h] = NULL;
532 }
533
534 /* suppress this TB from the two jump lists */
535 tb_jmp_remove(tb, 0);
536 tb_jmp_remove(tb, 1);
537
538 /* suppress any remaining jumps to this TB */
539 tb1 = tb->jmp_first;
540 for(;;) {
541 n1 = (long)tb1 & 3;
542 if (n1 == 2)
543 break;
544 tb1 = (TranslationBlock *)((long)tb1 & ~3);
545 tb2 = tb1->jmp_next[n1];
546 tb_reset_jump(tb1, n1);
547 tb1->jmp_next[n1] = NULL;
548 tb1 = tb2;
549 }
550 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
551
552#ifndef VBOX
553 tb_phys_invalidate_count++;
554#endif /* !VBOX */
555}
556
557#ifdef VBOX
558void tb_invalidate_virt(CPUState *env, uint32_t eip)
559{
560# if 1
561 tb_flush(env);
562# else
563 uint8_t *cs_base, *pc;
564 unsigned int flags, h, phys_pc;
565 TranslationBlock *tb, **ptb;
566
567 flags = env->hflags;
568 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
569 cs_base = env->segs[R_CS].base;
570 pc = cs_base + eip;
571
572 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
573 flags);
574
575 if(tb)
576 {
577# ifdef DEBUG
578 printf("invalidating TB (%08X) at %08X\n", tb, eip);
579# endif
580 tb_invalidate(tb);
581 //Note: this will leak TBs, but the whole cache will be flushed
582 // when it happens too often
583 tb->pc = 0;
584 tb->cs_base = 0;
585 tb->flags = 0;
586 }
587# endif
588}
589
590# ifdef VBOX_STRICT
591/**
592 * Gets the page offset.
593 */
594unsigned long get_phys_page_offset(target_ulong addr)
595{
596 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
597 return p ? p->phys_offset : 0;
598}
599# endif /* VBOX_STRICT */
600#endif /* VBOX */
601
602static inline void set_bits(uint8_t *tab, int start, int len)
603{
604 int end, mask, end1;
605
606 end = start + len;
607 tab += start >> 3;
608 mask = 0xff << (start & 7);
609 if ((start & ~7) == (end & ~7)) {
610 if (start < end) {
611 mask &= ~(0xff << (end & 7));
612 *tab |= mask;
613 }
614 } else {
615 *tab++ |= mask;
616 start = (start + 8) & ~7;
617 end1 = end & ~7;
618 while (start < end1) {
619 *tab++ = 0xff;
620 start += 8;
621 }
622 if (start < end) {
623 mask = ~(0xff << (end & 7));
624 *tab |= mask;
625 }
626 }
627}
628
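/* Illustrative trace of set_bits(), the helper build_page_bitmap() uses to
   mark the bytes of a page occupied by a TB: set_bits(bitmap, 5, 10) marks
   bits 5..14, i.e.

       byte 0: mask = 0xff << (5 & 7)     -> bits 5..7 set (0xE0)
       byte 1: mask = ~(0xff << (15 & 7)) -> bits 0..6 set (0x7F)

   so tb_invalidate_phys_page_fast() below only has to test a handful of bits
   to decide whether a guest store actually hit translated code. */
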
629static void build_page_bitmap(PageDesc *p)
630{
631 int n, tb_start, tb_end;
632 TranslationBlock *tb;
633
634 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
635 if (!p->code_bitmap)
636 return;
637 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
638
639 tb = p->first_tb;
640 while (tb != NULL) {
641 n = (long)tb & 3;
642 tb = (TranslationBlock *)((long)tb & ~3);
643 /* NOTE: this is subtle as a TB may span two physical pages */
644 if (n == 0) {
645 /* NOTE: tb_end may be after the end of the page, but
646 it is not a problem */
647 tb_start = tb->pc & ~TARGET_PAGE_MASK;
648 tb_end = tb_start + tb->size;
649 if (tb_end > TARGET_PAGE_SIZE)
650 tb_end = TARGET_PAGE_SIZE;
651 } else {
652 tb_start = 0;
653 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
654 }
655 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
656 tb = tb->page_next[n];
657 }
658}
659
660#ifdef TARGET_HAS_PRECISE_SMC
661
662static void tb_gen_code(CPUState *env,
663 target_ulong pc, target_ulong cs_base, int flags,
664 int cflags)
665{
666 TranslationBlock *tb;
667 uint8_t *tc_ptr;
668 target_ulong phys_pc, phys_page2, virt_page2;
669 int code_gen_size;
670
671 phys_pc = get_phys_addr_code(env, pc);
672 tb = tb_alloc(pc);
673 if (!tb) {
674 /* flush must be done */
675 tb_flush(env);
676 /* cannot fail at this point */
677 tb = tb_alloc(pc);
678 }
679 tc_ptr = code_gen_ptr;
680 tb->tc_ptr = tc_ptr;
681 tb->cs_base = cs_base;
682 tb->flags = flags;
683 tb->cflags = cflags;
684 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
685 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
686
687 /* check next page if needed */
688 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
689 phys_page2 = -1;
690 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
691 phys_page2 = get_phys_addr_code(env, virt_page2);
692 }
693 tb_link_phys(tb, phys_pc, phys_page2);
694}
695#endif
696
697/* invalidate all TBs which intersect with the target physical page
698 starting in range [start;end[. NOTE: start and end must refer to
699 the same physical page. 'is_cpu_write_access' should be true if called
700 from a real cpu write access: the virtual CPU will exit the current
701 TB if code is modified inside this TB. */
702void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
703 int is_cpu_write_access)
704{
705 int n, current_tb_modified, current_tb_not_found, current_flags;
706 CPUState *env = cpu_single_env;
707 PageDesc *p;
708 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
709 target_ulong tb_start, tb_end;
710 target_ulong current_pc, current_cs_base;
711
712 p = page_find(start >> TARGET_PAGE_BITS);
713 if (!p)
714 return;
715 if (!p->code_bitmap &&
716 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
717 is_cpu_write_access) {
718 /* build code bitmap */
719 build_page_bitmap(p);
720 }
721
722 /* we remove all the TBs in the range [start, end[ */
723 /* XXX: see if in some cases it could be faster to invalidate all the code */
724 current_tb_not_found = is_cpu_write_access;
725 current_tb_modified = 0;
726 current_tb = NULL; /* avoid warning */
727 current_pc = 0; /* avoid warning */
728 current_cs_base = 0; /* avoid warning */
729 current_flags = 0; /* avoid warning */
730 tb = p->first_tb;
731 while (tb != NULL) {
732 n = (long)tb & 3;
733 tb = (TranslationBlock *)((long)tb & ~3);
734 tb_next = tb->page_next[n];
735 /* NOTE: this is subtle as a TB may span two physical pages */
736 if (n == 0) {
737 /* NOTE: tb_end may be after the end of the page, but
738 it is not a problem */
739 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
740 tb_end = tb_start + tb->size;
741 } else {
742 tb_start = tb->page_addr[1];
743 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
744 }
745 if (!(tb_end <= start || tb_start >= end)) {
746#ifdef TARGET_HAS_PRECISE_SMC
747 if (current_tb_not_found) {
748 current_tb_not_found = 0;
749 current_tb = NULL;
750 if (env->mem_write_pc) {
751 /* now we have a real cpu fault */
752 current_tb = tb_find_pc(env->mem_write_pc);
753 }
754 }
755 if (current_tb == tb &&
756 !(current_tb->cflags & CF_SINGLE_INSN)) {
757 /* If we are modifying the current TB, we must stop
758 its execution. We could be more precise by checking
759 that the modification is after the current PC, but it
760 would require a specialized function to partially
761 restore the CPU state */
762
763 current_tb_modified = 1;
764 cpu_restore_state(current_tb, env,
765 env->mem_write_pc, NULL);
766#if defined(TARGET_I386)
767 current_flags = env->hflags;
768 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
769 current_cs_base = (target_ulong)env->segs[R_CS].base;
770 current_pc = current_cs_base + env->eip;
771#else
772#error unsupported CPU
773#endif
774 }
775#endif /* TARGET_HAS_PRECISE_SMC */
776 /* we need to do that to handle the case where a signal
777 occurs while doing tb_phys_invalidate() */
778 saved_tb = NULL;
779 if (env) {
780 saved_tb = env->current_tb;
781 env->current_tb = NULL;
782 }
783 tb_phys_invalidate(tb, -1);
784 if (env) {
785 env->current_tb = saved_tb;
786 if (env->interrupt_request && env->current_tb)
787 cpu_interrupt(env, env->interrupt_request);
788 }
789 }
790 tb = tb_next;
791 }
792#if !defined(CONFIG_USER_ONLY)
793 /* if no code remaining, no need to continue to use slow writes */
794 if (!p->first_tb) {
795 invalidate_page_bitmap(p);
796 if (is_cpu_write_access) {
797 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
798 }
799 }
800#endif
801#ifdef TARGET_HAS_PRECISE_SMC
802 if (current_tb_modified) {
803 /* we generate a block containing just the instruction
804 modifying the memory. It will ensure that it cannot modify
805 itself */
806 env->current_tb = NULL;
807 tb_gen_code(env, current_pc, current_cs_base, current_flags,
808 CF_SINGLE_INSN);
809 cpu_resume_from_signal(env, NULL);
810 }
811#endif
812}
813
814/* len must be <= 8 and start must be a multiple of len */
815static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
816{
817 PageDesc *p;
818 int offset, b;
819#if 0
820 if (1) {
821 if (loglevel) {
822 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
823 cpu_single_env->mem_write_vaddr, len,
824 cpu_single_env->eip,
825 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
826 }
827 }
828#endif
829 p = page_find(start >> TARGET_PAGE_BITS);
830 if (!p)
831 return;
832 if (p->code_bitmap) {
833 offset = start & ~TARGET_PAGE_MASK;
834 b = p->code_bitmap[offset >> 3] >> (offset & 7);
835 if (b & ((1 << len) - 1))
836 goto do_invalidate;
837 } else {
838 do_invalidate:
839 tb_invalidate_phys_page_range(start, start + len, 1);
840 }
841}
842
843#if !defined(CONFIG_SOFTMMU)
844static void tb_invalidate_phys_page(target_ulong addr,
845 unsigned long pc, void *puc)
846{
847 int n, current_flags, current_tb_modified;
848 target_ulong current_pc, current_cs_base;
849 PageDesc *p;
850 TranslationBlock *tb, *current_tb;
851#ifdef TARGET_HAS_PRECISE_SMC
852 CPUState *env = cpu_single_env;
853#endif
854
855 addr &= TARGET_PAGE_MASK;
856 p = page_find(addr >> TARGET_PAGE_BITS);
857 if (!p)
858 return;
859 tb = p->first_tb;
860 current_tb_modified = 0;
861 current_tb = NULL;
862 current_pc = 0; /* avoid warning */
863 current_cs_base = 0; /* avoid warning */
864 current_flags = 0; /* avoid warning */
865#ifdef TARGET_HAS_PRECISE_SMC
866 if (tb && pc != 0) {
867 current_tb = tb_find_pc(pc);
868 }
869#endif
870 while (tb != NULL) {
871 n = (long)tb & 3;
872 tb = (TranslationBlock *)((long)tb & ~3);
873#ifdef TARGET_HAS_PRECISE_SMC
874 if (current_tb == tb &&
875 !(current_tb->cflags & CF_SINGLE_INSN)) {
876 /* If we are modifying the current TB, we must stop
877 its execution. We could be more precise by checking
878 that the modification is after the current PC, but it
879 would require a specialized function to partially
880 restore the CPU state */
881
882 current_tb_modified = 1;
883 cpu_restore_state(current_tb, env, pc, puc);
884#if defined(TARGET_I386)
885 current_flags = env->hflags;
886 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
887 current_cs_base = (target_ulong)env->segs[R_CS].base;
888 current_pc = current_cs_base + env->eip;
889#else
890#error unsupported CPU
891#endif
892 }
893#endif /* TARGET_HAS_PRECISE_SMC */
894 tb_phys_invalidate(tb, addr);
895 tb = tb->page_next[n];
896 }
897 p->first_tb = NULL;
898#ifdef TARGET_HAS_PRECISE_SMC
899 if (current_tb_modified) {
900 /* we generate a block containing just the instruction
901 modifying the memory. It will ensure that it cannot modify
902 itself */
903 env->current_tb = NULL;
904 tb_gen_code(env, current_pc, current_cs_base, current_flags,
905 CF_SINGLE_INSN);
906 cpu_resume_from_signal(env, puc);
907 }
908#endif
909}
910#endif
911
912/* add the tb in the target page and protect it if necessary */
913static inline void tb_alloc_page(TranslationBlock *tb,
914 unsigned int n, target_ulong page_addr)
915{
916 PageDesc *p;
917 TranslationBlock *last_first_tb;
918
919 tb->page_addr[n] = page_addr;
920 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
921 tb->page_next[n] = p->first_tb;
922 last_first_tb = p->first_tb;
923 p->first_tb = (TranslationBlock *)((long)tb | n);
924 invalidate_page_bitmap(p);
925
926#if defined(TARGET_HAS_SMC) || 1
927
928#if defined(CONFIG_USER_ONLY)
929 if (p->flags & PAGE_WRITE) {
930 target_ulong addr;
931 PageDesc *p2;
932 int prot;
933
934 /* force the host page as non writable (writes will have a
935 page fault + mprotect overhead) */
936 page_addr &= qemu_host_page_mask;
937 prot = 0;
938 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
939 addr += TARGET_PAGE_SIZE) {
940
941 p2 = page_find (addr >> TARGET_PAGE_BITS);
942 if (!p2)
943 continue;
944 prot |= p2->flags;
945 p2->flags &= ~PAGE_WRITE;
946 page_get_flags(addr);
947 }
948 mprotect(g2h(page_addr), qemu_host_page_size,
949 (prot & PAGE_BITS) & ~PAGE_WRITE);
950#ifdef DEBUG_TB_INVALIDATE
951 printf("protecting code page: 0x%08lx\n",
952 page_addr);
953#endif
954 }
955#else
956 /* if some code is already present, then the pages are already
957 protected. So we handle the case where only the first TB is
958 allocated in a physical page */
959 if (!last_first_tb) {
960 tlb_protect_code(page_addr);
961 }
962#endif
963
964#endif /* TARGET_HAS_SMC */
965}
966
967/* Allocate a new translation block. Flush the translation buffer if
968 too many translation blocks or too much generated code. */
969TranslationBlock *tb_alloc(target_ulong pc)
970{
971 TranslationBlock *tb;
972
973 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
974 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
975 return NULL;
976 tb = &tbs[nb_tbs++];
977 tb->pc = pc;
978 tb->cflags = 0;
979 return tb;
980}
981
982/* add a new TB and link it to the physical page tables. phys_page2 is
983 (-1) to indicate that only one page contains the TB. */
984void tb_link_phys(TranslationBlock *tb,
985 target_ulong phys_pc, target_ulong phys_page2)
986{
987 unsigned int h;
988 TranslationBlock **ptb;
989
990 /* add in the physical hash table */
991 h = tb_phys_hash_func(phys_pc);
992 ptb = &tb_phys_hash[h];
993 tb->phys_hash_next = *ptb;
994 *ptb = tb;
995
996 /* add in the page list */
997 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
998 if (phys_page2 != -1)
999 tb_alloc_page(tb, 1, phys_page2);
1000 else
1001 tb->page_addr[1] = -1;
1002
1003 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1004 tb->jmp_next[0] = NULL;
1005 tb->jmp_next[1] = NULL;
1006#ifdef USE_CODE_COPY
1007 tb->cflags &= ~CF_FP_USED;
1008 if (tb->cflags & CF_TB_FP_USED)
1009 tb->cflags |= CF_FP_USED;
1010#endif
1011
1012 /* init original jump addresses */
1013 if (tb->tb_next_offset[0] != 0xffff)
1014 tb_reset_jump(tb, 0);
1015 if (tb->tb_next_offset[1] != 0xffff)
1016 tb_reset_jump(tb, 1);
1017
1018#ifdef DEBUG_TB_CHECK
1019 tb_page_check();
1020#endif
1021}
1022
1023/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1024 tb[1].tc_ptr. Return NULL if not found */
1025TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1026{
1027 int m_min, m_max, m;
1028 unsigned long v;
1029 TranslationBlock *tb;
1030
1031 if (nb_tbs <= 0)
1032 return NULL;
1033 if (tc_ptr < (unsigned long)code_gen_buffer ||
1034 tc_ptr >= (unsigned long)code_gen_ptr)
1035 return NULL;
1036 /* binary search (cf Knuth) */
1037 m_min = 0;
1038 m_max = nb_tbs - 1;
1039 while (m_min <= m_max) {
1040 m = (m_min + m_max) >> 1;
1041 tb = &tbs[m];
1042 v = (unsigned long)tb->tc_ptr;
1043 if (v == tc_ptr)
1044 return tb;
1045 else if (tc_ptr < v) {
1046 m_max = m - 1;
1047 } else {
1048 m_min = m + 1;
1049 }
1050 }
1051 return &tbs[m_max];
1052}
1053
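/* Minimal usage sketch for tb_find_pc(), assuming 'host_pc' and 'env' come
   from the fault/signal context that detected the problem:

       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb)
           cpu_restore_state(tb, env, host_pc, NULL);

   cpu_restore_state() then recomputes the guest PC and flags from the found
   TB; this is the same pattern used in tb_invalidate_phys_page_range() above
   with env->mem_write_pc. */
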
1054static void tb_reset_jump_recursive(TranslationBlock *tb);
1055
1056static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1057{
1058 TranslationBlock *tb1, *tb_next, **ptb;
1059 unsigned int n1;
1060
1061 tb1 = tb->jmp_next[n];
1062 if (tb1 != NULL) {
1063 /* find head of list */
1064 for(;;) {
1065 n1 = (long)tb1 & 3;
1066 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1067 if (n1 == 2)
1068 break;
1069 tb1 = tb1->jmp_next[n1];
1070 }
1071 /* we are now sure that tb jumps to tb1 */
1072 tb_next = tb1;
1073
1074 /* remove tb from the jmp_first list */
1075 ptb = &tb_next->jmp_first;
1076 for(;;) {
1077 tb1 = *ptb;
1078 n1 = (long)tb1 & 3;
1079 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1080 if (n1 == n && tb1 == tb)
1081 break;
1082 ptb = &tb1->jmp_next[n1];
1083 }
1084 *ptb = tb->jmp_next[n];
1085 tb->jmp_next[n] = NULL;
1086
1087 /* suppress the jump to next tb in generated code */
1088 tb_reset_jump(tb, n);
1089
1090 /* suppress jumps in the tb on which we could have jumped */
1091 tb_reset_jump_recursive(tb_next);
1092 }
1093}
1094
1095static void tb_reset_jump_recursive(TranslationBlock *tb)
1096{
1097 tb_reset_jump_recursive2(tb, 0);
1098 tb_reset_jump_recursive2(tb, 1);
1099}
1100
1101#if defined(TARGET_HAS_ICE)
1102static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1103{
1104 target_ulong addr, pd;
1105 ram_addr_t ram_addr;
1106 PhysPageDesc *p;
1107
1108 addr = cpu_get_phys_page_debug(env, pc);
1109 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1110 if (!p) {
1111 pd = IO_MEM_UNASSIGNED;
1112 } else {
1113 pd = p->phys_offset;
1114 }
1115 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1116 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1117}
1118#endif
1119
1120/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1121 breakpoint is reached */
1122int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1123{
1124#if defined(TARGET_HAS_ICE)
1125 int i;
1126
1127 for(i = 0; i < env->nb_breakpoints; i++) {
1128 if (env->breakpoints[i] == pc)
1129 return 0;
1130 }
1131
1132 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1133 return -1;
1134 env->breakpoints[env->nb_breakpoints++] = pc;
1135
1136 breakpoint_invalidate(env, pc);
1137 return 0;
1138#else
1139 return -1;
1140#endif
1141}
1142
1143/* remove a breakpoint */
1144int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1145{
1146#if defined(TARGET_HAS_ICE)
1147 int i;
1148 for(i = 0; i < env->nb_breakpoints; i++) {
1149 if (env->breakpoints[i] == pc)
1150 goto found;
1151 }
1152 return -1;
1153 found:
1154 env->nb_breakpoints--;
1155 if (i < env->nb_breakpoints)
1156 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1157
1158 breakpoint_invalidate(env, pc);
1159 return 0;
1160#else
1161 return -1;
1162#endif
1163}
1164
1165/* enable or disable single step mode. EXCP_DEBUG is returned by the
1166 CPU loop after each instruction */
1167void cpu_single_step(CPUState *env, int enabled)
1168{
1169#if defined(TARGET_HAS_ICE)
1170 if (env->singlestep_enabled != enabled) {
1171 env->singlestep_enabled = enabled;
1172 /* must flush all the translated code to avoid inconsistencies */
1173 /* XXX: only flush what is necessary */
1174 tb_flush(env);
1175 }
1176#endif
1177}
1178
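/* Minimal sketch of how debugger glue is expected to drive these helpers;
   'env' and 'pc' are assumed to be supplied by the caller:

       if (cpu_breakpoint_insert(env, pc) < 0)
           return -1;                  (table full, or no TARGET_HAS_ICE)
       cpu_single_step(env, 1);        (CPU loop returns EXCP_DEBUG per insn)
       ...
       cpu_single_step(env, 0);
       cpu_breakpoint_remove(env, pc);
 */
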
1179#ifndef VBOX
1180/* enable or disable low-level logging */
1181void cpu_set_log(int log_flags)
1182{
1183 loglevel = log_flags;
1184 if (loglevel && !logfile) {
1185 logfile = fopen(logfilename, "w");
1186 if (!logfile) {
1187 perror(logfilename);
1188 _exit(1);
1189 }
1190#if !defined(CONFIG_SOFTMMU)
1191 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1192 {
1193 static uint8_t logfile_buf[4096];
1194 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1195 }
1196#else
1197 setvbuf(logfile, NULL, _IOLBF, 0);
1198#endif
1199 }
1200}
1201
1202void cpu_set_log_filename(const char *filename)
1203{
1204 logfilename = strdup(filename);
1205}
1206#endif /* !VBOX */
1207
1208/* mask must never be zero, except for A20 change call */
1209void cpu_interrupt(CPUState *env, int mask)
1210{
1211 TranslationBlock *tb;
1212 static int interrupt_lock;
1213
1214#ifdef VBOX
1215 VM_ASSERT_EMT(env->pVM);
1216 ASMAtomicOrS32(&env->interrupt_request, mask);
1217#else /* !VBOX */
1218 env->interrupt_request |= mask;
1219#endif /* !VBOX */
1220 /* if the cpu is currently executing code, we must unlink it and
1221 all the potentially executing TB */
1222 tb = env->current_tb;
1223 if (tb && !testandset(&interrupt_lock)) {
1224 env->current_tb = NULL;
1225 tb_reset_jump_recursive(tb);
1226 interrupt_lock = 0;
1227 }
1228}
1229
1230void cpu_reset_interrupt(CPUState *env, int mask)
1231{
1232#ifdef VBOX
1233 /*
1234 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1235 * for future changes!
1236 */
1237 ASMAtomicAndS32(&env->interrupt_request, ~mask);
1238#else /* !VBOX */
1239 env->interrupt_request &= ~mask;
1240#endif /* !VBOX */
1241}
1242
1243#ifndef VBOX
1244CPULogItem cpu_log_items[] = {
1245 { CPU_LOG_TB_OUT_ASM, "out_asm",
1246 "show generated host assembly code for each compiled TB" },
1247 { CPU_LOG_TB_IN_ASM, "in_asm",
1248 "show target assembly code for each compiled TB" },
1249 { CPU_LOG_TB_OP, "op",
1250 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1251#ifdef TARGET_I386
1252 { CPU_LOG_TB_OP_OPT, "op_opt",
1253 "show micro ops after optimization for each compiled TB" },
1254#endif
1255 { CPU_LOG_INT, "int",
1256 "show interrupts/exceptions in short format" },
1257 { CPU_LOG_EXEC, "exec",
1258 "show trace before each executed TB (lots of logs)" },
1259 { CPU_LOG_TB_CPU, "cpu",
1260 "show CPU state before bloc translation" },
1261#ifdef TARGET_I386
1262 { CPU_LOG_PCALL, "pcall",
1263 "show protected mode far calls/returns/exceptions" },
1264#endif
1265#ifdef DEBUG_IOPORT
1266 { CPU_LOG_IOPORT, "ioport",
1267 "show all i/o ports accesses" },
1268#endif
1269 { 0, NULL, NULL },
1270};
1271
1272static int cmp1(const char *s1, int n, const char *s2)
1273{
1274 if (strlen(s2) != n)
1275 return 0;
1276 return memcmp(s1, s2, n) == 0;
1277}
1278
1279/* takes a comma separated list of log masks. Return 0 if error. */
1280int cpu_str_to_log_mask(const char *str)
1281{
1282 CPULogItem *item;
1283 int mask;
1284 const char *p, *p1;
1285
1286 p = str;
1287 mask = 0;
1288 for(;;) {
1289 p1 = strchr(p, ',');
1290 if (!p1)
1291 p1 = p + strlen(p);
1292 if(cmp1(p,p1-p,"all")) {
1293 for(item = cpu_log_items; item->mask != 0; item++) {
1294 mask |= item->mask;
1295 }
1296 } else {
1297 for(item = cpu_log_items; item->mask != 0; item++) {
1298 if (cmp1(p, p1 - p, item->name))
1299 goto found;
1300 }
1301 return 0;
1302 }
1303 found:
1304 mask |= item->mask;
1305 if (*p1 != ',')
1306 break;
1307 p = p1 + 1;
1308 }
1309 return mask;
1310}
1311#endif /* !VBOX */
1312
1313#ifndef VBOX /* VBOX: we have our own routine. */
1314void cpu_abort(CPUState *env, const char *fmt, ...)
1315{
1316 va_list ap;
1317
1318 va_start(ap, fmt);
1319 fprintf(stderr, "qemu: fatal: ");
1320 vfprintf(stderr, fmt, ap);
1321 fprintf(stderr, "\n");
1322#ifdef TARGET_I386
1323 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1324#else
1325 cpu_dump_state(env, stderr, fprintf, 0);
1326#endif
1327 va_end(ap);
1328 abort();
1329}
1330#endif /* !VBOX */
1331
1332#if !defined(CONFIG_USER_ONLY)
1333
1334/* NOTE: if flush_global is true, also flush global entries (not
1335 implemented yet) */
1336void tlb_flush(CPUState *env, int flush_global)
1337{
1338 int i;
1339
1340#if defined(DEBUG_TLB)
1341 printf("tlb_flush:\n");
1342#endif
1343 /* must reset current TB so that interrupts cannot modify the
1344 links while we are modifying them */
1345 env->current_tb = NULL;
1346
1347 for(i = 0; i < CPU_TLB_SIZE; i++) {
1348 env->tlb_table[0][i].addr_read = -1;
1349 env->tlb_table[0][i].addr_write = -1;
1350 env->tlb_table[0][i].addr_code = -1;
1351 env->tlb_table[1][i].addr_read = -1;
1352 env->tlb_table[1][i].addr_write = -1;
1353 env->tlb_table[1][i].addr_code = -1;
1354 }
1355
1356 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1357
1358#if !defined(CONFIG_SOFTMMU)
1359 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1360#endif
1361#ifdef VBOX
1362 /* inform raw mode about TLB flush */
1363 remR3FlushTLB(env, flush_global);
1364#endif
1365#ifdef USE_KQEMU
1366 if (env->kqemu_enabled) {
1367 kqemu_flush(env, flush_global);
1368 }
1369#endif
1370 tlb_flush_count++;
1371}
1372
1373static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1374{
1375 if (addr == (tlb_entry->addr_read &
1376 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1377 addr == (tlb_entry->addr_write &
1378 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1379 addr == (tlb_entry->addr_code &
1380 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1381 tlb_entry->addr_read = -1;
1382 tlb_entry->addr_write = -1;
1383 tlb_entry->addr_code = -1;
1384 }
1385}
1386
1387void tlb_flush_page(CPUState *env, target_ulong addr)
1388{
1389 int i;
1390 TranslationBlock *tb;
1391
1392#if defined(DEBUG_TLB)
1393 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1394#endif
1395 /* must reset current TB so that interrupts cannot modify the
1396 links while we are modifying them */
1397 env->current_tb = NULL;
1398
1399 addr &= TARGET_PAGE_MASK;
1400 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1401 tlb_flush_entry(&env->tlb_table[0][i], addr);
1402 tlb_flush_entry(&env->tlb_table[1][i], addr);
1403
1404 /* Discard jump cache entries for any tb which might potentially
1405 overlap the flushed page. */
1406 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1407 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1408
1409 i = tb_jmp_cache_hash_page(addr);
1410 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1411
1412#if !defined(CONFIG_SOFTMMU)
1413 if (addr < MMAP_AREA_END)
1414 munmap((void *)addr, TARGET_PAGE_SIZE);
1415#endif
1416#ifdef VBOX
1417 /* inform raw mode about TLB page flush */
1418 remR3FlushPage(env, addr);
1419#endif /* VBOX */
1420#ifdef USE_KQEMU
1421 if (env->kqemu_enabled) {
1422 kqemu_flush_page(env, addr);
1423 }
1424#endif
1425}
1426
1427/* update the TLBs so that writes to code in the virtual page 'addr'
1428 can be detected */
1429static void tlb_protect_code(ram_addr_t ram_addr)
1430{
1431 cpu_physical_memory_reset_dirty(ram_addr,
1432 ram_addr + TARGET_PAGE_SIZE,
1433 CODE_DIRTY_FLAG);
1434#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1435 /** @todo Retest this? This function has changed... */
1436 remR3ProtectCode(cpu_single_env, ram_addr);
1437#endif
1438}
1439
1440/* update the TLB so that writes in physical page 'phys_addr' are no longer
1441 tested for self modifying code */
1442static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1443 target_ulong vaddr)
1444{
1445#ifdef VBOX
1446 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1447#endif
1448 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1449}
1450
1451static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1452 unsigned long start, unsigned long length)
1453{
1454 unsigned long addr;
1455 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1456 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1457 if ((addr - start) < length) {
1458 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1459 }
1460 }
1461}
1462
1463void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1464 int dirty_flags)
1465{
1466 CPUState *env;
1467 unsigned long length, start1;
1468 int i, mask, len;
1469 uint8_t *p;
1470
1471 start &= TARGET_PAGE_MASK;
1472 end = TARGET_PAGE_ALIGN(end);
1473
1474 length = end - start;
1475 if (length == 0)
1476 return;
1477 len = length >> TARGET_PAGE_BITS;
1478#ifdef USE_KQEMU
1479 /* XXX: should not depend on cpu context */
1480 env = first_cpu;
1481 if (env->kqemu_enabled) {
1482 ram_addr_t addr;
1483 addr = start;
1484 for(i = 0; i < len; i++) {
1485 kqemu_set_notdirty(env, addr);
1486 addr += TARGET_PAGE_SIZE;
1487 }
1488 }
1489#endif
1490 mask = ~dirty_flags;
1491 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1492#ifdef VBOX
1493 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1494#endif
1495 for(i = 0; i < len; i++)
1496 p[i] &= mask;
1497
1498 /* we modify the TLB cache so that the dirty bit will be set again
1499 when accessing the range */
1500#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1501 start1 = start;
1502#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
1503 start1 = start + (unsigned long)phys_ram_base;
1504#else
1505 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
1506#endif
1507 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1508 for(i = 0; i < CPU_TLB_SIZE; i++)
1509 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1510 for(i = 0; i < CPU_TLB_SIZE; i++)
1511 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1512 }
1513
1514#if !defined(CONFIG_SOFTMMU)
1515#ifdef VBOX /**@todo remove this check */
1516# error "We shouldn't get here..."
1517#endif
1518 /* XXX: this is expensive */
1519 {
1520 VirtPageDesc *p;
1521 int j;
1522 target_ulong addr;
1523
1524 for(i = 0; i < L1_SIZE; i++) {
1525 p = l1_virt_map[i];
1526 if (p) {
1527 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1528 for(j = 0; j < L2_SIZE; j++) {
1529 if (p->valid_tag == virt_valid_tag &&
1530 p->phys_addr >= start && p->phys_addr < end &&
1531 (p->prot & PROT_WRITE)) {
1532 if (addr < MMAP_AREA_END) {
1533 mprotect((void *)addr, TARGET_PAGE_SIZE,
1534 p->prot & ~PROT_WRITE);
1535 }
1536 }
1537 addr += TARGET_PAGE_SIZE;
1538 p++;
1539 }
1540 }
1541 }
1542 }
1543#endif
1544}
1545
1546static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1547{
1548 ram_addr_t ram_addr;
1549
1550 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1551 /* RAM case */
1552#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1553 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1554#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
1555 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1556 tlb_entry->addend - (unsigned long)phys_ram_base;
1557#else
1558 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
1559#endif
1560 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1561 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1562 }
1563 }
1564}
1565
1566/* update the TLB according to the current state of the dirty bits */
1567void cpu_tlb_update_dirty(CPUState *env)
1568{
1569 int i;
1570 for(i = 0; i < CPU_TLB_SIZE; i++)
1571 tlb_update_dirty(&env->tlb_table[0][i]);
1572 for(i = 0; i < CPU_TLB_SIZE; i++)
1573 tlb_update_dirty(&env->tlb_table[1][i]);
1574}
1575
1576static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1577 unsigned long start)
1578{
1579 unsigned long addr;
1580 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1581 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1582 if (addr == start) {
1583 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1584 }
1585 }
1586}
1587
1588/* update the TLB corresponding to virtual page vaddr and phys addr
1589 addr so that it is no longer dirty */
1590static inline void tlb_set_dirty(CPUState *env,
1591 unsigned long addr, target_ulong vaddr)
1592{
1593 int i;
1594
1595 addr &= TARGET_PAGE_MASK;
1596 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1597 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1598 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1599}
1600
1601/* add a new TLB entry. At most one entry for a given virtual address
1602 is permitted. Return 0 if OK or 2 if the page could not be mapped
1603 (can only happen in non SOFTMMU mode for I/O pages or pages
1604 conflicting with the host address space). */
1605int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1606 target_phys_addr_t paddr, int prot,
1607 int is_user, int is_softmmu)
1608{
1609 PhysPageDesc *p;
1610 unsigned long pd;
1611 unsigned int index;
1612 target_ulong address;
1613 target_phys_addr_t addend;
1614 int ret;
1615 CPUTLBEntry *te;
1616
1617 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1618 if (!p) {
1619 pd = IO_MEM_UNASSIGNED;
1620 } else {
1621 pd = p->phys_offset;
1622 }
1623#if defined(DEBUG_TLB)
1624 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1625 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1626#endif
1627
1628 ret = 0;
1629#if !defined(CONFIG_SOFTMMU)
1630 if (is_softmmu)
1631#endif
1632 {
1633 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1634 /* IO memory case */
1635 address = vaddr | pd;
1636 addend = paddr;
1637 } else {
1638 /* standard memory */
1639 address = vaddr;
1640#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1641 addend = pd & TARGET_PAGE_MASK;
1642#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
1643 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1644#else
1645 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
1646#endif
1647 }
1648
1649 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1650 addend -= vaddr;
1651 te = &env->tlb_table[is_user][index];
1652 te->addend = addend;
1653 if (prot & PAGE_READ) {
1654 te->addr_read = address;
1655 } else {
1656 te->addr_read = -1;
1657 }
1658 if (prot & PAGE_EXEC) {
1659 te->addr_code = address;
1660 } else {
1661 te->addr_code = -1;
1662 }
1663 if (prot & PAGE_WRITE) {
1664 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1665 (pd & IO_MEM_ROMD)) {
1666 /* write access calls the I/O callback */
1667 te->addr_write = vaddr |
1668 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1669 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1670 !cpu_physical_memory_is_dirty(pd)) {
1671 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1672 } else {
1673 te->addr_write = address;
1674 }
1675 } else {
1676 te->addr_write = -1;
1677 }
1678#ifdef VBOX
1679 /* inform raw mode about TLB page change */
1680 /** @todo double check and fix this interface. OLD: remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user); */
1681 remR3SetPage(env, te, te, prot, is_user);
1682#endif
1683 }
1684#if !defined(CONFIG_SOFTMMU)
1685 else {
1686 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1687 /* IO access: no mapping is done as it will be handled by the
1688 soft MMU */
1689 if (!(env->hflags & HF_SOFTMMU_MASK))
1690 ret = 2;
1691 } else {
1692 void *map_addr;
1693
1694 if (vaddr >= MMAP_AREA_END) {
1695 ret = 2;
1696 } else {
1697 if (prot & PROT_WRITE) {
1698 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1699#if defined(TARGET_HAS_SMC) || 1
1700 first_tb ||
1701#endif
1702 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1703 !cpu_physical_memory_is_dirty(pd))) {
1704 /* ROM: we do as if code was inside */
1705 /* if code is present, we only map as read only and save the
1706 original mapping */
1707 VirtPageDesc *vp;
1708
1709 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1710 vp->phys_addr = pd;
1711 vp->prot = prot;
1712 vp->valid_tag = virt_valid_tag;
1713 prot &= ~PAGE_WRITE;
1714 }
1715 }
1716 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1717 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1718 if (map_addr == MAP_FAILED) {
1719 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1720 paddr, vaddr);
1721 }
1722 }
1723 }
1724 }
1725#endif
1726 return ret;
1727}
1728
1729/* called from signal handler: invalidate the code and unprotect the
1730 page. Return TRUE if the fault was successfully handled. */
1731int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1732{
1733#if !defined(CONFIG_SOFTMMU)
1734 VirtPageDesc *vp;
1735
1736#if defined(DEBUG_TLB)
1737 printf("page_unprotect: addr=0x%08x\n", addr);
1738#endif
1739 addr &= TARGET_PAGE_MASK;
1740
1741 /* if it is not mapped, no need to worry here */
1742 if (addr >= MMAP_AREA_END)
1743 return 0;
1744 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1745 if (!vp)
1746 return 0;
1747 /* NOTE: in this case, validate_tag is _not_ tested as it
1748 validates only the code TLB */
1749 if (vp->valid_tag != virt_valid_tag)
1750 return 0;
1751 if (!(vp->prot & PAGE_WRITE))
1752 return 0;
1753#if defined(DEBUG_TLB)
1754 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1755 addr, vp->phys_addr, vp->prot);
1756#endif
1757 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1758 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1759 (unsigned long)addr, vp->prot);
1760 /* set the dirty bit */
1761 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1762 /* flush the code inside */
1763 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1764 return 1;
1765#elif defined(VBOX)
1766 addr &= TARGET_PAGE_MASK;
1767
1768 /* if it is not mapped, no need to worry here */
1769 if (addr >= MMAP_AREA_END)
1770 return 0;
1771 return 1;
1772#else
1773 return 0;
1774#endif
1775}
1776
1777#else
1778
1779void tlb_flush(CPUState *env, int flush_global)
1780{
1781}
1782
1783void tlb_flush_page(CPUState *env, target_ulong addr)
1784{
1785}
1786
1787int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1788 target_phys_addr_t paddr, int prot,
1789 int is_user, int is_softmmu)
1790{
1791 return 0;
1792}
1793
1794#ifndef VBOX
1795/* dump memory mappings */
1796void page_dump(FILE *f)
1797{
1798 unsigned long start, end;
1799 int i, j, prot, prot1;
1800 PageDesc *p;
1801
1802 fprintf(f, "%-8s %-8s %-8s %s\n",
1803 "start", "end", "size", "prot");
1804 start = -1;
1805 end = -1;
1806 prot = 0;
1807 for(i = 0; i <= L1_SIZE; i++) {
1808 if (i < L1_SIZE)
1809 p = l1_map[i];
1810 else
1811 p = NULL;
1812 for(j = 0;j < L2_SIZE; j++) {
1813 if (!p)
1814 prot1 = 0;
1815 else
1816 prot1 = p[j].flags;
1817 if (prot1 != prot) {
1818 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1819 if (start != -1) {
1820 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1821 start, end, end - start,
1822 prot & PAGE_READ ? 'r' : '-',
1823 prot & PAGE_WRITE ? 'w' : '-',
1824 prot & PAGE_EXEC ? 'x' : '-');
1825 }
1826 if (prot1 != 0)
1827 start = end;
1828 else
1829 start = -1;
1830 prot = prot1;
1831 }
1832 if (!p)
1833 break;
1834 }
1835 }
1836}
1837#endif /* !VBOX */
1838
1839int page_get_flags(target_ulong address)
1840{
1841 PageDesc *p;
1842
1843 p = page_find(address >> TARGET_PAGE_BITS);
1844 if (!p)
1845 return 0;
1846 return p->flags;
1847}
1848
1849/* modify the flags of a page and invalidate the code if
1850 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1851 depending on PAGE_WRITE */
1852void page_set_flags(target_ulong start, target_ulong end, int flags)
1853{
1854 PageDesc *p;
1855 target_ulong addr;
1856
1857 start = start & TARGET_PAGE_MASK;
1858 end = TARGET_PAGE_ALIGN(end);
1859 if (flags & PAGE_WRITE)
1860 flags |= PAGE_WRITE_ORG;
1861#ifdef VBOX
1862 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
1863#endif
1864 spin_lock(&tb_lock);
1865 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1866 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1867 /* if the write protection is set, then we invalidate the code
1868 inside */
1869 if (!(p->flags & PAGE_WRITE) &&
1870 (flags & PAGE_WRITE) &&
1871 p->first_tb) {
1872 tb_invalidate_phys_page(addr, 0, NULL);
1873 }
1874 p->flags = flags;
1875 }
1876 spin_unlock(&tb_lock);
1877}
1878
1879/* called from signal handler: invalidate the code and unprotect the
1880 page. Return TRUE if the fault was successfully handled. */
1881int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1882{
1883 unsigned int page_index, prot, pindex;
1884 PageDesc *p, *p1;
1885 target_ulong host_start, host_end, addr;
1886
1887 host_start = address & qemu_host_page_mask;
1888 page_index = host_start >> TARGET_PAGE_BITS;
1889 p1 = page_find(page_index);
1890 if (!p1)
1891 return 0;
1892 host_end = host_start + qemu_host_page_size;
1893 p = p1;
1894 prot = 0;
1895 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1896 prot |= p->flags;
1897 p++;
1898 }
1899 /* if the page was really writable, then we change its
1900 protection back to writable */
1901 if (prot & PAGE_WRITE_ORG) {
1902 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1903 if (!(p1[pindex].flags & PAGE_WRITE)) {
1904 mprotect((void *)g2h(host_start), qemu_host_page_size,
1905 (prot & PAGE_BITS) | PAGE_WRITE);
1906 p1[pindex].flags |= PAGE_WRITE;
1907 /* and since the content will be modified, we must invalidate
1908 the corresponding translated code. */
1909 tb_invalidate_phys_page(address, pc, puc);
1910#ifdef DEBUG_TB_CHECK
1911 tb_invalidate_check(address);
1912#endif
1913 return 1;
1914 }
1915 }
1916 return 0;
1917}
1918
1919/* call this function when system calls directly modify a memory area */
1920/* ??? This should be redundant now we have lock_user. */
1921void page_unprotect_range(target_ulong data, target_ulong data_size)
1922{
1923 target_ulong start, end, addr;
1924
1925 start = data;
1926 end = start + data_size;
1927 start &= TARGET_PAGE_MASK;
1928 end = TARGET_PAGE_ALIGN(end);
1929 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1930 page_unprotect(addr, 0, NULL);
1931 }
1932}
1933
1934static inline void tlb_set_dirty(CPUState *env,
1935 unsigned long addr, target_ulong vaddr)
1936{
1937}
1938#endif /* defined(CONFIG_USER_ONLY) */
1939
1940/* register physical memory. 'size' must be a multiple of the target
1941 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1942 io memory page */
1943void cpu_register_physical_memory(target_phys_addr_t start_addr,
1944 unsigned long size,
1945 unsigned long phys_offset)
1946{
1947 target_phys_addr_t addr, end_addr;
1948 PhysPageDesc *p;
1949 CPUState *env;
1950
1951 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1952 end_addr = start_addr + size;
1953 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1954 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1955 p->phys_offset = phys_offset;
1956#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
1957 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1958 (phys_offset & IO_MEM_ROMD))
1959#else
1960 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
1961 || (phys_offset & IO_MEM_ROMD)
1962 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
1963#endif
1964
1965 phys_offset += TARGET_PAGE_SIZE;
1966 }
1967
1968 /* since each CPU stores ram addresses in its TLB cache, we must
1969 reset the modified entries */
1970 /* XXX: slow ! */
1971 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1972 tlb_flush(env, 1);
1973 }
1974}
1975
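/* Minimal sketch of the expected call pattern, where 'ram_bytes' is a
   hypothetical RAM size, 'ram_offset' a page-aligned offset into
   phys_ram_base, and 'mmio_index' a value previously returned by
   cpu_register_io_memory():

       cpu_register_physical_memory(0x00000000, ram_bytes,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0xe0000000, 0x1000, mmio_index);

   The low bits of phys_offset select RAM vs. an io_mem handler slot, which is
   why the loop above only advances phys_offset for RAM/ROM(D) pages. */
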
1976/* XXX: temporary until new memory mapping API */
1977uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1978{
1979 PhysPageDesc *p;
1980
1981 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1982 if (!p)
1983 return IO_MEM_UNASSIGNED;
1984 return p->phys_offset;
1985}
1986
1987static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1988{
1989#ifdef DEBUG_UNASSIGNED
1990 printf("Unassigned mem read 0x%08x\n", (int)addr);
1991#endif
1992 return 0;
1993}
1994
1995static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1996{
1997#ifdef DEBUG_UNASSIGNED
1998 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1999#endif
2000}
2001
2002static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2003 unassigned_mem_readb,
2004 unassigned_mem_readb,
2005 unassigned_mem_readb,
2006};
2007
2008static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2009 unassigned_mem_writeb,
2010 unassigned_mem_writeb,
2011 unassigned_mem_writeb,
2012};
2013
2014static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2015{
2016 unsigned long ram_addr;
2017 int dirty_flags;
2018#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2019 ram_addr = addr;
2020#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
2021 ram_addr = addr - (unsigned long)phys_ram_base;
2022#else
2023 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2024#endif
2025#ifdef VBOX
2026 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2027 dirty_flags = 0xff;
2028 else
2029#endif /* VBOX */
2030 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2031 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2032#if !defined(CONFIG_USER_ONLY)
2033 tb_invalidate_phys_page_fast(ram_addr, 1);
2034# ifdef VBOX
2035 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2036 dirty_flags = 0xff;
2037 else
2038# endif /* VBOX */
2039 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2040#endif
2041 }
2042 stb_p((uint8_t *)(long)addr, val);
2043#ifdef USE_KQEMU
2044 if (cpu_single_env->kqemu_enabled &&
2045 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2046 kqemu_modify_page(cpu_single_env, ram_addr);
2047#endif
2048 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2049#ifdef VBOX
2050 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2051#endif /* VBOX */
2052 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2053 /* we remove the notdirty callback only if the code has been
2054 flushed */
2055 if (dirty_flags == 0xff)
2056 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2057}
2058
2059static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2060{
2061 unsigned long ram_addr;
2062 int dirty_flags;
2063#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2064 ram_addr = addr;
2065#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
2066 ram_addr = addr - (unsigned long)phys_ram_base;
2067#else
2068 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2069#endif
2070#ifdef VBOX
2071 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2072 dirty_flags = 0xff;
2073 else
2074#endif /* VBOX */
2075 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2076 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2077#if !defined(CONFIG_USER_ONLY)
2078 tb_invalidate_phys_page_fast(ram_addr, 2);
2079# ifdef VBOX
2080 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2081 dirty_flags = 0xff;
2082 else
2083# endif /* VBOX */
2084 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2085#endif
2086 }
2087 stw_p((uint8_t *)(long)addr, val);
2088#ifdef USE_KQEMU
2089 if (cpu_single_env->kqemu_enabled &&
2090 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2091 kqemu_modify_page(cpu_single_env, ram_addr);
2092#endif
2093 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2094#ifdef VBOX
2095 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2096#endif
2097 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2098 /* we remove the notdirty callback only if the code has been
2099 flushed */
2100 if (dirty_flags == 0xff)
2101 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2102}
2103
2104static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2105{
2106 unsigned long ram_addr;
2107 int dirty_flags;
2108#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2109 ram_addr = addr;
2110#elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
2111 ram_addr = addr - (unsigned long)phys_ram_base;
2112#else
2113 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2114#endif
2115#ifdef VBOX
2116 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2117 dirty_flags = 0xff;
2118 else
2119#endif /* VBOX */
2120 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2121 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2122#if !defined(CONFIG_USER_ONLY)
2123 tb_invalidate_phys_page_fast(ram_addr, 4);
2124# ifdef VBOX
2125 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2126 dirty_flags = 0xff;
2127 else
2128# endif /* VBOX */
2129 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2130#endif
2131 }
2132 stl_p((uint8_t *)(long)addr, val);
2133#ifdef USE_KQEMU
2134 if (cpu_single_env->kqemu_enabled &&
2135 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2136 kqemu_modify_page(cpu_single_env, ram_addr);
2137#endif
2138 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2139#ifdef VBOX
2140 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2141#endif
2142 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2143 /* we remove the notdirty callback only if the code has been
2144 flushed */
2145 if (dirty_flags == 0xff)
2146 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2147}
2148
2149static CPUReadMemoryFunc *error_mem_read[3] = {
2150 NULL, /* never used */
2151 NULL, /* never used */
2152 NULL, /* never used */
2153};
2154
2155static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2156 notdirty_mem_writeb,
2157 notdirty_mem_writew,
2158 notdirty_mem_writel,
2159};
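#if 0 /* Illustrative sketch only, not part of the original file: the
         notdirty handlers above set bits in phys_ram_dirty[], and consumers
         such as a display device poll them.  This assumes the usual
         cpu_physical_memory_get_dirty()/cpu_physical_memory_reset_dirty()
         helpers and VGA_DIRTY_FLAG from cpu-all.h/exec.c; the function name
         and range are hypothetical.  'start'/'end' are ram offsets. */
static void scan_dirty_pages_sketch(unsigned long start, unsigned long end)
{
    unsigned long addr;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* redraw (or migrate) this page, then clear only our flag so
               code-dirty tracking is left untouched */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            VGA_DIRTY_FLAG);
        }
    }
}
#endif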
2160
2161static void io_mem_init(void)
2162{
2163 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2164 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2165 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2166#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
2167 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2168 io_mem_nb = 6;
2169#else
2170 io_mem_nb = 5;
2171#endif
2172
2173#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2174 /* alloc dirty bits array */
2175 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2176 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2177#endif /* !VBOX */
2178}
2179
2180/* mem_read and mem_write are arrays of functions containing the
2181 function to access byte (index 0), word (index 1) and dword (index
2182 2). All functions must be supplied. If io_index is non-zero, the
2183 corresponding io zone is modified. If it is zero, a new io zone is
2184 allocated. The return value can be used with
2185 cpu_register_physical_memory(). (-1) is returned on error. */
2186int cpu_register_io_memory(int io_index,
2187 CPUReadMemoryFunc **mem_read,
2188 CPUWriteMemoryFunc **mem_write,
2189 void *opaque)
2190{
2191 int i;
2192
2193 if (io_index <= 0) {
2194 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2195 return -1;
2196 io_index = io_mem_nb++;
2197 } else {
2198 if (io_index >= IO_MEM_NB_ENTRIES)
2199 return -1;
2200 }
2201
2202 for(i = 0;i < 3; i++) {
2203 io_mem_read[io_index][i] = mem_read[i];
2204 io_mem_write[io_index][i] = mem_write[i];
2205 }
2206 io_mem_opaque[io_index] = opaque;
2207 return io_index << IO_MEM_SHIFT;
2208}
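#if 0 /* Illustrative sketch only, not part of the original file: a minimal
         device supplies one read and one write handler per access size and
         maps the returned token with cpu_register_physical_memory().  All
         'demo_*' names and the 0xfee00000 address are hypothetical. */
static uint32_t demo_mmio_read(void *opaque, target_phys_addr_t addr)
{
    return 0;                          /* a real device decodes 'addr' here */
}

static void demo_mmio_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device latches 'val' here */
}

static CPUReadMemoryFunc *demo_mmio_read_fns[3] = {
    demo_mmio_read, demo_mmio_read, demo_mmio_read     /* byte, word, dword */
};
static CPUWriteMemoryFunc *demo_mmio_write_fns[3] = {
    demo_mmio_write, demo_mmio_write, demo_mmio_write
};

static void demo_mmio_init(void)
{
    /* io_index 0 asks for a new io zone to be allocated */
    int io = cpu_register_io_memory(0, demo_mmio_read_fns,
                                    demo_mmio_write_fns, NULL /* opaque */);
    cpu_register_physical_memory(0xfee00000, TARGET_PAGE_SIZE, io);
}
#endif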
2209
2210CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2211{
2212 return io_mem_write[io_index >> IO_MEM_SHIFT];
2213}
2214
2215CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2216{
2217 return io_mem_read[io_index >> IO_MEM_SHIFT];
2218}
2219
2220/* physical memory access (slow version, mainly for debug) */
2221#if defined(CONFIG_USER_ONLY)
2222void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2223 int len, int is_write)
2224{
2225 int l, flags;
2226 target_ulong page;
2227 void * p;
2228
2229 while (len > 0) {
2230 page = addr & TARGET_PAGE_MASK;
2231 l = (page + TARGET_PAGE_SIZE) - addr;
2232 if (l > len)
2233 l = len;
2234 flags = page_get_flags(page);
2235 if (!(flags & PAGE_VALID))
2236 return;
2237 if (is_write) {
2238 if (!(flags & PAGE_WRITE))
2239 return;
2240 p = lock_user(addr, l, 0);
2241 memcpy(p, buf, l);
2242 unlock_user(p, addr, l);
2243 } else {
2244 if (!(flags & PAGE_READ))
2245 return;
2246 p = lock_user(addr, l, 1);
2247 memcpy(buf, p, l);
2248 unlock_user(p, addr, 0);
2249 }
2250 len -= l;
2251 buf += l;
2252 addr += l;
2253 }
2254}
2255
2256#else
2257void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2258 int len, int is_write)
2259{
2260 int l, io_index;
2261 uint8_t *ptr;
2262 uint32_t val;
2263 target_phys_addr_t page;
2264 unsigned long pd;
2265 PhysPageDesc *p;
2266
2267 while (len > 0) {
2268 page = addr & TARGET_PAGE_MASK;
2269 l = (page + TARGET_PAGE_SIZE) - addr;
2270 if (l > len)
2271 l = len;
2272 p = phys_page_find(page >> TARGET_PAGE_BITS);
2273 if (!p) {
2274 pd = IO_MEM_UNASSIGNED;
2275 } else {
2276 pd = p->phys_offset;
2277 }
2278
2279 if (is_write) {
2280 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2281 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2282 /* XXX: could force cpu_single_env to NULL to avoid
2283 potential bugs */
2284 if (l >= 4 && ((addr & 3) == 0)) {
2285 /* 32 bit write access */
2286#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2287 val = ldl_p(buf);
2288#else
2289 val = *(const uint32_t *)buf;
2290#endif
2291 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2292 l = 4;
2293 } else if (l >= 2 && ((addr & 1) == 0)) {
2294 /* 16 bit write access */
2295#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2296 val = lduw_p(buf);
2297#else
2298 val = *(const uint16_t *)buf;
2299#endif
2300 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2301 l = 2;
2302 } else {
2303 /* 8 bit write access */
2304#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2305 val = ldub_p(buf);
2306#else
2307 val = *(const uint8_t *)buf;
2308#endif
2309 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2310 l = 1;
2311 }
2312 } else {
2313 unsigned long addr1;
2314 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2315 /* RAM case */
2316#ifdef VBOX
2317 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
2318#else
2319 ptr = phys_ram_base + addr1;
2320 memcpy(ptr, buf, l);
2321#endif
2322 if (!cpu_physical_memory_is_dirty(addr1)) {
2323 /* invalidate code */
2324 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2325 /* set dirty bit */
2326#ifdef VBOX
2327 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2328#endif
2329 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2330 (0xff & ~CODE_DIRTY_FLAG);
2331 }
2332 }
2333 } else {
2334 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2335 !(pd & IO_MEM_ROMD)) {
2336 /* I/O case */
2337 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2338 if (l >= 4 && ((addr & 3) == 0)) {
2339 /* 32 bit read access */
2340 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2341#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2342 stl_p(buf, val);
2343#else
2344 *(uint32_t *)buf = val;
2345#endif
2346 l = 4;
2347 } else if (l >= 2 && ((addr & 1) == 0)) {
2348 /* 16 bit read access */
2349 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2350#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2351 stw_p(buf, val);
2352#else
2353 *(uint16_t *)buf = val;
2354#endif
2355 l = 2;
2356 } else {
2357 /* 8 bit read access */
2358 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2359#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2360 stb_p(buf, val);
2361#else
2362 *(uint8_t *)buf = val;
2363#endif
2364 l = 1;
2365 }
2366 } else {
2367 /* RAM case */
2368#ifdef VBOX
2369 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
2370#else
2371 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2372 (addr & ~TARGET_PAGE_MASK);
2373 memcpy(buf, ptr, l);
2374#endif
2375 }
2376 }
2377 len -= l;
2378 buf += l;
2379 addr += l;
2380 }
2381}
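#if 0 /* Illustrative sketch only, not part of the original file: device DMA
         code normally goes through the cpu_physical_memory_read()/
         cpu_physical_memory_write() wrappers (cpu-all.h), which call
         cpu_physical_memory_rw() with is_write 0/1.  The bounce buffer and
         the function name are hypothetical. */
static void dma_copy_sketch(target_phys_addr_t src, target_phys_addr_t dst,
                            int len)
{
    uint8_t tmp[256];
    while (len > 0) {
        int chunk = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, chunk);
        cpu_physical_memory_write(dst, tmp, chunk);
        src += chunk;
        dst += chunk;
        len -= chunk;
    }
}
#endif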
2382
2383#ifndef VBOX
2384/* used for ROM loading : can write in RAM and ROM */
2385void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2386 const uint8_t *buf, int len)
2387{
2388 int l;
2389 uint8_t *ptr;
2390 target_phys_addr_t page;
2391 unsigned long pd;
2392 PhysPageDesc *p;
2393
2394 while (len > 0) {
2395 page = addr & TARGET_PAGE_MASK;
2396 l = (page + TARGET_PAGE_SIZE) - addr;
2397 if (l > len)
2398 l = len;
2399 p = phys_page_find(page >> TARGET_PAGE_BITS);
2400 if (!p) {
2401 pd = IO_MEM_UNASSIGNED;
2402 } else {
2403 pd = p->phys_offset;
2404 }
2405
2406 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2407 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2408 !(pd & IO_MEM_ROMD)) {
2409 /* do nothing */
2410 } else {
2411 unsigned long addr1;
2412 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2413 /* ROM/RAM case */
2414 ptr = phys_ram_base + addr1;
2415 memcpy(ptr, buf, l);
2416 }
2417 len -= l;
2418 buf += l;
2419 addr += l;
2420 }
2421}
2422#endif /* !VBOX */
2423
2424
2425/* warning: addr must be aligned */
2426uint32_t ldl_phys(target_phys_addr_t addr)
2427{
2428 int io_index;
2429 uint8_t *ptr;
2430 uint32_t val;
2431 unsigned long pd;
2432 PhysPageDesc *p;
2433
2434 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2435 if (!p) {
2436 pd = IO_MEM_UNASSIGNED;
2437 } else {
2438 pd = p->phys_offset;
2439 }
2440
2441 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2442 !(pd & IO_MEM_ROMD)) {
2443 /* I/O case */
2444 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2445 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2446 } else {
2447 /* RAM case */
2448#ifndef VBOX
2449 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2450 (addr & ~TARGET_PAGE_MASK);
2451 val = ldl_p(ptr);
2452#else
2453 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2454#endif
2455 }
2456 return val;
2457}
2458
2459/* warning: addr must be aligned */
2460uint64_t ldq_phys(target_phys_addr_t addr)
2461{
2462 int io_index;
2463 uint8_t *ptr;
2464 uint64_t val;
2465 unsigned long pd;
2466 PhysPageDesc *p;
2467
2468 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2469 if (!p) {
2470 pd = IO_MEM_UNASSIGNED;
2471 } else {
2472 pd = p->phys_offset;
2473 }
2474
2475 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2476 !(pd & IO_MEM_ROMD)) {
2477 /* I/O case */
2478 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2479#ifdef TARGET_WORDS_BIGENDIAN
2480 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2481 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2482#else
2483 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2484 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2485#endif
2486 } else {
2487 /* RAM case */
2488#ifndef VBOX
2489 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2490 (addr & ~TARGET_PAGE_MASK);
2491 val = ldq_p(ptr);
2492#else
2493 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2494#endif
2495 }
2496 return val;
2497}
2498
2499/* XXX: optimize */
2500uint32_t ldub_phys(target_phys_addr_t addr)
2501{
2502 uint8_t val;
2503 cpu_physical_memory_read(addr, &val, 1);
2504 return val;
2505}
2506
2507/* XXX: optimize */
2508uint32_t lduw_phys(target_phys_addr_t addr)
2509{
2510 uint16_t val;
2511 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2512 return tswap16(val);
2513}
2514
2515/* warning: addr must be aligned. The ram page is not marked as dirty
2516 and the code inside is not invalidated. It is useful if the dirty
2517 bits are used to track modified PTEs */
2518void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2519{
2520 int io_index;
2521 uint8_t *ptr;
2522 unsigned long pd;
2523 PhysPageDesc *p;
2524
2525 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2526 if (!p) {
2527 pd = IO_MEM_UNASSIGNED;
2528 } else {
2529 pd = p->phys_offset;
2530 }
2531
2532 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2533 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2534 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2535 } else {
2536#ifndef VBOX
2537 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2538 (addr & ~TARGET_PAGE_MASK);
2539 stl_p(ptr, val);
2540#else
2541 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2542#endif
2543 }
2544}
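#if 0 /* Illustrative sketch only, not part of the original file: the typical
         caller is target MMU code that sets accessed/dirty bits in a guest
         page-table entry.  Using stl_phys_notdirty() keeps the host dirty
         bitmap untouched, so PTE updates do not defeat dirty-page tracking.
         The bit positions follow the x86 PTE layout (bit 5 = accessed,
         bit 6 = dirty) and are shown purely for illustration. */
static void mark_pte_accessed_sketch(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);
    uint32_t new_pte = pte | (1 << 5);          /* accessed */
    if (is_write)
        new_pte |= (1 << 6);                    /* dirty */
    if (new_pte != pte)
        stl_phys_notdirty(pte_addr, new_pte);
}
#endif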
2545
2546/* warning: addr must be aligned */
2547void stl_phys(target_phys_addr_t addr, uint32_t val)
2548{
2549 int io_index;
2550 uint8_t *ptr;
2551 unsigned long pd;
2552 PhysPageDesc *p;
2553
2554 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2555 if (!p) {
2556 pd = IO_MEM_UNASSIGNED;
2557 } else {
2558 pd = p->phys_offset;
2559 }
2560
2561 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2562 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2563 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2564 } else {
2565 unsigned long addr1;
2566 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2567 /* RAM case */
2568#ifndef VBOX
2569 ptr = phys_ram_base + addr1;
2570 stl_p(ptr, val);
2571#else
2572 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2573#endif
2574 if (!cpu_physical_memory_is_dirty(addr1)) {
2575 /* invalidate code */
2576 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2577 /* set dirty bit */
2578#ifdef VBOX
2579 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2580#endif
2581 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2582 (0xff & ~CODE_DIRTY_FLAG);
2583 }
2584 }
2585}
2586
2587/* XXX: optimize */
2588void stb_phys(target_phys_addr_t addr, uint32_t val)
2589{
2590 uint8_t v = val;
2591 cpu_physical_memory_write(addr, &v, 1);
2592}
2593
2594/* XXX: optimize */
2595void stw_phys(target_phys_addr_t addr, uint32_t val)
2596{
2597 uint16_t v = tswap16(val);
2598 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2599}
2600
2601/* XXX: optimize */
2602void stq_phys(target_phys_addr_t addr, uint64_t val)
2603{
2604 val = tswap64(val);
2605 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2606}
2607
2608#endif
2609
2610#ifndef VBOX
2611/* virtual memory access for debug */
2612int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2613 uint8_t *buf, int len, int is_write)
2614{
2615 int l;
2616 target_ulong page, phys_addr;
2617
2618 while (len > 0) {
2619 page = addr & TARGET_PAGE_MASK;
2620 phys_addr = cpu_get_phys_page_debug(env, page);
2621 /* if no physical page mapped, return an error */
2622 if (phys_addr == -1)
2623 return -1;
2624 l = (page + TARGET_PAGE_SIZE) - addr;
2625 if (l > len)
2626 l = len;
2627 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2628 buf, l, is_write);
2629 len -= l;
2630 buf += l;
2631 addr += l;
2632 }
2633 return 0;
2634}
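#if 0 /* Illustrative sketch only, not part of the original file: this is the
         routine a debugger front end (e.g. a gdb stub) uses to read guest
         virtual memory; the wrapper name is hypothetical. */
static int debug_read_sketch(CPUState *env, target_ulong vaddr,
                             uint8_t *out, int len)
{
    /* returns -1 if any page in the range has no physical mapping */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* is_write */);
}
#endif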
2635
2636void dump_exec_info(FILE *f,
2637 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2638{
2639 int i, target_code_size, max_target_code_size;
2640 int direct_jmp_count, direct_jmp2_count, cross_page;
2641 TranslationBlock *tb;
2642
2643 target_code_size = 0;
2644 max_target_code_size = 0;
2645 cross_page = 0;
2646 direct_jmp_count = 0;
2647 direct_jmp2_count = 0;
2648 for(i = 0; i < nb_tbs; i++) {
2649 tb = &tbs[i];
2650 target_code_size += tb->size;
2651 if (tb->size > max_target_code_size)
2652 max_target_code_size = tb->size;
2653 if (tb->page_addr[1] != -1)
2654 cross_page++;
2655 if (tb->tb_next_offset[0] != 0xffff) {
2656 direct_jmp_count++;
2657 if (tb->tb_next_offset[1] != 0xffff) {
2658 direct_jmp2_count++;
2659 }
2660 }
2661 }
2662 /* XXX: avoid using doubles ? */
2663 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2664 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2665 nb_tbs ? target_code_size / nb_tbs : 0,
2666 max_target_code_size);
2667 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2668 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2669 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2670 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2671 cross_page,
2672 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2673 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2674 direct_jmp_count,
2675 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2676 direct_jmp2_count,
2677 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2678 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2679 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2680 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2681}
2682#endif /* !VBOX */
2683
2684#if !defined(CONFIG_USER_ONLY)
2685
2686#define MMUSUFFIX _cmmu
2687#define GETPC() NULL
2688#define env cpu_single_env
2689#define SOFTMMU_CODE_ACCESS
2690
2691#define SHIFT 0
2692#include "softmmu_template.h"
2693
2694#define SHIFT 1
2695#include "softmmu_template.h"
2696
2697#define SHIFT 2
2698#include "softmmu_template.h"
2699
2700#define SHIFT 3
2701#include "softmmu_template.h"
2702
2703#undef env
2704
2705#endif