VirtualBox

source: vbox/trunk/src/recompiler/exec.c@13005

Last change on this file since 13005 was 11982, checked in by vboxsync, 16 years ago

All: license header changes for 2.0 (OSE headers, add Sun GPL/LGPL disclaimer)

  • Property svn:eol-style set to native
File size: 79.1 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <inttypes.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
60//#define DEBUG_TB_INVALIDATE
61//#define DEBUG_FLUSH
62//#define DEBUG_TLB
63//#define DEBUG_UNASSIGNED
64
65/* make various TB consistency checks */
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74/* threshold to flush the translated code buffer */
75#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
76
77#define SMC_BITMAP_USE_THRESHOLD 10
78
79#define MMAP_AREA_START 0x00000000
80#define MMAP_AREA_END 0xa8000000
81
82#if defined(TARGET_SPARC64)
83#define TARGET_PHYS_ADDR_SPACE_BITS 41
84#elif defined(TARGET_PPC64)
85#define TARGET_PHYS_ADDR_SPACE_BITS 42
86#else
87/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
88#define TARGET_PHYS_ADDR_SPACE_BITS 32
89#endif
90
91TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
92TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
93int nb_tbs;
94/* any access to the tbs or the page table must use this lock */
95spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
96
97uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
98#if defined(__MINGW32__)
99 __attribute__((aligned (16)));
100#else
101 __attribute__((aligned (32)));
102#endif
103uint8_t *code_gen_ptr;
104
105#ifndef VBOX
106int phys_ram_size;
107int phys_ram_fd;
108int phys_ram_size;
109#else /* VBOX */
110RTGCPHYS phys_ram_size;
111/* we have memory ranges (the high PC-BIOS mapping) which
112 cause some pages to fall outside the dirty map here. */
113uint32_t phys_ram_dirty_size;
114#endif /* VBOX */
115#if !defined(VBOX)
116uint8_t *phys_ram_base;
117#endif
118uint8_t *phys_ram_dirty;
119
120CPUState *first_cpu;
121/* current CPU in the current thread. It is only valid inside
122 cpu_exec() */
123CPUState *cpu_single_env;
124
125typedef struct PageDesc {
126 /* list of TBs intersecting this ram page */
127 TranslationBlock *first_tb;
128 /* in order to optimize self modifying code, we count the number
129 of lookups we do to a given page to use a bitmap */
130 unsigned int code_write_count;
131 uint8_t *code_bitmap;
132#if defined(CONFIG_USER_ONLY)
133 unsigned long flags;
134#endif
135} PageDesc;
136
137typedef struct PhysPageDesc {
138 /* offset in host memory of the page + io_index in the low 12 bits */
139 uint32_t phys_offset;
140} PhysPageDesc;
141
142#define L2_BITS 10
143#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
144
145#define L1_SIZE (1 << L1_BITS)
146#define L2_SIZE (1 << L2_BITS)
147
148static void io_mem_init(void);
149
150unsigned long qemu_real_host_page_size;
151unsigned long qemu_host_page_bits;
152unsigned long qemu_host_page_size;
153unsigned long qemu_host_page_mask;
154
155/* XXX: for system emulation, it could just be an array */
156static PageDesc *l1_map[L1_SIZE];
157PhysPageDesc **l1_phys_map;
158
159/* io memory support */
160CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
161CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
162void *io_mem_opaque[IO_MEM_NB_ENTRIES];
163static int io_mem_nb;
164
165#ifndef VBOX
166/* log support */
167char *logfilename = "/tmp/qemu.log";
168#endif /* !VBOX */
169FILE *logfile;
170int loglevel;
171
172/* statistics */
173static int tlb_flush_count;
174static int tb_flush_count;
175#ifndef VBOX
176static int tb_phys_invalidate_count;
177#endif /* !VBOX */
178
179static void page_init(void)
180{
181 /* NOTE: we can always suppose that qemu_host_page_size >=
182 TARGET_PAGE_SIZE */
183#ifdef VBOX
184 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
185 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
186 qemu_real_host_page_size = PAGE_SIZE;
187#else /* !VBOX */
188#ifdef _WIN32
189 {
190 SYSTEM_INFO system_info;
191 DWORD old_protect;
192
193 GetSystemInfo(&system_info);
194 qemu_real_host_page_size = system_info.dwPageSize;
195
196 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
197 PAGE_EXECUTE_READWRITE, &old_protect);
198 }
199#else
200 qemu_real_host_page_size = getpagesize();
201 {
202 unsigned long start, end;
203
204 start = (unsigned long)code_gen_buffer;
205 start &= ~(qemu_real_host_page_size - 1);
206
207 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
208 end += qemu_real_host_page_size - 1;
209 end &= ~(qemu_real_host_page_size - 1);
210
211 mprotect((void *)start, end - start,
212 PROT_READ | PROT_WRITE | PROT_EXEC);
213 }
214#endif
215#endif /* !VBOX */
216
217 if (qemu_host_page_size == 0)
218 qemu_host_page_size = qemu_real_host_page_size;
219 if (qemu_host_page_size < TARGET_PAGE_SIZE)
220 qemu_host_page_size = TARGET_PAGE_SIZE;
221 qemu_host_page_bits = 0;
222 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
223 qemu_host_page_bits++;
224 qemu_host_page_mask = ~(qemu_host_page_size - 1);
225 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
226 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
227}
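/* qemu_host_page_size ends up as the larger of the real host page size
   and TARGET_PAGE_SIZE; it is the granularity at which guest pages can
   actually be mprotect()ed in user-mode emulation (see tb_alloc_page()
   and page_unprotect()), and qemu_host_page_mask/qemu_host_page_bits
   are the matching mask and log2. */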
228
229static inline PageDesc *page_find_alloc(unsigned int index)
230{
231 PageDesc **lp, *p;
232
233 lp = &l1_map[index >> L2_BITS];
234 p = *lp;
235 if (!p) {
236 /* allocate if not found */
237 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
238 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
239 *lp = p;
240 }
241 return p + (index & (L2_SIZE - 1));
242}
243
244static inline PageDesc *page_find(unsigned int index)
245{
246 PageDesc *p;
247
248 p = l1_map[index >> L2_BITS];
249 if (!p)
250 return 0;
251 return p + (index & (L2_SIZE - 1));
252}
253
254static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
255{
256 void **lp, **p;
257 PhysPageDesc *pd;
258
259 p = (void **)l1_phys_map;
260#if TARGET_PHYS_ADDR_SPACE_BITS > 32
261
262#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
263#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
264#endif
265 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
266 p = *lp;
267 if (!p) {
268 /* allocate if not found */
269 if (!alloc)
270 return NULL;
271 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
272 memset(p, 0, sizeof(void *) * L1_SIZE);
273 *lp = p;
274 }
275#endif
276 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
277 pd = *lp;
278 if (!pd) {
279 int i;
280 /* allocate if not found */
281 if (!alloc)
282 return NULL;
283 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
284 *lp = pd;
285 for (i = 0; i < L2_SIZE; i++)
286 pd[i].phys_offset = IO_MEM_UNASSIGNED;
287 }
288#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
289 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
290 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
291 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
292 return pd;
293#else
294 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
295#endif
296}
297
298static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
299{
300 return phys_page_find_alloc(index, 0);
301}
302
303#if !defined(CONFIG_USER_ONLY)
304static void tlb_protect_code(ram_addr_t ram_addr);
305static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
306 target_ulong vaddr);
307#endif
308
309void cpu_exec_init(CPUState *env)
310{
311 CPUState **penv;
312 int cpu_index;
313
314 if (!code_gen_ptr) {
315 code_gen_ptr = code_gen_buffer;
316 page_init();
317 io_mem_init();
318 }
319 env->next_cpu = NULL;
320 penv = &first_cpu;
321 cpu_index = 0;
322 while (*penv != NULL) {
323 penv = (CPUState **)&(*penv)->next_cpu;
324 cpu_index++;
325 }
326 env->cpu_index = cpu_index;
327 *penv = env;
328}
329
330static inline void invalidate_page_bitmap(PageDesc *p)
331{
332 if (p->code_bitmap) {
333 qemu_free(p->code_bitmap);
334 p->code_bitmap = NULL;
335 }
336 p->code_write_count = 0;
337}
338
339/* set to NULL all the 'first_tb' fields in all PageDescs */
340static void page_flush_tb(void)
341{
342 int i, j;
343 PageDesc *p;
344
345 for(i = 0; i < L1_SIZE; i++) {
346 p = l1_map[i];
347 if (p) {
348 for(j = 0; j < L2_SIZE; j++) {
349 p->first_tb = NULL;
350 invalidate_page_bitmap(p);
351 p++;
352 }
353 }
354 }
355}
356
357/* flush all the translation blocks */
358/* XXX: tb_flush is currently not thread safe */
359void tb_flush(CPUState *env1)
360{
361 CPUState *env;
362#if defined(DEBUG_FLUSH)
363 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
364 code_gen_ptr - code_gen_buffer,
365 nb_tbs,
366 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
367#endif
368 nb_tbs = 0;
369
370 for(env = first_cpu; env != NULL; env = env->next_cpu) {
371 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
372 }
373
374 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
375 page_flush_tb();
376
377 code_gen_ptr = code_gen_buffer;
378 /* XXX: flush processor icache at this point if cache flush is
379 expensive */
380 tb_flush_count++;
381}
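/* tb_flush() is the wholesale fallback: tb_alloc() returns NULL once
   CODE_GEN_MAX_BLOCKS TBs exist or code_gen_buffer is nearly full
   (CODE_GEN_BUFFER_MAX_SIZE above), and callers such as tb_gen_code()
   then flush everything and let the code be retranslated on demand. */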
382
383#ifdef DEBUG_TB_CHECK
384
385static void tb_invalidate_check(unsigned long address)
386{
387 TranslationBlock *tb;
388 int i;
389 address &= TARGET_PAGE_MASK;
390 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
391 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
392 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
393 address >= tb->pc + tb->size)) {
394 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
395 address, (long)tb->pc, tb->size);
396 }
397 }
398 }
399}
400
401/* verify that all the pages have correct rights for code */
402static void tb_page_check(void)
403{
404 TranslationBlock *tb;
405 int i, flags1, flags2;
406
407 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
408 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
409 flags1 = page_get_flags(tb->pc);
410 flags2 = page_get_flags(tb->pc + tb->size - 1);
411 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
412 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
413 (long)tb->pc, tb->size, flags1, flags2);
414 }
415 }
416 }
417}
418
419void tb_jmp_check(TranslationBlock *tb)
420{
421 TranslationBlock *tb1;
422 unsigned int n1;
423
424 /* suppress any remaining jumps to this TB */
425 tb1 = tb->jmp_first;
426 for(;;) {
427 n1 = (long)tb1 & 3;
428 tb1 = (TranslationBlock *)((long)tb1 & ~3);
429 if (n1 == 2)
430 break;
431 tb1 = tb1->jmp_next[n1];
432 }
433 /* check end of list */
434 if (tb1 != tb) {
435 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
436 }
437}
438
439#endif
440
441/* invalidate one TB */
442static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
443 int next_offset)
444{
445 TranslationBlock *tb1;
446 for(;;) {
447 tb1 = *ptb;
448 if (tb1 == tb) {
449 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
450 break;
451 }
452 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
453 }
454}
455
456static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
457{
458 TranslationBlock *tb1;
459 unsigned int n1;
460
461 for(;;) {
462 tb1 = *ptb;
463 n1 = (long)tb1 & 3;
464 tb1 = (TranslationBlock *)((long)tb1 & ~3);
465 if (tb1 == tb) {
466 *ptb = tb1->page_next[n1];
467 break;
468 }
469 ptb = &tb1->page_next[n1];
470 }
471}
472
473static inline void tb_jmp_remove(TranslationBlock *tb, int n)
474{
475 TranslationBlock *tb1, **ptb;
476 unsigned int n1;
477
478 ptb = &tb->jmp_next[n];
479 tb1 = *ptb;
480 if (tb1) {
481 /* find tb(n) in circular list */
482 for(;;) {
483 tb1 = *ptb;
484 n1 = (long)tb1 & 3;
485 tb1 = (TranslationBlock *)((long)tb1 & ~3);
486 if (n1 == n && tb1 == tb)
487 break;
488 if (n1 == 2) {
489 ptb = &tb1->jmp_first;
490 } else {
491 ptb = &tb1->jmp_next[n1];
492 }
493 }
494 /* now we can suppress tb(n) from the list */
495 *ptb = tb->jmp_next[n];
496
497 tb->jmp_next[n] = NULL;
498 }
499}
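/* Pointer tagging used by the TB lists above: page_next[], jmp_next[]
   and jmp_first store a TranslationBlock pointer with a tag packed
   into its two low bits (the structures are at least 4-byte aligned,
   so those bits are otherwise zero). Tags 0 and 1 say which slot of
   the pointed-to TB continues the list, and tag 2 marks the list
   head/terminator (the "| 2" assignments in tb_phys_invalidate() and
   tb_link_phys()). The "& 3" and "& ~3" operations in the loops above
   recover the tag and the real pointer. */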
500
501/* reset the jump entry 'n' of a TB so that it is not chained to
502 another TB */
503static inline void tb_reset_jump(TranslationBlock *tb, int n)
504{
505 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
506}
507
508static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
509{
510 CPUState *env;
511 PageDesc *p;
512 unsigned int h, n1;
513 target_ulong phys_pc;
514 TranslationBlock *tb1, *tb2;
515
516 /* remove the TB from the hash list */
517 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
518 h = tb_phys_hash_func(phys_pc);
519 tb_remove(&tb_phys_hash[h], tb,
520 offsetof(TranslationBlock, phys_hash_next));
521
522 /* remove the TB from the page list */
523 if (tb->page_addr[0] != page_addr) {
524 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
525 tb_page_remove(&p->first_tb, tb);
526 invalidate_page_bitmap(p);
527 }
528 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
529 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
530 tb_page_remove(&p->first_tb, tb);
531 invalidate_page_bitmap(p);
532 }
533
534 tb_invalidated_flag = 1;
535
536 /* remove the TB from the hash list */
537 h = tb_jmp_cache_hash_func(tb->pc);
538 for(env = first_cpu; env != NULL; env = env->next_cpu) {
539 if (env->tb_jmp_cache[h] == tb)
540 env->tb_jmp_cache[h] = NULL;
541 }
542
543 /* suppress this TB from the two jump lists */
544 tb_jmp_remove(tb, 0);
545 tb_jmp_remove(tb, 1);
546
547 /* suppress any remaining jumps to this TB */
548 tb1 = tb->jmp_first;
549 for(;;) {
550 n1 = (long)tb1 & 3;
551 if (n1 == 2)
552 break;
553 tb1 = (TranslationBlock *)((long)tb1 & ~3);
554 tb2 = tb1->jmp_next[n1];
555 tb_reset_jump(tb1, n1);
556 tb1->jmp_next[n1] = NULL;
557 tb1 = tb2;
558 }
559 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
560
561#ifndef VBOX
562 tb_phys_invalidate_count++;
563#endif /* !VBOX */
564}
565
566#ifdef VBOX
567void tb_invalidate_virt(CPUState *env, uint32_t eip)
568{
569# if 1
570 tb_flush(env);
571# else
572 uint8_t *cs_base, *pc;
573 unsigned int flags, h, phys_pc;
574 TranslationBlock *tb, **ptb;
575
576 flags = env->hflags;
577 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
578 cs_base = env->segs[R_CS].base;
579 pc = cs_base + eip;
580
581 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
582 flags);
583
584 if(tb)
585 {
586# ifdef DEBUG
587 printf("invalidating TB (%08X) at %08X\n", tb, eip);
588# endif
589 tb_invalidate(tb);
590 //Note: this will leak TBs, but the whole cache will be flushed
591 // when it happens too often
592 tb->pc = 0;
593 tb->cs_base = 0;
594 tb->flags = 0;
595 }
596# endif
597}
598
599# ifdef VBOX_STRICT
600/**
601 * Gets the page offset.
602 */
603unsigned long get_phys_page_offset(target_ulong addr)
604{
605 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
606 return p ? p->phys_offset : 0;
607}
608# endif /* VBOX_STRICT */
609#endif /* VBOX */
610
611static inline void set_bits(uint8_t *tab, int start, int len)
612{
613 int end, mask, end1;
614
615 end = start + len;
616 tab += start >> 3;
617 mask = 0xff << (start & 7);
618 if ((start & ~7) == (end & ~7)) {
619 if (start < end) {
620 mask &= ~(0xff << (end & 7));
621 *tab |= mask;
622 }
623 } else {
624 *tab++ |= mask;
625 start = (start + 8) & ~7;
626 end1 = end & ~7;
627 while (start < end1) {
628 *tab++ = 0xff;
629 start += 8;
630 }
631 if (start < end) {
632 mask = ~(0xff << (end & 7));
633 *tab |= mask;
634 }
635 }
636}
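/* set_bits() sets the half-open bit range [start, start + len) in a
   byte-addressed bitmap. Worked example: set_bits(tab, 10, 7) sets
   bits 10..16: the head byte gets tab[1] |= 0xfc (bits 10..15),
   end1 == 16 so the middle loop writes no full 0xff bytes, and the
   tail byte gets tab[2] |= 0x01 (bit 16). */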
637
638static void build_page_bitmap(PageDesc *p)
639{
640 int n, tb_start, tb_end;
641 TranslationBlock *tb;
642
643 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
644 if (!p->code_bitmap)
645 return;
646 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
647
648 tb = p->first_tb;
649 while (tb != NULL) {
650 n = (long)tb & 3;
651 tb = (TranslationBlock *)((long)tb & ~3);
652 /* NOTE: this is subtle as a TB may span two physical pages */
653 if (n == 0) {
654 /* NOTE: tb_end may be after the end of the page, but
655 it is not a problem */
656 tb_start = tb->pc & ~TARGET_PAGE_MASK;
657 tb_end = tb_start + tb->size;
658 if (tb_end > TARGET_PAGE_SIZE)
659 tb_end = TARGET_PAGE_SIZE;
660 } else {
661 tb_start = 0;
662 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
663 }
664 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
665 tb = tb->page_next[n];
666 }
667}
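/* The code bitmap is a self-modifying-code optimization: once a page
   has taken SMC_BITMAP_USE_THRESHOLD write faults,
   tb_invalidate_phys_page_range() builds this per-page bitmap of the
   bytes covered by translated code, and tb_invalidate_phys_page_fast()
   then only invalidates TBs when a write actually hits a set bit, so
   stores to data that merely shares a page with code stay cheap. */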
668
669#ifdef TARGET_HAS_PRECISE_SMC
670
671static void tb_gen_code(CPUState *env,
672 target_ulong pc, target_ulong cs_base, int flags,
673 int cflags)
674{
675 TranslationBlock *tb;
676 uint8_t *tc_ptr;
677 target_ulong phys_pc, phys_page2, virt_page2;
678 int code_gen_size;
679
680 phys_pc = get_phys_addr_code(env, pc);
681 tb = tb_alloc(pc);
682 if (!tb) {
683 /* flush must be done */
684 tb_flush(env);
685 /* cannot fail at this point */
686 tb = tb_alloc(pc);
687 }
688 tc_ptr = code_gen_ptr;
689 tb->tc_ptr = tc_ptr;
690 tb->cs_base = cs_base;
691 tb->flags = flags;
692 tb->cflags = cflags;
693 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
694 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
695
696 /* check next page if needed */
697 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
698 phys_page2 = -1;
699 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
700 phys_page2 = get_phys_addr_code(env, virt_page2);
701 }
702 tb_link_phys(tb, phys_pc, phys_page2);
703}
704#endif
705
706/* invalidate all TBs which intersect with the target physical page
707 starting in the range [start, end). NOTE: start and end must refer to
708 the same physical page. 'is_cpu_write_access' should be true if called
709 from a real cpu write access: the virtual CPU will exit the current
710 TB if code is modified inside this TB. */
711void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
712 int is_cpu_write_access)
713{
714 int n, current_tb_modified, current_tb_not_found, current_flags;
715 CPUState *env = cpu_single_env;
716 PageDesc *p;
717 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
718 target_ulong tb_start, tb_end;
719 target_ulong current_pc, current_cs_base;
720
721 p = page_find(start >> TARGET_PAGE_BITS);
722 if (!p)
723 return;
724 if (!p->code_bitmap &&
725 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
726 is_cpu_write_access) {
727 /* build code bitmap */
728 build_page_bitmap(p);
729 }
730
731 /* we remove all the TBs in the range [start, end) */
732 /* XXX: see if in some cases it could be faster to invalidate all the code */
733 current_tb_not_found = is_cpu_write_access;
734 current_tb_modified = 0;
735 current_tb = NULL; /* avoid warning */
736 current_pc = 0; /* avoid warning */
737 current_cs_base = 0; /* avoid warning */
738 current_flags = 0; /* avoid warning */
739 tb = p->first_tb;
740 while (tb != NULL) {
741 n = (long)tb & 3;
742 tb = (TranslationBlock *)((long)tb & ~3);
743 tb_next = tb->page_next[n];
744 /* NOTE: this is subtle as a TB may span two physical pages */
745 if (n == 0) {
746 /* NOTE: tb_end may be after the end of the page, but
747 it is not a problem */
748 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
749 tb_end = tb_start + tb->size;
750 } else {
751 tb_start = tb->page_addr[1];
752 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
753 }
754 if (!(tb_end <= start || tb_start >= end)) {
755#ifdef TARGET_HAS_PRECISE_SMC
756 if (current_tb_not_found) {
757 current_tb_not_found = 0;
758 current_tb = NULL;
759 if (env->mem_write_pc) {
760 /* now we have a real cpu fault */
761 current_tb = tb_find_pc(env->mem_write_pc);
762 }
763 }
764 if (current_tb == tb &&
765 !(current_tb->cflags & CF_SINGLE_INSN)) {
766 /* If we are modifying the current TB, we must stop
767 its execution. We could be more precise by checking
768 that the modification is after the current PC, but it
769 would require a specialized function to partially
770 restore the CPU state */
771
772 current_tb_modified = 1;
773 cpu_restore_state(current_tb, env,
774 env->mem_write_pc, NULL);
775#if defined(TARGET_I386)
776 current_flags = env->hflags;
777 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
778 current_cs_base = (target_ulong)env->segs[R_CS].base;
779 current_pc = current_cs_base + env->eip;
780#else
781#error unsupported CPU
782#endif
783 }
784#endif /* TARGET_HAS_PRECISE_SMC */
785 /* we need to do that to handle the case where a signal
786 occurs while doing tb_phys_invalidate() */
787 saved_tb = NULL;
788 if (env) {
789 saved_tb = env->current_tb;
790 env->current_tb = NULL;
791 }
792 tb_phys_invalidate(tb, -1);
793 if (env) {
794 env->current_tb = saved_tb;
795 if (env->interrupt_request && env->current_tb)
796 cpu_interrupt(env, env->interrupt_request);
797 }
798 }
799 tb = tb_next;
800 }
801#if !defined(CONFIG_USER_ONLY)
802 /* if no code remaining, no need to continue to use slow writes */
803 if (!p->first_tb) {
804 invalidate_page_bitmap(p);
805 if (is_cpu_write_access) {
806 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
807 }
808 }
809#endif
810#ifdef TARGET_HAS_PRECISE_SMC
811 if (current_tb_modified) {
812 /* we generate a block containing just the instruction
813 modifying the memory. It will ensure that it cannot modify
814 itself */
815 env->current_tb = NULL;
816 tb_gen_code(env, current_pc, current_cs_base, current_flags,
817 CF_SINGLE_INSN);
818 cpu_resume_from_signal(env, NULL);
819 }
820#endif
821}
822
823/* len must be <= 8 and start must be a multiple of len */
824static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
825{
826 PageDesc *p;
827 int offset, b;
828#if 0
829 if (1) {
830 if (loglevel) {
831 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
832 cpu_single_env->mem_write_vaddr, len,
833 cpu_single_env->eip,
834 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
835 }
836 }
837#endif
838 p = page_find(start >> TARGET_PAGE_BITS);
839 if (!p)
840 return;
841 if (p->code_bitmap) {
842 offset = start & ~TARGET_PAGE_MASK;
843 b = p->code_bitmap[offset >> 3] >> (offset & 7);
844 if (b & ((1 << len) - 1))
845 goto do_invalidate;
846 } else {
847 do_invalidate:
848 tb_invalidate_phys_page_range(start, start + len, 1);
849 }
850}
851
852#if !defined(CONFIG_SOFTMMU)
853static void tb_invalidate_phys_page(target_ulong addr,
854 unsigned long pc, void *puc)
855{
856 int n, current_flags, current_tb_modified;
857 target_ulong current_pc, current_cs_base;
858 PageDesc *p;
859 TranslationBlock *tb, *current_tb;
860#ifdef TARGET_HAS_PRECISE_SMC
861 CPUState *env = cpu_single_env;
862#endif
863
864 addr &= TARGET_PAGE_MASK;
865 p = page_find(addr >> TARGET_PAGE_BITS);
866 if (!p)
867 return;
868 tb = p->first_tb;
869 current_tb_modified = 0;
870 current_tb = NULL;
871 current_pc = 0; /* avoid warning */
872 current_cs_base = 0; /* avoid warning */
873 current_flags = 0; /* avoid warning */
874#ifdef TARGET_HAS_PRECISE_SMC
875 if (tb && pc != 0) {
876 current_tb = tb_find_pc(pc);
877 }
878#endif
879 while (tb != NULL) {
880 n = (long)tb & 3;
881 tb = (TranslationBlock *)((long)tb & ~3);
882#ifdef TARGET_HAS_PRECISE_SMC
883 if (current_tb == tb &&
884 !(current_tb->cflags & CF_SINGLE_INSN)) {
885 /* If we are modifying the current TB, we must stop
886 its execution. We could be more precise by checking
887 that the modification is after the current PC, but it
888 would require a specialized function to partially
889 restore the CPU state */
890
891 current_tb_modified = 1;
892 cpu_restore_state(current_tb, env, pc, puc);
893#if defined(TARGET_I386)
894 current_flags = env->hflags;
895 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
896 current_cs_base = (target_ulong)env->segs[R_CS].base;
897 current_pc = current_cs_base + env->eip;
898#else
899#error unsupported CPU
900#endif
901 }
902#endif /* TARGET_HAS_PRECISE_SMC */
903 tb_phys_invalidate(tb, addr);
904 tb = tb->page_next[n];
905 }
906 p->first_tb = NULL;
907#ifdef TARGET_HAS_PRECISE_SMC
908 if (current_tb_modified) {
909 /* we generate a block containing just the instruction
910 modifying the memory. It will ensure that it cannot modify
911 itself */
912 env->current_tb = NULL;
913 tb_gen_code(env, current_pc, current_cs_base, current_flags,
914 CF_SINGLE_INSN);
915 cpu_resume_from_signal(env, puc);
916 }
917#endif
918}
919#endif
920
921/* add the tb in the target page and protect it if necessary */
922static inline void tb_alloc_page(TranslationBlock *tb,
923 unsigned int n, target_ulong page_addr)
924{
925 PageDesc *p;
926 TranslationBlock *last_first_tb;
927
928 tb->page_addr[n] = page_addr;
929 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
930 tb->page_next[n] = p->first_tb;
931 last_first_tb = p->first_tb;
932 p->first_tb = (TranslationBlock *)((long)tb | n);
933 invalidate_page_bitmap(p);
934
935#if defined(TARGET_HAS_SMC) || 1
936
937#if defined(CONFIG_USER_ONLY)
938 if (p->flags & PAGE_WRITE) {
939 target_ulong addr;
940 PageDesc *p2;
941 int prot;
942
943 /* force the host page as non writable (writes will have a
944 page fault + mprotect overhead) */
945 page_addr &= qemu_host_page_mask;
946 prot = 0;
947 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
948 addr += TARGET_PAGE_SIZE) {
949
950 p2 = page_find (addr >> TARGET_PAGE_BITS);
951 if (!p2)
952 continue;
953 prot |= p2->flags;
954 p2->flags &= ~PAGE_WRITE;
955 page_get_flags(addr);
956 }
957 mprotect(g2h(page_addr), qemu_host_page_size,
958 (prot & PAGE_BITS) & ~PAGE_WRITE);
959#ifdef DEBUG_TB_INVALIDATE
960 printf("protecting code page: 0x%08lx\n",
961 page_addr);
962#endif
963 }
964#else
965 /* if some code is already present, then the pages are already
966 protected. So we handle the case where only the first TB is
967 allocated in a physical page */
968 if (!last_first_tb) {
969 tlb_protect_code(page_addr);
970 }
971#endif
972
973#endif /* TARGET_HAS_SMC */
974}
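/* Protection strategy for pages containing translated code: in
   user-mode emulation the host page is mprotect()ed read-only so that
   guest writes fault into page_unprotect(); in softmmu mode
   tlb_protect_code() clears CODE_DIRTY_FLAG for the page, which makes
   guest stores go through the notdirty I/O handlers and hence through
   tb_invalidate_phys_page_fast(). */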
975
976/* Allocate a new translation block. Flush the translation buffer if
977 too many translation blocks or too much generated code. */
978TranslationBlock *tb_alloc(target_ulong pc)
979{
980 TranslationBlock *tb;
981
982 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
983 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
984 return NULL;
985 tb = &tbs[nb_tbs++];
986 tb->pc = pc;
987 tb->cflags = 0;
988 return tb;
989}
990
991/* add a new TB and link it to the physical page tables. phys_page2 is
992 (-1) to indicate that only one page contains the TB. */
993void tb_link_phys(TranslationBlock *tb,
994 target_ulong phys_pc, target_ulong phys_page2)
995{
996 unsigned int h;
997 TranslationBlock **ptb;
998
999 /* add in the physical hash table */
1000 h = tb_phys_hash_func(phys_pc);
1001 ptb = &tb_phys_hash[h];
1002 tb->phys_hash_next = *ptb;
1003 *ptb = tb;
1004
1005 /* add in the page list */
1006 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1007 if (phys_page2 != -1)
1008 tb_alloc_page(tb, 1, phys_page2);
1009 else
1010 tb->page_addr[1] = -1;
1011
1012 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1013 tb->jmp_next[0] = NULL;
1014 tb->jmp_next[1] = NULL;
1015#ifdef USE_CODE_COPY
1016 tb->cflags &= ~CF_FP_USED;
1017 if (tb->cflags & CF_TB_FP_USED)
1018 tb->cflags |= CF_FP_USED;
1019#endif
1020
1021 /* init original jump addresses */
1022 if (tb->tb_next_offset[0] != 0xffff)
1023 tb_reset_jump(tb, 0);
1024 if (tb->tb_next_offset[1] != 0xffff)
1025 tb_reset_jump(tb, 1);
1026
1027#ifdef DEBUG_TB_CHECK
1028 tb_page_check();
1029#endif
1030}
1031
1032/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1033 tb[1].tc_ptr. Return NULL if not found */
1034TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1035{
1036 int m_min, m_max, m;
1037 unsigned long v;
1038 TranslationBlock *tb;
1039
1040 if (nb_tbs <= 0)
1041 return NULL;
1042 if (tc_ptr < (unsigned long)code_gen_buffer ||
1043 tc_ptr >= (unsigned long)code_gen_ptr)
1044 return NULL;
1045 /* binary search (cf Knuth) */
1046 m_min = 0;
1047 m_max = nb_tbs - 1;
1048 while (m_min <= m_max) {
1049 m = (m_min + m_max) >> 1;
1050 tb = &tbs[m];
1051 v = (unsigned long)tb->tc_ptr;
1052 if (v == tc_ptr)
1053 return tb;
1054 else if (tc_ptr < v) {
1055 m_max = m - 1;
1056 } else {
1057 m_min = m + 1;
1058 }
1059 }
1060 return &tbs[m_max];
1061}
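/* The binary search above works because TBs are handed out
   sequentially from tbs[] and their generated code is laid out
   linearly in code_gen_buffer, so tbs[i].tc_ptr increases
   monotonically with i between two tb_flush() calls. */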
1062
1063static void tb_reset_jump_recursive(TranslationBlock *tb);
1064
1065static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1066{
1067 TranslationBlock *tb1, *tb_next, **ptb;
1068 unsigned int n1;
1069
1070 tb1 = tb->jmp_next[n];
1071 if (tb1 != NULL) {
1072 /* find head of list */
1073 for(;;) {
1074 n1 = (long)tb1 & 3;
1075 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1076 if (n1 == 2)
1077 break;
1078 tb1 = tb1->jmp_next[n1];
1079 }
1080 /* we are now sure that tb jumps to tb1 */
1081 tb_next = tb1;
1082
1083 /* remove tb from the jmp_first list */
1084 ptb = &tb_next->jmp_first;
1085 for(;;) {
1086 tb1 = *ptb;
1087 n1 = (long)tb1 & 3;
1088 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1089 if (n1 == n && tb1 == tb)
1090 break;
1091 ptb = &tb1->jmp_next[n1];
1092 }
1093 *ptb = tb->jmp_next[n];
1094 tb->jmp_next[n] = NULL;
1095
1096 /* suppress the jump to next tb in generated code */
1097 tb_reset_jump(tb, n);
1098
1099 /* suppress jumps in the tb on which we could have jumped */
1100 tb_reset_jump_recursive(tb_next);
1101 }
1102}
1103
1104static void tb_reset_jump_recursive(TranslationBlock *tb)
1105{
1106 tb_reset_jump_recursive2(tb, 0);
1107 tb_reset_jump_recursive2(tb, 1);
1108}
1109
1110#if defined(TARGET_HAS_ICE)
1111static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1112{
1113 target_ulong addr, pd;
1114 ram_addr_t ram_addr;
1115 PhysPageDesc *p;
1116
1117 addr = cpu_get_phys_page_debug(env, pc);
1118 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1119 if (!p) {
1120 pd = IO_MEM_UNASSIGNED;
1121 } else {
1122 pd = p->phys_offset;
1123 }
1124 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1125 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1126}
1127#endif
1128
1129/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1130 breakpoint is reached */
1131int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1132{
1133#if defined(TARGET_HAS_ICE)
1134 int i;
1135
1136 for(i = 0; i < env->nb_breakpoints; i++) {
1137 if (env->breakpoints[i] == pc)
1138 return 0;
1139 }
1140
1141 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1142 return -1;
1143 env->breakpoints[env->nb_breakpoints++] = pc;
1144
1145 breakpoint_invalidate(env, pc);
1146 return 0;
1147#else
1148 return -1;
1149#endif
1150}
1151
1152/* remove a breakpoint */
1153int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1154{
1155#if defined(TARGET_HAS_ICE)
1156 int i;
1157 for(i = 0; i < env->nb_breakpoints; i++) {
1158 if (env->breakpoints[i] == pc)
1159 goto found;
1160 }
1161 return -1;
1162 found:
1163 env->nb_breakpoints--;
1164 if (i < env->nb_breakpoints)
1165 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1166
1167 breakpoint_invalidate(env, pc);
1168 return 0;
1169#else
1170 return -1;
1171#endif
1172}
1173
1174/* enable or disable single step mode. EXCP_DEBUG is returned by the
1175 CPU loop after each instruction */
1176void cpu_single_step(CPUState *env, int enabled)
1177{
1178#if defined(TARGET_HAS_ICE)
1179 if (env->singlestep_enabled != enabled) {
1180 env->singlestep_enabled = enabled;
1181 /* must flush all the translated code to avoid inconsistencies */
1182 /* XXX: only flush what is necessary */
1183 tb_flush(env);
1184 }
1185#endif
1186}
1187
1188#ifndef VBOX
1189/* enable or disable low levels log */
1190void cpu_set_log(int log_flags)
1191{
1192 loglevel = log_flags;
1193 if (loglevel && !logfile) {
1194 logfile = fopen(logfilename, "w");
1195 if (!logfile) {
1196 perror(logfilename);
1197 _exit(1);
1198 }
1199#if !defined(CONFIG_SOFTMMU)
1200 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1201 {
1202 static uint8_t logfile_buf[4096];
1203 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1204 }
1205#else
1206 setvbuf(logfile, NULL, _IOLBF, 0);
1207#endif
1208 }
1209}
1210
1211void cpu_set_log_filename(const char *filename)
1212{
1213 logfilename = strdup(filename);
1214}
1215#endif /* !VBOX */
1216
1217/* mask must never be zero, except for A20 change call */
1218void cpu_interrupt(CPUState *env, int mask)
1219{
1220 TranslationBlock *tb;
1221 static int interrupt_lock;
1222
1223#ifdef VBOX
1224 VM_ASSERT_EMT(env->pVM);
1225 ASMAtomicOrS32(&env->interrupt_request, mask);
1226#else /* !VBOX */
1227 env->interrupt_request |= mask;
1228#endif /* !VBOX */
1229 /* if the cpu is currently executing code, we must unlink it and
1230 all the potentially executing TB */
1231 tb = env->current_tb;
1232 if (tb && !testandset(&interrupt_lock)) {
1233 env->current_tb = NULL;
1234 tb_reset_jump_recursive(tb);
1235 interrupt_lock = 0;
1236 }
1237}
1238
1239void cpu_reset_interrupt(CPUState *env, int mask)
1240{
1241#ifdef VBOX
1242 /*
1243 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1244 * for future changes!
1245 */
1246 ASMAtomicAndS32(&env->interrupt_request, ~mask);
1247#else /* !VBOX */
1248 env->interrupt_request &= ~mask;
1249#endif /* !VBOX */
1250}
1251
1252#ifndef VBOX
1253CPULogItem cpu_log_items[] = {
1254 { CPU_LOG_TB_OUT_ASM, "out_asm",
1255 "show generated host assembly code for each compiled TB" },
1256 { CPU_LOG_TB_IN_ASM, "in_asm",
1257 "show target assembly code for each compiled TB" },
1258 { CPU_LOG_TB_OP, "op",
1259 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1260#ifdef TARGET_I386
1261 { CPU_LOG_TB_OP_OPT, "op_opt",
1262 "show micro ops after optimization for each compiled TB" },
1263#endif
1264 { CPU_LOG_INT, "int",
1265 "show interrupts/exceptions in short format" },
1266 { CPU_LOG_EXEC, "exec",
1267 "show trace before each executed TB (lots of logs)" },
1268 { CPU_LOG_TB_CPU, "cpu",
1269 "show CPU state before block translation" },
1270#ifdef TARGET_I386
1271 { CPU_LOG_PCALL, "pcall",
1272 "show protected mode far calls/returns/exceptions" },
1273#endif
1274#ifdef DEBUG_IOPORT
1275 { CPU_LOG_IOPORT, "ioport",
1276 "show all i/o ports accesses" },
1277#endif
1278 { 0, NULL, NULL },
1279};
1280
1281static int cmp1(const char *s1, int n, const char *s2)
1282{
1283 if (strlen(s2) != n)
1284 return 0;
1285 return memcmp(s1, s2, n) == 0;
1286}
1287
1288/* takes a comma separated list of log masks. Return 0 if error. */
1289int cpu_str_to_log_mask(const char *str)
1290{
1291 CPULogItem *item;
1292 int mask;
1293 const char *p, *p1;
1294
1295 p = str;
1296 mask = 0;
1297 for(;;) {
1298 p1 = strchr(p, ',');
1299 if (!p1)
1300 p1 = p + strlen(p);
1301 if(cmp1(p,p1-p,"all")) {
1302 for(item = cpu_log_items; item->mask != 0; item++) {
1303 mask |= item->mask;
1304 }
1305 } else {
1306 for(item = cpu_log_items; item->mask != 0; item++) {
1307 if (cmp1(p, p1 - p, item->name))
1308 goto found;
1309 }
1310 return 0;
1311 }
1312 found:
1313 mask |= item->mask;
1314 if (*p1 != ',')
1315 break;
1316 p = p1 + 1;
1317 }
1318 return mask;
1319}
1320#endif /* !VBOX */
1321
1322#ifndef VBOX /* VBOX: we have our own routine. */
1323void cpu_abort(CPUState *env, const char *fmt, ...)
1324{
1325 va_list ap;
1326
1327 va_start(ap, fmt);
1328 fprintf(stderr, "qemu: fatal: ");
1329 vfprintf(stderr, fmt, ap);
1330 fprintf(stderr, "\n");
1331#ifdef TARGET_I386
1332 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1333#else
1334 cpu_dump_state(env, stderr, fprintf, 0);
1335#endif
1336 va_end(ap);
1337 abort();
1338}
1339#endif /* !VBOX */
1340
1341#if !defined(CONFIG_USER_ONLY)
1342
1343/* NOTE: if flush_global is true, also flush global entries (not
1344 implemented yet) */
1345void tlb_flush(CPUState *env, int flush_global)
1346{
1347 int i;
1348
1349#if defined(DEBUG_TLB)
1350 printf("tlb_flush:\n");
1351#endif
1352 /* must reset current TB so that interrupts cannot modify the
1353 links while we are modifying them */
1354 env->current_tb = NULL;
1355
1356 for(i = 0; i < CPU_TLB_SIZE; i++) {
1357 env->tlb_table[0][i].addr_read = -1;
1358 env->tlb_table[0][i].addr_write = -1;
1359 env->tlb_table[0][i].addr_code = -1;
1360 env->tlb_table[1][i].addr_read = -1;
1361 env->tlb_table[1][i].addr_write = -1;
1362 env->tlb_table[1][i].addr_code = -1;
1363 }
1364
1365 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1366
1367#if !defined(CONFIG_SOFTMMU)
1368 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1369#endif
1370#ifdef VBOX
1371 /* inform raw mode about TLB flush */
1372 remR3FlushTLB(env, flush_global);
1373#endif
1374#ifdef USE_KQEMU
1375 if (env->kqemu_enabled) {
1376 kqemu_flush(env, flush_global);
1377 }
1378#endif
1379 tlb_flush_count++;
1380}
1381
1382static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1383{
1384 if (addr == (tlb_entry->addr_read &
1385 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1386 addr == (tlb_entry->addr_write &
1387 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1388 addr == (tlb_entry->addr_code &
1389 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1390 tlb_entry->addr_read = -1;
1391 tlb_entry->addr_write = -1;
1392 tlb_entry->addr_code = -1;
1393 }
1394}
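/* CPUTLBEntry layout as used here: addr_read/addr_write/addr_code hold
   the guest virtual page address of the cached translation, with an
   I/O index or dirty-tracking bits (e.g. IO_MEM_NOTDIRTY) in the low
   bits to force the slow path, and -1 meaning no valid entry for that
   access type. addend is set up in tlb_set_page_exec() so that, for
   plain RAM in the non-VBOX build, host address = guest vaddr +
   addend; the VBOX variants substitute their own translation through
   the REM glue (remR3GCPhys2HCVirt and friends). */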
1395
1396void tlb_flush_page(CPUState *env, target_ulong addr)
1397{
1398 int i;
1399 TranslationBlock *tb;
1400
1401#if defined(DEBUG_TLB)
1402 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1403#endif
1404 /* must reset current TB so that interrupts cannot modify the
1405 links while we are modifying them */
1406 env->current_tb = NULL;
1407
1408 addr &= TARGET_PAGE_MASK;
1409 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1410 tlb_flush_entry(&env->tlb_table[0][i], addr);
1411 tlb_flush_entry(&env->tlb_table[1][i], addr);
1412
1413 /* Discard jump cache entries for any tb which might potentially
1414 overlap the flushed page. */
1415 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1416 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1417
1418 i = tb_jmp_cache_hash_page(addr);
1419 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1420
1421#if !defined(CONFIG_SOFTMMU)
1422 if (addr < MMAP_AREA_END)
1423 munmap((void *)addr, TARGET_PAGE_SIZE);
1424#endif
1425#ifdef VBOX
1426 /* inform raw mode about TLB page flush */
1427 remR3FlushPage(env, addr);
1428#endif /* VBOX */
1429#ifdef USE_KQEMU
1430 if (env->kqemu_enabled) {
1431 kqemu_flush_page(env, addr);
1432 }
1433#endif
1434}
1435
1436/* update the TLBs so that writes to code in the virtual page 'addr'
1437 can be detected */
1438static void tlb_protect_code(ram_addr_t ram_addr)
1439{
1440 cpu_physical_memory_reset_dirty(ram_addr,
1441 ram_addr + TARGET_PAGE_SIZE,
1442 CODE_DIRTY_FLAG);
1443#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1444 /** @todo Retest this? This function has changed... */
1445 remR3ProtectCode(cpu_single_env, ram_addr);
1446#endif
1447}
1448
1449/* update the TLB so that writes in physical page 'phys_addr' are no longer
1450 tested for self modifying code */
1451static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1452 target_ulong vaddr)
1453{
1454#ifdef VBOX
1455 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1456#endif
1457 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1458}
1459
1460static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1461 unsigned long start, unsigned long length)
1462{
1463 unsigned long addr;
1464 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1465 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1466 if ((addr - start) < length) {
1467 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1468 }
1469 }
1470}
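/* Dirty tracking in a nutshell: phys_ram_dirty keeps one flag byte per
   target page. cpu_physical_memory_reset_dirty() clears the requested
   flag bits and, through tlb_reset_dirty_range(), retags matching TLB
   write entries as IO_MEM_NOTDIRTY, so the next guest store takes the
   slow path through the notdirty_mem_write* handlers further down;
   those handlers invalidate translated code on the page, set the dirty
   bits again and, once the byte is fully dirty (0xff), switch the TLB
   entry back to the direct RAM mapping via tlb_set_dirty(). */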
1471
1472void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1473 int dirty_flags)
1474{
1475 CPUState *env;
1476 unsigned long length, start1;
1477 int i, mask, len;
1478 uint8_t *p;
1479
1480 start &= TARGET_PAGE_MASK;
1481 end = TARGET_PAGE_ALIGN(end);
1482
1483 length = end - start;
1484 if (length == 0)
1485 return;
1486 len = length >> TARGET_PAGE_BITS;
1487#ifdef USE_KQEMU
1488 /* XXX: should not depend on cpu context */
1489 env = first_cpu;
1490 if (env->kqemu_enabled) {
1491 ram_addr_t addr;
1492 addr = start;
1493 for(i = 0; i < len; i++) {
1494 kqemu_set_notdirty(env, addr);
1495 addr += TARGET_PAGE_SIZE;
1496 }
1497 }
1498#endif
1499 mask = ~dirty_flags;
1500 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1501#ifdef VBOX
1502 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1503#endif
1504 for(i = 0; i < len; i++)
1505 p[i] &= mask;
1506
1507 /* we modify the TLB cache so that the dirty bit will be set again
1508 when accessing the range */
1509#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1510 start1 = start;
1511#elif !defined(VBOX)
1512 start1 = start + (unsigned long)phys_ram_base;
1513#else
1514 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
1515#endif
1516 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1517 for(i = 0; i < CPU_TLB_SIZE; i++)
1518 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1519 for(i = 0; i < CPU_TLB_SIZE; i++)
1520 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1521 }
1522
1523#if !defined(CONFIG_SOFTMMU)
1524#ifdef VBOX /**@todo remove this check */
1525# error "We shouldn't get here..."
1526#endif
1527 /* XXX: this is expensive */
1528 {
1529 VirtPageDesc *p;
1530 int j;
1531 target_ulong addr;
1532
1533 for(i = 0; i < L1_SIZE; i++) {
1534 p = l1_virt_map[i];
1535 if (p) {
1536 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1537 for(j = 0; j < L2_SIZE; j++) {
1538 if (p->valid_tag == virt_valid_tag &&
1539 p->phys_addr >= start && p->phys_addr < end &&
1540 (p->prot & PROT_WRITE)) {
1541 if (addr < MMAP_AREA_END) {
1542 mprotect((void *)addr, TARGET_PAGE_SIZE,
1543 p->prot & ~PROT_WRITE);
1544 }
1545 }
1546 addr += TARGET_PAGE_SIZE;
1547 p++;
1548 }
1549 }
1550 }
1551 }
1552#endif
1553}
1554
1555static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1556{
1557 ram_addr_t ram_addr;
1558
1559 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1560 /* RAM case */
1561#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1562 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1563#elif !defined(VBOX)
1564 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1565 tlb_entry->addend - (unsigned long)phys_ram_base;
1566#else
1567 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
1568#endif
1569 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1570 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1571 }
1572 }
1573}
1574
1575/* update the TLB according to the current state of the dirty bits */
1576void cpu_tlb_update_dirty(CPUState *env)
1577{
1578 int i;
1579 for(i = 0; i < CPU_TLB_SIZE; i++)
1580 tlb_update_dirty(&env->tlb_table[0][i]);
1581 for(i = 0; i < CPU_TLB_SIZE; i++)
1582 tlb_update_dirty(&env->tlb_table[1][i]);
1583}
1584
1585static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1586 unsigned long start)
1587{
1588 unsigned long addr;
1589 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1590 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1591 if (addr == start) {
1592 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1593 }
1594 }
1595}
1596
1597/* update the TLB corresponding to virtual page vaddr and phys addr
1598 addr so that it is no longer dirty */
1599static inline void tlb_set_dirty(CPUState *env,
1600 unsigned long addr, target_ulong vaddr)
1601{
1602 int i;
1603
1604 addr &= TARGET_PAGE_MASK;
1605 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1606 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1607 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1608}
1609
1610/* add a new TLB entry. At most one entry for a given virtual address
1611 is permitted. Return 0 if OK or 2 if the page could not be mapped
1612 (can only happen in non SOFTMMU mode for I/O pages or pages
1613 conflicting with the host address space). */
1614int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1615 target_phys_addr_t paddr, int prot,
1616 int is_user, int is_softmmu)
1617{
1618 PhysPageDesc *p;
1619 unsigned long pd;
1620 unsigned int index;
1621 target_ulong address;
1622 target_phys_addr_t addend;
1623 int ret;
1624 CPUTLBEntry *te;
1625
1626 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1627 if (!p) {
1628 pd = IO_MEM_UNASSIGNED;
1629 } else {
1630 pd = p->phys_offset;
1631 }
1632#if defined(DEBUG_TLB)
1633 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1634 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1635#endif
1636
1637 ret = 0;
1638#if !defined(CONFIG_SOFTMMU)
1639 if (is_softmmu)
1640#endif
1641 {
1642 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1643 /* IO memory case */
1644 address = vaddr | pd;
1645 addend = paddr;
1646 } else {
1647 /* standard memory */
1648 address = vaddr;
1649#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1650 addend = pd & TARGET_PAGE_MASK;
1651#elif !defined(VBOX)
1652 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1653#else
1654 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
1655#endif
1656 }
1657
1658 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1659 addend -= vaddr;
1660 te = &env->tlb_table[is_user][index];
1661 te->addend = addend;
1662 if (prot & PAGE_READ) {
1663 te->addr_read = address;
1664 } else {
1665 te->addr_read = -1;
1666 }
1667 if (prot & PAGE_EXEC) {
1668 te->addr_code = address;
1669 } else {
1670 te->addr_code = -1;
1671 }
1672 if (prot & PAGE_WRITE) {
1673 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1674 (pd & IO_MEM_ROMD)) {
1675 /* write access calls the I/O callback */
1676 te->addr_write = vaddr |
1677 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1678 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1679 !cpu_physical_memory_is_dirty(pd)) {
1680 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1681 } else {
1682 te->addr_write = address;
1683 }
1684 } else {
1685 te->addr_write = -1;
1686 }
1687#ifdef VBOX
1688 /* inform raw mode about TLB page change */
1689 remR3FlushPage(env, vaddr);
1690#endif
1691 }
1692#if !defined(CONFIG_SOFTMMU)
1693 else {
1694 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1695 /* IO access: no mapping is done as it will be handled by the
1696 soft MMU */
1697 if (!(env->hflags & HF_SOFTMMU_MASK))
1698 ret = 2;
1699 } else {
1700 void *map_addr;
1701
1702 if (vaddr >= MMAP_AREA_END) {
1703 ret = 2;
1704 } else {
1705 if (prot & PROT_WRITE) {
1706 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1707#if defined(TARGET_HAS_SMC) || 1
1708 first_tb ||
1709#endif
1710 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1711 !cpu_physical_memory_is_dirty(pd))) {
1712 /* ROM: we behave as if code were inside */
1713 /* if code is present, we only map as read only and save the
1714 original mapping */
1715 VirtPageDesc *vp;
1716
1717 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1718 vp->phys_addr = pd;
1719 vp->prot = prot;
1720 vp->valid_tag = virt_valid_tag;
1721 prot &= ~PAGE_WRITE;
1722 }
1723 }
1724 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1725 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1726 if (map_addr == MAP_FAILED) {
1727 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1728 paddr, vaddr);
1729 }
1730 }
1731 }
1732 }
1733#endif
1734 return ret;
1735}
1736
1737/* called from signal handler: invalidate the code and unprotect the
1738 page. Return TRUE if the fault was successfully handled. */
1739int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1740{
1741#if !defined(CONFIG_SOFTMMU)
1742 VirtPageDesc *vp;
1743
1744#if defined(DEBUG_TLB)
1745 printf("page_unprotect: addr=0x%08x\n", addr);
1746#endif
1747 addr &= TARGET_PAGE_MASK;
1748
1749 /* if it is not mapped, no need to worry here */
1750 if (addr >= MMAP_AREA_END)
1751 return 0;
1752 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1753 if (!vp)
1754 return 0;
1755 /* NOTE: in this case, validate_tag is _not_ tested as it
1756 validates only the code TLB */
1757 if (vp->valid_tag != virt_valid_tag)
1758 return 0;
1759 if (!(vp->prot & PAGE_WRITE))
1760 return 0;
1761#if defined(DEBUG_TLB)
1762 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1763 addr, vp->phys_addr, vp->prot);
1764#endif
1765 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1766 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1767 (unsigned long)addr, vp->prot);
1768 /* set the dirty bit */
1769 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1770 /* flush the code inside */
1771 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1772 return 1;
1773#elif defined(VBOX)
1774 addr &= TARGET_PAGE_MASK;
1775
1776 /* if it is not mapped, no need to worry here */
1777 if (addr >= MMAP_AREA_END)
1778 return 0;
1779 return 1;
1780#else
1781 return 0;
1782#endif
1783}
1784
1785#else
1786
1787void tlb_flush(CPUState *env, int flush_global)
1788{
1789}
1790
1791void tlb_flush_page(CPUState *env, target_ulong addr)
1792{
1793}
1794
1795int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1796 target_phys_addr_t paddr, int prot,
1797 int is_user, int is_softmmu)
1798{
1799 return 0;
1800}
1801
1802#ifndef VBOX
1803/* dump memory mappings */
1804void page_dump(FILE *f)
1805{
1806 unsigned long start, end;
1807 int i, j, prot, prot1;
1808 PageDesc *p;
1809
1810 fprintf(f, "%-8s %-8s %-8s %s\n",
1811 "start", "end", "size", "prot");
1812 start = -1;
1813 end = -1;
1814 prot = 0;
1815 for(i = 0; i <= L1_SIZE; i++) {
1816 if (i < L1_SIZE)
1817 p = l1_map[i];
1818 else
1819 p = NULL;
1820 for(j = 0;j < L2_SIZE; j++) {
1821 if (!p)
1822 prot1 = 0;
1823 else
1824 prot1 = p[j].flags;
1825 if (prot1 != prot) {
1826 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1827 if (start != -1) {
1828 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1829 start, end, end - start,
1830 prot & PAGE_READ ? 'r' : '-',
1831 prot & PAGE_WRITE ? 'w' : '-',
1832 prot & PAGE_EXEC ? 'x' : '-');
1833 }
1834 if (prot1 != 0)
1835 start = end;
1836 else
1837 start = -1;
1838 prot = prot1;
1839 }
1840 if (!p)
1841 break;
1842 }
1843 }
1844}
1845#endif /* !VBOX */
1846
1847int page_get_flags(target_ulong address)
1848{
1849 PageDesc *p;
1850
1851 p = page_find(address >> TARGET_PAGE_BITS);
1852 if (!p)
1853 return 0;
1854 return p->flags;
1855}
1856
1857/* modify the flags of a page and invalidate the code if
1858 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1859 depending on PAGE_WRITE */
1860void page_set_flags(target_ulong start, target_ulong end, int flags)
1861{
1862 PageDesc *p;
1863 target_ulong addr;
1864
1865 start = start & TARGET_PAGE_MASK;
1866 end = TARGET_PAGE_ALIGN(end);
1867 if (flags & PAGE_WRITE)
1868 flags |= PAGE_WRITE_ORG;
1869#ifdef VBOX
1870 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
1871#endif
1872 spin_lock(&tb_lock);
1873 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1874 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1875 /* if the write protection is set, then we invalidate the code
1876 inside */
1877 if (!(p->flags & PAGE_WRITE) &&
1878 (flags & PAGE_WRITE) &&
1879 p->first_tb) {
1880 tb_invalidate_phys_page(addr, 0, NULL);
1881 }
1882 p->flags = flags;
1883 }
1884 spin_unlock(&tb_lock);
1885}
1886
1887/* called from signal handler: invalidate the code and unprotect the
1888 page. Return TRUE if the fault was successfully handled. */
1889int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1890{
1891 unsigned int page_index, prot, pindex;
1892 PageDesc *p, *p1;
1893 target_ulong host_start, host_end, addr;
1894
1895 host_start = address & qemu_host_page_mask;
1896 page_index = host_start >> TARGET_PAGE_BITS;
1897 p1 = page_find(page_index);
1898 if (!p1)
1899 return 0;
1900 host_end = host_start + qemu_host_page_size;
1901 p = p1;
1902 prot = 0;
1903 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1904 prot |= p->flags;
1905 p++;
1906 }
1907 /* if the page was really writable, then we change its
1908 protection back to writable */
1909 if (prot & PAGE_WRITE_ORG) {
1910 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1911 if (!(p1[pindex].flags & PAGE_WRITE)) {
1912 mprotect((void *)g2h(host_start), qemu_host_page_size,
1913 (prot & PAGE_BITS) | PAGE_WRITE);
1914 p1[pindex].flags |= PAGE_WRITE;
1915 /* and since the content will be modified, we must invalidate
1916 the corresponding translated code. */
1917 tb_invalidate_phys_page(address, pc, puc);
1918#ifdef DEBUG_TB_CHECK
1919 tb_invalidate_check(address);
1920#endif
1921 return 1;
1922 }
1923 }
1924 return 0;
1925}
1926
1927/* call this function when system calls directly modify a memory area */
1928/* ??? This should be redundant now we have lock_user. */
1929void page_unprotect_range(target_ulong data, target_ulong data_size)
1930{
1931 target_ulong start, end, addr;
1932
1933 start = data;
1934 end = start + data_size;
1935 start &= TARGET_PAGE_MASK;
1936 end = TARGET_PAGE_ALIGN(end);
1937 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1938 page_unprotect(addr, 0, NULL);
1939 }
1940}
1941
1942static inline void tlb_set_dirty(CPUState *env,
1943 unsigned long addr, target_ulong vaddr)
1944{
1945}
1946#endif /* defined(CONFIG_USER_ONLY) */
1947
1948/* register physical memory. 'size' must be a multiple of the target
1949 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1950 io memory page */
1951void cpu_register_physical_memory(target_phys_addr_t start_addr,
1952 unsigned long size,
1953 unsigned long phys_offset)
1954{
1955 target_phys_addr_t addr, end_addr;
1956 PhysPageDesc *p;
1957 CPUState *env;
1958
1959 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1960 end_addr = start_addr + size;
1961 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1962 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1963 p->phys_offset = phys_offset;
1964#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
1965 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1966 (phys_offset & IO_MEM_ROMD))
1967#else
1968 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
1969 || (phys_offset & IO_MEM_ROMD)
1970 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
1971#endif
1972
1973 phys_offset += TARGET_PAGE_SIZE;
1974 }
1975
1976 /* since each CPU stores ram addresses in its TLB cache, we must
1977 reset the modified entries */
1978 /* XXX: slow ! */
1979 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1980 tlb_flush(env, 1);
1981 }
1982}
1983
1984/* XXX: temporary until new memory mapping API */
1985uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1986{
1987 PhysPageDesc *p;
1988
1989 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1990 if (!p)
1991 return IO_MEM_UNASSIGNED;
1992 return p->phys_offset;
1993}
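/* Illustrative sketch: classifying a guest physical page with the value
   returned above, using the same encoding as the rest of this file
   ('gpa' is a hypothetical guest physical address). */
#if 0
    uint32_t pd = cpu_get_physical_page_desc(gpa);
    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* RAM: pd & TARGET_PAGE_MASK is the offset into phys_ram_base */
    } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_UNASSIGNED) {
        /* nothing registered at this address */
    } else {
        /* io memory: (pd >> IO_MEM_SHIFT) indexes io_mem_read/io_mem_write */
    }
#endif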
1994
1995static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1996{
1997#ifdef DEBUG_UNASSIGNED
1998 printf("Unassigned mem read 0x%08x\n", (int)addr);
1999#endif
2000 return 0;
2001}
2002
2003static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2004{
2005#ifdef DEBUG_UNASSIGNED
2006 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2007#endif
2008}
2009
2010static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2011 unassigned_mem_readb,
2012 unassigned_mem_readb,
2013 unassigned_mem_readb,
2014};
2015
2016static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2017 unassigned_mem_writeb,
2018 unassigned_mem_writeb,
2019 unassigned_mem_writeb,
2020};
2021
2022static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2023{
2024 unsigned long ram_addr;
2025 int dirty_flags;
2026#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2027 ram_addr = addr;
2028#elif !defined(VBOX)
2029 ram_addr = addr - (unsigned long)phys_ram_base;
2030#else
2031 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2032#endif
2033#ifdef VBOX
2034 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2035 dirty_flags = 0xff;
2036 else
2037#endif /* VBOX */
2038 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2039 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2040#if !defined(CONFIG_USER_ONLY)
2041 tb_invalidate_phys_page_fast(ram_addr, 1);
2042# ifdef VBOX
2043 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2044 dirty_flags = 0xff;
2045 else
2046# endif /* VBOX */
2047 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2048#endif
2049 }
2050 stb_p((uint8_t *)(long)addr, val);
2051#ifdef USE_KQEMU
2052 if (cpu_single_env->kqemu_enabled &&
2053 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2054 kqemu_modify_page(cpu_single_env, ram_addr);
2055#endif
2056 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2057#ifdef VBOX
2058 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2059#endif /* VBOX */
2060 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2061 /* we remove the notdirty callback only if the code has been
2062 flushed */
2063 if (dirty_flags == 0xff)
2064 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2065}
2066
2067static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2068{
2069 unsigned long ram_addr;
2070 int dirty_flags;
2071#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2072 ram_addr = addr;
2073#elif !defined(VBOX)
2074 ram_addr = addr - (unsigned long)phys_ram_base;
2075#else
2076 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2077#endif
2078#ifdef VBOX
2079 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2080 dirty_flags = 0xff;
2081 else
2082#endif /* VBOX */
2083 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2084 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2085#if !defined(CONFIG_USER_ONLY)
2086 tb_invalidate_phys_page_fast(ram_addr, 2);
2087# ifdef VBOX
2088 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2089 dirty_flags = 0xff;
2090 else
2091# endif /* VBOX */
2092 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2093#endif
2094 }
2095 stw_p((uint8_t *)(long)addr, val);
2096#ifdef USE_KQEMU
2097 if (cpu_single_env->kqemu_enabled &&
2098 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2099 kqemu_modify_page(cpu_single_env, ram_addr);
2100#endif
2101 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2102#ifdef VBOX
2103 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2104#endif
2105 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2106 /* we remove the notdirty callback only if the code has been
2107 flushed */
2108 if (dirty_flags == 0xff)
2109 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2110}
2111
2112static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2113{
2114 unsigned long ram_addr;
2115 int dirty_flags;
2116#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2117 ram_addr = addr;
2118#elif !defined(VBOX)
2119 ram_addr = addr - (unsigned long)phys_ram_base;
2120#else
2121 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2122#endif
2123#ifdef VBOX
2124 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2125 dirty_flags = 0xff;
2126 else
2127#endif /* VBOX */
2128 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2129 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2130#if !defined(CONFIG_USER_ONLY)
2131 tb_invalidate_phys_page_fast(ram_addr, 4);
2132# ifdef VBOX
2133 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2134 dirty_flags = 0xff;
2135 else
2136# endif /* VBOX */
2137 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2138#endif
2139 }
2140 stl_p((uint8_t *)(long)addr, val);
2141#ifdef USE_KQEMU
2142 if (cpu_single_env->kqemu_enabled &&
2143 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2144 kqemu_modify_page(cpu_single_env, ram_addr);
2145#endif
2146 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2147#ifdef VBOX
2148 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2149#endif
2150 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2151 /* we remove the notdirty callback only if the code has been
2152 flushed */
2153 if (dirty_flags == 0xff)
2154 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2155}
2156
2157static CPUReadMemoryFunc *error_mem_read[3] = {
2158 NULL, /* never used */
2159 NULL, /* never used */
2160 NULL, /* never used */
2161};
2162
2163static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2164 notdirty_mem_writeb,
2165 notdirty_mem_writew,
2166 notdirty_mem_writel,
2167};
2168
2169static void io_mem_init(void)
2170{
2171 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2172 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2173 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2174#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
2175 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2176 io_mem_nb = 6;
2177#else
2178 io_mem_nb = 5;
2179#endif
2180
2181#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2182 /* alloc dirty bits array */
2183 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2184 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2185#endif /* !VBOX */
2186}
2187
2188/* mem_read and mem_write are arrays of functions containing the
2189   functions to access byte (index 0), word (index 1) and dword (index
2190   2). All functions must be supplied. If io_index is positive, the
2191   corresponding io zone is modified. If it is zero or negative, a new
2192   io zone is allocated. The return value can be used with
2193   cpu_register_physical_memory(). (-1) is returned on error. */
2194int cpu_register_io_memory(int io_index,
2195 CPUReadMemoryFunc **mem_read,
2196 CPUWriteMemoryFunc **mem_write,
2197 void *opaque)
2198{
2199 int i;
2200
2201 if (io_index <= 0) {
2202 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2203 return -1;
2204 io_index = io_mem_nb++;
2205 } else {
2206 if (io_index >= IO_MEM_NB_ENTRIES)
2207 return -1;
2208 }
2209
2210 for(i = 0;i < 3; i++) {
2211 io_mem_read[io_index][i] = mem_read[i];
2212 io_mem_write[io_index][i] = mem_write[i];
2213 }
2214 io_mem_opaque[io_index] = opaque;
2215 return io_index << IO_MEM_SHIFT;
2216}
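/* Illustrative sketch: registering a one-page MMIO region for a hypothetical
   device. 'mydev_read*'/'mydev_write*' and 'MYDEV_BASE' are made-up names;
   the handler arrays follow the byte/word/dword convention described above. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(void *opaque)
{
    int io_idx = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    if (io_idx != -1)
        cpu_register_physical_memory(MYDEV_BASE, TARGET_PAGE_SIZE, io_idx);
}
#endif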
2217
2218CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2219{
2220 return io_mem_write[io_index >> IO_MEM_SHIFT];
2221}
2222
2223CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2224{
2225 return io_mem_read[io_index >> IO_MEM_SHIFT];
2226}
2227
2228/* physical memory access (slow version, mainly for debug) */
2229#if defined(CONFIG_USER_ONLY)
2230void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2231 int len, int is_write)
2232{
2233 int l, flags;
2234 target_ulong page;
2235 void * p;
2236
2237 while (len > 0) {
2238 page = addr & TARGET_PAGE_MASK;
2239 l = (page + TARGET_PAGE_SIZE) - addr;
2240 if (l > len)
2241 l = len;
2242 flags = page_get_flags(page);
2243 if (!(flags & PAGE_VALID))
2244 return;
2245 if (is_write) {
2246 if (!(flags & PAGE_WRITE))
2247 return;
2248            p = lock_user(addr, l, 0);
2249            memcpy(p, buf, l);
2250            unlock_user(p, addr, l);
2251 } else {
2252 if (!(flags & PAGE_READ))
2253 return;
2254            p = lock_user(addr, l, 1);
2255            memcpy(buf, p, l);
2256            unlock_user(p, addr, 0);
2257 }
2258 len -= l;
2259 buf += l;
2260 addr += l;
2261 }
2262}
2263
2264#else
2265void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2266 int len, int is_write)
2267{
2268 int l, io_index;
2269 uint8_t *ptr;
2270 uint32_t val;
2271 target_phys_addr_t page;
2272 unsigned long pd;
2273 PhysPageDesc *p;
2274
2275 while (len > 0) {
2276 page = addr & TARGET_PAGE_MASK;
2277 l = (page + TARGET_PAGE_SIZE) - addr;
2278 if (l > len)
2279 l = len;
2280 p = phys_page_find(page >> TARGET_PAGE_BITS);
2281 if (!p) {
2282 pd = IO_MEM_UNASSIGNED;
2283 } else {
2284 pd = p->phys_offset;
2285 }
2286
2287 if (is_write) {
2288 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2289 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2290 /* XXX: could force cpu_single_env to NULL to avoid
2291 potential bugs */
2292 if (l >= 4 && ((addr & 3) == 0)) {
2293 /* 32 bit write access */
2294#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2295 val = ldl_p(buf);
2296#else
2297 val = *(const uint32_t *)buf;
2298#endif
2299 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2300 l = 4;
2301 } else if (l >= 2 && ((addr & 1) == 0)) {
2302 /* 16 bit write access */
2303#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2304 val = lduw_p(buf);
2305#else
2306 val = *(const uint16_t *)buf;
2307#endif
2308 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2309 l = 2;
2310 } else {
2311 /* 8 bit write access */
2312#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2313 val = ldub_p(buf);
2314#else
2315 val = *(const uint8_t *)buf;
2316#endif
2317 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2318 l = 1;
2319 }
2320 } else {
2321 unsigned long addr1;
2322 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2323 /* RAM case */
2324#ifdef VBOX
2325 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
2326#else
2327 ptr = phys_ram_base + addr1;
2328 memcpy(ptr, buf, l);
2329#endif
2330 if (!cpu_physical_memory_is_dirty(addr1)) {
2331 /* invalidate code */
2332 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2333 /* set dirty bit */
2334#ifdef VBOX
2335 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2336#endif
2337 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2338 (0xff & ~CODE_DIRTY_FLAG);
2339 }
2340 }
2341 } else {
2342 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2343 !(pd & IO_MEM_ROMD)) {
2344 /* I/O case */
2345 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2346 if (l >= 4 && ((addr & 3) == 0)) {
2347 /* 32 bit read access */
2348 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2349#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2350 stl_p(buf, val);
2351#else
2352 *(uint32_t *)buf = val;
2353#endif
2354 l = 4;
2355 } else if (l >= 2 && ((addr & 1) == 0)) {
2356 /* 16 bit read access */
2357 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2358#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2359 stw_p(buf, val);
2360#else
2361 *(uint16_t *)buf = val;
2362#endif
2363 l = 2;
2364 } else {
2365 /* 8 bit read access */
2366 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2367#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2368 stb_p(buf, val);
2369#else
2370 *(uint8_t *)buf = val;
2371#endif
2372 l = 1;
2373 }
2374 } else {
2375 /* RAM case */
2376#ifdef VBOX
2377 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
2378#else
2379 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2380 (addr & ~TARGET_PAGE_MASK);
2381 memcpy(buf, ptr, l);
2382#endif
2383 }
2384 }
2385 len -= l;
2386 buf += l;
2387 addr += l;
2388 }
2389}
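/* Illustrative sketch: copying a small buffer to and from guest physical
   memory through the slow path above ('gpa' is a hypothetical guest
   physical address provided by the caller). */
#if 0
    uint8_t buf[64];
    /* read 64 bytes starting at guest physical address 'gpa' */
    cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0 /* is_write */);
    /* ... inspect or modify buf ... */
    /* write it back; translated code overlapping the range is invalidated */
    cpu_physical_memory_rw(gpa, buf, sizeof(buf), 1 /* is_write */);
#endif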
2390
2391#ifndef VBOX
2392/* used for ROM loading : can write in RAM and ROM */
2393void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2394 const uint8_t *buf, int len)
2395{
2396 int l;
2397 uint8_t *ptr;
2398 target_phys_addr_t page;
2399 unsigned long pd;
2400 PhysPageDesc *p;
2401
2402 while (len > 0) {
2403 page = addr & TARGET_PAGE_MASK;
2404 l = (page + TARGET_PAGE_SIZE) - addr;
2405 if (l > len)
2406 l = len;
2407 p = phys_page_find(page >> TARGET_PAGE_BITS);
2408 if (!p) {
2409 pd = IO_MEM_UNASSIGNED;
2410 } else {
2411 pd = p->phys_offset;
2412 }
2413
2414 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2415 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2416 !(pd & IO_MEM_ROMD)) {
2417 /* do nothing */
2418 } else {
2419 unsigned long addr1;
2420 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2421 /* ROM/RAM case */
2422 ptr = phys_ram_base + addr1;
2423 memcpy(ptr, buf, l);
2424 }
2425 len -= l;
2426 buf += l;
2427 addr += l;
2428 }
2429}
2430#endif /* !VBOX */
2431
2432
2433/* warning: addr must be aligned */
2434uint32_t ldl_phys(target_phys_addr_t addr)
2435{
2436 int io_index;
2437 uint8_t *ptr;
2438 uint32_t val;
2439 unsigned long pd;
2440 PhysPageDesc *p;
2441
2442 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2443 if (!p) {
2444 pd = IO_MEM_UNASSIGNED;
2445 } else {
2446 pd = p->phys_offset;
2447 }
2448
2449 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2450 !(pd & IO_MEM_ROMD)) {
2451 /* I/O case */
2452 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2453 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2454 } else {
2455 /* RAM case */
2456#ifndef VBOX
2457 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2458 (addr & ~TARGET_PAGE_MASK);
2459 val = ldl_p(ptr);
2460#else
2461 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2462#endif
2463 }
2464 return val;
2465}
2466
2467/* warning: addr must be aligned */
2468uint64_t ldq_phys(target_phys_addr_t addr)
2469{
2470 int io_index;
2471 uint8_t *ptr;
2472 uint64_t val;
2473 unsigned long pd;
2474 PhysPageDesc *p;
2475
2476 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2477 if (!p) {
2478 pd = IO_MEM_UNASSIGNED;
2479 } else {
2480 pd = p->phys_offset;
2481 }
2482
2483 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2484 !(pd & IO_MEM_ROMD)) {
2485 /* I/O case */
2486 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2487#ifdef TARGET_WORDS_BIGENDIAN
2488 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2489 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2490#else
2491 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2492 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2493#endif
2494 } else {
2495 /* RAM case */
2496#ifndef VBOX
2497 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2498 (addr & ~TARGET_PAGE_MASK);
2499 val = ldq_p(ptr);
2500#else
2501 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2502#endif
2503 }
2504 return val;
2505}
2506
2507/* XXX: optimize */
2508uint32_t ldub_phys(target_phys_addr_t addr)
2509{
2510 uint8_t val;
2511 cpu_physical_memory_read(addr, &val, 1);
2512 return val;
2513}
2514
2515/* XXX: optimize */
2516uint32_t lduw_phys(target_phys_addr_t addr)
2517{
2518 uint16_t val;
2519 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2520 return tswap16(val);
2521}
2522
2523/* warning: addr must be aligned. The ram page is not marked as dirty
2524   and the code inside is not invalidated. It is useful if the dirty
2525   bits are used to track modified PTEs */
2526void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2527{
2528 int io_index;
2529 uint8_t *ptr;
2530 unsigned long pd;
2531 PhysPageDesc *p;
2532
2533 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2534 if (!p) {
2535 pd = IO_MEM_UNASSIGNED;
2536 } else {
2537 pd = p->phys_offset;
2538 }
2539
2540 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2541 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2542 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2543 } else {
2544#ifndef VBOX
2545 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2546 (addr & ~TARGET_PAGE_MASK);
2547 stl_p(ptr, val);
2548#else
2549 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2550#endif
2551 }
2552}
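/* Illustrative sketch: updating a guest page table entry in place, as an
   MMU helper might, without marking the page dirty or invalidating
   translated code ('pte_addr' and the bit value are hypothetical). */
#if 0
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x20;                      /* e.g. set an accessed bit */
    stl_phys_notdirty(pte_addr, pte); /* PTE dirty tracking stays intact */
#endif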
2553
2554/* warning: addr must be aligned */
2555void stl_phys(target_phys_addr_t addr, uint32_t val)
2556{
2557 int io_index;
2558 uint8_t *ptr;
2559 unsigned long pd;
2560 PhysPageDesc *p;
2561
2562 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2563 if (!p) {
2564 pd = IO_MEM_UNASSIGNED;
2565 } else {
2566 pd = p->phys_offset;
2567 }
2568
2569 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2570 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2571 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2572 } else {
2573 unsigned long addr1;
2574 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2575 /* RAM case */
2576#ifndef VBOX
2577 ptr = phys_ram_base + addr1;
2578 stl_p(ptr, val);
2579#else
2580 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2581#endif
2582 if (!cpu_physical_memory_is_dirty(addr1)) {
2583 /* invalidate code */
2584 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2585 /* set dirty bit */
2586#ifdef VBOX
2587 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2588#endif
2589 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2590 (0xff & ~CODE_DIRTY_FLAG);
2591 }
2592 }
2593}
2594
2595/* XXX: optimize */
2596void stb_phys(target_phys_addr_t addr, uint32_t val)
2597{
2598 uint8_t v = val;
2599 cpu_physical_memory_write(addr, &v, 1);
2600}
2601
2602/* XXX: optimize */
2603void stw_phys(target_phys_addr_t addr, uint32_t val)
2604{
2605 uint16_t v = tswap16(val);
2606 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2607}
2608
2609/* XXX: optimize */
2610void stq_phys(target_phys_addr_t addr, uint64_t val)
2611{
2612 val = tswap64(val);
2613 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2614}
2615
2616#endif
2617
2618#ifndef VBOX
2619/* virtual memory access for debug */
2620int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2621 uint8_t *buf, int len, int is_write)
2622{
2623 int l;
2624 target_ulong page, phys_addr;
2625
2626 while (len > 0) {
2627 page = addr & TARGET_PAGE_MASK;
2628 phys_addr = cpu_get_phys_page_debug(env, page);
2629 /* if no physical page mapped, return an error */
2630 if (phys_addr == -1)
2631 return -1;
2632 l = (page + TARGET_PAGE_SIZE) - addr;
2633 if (l > len)
2634 l = len;
2635 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2636 buf, l, is_write);
2637 len -= l;
2638 buf += l;
2639 addr += l;
2640 }
2641 return 0;
2642}
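/* Illustrative sketch: how a debugger stub might read guest *virtual*
   memory; an unmapped address simply reports an error ('env' and 'vaddr'
   are assumed to be supplied by the caller). */
#if 0
    uint8_t insn[16];
    if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
        /* no physical page mapped at vaddr: report an access error */
    }
#endif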
2643
2644void dump_exec_info(FILE *f,
2645 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2646{
2647 int i, target_code_size, max_target_code_size;
2648 int direct_jmp_count, direct_jmp2_count, cross_page;
2649 TranslationBlock *tb;
2650
2651 target_code_size = 0;
2652 max_target_code_size = 0;
2653 cross_page = 0;
2654 direct_jmp_count = 0;
2655 direct_jmp2_count = 0;
2656 for(i = 0; i < nb_tbs; i++) {
2657 tb = &tbs[i];
2658 target_code_size += tb->size;
2659 if (tb->size > max_target_code_size)
2660 max_target_code_size = tb->size;
2661 if (tb->page_addr[1] != -1)
2662 cross_page++;
2663 if (tb->tb_next_offset[0] != 0xffff) {
2664 direct_jmp_count++;
2665 if (tb->tb_next_offset[1] != 0xffff) {
2666 direct_jmp2_count++;
2667 }
2668 }
2669 }
2670 /* XXX: avoid using doubles ? */
2671 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2672 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2673 nb_tbs ? target_code_size / nb_tbs : 0,
2674 max_target_code_size);
2675 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2676 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2677 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2678 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2679 cross_page,
2680 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2681 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2682 direct_jmp_count,
2683 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2684 direct_jmp2_count,
2685 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2686 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2687 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2688 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2689}
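/* Illustrative sketch: dumping the statistics above to stderr. fprintf()
   matches the cpu_fprintf callback signature, so it can be passed directly. */
#if 0
    dump_exec_info(stderr, fprintf);
#endif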
2690#endif /* !VBOX */
2691
2692#if !defined(CONFIG_USER_ONLY)
2693
2694#define MMUSUFFIX _cmmu
2695#define GETPC() NULL
2696#define env cpu_single_env
2697#define SOFTMMU_CODE_ACCESS
2698
2699#define SHIFT 0
2700#include "softmmu_template.h"
2701
2702#define SHIFT 1
2703#include "softmmu_template.h"
2704
2705#define SHIFT 2
2706#include "softmmu_template.h"
2707
2708#define SHIFT 3
2709#include "softmmu_template.h"
2710
2711#undef env
2712
2713#endif