VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 18661

Last change on this file since 18661 was 18661, checked in by vboxsync, 16 years ago

src/recompiler: Clean out the VBOX_WITH_NEW_PHYS_CODE #ifdefs.

  • Property svn:eol-style set to native
File size: 78.6 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <inttypes.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
60//#define DEBUG_TB_INVALIDATE
61//#define DEBUG_FLUSH
62//#define DEBUG_TLB
63//#define DEBUG_UNASSIGNED
64
65/* make various TB consistency checks */
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74/* threshold to flush the translated code buffer */
75#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
76
77#define SMC_BITMAP_USE_THRESHOLD 10
78
79#define MMAP_AREA_START 0x00000000
80#define MMAP_AREA_END 0xa8000000
81
82#if defined(TARGET_SPARC64)
83#define TARGET_PHYS_ADDR_SPACE_BITS 41
84#elif defined(TARGET_PPC64)
85#define TARGET_PHYS_ADDR_SPACE_BITS 42
86#else
87/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
88#define TARGET_PHYS_ADDR_SPACE_BITS 32
89#endif
90
91TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
92TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
93int nb_tbs;
94/* any access to the tbs or the page table must use this lock */
95spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
96
97uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
98#if defined(__MINGW32__)
99 __attribute__((aligned (16)));
100#else
101 __attribute__((aligned (32)));
102#endif
103uint8_t *code_gen_ptr;
104
105#ifndef VBOX
106int phys_ram_size;
107int phys_ram_fd;
109#else /* VBOX */
110RTGCPHYS phys_ram_size;
111/* we have memory ranges (the high PC-BIOS mapping) which
112 causes some pages to fall outside the dirty map here. */
113uint32_t phys_ram_dirty_size;
114#endif /* VBOX */
115#if !defined(VBOX)
116uint8_t *phys_ram_base;
117#endif
118uint8_t *phys_ram_dirty;
119
120CPUState *first_cpu;
121/* current CPU in the current thread. It is only valid inside
122 cpu_exec() */
123CPUState *cpu_single_env;
124
125typedef struct PageDesc {
126 /* list of TBs intersecting this ram page */
127 TranslationBlock *first_tb;
128 /* in order to optimize self modifying code, we count the number
129 of lookups we do to a given page to use a bitmap */
130 unsigned int code_write_count;
131 uint8_t *code_bitmap;
132#if defined(CONFIG_USER_ONLY)
133 unsigned long flags;
134#endif
135} PageDesc;
136
137typedef struct PhysPageDesc {
138 /* offset in host memory of the page + io_index in the low 12 bits */
139 uint32_t phys_offset;
140} PhysPageDesc;
141
142#define L2_BITS 10
143#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
144
145#define L1_SIZE (1 << L1_BITS)
146#define L2_SIZE (1 << L2_BITS)
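/* The virtual page index (addr >> TARGET_PAGE_BITS) is split into an L1 index
   (upper L1_BITS) and an L2 index (lower L2_BITS): l1_map[] holds lazily
   allocated arrays of L2_SIZE PageDesc entries.  With 4 KB target pages
   (TARGET_PAGE_BITS == 12) both levels index 1024 entries, covering a 32-bit
   virtual address space.  Example: for address 0x12345678 the page index is
   0x12345, the L1 index is 0x12345 >> 10 = 0x48 and the L2 index is 0x345. */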
147
148static void io_mem_init(void);
149
150unsigned long qemu_real_host_page_size;
151unsigned long qemu_host_page_bits;
152unsigned long qemu_host_page_size;
153unsigned long qemu_host_page_mask;
154
155/* XXX: for system emulation, it could just be an array */
156static PageDesc *l1_map[L1_SIZE];
157PhysPageDesc **l1_phys_map;
158
159/* io memory support */
160CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
161CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
162void *io_mem_opaque[IO_MEM_NB_ENTRIES];
163static int io_mem_nb;
164
165#ifndef VBOX
166/* log support */
167char *logfilename = "/tmp/qemu.log";
168#endif /* !VBOX */
169FILE *logfile;
170int loglevel;
171
172/* statistics */
173#ifndef VBOX
174static int tlb_flush_count;
175static int tb_flush_count;
176static int tb_phys_invalidate_count;
177#else /* VBOX */
178# ifdef VBOX_WITH_STATISTICS
179uint32_t tlb_flush_count;
180uint32_t tb_flush_count;
181uint32_t tb_phys_invalidate_count;
182# endif
183#endif /* VBOX */
184
185static void page_init(void)
186{
187 /* NOTE: we can always suppose that qemu_host_page_size >=
188 TARGET_PAGE_SIZE */
189#ifdef VBOX
190 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
191 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
192 qemu_real_host_page_size = PAGE_SIZE;
193#else /* !VBOX */
194#ifdef _WIN32
195 {
196 SYSTEM_INFO system_info;
197 DWORD old_protect;
198
199 GetSystemInfo(&system_info);
200 qemu_real_host_page_size = system_info.dwPageSize;
201
202 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
203 PAGE_EXECUTE_READWRITE, &old_protect);
204 }
205#else
206 qemu_real_host_page_size = getpagesize();
207 {
208 unsigned long start, end;
209
210 start = (unsigned long)code_gen_buffer;
211 start &= ~(qemu_real_host_page_size - 1);
212
213 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
214 end += qemu_real_host_page_size - 1;
215 end &= ~(qemu_real_host_page_size - 1);
216
217 mprotect((void *)start, end - start,
218 PROT_READ | PROT_WRITE | PROT_EXEC);
219 }
220#endif
221#endif /* !VBOX */
222
223 if (qemu_host_page_size == 0)
224 qemu_host_page_size = qemu_real_host_page_size;
225 if (qemu_host_page_size < TARGET_PAGE_SIZE)
226 qemu_host_page_size = TARGET_PAGE_SIZE;
227 qemu_host_page_bits = 0;
228 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
229 qemu_host_page_bits++;
230 qemu_host_page_mask = ~(qemu_host_page_size - 1);
231 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
232 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
233}
234
235static inline PageDesc *page_find_alloc(unsigned int index)
236{
237 PageDesc **lp, *p;
238
239 lp = &l1_map[index >> L2_BITS];
240 p = *lp;
241 if (!p) {
242 /* allocate if not found */
243 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
244 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
245 *lp = p;
246 }
247 return p + (index & (L2_SIZE - 1));
248}
249
250static inline PageDesc *page_find(unsigned int index)
251{
252 PageDesc *p;
253
254 p = l1_map[index >> L2_BITS];
255 if (!p)
256 return 0;
257 return p + (index & (L2_SIZE - 1));
258}
259
260static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
261{
262 void **lp, **p;
263 PhysPageDesc *pd;
264
265 p = (void **)l1_phys_map;
266#if TARGET_PHYS_ADDR_SPACE_BITS > 32
267
268#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
269#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
270#endif
271 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
272 p = *lp;
273 if (!p) {
274 /* allocate if not found */
275 if (!alloc)
276 return NULL;
277 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
278 memset(p, 0, sizeof(void *) * L1_SIZE);
279 *lp = p;
280 }
281#endif
282 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
283 pd = *lp;
284 if (!pd) {
285 int i;
286 /* allocate if not found */
287 if (!alloc)
288 return NULL;
289 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
290 *lp = pd;
291 for (i = 0; i < L2_SIZE; i++)
292 pd[i].phys_offset = IO_MEM_UNASSIGNED;
293 }
294 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
295}
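/* The physical page map mirrors the scheme above: l1_phys_map[] points to
   lazily allocated arrays of PhysPageDesc, with one extra indirection level
   when TARGET_PHYS_ADDR_SPACE_BITS exceeds 32.  Unmapped entries are
   initialised to IO_MEM_UNASSIGNED, and the low bits of phys_offset carry
   the io_index (or RAM/ROM type) for the page. */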
296
297static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
298{
299 return phys_page_find_alloc(index, 0);
300}
301
302#if !defined(CONFIG_USER_ONLY)
303static void tlb_protect_code(ram_addr_t ram_addr);
304static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
305 target_ulong vaddr);
306#endif
307
308void cpu_exec_init(CPUState *env)
309{
310 CPUState **penv;
311 int cpu_index;
312
313 if (!code_gen_ptr) {
314 code_gen_ptr = code_gen_buffer;
315 page_init();
316 io_mem_init();
317 }
318 env->next_cpu = NULL;
319 penv = &first_cpu;
320 cpu_index = 0;
321 while (*penv != NULL) {
322 penv = (CPUState **)&(*penv)->next_cpu;
323 cpu_index++;
324 }
325 env->cpu_index = cpu_index;
326 *penv = env;
327}
328
329static inline void invalidate_page_bitmap(PageDesc *p)
330{
331 if (p->code_bitmap) {
332 qemu_free(p->code_bitmap);
333 p->code_bitmap = NULL;
334 }
335 p->code_write_count = 0;
336}
337
338/* set to NULL all the 'first_tb' fields in all PageDescs */
339static void page_flush_tb(void)
340{
341 int i, j;
342 PageDesc *p;
343
344 for(i = 0; i < L1_SIZE; i++) {
345 p = l1_map[i];
346 if (p) {
347 for(j = 0; j < L2_SIZE; j++) {
348 p->first_tb = NULL;
349 invalidate_page_bitmap(p);
350 p++;
351 }
352 }
353 }
354}
355
356/* flush all the translation blocks */
357/* XXX: tb_flush is currently not thread safe */
358void tb_flush(CPUState *env1)
359{
360 CPUState *env;
361#if defined(DEBUG_FLUSH)
362 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
363 code_gen_ptr - code_gen_buffer,
364 nb_tbs,
365 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
366#endif
367 nb_tbs = 0;
368
369 for(env = first_cpu; env != NULL; env = env->next_cpu) {
370 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
371 }
372
373 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
374 page_flush_tb();
375
376 code_gen_ptr = code_gen_buffer;
377 /* XXX: flush processor icache at this point if cache flush is
378 expensive */
379#if !defined(VBOX) || defined(VBOX_WITH_STATISTICS)
380 tb_flush_count++;
381#endif
382}
383
384#ifdef DEBUG_TB_CHECK
385
386static void tb_invalidate_check(unsigned long address)
387{
388 TranslationBlock *tb;
389 int i;
390 address &= TARGET_PAGE_MASK;
391 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
392 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
393 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
394 address >= tb->pc + tb->size)) {
395 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
396 address, (long)tb->pc, tb->size);
397 }
398 }
399 }
400}
401
402/* verify that all the pages have correct rights for code */
403static void tb_page_check(void)
404{
405 TranslationBlock *tb;
406 int i, flags1, flags2;
407
408 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
409 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
410 flags1 = page_get_flags(tb->pc);
411 flags2 = page_get_flags(tb->pc + tb->size - 1);
412 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
413 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
414 (long)tb->pc, tb->size, flags1, flags2);
415 }
416 }
417 }
418}
419
420void tb_jmp_check(TranslationBlock *tb)
421{
422 TranslationBlock *tb1;
423 unsigned int n1;
424
425 /* suppress any remaining jumps to this TB */
426 tb1 = tb->jmp_first;
427 for(;;) {
428 n1 = (long)tb1 & 3;
429 tb1 = (TranslationBlock *)((long)tb1 & ~3);
430 if (n1 == 2)
431 break;
432 tb1 = tb1->jmp_next[n1];
433 }
434 /* check end of list */
435 if (tb1 != tb) {
436 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
437 }
438}
439
440#endif
441
442/* invalidate one TB */
443static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
444 int next_offset)
445{
446 TranslationBlock *tb1;
447 for(;;) {
448 tb1 = *ptb;
449 if (tb1 == tb) {
450 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
451 break;
452 }
453 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
454 }
455}
456
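/* Several of the lists below store a 2-bit tag in the low bits of each
   TranslationBlock pointer: values 0 and 1 identify which of the TB's two
   page slots (page_next[]) or jump slots (jmp_next[]) the link belongs to,
   while 2 marks the end of a circular list (jmp_first points back to the
   owning TB with tag 2).  Hence the recurring '& 3' / '& ~3' masking. */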
457static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
458{
459 TranslationBlock *tb1;
460 unsigned int n1;
461
462 for(;;) {
463 tb1 = *ptb;
464 n1 = (long)tb1 & 3;
465 tb1 = (TranslationBlock *)((long)tb1 & ~3);
466 if (tb1 == tb) {
467 *ptb = tb1->page_next[n1];
468 break;
469 }
470 ptb = &tb1->page_next[n1];
471 }
472}
473
474static inline void tb_jmp_remove(TranslationBlock *tb, int n)
475{
476 TranslationBlock *tb1, **ptb;
477 unsigned int n1;
478
479 ptb = &tb->jmp_next[n];
480 tb1 = *ptb;
481 if (tb1) {
482 /* find tb(n) in circular list */
483 for(;;) {
484 tb1 = *ptb;
485 n1 = (long)tb1 & 3;
486 tb1 = (TranslationBlock *)((long)tb1 & ~3);
487 if (n1 == n && tb1 == tb)
488 break;
489 if (n1 == 2) {
490 ptb = &tb1->jmp_first;
491 } else {
492 ptb = &tb1->jmp_next[n1];
493 }
494 }
495 /* now we can suppress tb(n) from the list */
496 *ptb = tb->jmp_next[n];
497
498 tb->jmp_next[n] = NULL;
499 }
500}
501
502/* reset the jump entry 'n' of a TB so that it is not chained to
503 another TB */
504static inline void tb_reset_jump(TranslationBlock *tb, int n)
505{
506 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
507}
508
509static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
510{
511 CPUState *env;
512 PageDesc *p;
513 unsigned int h, n1;
514 target_ulong phys_pc;
515 TranslationBlock *tb1, *tb2;
516
517 /* remove the TB from the hash list */
518 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
519 h = tb_phys_hash_func(phys_pc);
520 tb_remove(&tb_phys_hash[h], tb,
521 offsetof(TranslationBlock, phys_hash_next));
522
523 /* remove the TB from the page list */
524 if (tb->page_addr[0] != page_addr) {
525 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
526 tb_page_remove(&p->first_tb, tb);
527 invalidate_page_bitmap(p);
528 }
529 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
530 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
531 tb_page_remove(&p->first_tb, tb);
532 invalidate_page_bitmap(p);
533 }
534
535 tb_invalidated_flag = 1;
536
537 /* remove the TB from each CPU's tb_jmp_cache */
538 h = tb_jmp_cache_hash_func(tb->pc);
539 for(env = first_cpu; env != NULL; env = env->next_cpu) {
540 if (env->tb_jmp_cache[h] == tb)
541 env->tb_jmp_cache[h] = NULL;
542 }
543
544 /* suppress this TB from the two jump lists */
545 tb_jmp_remove(tb, 0);
546 tb_jmp_remove(tb, 1);
547
548 /* suppress any remaining jumps to this TB */
549 tb1 = tb->jmp_first;
550 for(;;) {
551 n1 = (long)tb1 & 3;
552 if (n1 == 2)
553 break;
554 tb1 = (TranslationBlock *)((long)tb1 & ~3);
555 tb2 = tb1->jmp_next[n1];
556 tb_reset_jump(tb1, n1);
557 tb1->jmp_next[n1] = NULL;
558 tb1 = tb2;
559 }
560 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
561
562#if !defined(VBOX) || defined(VBOX_WITH_STATISTICS)
563 tb_phys_invalidate_count++;
564#endif
565}
566
567#ifdef VBOX
568void tb_invalidate_virt(CPUState *env, uint32_t eip)
569{
570# if 1
571 tb_flush(env);
572# else
573 uint8_t *cs_base, *pc;
574 unsigned int flags, h, phys_pc;
575 TranslationBlock *tb, **ptb;
576
577 flags = env->hflags;
578 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
579 cs_base = env->segs[R_CS].base;
580 pc = cs_base + eip;
581
582 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
583 flags);
584
585 if(tb)
586 {
587# ifdef DEBUG
588 printf("invalidating TB (%08X) at %08X\n", tb, eip);
589# endif
590 tb_invalidate(tb);
591 //Note: this will leak TBs, but the whole cache will be flushed
592 // when it happens too often
593 tb->pc = 0;
594 tb->cs_base = 0;
595 tb->flags = 0;
596 }
597# endif
598}
599
600# ifdef VBOX_STRICT
601/**
602 * Gets the page offset.
603 */
604unsigned long get_phys_page_offset(target_ulong addr)
605{
606 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
607 return p ? p->phys_offset : 0;
608}
609# endif /* VBOX_STRICT */
610#endif /* VBOX */
611
612static inline void set_bits(uint8_t *tab, int start, int len)
613{
614 int end, mask, end1;
615
616 end = start + len;
617 tab += start >> 3;
618 mask = 0xff << (start & 7);
619 if ((start & ~7) == (end & ~7)) {
620 if (start < end) {
621 mask &= ~(0xff << (end & 7));
622 *tab |= mask;
623 }
624 } else {
625 *tab++ |= mask;
626 start = (start + 8) & ~7;
627 end1 = end & ~7;
628 while (start < end1) {
629 *tab++ = 0xff;
630 start += 8;
631 }
632 if (start < end) {
633 mask = ~(0xff << (end & 7));
634 *tab |= mask;
635 }
636 }
637}
638
639static void build_page_bitmap(PageDesc *p)
640{
641 int n, tb_start, tb_end;
642 TranslationBlock *tb;
643
644 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
645 if (!p->code_bitmap)
646 return;
647 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
648
649 tb = p->first_tb;
650 while (tb != NULL) {
651 n = (long)tb & 3;
652 tb = (TranslationBlock *)((long)tb & ~3);
653 /* NOTE: this is subtle as a TB may span two physical pages */
654 if (n == 0) {
655 /* NOTE: tb_end may be after the end of the page, but
656 it is not a problem */
657 tb_start = tb->pc & ~TARGET_PAGE_MASK;
658 tb_end = tb_start + tb->size;
659 if (tb_end > TARGET_PAGE_SIZE)
660 tb_end = TARGET_PAGE_SIZE;
661 } else {
662 tb_start = 0;
663 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
664 }
665 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
666 tb = tb->page_next[n];
667 }
668}
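/* The code bitmap has one bit per byte of the target page; a set bit means
   that byte lies inside at least one translated block.  It is only built
   once a page has seen SMC_BITMAP_USE_THRESHOLD write faults, and lets
   tb_invalidate_phys_page_fast() skip writes that do not touch any
   translated code. */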
669
670#ifdef TARGET_HAS_PRECISE_SMC
671
672static void tb_gen_code(CPUState *env,
673 target_ulong pc, target_ulong cs_base, int flags,
674 int cflags)
675{
676 TranslationBlock *tb;
677 uint8_t *tc_ptr;
678 target_ulong phys_pc, phys_page2, virt_page2;
679 int code_gen_size;
680
681 phys_pc = get_phys_addr_code(env, pc);
682 tb = tb_alloc(pc);
683 if (!tb) {
684 /* flush must be done */
685 tb_flush(env);
686 /* cannot fail at this point */
687 tb = tb_alloc(pc);
688 }
689 tc_ptr = code_gen_ptr;
690 tb->tc_ptr = tc_ptr;
691 tb->cs_base = cs_base;
692 tb->flags = flags;
693 tb->cflags = cflags;
694 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
695 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
696
697 /* check next page if needed */
698 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
699 phys_page2 = -1;
700 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
701 phys_page2 = get_phys_addr_code(env, virt_page2);
702 }
703 tb_link_phys(tb, phys_pc, phys_page2);
704}
705#endif
706
707/* invalidate all TBs which intersect with the target physical page
708 starting in range [start;end[. NOTE: start and end must refer to
709 the same physical page. 'is_cpu_write_access' should be true if called
710 from a real cpu write access: the virtual CPU will exit the current
711 TB if code is modified inside this TB. */
712void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
713 int is_cpu_write_access)
714{
715 int n, current_tb_modified, current_tb_not_found, current_flags;
716 CPUState *env = cpu_single_env;
717 PageDesc *p;
718 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
719 target_ulong tb_start, tb_end;
720 target_ulong current_pc, current_cs_base;
721
722 p = page_find(start >> TARGET_PAGE_BITS);
723 if (!p)
724 return;
725 if (!p->code_bitmap &&
726 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
727 is_cpu_write_access) {
728 /* build code bitmap */
729 build_page_bitmap(p);
730 }
731
732 /* we remove all the TBs in the range [start, end[ */
733 /* XXX: see if in some cases it could be faster to invalidate all the code */
734 current_tb_not_found = is_cpu_write_access;
735 current_tb_modified = 0;
736 current_tb = NULL; /* avoid warning */
737 current_pc = 0; /* avoid warning */
738 current_cs_base = 0; /* avoid warning */
739 current_flags = 0; /* avoid warning */
740 tb = p->first_tb;
741 while (tb != NULL) {
742 n = (long)tb & 3;
743 tb = (TranslationBlock *)((long)tb & ~3);
744 tb_next = tb->page_next[n];
745 /* NOTE: this is subtle as a TB may span two physical pages */
746 if (n == 0) {
747 /* NOTE: tb_end may be after the end of the page, but
748 it is not a problem */
749 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
750 tb_end = tb_start + tb->size;
751 } else {
752 tb_start = tb->page_addr[1];
753 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
754 }
755 if (!(tb_end <= start || tb_start >= end)) {
756#ifdef TARGET_HAS_PRECISE_SMC
757 if (current_tb_not_found) {
758 current_tb_not_found = 0;
759 current_tb = NULL;
760 if (env->mem_write_pc) {
761 /* now we have a real cpu fault */
762 current_tb = tb_find_pc(env->mem_write_pc);
763 }
764 }
765 if (current_tb == tb &&
766 !(current_tb->cflags & CF_SINGLE_INSN)) {
767 /* If we are modifying the current TB, we must stop
768 its execution. We could be more precise by checking
769 that the modification is after the current PC, but it
770 would require a specialized function to partially
771 restore the CPU state */
772
773 current_tb_modified = 1;
774 cpu_restore_state(current_tb, env,
775 env->mem_write_pc, NULL);
776#if defined(TARGET_I386)
777 current_flags = env->hflags;
778 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
779 current_cs_base = (target_ulong)env->segs[R_CS].base;
780 current_pc = current_cs_base + env->eip;
781#else
782#error unsupported CPU
783#endif
784 }
785#endif /* TARGET_HAS_PRECISE_SMC */
786 /* we need to do that to handle the case where a signal
787 occurs while doing tb_phys_invalidate() */
788 saved_tb = NULL;
789 if (env) {
790 saved_tb = env->current_tb;
791 env->current_tb = NULL;
792 }
793 tb_phys_invalidate(tb, -1);
794 if (env) {
795 env->current_tb = saved_tb;
796 if (env->interrupt_request && env->current_tb)
797 cpu_interrupt(env, env->interrupt_request);
798 }
799 }
800 tb = tb_next;
801 }
802#if !defined(CONFIG_USER_ONLY)
803 /* if no code remaining, no need to continue to use slow writes */
804 if (!p->first_tb) {
805 invalidate_page_bitmap(p);
806 if (is_cpu_write_access) {
807 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
808 }
809 }
810#endif
811#ifdef TARGET_HAS_PRECISE_SMC
812 if (current_tb_modified) {
813 /* we generate a block containing just the instruction
814 modifying the memory. It will ensure that it cannot modify
815 itself */
816 env->current_tb = NULL;
817 tb_gen_code(env, current_pc, current_cs_base, current_flags,
818 CF_SINGLE_INSN);
819 cpu_resume_from_signal(env, NULL);
820 }
821#endif
822}
823
824/* len must be <= 8 and start must be a multiple of len */
825static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
826{
827 PageDesc *p;
828 int offset, b;
829#if 0
830 if (1) {
831 if (loglevel) {
832 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
833 cpu_single_env->mem_write_vaddr, len,
834 cpu_single_env->eip,
835 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
836 }
837 }
838#endif
839 p = page_find(start >> TARGET_PAGE_BITS);
840 if (!p)
841 return;
842 if (p->code_bitmap) {
843 offset = start & ~TARGET_PAGE_MASK;
844 b = p->code_bitmap[offset >> 3] >> (offset & 7);
845 if (b & ((1 << len) - 1))
846 goto do_invalidate;
847 } else {
848 do_invalidate:
849 tb_invalidate_phys_page_range(start, start + len, 1);
850 }
851}
852
853#if !defined(CONFIG_SOFTMMU)
854static void tb_invalidate_phys_page(target_ulong addr,
855 unsigned long pc, void *puc)
856{
857 int n, current_flags, current_tb_modified;
858 target_ulong current_pc, current_cs_base;
859 PageDesc *p;
860 TranslationBlock *tb, *current_tb;
861#ifdef TARGET_HAS_PRECISE_SMC
862 CPUState *env = cpu_single_env;
863#endif
864
865 addr &= TARGET_PAGE_MASK;
866 p = page_find(addr >> TARGET_PAGE_BITS);
867 if (!p)
868 return;
869 tb = p->first_tb;
870 current_tb_modified = 0;
871 current_tb = NULL;
872 current_pc = 0; /* avoid warning */
873 current_cs_base = 0; /* avoid warning */
874 current_flags = 0; /* avoid warning */
875#ifdef TARGET_HAS_PRECISE_SMC
876 if (tb && pc != 0) {
877 current_tb = tb_find_pc(pc);
878 }
879#endif
880 while (tb != NULL) {
881 n = (long)tb & 3;
882 tb = (TranslationBlock *)((long)tb & ~3);
883#ifdef TARGET_HAS_PRECISE_SMC
884 if (current_tb == tb &&
885 !(current_tb->cflags & CF_SINGLE_INSN)) {
886 /* If we are modifying the current TB, we must stop
887 its execution. We could be more precise by checking
888 that the modification is after the current PC, but it
889 would require a specialized function to partially
890 restore the CPU state */
891
892 current_tb_modified = 1;
893 cpu_restore_state(current_tb, env, pc, puc);
894#if defined(TARGET_I386)
895 current_flags = env->hflags;
896 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
897 current_cs_base = (target_ulong)env->segs[R_CS].base;
898 current_pc = current_cs_base + env->eip;
899#else
900#error unsupported CPU
901#endif
902 }
903#endif /* TARGET_HAS_PRECISE_SMC */
904 tb_phys_invalidate(tb, addr);
905 tb = tb->page_next[n];
906 }
907 p->first_tb = NULL;
908#ifdef TARGET_HAS_PRECISE_SMC
909 if (current_tb_modified) {
910 /* we generate a block containing just the instruction
911 modifying the memory. It will ensure that it cannot modify
912 itself */
913 env->current_tb = NULL;
914 tb_gen_code(env, current_pc, current_cs_base, current_flags,
915 CF_SINGLE_INSN);
916 cpu_resume_from_signal(env, puc);
917 }
918#endif
919}
920#endif
921
922/* add the tb in the target page and protect it if necessary */
923static inline void tb_alloc_page(TranslationBlock *tb,
924 unsigned int n, target_ulong page_addr)
925{
926 PageDesc *p;
927 TranslationBlock *last_first_tb;
928
929 tb->page_addr[n] = page_addr;
930 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
931 tb->page_next[n] = p->first_tb;
932 last_first_tb = p->first_tb;
933 p->first_tb = (TranslationBlock *)((long)tb | n);
934 invalidate_page_bitmap(p);
935
936#if defined(TARGET_HAS_SMC) || 1
937
938#if defined(CONFIG_USER_ONLY)
939 if (p->flags & PAGE_WRITE) {
940 target_ulong addr;
941 PageDesc *p2;
942 int prot;
943
944 /* force the host page as non writable (writes will have a
945 page fault + mprotect overhead) */
946 page_addr &= qemu_host_page_mask;
947 prot = 0;
948 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
949 addr += TARGET_PAGE_SIZE) {
950
951 p2 = page_find (addr >> TARGET_PAGE_BITS);
952 if (!p2)
953 continue;
954 prot |= p2->flags;
955 p2->flags &= ~PAGE_WRITE;
956 page_get_flags(addr);
957 }
958 mprotect(g2h(page_addr), qemu_host_page_size,
959 (prot & PAGE_BITS) & ~PAGE_WRITE);
960#ifdef DEBUG_TB_INVALIDATE
961 printf("protecting code page: 0x%08lx\n",
962 page_addr);
963#endif
964 }
965#else
966 /* if some code is already present, then the pages are already
967 protected. So we handle the case where only the first TB is
968 allocated in a physical page */
969 if (!last_first_tb) {
970 tlb_protect_code(page_addr);
971 }
972#endif
973
974#endif /* TARGET_HAS_SMC */
975}
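/* Write protection strategy: in user-only mode the host page containing the
   TB is mprotect()ed read-only, so self-modifying writes fault and reach
   page_unprotect().  In softmmu mode tlb_protect_code() clears the page's
   CODE_DIRTY_FLAG instead, forcing guest writes through the notdirty slow
   path where overlapping TBs can be invalidated. */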
976
977/* Allocate a new translation block. Flush the translation buffer if
978 too many translation blocks or too much generated code. */
979TranslationBlock *tb_alloc(target_ulong pc)
980{
981 TranslationBlock *tb;
982
983 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
984 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
985 return NULL;
986 tb = &tbs[nb_tbs++];
987 tb->pc = pc;
988 tb->cflags = 0;
989 return tb;
990}
991
992/* add a new TB and link it to the physical page tables. phys_page2 is
993 (-1) to indicate that only one page contains the TB. */
994void tb_link_phys(TranslationBlock *tb,
995 target_ulong phys_pc, target_ulong phys_page2)
996{
997 unsigned int h;
998 TranslationBlock **ptb;
999
1000 /* add in the physical hash table */
1001 h = tb_phys_hash_func(phys_pc);
1002 ptb = &tb_phys_hash[h];
1003 tb->phys_hash_next = *ptb;
1004 *ptb = tb;
1005
1006 /* add in the page list */
1007 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1008 if (phys_page2 != -1)
1009 tb_alloc_page(tb, 1, phys_page2);
1010 else
1011 tb->page_addr[1] = -1;
1012
1013 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1014 tb->jmp_next[0] = NULL;
1015 tb->jmp_next[1] = NULL;
1016#ifdef USE_CODE_COPY
1017 tb->cflags &= ~CF_FP_USED;
1018 if (tb->cflags & CF_TB_FP_USED)
1019 tb->cflags |= CF_FP_USED;
1020#endif
1021
1022 /* init original jump addresses */
1023 if (tb->tb_next_offset[0] != 0xffff)
1024 tb_reset_jump(tb, 0);
1025 if (tb->tb_next_offset[1] != 0xffff)
1026 tb_reset_jump(tb, 1);
1027
1028#ifdef DEBUG_TB_CHECK
1029 tb_page_check();
1030#endif
1031}
1032
1033/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1034 tb[1].tc_ptr. Return NULL if not found */
1035TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1036{
1037 int m_min, m_max, m;
1038 unsigned long v;
1039 TranslationBlock *tb;
1040
1041 if (nb_tbs <= 0)
1042 return NULL;
1043 if (tc_ptr < (unsigned long)code_gen_buffer ||
1044 tc_ptr >= (unsigned long)code_gen_ptr)
1045 return NULL;
1046 /* binary search (cf Knuth) */
1047 m_min = 0;
1048 m_max = nb_tbs - 1;
1049 while (m_min <= m_max) {
1050 m = (m_min + m_max) >> 1;
1051 tb = &tbs[m];
1052 v = (unsigned long)tb->tc_ptr;
1053 if (v == tc_ptr)
1054 return tb;
1055 else if (tc_ptr < v) {
1056 m_max = m - 1;
1057 } else {
1058 m_min = m + 1;
1059 }
1060 }
1061 return &tbs[m_max];
1062}
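/* The binary search above relies on tbs[] being filled in order of
   increasing tc_ptr: code_gen_ptr only grows between flushes, so the array
   is naturally sorted by host code address. */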
1063
1064static void tb_reset_jump_recursive(TranslationBlock *tb);
1065
1066static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1067{
1068 TranslationBlock *tb1, *tb_next, **ptb;
1069 unsigned int n1;
1070
1071 tb1 = tb->jmp_next[n];
1072 if (tb1 != NULL) {
1073 /* find head of list */
1074 for(;;) {
1075 n1 = (long)tb1 & 3;
1076 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1077 if (n1 == 2)
1078 break;
1079 tb1 = tb1->jmp_next[n1];
1080 }
1081 /* we are now sure that tb jumps to tb1 */
1082 tb_next = tb1;
1083
1084 /* remove tb from the jmp_first list */
1085 ptb = &tb_next->jmp_first;
1086 for(;;) {
1087 tb1 = *ptb;
1088 n1 = (long)tb1 & 3;
1089 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1090 if (n1 == n && tb1 == tb)
1091 break;
1092 ptb = &tb1->jmp_next[n1];
1093 }
1094 *ptb = tb->jmp_next[n];
1095 tb->jmp_next[n] = NULL;
1096
1097 /* suppress the jump to next tb in generated code */
1098 tb_reset_jump(tb, n);
1099
1100 /* suppress jumps in the tb on which we could have jumped */
1101 tb_reset_jump_recursive(tb_next);
1102 }
1103}
1104
1105static void tb_reset_jump_recursive(TranslationBlock *tb)
1106{
1107 tb_reset_jump_recursive2(tb, 0);
1108 tb_reset_jump_recursive2(tb, 1);
1109}
1110
1111#if defined(TARGET_HAS_ICE)
1112static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1113{
1114 target_ulong addr, pd;
1115 ram_addr_t ram_addr;
1116 PhysPageDesc *p;
1117
1118 addr = cpu_get_phys_page_debug(env, pc);
1119 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1120 if (!p) {
1121 pd = IO_MEM_UNASSIGNED;
1122 } else {
1123 pd = p->phys_offset;
1124 }
1125 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1126 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1127}
1128#endif
1129
1130/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1131 breakpoint is reached */
1132int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1133{
1134#if defined(TARGET_HAS_ICE)
1135 int i;
1136
1137 for(i = 0; i < env->nb_breakpoints; i++) {
1138 if (env->breakpoints[i] == pc)
1139 return 0;
1140 }
1141
1142 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1143 return -1;
1144 env->breakpoints[env->nb_breakpoints++] = pc;
1145
1146 breakpoint_invalidate(env, pc);
1147 return 0;
1148#else
1149 return -1;
1150#endif
1151}
1152
1153/* remove a breakpoint */
1154int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1155{
1156#if defined(TARGET_HAS_ICE)
1157 int i;
1158 for(i = 0; i < env->nb_breakpoints; i++) {
1159 if (env->breakpoints[i] == pc)
1160 goto found;
1161 }
1162 return -1;
1163 found:
1164 env->nb_breakpoints--;
1165 if (i < env->nb_breakpoints)
1166 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1167
1168 breakpoint_invalidate(env, pc);
1169 return 0;
1170#else
1171 return -1;
1172#endif
1173}
1174
1175/* enable or disable single step mode. EXCP_DEBUG is returned by the
1176 CPU loop after each instruction */
1177void cpu_single_step(CPUState *env, int enabled)
1178{
1179#if defined(TARGET_HAS_ICE)
1180 if (env->singlestep_enabled != enabled) {
1181 env->singlestep_enabled = enabled;
1182 /* must flush all the translated code to avoid inconsistencies */
1183 /* XXX: only flush what is necessary */
1184 tb_flush(env);
1185 }
1186#endif
1187}
1188
1189#ifndef VBOX
1190/* enable or disable low level logging */
1191void cpu_set_log(int log_flags)
1192{
1193 loglevel = log_flags;
1194 if (loglevel && !logfile) {
1195 logfile = fopen(logfilename, "w");
1196 if (!logfile) {
1197 perror(logfilename);
1198 _exit(1);
1199 }
1200#if !defined(CONFIG_SOFTMMU)
1201 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1202 {
1203 static uint8_t logfile_buf[4096];
1204 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1205 }
1206#else
1207 setvbuf(logfile, NULL, _IOLBF, 0);
1208#endif
1209 }
1210}
1211
1212void cpu_set_log_filename(const char *filename)
1213{
1214 logfilename = strdup(filename);
1215}
1216#endif /* !VBOX */
1217
1218/* mask must never be zero, except for A20 change call */
1219void cpu_interrupt(CPUState *env, int mask)
1220{
1221 TranslationBlock *tb;
1222 static int interrupt_lock;
1223
1224#ifdef VBOX
1225 VM_ASSERT_EMT(env->pVM);
1226 ASMAtomicOrS32(&env->interrupt_request, mask);
1227#else /* !VBOX */
1228 env->interrupt_request |= mask;
1229#endif /* !VBOX */
1230 /* if the cpu is currently executing code, we must unlink it and
1231 all the potentially executing TB */
1232 tb = env->current_tb;
1233 if (tb && !testandset(&interrupt_lock)) {
1234 env->current_tb = NULL;
1235 tb_reset_jump_recursive(tb);
1236 interrupt_lock = 0;
1237 }
1238}
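/* Merely setting interrupt_request is not enough while the CPU is running
   inside a chain of directly linked TBs: tb_reset_jump_recursive() unlinks
   the jumps out of the current TB so execution falls back to cpu_exec(),
   which then notices the pending request.  interrupt_lock prevents two
   threads from unlinking the same chain concurrently. */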
1239
1240void cpu_reset_interrupt(CPUState *env, int mask)
1241{
1242#ifdef VBOX
1243 /*
1244 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1245 * for future changes!
1246 */
1247 ASMAtomicAndS32(&env->interrupt_request, ~mask);
1248#else /* !VBOX */
1249 env->interrupt_request &= ~mask;
1250#endif /* !VBOX */
1251}
1252
1253#ifndef VBOX
1254CPULogItem cpu_log_items[] = {
1255 { CPU_LOG_TB_OUT_ASM, "out_asm",
1256 "show generated host assembly code for each compiled TB" },
1257 { CPU_LOG_TB_IN_ASM, "in_asm",
1258 "show target assembly code for each compiled TB" },
1259 { CPU_LOG_TB_OP, "op",
1260 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1261#ifdef TARGET_I386
1262 { CPU_LOG_TB_OP_OPT, "op_opt",
1263 "show micro ops after optimization for each compiled TB" },
1264#endif
1265 { CPU_LOG_INT, "int",
1266 "show interrupts/exceptions in short format" },
1267 { CPU_LOG_EXEC, "exec",
1268 "show trace before each executed TB (lots of logs)" },
1269 { CPU_LOG_TB_CPU, "cpu",
1270 "show CPU state before bloc translation" },
1271#ifdef TARGET_I386
1272 { CPU_LOG_PCALL, "pcall",
1273 "show protected mode far calls/returns/exceptions" },
1274#endif
1275#ifdef DEBUG_IOPORT
1276 { CPU_LOG_IOPORT, "ioport",
1277 "show all i/o ports accesses" },
1278#endif
1279 { 0, NULL, NULL },
1280};
1281
1282static int cmp1(const char *s1, int n, const char *s2)
1283{
1284 if (strlen(s2) != n)
1285 return 0;
1286 return memcmp(s1, s2, n) == 0;
1287}
1288
1289/* takes a comma separated list of log masks. Return 0 if error. */
1290int cpu_str_to_log_mask(const char *str)
1291{
1292 CPULogItem *item;
1293 int mask;
1294 const char *p, *p1;
1295
1296 p = str;
1297 mask = 0;
1298 for(;;) {
1299 p1 = strchr(p, ',');
1300 if (!p1)
1301 p1 = p + strlen(p);
1302 if(cmp1(p,p1-p,"all")) {
1303 for(item = cpu_log_items; item->mask != 0; item++) {
1304 mask |= item->mask;
1305 }
1306 } else {
1307 for(item = cpu_log_items; item->mask != 0; item++) {
1308 if (cmp1(p, p1 - p, item->name))
1309 goto found;
1310 }
1311 return 0;
1312 }
1313 found:
1314 mask |= item->mask;
1315 if (*p1 != ',')
1316 break;
1317 p = p1 + 1;
1318 }
1319 return mask;
1320}
1321#endif /* !VBOX */
1322
1323#ifndef VBOX /* VBOX: we have our own routine. */
1324void cpu_abort(CPUState *env, const char *fmt, ...)
1325{
1326 va_list ap;
1327
1328 va_start(ap, fmt);
1329 fprintf(stderr, "qemu: fatal: ");
1330 vfprintf(stderr, fmt, ap);
1331 fprintf(stderr, "\n");
1332#ifdef TARGET_I386
1333 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1334#else
1335 cpu_dump_state(env, stderr, fprintf, 0);
1336#endif
1337 va_end(ap);
1338 abort();
1339}
1340#endif /* !VBOX */
1341
1342#if !defined(CONFIG_USER_ONLY)
1343
1344/* NOTE: if flush_global is true, also flush global entries (not
1345 implemented yet) */
1346void tlb_flush(CPUState *env, int flush_global)
1347{
1348 int i;
1349
1350#if defined(DEBUG_TLB)
1351 printf("tlb_flush:\n");
1352#endif
1353 /* must reset current TB so that interrupts cannot modify the
1354 links while we are modifying them */
1355 env->current_tb = NULL;
1356
1357 for(i = 0; i < CPU_TLB_SIZE; i++) {
1358 env->tlb_table[0][i].addr_read = -1;
1359 env->tlb_table[0][i].addr_write = -1;
1360 env->tlb_table[0][i].addr_code = -1;
1361 env->tlb_table[1][i].addr_read = -1;
1362 env->tlb_table[1][i].addr_write = -1;
1363 env->tlb_table[1][i].addr_code = -1;
1364 }
1365
1366 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1367
1368#if !defined(CONFIG_SOFTMMU)
1369 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1370#endif
1371#ifdef VBOX
1372 /* inform raw mode about TLB flush */
1373 remR3FlushTLB(env, flush_global);
1374#endif
1375#ifdef USE_KQEMU
1376 if (env->kqemu_enabled) {
1377 kqemu_flush(env, flush_global);
1378 }
1379#endif
1380#if !defined(VBOX) || defined(VBOX_WITH_STATISTICS)
1381 tlb_flush_count++;
1382#endif
1383}
1384
1385static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1386{
1387 if (addr == (tlb_entry->addr_read &
1388 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1389 addr == (tlb_entry->addr_write &
1390 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1391 addr == (tlb_entry->addr_code &
1392 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1393 tlb_entry->addr_read = -1;
1394 tlb_entry->addr_write = -1;
1395 tlb_entry->addr_code = -1;
1396 }
1397}
1398
1399void tlb_flush_page(CPUState *env, target_ulong addr)
1400{
1401 int i;
1402 TranslationBlock *tb;
1403
1404#if defined(DEBUG_TLB)
1405 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1406#endif
1407 /* must reset current TB so that interrupts cannot modify the
1408 links while we are modifying them */
1409 env->current_tb = NULL;
1410
1411 addr &= TARGET_PAGE_MASK;
1412 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1413 tlb_flush_entry(&env->tlb_table[0][i], addr);
1414 tlb_flush_entry(&env->tlb_table[1][i], addr);
1415
1416 /* Discard jump cache entries for any tb which might potentially
1417 overlap the flushed page. */
1418 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1419 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1420
1421 i = tb_jmp_cache_hash_page(addr);
1422 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1423
1424#if !defined(CONFIG_SOFTMMU)
1425 if (addr < MMAP_AREA_END)
1426 munmap((void *)addr, TARGET_PAGE_SIZE);
1427#endif
1428#ifdef VBOX
1429 /* inform raw mode about TLB page flush */
1430 remR3FlushPage(env, addr);
1431#endif /* VBOX */
1432#ifdef USE_KQEMU
1433 if (env->kqemu_enabled) {
1434 kqemu_flush_page(env, addr);
1435 }
1436#endif
1437}
1438
1439/* update the TLBs so that writes to code in the virtual page 'addr'
1440 can be detected */
1441static void tlb_protect_code(ram_addr_t ram_addr)
1442{
1443 cpu_physical_memory_reset_dirty(ram_addr,
1444 ram_addr + TARGET_PAGE_SIZE,
1445 CODE_DIRTY_FLAG);
1446#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1447 /** @todo Retest this? This function has changed... */
1448 remR3ProtectCode(cpu_single_env, ram_addr);
1449#endif
1450}
1451
1452/* update the TLB so that writes in physical page 'phys_addr' are no longer
1453 tested for self modifying code */
1454static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1455 target_ulong vaddr)
1456{
1457#ifdef VBOX
1458 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1459#endif
1460 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1461}
1462
1463static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1464 unsigned long start, unsigned long length)
1465{
1466 unsigned long addr;
1467 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1468 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1469 if ((addr - start) < length) {
1470 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1471 }
1472 }
1473}
1474
1475void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1476 int dirty_flags)
1477{
1478 CPUState *env;
1479 unsigned long length, start1;
1480 int i, mask, len;
1481 uint8_t *p;
1482
1483 start &= TARGET_PAGE_MASK;
1484 end = TARGET_PAGE_ALIGN(end);
1485
1486 length = end - start;
1487 if (length == 0)
1488 return;
1489 len = length >> TARGET_PAGE_BITS;
1490#ifdef USE_KQEMU
1491 /* XXX: should not depend on cpu context */
1492 env = first_cpu;
1493 if (env->kqemu_enabled) {
1494 ram_addr_t addr;
1495 addr = start;
1496 for(i = 0; i < len; i++) {
1497 kqemu_set_notdirty(env, addr);
1498 addr += TARGET_PAGE_SIZE;
1499 }
1500 }
1501#endif
1502 mask = ~dirty_flags;
1503 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1504#ifdef VBOX
1505 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1506#endif
1507 for(i = 0; i < len; i++)
1508 p[i] &= mask;
1509
1510 /* we modify the TLB cache so that the dirty bit will be set again
1511 when accessing the range */
1512#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1513 start1 = start;
1514#elif !defined(VBOX)
1515 start1 = start + (unsigned long)phys_ram_base;
1516#else
1517 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
1518#endif
1519 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1520 for(i = 0; i < CPU_TLB_SIZE; i++)
1521 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1522 for(i = 0; i < CPU_TLB_SIZE; i++)
1523 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1524 }
1525
1526#if !defined(CONFIG_SOFTMMU)
1527#ifdef VBOX /**@todo remove this check */
1528# error "We shouldn't get here..."
1529#endif
1530 /* XXX: this is expensive */
1531 {
1532 VirtPageDesc *p;
1533 int j;
1534 target_ulong addr;
1535
1536 for(i = 0; i < L1_SIZE; i++) {
1537 p = l1_virt_map[i];
1538 if (p) {
1539 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1540 for(j = 0; j < L2_SIZE; j++) {
1541 if (p->valid_tag == virt_valid_tag &&
1542 p->phys_addr >= start && p->phys_addr < end &&
1543 (p->prot & PROT_WRITE)) {
1544 if (addr < MMAP_AREA_END) {
1545 mprotect((void *)addr, TARGET_PAGE_SIZE,
1546 p->prot & ~PROT_WRITE);
1547 }
1548 }
1549 addr += TARGET_PAGE_SIZE;
1550 p++;
1551 }
1552 }
1553 }
1554 }
1555#endif
1556}
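/* Dirty tracking summary: phys_ram_dirty keeps one flag byte per RAM page
   (CODE_DIRTY_FLAG plus the generic dirty bits).  Clearing flags here also
   rewrites matching TLB write entries to IO_MEM_NOTDIRTY, so the next guest
   write to the range traps into the notdirty handlers below and the flags
   get set again. */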
1557
1558static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1559{
1560 ram_addr_t ram_addr;
1561
1562 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1563 /* RAM case */
1564#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1565 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1566#elif !defined(VBOX)
1567 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1568 tlb_entry->addend - (unsigned long)phys_ram_base;
1569#else
1570 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
1571#endif
1572 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1573 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1574 }
1575 }
1576}
1577
1578/* update the TLB according to the current state of the dirty bits */
1579void cpu_tlb_update_dirty(CPUState *env)
1580{
1581 int i;
1582 for(i = 0; i < CPU_TLB_SIZE; i++)
1583 tlb_update_dirty(&env->tlb_table[0][i]);
1584 for(i = 0; i < CPU_TLB_SIZE; i++)
1585 tlb_update_dirty(&env->tlb_table[1][i]);
1586}
1587
1588static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1589 unsigned long start)
1590{
1591 unsigned long addr;
1592 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1593 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1594 if (addr == start) {
1595 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1596 }
1597 }
1598}
1599
1600/* update the TLB corresponding to virtual page vaddr and phys addr
1601 addr so that it is no longer dirty */
1602static inline void tlb_set_dirty(CPUState *env,
1603 unsigned long addr, target_ulong vaddr)
1604{
1605 int i;
1606
1607 addr &= TARGET_PAGE_MASK;
1608 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1609 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1610 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1611}
1612
1613/* add a new TLB entry. At most one entry for a given virtual address
1614 is permitted. Return 0 if OK or 2 if the page could not be mapped
1615 (can only happen in non SOFTMMU mode for I/O pages or pages
1616 conflicting with the host address space). */
1617int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1618 target_phys_addr_t paddr, int prot,
1619 int is_user, int is_softmmu)
1620{
1621 PhysPageDesc *p;
1622 unsigned long pd;
1623 unsigned int index;
1624 target_ulong address;
1625 target_phys_addr_t addend;
1626 int ret;
1627 CPUTLBEntry *te;
1628
1629 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1630 if (!p) {
1631 pd = IO_MEM_UNASSIGNED;
1632 } else {
1633 pd = p->phys_offset;
1634 }
1635#if defined(DEBUG_TLB)
1636 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1637 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1638#endif
1639
1640 ret = 0;
1641#if !defined(CONFIG_SOFTMMU)
1642 if (is_softmmu)
1643#endif
1644 {
1645 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1646 /* IO memory case */
1647 address = vaddr | pd;
1648 addend = paddr;
1649 } else {
1650 /* standard memory */
1651 address = vaddr;
1652#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1653 addend = pd & TARGET_PAGE_MASK;
1654#elif !defined(VBOX)
1655 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1656#else
1657 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
1658#endif
1659 }
1660
1661 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1662 addend -= vaddr;
1663 te = &env->tlb_table[is_user][index];
1664 te->addend = addend;
1665 if (prot & PAGE_READ) {
1666 te->addr_read = address;
1667 } else {
1668 te->addr_read = -1;
1669 }
1670 if (prot & PAGE_EXEC) {
1671 te->addr_code = address;
1672 } else {
1673 te->addr_code = -1;
1674 }
1675 if (prot & PAGE_WRITE) {
1676 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1677 (pd & IO_MEM_ROMD)) {
1678 /* write access calls the I/O callback */
1679 te->addr_write = vaddr |
1680 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1681 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1682 !cpu_physical_memory_is_dirty(pd)) {
1683 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1684 } else {
1685 te->addr_write = address;
1686 }
1687 } else {
1688 te->addr_write = -1;
1689 }
1690#ifdef VBOX
1691 /* inform raw mode about TLB page change */
1692 remR3FlushPage(env, vaddr);
1693#endif
1694 }
1695#if !defined(CONFIG_SOFTMMU)
1696 else {
1697 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1698 /* IO access: no mapping is done as it will be handled by the
1699 soft MMU */
1700 if (!(env->hflags & HF_SOFTMMU_MASK))
1701 ret = 2;
1702 } else {
1703 void *map_addr;
1704
1705 if (vaddr >= MMAP_AREA_END) {
1706 ret = 2;
1707 } else {
1708 if (prot & PROT_WRITE) {
1709 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1710#if defined(TARGET_HAS_SMC) || 1
1711 first_tb ||
1712#endif
1713 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1714 !cpu_physical_memory_is_dirty(pd))) {
1715 /* ROM: we do as if code was inside */
1716 /* if code is present, we only map as read only and save the
1717 original mapping */
1718 VirtPageDesc *vp;
1719
1720 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1721 vp->phys_addr = pd;
1722 vp->prot = prot;
1723 vp->valid_tag = virt_valid_tag;
1724 prot &= ~PAGE_WRITE;
1725 }
1726 }
1727 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1728 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1729 if (map_addr == MAP_FAILED) {
1730 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1731 paddr, vaddr);
1732 }
1733 }
1734 }
1735 }
1736#endif
1737 return ret;
1738}
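/* TLB entry layout: addr_read/addr_write/addr_code hold the page-aligned
   virtual address with status bits in the low part (IO_MEM_NOTDIRTY, the
   io_index for MMIO pages, or -1 for an invalid entry), and addend is what
   must be added to a guest virtual address to reach the backing host memory
   (or the guest physical address in the VBox REM_PHYS_ADDR_IN_TLB
   configuration). */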
1739
1740/* called from signal handler: invalidate the code and unprotect the
1741 page. Return TRUE if the fault was successfully handled. */
1742int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1743{
1744#if !defined(CONFIG_SOFTMMU)
1745 VirtPageDesc *vp;
1746
1747#if defined(DEBUG_TLB)
1748 printf("page_unprotect: addr=0x%08x\n", addr);
1749#endif
1750 addr &= TARGET_PAGE_MASK;
1751
1752 /* if it is not mapped, no need to worry here */
1753 if (addr >= MMAP_AREA_END)
1754 return 0;
1755 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1756 if (!vp)
1757 return 0;
1758 /* NOTE: in this case, validate_tag is _not_ tested as it
1759 validates only the code TLB */
1760 if (vp->valid_tag != virt_valid_tag)
1761 return 0;
1762 if (!(vp->prot & PAGE_WRITE))
1763 return 0;
1764#if defined(DEBUG_TLB)
1765 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1766 addr, vp->phys_addr, vp->prot);
1767#endif
1768 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1769 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1770 (unsigned long)addr, vp->prot);
1771 /* set the dirty bit */
1772 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1773 /* flush the code inside */
1774 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1775 return 1;
1776#elif defined(VBOX)
1777 addr &= TARGET_PAGE_MASK;
1778
1779 /* if it is not mapped, no need to worry here */
1780 if (addr >= MMAP_AREA_END)
1781 return 0;
1782 return 1;
1783#else
1784 return 0;
1785#endif
1786}
1787
1788#else
1789
1790void tlb_flush(CPUState *env, int flush_global)
1791{
1792}
1793
1794void tlb_flush_page(CPUState *env, target_ulong addr)
1795{
1796}
1797
1798int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1799 target_phys_addr_t paddr, int prot,
1800 int is_user, int is_softmmu)
1801{
1802 return 0;
1803}
1804
1805#ifndef VBOX
1806/* dump memory mappings */
1807void page_dump(FILE *f)
1808{
1809 unsigned long start, end;
1810 int i, j, prot, prot1;
1811 PageDesc *p;
1812
1813 fprintf(f, "%-8s %-8s %-8s %s\n",
1814 "start", "end", "size", "prot");
1815 start = -1;
1816 end = -1;
1817 prot = 0;
1818 for(i = 0; i <= L1_SIZE; i++) {
1819 if (i < L1_SIZE)
1820 p = l1_map[i];
1821 else
1822 p = NULL;
1823 for(j = 0;j < L2_SIZE; j++) {
1824 if (!p)
1825 prot1 = 0;
1826 else
1827 prot1 = p[j].flags;
1828 if (prot1 != prot) {
1829 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1830 if (start != -1) {
1831 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1832 start, end, end - start,
1833 prot & PAGE_READ ? 'r' : '-',
1834 prot & PAGE_WRITE ? 'w' : '-',
1835 prot & PAGE_EXEC ? 'x' : '-');
1836 }
1837 if (prot1 != 0)
1838 start = end;
1839 else
1840 start = -1;
1841 prot = prot1;
1842 }
1843 if (!p)
1844 break;
1845 }
1846 }
1847}
1848#endif /* !VBOX */
1849
1850int page_get_flags(target_ulong address)
1851{
1852 PageDesc *p;
1853
1854 p = page_find(address >> TARGET_PAGE_BITS);
1855 if (!p)
1856 return 0;
1857 return p->flags;
1858}
1859
1860/* modify the flags of a page and invalidate the code if
1861 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1862 depending on PAGE_WRITE */
1863void page_set_flags(target_ulong start, target_ulong end, int flags)
1864{
1865 PageDesc *p;
1866 target_ulong addr;
1867
1868 start = start & TARGET_PAGE_MASK;
1869 end = TARGET_PAGE_ALIGN(end);
1870 if (flags & PAGE_WRITE)
1871 flags |= PAGE_WRITE_ORG;
1872#ifdef VBOX
1873 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
1874#endif
1875 spin_lock(&tb_lock);
1876 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1877 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1878 /* if the write protection is set, then we invalidate the code
1879 inside */
1880 if (!(p->flags & PAGE_WRITE) &&
1881 (flags & PAGE_WRITE) &&
1882 p->first_tb) {
1883 tb_invalidate_phys_page(addr, 0, NULL);
1884 }
1885 p->flags = flags;
1886 }
1887 spin_unlock(&tb_lock);
1888}
1889
1890/* called from signal handler: invalidate the code and unprotect the
1891 page. Return TRUE if the fault was successfully handled. */
1892int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1893{
1894 unsigned int page_index, prot, pindex;
1895 PageDesc *p, *p1;
1896 target_ulong host_start, host_end, addr;
1897
1898 host_start = address & qemu_host_page_mask;
1899 page_index = host_start >> TARGET_PAGE_BITS;
1900 p1 = page_find(page_index);
1901 if (!p1)
1902 return 0;
1903 host_end = host_start + qemu_host_page_size;
1904 p = p1;
1905 prot = 0;
1906 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1907 prot |= p->flags;
1908 p++;
1909 }
1910 /* if the page was really writable, then we change its
1911 protection back to writable */
1912 if (prot & PAGE_WRITE_ORG) {
1913 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1914 if (!(p1[pindex].flags & PAGE_WRITE)) {
1915 mprotect((void *)g2h(host_start), qemu_host_page_size,
1916 (prot & PAGE_BITS) | PAGE_WRITE);
1917 p1[pindex].flags |= PAGE_WRITE;
1918 /* and since the content will be modified, we must invalidate
1919 the corresponding translated code. */
1920 tb_invalidate_phys_page(address, pc, puc);
1921#ifdef DEBUG_TB_CHECK
1922 tb_invalidate_check(address);
1923#endif
1924 return 1;
1925 }
1926 }
1927 return 0;
1928}
1929
1930/* call this function when system calls directly modify a memory area */
1931/* ??? This should be redundant now we have lock_user. */
1932void page_unprotect_range(target_ulong data, target_ulong data_size)
1933{
1934 target_ulong start, end, addr;
1935
1936 start = data;
1937 end = start + data_size;
1938 start &= TARGET_PAGE_MASK;
1939 end = TARGET_PAGE_ALIGN(end);
1940 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1941 page_unprotect(addr, 0, NULL);
1942 }
1943}
1944
1945static inline void tlb_set_dirty(CPUState *env,
1946 unsigned long addr, target_ulong vaddr)
1947{
1948}
1949#endif /* defined(CONFIG_USER_ONLY) */
1950
1951/* register physical memory. 'size' must be a multiple of the target
1952 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1953 io memory page */
1954void cpu_register_physical_memory(target_phys_addr_t start_addr,
1955 unsigned long size,
1956 unsigned long phys_offset)
1957{
1958 target_phys_addr_t addr, end_addr;
1959 PhysPageDesc *p;
1960 CPUState *env;
1961
1962 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1963 end_addr = start_addr + size;
1964 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1965 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1966 p->phys_offset = phys_offset;
1967 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1968 (phys_offset & IO_MEM_ROMD))
1969 phys_offset += TARGET_PAGE_SIZE;
1970 }
1971
1972 /* since each CPU stores ram addresses in its TLB cache, we must
1973 reset the modified entries */
1974 /* XXX: slow ! */
1975 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1976 tlb_flush(env, 1);
1977 }
1978}
1979
1980/* XXX: temporary until new memory mapping API */
1981uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1982{
1983 PhysPageDesc *p;
1984
1985 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1986 if (!p)
1987 return IO_MEM_UNASSIGNED;
1988 return p->phys_offset;
1989}
1990
1991static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1992{
1993#ifdef DEBUG_UNASSIGNED
1994 printf("Unassigned mem read 0x%08x\n", (int)addr);
1995#endif
1996 return 0;
1997}
1998
1999static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2000{
2001#ifdef DEBUG_UNASSIGNED
2002 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2003#endif
2004}
2005
2006static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2007 unassigned_mem_readb,
2008 unassigned_mem_readb,
2009 unassigned_mem_readb,
2010};
2011
2012static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2013 unassigned_mem_writeb,
2014 unassigned_mem_writeb,
2015 unassigned_mem_writeb,
2016};
2017
2018static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2019{
2020 unsigned long ram_addr;
2021 int dirty_flags;
2022#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2023 ram_addr = addr;
2024#elif !defined(VBOX)
2025 ram_addr = addr - (unsigned long)phys_ram_base;
2026#else
2027 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2028#endif
2029#ifdef VBOX
2030 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2031 dirty_flags = 0xff;
2032 else
2033#endif /* VBOX */
2034 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2035 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2036#if !defined(CONFIG_USER_ONLY)
2037 tb_invalidate_phys_page_fast(ram_addr, 1);
2038# ifdef VBOX
2039 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2040 dirty_flags = 0xff;
2041 else
2042# endif /* VBOX */
2043 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2044#endif
2045 }
2046 stb_p((uint8_t *)(long)addr, val);
2047#ifdef USE_KQEMU
2048 if (cpu_single_env->kqemu_enabled &&
2049 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2050 kqemu_modify_page(cpu_single_env, ram_addr);
2051#endif
2052 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2053#ifdef VBOX
2054 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2055#endif /* VBOX */
2056 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2057 /* we remove the notdirty callback only if the code has been
2058 flushed */
2059 if (dirty_flags == 0xff)
2060 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2061}
2062
2063static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2064{
2065 unsigned long ram_addr;
2066 int dirty_flags;
2067#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2068 ram_addr = addr;
2069#elif !defined(VBOX)
2070 ram_addr = addr - (unsigned long)phys_ram_base;
2071#else
2072 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2073#endif
2074#ifdef VBOX
2075 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2076 dirty_flags = 0xff;
2077 else
2078#endif /* VBOX */
2079 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2080 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2081#if !defined(CONFIG_USER_ONLY)
2082 tb_invalidate_phys_page_fast(ram_addr, 2);
2083# ifdef VBOX
2084 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2085 dirty_flags = 0xff;
2086 else
2087# endif /* VBOX */
2088 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2089#endif
2090 }
2091 stw_p((uint8_t *)(long)addr, val);
2092#ifdef USE_KQEMU
2093 if (cpu_single_env->kqemu_enabled &&
2094 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2095 kqemu_modify_page(cpu_single_env, ram_addr);
2096#endif
2097 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2098#ifdef VBOX
2099 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2100#endif
2101 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2102 /* we remove the notdirty callback only if the code has been
2103 flushed */
2104 if (dirty_flags == 0xff)
2105 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2106}
2107
2108static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2109{
2110 unsigned long ram_addr;
2111 int dirty_flags;
2112#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2113 ram_addr = addr;
2114#elif !defined(VBOX)
2115 ram_addr = addr - (unsigned long)phys_ram_base;
2116#else
2117 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2118#endif
2119#ifdef VBOX
2120 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2121 dirty_flags = 0xff;
2122 else
2123#endif /* VBOX */
2124 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2125 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2126#if !defined(CONFIG_USER_ONLY)
2127 tb_invalidate_phys_page_fast(ram_addr, 4);
2128# ifdef VBOX
2129 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2130 dirty_flags = 0xff;
2131 else
2132# endif /* VBOX */
2133 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2134#endif
2135 }
2136 stl_p((uint8_t *)(long)addr, val);
2137#ifdef USE_KQEMU
2138 if (cpu_single_env->kqemu_enabled &&
2139 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2140 kqemu_modify_page(cpu_single_env, ram_addr);
2141#endif
2142 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2143#ifdef VBOX
2144 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2145#endif
2146 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2147 /* we remove the notdirty callback only if the code has been
2148 flushed */
2149 if (dirty_flags == 0xff)
2150 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2151}
2152
2153static CPUReadMemoryFunc *error_mem_read[3] = {
2154 NULL, /* never used */
2155 NULL, /* never used */
2156 NULL, /* never used */
2157};
2158
2159static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2160 notdirty_mem_writeb,
2161 notdirty_mem_writew,
2162 notdirty_mem_writel,
2163};
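/* When a RAM page contains translated code, its TLB entries are redirected
   to IO_MEM_NOTDIRTY so that guest writes funnel through the
   notdirty_mem_write* handlers above: each handler invalidates any
   translated blocks on the page, performs the store, updates the dirty
   bitmap and, once all dirty bits are set again (0xff), re-enables direct
   RAM access via tlb_set_dirty(). */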
2164
2165static void io_mem_init(void)
2166{
2167 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2168 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2169 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2170 io_mem_nb = 5;
2171
2172#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2173 /* alloc dirty bits array */
2174 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2175 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2176#endif /* !VBOX */
2177}
2178
2179/* mem_read and mem_write are arrays of functions containing the
2180 function to access byte (index 0), word (index 1) and dword (index
2181 2). All functions must be supplied. If io_index is non-zero, the
2182 corresponding io zone is modified. If it is zero, a new io zone is
2183 allocated. The return value can be used with
2184 cpu_register_physical_memory(). (-1) is returned on error. */
2185int cpu_register_io_memory(int io_index,
2186 CPUReadMemoryFunc **mem_read,
2187 CPUWriteMemoryFunc **mem_write,
2188 void *opaque)
2189{
2190 int i;
2191
2192 if (io_index <= 0) {
2193 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2194 return -1;
2195 io_index = io_mem_nb++;
2196 } else {
2197 if (io_index >= IO_MEM_NB_ENTRIES)
2198 return -1;
2199 }
2200
2201 for(i = 0;i < 3; i++) {
2202 io_mem_read[io_index][i] = mem_read[i];
2203 io_mem_write[io_index][i] = mem_write[i];
2204 }
2205 io_mem_opaque[io_index] = opaque;
2206 return io_index << IO_MEM_SHIFT;
2207}
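/* Illustrative sketch only (not part of the original source): wiring a small
   MMIO region through cpu_register_io_memory(). The device callbacks, the
   address and the size are invented for the example. */
#if 0
static uint32_t example_mmio_read(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device register read would go here */
}

static void example_mmio_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write would go here */
}

static CPUReadMemoryFunc *example_mmio_read_fns[3] = {
    example_mmio_read, example_mmio_read, example_mmio_read,
};
static CPUWriteMemoryFunc *example_mmio_write_fns[3] = {
    example_mmio_write, example_mmio_write, example_mmio_write,
};

static void example_register_mmio(void)
{
    /* io_index 0 asks for a fresh entry; the return value is already shifted
       by IO_MEM_SHIFT and is exactly what cpu_register_physical_memory()
       expects in phys_offset. */
    int io = cpu_register_io_memory(0, example_mmio_read_fns, example_mmio_write_fns, NULL);
    cpu_register_physical_memory(0xfe000000, 0x1000, io);
}
#endif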
2208
2209CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2210{
2211 return io_mem_write[io_index >> IO_MEM_SHIFT];
2212}
2213
2214CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2215{
2216 return io_mem_read[io_index >> IO_MEM_SHIFT];
2217}
2218
2219/* physical memory access (slow version, mainly for debug) */
2220#if defined(CONFIG_USER_ONLY)
2221void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2222 int len, int is_write)
2223{
2224 int l, flags;
2225 target_ulong page;
2226 void * p;
2227
2228 while (len > 0) {
2229 page = addr & TARGET_PAGE_MASK;
2230 l = (page + TARGET_PAGE_SIZE) - addr;
2231 if (l > len)
2232 l = len;
2233 flags = page_get_flags(page);
2234 if (!(flags & PAGE_VALID))
2235 return;
2236 if (is_write) {
2237 if (!(flags & PAGE_WRITE))
2238 return;
2239 p = lock_user(addr, len, 0);
2240 memcpy(p, buf, len);
2241 unlock_user(p, addr, len);
2242 } else {
2243 if (!(flags & PAGE_READ))
2244 return;
2245 p = lock_user(addr, len, 1);
2246 memcpy(buf, p, len);
2247 unlock_user(p, addr, 0);
2248 }
2249 len -= l;
2250 buf += l;
2251 addr += l;
2252 }
2253}
2254
2255#else
2256void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2257 int len, int is_write)
2258{
2259 int l, io_index;
2260 uint8_t *ptr;
2261 uint32_t val;
2262 target_phys_addr_t page;
2263 unsigned long pd;
2264 PhysPageDesc *p;
2265
2266 while (len > 0) {
2267 page = addr & TARGET_PAGE_MASK;
2268 l = (page + TARGET_PAGE_SIZE) - addr;
2269 if (l > len)
2270 l = len;
2271 p = phys_page_find(page >> TARGET_PAGE_BITS);
2272 if (!p) {
2273 pd = IO_MEM_UNASSIGNED;
2274 } else {
2275 pd = p->phys_offset;
2276 }
2277
2278 if (is_write) {
2279 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2280 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2281 /* XXX: could force cpu_single_env to NULL to avoid
2282 potential bugs */
2283 if (l >= 4 && ((addr & 3) == 0)) {
2284 /* 32 bit write access */
2285#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2286 val = ldl_p(buf);
2287#else
2288 val = *(const uint32_t *)buf;
2289#endif
2290 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2291 l = 4;
2292 } else if (l >= 2 && ((addr & 1) == 0)) {
2293 /* 16 bit write access */
2294#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2295 val = lduw_p(buf);
2296#else
2297 val = *(const uint16_t *)buf;
2298#endif
2299 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2300 l = 2;
2301 } else {
2302 /* 8 bit write access */
2303#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2304 val = ldub_p(buf);
2305#else
2306 val = *(const uint8_t *)buf;
2307#endif
2308 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2309 l = 1;
2310 }
2311 } else {
2312 unsigned long addr1;
2313 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2314 /* RAM case */
2315#ifdef VBOX
2316 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
2317#else
2318 ptr = phys_ram_base + addr1;
2319 memcpy(ptr, buf, l);
2320#endif
2321 if (!cpu_physical_memory_is_dirty(addr1)) {
2322 /* invalidate code */
2323 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2324 /* set dirty bit */
2325#ifdef VBOX
2326 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2327#endif
2328 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2329 (0xff & ~CODE_DIRTY_FLAG);
2330 }
2331 }
2332 } else {
2333 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2334 !(pd & IO_MEM_ROMD)) {
2335 /* I/O case */
2336 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2337 if (l >= 4 && ((addr & 3) == 0)) {
2338 /* 32 bit read access */
2339 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2340#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2341 stl_p(buf, val);
2342#else
2343 *(uint32_t *)buf = val;
2344#endif
2345 l = 4;
2346 } else if (l >= 2 && ((addr & 1) == 0)) {
2347 /* 16 bit read access */
2348 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2349#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2350 stw_p(buf, val);
2351#else
2352 *(uint16_t *)buf = val;
2353#endif
2354 l = 2;
2355 } else {
2356 /* 8 bit read access */
2357 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2358#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2359 stb_p(buf, val);
2360#else
2361 *(uint8_t *)buf = val;
2362#endif
2363 l = 1;
2364 }
2365 } else {
2366 /* RAM case */
2367#ifdef VBOX
2368 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
2369#else
2370 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2371 (addr & ~TARGET_PAGE_MASK);
2372 memcpy(buf, ptr, l);
2373#endif
2374 }
2375 }
2376 len -= l;
2377 buf += l;
2378 addr += l;
2379 }
2380}
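/* Illustrative sketch only (not part of the original source): callers
   normally reach cpu_physical_memory_rw() through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, which
   pass is_write = 0 or 1. The copy routine below is invented for the
   example. */
#if 0
static void example_copy_guest_phys(target_phys_addr_t dst,
                                    target_phys_addr_t src, int len)
{
    uint8_t tmp[64];
    while (len > 0) {
        int l = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, l);   /* is_write = 0 */
        cpu_physical_memory_write(dst, tmp, l);  /* is_write = 1 */
        src += l;
        dst += l;
        len -= l;
    }
}
#endif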
2381
2382#ifndef VBOX
2383/* used for ROM loading : can write in RAM and ROM */
2384void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2385 const uint8_t *buf, int len)
2386{
2387 int l;
2388 uint8_t *ptr;
2389 target_phys_addr_t page;
2390 unsigned long pd;
2391 PhysPageDesc *p;
2392
2393 while (len > 0) {
2394 page = addr & TARGET_PAGE_MASK;
2395 l = (page + TARGET_PAGE_SIZE) - addr;
2396 if (l > len)
2397 l = len;
2398 p = phys_page_find(page >> TARGET_PAGE_BITS);
2399 if (!p) {
2400 pd = IO_MEM_UNASSIGNED;
2401 } else {
2402 pd = p->phys_offset;
2403 }
2404
2405 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2406 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2407 !(pd & IO_MEM_ROMD)) {
2408 /* do nothing */
2409 } else {
2410 unsigned long addr1;
2411 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2412 /* ROM/RAM case */
2413 ptr = phys_ram_base + addr1;
2414 memcpy(ptr, buf, l);
2415 }
2416 len -= l;
2417 buf += l;
2418 addr += l;
2419 }
2420}
2421#endif /* !VBOX */
2422
2423
2424/* warning: addr must be aligned */
2425uint32_t ldl_phys(target_phys_addr_t addr)
2426{
2427 int io_index;
2428 uint8_t *ptr;
2429 uint32_t val;
2430 unsigned long pd;
2431 PhysPageDesc *p;
2432
2433 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2434 if (!p) {
2435 pd = IO_MEM_UNASSIGNED;
2436 } else {
2437 pd = p->phys_offset;
2438 }
2439
2440 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2441 !(pd & IO_MEM_ROMD)) {
2442 /* I/O case */
2443 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2444 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2445 } else {
2446 /* RAM case */
2447#ifndef VBOX
2448 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2449 (addr & ~TARGET_PAGE_MASK);
2450 val = ldl_p(ptr);
2451#else
2452 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2453#endif
2454 }
2455 return val;
2456}
2457
2458/* warning: addr must be aligned */
2459uint64_t ldq_phys(target_phys_addr_t addr)
2460{
2461 int io_index;
2462 uint8_t *ptr;
2463 uint64_t val;
2464 unsigned long pd;
2465 PhysPageDesc *p;
2466
2467 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2468 if (!p) {
2469 pd = IO_MEM_UNASSIGNED;
2470 } else {
2471 pd = p->phys_offset;
2472 }
2473
2474 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2475 !(pd & IO_MEM_ROMD)) {
2476 /* I/O case */
2477 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2478#ifdef TARGET_WORDS_BIGENDIAN
2479 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2480 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2481#else
2482 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2483 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2484#endif
2485 } else {
2486 /* RAM case */
2487#ifndef VBOX
2488 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2489 (addr & ~TARGET_PAGE_MASK);
2490 val = ldq_p(ptr);
2491#else
2492 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2493#endif
2494 }
2495 return val;
2496}
2497
2498/* XXX: optimize */
2499uint32_t ldub_phys(target_phys_addr_t addr)
2500{
2501 uint8_t val;
2502 cpu_physical_memory_read(addr, &val, 1);
2503 return val;
2504}
2505
2506/* XXX: optimize */
2507uint32_t lduw_phys(target_phys_addr_t addr)
2508{
2509 uint16_t val;
2510 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2511 return tswap16(val);
2512}
2513
2514/* warning: addr must be aligned. The ram page is not marked as dirty
2515 and the code inside is not invalidated. It is useful if the dirty
2516 bits are used to track modified PTEs */
2517void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2518{
2519 int io_index;
2520 uint8_t *ptr;
2521 unsigned long pd;
2522 PhysPageDesc *p;
2523
2524 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2525 if (!p) {
2526 pd = IO_MEM_UNASSIGNED;
2527 } else {
2528 pd = p->phys_offset;
2529 }
2530
2531 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2532 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2533 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2534 } else {
2535#ifndef VBOX
2536 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2537 (addr & ~TARGET_PAGE_MASK);
2538 stl_p(ptr, val);
2539#else
2540 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2541#endif
2542 }
2543}
2544
2545/* warning: addr must be aligned */
2546void stl_phys(target_phys_addr_t addr, uint32_t val)
2547{
2548 int io_index;
2549 uint8_t *ptr;
2550 unsigned long pd;
2551 PhysPageDesc *p;
2552
2553 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2554 if (!p) {
2555 pd = IO_MEM_UNASSIGNED;
2556 } else {
2557 pd = p->phys_offset;
2558 }
2559
2560 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2561 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2562 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2563 } else {
2564 unsigned long addr1;
2565 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2566 /* RAM case */
2567#ifndef VBOX
2568 ptr = phys_ram_base + addr1;
2569 stl_p(ptr, val);
2570#else
2571 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2572#endif
2573 if (!cpu_physical_memory_is_dirty(addr1)) {
2574 /* invalidate code */
2575 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2576 /* set dirty bit */
2577#ifdef VBOX
2578 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2579#endif
2580 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2581 (0xff & ~CODE_DIRTY_FLAG);
2582 }
2583 }
2584}
2585
2586/* XXX: optimize */
2587void stb_phys(target_phys_addr_t addr, uint32_t val)
2588{
2589 uint8_t v = val;
2590 cpu_physical_memory_write(addr, &v, 1);
2591}
2592
2593/* XXX: optimize */
2594void stw_phys(target_phys_addr_t addr, uint32_t val)
2595{
2596 uint16_t v = tswap16(val);
2597 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2598}
2599
2600/* XXX: optimize */
2601void stq_phys(target_phys_addr_t addr, uint64_t val)
2602{
2603 val = tswap64(val);
2604 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2605}
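/* Illustrative sketch only (not part of the original source): the
   ld*_phys()/st*_phys() helpers above give width-specific access to guest
   physical memory; stl_phys_notdirty() is the variant for writes that must
   not mark the page dirty or invalidate translated code. The PTE address
   and the 0x20 'accessed' bit below are assumptions for the example. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x20;                       /* hypothetical 'accessed' bit */
    stl_phys_notdirty(pte_addr, pte);  /* write back without dirtying */
}
#endif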
2606
2607#endif
2608
2609#ifndef VBOX
2610/* virtual memory access for debug */
2611int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2612 uint8_t *buf, int len, int is_write)
2613{
2614 int l;
2615 target_ulong page, phys_addr;
2616
2617 while (len > 0) {
2618 page = addr & TARGET_PAGE_MASK;
2619 phys_addr = cpu_get_phys_page_debug(env, page);
2620 /* if no physical page mapped, return an error */
2621 if (phys_addr == -1)
2622 return -1;
2623 l = (page + TARGET_PAGE_SIZE) - addr;
2624 if (l > len)
2625 l = len;
2626 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2627 buf, l, is_write);
2628 len -= l;
2629 buf += l;
2630 addr += l;
2631 }
2632 return 0;
2633}
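/* Illustrative sketch only (not part of the original source): a
   debugger-style read of guest virtual memory. cpu_memory_rw_debug()
   resolves each page with cpu_get_phys_page_debug() and then falls back to
   cpu_physical_memory_rw(). The helper below is invented for the example. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   void *dst, int len)
{
    /* Returns 0 on success, -1 if some page in the range is not mapped. */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)dst, len, 0);
}
#endif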
2634
2635void dump_exec_info(FILE *f,
2636 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2637{
2638 int i, target_code_size, max_target_code_size;
2639 int direct_jmp_count, direct_jmp2_count, cross_page;
2640 TranslationBlock *tb;
2641
2642 target_code_size = 0;
2643 max_target_code_size = 0;
2644 cross_page = 0;
2645 direct_jmp_count = 0;
2646 direct_jmp2_count = 0;
2647 for(i = 0; i < nb_tbs; i++) {
2648 tb = &tbs[i];
2649 target_code_size += tb->size;
2650 if (tb->size > max_target_code_size)
2651 max_target_code_size = tb->size;
2652 if (tb->page_addr[1] != -1)
2653 cross_page++;
2654 if (tb->tb_next_offset[0] != 0xffff) {
2655 direct_jmp_count++;
2656 if (tb->tb_next_offset[1] != 0xffff) {
2657 direct_jmp2_count++;
2658 }
2659 }
2660 }
2661 /* XXX: avoid using doubles ? */
2662 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2663 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2664 nb_tbs ? target_code_size / nb_tbs : 0,
2665 max_target_code_size);
2666 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2667 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2668 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2669 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2670 cross_page,
2671 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2672 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2673 direct_jmp_count,
2674 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2675 direct_jmp2_count,
2676 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2677 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2678 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2679 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2680}
2681#endif /* !VBOX */
2682
2683#if !defined(CONFIG_USER_ONLY)
2684
2685#define MMUSUFFIX _cmmu
2686#define GETPC() NULL
2687#define env cpu_single_env
2688#define SOFTMMU_CODE_ACCESS
2689
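/* Each inclusion of softmmu_template.h below is parameterised by SHIFT
   (0..3 = 1, 2, 4 and 8 byte accesses) and, together with
   SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu above, instantiates the slow-path
   accessors used when the translator fetches guest code through the
   software TLB. */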
2690#define SHIFT 0
2691#include "softmmu_template.h"
2692
2693#define SHIFT 1
2694#include "softmmu_template.h"
2695
2696#define SHIFT 2
2697#include "softmmu_template.h"
2698
2699#define SHIFT 3
2700#include "softmmu_template.h"
2701
2702#undef env
2703
2704#endif