VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@13301

Last change on this file since 13301 was 13301, checked in by vboxsync, 16 years ago

more synchronization with QEMU - things get pretty hairy

  • Property svn:eol-style set to native
File size: 91.7 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <inttypes.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
60//#define DEBUG_TB_INVALIDATE
61//#define DEBUG_FLUSH
62//#define DEBUG_TLB
63//#define DEBUG_UNASSIGNED
64
65/* make various TB consistency checks */
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74#define SMC_BITMAP_USE_THRESHOLD 10
75
76#define MMAP_AREA_START 0x00000000
77#define MMAP_AREA_END 0xa8000000
78
79#if defined(TARGET_SPARC64)
80#define TARGET_PHYS_ADDR_SPACE_BITS 41
81#elif defined(TARGET_SPARC)
82#define TARGET_PHYS_ADDR_SPACE_BITS 36
83#elif defined(TARGET_ALPHA)
84#define TARGET_PHYS_ADDR_SPACE_BITS 42
85#define TARGET_VIRT_ADDR_SPACE_BITS 42
86#elif defined(TARGET_PPC64)
87#define TARGET_PHYS_ADDR_SPACE_BITS 42
88#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
89#define TARGET_PHYS_ADDR_SPACE_BITS 42
90#elif defined(TARGET_I386) && !defined(USE_KQEMU)
91#define TARGET_PHYS_ADDR_SPACE_BITS 36
92#else
93/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
94#define TARGET_PHYS_ADDR_SPACE_BITS 32
95#endif
96
97static TranslationBlock *tbs;
98int code_gen_max_blocks;
99TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
100static int nb_tbs;
101/* any access to the tbs or the page table must use this lock */
102spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
103
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115
116uint8_t code_gen_prologue[1024] code_gen_section;
117static uint8_t *code_gen_buffer;
118static unsigned long code_gen_buffer_size;
119/* threshold to flush the translated code buffer */
120static unsigned long code_gen_buffer_max_size;
121uint8_t *code_gen_ptr;
122
123#ifndef VBOX
124#if !defined(CONFIG_USER_ONLY)
125ram_addr_t phys_ram_size;
126int phys_ram_fd;
127uint8_t *phys_ram_base;
128uint8_t *phys_ram_dirty;
129static int in_migration;
130static ram_addr_t phys_ram_alloc_offset = 0;
131#endif
132#else /* VBOX */
133RTGCPHYS phys_ram_size;
134/* we have memory ranges (the high PC-BIOS mapping) which
135 causes some pages to fall outside the dirty map here. */
136uint32_t phys_ram_dirty_size;
137#endif /* VBOX */
138#if !defined(VBOX)
139uint8_t *phys_ram_base;
140#endif
141uint8_t *phys_ram_dirty;
142
143CPUState *first_cpu;
144/* current CPU in the current thread. It is only valid inside
145 cpu_exec() */
146CPUState *cpu_single_env;
147/* 0 = Do not count executed instructions.
148 1 = Precise instruction counting.
149 2 = Adaptive rate instruction counting. */
150int use_icount = 0;
151/* Current instruction counter. While executing translated code this may
152 include some instructions that have not yet been executed. */
153int64_t qemu_icount;
154
155typedef struct PageDesc {
156 /* list of TBs intersecting this ram page */
157 TranslationBlock *first_tb;
158 /* in order to optimize self modifying code, we count the number
159 of lookups we do to a given page to use a bitmap */
160 unsigned int code_write_count;
161 uint8_t *code_bitmap;
162#if defined(CONFIG_USER_ONLY)
163 unsigned long flags;
164#endif
165} PageDesc;
166
167typedef struct PhysPageDesc {
168 /* offset in host memory of the page + io_index in the low 12 bits */
169 ram_addr_t phys_offset;
170} PhysPageDesc;
171
172#define L2_BITS 10
173#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
174/* XXX: this is a temporary hack for alpha target.
175 * In the future, this is to be replaced by a multi-level table
176 * to actually be able to handle the complete 64 bits address space.
177 */
178#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
179#else
180#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
181#endif
182
183#define L1_SIZE (1 << L1_BITS)
184#define L2_SIZE (1 << L2_BITS)
185
186static void io_mem_init(void);
187
188unsigned long qemu_real_host_page_size;
189unsigned long qemu_host_page_bits;
190unsigned long qemu_host_page_size;
191unsigned long qemu_host_page_mask;
192
193/* XXX: for system emulation, it could just be an array */
194static PageDesc *l1_map[L1_SIZE];
195static PhysPageDesc **l1_phys_map;
196
197#if !defined(CONFIG_USER_ONLY)
198static void io_mem_init(void);
199
200/* io memory support */
201CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
202CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
203void *io_mem_opaque[IO_MEM_NB_ENTRIES];
204static int io_mem_nb;
205static int io_mem_watch;
206#endif
207
208#ifndef VBOX
209/* log support */
210static const char *logfilename = "/tmp/qemu.log";
211#endif /* !VBOX */
212FILE *logfile;
213int loglevel;
214static int log_append = 0;
215
216/* statistics */
217static int tlb_flush_count;
218static int tb_flush_count;
219#ifndef VBOX
220static int tb_phys_invalidate_count;
221#endif /* !VBOX */
222
223#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
224typedef struct subpage_t {
225 target_phys_addr_t base;
226 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
227 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
228 void *opaque[TARGET_PAGE_SIZE][2][4];
229} subpage_t;
230
231
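/* map_exec: make a host address range readable, writable and executable
   (Win32, POSIX and VBox/IPRT variants below). */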
232#ifndef VBOX
233#ifdef _WIN32
234static void map_exec(void *addr, long size)
235{
236 DWORD old_protect;
237 VirtualProtect(addr, size,
238 PAGE_EXECUTE_READWRITE, &old_protect);
239
240}
241#else
242static void map_exec(void *addr, long size)
243{
244 unsigned long start, end, page_size;
245
246 page_size = getpagesize();
247 start = (unsigned long)addr;
248 start &= ~(page_size - 1);
249
250 end = (unsigned long)addr + size;
251 end += page_size - 1;
252 end &= ~(page_size - 1);
253
254 mprotect((void *)start, end - start,
255 PROT_READ | PROT_WRITE | PROT_EXEC);
256}
257#endif
258#else // VBOX
259static void map_exec(void *addr, long size)
260{
261 RTMemProtect(addr, size,
262 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
263}
264#endif
265
266static void page_init(void)
267{
268 /* NOTE: we can always suppose that qemu_host_page_size >=
269 TARGET_PAGE_SIZE */
270#ifdef VBOX
271 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
272 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
273 qemu_real_host_page_size = PAGE_SIZE;
274#else /* !VBOX */
275#ifdef _WIN32
276 {
277 SYSTEM_INFO system_info;
278 DWORD old_protect;
279
280 GetSystemInfo(&system_info);
281 qemu_real_host_page_size = system_info.dwPageSize;
282 }
283#else
284 qemu_real_host_page_size = getpagesize();
285#endif
286#endif /* !VBOX */
287
288 if (qemu_host_page_size == 0)
289 qemu_host_page_size = qemu_real_host_page_size;
290 if (qemu_host_page_size < TARGET_PAGE_SIZE)
291 qemu_host_page_size = TARGET_PAGE_SIZE;
292 qemu_host_page_bits = 0;
293 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
294 qemu_host_page_bits++;
295 qemu_host_page_mask = ~(qemu_host_page_size - 1);
296 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
297 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
298#ifdef VBOX
299 /* We use other means to set reserved bit on our pages */
300#else
301#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
302 {
303 long long startaddr, endaddr;
304 FILE *f;
305 int n;
306
307 mmap_lock();
308 last_brk = (unsigned long)sbrk(0);
309 f = fopen("/proc/self/maps", "r");
310 if (f) {
311 do {
312 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
313 if (n == 2) {
314 startaddr = MIN(startaddr,
315 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
316 endaddr = MIN(endaddr,
317 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
318 page_set_flags(startaddr & TARGET_PAGE_MASK,
319 TARGET_PAGE_ALIGN(endaddr),
320 PAGE_RESERVED);
321 }
322 } while (!feof(f));
323 fclose(f);
324 }
325 mmap_unlock();
326 }
327#endif
328#endif
329}
330
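/* Return a pointer to the l1_map slot covering target page 'index',
   or NULL when the index is outside the representable range. */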
331static inline PageDesc **page_l1_map(target_ulong index)
332{
333#if TARGET_LONG_BITS > 32
334 /* Host memory outside guest VM. For 32-bit targets we have already
335 excluded high addresses. */
336 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
337 return NULL;
338#endif
339 return &l1_map[index >> L2_BITS];
340}
341
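/* Find the PageDesc for target page 'index', allocating the
   second-level PageDesc array on first use. */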
342static inline PageDesc *page_find_alloc(target_ulong index)
343{
344 PageDesc **lp, *p;
345 lp = page_l1_map(index);
346 if (!lp)
347 return NULL;
348
349 p = *lp;
350 if (!p) {
351 /* allocate if not found */
352#if defined(CONFIG_USER_ONLY)
353 unsigned long addr;
354 size_t len = sizeof(PageDesc) * L2_SIZE;
355 /* Don't use qemu_malloc because it may recurse. */
356 p = mmap(0, len, PROT_READ | PROT_WRITE,
357 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
358 *lp = p;
359 addr = h2g(p);
360 if (addr == (target_ulong)addr) {
361 page_set_flags(addr & TARGET_PAGE_MASK,
362 TARGET_PAGE_ALIGN(addr + len),
363 PAGE_RESERVED);
364 }
365#else
366 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
367 *lp = p;
368#endif
369 }
370 return p + (index & (L2_SIZE - 1));
371}
372
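/* Non-allocating lookup of the PageDesc for target page 'index';
   returns NULL if no descriptor exists yet. */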
373static inline PageDesc *page_find(target_ulong index)
374{
375 PageDesc **lp, *p;
376 lp = page_l1_map(index);
377 if (!lp)
378 return NULL;
379
380 p = *lp;
381 if (!p)
382 return 0;
383 return p + (index & (L2_SIZE - 1));
384}
385
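/* Look up the PhysPageDesc for physical page 'index'. When 'alloc' is set,
   missing tables are created and new entries are initialised to
   IO_MEM_UNASSIGNED; the VBox build may also grow dynamic RAM ranges
   on demand. */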
386static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
387{
388 void **lp, **p;
389 PhysPageDesc *pd;
390
391 p = (void **)l1_phys_map;
392#if TARGET_PHYS_ADDR_SPACE_BITS > 32
393
394#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
395#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
396#endif
397 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
398 p = *lp;
399 if (!p) {
400 /* allocate if not found */
401 if (!alloc)
402 return NULL;
403 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
404 memset(p, 0, sizeof(void *) * L1_SIZE);
405 *lp = p;
406 }
407#endif
408 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
409 pd = *lp;
410 if (!pd) {
411 int i;
412 /* allocate if not found */
413 if (!alloc)
414 return NULL;
415 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
416 *lp = pd;
417 for (i = 0; i < L2_SIZE; i++)
418 pd[i].phys_offset = IO_MEM_UNASSIGNED;
419 }
420#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
421 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
422 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
423 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
424 return pd;
425#else
426 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
427#endif
428}
429
430static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
431{
432 return phys_page_find_alloc(index, 0);
433}
434
435#if !defined(CONFIG_USER_ONLY)
436static void tlb_protect_code(ram_addr_t ram_addr);
437static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
438 target_ulong vaddr);
439#define mmap_lock() do { } while(0)
440#define mmap_unlock() do { } while(0)
441#endif
442
443#ifdef VBOX
444/** @todo nike: isn't 32M too much ? */
445#endif
446#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
447
448#if defined(CONFIG_USER_ONLY)
 449/* Currently it is not recommended to allocate big chunks of data in
 450 user mode. It will change when a dedicated libc is used */
451#define USE_STATIC_CODE_GEN_BUFFER
452#endif
453
454/* VBox allocates codegen buffer dynamically */
455#ifndef VBOX
456#ifdef USE_STATIC_CODE_GEN_BUFFER
457static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
458#endif
459#endif
460
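/* Allocate the translated-code buffer: the static buffer, RTMemExecAlloc()
   on VBox, or an executable mmap/qemu_malloc area elsewhere, then derive
   code_gen_max_blocks and the flush threshold from its size. */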
461static void code_gen_alloc(unsigned long tb_size)
462{
463#ifdef USE_STATIC_CODE_GEN_BUFFER
464 code_gen_buffer = static_code_gen_buffer;
465 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
466 map_exec(code_gen_buffer, code_gen_buffer_size);
467#else
468 code_gen_buffer_size = tb_size;
469 if (code_gen_buffer_size == 0) {
470#if defined(CONFIG_USER_ONLY)
471 /* in user mode, phys_ram_size is not meaningful */
472 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
473#else
 474 /* XXX: needs adjustments */
475 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
476#endif
477 }
478 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
479 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
480 /* The code gen buffer location may have constraints depending on
481 the host cpu and OS */
482#ifdef VBOX
483 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
484
485 if (!code_gen_buffer) {
486 LogRel(("REM: failed allocate codegen buffer %lld\n",
487 code_gen_buffer_size));
488 return;
489 }
490#else //!VBOX
491#if defined(__linux__)
492 {
493 int flags;
494 void *start = NULL;
495
496 flags = MAP_PRIVATE | MAP_ANONYMOUS;
497#if defined(__x86_64__)
498 flags |= MAP_32BIT;
499 /* Cannot map more than that */
500 if (code_gen_buffer_size > (800 * 1024 * 1024))
501 code_gen_buffer_size = (800 * 1024 * 1024);
502#elif defined(__sparc_v9__)
503 // Map the buffer below 2G, so we can use direct calls and branches
504 flags |= MAP_FIXED;
505 start = (void *) 0x60000000UL;
506 if (code_gen_buffer_size > (512 * 1024 * 1024))
507 code_gen_buffer_size = (512 * 1024 * 1024);
508#endif
509 code_gen_buffer = mmap(start, code_gen_buffer_size,
510 PROT_WRITE | PROT_READ | PROT_EXEC,
511 flags, -1, 0);
512 if (code_gen_buffer == MAP_FAILED) {
513 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
514 exit(1);
515 }
516 }
517#elif defined(__FreeBSD__)
518 {
519 int flags;
520 void *addr = NULL;
521 flags = MAP_PRIVATE | MAP_ANONYMOUS;
522#if defined(__x86_64__)
523 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
524 * 0x40000000 is free */
525 flags |= MAP_FIXED;
526 addr = (void *)0x40000000;
527 /* Cannot map more than that */
528 if (code_gen_buffer_size > (800 * 1024 * 1024))
529 code_gen_buffer_size = (800 * 1024 * 1024);
530#endif
531 code_gen_buffer = mmap(addr, code_gen_buffer_size,
532 PROT_WRITE | PROT_READ | PROT_EXEC,
533 flags, -1, 0);
534 if (code_gen_buffer == MAP_FAILED) {
535 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
536 exit(1);
537 }
538 }
539#else
540 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
541 if (!code_gen_buffer) {
542 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
543 exit(1);
544 }
545 map_exec(code_gen_buffer, code_gen_buffer_size);
546#endif
547#endif // VBOX
548#endif /* !USE_STATIC_CODE_GEN_BUFFER */
549 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
550 code_gen_buffer_max_size = code_gen_buffer_size -
551 code_gen_max_block_size();
552 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
553 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
554}
555
556/* Must be called before using the QEMU cpus. 'tb_size' is the size
557 (in bytes) allocated to the translation buffer. Zero means default
558 size. */
559void cpu_exec_init_all(unsigned long tb_size)
560{
561 cpu_gen_init();
562 code_gen_alloc(tb_size);
563 code_gen_ptr = code_gen_buffer;
564 page_init();
565#if !defined(CONFIG_USER_ONLY)
566 io_mem_init();
567#endif
568}
569
570#ifndef VBOX
571#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
572
573#define CPU_COMMON_SAVE_VERSION 1
574
575static void cpu_common_save(QEMUFile *f, void *opaque)
576{
577 CPUState *env = opaque;
578
579 qemu_put_be32s(f, &env->halted);
580 qemu_put_be32s(f, &env->interrupt_request);
581}
582
583static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
584{
585 CPUState *env = opaque;
586
587 if (version_id != CPU_COMMON_SAVE_VERSION)
588 return -EINVAL;
589
590 qemu_get_be32s(f, &env->halted);
591 qemu_get_be32s(f, &env->interrupt_request);
592 tlb_flush(env, 1);
593
594 return 0;
595}
596#endif
597#endif //!VBOX
598
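/* Register a virtual CPU: append it to the global first_cpu list, assign
   the next cpu_index and (outside VBox) register the savevm handlers. */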
599void cpu_exec_init(CPUState *env)
600{
601 CPUState **penv;
602 int cpu_index;
603
604 env->next_cpu = NULL;
605 penv = &first_cpu;
606 cpu_index = 0;
607 while (*penv != NULL) {
608 penv = (CPUState **)&(*penv)->next_cpu;
609 cpu_index++;
610 }
611 env->cpu_index = cpu_index;
612 env->nb_watchpoints = 0;
613 *penv = env;
614#ifndef VBOX
615#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
616 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
617 cpu_common_save, cpu_common_load, env);
618 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
619 cpu_save, cpu_load, env);
620#endif
621#endif // !VBOX
622}
623
624static inline void invalidate_page_bitmap(PageDesc *p)
625{
626 if (p->code_bitmap) {
627 qemu_free(p->code_bitmap);
628 p->code_bitmap = NULL;
629 }
630 p->code_write_count = 0;
631}
632
633/* set to NULL all the 'first_tb' fields in all PageDescs */
634static void page_flush_tb(void)
635{
636 int i, j;
637 PageDesc *p;
638
639 for(i = 0; i < L1_SIZE; i++) {
640 p = l1_map[i];
641 if (p) {
642 for(j = 0; j < L2_SIZE; j++) {
643 p->first_tb = NULL;
644 invalidate_page_bitmap(p);
645 p++;
646 }
647 }
648 }
649}
650
651/* flush all the translation blocks */
652/* XXX: tb_flush is currently not thread safe */
653void tb_flush(CPUState *env1)
654{
655 CPUState *env;
656#if defined(DEBUG_FLUSH)
657 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
658 (unsigned long)(code_gen_ptr - code_gen_buffer),
659 nb_tbs, nb_tbs > 0 ?
660 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
661#endif
662 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
663 cpu_abort(env1, "Internal error: code buffer overflow\n");
664
665 nb_tbs = 0;
666
667 for(env = first_cpu; env != NULL; env = env->next_cpu) {
668 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
669 }
670
671 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
672 page_flush_tb();
673
674 code_gen_ptr = code_gen_buffer;
675 /* XXX: flush processor icache at this point if cache flush is
676 expensive */
677 tb_flush_count++;
678}
679
680#ifdef DEBUG_TB_CHECK
681static void tb_invalidate_check(target_ulong address)
682{
683 TranslationBlock *tb;
684 int i;
685 address &= TARGET_PAGE_MASK;
686 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
687 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
688 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
689 address >= tb->pc + tb->size)) {
690 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
691 address, (long)tb->pc, tb->size);
692 }
693 }
694 }
695}
696
697/* verify that all the pages have correct rights for code */
698static void tb_page_check(void)
699{
700 TranslationBlock *tb;
701 int i, flags1, flags2;
702
703 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
704 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
705 flags1 = page_get_flags(tb->pc);
706 flags2 = page_get_flags(tb->pc + tb->size - 1);
707 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
708 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
709 (long)tb->pc, tb->size, flags1, flags2);
710 }
711 }
712 }
713}
714
715static void tb_jmp_check(TranslationBlock *tb)
716{
717 TranslationBlock *tb1;
718 unsigned int n1;
719
720 /* suppress any remaining jumps to this TB */
721 tb1 = tb->jmp_first;
722 for(;;) {
723 n1 = (long)tb1 & 3;
724 tb1 = (TranslationBlock *)((long)tb1 & ~3);
725 if (n1 == 2)
726 break;
727 tb1 = tb1->jmp_next[n1];
728 }
729 /* check end of list */
730 if (tb1 != tb) {
731 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
732 }
733}
734#endif // DEBUG_TB_CHECK
735
736/* invalidate one TB */
737static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
738 int next_offset)
739{
740 TranslationBlock *tb1;
741 for(;;) {
742 tb1 = *ptb;
743 if (tb1 == tb) {
744 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
745 break;
746 }
747 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
748 }
749}
750
751static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
752{
753 TranslationBlock *tb1;
754 unsigned int n1;
755
756 for(;;) {
757 tb1 = *ptb;
758 n1 = (long)tb1 & 3;
759 tb1 = (TranslationBlock *)((long)tb1 & ~3);
760 if (tb1 == tb) {
761 *ptb = tb1->page_next[n1];
762 break;
763 }
764 ptb = &tb1->page_next[n1];
765 }
766}
767
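/* Remove jump slot 'n' of 'tb' from the circular list of blocks that
   jump to its target TB. */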
768static inline void tb_jmp_remove(TranslationBlock *tb, int n)
769{
770 TranslationBlock *tb1, **ptb;
771 unsigned int n1;
772
773 ptb = &tb->jmp_next[n];
774 tb1 = *ptb;
775 if (tb1) {
776 /* find tb(n) in circular list */
777 for(;;) {
778 tb1 = *ptb;
779 n1 = (long)tb1 & 3;
780 tb1 = (TranslationBlock *)((long)tb1 & ~3);
781 if (n1 == n && tb1 == tb)
782 break;
783 if (n1 == 2) {
784 ptb = &tb1->jmp_first;
785 } else {
786 ptb = &tb1->jmp_next[n1];
787 }
788 }
789 /* now we can suppress tb(n) from the list */
790 *ptb = tb->jmp_next[n];
791
792 tb->jmp_next[n] = NULL;
793 }
794}
795
796/* reset the jump entry 'n' of a TB so that it is not chained to
797 another TB */
798static inline void tb_reset_jump(TranslationBlock *tb, int n)
799{
800 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
801}
802
803void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
804{
805 CPUState *env;
806 PageDesc *p;
807 unsigned int h, n1;
808 target_phys_addr_t phys_pc;
809 TranslationBlock *tb1, *tb2;
810
811 /* remove the TB from the hash list */
812 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
813 h = tb_phys_hash_func(phys_pc);
814 tb_remove(&tb_phys_hash[h], tb,
815 offsetof(TranslationBlock, phys_hash_next));
816
817 /* remove the TB from the page list */
818 if (tb->page_addr[0] != page_addr) {
819 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
820 tb_page_remove(&p->first_tb, tb);
821 invalidate_page_bitmap(p);
822 }
823 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
824 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
825 tb_page_remove(&p->first_tb, tb);
826 invalidate_page_bitmap(p);
827 }
828
829 tb_invalidated_flag = 1;
830
831 /* remove the TB from the hash list */
832 h = tb_jmp_cache_hash_func(tb->pc);
833 for(env = first_cpu; env != NULL; env = env->next_cpu) {
834 if (env->tb_jmp_cache[h] == tb)
835 env->tb_jmp_cache[h] = NULL;
836 }
837
838 /* suppress this TB from the two jump lists */
839 tb_jmp_remove(tb, 0);
840 tb_jmp_remove(tb, 1);
841
842 /* suppress any remaining jumps to this TB */
843 tb1 = tb->jmp_first;
844 for(;;) {
845 n1 = (long)tb1 & 3;
846 if (n1 == 2)
847 break;
848 tb1 = (TranslationBlock *)((long)tb1 & ~3);
849 tb2 = tb1->jmp_next[n1];
850 tb_reset_jump(tb1, n1);
851 tb1->jmp_next[n1] = NULL;
852 tb1 = tb2;
853 }
854 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
855
856#ifndef VBOX
857 tb_phys_invalidate_count++;
858#endif
859}
860
861
862#ifdef VBOX
863void tb_invalidate_virt(CPUState *env, uint32_t eip)
864{
865# if 1
866 tb_flush(env);
867# else
868 uint8_t *cs_base, *pc;
869 unsigned int flags, h, phys_pc;
870 TranslationBlock *tb, **ptb;
871
872 flags = env->hflags;
873 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
874 cs_base = env->segs[R_CS].base;
875 pc = cs_base + eip;
876
877 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
878 flags);
879
880 if(tb)
881 {
882# ifdef DEBUG
883 printf("invalidating TB (%08X) at %08X\n", tb, eip);
884# endif
885 tb_invalidate(tb);
886 //Note: this will leak TBs, but the whole cache will be flushed
887 // when it happens too often
888 tb->pc = 0;
889 tb->cs_base = 0;
890 tb->flags = 0;
891 }
892# endif
893}
894
895# ifdef VBOX_STRICT
896/**
897 * Gets the page offset.
898 */
899unsigned long get_phys_page_offset(target_ulong addr)
900{
901 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
902 return p ? p->phys_offset : 0;
903}
904# endif /* VBOX_STRICT */
905#endif /* VBOX */
906
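/* Set 'len' consecutive bits starting at bit index 'start' in bitmap 'tab'. */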
907static inline void set_bits(uint8_t *tab, int start, int len)
908{
909 int end, mask, end1;
910
911 end = start + len;
912 tab += start >> 3;
913 mask = 0xff << (start & 7);
914 if ((start & ~7) == (end & ~7)) {
915 if (start < end) {
916 mask &= ~(0xff << (end & 7));
917 *tab |= mask;
918 }
919 } else {
920 *tab++ |= mask;
921 start = (start + 8) & ~7;
922 end1 = end & ~7;
923 while (start < end1) {
924 *tab++ = 0xff;
925 start += 8;
926 }
927 if (start < end) {
928 mask = ~(0xff << (end & 7));
929 *tab |= mask;
930 }
931 }
932}
933
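/* Build the code bitmap of a page: mark every byte covered by a translated
   block so writes can be checked quickly for self-modifying code. */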
934static void build_page_bitmap(PageDesc *p)
935{
936 int n, tb_start, tb_end;
937 TranslationBlock *tb;
938
939 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
940 if (!p->code_bitmap)
941 return;
942 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
943
944 tb = p->first_tb;
945 while (tb != NULL) {
946 n = (long)tb & 3;
947 tb = (TranslationBlock *)((long)tb & ~3);
948 /* NOTE: this is subtle as a TB may span two physical pages */
949 if (n == 0) {
950 /* NOTE: tb_end may be after the end of the page, but
951 it is not a problem */
952 tb_start = tb->pc & ~TARGET_PAGE_MASK;
953 tb_end = tb_start + tb->size;
954 if (tb_end > TARGET_PAGE_SIZE)
955 tb_end = TARGET_PAGE_SIZE;
956 } else {
957 tb_start = 0;
958 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
959 }
960 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
961 tb = tb->page_next[n];
962 }
963}
964
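/* Translate the code starting at pc/cs_base/flags, link the resulting TB
   into the physical hash and page lists and return it; if no TB can be
   allocated, the whole translation cache is flushed first. */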
965TranslationBlock *tb_gen_code(CPUState *env,
966 target_ulong pc, target_ulong cs_base,
967 int flags, int cflags)
968{
969 TranslationBlock *tb;
970 uint8_t *tc_ptr;
971 target_ulong phys_pc, phys_page2, virt_page2;
972 int code_gen_size;
973
974 phys_pc = get_phys_addr_code(env, pc);
975 tb = tb_alloc(pc);
976 if (!tb) {
977 /* flush must be done */
978 tb_flush(env);
979 /* cannot fail at this point */
980 tb = tb_alloc(pc);
981 /* Don't forget to invalidate previous TB info. */
982 tb_invalidated_flag = 1;
983 }
984 tc_ptr = code_gen_ptr;
985 tb->tc_ptr = tc_ptr;
986 tb->cs_base = cs_base;
987 tb->flags = flags;
988 tb->cflags = cflags;
989 cpu_gen_code(env, tb, &code_gen_size);
990 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
991
992 /* check next page if needed */
993 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
994 phys_page2 = -1;
995 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
996 phys_page2 = get_phys_addr_code(env, virt_page2);
997 }
998 tb_link_phys(tb, phys_pc, phys_page2);
999 return tb;
1000}
1001
1002/* invalidate all TBs which intersect with the target physical page
1003 starting in range [start;end[. NOTE: start and end must refer to
1004 the same physical page. 'is_cpu_write_access' should be true if called
1005 from a real cpu write access: the virtual CPU will exit the current
1006 TB if code is modified inside this TB. */
1007void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1008 int is_cpu_write_access)
1009{
1010 int n, current_tb_modified, current_tb_not_found, current_flags;
1011 CPUState *env = cpu_single_env;
1012 PageDesc *p;
1013 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1014 target_ulong tb_start, tb_end;
1015 target_ulong current_pc, current_cs_base;
1016
1017 p = page_find(start >> TARGET_PAGE_BITS);
1018 if (!p)
1019 return;
1020 if (!p->code_bitmap &&
1021 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1022 is_cpu_write_access) {
1023 /* build code bitmap */
1024 build_page_bitmap(p);
1025 }
1026
1027 /* we remove all the TBs in the range [start, end[ */
1028 /* XXX: see if in some cases it could be faster to invalidate all the code */
1029 current_tb_not_found = is_cpu_write_access;
1030 current_tb_modified = 0;
1031 current_tb = NULL; /* avoid warning */
1032 current_pc = 0; /* avoid warning */
1033 current_cs_base = 0; /* avoid warning */
1034 current_flags = 0; /* avoid warning */
1035 tb = p->first_tb;
1036 while (tb != NULL) {
1037 n = (long)tb & 3;
1038 tb = (TranslationBlock *)((long)tb & ~3);
1039 tb_next = tb->page_next[n];
1040 /* NOTE: this is subtle as a TB may span two physical pages */
1041 if (n == 0) {
1042 /* NOTE: tb_end may be after the end of the page, but
1043 it is not a problem */
1044 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1045 tb_end = tb_start + tb->size;
1046 } else {
1047 tb_start = tb->page_addr[1];
1048 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1049 }
1050 if (!(tb_end <= start || tb_start >= end)) {
1051#ifdef TARGET_HAS_PRECISE_SMC
1052 if (current_tb_not_found) {
1053 current_tb_not_found = 0;
1054 current_tb = NULL;
1055 if (env->mem_io_pc) {
1056 /* now we have a real cpu fault */
1057 current_tb = tb_find_pc(env->mem_io_pc);
1058 }
1059 }
1060 if (current_tb == tb &&
1061 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
1067
1068 current_tb_modified = 1;
1069 cpu_restore_state(current_tb, env,
1070 env->mem_io_pc, NULL);
1071#if defined(TARGET_I386)
1072 current_flags = env->hflags;
1073 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1074 current_cs_base = (target_ulong)env->segs[R_CS].base;
1075 current_pc = current_cs_base + env->eip;
1076#else
1077#error unsupported CPU
1078#endif
1079 }
1080#endif /* TARGET_HAS_PRECISE_SMC */
1081 /* we need to do that to handle the case where a signal
1082 occurs while doing tb_phys_invalidate() */
1083 saved_tb = NULL;
1084 if (env) {
1085 saved_tb = env->current_tb;
1086 env->current_tb = NULL;
1087 }
1088 tb_phys_invalidate(tb, -1);
1089 if (env) {
1090 env->current_tb = saved_tb;
1091 if (env->interrupt_request && env->current_tb)
1092 cpu_interrupt(env, env->interrupt_request);
1093 }
1094 }
1095 tb = tb_next;
1096 }
1097#if !defined(CONFIG_USER_ONLY)
1098 /* if no code remaining, no need to continue to use slow writes */
1099 if (!p->first_tb) {
1100 invalidate_page_bitmap(p);
1101 if (is_cpu_write_access) {
1102 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1103 }
1104 }
1105#endif
1106#ifdef TARGET_HAS_PRECISE_SMC
1107 if (current_tb_modified) {
1108 /* we generate a block containing just the instruction
1109 modifying the memory. It will ensure that it cannot modify
1110 itself */
1111 env->current_tb = NULL;
1112 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1113 cpu_resume_from_signal(env, NULL);
1114 }
1115#endif
1116}
1117
1118
1119/* len must be <= 8 and start must be a multiple of len */
1120static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1121{
1122 PageDesc *p;
1123 int offset, b;
1124#if 0
1125 if (1) {
1126 if (loglevel) {
1127 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1128 cpu_single_env->mem_io_vaddr, len,
1129 cpu_single_env->eip,
1130 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1131 }
1132 }
1133#endif
1134 p = page_find(start >> TARGET_PAGE_BITS);
1135 if (!p)
1136 return;
1137 if (p->code_bitmap) {
1138 offset = start & ~TARGET_PAGE_MASK;
1139 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1140 if (b & ((1 << len) - 1))
1141 goto do_invalidate;
1142 } else {
1143 do_invalidate:
1144 tb_invalidate_phys_page_range(start, start + len, 1);
1145 }
1146}
1147
1148
1149#if !defined(CONFIG_SOFTMMU)
1150static void tb_invalidate_phys_page(target_phys_addr_t addr,
1151 unsigned long pc, void *puc)
1152{
1153 int n, current_flags, current_tb_modified;
1154 target_ulong current_pc, current_cs_base;
1155 PageDesc *p;
1156 TranslationBlock *tb, *current_tb;
1157#ifdef TARGET_HAS_PRECISE_SMC
1158 CPUState *env = cpu_single_env;
1159#endif
1160
1161 addr &= TARGET_PAGE_MASK;
1162 p = page_find(addr >> TARGET_PAGE_BITS);
1163 if (!p)
1164 return;
1165 tb = p->first_tb;
1166 current_tb_modified = 0;
1167 current_tb = NULL;
1168 current_pc = 0; /* avoid warning */
1169 current_cs_base = 0; /* avoid warning */
1170 current_flags = 0; /* avoid warning */
1171#ifdef TARGET_HAS_PRECISE_SMC
1172 if (tb && pc != 0) {
1173 current_tb = tb_find_pc(pc);
1174 }
1175#endif
1176 while (tb != NULL) {
1177 n = (long)tb & 3;
1178 tb = (TranslationBlock *)((long)tb & ~3);
1179#ifdef TARGET_HAS_PRECISE_SMC
1180 if (current_tb == tb &&
1181 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1182 /* If we are modifying the current TB, we must stop
1183 its execution. We could be more precise by checking
1184 that the modification is after the current PC, but it
1185 would require a specialized function to partially
1186 restore the CPU state */
1187
1188 current_tb_modified = 1;
1189 cpu_restore_state(current_tb, env, pc, puc);
1190#if defined(TARGET_I386)
1191 current_flags = env->hflags;
1192 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1193 current_cs_base = (target_ulong)env->segs[R_CS].base;
1194 current_pc = current_cs_base + env->eip;
1195#else
1196#error unsupported CPU
1197#endif
1198 }
1199#endif /* TARGET_HAS_PRECISE_SMC */
1200 tb_phys_invalidate(tb, addr);
1201 tb = tb->page_next[n];
1202 }
1203 p->first_tb = NULL;
1204#ifdef TARGET_HAS_PRECISE_SMC
1205 if (current_tb_modified) {
1206 /* we generate a block containing just the instruction
1207 modifying the memory. It will ensure that it cannot modify
1208 itself */
1209 env->current_tb = NULL;
1210 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1211 cpu_resume_from_signal(env, puc);
1212 }
1213#endif
1214}
1215#endif
1216
1217/* add the tb in the target page and protect it if necessary */
1218static inline void tb_alloc_page(TranslationBlock *tb,
1219 unsigned int n, target_ulong page_addr)
1220{
1221 PageDesc *p;
1222 TranslationBlock *last_first_tb;
1223
1224 tb->page_addr[n] = page_addr;
1225 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1226 tb->page_next[n] = p->first_tb;
1227 last_first_tb = p->first_tb;
1228 p->first_tb = (TranslationBlock *)((long)tb | n);
1229 invalidate_page_bitmap(p);
1230
1231#if defined(TARGET_HAS_SMC) || 1
1232
1233#if defined(CONFIG_USER_ONLY)
1234 if (p->flags & PAGE_WRITE) {
1235 target_ulong addr;
1236 PageDesc *p2;
1237 int prot;
1238
1239 /* force the host page as non writable (writes will have a
1240 page fault + mprotect overhead) */
1241 page_addr &= qemu_host_page_mask;
1242 prot = 0;
1243 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1244 addr += TARGET_PAGE_SIZE) {
1245
1246 p2 = page_find (addr >> TARGET_PAGE_BITS);
1247 if (!p2)
1248 continue;
1249 prot |= p2->flags;
1250 p2->flags &= ~PAGE_WRITE;
1251 page_get_flags(addr);
1252 }
1253 mprotect(g2h(page_addr), qemu_host_page_size,
1254 (prot & PAGE_BITS) & ~PAGE_WRITE);
1255#ifdef DEBUG_TB_INVALIDATE
1256 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1257 page_addr);
1258#endif
1259 }
1260#else
1261 /* if some code is already present, then the pages are already
1262 protected. So we handle the case where only the first TB is
1263 allocated in a physical page */
1264 if (!last_first_tb) {
1265 tlb_protect_code(page_addr);
1266 }
1267#endif
1268
1269#endif /* TARGET_HAS_SMC */
1270}
1271
1272/* Allocate a new translation block. Flush the translation buffer if
1273 too many translation blocks or too much generated code. */
1274TranslationBlock *tb_alloc(target_ulong pc)
1275{
1276 TranslationBlock *tb;
1277
1278 if (nb_tbs >= code_gen_max_blocks ||
1279 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1280 return NULL;
1281 tb = &tbs[nb_tbs++];
1282 tb->pc = pc;
1283 tb->cflags = 0;
1284 return tb;
1285}
1286
1287void tb_free(TranslationBlock *tb)
1288{
 1289 /* In practice this is mostly used for single use temporary TBs.
1290 Ignore the hard cases and just back up if this TB happens to
1291 be the last one generated. */
1292 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1293 code_gen_ptr = tb->tc_ptr;
1294 nb_tbs--;
1295 }
1296}
1297
1298/* add a new TB and link it to the physical page tables. phys_page2 is
1299 (-1) to indicate that only one page contains the TB. */
1300void tb_link_phys(TranslationBlock *tb,
1301 target_ulong phys_pc, target_ulong phys_page2)
1302{
1303 unsigned int h;
1304 TranslationBlock **ptb;
1305
1306 /* Grab the mmap lock to stop another thread invalidating this TB
1307 before we are done. */
1308 mmap_lock();
1309 /* add in the physical hash table */
1310 h = tb_phys_hash_func(phys_pc);
1311 ptb = &tb_phys_hash[h];
1312 tb->phys_hash_next = *ptb;
1313 *ptb = tb;
1314
1315 /* add in the page list */
1316 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1317 if (phys_page2 != -1)
1318 tb_alloc_page(tb, 1, phys_page2);
1319 else
1320 tb->page_addr[1] = -1;
1321
1322 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1323 tb->jmp_next[0] = NULL;
1324 tb->jmp_next[1] = NULL;
1325
1326 /* init original jump addresses */
1327 if (tb->tb_next_offset[0] != 0xffff)
1328 tb_reset_jump(tb, 0);
1329 if (tb->tb_next_offset[1] != 0xffff)
1330 tb_reset_jump(tb, 1);
1331
1332#ifdef DEBUG_TB_CHECK
1333 tb_page_check();
1334#endif
1335 mmap_unlock();
1336}
1337
1338/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1339 tb[1].tc_ptr. Return NULL if not found */
1340TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1341{
1342 int m_min, m_max, m;
1343 unsigned long v;
1344 TranslationBlock *tb;
1345
1346 if (nb_tbs <= 0)
1347 return NULL;
1348 if (tc_ptr < (unsigned long)code_gen_buffer ||
1349 tc_ptr >= (unsigned long)code_gen_ptr)
1350 return NULL;
1351 /* binary search (cf Knuth) */
1352 m_min = 0;
1353 m_max = nb_tbs - 1;
1354 while (m_min <= m_max) {
1355 m = (m_min + m_max) >> 1;
1356 tb = &tbs[m];
1357 v = (unsigned long)tb->tc_ptr;
1358 if (v == tc_ptr)
1359 return tb;
1360 else if (tc_ptr < v) {
1361 m_max = m - 1;
1362 } else {
1363 m_min = m + 1;
1364 }
1365 }
1366 return &tbs[m_max];
1367}
1368
1369static void tb_reset_jump_recursive(TranslationBlock *tb);
1370
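/* Detach jump slot 'n' of 'tb' from its target TB and recursively
   unchain the target's own outgoing jumps. */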
1371static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1372{
1373 TranslationBlock *tb1, *tb_next, **ptb;
1374 unsigned int n1;
1375
1376 tb1 = tb->jmp_next[n];
1377 if (tb1 != NULL) {
1378 /* find head of list */
1379 for(;;) {
1380 n1 = (long)tb1 & 3;
1381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1382 if (n1 == 2)
1383 break;
1384 tb1 = tb1->jmp_next[n1];
1385 }
 1386 /* we are now sure that tb jumps to tb1 */
1387 tb_next = tb1;
1388
1389 /* remove tb from the jmp_first list */
1390 ptb = &tb_next->jmp_first;
1391 for(;;) {
1392 tb1 = *ptb;
1393 n1 = (long)tb1 & 3;
1394 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1395 if (n1 == n && tb1 == tb)
1396 break;
1397 ptb = &tb1->jmp_next[n1];
1398 }
1399 *ptb = tb->jmp_next[n];
1400 tb->jmp_next[n] = NULL;
1401
1402 /* suppress the jump to next tb in generated code */
1403 tb_reset_jump(tb, n);
1404
1405 /* suppress jumps in the tb on which we could have jumped */
1406 tb_reset_jump_recursive(tb_next);
1407 }
1408}
1409
1410static void tb_reset_jump_recursive(TranslationBlock *tb)
1411{
1412 tb_reset_jump_recursive2(tb, 0);
1413 tb_reset_jump_recursive2(tb, 1);
1414}
1415
1416#if defined(TARGET_HAS_ICE)
1417static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1418{
1419 target_ulong addr, pd;
1420 ram_addr_t ram_addr;
1421 PhysPageDesc *p;
1422
1423 addr = cpu_get_phys_page_debug(env, pc);
1424 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1425 if (!p) {
1426 pd = IO_MEM_UNASSIGNED;
1427 } else {
1428 pd = p->phys_offset;
1429 }
1430 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1431 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1432}
1433#endif
1434
1435/* Add a watchpoint. */
1436int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1437{
1438 int i;
1439
1440 for (i = 0; i < env->nb_watchpoints; i++) {
1441 if (addr == env->watchpoint[i].vaddr)
1442 return 0;
1443 }
1444 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1445 return -1;
1446
1447 i = env->nb_watchpoints++;
1448 env->watchpoint[i].vaddr = addr;
1449 env->watchpoint[i].type = type;
1450 tlb_flush_page(env, addr);
1451 /* FIXME: This flush is needed because of the hack to make memory ops
1452 terminate the TB. It can be removed once the proper IO trap and
1453 re-execute bits are in. */
1454 tb_flush(env);
1455 return i;
1456}
1457
1458/* Remove a watchpoint. */
1459int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1460{
1461 int i;
1462
1463 for (i = 0; i < env->nb_watchpoints; i++) {
1464 if (addr == env->watchpoint[i].vaddr) {
1465 env->nb_watchpoints--;
1466 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1467 tlb_flush_page(env, addr);
1468 return 0;
1469 }
1470 }
1471 return -1;
1472}
1473
1474/* Remove all watchpoints. */
1475void cpu_watchpoint_remove_all(CPUState *env) {
1476 int i;
1477
1478 for (i = 0; i < env->nb_watchpoints; i++) {
1479 tlb_flush_page(env, env->watchpoint[i].vaddr);
1480 }
1481 env->nb_watchpoints = 0;
1482}
1483
1484/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1485 breakpoint is reached */
1486int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1487{
1488#if defined(TARGET_HAS_ICE)
1489 int i;
1490
1491 for(i = 0; i < env->nb_breakpoints; i++) {
1492 if (env->breakpoints[i] == pc)
1493 return 0;
1494 }
1495
1496 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1497 return -1;
1498 env->breakpoints[env->nb_breakpoints++] = pc;
1499
1500 breakpoint_invalidate(env, pc);
1501 return 0;
1502#else
1503 return -1;
1504#endif
1505}
1506
1507/* remove all breakpoints */
1508void cpu_breakpoint_remove_all(CPUState *env) {
1509#if defined(TARGET_HAS_ICE)
1510 int i;
1511 for(i = 0; i < env->nb_breakpoints; i++) {
1512 breakpoint_invalidate(env, env->breakpoints[i]);
1513 }
1514 env->nb_breakpoints = 0;
1515#endif
1516}
1517
1518/* remove a breakpoint */
1519int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1520{
1521#if defined(TARGET_HAS_ICE)
1522 int i;
1523 for(i = 0; i < env->nb_breakpoints; i++) {
1524 if (env->breakpoints[i] == pc)
1525 goto found;
1526 }
1527 return -1;
1528 found:
1529 env->nb_breakpoints--;
1530 if (i < env->nb_breakpoints)
1531 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1532
1533 breakpoint_invalidate(env, pc);
1534 return 0;
1535#else
1536 return -1;
1537#endif
1538}
1539
1540/* enable or disable single step mode. EXCP_DEBUG is returned by the
1541 CPU loop after each instruction */
1542void cpu_single_step(CPUState *env, int enabled)
1543{
1544#if defined(TARGET_HAS_ICE)
1545 if (env->singlestep_enabled != enabled) {
1546 env->singlestep_enabled = enabled;
 1547 /* must flush all the translated code to avoid inconsistencies */
1548 /* XXX: only flush what is necessary */
1549 tb_flush(env);
1550 }
1551#endif
1552}
1553
1554#ifndef VBOX
 1555/* enable or disable low level logging */
1556void cpu_set_log(int log_flags)
1557{
1558 loglevel = log_flags;
1559 if (loglevel && !logfile) {
1560 logfile = fopen(logfilename, "w");
1561 if (!logfile) {
1562 perror(logfilename);
1563 _exit(1);
1564 }
1565#if !defined(CONFIG_SOFTMMU)
1566 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1567 {
1568 static uint8_t logfile_buf[4096];
1569 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1570 }
1571#else
1572 setvbuf(logfile, NULL, _IOLBF, 0);
1573#endif
1574 }
1575}
1576
1577void cpu_set_log_filename(const char *filename)
1578{
1579 logfilename = strdup(filename);
1580}
1581#endif /* !VBOX */
1582
1583/* mask must never be zero, except for A20 change call */
1584void cpu_interrupt(CPUState *env, int mask)
1585{
1586#if !defined(USE_NPTL)
1587 TranslationBlock *tb;
1588 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1589#endif
1590 int old_mask;
1591
1592 old_mask = env->interrupt_request;
1593#ifdef VBOX
1594 VM_ASSERT_EMT(env->pVM);
1595 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1596#else /* !VBOX */
1597 /* FIXME: This is probably not threadsafe. A different thread could
1598 be in the middle of a read-modify-write operation. */
1599 env->interrupt_request |= mask;
1600#endif /* !VBOX */
1601#if defined(USE_NPTL)
1602 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1603 problem and hope the cpu will stop of its own accord. For userspace
1604 emulation this often isn't actually as bad as it sounds. Often
1605 signals are used primarily to interrupt blocking syscalls. */
1606#else
1607 if (use_icount) {
1608 env->icount_decr.u16.high = 0xffff;
1609#ifndef CONFIG_USER_ONLY
1610 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1611 an async event happened and we need to process it. */
1612 if (!can_do_io(env)
1613 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1614 cpu_abort(env, "Raised interrupt while not in I/O function");
1615 }
1616#endif
1617 } else {
1618 tb = env->current_tb;
1619 /* if the cpu is currently executing code, we must unlink it and
1620 all the potentially executing TB */
1621 if (tb && !testandset(&interrupt_lock)) {
1622 env->current_tb = NULL;
1623 tb_reset_jump_recursive(tb);
1624 resetlock(&interrupt_lock);
1625 }
1626 }
1627#endif
1628}
1629
1630void cpu_reset_interrupt(CPUState *env, int mask)
1631{
1632#ifdef VBOX
1633 /*
1634 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1635 * for future changes!
1636 */
1637 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1638#else /* !VBOX */
1639 env->interrupt_request &= ~mask;
1640#endif /* !VBOX */
1641}
1642
1643#ifndef VBOX
1644CPULogItem cpu_log_items[] = {
1645 { CPU_LOG_TB_OUT_ASM, "out_asm",
1646 "show generated host assembly code for each compiled TB" },
1647 { CPU_LOG_TB_IN_ASM, "in_asm",
1648 "show target assembly code for each compiled TB" },
1649 { CPU_LOG_TB_OP, "op",
1650 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1651#ifdef TARGET_I386
1652 { CPU_LOG_TB_OP_OPT, "op_opt",
1653 "show micro ops after optimization for each compiled TB" },
1654#endif
1655 { CPU_LOG_INT, "int",
1656 "show interrupts/exceptions in short format" },
1657 { CPU_LOG_EXEC, "exec",
1658 "show trace before each executed TB (lots of logs)" },
1659 { CPU_LOG_TB_CPU, "cpu",
 1660 "show CPU state before block translation" },
1661#ifdef TARGET_I386
1662 { CPU_LOG_PCALL, "pcall",
1663 "show protected mode far calls/returns/exceptions" },
1664#endif
1665#ifdef DEBUG_IOPORT
1666 { CPU_LOG_IOPORT, "ioport",
1667 "show all i/o ports accesses" },
1668#endif
1669 { 0, NULL, NULL },
1670};
1671
1672static int cmp1(const char *s1, int n, const char *s2)
1673{
1674 if (strlen(s2) != n)
1675 return 0;
1676 return memcmp(s1, s2, n) == 0;
1677}
1678
1679/* takes a comma separated list of log masks. Return 0 if error. */
1680int cpu_str_to_log_mask(const char *str)
1681{
1682 CPULogItem *item;
1683 int mask;
1684 const char *p, *p1;
1685
1686 p = str;
1687 mask = 0;
1688 for(;;) {
1689 p1 = strchr(p, ',');
1690 if (!p1)
1691 p1 = p + strlen(p);
1692 if(cmp1(p,p1-p,"all")) {
1693 for(item = cpu_log_items; item->mask != 0; item++) {
1694 mask |= item->mask;
1695 }
1696 } else {
1697 for(item = cpu_log_items; item->mask != 0; item++) {
1698 if (cmp1(p, p1 - p, item->name))
1699 goto found;
1700 }
1701 return 0;
1702 }
1703 found:
1704 mask |= item->mask;
1705 if (*p1 != ',')
1706 break;
1707 p = p1 + 1;
1708 }
1709 return mask;
1710}
1711#endif /* !VBOX */
1712
1713#ifndef VBOX /* VBOX: we have our own routine. */
1714void cpu_abort(CPUState *env, const char *fmt, ...)
1715{
1716 va_list ap;
1717
1718 va_start(ap, fmt);
1719 fprintf(stderr, "qemu: fatal: ");
1720 vfprintf(stderr, fmt, ap);
1721 fprintf(stderr, "\n");
1722#ifdef TARGET_I386
1723 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1724#else
1725 cpu_dump_state(env, stderr, fprintf, 0);
1726#endif
1727 va_end(ap);
1728 abort();
1729}
1730#endif /* !VBOX */
1731
1732#ifndef VBOX
1733CPUState *cpu_copy(CPUState *env)
1734{
1735 CPUState *new_env = cpu_init(env->cpu_model_str);
1736 /* preserve chaining and index */
1737 CPUState *next_cpu = new_env->next_cpu;
1738 int cpu_index = new_env->cpu_index;
1739 memcpy(new_env, env, sizeof(CPUState));
1740 new_env->next_cpu = next_cpu;
1741 new_env->cpu_index = cpu_index;
1742 return new_env;
1743}
1744#endif
1745
1746#if !defined(CONFIG_USER_ONLY)
1747
1748static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1749{
1750 unsigned int i;
1751
1752 /* Discard jump cache entries for any tb which might potentially
1753 overlap the flushed page. */
1754 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1755 memset (&env->tb_jmp_cache[i], 0,
1756 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1757
1758 i = tb_jmp_cache_hash_page(addr);
1759 memset (&env->tb_jmp_cache[i], 0,
1760 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1761
1762#ifdef VBOX
1763 /* inform raw mode about TLB page flush */
1764 remR3FlushPage(env, addr);
1765#endif /* VBOX */
1766}
1767
1768/* NOTE: if flush_global is true, also flush global entries (not
1769 implemented yet) */
1770void tlb_flush(CPUState *env, int flush_global)
1771{
1772 int i;
1773
1774#if defined(DEBUG_TLB)
1775 printf("tlb_flush:\n");
1776#endif
1777 /* must reset current TB so that interrupts cannot modify the
1778 links while we are modifying them */
1779 env->current_tb = NULL;
1780
1781 for(i = 0; i < CPU_TLB_SIZE; i++) {
1782 env->tlb_table[0][i].addr_read = -1;
1783 env->tlb_table[0][i].addr_write = -1;
1784 env->tlb_table[0][i].addr_code = -1;
1785 env->tlb_table[1][i].addr_read = -1;
1786 env->tlb_table[1][i].addr_write = -1;
1787 env->tlb_table[1][i].addr_code = -1;
1788#if (NB_MMU_MODES >= 3)
1789 env->tlb_table[2][i].addr_read = -1;
1790 env->tlb_table[2][i].addr_write = -1;
1791 env->tlb_table[2][i].addr_code = -1;
1792#if (NB_MMU_MODES == 4)
1793 env->tlb_table[3][i].addr_read = -1;
1794 env->tlb_table[3][i].addr_write = -1;
1795 env->tlb_table[3][i].addr_code = -1;
1796#endif
1797#endif
1798 }
1799
1800 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1801
1802#ifdef VBOX
1803 /* inform raw mode about TLB flush */
1804 remR3FlushTLB(env, flush_global);
1805#endif
1806#ifdef USE_KQEMU
1807 if (env->kqemu_enabled) {
1808 kqemu_flush(env, flush_global);
1809 }
1810#endif
1811 tlb_flush_count++;
1812}
1813
1814static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1815{
1816 if (addr == (tlb_entry->addr_read &
1817 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1818 addr == (tlb_entry->addr_write &
1819 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1820 addr == (tlb_entry->addr_code &
1821 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1822 tlb_entry->addr_read = -1;
1823 tlb_entry->addr_write = -1;
1824 tlb_entry->addr_code = -1;
1825 }
1826}
1827
1828void tlb_flush_page(CPUState *env, target_ulong addr)
1829{
1830 int i;
1831 TranslationBlock *tb;
1832
1833#if defined(DEBUG_TLB)
1834 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1835#endif
1836 /* must reset current TB so that interrupts cannot modify the
1837 links while we are modifying them */
1838 env->current_tb = NULL;
1839
1840 addr &= TARGET_PAGE_MASK;
1841 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1842 tlb_flush_entry(&env->tlb_table[0][i], addr);
1843 tlb_flush_entry(&env->tlb_table[1][i], addr);
1844#if (NB_MMU_MODES >= 3)
1845 tlb_flush_entry(&env->tlb_table[2][i], addr);
1846#if (NB_MMU_MODES == 4)
1847 tlb_flush_entry(&env->tlb_table[3][i], addr);
1848#endif
1849#endif
1850
1851 tlb_flush_jmp_cache(env, addr);
1852
1853#ifdef USE_KQEMU
1854 if (env->kqemu_enabled) {
1855 kqemu_flush_page(env, addr);
1856 }
1857#endif
1858}
1859
1860/* update the TLBs so that writes to code in the virtual page 'addr'
1861 can be detected */
1862static void tlb_protect_code(ram_addr_t ram_addr)
1863{
1864 cpu_physical_memory_reset_dirty(ram_addr,
1865 ram_addr + TARGET_PAGE_SIZE,
1866 CODE_DIRTY_FLAG);
1867#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1868 /** @todo Retest this? This function has changed... */
1869 remR3ProtectCode(cpu_single_env, ram_addr);
1870#endif
1871}
1872
1873/* update the TLB so that writes in physical page 'phys_addr' are no longer
1874 tested for self modifying code */
1875static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1876 target_ulong vaddr)
1877{
1878#ifdef VBOX
1879 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1880#endif
1881 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1882}
1883
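/* If this TLB entry maps RAM inside [start, start+length), force writes
   through IO_MEM_NOTDIRTY so the dirty flags get updated again. */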
1884static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1885 unsigned long start, unsigned long length)
1886{
1887 unsigned long addr;
1888 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1889 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1890 if ((addr - start) < length) {
1891 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1892 }
1893 }
1894}
1895
1896void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1897 int dirty_flags)
1898{
1899 CPUState *env;
1900 unsigned long length, start1;
1901 int i, mask, len;
1902 uint8_t *p;
1903
1904 start &= TARGET_PAGE_MASK;
1905 end = TARGET_PAGE_ALIGN(end);
1906
1907 length = end - start;
1908 if (length == 0)
1909 return;
1910 len = length >> TARGET_PAGE_BITS;
1911#ifdef USE_KQEMU
1912 /* XXX: should not depend on cpu context */
1913 env = first_cpu;
1914 if (env->kqemu_enabled) {
1915 ram_addr_t addr;
1916 addr = start;
1917 for(i = 0; i < len; i++) {
1918 kqemu_set_notdirty(env, addr);
1919 addr += TARGET_PAGE_SIZE;
1920 }
1921 }
1922#endif
1923 mask = ~dirty_flags;
1924 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1925#ifdef VBOX
1926 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1927#endif
1928 for(i = 0; i < len; i++)
1929 p[i] &= mask;
1930
1931 /* we modify the TLB cache so that the dirty bit will be set again
1932 when accessing the range */
1933#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1934 start1 = start;
1935#elif !defined(VBOX)
1936 start1 = start + (unsigned long)phys_ram_base;
1937#else
1938 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
1939#endif
1940 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1941 for(i = 0; i < CPU_TLB_SIZE; i++)
1942 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1943 for(i = 0; i < CPU_TLB_SIZE; i++)
1944 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1945#if (NB_MMU_MODES >= 3)
1946 for(i = 0; i < CPU_TLB_SIZE; i++)
1947 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1948#if (NB_MMU_MODES == 4)
1949 for(i = 0; i < CPU_TLB_SIZE; i++)
1950 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1951#endif
1952#endif
1953 }
1954}
1955
1956int cpu_physical_memory_set_dirty_tracking(int enable)
1957{
1958 in_migration = enable;
1959 return 0;
1960}
1961
1962int cpu_physical_memory_get_dirty_tracking(void)
1963{
1964 return in_migration;
1965}
1966
1967
1968static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1969{
1970 ram_addr_t ram_addr;
1971
1972 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1973 /* RAM case */
1974#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1975 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1976#elif !defined(VBOX)
1977 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1978 tlb_entry->addend - (unsigned long)phys_ram_base;
1979#else
1980 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
1981#endif
1982 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1983 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1984 }
1985 }
1986}
1987
1988/* update the TLB according to the current state of the dirty bits */
1989void cpu_tlb_update_dirty(CPUState *env)
1990{
1991 int i;
1992 for(i = 0; i < CPU_TLB_SIZE; i++)
1993 tlb_update_dirty(&env->tlb_table[0][i]);
1994 for(i = 0; i < CPU_TLB_SIZE; i++)
1995 tlb_update_dirty(&env->tlb_table[1][i]);
1996#if (NB_MMU_MODES >= 3)
1997 for(i = 0; i < CPU_TLB_SIZE; i++)
1998 tlb_update_dirty(&env->tlb_table[2][i]);
1999#if (NB_MMU_MODES == 4)
2000 for(i = 0; i < CPU_TLB_SIZE; i++)
2001 tlb_update_dirty(&env->tlb_table[3][i]);
2002#endif
2003#endif
2004}
2005
2006static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2007{
2008 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2009 tlb_entry->addr_write = vaddr;
2010}
2011
2012
2013/* update the TLB corresponding to virtual page vaddr and phys addr
2014 addr so that it is no longer dirty */
2015static inline void tlb_set_dirty(CPUState *env,
2016 unsigned long addr, target_ulong vaddr)
2017{
2018 int i;
2019
2020 addr &= TARGET_PAGE_MASK;
2021 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2022 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2023 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2024#if (NB_MMU_MODES >= 3)
2025 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2026#if (NB_MMU_MODES == 4)
2027 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2028#endif
2029#endif
2030}
2031
2032/* add a new TLB entry. At most one entry for a given virtual address
2033 is permitted. Return 0 if OK or 2 if the page could not be mapped
2034 (can only happen in non SOFTMMU mode for I/O pages or pages
2035 conflicting with the host address space). */
2036int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2037 target_phys_addr_t paddr, int prot,
2038 int is_user, int is_softmmu)
2039{
2040 PhysPageDesc *p;
2041 unsigned long pd;
2042 unsigned int index;
2043 target_ulong address;
2044 target_phys_addr_t addend;
2045 int ret;
2046 CPUTLBEntry *te;
2047
2048 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2049 if (!p) {
2050 pd = IO_MEM_UNASSIGNED;
2051 } else {
2052 pd = p->phys_offset;
2053 }
2054#if defined(DEBUG_TLB)
2055 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
2056 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
2057#endif
2058
2059 ret = 0;
2060#if !defined(CONFIG_SOFTMMU)
2061 if (is_softmmu)
2062#endif
2063 {
2064 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2065 /* IO memory case */
2066 address = vaddr | pd;
2067 addend = paddr;
2068 } else {
2069 /* standard memory */
2070 address = vaddr;
2071#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2072 addend = pd & TARGET_PAGE_MASK;
2073#elif !defined(VBOX)
2074 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2075#else
2076 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
2077#endif
2078 }
2079
2080 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2081 addend -= vaddr;
2082 te = &env->tlb_table[is_user][index];
2083 te->addend = addend;
2084 if (prot & PAGE_READ) {
2085 te->addr_read = address;
2086 } else {
2087 te->addr_read = -1;
2088 }
2089 if (prot & PAGE_EXEC) {
2090 te->addr_code = address;
2091 } else {
2092 te->addr_code = -1;
2093 }
2094 if (prot & PAGE_WRITE) {
2095 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2096 (pd & IO_MEM_ROMD)) {
2097 /* write access calls the I/O callback */
2098 te->addr_write = vaddr |
2099 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
2100 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2101 !cpu_physical_memory_is_dirty(pd)) {
2102 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
2103 } else {
2104 te->addr_write = address;
2105 }
2106 } else {
2107 te->addr_write = -1;
2108 }
2109#ifdef VBOX
2110 /* inform raw mode about TLB page change */
2111 remR3FlushPage(env, vaddr);
2112#endif
2113 }
2114#if !defined(CONFIG_SOFTMMU)
2115 else {
2116 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2117 /* IO access: no mapping is done as it will be handled by the
2118 soft MMU */
2119 if (!(env->hflags & HF_SOFTMMU_MASK))
2120 ret = 2;
2121 } else {
2122 void *map_addr;
2123
2124 if (vaddr >= MMAP_AREA_END) {
2125 ret = 2;
2126 } else {
2127 if (prot & PROT_WRITE) {
2128 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2129#if defined(TARGET_HAS_SMC) || 1
2130 first_tb ||
2131#endif
2132 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2133 !cpu_physical_memory_is_dirty(pd))) {
2134 /* ROM: we behave as if code were inside */
2135 /* if code is present, we only map it read-only and save the
2136 original mapping */
2137 VirtPageDesc *vp;
2138
2139 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
2140 vp->phys_addr = pd;
2141 vp->prot = prot;
2142 vp->valid_tag = virt_valid_tag;
2143 prot &= ~PAGE_WRITE;
2144 }
2145 }
2146 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
2147 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
2148 if (map_addr == MAP_FAILED) {
2149 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
2150 paddr, vaddr);
2151 }
2152 }
2153 }
2154 }
2155#endif
2156 return ret;
2157}
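
/* Illustrative sketch (editor's addition, not part of the original file): how
 * a target MMU fault handler typically feeds a freshly walked translation into
 * the soft TLB. 'prot' is a PAGE_READ/PAGE_WRITE/PAGE_EXEC mask; both
 * addresses are masked down to page boundaries before the call. */
static int example_fill_tlb(CPUState *env, target_ulong vaddr,
                            target_phys_addr_t paddr, int prot, int is_user)
{
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, is_user, 1 /* is_softmmu */);
}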
2158
2159/* called from signal handler: invalidate the code and unprotect the
2160 page. Return TRUE if the fault was succesfully handled. */
2161int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2162{
2163#if !defined(CONFIG_SOFTMMU)
2164 VirtPageDesc *vp;
2165
2166#if defined(DEBUG_TLB)
2167 printf("page_unprotect: addr=0x%08x\n", addr);
2168#endif
2169 addr &= TARGET_PAGE_MASK;
2170
2171 /* if it is not mapped, no need to worry here */
2172 if (addr >= MMAP_AREA_END)
2173 return 0;
2174 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2175 if (!vp)
2176 return 0;
2177 /* NOTE: in this case, valid_tag is _not_ tested as it
2178 validates only the code TLB */
2179 if (vp->valid_tag != virt_valid_tag)
2180 return 0;
2181 if (!(vp->prot & PAGE_WRITE))
2182 return 0;
2183#if defined(DEBUG_TLB)
2184 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2185 addr, vp->phys_addr, vp->prot);
2186#endif
2187 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2188 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2189 (unsigned long)addr, vp->prot);
2190 /* set the dirty bit */
2191 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2192 /* flush the code inside */
2193 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2194 return 1;
2195#elif defined(VBOX)
2196 addr &= TARGET_PAGE_MASK;
2197
2198 /* if it is not mapped, no need to worry here */
2199 if (addr >= MMAP_AREA_END)
2200 return 0;
2201 return 1;
2202#else
2203 return 0;
2204#endif
2205}
2206
2207#else
2208
2209void tlb_flush(CPUState *env, int flush_global)
2210{
2211}
2212
2213void tlb_flush_page(CPUState *env, target_ulong addr)
2214{
2215}
2216
2217int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2218 target_phys_addr_t paddr, int prot,
2219 int is_user, int is_softmmu)
2220{
2221 return 0;
2222}
2223
2224#ifndef VBOX
2225/* dump memory mappings */
2226void page_dump(FILE *f)
2227{
2228 unsigned long start, end;
2229 int i, j, prot, prot1;
2230 PageDesc *p;
2231
2232 fprintf(f, "%-8s %-8s %-8s %s\n",
2233 "start", "end", "size", "prot");
2234 start = -1;
2235 end = -1;
2236 prot = 0;
2237 for(i = 0; i <= L1_SIZE; i++) {
2238 if (i < L1_SIZE)
2239 p = l1_map[i];
2240 else
2241 p = NULL;
2242 for(j = 0;j < L2_SIZE; j++) {
2243 if (!p)
2244 prot1 = 0;
2245 else
2246 prot1 = p[j].flags;
2247 if (prot1 != prot) {
2248 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2249 if (start != -1) {
2250 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2251 start, end, end - start,
2252 prot & PAGE_READ ? 'r' : '-',
2253 prot & PAGE_WRITE ? 'w' : '-',
2254 prot & PAGE_EXEC ? 'x' : '-');
2255 }
2256 if (prot1 != 0)
2257 start = end;
2258 else
2259 start = -1;
2260 prot = prot1;
2261 }
2262 if (!p)
2263 break;
2264 }
2265 }
2266}
2267#endif /* !VBOX */
2268
2269int page_get_flags(target_ulong address)
2270{
2271 PageDesc *p;
2272
2273 p = page_find(address >> TARGET_PAGE_BITS);
2274 if (!p)
2275 return 0;
2276 return p->flags;
2277}
2278
2279/* modify the flags of a page and invalidate the code if
2280 necessary. The flag PAGE_WRITE_ORG is set automatically
2281 depending on PAGE_WRITE */
2282void page_set_flags(target_ulong start, target_ulong end, int flags)
2283{
2284 PageDesc *p;
2285 target_ulong addr;
2286
2287 start = start & TARGET_PAGE_MASK;
2288 end = TARGET_PAGE_ALIGN(end);
2289 if (flags & PAGE_WRITE)
2290 flags |= PAGE_WRITE_ORG;
2291#ifdef VBOX
2292 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2293#endif
2294 spin_lock(&tb_lock);
2295 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2296 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2297 /* if the write protection is set, then we invalidate the code
2298 inside */
2299 if (!(p->flags & PAGE_WRITE) &&
2300 (flags & PAGE_WRITE) &&
2301 p->first_tb) {
2302 tb_invalidate_phys_page(addr, 0, NULL);
2303 }
2304 p->flags = flags;
2305 }
2306 spin_unlock(&tb_lock);
2307}
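
/* Illustrative sketch (editor's addition, not part of the original file,
 * user-mode configuration only): how an emulated mmap()/mprotect() path might
 * publish a freshly mapped guest range so that page_get_flags() and the
 * self-modifying-code checks see it. PAGE_VALID marks the range as mapped;
 * PAGE_WRITE_ORG is derived automatically as noted above. 'start', 'len' and
 * 'prot' are made-up parameters. */
static void example_publish_mapping(target_ulong start, target_ulong len, int prot)
{
    page_set_flags(start, start + len, prot | PAGE_VALID);
}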
2308
2309/* called from signal handler: invalidate the code and unprotect the
2310 page. Return TRUE if the fault was successfully handled. */
2311int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2312{
2313 unsigned int page_index, prot, pindex;
2314 PageDesc *p, *p1;
2315 target_ulong host_start, host_end, addr;
2316
2317 host_start = address & qemu_host_page_mask;
2318 page_index = host_start >> TARGET_PAGE_BITS;
2319 p1 = page_find(page_index);
2320 if (!p1)
2321 return 0;
2322 host_end = host_start + qemu_host_page_size;
2323 p = p1;
2324 prot = 0;
2325 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2326 prot |= p->flags;
2327 p++;
2328 }
2329 /* if the page was really writable, then we change its
2330 protection back to writable */
2331 if (prot & PAGE_WRITE_ORG) {
2332 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2333 if (!(p1[pindex].flags & PAGE_WRITE)) {
2334 mprotect((void *)g2h(host_start), qemu_host_page_size,
2335 (prot & PAGE_BITS) | PAGE_WRITE);
2336 p1[pindex].flags |= PAGE_WRITE;
2337 /* and since the content will be modified, we must invalidate
2338 the corresponding translated code. */
2339 tb_invalidate_phys_page(address, pc, puc);
2340#ifdef DEBUG_TB_CHECK
2341 tb_invalidate_check(address);
2342#endif
2343 return 1;
2344 }
2345 }
2346 return 0;
2347}
2348
2349/* call this function when system calls directly modify a memory area */
2350/* ??? This should be redundant now that we have lock_user. */
2351void page_unprotect_range(target_ulong data, target_ulong data_size)
2352{
2353 target_ulong start, end, addr;
2354
2355 start = data;
2356 end = start + data_size;
2357 start &= TARGET_PAGE_MASK;
2358 end = TARGET_PAGE_ALIGN(end);
2359 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2360 page_unprotect(addr, 0, NULL);
2361 }
2362}
2363
2364static inline void tlb_set_dirty(CPUState *env,
2365 unsigned long addr, target_ulong vaddr)
2366{
2367}
2368#endif /* defined(CONFIG_USER_ONLY) */
2369
2370/* register physical memory. 'size' must be a multiple of the target
2371 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2372 io memory page */
2373void cpu_register_physical_memory(target_phys_addr_t start_addr,
2374 unsigned long size,
2375 unsigned long phys_offset)
2376{
2377 target_phys_addr_t addr, end_addr;
2378 PhysPageDesc *p;
2379 CPUState *env;
2380
2381 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2382 end_addr = start_addr + size;
2383 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2384 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2385 p->phys_offset = phys_offset;
2386#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2387 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2388 (phys_offset & IO_MEM_ROMD))
2389#else
2390 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2391 || (phys_offset & IO_MEM_ROMD)
2392 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2393#endif
2394
2395 phys_offset += TARGET_PAGE_SIZE;
2396 }
2397
2398 /* since each CPU stores ram addresses in its TLB cache, we must
2399 reset the modified entries */
2400 /* XXX: slow ! */
2401 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2402 tlb_flush(env, 1);
2403 }
2404}
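
/* Illustrative sketch (editor's addition, not part of the original file):
 * registering a page-aligned block of plain RAM. Because IO_MEM_RAM is 0 in
 * this memory model, a phys_offset with no flag bits set in ~TARGET_PAGE_MASK
 * is treated as an offset into the RAM block, so the offset itself doubles as
 * the descriptor. 'guest_base', 'ram_offset' and 'ram_size' are made-up
 * parameters. */
static void example_register_ram(target_phys_addr_t guest_base,
                                 unsigned long ram_offset,
                                 unsigned long ram_size)
{
    cpu_register_physical_memory(guest_base, ram_size, ram_offset | IO_MEM_RAM);
}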
2405
2406/* XXX: temporary until new memory mapping API */
2407uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2408{
2409 PhysPageDesc *p;
2410
2411 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2412 if (!p)
2413 return IO_MEM_UNASSIGNED;
2414 return p->phys_offset;
2415}
2416
2417static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2418{
2419#ifdef DEBUG_UNASSIGNED
2420 printf("Unassigned mem read 0x%08x\n", (int)addr);
2421#endif
2422 return 0;
2423}
2424
2425static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2426{
2427#ifdef DEBUG_UNASSIGNED
2428 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2429#endif
2430}
2431
2432static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2433 unassigned_mem_readb,
2434 unassigned_mem_readb,
2435 unassigned_mem_readb,
2436};
2437
2438static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2439 unassigned_mem_writeb,
2440 unassigned_mem_writeb,
2441 unassigned_mem_writeb,
2442};
2443
2444static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2445{
2446 unsigned long ram_addr;
2447 int dirty_flags;
2448#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2449 ram_addr = addr;
2450#elif !defined(VBOX)
2451 ram_addr = addr - (unsigned long)phys_ram_base;
2452#else
2453 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2454#endif
2455#ifdef VBOX
2456 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2457 dirty_flags = 0xff;
2458 else
2459#endif /* VBOX */
2460 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2461 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2462#if !defined(CONFIG_USER_ONLY)
2463 tb_invalidate_phys_page_fast(ram_addr, 1);
2464# ifdef VBOX
2465 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2466 dirty_flags = 0xff;
2467 else
2468# endif /* VBOX */
2469 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2470#endif
2471 }
2472 stb_p((uint8_t *)(long)addr, val);
2473#ifdef USE_KQEMU
2474 if (cpu_single_env->kqemu_enabled &&
2475 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2476 kqemu_modify_page(cpu_single_env, ram_addr);
2477#endif
2478 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2479#ifdef VBOX
2480 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2481#endif /* VBOX */
2482 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2483 /* we remove the notdirty callback only if the code has been
2484 flushed */
2485 if (dirty_flags == 0xff)
2486 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2487}
2488
2489static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2490{
2491 unsigned long ram_addr;
2492 int dirty_flags;
2493#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2494 ram_addr = addr;
2495#elif !defined(VBOX)
2496 ram_addr = addr - (unsigned long)phys_ram_base;
2497#else
2498 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2499#endif
2500#ifdef VBOX
2501 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2502 dirty_flags = 0xff;
2503 else
2504#endif /* VBOX */
2505 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2506 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2507#if !defined(CONFIG_USER_ONLY)
2508 tb_invalidate_phys_page_fast(ram_addr, 2);
2509# ifdef VBOX
2510 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2511 dirty_flags = 0xff;
2512 else
2513# endif /* VBOX */
2514 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2515#endif
2516 }
2517 stw_p((uint8_t *)(long)addr, val);
2518#ifdef USE_KQEMU
2519 if (cpu_single_env->kqemu_enabled &&
2520 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2521 kqemu_modify_page(cpu_single_env, ram_addr);
2522#endif
2523 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2524#ifdef VBOX
2525 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2526#endif
2527 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2528 /* we remove the notdirty callback only if the code has been
2529 flushed */
2530 if (dirty_flags == 0xff)
2531 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2532}
2533
2534static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2535{
2536 unsigned long ram_addr;
2537 int dirty_flags;
2538#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2539 ram_addr = addr;
2540#elif !defined(VBOX)
2541 ram_addr = addr - (unsigned long)phys_ram_base;
2542#else
2543 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2544#endif
2545#ifdef VBOX
2546 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2547 dirty_flags = 0xff;
2548 else
2549#endif /* VBOX */
2550 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2551 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2552#if !defined(CONFIG_USER_ONLY)
2553 tb_invalidate_phys_page_fast(ram_addr, 4);
2554# ifdef VBOX
2555 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2556 dirty_flags = 0xff;
2557 else
2558# endif /* VBOX */
2559 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2560#endif
2561 }
2562 stl_p((uint8_t *)(long)addr, val);
2563#ifdef USE_KQEMU
2564 if (cpu_single_env->kqemu_enabled &&
2565 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2566 kqemu_modify_page(cpu_single_env, ram_addr);
2567#endif
2568 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2569#ifdef VBOX
2570 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2571#endif
2572 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2573 /* we remove the notdirty callback only if the code has been
2574 flushed */
2575 if (dirty_flags == 0xff)
2576 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2577}
2578
2579static CPUReadMemoryFunc *error_mem_read[3] = {
2580 NULL, /* never used */
2581 NULL, /* never used */
2582 NULL, /* never used */
2583};
2584
2585static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2586 notdirty_mem_writeb,
2587 notdirty_mem_writew,
2588 notdirty_mem_writel,
2589};
2590
2591static void io_mem_init(void)
2592{
2593 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2594 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2595 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2596#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
2597 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2598 io_mem_nb = 6;
2599#else
2600 io_mem_nb = 5;
2601#endif
2602
2603#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2604 /* alloc dirty bits array */
2605 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2606 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2607#endif /* !VBOX */
2608}
2609
2610/* mem_read and mem_write are arrays of functions containing the
2611 function to access byte (index 0), word (index 1) and dword (index
2612 2). All functions must be supplied. If io_index is non-zero, the
2613 corresponding io zone is modified. If it is zero, a new io zone is
2614 allocated. The return value can be used with
2615 cpu_register_physical_memory(). (-1) is returned on error. */
2616int cpu_register_io_memory(int io_index,
2617 CPUReadMemoryFunc **mem_read,
2618 CPUWriteMemoryFunc **mem_write,
2619 void *opaque)
2620{
2621 int i;
2622
2623 if (io_index <= 0) {
2624 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2625 return -1;
2626 io_index = io_mem_nb++;
2627 } else {
2628 if (io_index >= IO_MEM_NB_ENTRIES)
2629 return -1;
2630 }
2631
2632 for(i = 0;i < 3; i++) {
2633 io_mem_read[io_index][i] = mem_read[i];
2634 io_mem_write[io_index][i] = mem_write[i];
2635 }
2636 io_mem_opaque[io_index] = opaque;
2637 return io_index << IO_MEM_SHIFT;
2638}
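
/* Illustrative sketch (editor's addition, not part of the original file):
 * registering a trivial MMIO region with the byte/word/dword handlers that
 * cpu_register_io_memory() expects, then mapping one page of it at a made-up
 * guest-physical address. All 'my_mmio_*' names and MY_MMIO_BASE are
 * hypothetical. */
static uint32_t my_mmio_read(void *opaque, target_phys_addr_t addr)
{
    return 0; /* a real device would decode 'addr' here */
}

static void my_mmio_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would latch 'val' here */
}

static CPUReadMemoryFunc *my_mmio_read_fn[3] = {
    my_mmio_read, my_mmio_read, my_mmio_read    /* byte, word, dword */
};

static CPUWriteMemoryFunc *my_mmio_write_fn[3] = {
    my_mmio_write, my_mmio_write, my_mmio_write
};

#define MY_MMIO_BASE 0xfe000000

static void example_register_mmio(void)
{
    int io = cpu_register_io_memory(0, my_mmio_read_fn, my_mmio_write_fn, NULL);
    if (io >= 0)
        cpu_register_physical_memory(MY_MMIO_BASE, TARGET_PAGE_SIZE, io);
}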
2639
2640CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2641{
2642 return io_mem_write[io_index >> IO_MEM_SHIFT];
2643}
2644
2645CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2646{
2647 return io_mem_read[io_index >> IO_MEM_SHIFT];
2648}
2649
2650/* physical memory access (slow version, mainly for debug) */
2651#if defined(CONFIG_USER_ONLY)
2652void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2653 int len, int is_write)
2654{
2655 int l, flags;
2656 target_ulong page;
2657 void * p;
2658
2659 while (len > 0) {
2660 page = addr & TARGET_PAGE_MASK;
2661 l = (page + TARGET_PAGE_SIZE) - addr;
2662 if (l > len)
2663 l = len;
2664 flags = page_get_flags(page);
2665 if (!(flags & PAGE_VALID))
2666 return;
2667 if (is_write) {
2668 if (!(flags & PAGE_WRITE))
2669 return;
2670 p = lock_user(addr, len, 0);
2671 memcpy(p, buf, len);
2672 unlock_user(p, addr, len);
2673 } else {
2674 if (!(flags & PAGE_READ))
2675 return;
2676 p = lock_user(addr, len, 1);
2677 memcpy(buf, p, len);
2678 unlock_user(p, addr, 0);
2679 }
2680 len -= l;
2681 buf += l;
2682 addr += l;
2683 }
2684}
2685
2686#else
2687void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2688 int len, int is_write)
2689{
2690 int l, io_index;
2691 uint8_t *ptr;
2692 uint32_t val;
2693 target_phys_addr_t page;
2694 unsigned long pd;
2695 PhysPageDesc *p;
2696
2697 while (len > 0) {
2698 page = addr & TARGET_PAGE_MASK;
2699 l = (page + TARGET_PAGE_SIZE) - addr;
2700 if (l > len)
2701 l = len;
2702 p = phys_page_find(page >> TARGET_PAGE_BITS);
2703 if (!p) {
2704 pd = IO_MEM_UNASSIGNED;
2705 } else {
2706 pd = p->phys_offset;
2707 }
2708
2709 if (is_write) {
2710 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2711 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2712 /* XXX: could force cpu_single_env to NULL to avoid
2713 potential bugs */
2714 if (l >= 4 && ((addr & 3) == 0)) {
2715 /* 32 bit write access */
2716#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2717 val = ldl_p(buf);
2718#else
2719 val = *(const uint32_t *)buf;
2720#endif
2721 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2722 l = 4;
2723 } else if (l >= 2 && ((addr & 1) == 0)) {
2724 /* 16 bit write access */
2725#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2726 val = lduw_p(buf);
2727#else
2728 val = *(const uint16_t *)buf;
2729#endif
2730 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2731 l = 2;
2732 } else {
2733 /* 8 bit write access */
2734#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2735 val = ldub_p(buf);
2736#else
2737 val = *(const uint8_t *)buf;
2738#endif
2739 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2740 l = 1;
2741 }
2742 } else {
2743 unsigned long addr1;
2744 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2745 /* RAM case */
2746#ifdef VBOX
2747 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
2748#else
2749 ptr = phys_ram_base + addr1;
2750 memcpy(ptr, buf, l);
2751#endif
2752 if (!cpu_physical_memory_is_dirty(addr1)) {
2753 /* invalidate code */
2754 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2755 /* set dirty bit */
2756#ifdef VBOX
2757 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2758#endif
2759 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2760 (0xff & ~CODE_DIRTY_FLAG);
2761 }
2762 }
2763 } else {
2764 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2765 !(pd & IO_MEM_ROMD)) {
2766 /* I/O case */
2767 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2768 if (l >= 4 && ((addr & 3) == 0)) {
2769 /* 32 bit read access */
2770 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2771#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2772 stl_p(buf, val);
2773#else
2774 *(uint32_t *)buf = val;
2775#endif
2776 l = 4;
2777 } else if (l >= 2 && ((addr & 1) == 0)) {
2778 /* 16 bit read access */
2779 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2780#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2781 stw_p(buf, val);
2782#else
2783 *(uint16_t *)buf = val;
2784#endif
2785 l = 2;
2786 } else {
2787 /* 8 bit read access */
2788 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2789#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2790 stb_p(buf, val);
2791#else
2792 *(uint8_t *)buf = val;
2793#endif
2794 l = 1;
2795 }
2796 } else {
2797 /* RAM case */
2798#ifdef VBOX
2799 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
2800#else
2801 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2802 (addr & ~TARGET_PAGE_MASK);
2803 memcpy(buf, ptr, l);
2804#endif
2805 }
2806 }
2807 len -= l;
2808 buf += l;
2809 addr += l;
2810 }
2811}
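
/* Illustrative sketch (editor's addition, not part of the original file):
 * copying a few bytes into guest-physical memory and reading them back through
 * the slow path above, using the cpu_physical_memory_read/write wrappers that
 * the helpers further down (ldub_phys, stb_phys, ...) also rely on. 'gpa' is a
 * made-up guest-physical address. */
static void example_phys_roundtrip(target_phys_addr_t gpa)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_write(gpa, out, sizeof(out));
    cpu_physical_memory_read(gpa, in, sizeof(in));
}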
2812
2813#ifndef VBOX
2814/* used for ROM loading : can write in RAM and ROM */
2815void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2816 const uint8_t *buf, int len)
2817{
2818 int l;
2819 uint8_t *ptr;
2820 target_phys_addr_t page;
2821 unsigned long pd;
2822 PhysPageDesc *p;
2823
2824 while (len > 0) {
2825 page = addr & TARGET_PAGE_MASK;
2826 l = (page + TARGET_PAGE_SIZE) - addr;
2827 if (l > len)
2828 l = len;
2829 p = phys_page_find(page >> TARGET_PAGE_BITS);
2830 if (!p) {
2831 pd = IO_MEM_UNASSIGNED;
2832 } else {
2833 pd = p->phys_offset;
2834 }
2835
2836 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2837 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2838 !(pd & IO_MEM_ROMD)) {
2839 /* do nothing */
2840 } else {
2841 unsigned long addr1;
2842 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2843 /* ROM/RAM case */
2844 ptr = phys_ram_base + addr1;
2845 memcpy(ptr, buf, l);
2846 }
2847 len -= l;
2848 buf += l;
2849 addr += l;
2850 }
2851}
2852#endif /* !VBOX */
2853
2854
2855/* warning: addr must be aligned */
2856uint32_t ldl_phys(target_phys_addr_t addr)
2857{
2858 int io_index;
2859 uint8_t *ptr;
2860 uint32_t val;
2861 unsigned long pd;
2862 PhysPageDesc *p;
2863
2864 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2865 if (!p) {
2866 pd = IO_MEM_UNASSIGNED;
2867 } else {
2868 pd = p->phys_offset;
2869 }
2870
2871 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2872 !(pd & IO_MEM_ROMD)) {
2873 /* I/O case */
2874 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2875 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2876 } else {
2877 /* RAM case */
2878#ifndef VBOX
2879 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2880 (addr & ~TARGET_PAGE_MASK);
2881 val = ldl_p(ptr);
2882#else
2883 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2884#endif
2885 }
2886 return val;
2887}
2888
2889/* warning: addr must be aligned */
2890uint64_t ldq_phys(target_phys_addr_t addr)
2891{
2892 int io_index;
2893 uint8_t *ptr;
2894 uint64_t val;
2895 unsigned long pd;
2896 PhysPageDesc *p;
2897
2898 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2899 if (!p) {
2900 pd = IO_MEM_UNASSIGNED;
2901 } else {
2902 pd = p->phys_offset;
2903 }
2904
2905 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2906 !(pd & IO_MEM_ROMD)) {
2907 /* I/O case */
2908 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2909#ifdef TARGET_WORDS_BIGENDIAN
2910 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2911 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2912#else
2913 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2914 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2915#endif
2916 } else {
2917 /* RAM case */
2918#ifndef VBOX
2919 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2920 (addr & ~TARGET_PAGE_MASK);
2921 val = ldq_p(ptr);
2922#else
2923 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2924#endif
2925 }
2926 return val;
2927}
2928
2929/* XXX: optimize */
2930uint32_t ldub_phys(target_phys_addr_t addr)
2931{
2932 uint8_t val;
2933 cpu_physical_memory_read(addr, &val, 1);
2934 return val;
2935}
2936
2937/* XXX: optimize */
2938uint32_t lduw_phys(target_phys_addr_t addr)
2939{
2940 uint16_t val;
2941 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2942 return tswap16(val);
2943}
2944
2945/* warning: addr must be aligned. The ram page is not marked as dirty
2946 and the code inside is not invalidated. It is useful if the dirty
2947 bits are used to track modified PTEs */
2948void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2949{
2950 int io_index;
2951 uint8_t *ptr;
2952 unsigned long pd;
2953 PhysPageDesc *p;
2954
2955 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2956 if (!p) {
2957 pd = IO_MEM_UNASSIGNED;
2958 } else {
2959 pd = p->phys_offset;
2960 }
2961
2962 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2963 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2964 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2965 } else {
2966#ifndef VBOX
2967 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2968 (addr & ~TARGET_PAGE_MASK);
2969 stl_p(ptr, val);
2970#else
2971 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2972#endif
2973 }
2974}
2975
2976/* warning: addr must be aligned */
2977void stl_phys(target_phys_addr_t addr, uint32_t val)
2978{
2979 int io_index;
2980 uint8_t *ptr;
2981 unsigned long pd;
2982 PhysPageDesc *p;
2983
2984 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2985 if (!p) {
2986 pd = IO_MEM_UNASSIGNED;
2987 } else {
2988 pd = p->phys_offset;
2989 }
2990
2991 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2992 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2993 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2994 } else {
2995 unsigned long addr1;
2996 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2997 /* RAM case */
2998#ifndef VBOX
2999 ptr = phys_ram_base + addr1;
3000 stl_p(ptr, val);
3001#else
3002 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3003#endif
3004 if (!cpu_physical_memory_is_dirty(addr1)) {
3005 /* invalidate code */
3006 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3007 /* set dirty bit */
3008#ifdef VBOX
3009 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3010#endif
3011 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3012 (0xff & ~CODE_DIRTY_FLAG);
3013 }
3014 }
3015}
3016
3017/* XXX: optimize */
3018void stb_phys(target_phys_addr_t addr, uint32_t val)
3019{
3020 uint8_t v = val;
3021 cpu_physical_memory_write(addr, &v, 1);
3022}
3023
3024/* XXX: optimize */
3025void stw_phys(target_phys_addr_t addr, uint32_t val)
3026{
3027 uint16_t v = tswap16(val);
3028 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3029}
3030
3031/* XXX: optimize */
3032void stq_phys(target_phys_addr_t addr, uint64_t val)
3033{
3034 val = tswap64(val);
3035 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3036}
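
/* Illustrative sketch (editor's addition, not part of the original file):
 * using the aligned helpers above to update a hypothetical guest page-table
 * entry. stl_phys_notdirty() deliberately skips the dirty-bit update and code
 * invalidation, which is the use case described in its comment; ldl_phys()
 * reads the value back. 'pte_gpa' and 'new_pte' are made-up parameters. */
static void example_update_pte(target_phys_addr_t pte_gpa, uint32_t new_pte)
{
    uint32_t readback;

    stl_phys_notdirty(pte_gpa, new_pte);
    readback = ldl_phys(pte_gpa);
    (void)readback;
}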
3037
3038#endif
3039
3040#ifndef VBOX
3041/* virtual memory access for debug */
3042int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3043 uint8_t *buf, int len, int is_write)
3044{
3045 int l;
3046 target_ulong page, phys_addr;
3047
3048 while (len > 0) {
3049 page = addr & TARGET_PAGE_MASK;
3050 phys_addr = cpu_get_phys_page_debug(env, page);
3051 /* if no physical page mapped, return an error */
3052 if (phys_addr == -1)
3053 return -1;
3054 l = (page + TARGET_PAGE_SIZE) - addr;
3055 if (l > len)
3056 l = len;
3057 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3058 buf, l, is_write);
3059 len -= l;
3060 buf += l;
3061 addr += l;
3062 }
3063 return 0;
3064}
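
/* Illustrative sketch (editor's addition, not part of the original file): how
 * a debugger stub might peek at guest-virtual memory through
 * cpu_memory_rw_debug(); an unmapped page shows up as a -1 return. 'gva' and
 * 'buf' are made-up parameters. */
static int example_debug_peek(CPUState *env, target_ulong gva,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, gva, buf, len, 0 /* is_write */);
}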
3065
3066void dump_exec_info(FILE *f,
3067 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3068{
3069 int i, target_code_size, max_target_code_size;
3070 int direct_jmp_count, direct_jmp2_count, cross_page;
3071 TranslationBlock *tb;
3072
3073 target_code_size = 0;
3074 max_target_code_size = 0;
3075 cross_page = 0;
3076 direct_jmp_count = 0;
3077 direct_jmp2_count = 0;
3078 for(i = 0; i < nb_tbs; i++) {
3079 tb = &tbs[i];
3080 target_code_size += tb->size;
3081 if (tb->size > max_target_code_size)
3082 max_target_code_size = tb->size;
3083 if (tb->page_addr[1] != -1)
3084 cross_page++;
3085 if (tb->tb_next_offset[0] != 0xffff) {
3086 direct_jmp_count++;
3087 if (tb->tb_next_offset[1] != 0xffff) {
3088 direct_jmp2_count++;
3089 }
3090 }
3091 }
3092 /* XXX: avoid using doubles ? */
3093 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3094 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3095 nb_tbs ? target_code_size / nb_tbs : 0,
3096 max_target_code_size);
3097 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3098 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3099 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3100 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3101 cross_page,
3102 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3103 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3104 direct_jmp_count,
3105 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3106 direct_jmp2_count,
3107 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3108 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3109 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3110 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3111}
3112#endif /* !VBOX */
3113
3114#if !defined(CONFIG_USER_ONLY)
3115
3116#define MMUSUFFIX _cmmu
3117#define GETPC() NULL
3118#define env cpu_single_env
3119#define SOFTMMU_CODE_ACCESS
3120
3121#define SHIFT 0
3122#include "softmmu_template.h"
3123
3124#define SHIFT 1
3125#include "softmmu_template.h"
3126
3127#define SHIFT 2
3128#include "softmmu_template.h"
3129
3130#define SHIFT 3
3131#include "softmmu_template.h"
3132
3133#undef env
3134
3135#endif