VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@13337

Last change on this file since 13337 was 13337, checked in by vboxsync, 16 years ago

more recompiler work

  • Property svn:eol-style set to native
File size: 91.0 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <inttypes.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
60//#define DEBUG_TB_INVALIDATE
61//#define DEBUG_FLUSH
62//#define DEBUG_TLB
63//#define DEBUG_UNASSIGNED
64
65/* make various TB consistency checks */
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74#define SMC_BITMAP_USE_THRESHOLD 10
75
76#define MMAP_AREA_START 0x00000000
77#define MMAP_AREA_END 0xa8000000
78
79#if defined(TARGET_SPARC64)
80#define TARGET_PHYS_ADDR_SPACE_BITS 41
81#elif defined(TARGET_SPARC)
82#define TARGET_PHYS_ADDR_SPACE_BITS 36
83#elif defined(TARGET_ALPHA)
84#define TARGET_PHYS_ADDR_SPACE_BITS 42
85#define TARGET_VIRT_ADDR_SPACE_BITS 42
86#elif defined(TARGET_PPC64)
87#define TARGET_PHYS_ADDR_SPACE_BITS 42
88#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
89#define TARGET_PHYS_ADDR_SPACE_BITS 42
90#elif defined(TARGET_I386) && !defined(USE_KQEMU)
91#define TARGET_PHYS_ADDR_SPACE_BITS 36
92#else
93/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
94#define TARGET_PHYS_ADDR_SPACE_BITS 32
95#endif
96
97static TranslationBlock *tbs;
98int code_gen_max_blocks;
99TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
100static int nb_tbs;
101/* any access to the tbs or the page table must use this lock */
102spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
103
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115
116uint8_t code_gen_prologue[1024] code_gen_section;
117static uint8_t *code_gen_buffer;
118static unsigned long code_gen_buffer_size;
119/* threshold to flush the translated code buffer */
120static unsigned long code_gen_buffer_max_size;
121uint8_t *code_gen_ptr;
122
123#ifndef VBOX
124#if !defined(CONFIG_USER_ONLY)
125ram_addr_t phys_ram_size;
126int phys_ram_fd;
127uint8_t *phys_ram_base;
128uint8_t *phys_ram_dirty;
129static int in_migration;
130static ram_addr_t phys_ram_alloc_offset = 0;
131#endif
132#else /* VBOX */
133RTGCPHYS phys_ram_size;
134/* we have memory ranges (the high PC-BIOS mapping) which
135 causes some pages to fall outside the dirty map here. */
136uint32_t phys_ram_dirty_size;
137#endif /* VBOX */
138#if !defined(VBOX)
139uint8_t *phys_ram_base;
140#endif
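/* one byte of dirty flags per RAM page; the flag bits (e.g. CODE_DIRTY_FLAG)
   are cleared by cpu_physical_memory_reset_dirty() and set again when a write
   goes through the notdirty slow path */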
141uint8_t *phys_ram_dirty;
142
143CPUState *first_cpu;
144/* current CPU in the current thread. It is only valid inside
145 cpu_exec() */
146CPUState *cpu_single_env;
147/* 0 = Do not count executed instructions.
148 1 = Precise instruction counting.
149 2 = Adaptive rate instruction counting. */
150int use_icount = 0;
151/* Current instruction counter. While executing translated code this may
152 include some instructions that have not yet been executed. */
153int64_t qemu_icount;
154
155typedef struct PageDesc {
156 /* list of TBs intersecting this ram page */
157 TranslationBlock *first_tb;
158 /* in order to optimize self-modifying code handling, we count the number
159 of code write accesses to a given page and switch to a bitmap past a threshold */
160 unsigned int code_write_count;
161 uint8_t *code_bitmap;
162#if defined(CONFIG_USER_ONLY)
163 unsigned long flags;
164#endif
165} PageDesc;
166
167typedef struct PhysPageDesc {
168 /* offset in host memory of the page + io_index in the low 12 bits */
169 ram_addr_t phys_offset;
170} PhysPageDesc;
171
172#define L2_BITS 10
173#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
174/* XXX: this is a temporary hack for alpha target.
175 * In the future, this is to be replaced by a multi-level table
176 * to actually be able to handle the complete 64 bits address space.
177 */
178#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
179#else
180#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
181#endif
182
183#define L1_SIZE (1 << L1_BITS)
184#define L2_SIZE (1 << L2_BITS)
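/* with 4 KB target pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10, L1_BITS
   is 32 - 10 - 12 = 10, so both levels hold 1024 entries and together they
   cover the whole 32-bit target address space, one PageDesc per page */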
185
186static void io_mem_init(void);
187
188unsigned long qemu_real_host_page_size;
189unsigned long qemu_host_page_bits;
190unsigned long qemu_host_page_size;
191unsigned long qemu_host_page_mask;
192
193/* XXX: for system emulation, it could just be an array */
194static PageDesc *l1_map[L1_SIZE];
195static PhysPageDesc **l1_phys_map;
196
197#if !defined(CONFIG_USER_ONLY)
198static void io_mem_init(void);
199
200/* io memory support */
201CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
202CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
203void *io_mem_opaque[IO_MEM_NB_ENTRIES];
204static int io_mem_nb;
205static int io_mem_watch;
206#endif
207
208#ifndef VBOX
209/* log support */
210static const char *logfilename = "/tmp/qemu.log";
211#endif /* !VBOX */
212FILE *logfile;
213int loglevel;
214#ifndef VBOX
215static int log_append = 0;
216#endif
217
218/* statistics */
219static int tlb_flush_count;
220static int tb_flush_count;
221#ifndef VBOX
222static int tb_phys_invalidate_count;
223#endif /* !VBOX */
224
225#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
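/* a subpage splits one target page into byte-granular regions, each with its
   own read/write handlers and opaque pointers, so that several I/O memory
   regions can share a single physical page */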
226typedef struct subpage_t {
227 target_phys_addr_t base;
228 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
229 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
230 void *opaque[TARGET_PAGE_SIZE][2][4];
231} subpage_t;
232
233
234#ifndef VBOX
235#ifdef _WIN32
236static void map_exec(void *addr, long size)
237{
238 DWORD old_protect;
239 VirtualProtect(addr, size,
240 PAGE_EXECUTE_READWRITE, &old_protect);
241
242}
243#else
244static void map_exec(void *addr, long size)
245{
246 unsigned long start, end, page_size;
247
248 page_size = getpagesize();
249 start = (unsigned long)addr;
250 start &= ~(page_size - 1);
251
252 end = (unsigned long)addr + size;
253 end += page_size - 1;
254 end &= ~(page_size - 1);
255
256 mprotect((void *)start, end - start,
257 PROT_READ | PROT_WRITE | PROT_EXEC);
258}
259#endif
260#else // VBOX
261static void map_exec(void *addr, long size)
262{
263 RTMemProtect(addr, size,
264 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
265}
266#endif
267
268static void page_init(void)
269{
270 /* NOTE: we can always suppose that qemu_host_page_size >=
271 TARGET_PAGE_SIZE */
272#ifdef VBOX
273 RTMemProtect(code_gen_buffer, code_gen_buffer_size,
274 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
275 qemu_real_host_page_size = PAGE_SIZE;
276#else /* !VBOX */
277#ifdef _WIN32
278 {
279 SYSTEM_INFO system_info;
280 DWORD old_protect;
281
282 GetSystemInfo(&system_info);
283 qemu_real_host_page_size = system_info.dwPageSize;
284 }
285#else
286 qemu_real_host_page_size = getpagesize();
287#endif
288#endif /* !VBOX */
289
290 if (qemu_host_page_size == 0)
291 qemu_host_page_size = qemu_real_host_page_size;
292 if (qemu_host_page_size < TARGET_PAGE_SIZE)
293 qemu_host_page_size = TARGET_PAGE_SIZE;
294 qemu_host_page_bits = 0;
295 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
296 qemu_host_page_bits++;
297 qemu_host_page_mask = ~(qemu_host_page_size - 1);
298 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
299 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
300#ifdef VBOX
301 /* We use other means to set reserved bit on our pages */
302#else
303#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
304 {
305 long long startaddr, endaddr;
306 FILE *f;
307 int n;
308
309 mmap_lock();
310 last_brk = (unsigned long)sbrk(0);
311 f = fopen("/proc/self/maps", "r");
312 if (f) {
313 do {
314 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
315 if (n == 2) {
316 startaddr = MIN(startaddr,
317 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
318 endaddr = MIN(endaddr,
319 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
320 page_set_flags(startaddr & TARGET_PAGE_MASK,
321 TARGET_PAGE_ALIGN(endaddr),
322 PAGE_RESERVED);
323 }
324 } while (!feof(f));
325 fclose(f);
326 }
327 mmap_unlock();
328 }
329#endif
330#endif
331}
332
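/* l1_map is a two-level table indexed by target page number: the upper bits
   select one of the L1_SIZE slots, each holding a lazily allocated array of
   L2_SIZE PageDesc entries (see page_find_alloc) */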
333static inline PageDesc **page_l1_map(target_ulong index)
334{
335#if TARGET_LONG_BITS > 32
336 /* Host memory outside guest VM. For 32-bit targets we have already
337 excluded high addresses. */
338 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
339 return NULL;
340#endif
341 return &l1_map[index >> L2_BITS];
342}
343
344static inline PageDesc *page_find_alloc(target_ulong index)
345{
346 PageDesc **lp, *p;
347 lp = page_l1_map(index);
348 if (!lp)
349 return NULL;
350
351 p = *lp;
352 if (!p) {
353 /* allocate if not found */
354#if defined(CONFIG_USER_ONLY)
355 unsigned long addr;
356 size_t len = sizeof(PageDesc) * L2_SIZE;
357 /* Don't use qemu_malloc because it may recurse. */
358 p = mmap(0, len, PROT_READ | PROT_WRITE,
359 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
360 *lp = p;
361 addr = h2g(p);
362 if (addr == (target_ulong)addr) {
363 page_set_flags(addr & TARGET_PAGE_MASK,
364 TARGET_PAGE_ALIGN(addr + len),
365 PAGE_RESERVED);
366 }
367#else
368 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
369 *lp = p;
370#endif
371 }
372 return p + (index & (L2_SIZE - 1));
373}
374
375static inline PageDesc *page_find(target_ulong index)
376{
377 PageDesc **lp, *p;
378 lp = page_l1_map(index);
379 if (!lp)
380 return NULL;
381
382 p = *lp;
383 if (!p)
384 return 0;
385 return p + (index & (L2_SIZE - 1));
386}
387
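/* l1_phys_map is the physical-address counterpart of l1_map; for physical
   address spaces wider than 32 bits an extra indirection level is inserted
   below, and freshly allocated entries default to IO_MEM_UNASSIGNED */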
388static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
389{
390 void **lp, **p;
391 PhysPageDesc *pd;
392
393 p = (void **)l1_phys_map;
394#if TARGET_PHYS_ADDR_SPACE_BITS > 32
395
396#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
397#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
398#endif
399 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
400 p = *lp;
401 if (!p) {
402 /* allocate if not found */
403 if (!alloc)
404 return NULL;
405 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
406 memset(p, 0, sizeof(void *) * L1_SIZE);
407 *lp = p;
408 }
409#endif
410 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
411 pd = *lp;
412 if (!pd) {
413 int i;
414 /* allocate if not found */
415 if (!alloc)
416 return NULL;
417 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
418 *lp = pd;
419 for (i = 0; i < L2_SIZE; i++)
420 pd[i].phys_offset = IO_MEM_UNASSIGNED;
421 }
422#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
423 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
424 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
425 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
426 return pd;
427#else
428 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
429#endif
430}
431
432static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
433{
434 return phys_page_find_alloc(index, 0);
435}
436
437#if !defined(CONFIG_USER_ONLY)
438static void tlb_protect_code(ram_addr_t ram_addr);
439static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
440 target_ulong vaddr);
441#define mmap_lock() do { } while(0)
442#define mmap_unlock() do { } while(0)
443#endif
444
445#ifdef VBOX
446/** @todo nike: isn't 32M too much? */
447#endif
448#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
449
450#if defined(CONFIG_USER_ONLY)
451/* Currently it is not recommended to allocate big chunks of data in
452 user mode. This will change when a dedicated libc is used. */
453#define USE_STATIC_CODE_GEN_BUFFER
454#endif
455
456/* VBox allocates codegen buffer dynamically */
457#ifndef VBOX
458#ifdef USE_STATIC_CODE_GEN_BUFFER
459static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
460#endif
461#endif
462
463static void code_gen_alloc(unsigned long tb_size)
464{
465#ifdef USE_STATIC_CODE_GEN_BUFFER
466 code_gen_buffer = static_code_gen_buffer;
467 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
468 map_exec(code_gen_buffer, code_gen_buffer_size);
469#else
470 code_gen_buffer_size = tb_size;
471 if (code_gen_buffer_size == 0) {
472#if defined(CONFIG_USER_ONLY)
473 /* in user mode, phys_ram_size is not meaningful */
474 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
475#else
476 /* XXX: needs adjustments */
477 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
478#endif
479 }
480 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
481 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
482 /* The code gen buffer location may have constraints depending on
483 the host cpu and OS */
484#ifdef VBOX
485 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
486
487 if (!code_gen_buffer) {
488 LogRel(("REM: failed allocate codegen buffer %lld\n",
489 code_gen_buffer_size));
490 return;
491 }
492#else //!VBOX
493#if defined(__linux__)
494 {
495 int flags;
496 void *start = NULL;
497
498 flags = MAP_PRIVATE | MAP_ANONYMOUS;
499#if defined(__x86_64__)
500 flags |= MAP_32BIT;
501 /* Cannot map more than that */
502 if (code_gen_buffer_size > (800 * 1024 * 1024))
503 code_gen_buffer_size = (800 * 1024 * 1024);
504#elif defined(__sparc_v9__)
505 // Map the buffer below 2G, so we can use direct calls and branches
506 flags |= MAP_FIXED;
507 start = (void *) 0x60000000UL;
508 if (code_gen_buffer_size > (512 * 1024 * 1024))
509 code_gen_buffer_size = (512 * 1024 * 1024);
510#endif
511 code_gen_buffer = mmap(start, code_gen_buffer_size,
512 PROT_WRITE | PROT_READ | PROT_EXEC,
513 flags, -1, 0);
514 if (code_gen_buffer == MAP_FAILED) {
515 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
516 exit(1);
517 }
518 }
519#elif defined(__FreeBSD__)
520 {
521 int flags;
522 void *addr = NULL;
523 flags = MAP_PRIVATE | MAP_ANONYMOUS;
524#if defined(__x86_64__)
525 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
526 * 0x40000000 is free */
527 flags |= MAP_FIXED;
528 addr = (void *)0x40000000;
529 /* Cannot map more than that */
530 if (code_gen_buffer_size > (800 * 1024 * 1024))
531 code_gen_buffer_size = (800 * 1024 * 1024);
532#endif
533 code_gen_buffer = mmap(addr, code_gen_buffer_size,
534 PROT_WRITE | PROT_READ | PROT_EXEC,
535 flags, -1, 0);
536 if (code_gen_buffer == MAP_FAILED) {
537 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
538 exit(1);
539 }
540 }
541#else
542 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
543 if (!code_gen_buffer) {
544 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
545 exit(1);
546 }
547 map_exec(code_gen_buffer, code_gen_buffer_size);
548#endif
549#endif // VBOX
550#endif /* !USE_STATIC_CODE_GEN_BUFFER */
551 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
552 code_gen_buffer_max_size = code_gen_buffer_size -
553 code_gen_max_block_size();
554 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
555 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
556}
557
558/* Must be called before using the QEMU cpus. 'tb_size' is the size
559 (in bytes) allocated to the translation buffer. Zero means default
560 size. */
561void cpu_exec_init_all(unsigned long tb_size)
562{
563 cpu_gen_init();
564 code_gen_alloc(tb_size);
565 code_gen_ptr = code_gen_buffer;
566 page_init();
567#if !defined(CONFIG_USER_ONLY)
568 io_mem_init();
569#endif
570}
571
572#ifndef VBOX
573#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
574
575#define CPU_COMMON_SAVE_VERSION 1
576
577static void cpu_common_save(QEMUFile *f, void *opaque)
578{
579 CPUState *env = opaque;
580
581 qemu_put_be32s(f, &env->halted);
582 qemu_put_be32s(f, &env->interrupt_request);
583}
584
585static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
586{
587 CPUState *env = opaque;
588
589 if (version_id != CPU_COMMON_SAVE_VERSION)
590 return -EINVAL;
591
592 qemu_get_be32s(f, &env->halted);
593 qemu_get_be32s(f, &env->interrupt_request);
594 tlb_flush(env, 1);
595
596 return 0;
597}
598#endif
599#endif //!VBOX
600
601void cpu_exec_init(CPUState *env)
602{
603 CPUState **penv;
604 int cpu_index;
605
606 env->next_cpu = NULL;
607 penv = &first_cpu;
608 cpu_index = 0;
609 while (*penv != NULL) {
610 penv = (CPUState **)&(*penv)->next_cpu;
611 cpu_index++;
612 }
613 env->cpu_index = cpu_index;
614 env->nb_watchpoints = 0;
615 *penv = env;
616#ifndef VBOX
617#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
618 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
619 cpu_common_save, cpu_common_load, env);
620 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
621 cpu_save, cpu_load, env);
622#endif
623#endif // !VBOX
624}
625
626static inline void invalidate_page_bitmap(PageDesc *p)
627{
628 if (p->code_bitmap) {
629 qemu_free(p->code_bitmap);
630 p->code_bitmap = NULL;
631 }
632 p->code_write_count = 0;
633}
634
635/* set to NULL all the 'first_tb' fields in all PageDescs */
636static void page_flush_tb(void)
637{
638 int i, j;
639 PageDesc *p;
640
641 for(i = 0; i < L1_SIZE; i++) {
642 p = l1_map[i];
643 if (p) {
644 for(j = 0; j < L2_SIZE; j++) {
645 p->first_tb = NULL;
646 invalidate_page_bitmap(p);
647 p++;
648 }
649 }
650 }
651}
652
653/* flush all the translation blocks */
654/* XXX: tb_flush is currently not thread safe */
655void tb_flush(CPUState *env1)
656{
657 CPUState *env;
658#if defined(DEBUG_FLUSH)
659 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
660 (unsigned long)(code_gen_ptr - code_gen_buffer),
661 nb_tbs, nb_tbs > 0 ?
662 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
663#endif
664 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
665 cpu_abort(env1, "Internal error: code buffer overflow\n");
666
667 nb_tbs = 0;
668
669 for(env = first_cpu; env != NULL; env = env->next_cpu) {
670 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
671 }
672
673 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
674 page_flush_tb();
675
676 code_gen_ptr = code_gen_buffer;
677 /* XXX: flush processor icache at this point if cache flush is
678 expensive */
679 tb_flush_count++;
680}
681
682#ifdef DEBUG_TB_CHECK
683static void tb_invalidate_check(target_ulong address)
684{
685 TranslationBlock *tb;
686 int i;
687 address &= TARGET_PAGE_MASK;
688 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
689 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
690 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
691 address >= tb->pc + tb->size)) {
692 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
693 address, (long)tb->pc, tb->size);
694 }
695 }
696 }
697}
698
699/* verify that all the pages have correct rights for code */
700static void tb_page_check(void)
701{
702 TranslationBlock *tb;
703 int i, flags1, flags2;
704
705 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
706 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
707 flags1 = page_get_flags(tb->pc);
708 flags2 = page_get_flags(tb->pc + tb->size - 1);
709 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
710 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
711 (long)tb->pc, tb->size, flags1, flags2);
712 }
713 }
714 }
715}
716
717static void tb_jmp_check(TranslationBlock *tb)
718{
719 TranslationBlock *tb1;
720 unsigned int n1;
721
722 /* suppress any remaining jumps to this TB */
723 tb1 = tb->jmp_first;
724 for(;;) {
725 n1 = (long)tb1 & 3;
726 tb1 = (TranslationBlock *)((long)tb1 & ~3);
727 if (n1 == 2)
728 break;
729 tb1 = tb1->jmp_next[n1];
730 }
731 /* check end of list */
732 if (tb1 != tb) {
733 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
734 }
735}
736#endif // DEBUG_TB_CHECK
737
738/* invalidate one TB */
739static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
740 int next_offset)
741{
742 TranslationBlock *tb1;
743 for(;;) {
744 tb1 = *ptb;
745 if (tb1 == tb) {
746 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
747 break;
748 }
749 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
750 }
751}
752
753static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
754{
755 TranslationBlock *tb1;
756 unsigned int n1;
757
758 for(;;) {
759 tb1 = *ptb;
760 n1 = (long)tb1 & 3;
761 tb1 = (TranslationBlock *)((long)tb1 & ~3);
762 if (tb1 == tb) {
763 *ptb = tb1->page_next[n1];
764 break;
765 }
766 ptb = &tb1->page_next[n1];
767 }
768}
769
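/* incoming jumps to a TB are kept on a circular list threaded through the
   jumping TBs' jmp_next[] slots; the low two bits of each pointer encode
   which slot is used and the value 2 marks the destination TB itself
   (jmp_first) as the list head */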
770static inline void tb_jmp_remove(TranslationBlock *tb, int n)
771{
772 TranslationBlock *tb1, **ptb;
773 unsigned int n1;
774
775 ptb = &tb->jmp_next[n];
776 tb1 = *ptb;
777 if (tb1) {
778 /* find tb(n) in circular list */
779 for(;;) {
780 tb1 = *ptb;
781 n1 = (long)tb1 & 3;
782 tb1 = (TranslationBlock *)((long)tb1 & ~3);
783 if (n1 == n && tb1 == tb)
784 break;
785 if (n1 == 2) {
786 ptb = &tb1->jmp_first;
787 } else {
788 ptb = &tb1->jmp_next[n1];
789 }
790 }
791 /* now we can suppress tb(n) from the list */
792 *ptb = tb->jmp_next[n];
793
794 tb->jmp_next[n] = NULL;
795 }
796}
797
798/* reset the jump entry 'n' of a TB so that it is not chained to
799 another TB */
800static inline void tb_reset_jump(TranslationBlock *tb, int n)
801{
802 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
803}
804
805void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
806{
807 CPUState *env;
808 PageDesc *p;
809 unsigned int h, n1;
810 target_phys_addr_t phys_pc;
811 TranslationBlock *tb1, *tb2;
812
813 /* remove the TB from the hash list */
814 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
815 h = tb_phys_hash_func(phys_pc);
816 tb_remove(&tb_phys_hash[h], tb,
817 offsetof(TranslationBlock, phys_hash_next));
818
819 /* remove the TB from the page list */
820 if (tb->page_addr[0] != page_addr) {
821 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
822 tb_page_remove(&p->first_tb, tb);
823 invalidate_page_bitmap(p);
824 }
825 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
826 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
827 tb_page_remove(&p->first_tb, tb);
828 invalidate_page_bitmap(p);
829 }
830
831 tb_invalidated_flag = 1;
832
833 /* remove the TB from the hash list */
834 h = tb_jmp_cache_hash_func(tb->pc);
835 for(env = first_cpu; env != NULL; env = env->next_cpu) {
836 if (env->tb_jmp_cache[h] == tb)
837 env->tb_jmp_cache[h] = NULL;
838 }
839
840 /* suppress this TB from the two jump lists */
841 tb_jmp_remove(tb, 0);
842 tb_jmp_remove(tb, 1);
843
844 /* suppress any remaining jumps to this TB */
845 tb1 = tb->jmp_first;
846 for(;;) {
847 n1 = (long)tb1 & 3;
848 if (n1 == 2)
849 break;
850 tb1 = (TranslationBlock *)((long)tb1 & ~3);
851 tb2 = tb1->jmp_next[n1];
852 tb_reset_jump(tb1, n1);
853 tb1->jmp_next[n1] = NULL;
854 tb1 = tb2;
855 }
856 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
857
858#ifndef VBOX
859 tb_phys_invalidate_count++;
860#endif
861}
862
863
864#ifdef VBOX
865void tb_invalidate_virt(CPUState *env, uint32_t eip)
866{
867# if 1
868 tb_flush(env);
869# else
870 uint8_t *cs_base, *pc;
871 unsigned int flags, h, phys_pc;
872 TranslationBlock *tb, **ptb;
873
874 flags = env->hflags;
875 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
876 cs_base = env->segs[R_CS].base;
877 pc = cs_base + eip;
878
879 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
880 flags);
881
882 if(tb)
883 {
884# ifdef DEBUG
885 printf("invalidating TB (%08X) at %08X\n", tb, eip);
886# endif
887 tb_invalidate(tb);
888 //Note: this will leak TBs, but the whole cache will be flushed
889 // when it happens too often
890 tb->pc = 0;
891 tb->cs_base = 0;
892 tb->flags = 0;
893 }
894# endif
895}
896
897# ifdef VBOX_STRICT
898/**
899 * Gets the page offset.
900 */
901unsigned long get_phys_page_offset(target_ulong addr)
902{
903 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
904 return p ? p->phys_offset : 0;
905}
906# endif /* VBOX_STRICT */
907#endif /* VBOX */
908
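/* set bits [start, start + len) in the bitmap 'tab' (8 bits per byte,
   LSB first) */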
909static inline void set_bits(uint8_t *tab, int start, int len)
910{
911 int end, mask, end1;
912
913 end = start + len;
914 tab += start >> 3;
915 mask = 0xff << (start & 7);
916 if ((start & ~7) == (end & ~7)) {
917 if (start < end) {
918 mask &= ~(0xff << (end & 7));
919 *tab |= mask;
920 }
921 } else {
922 *tab++ |= mask;
923 start = (start + 8) & ~7;
924 end1 = end & ~7;
925 while (start < end1) {
926 *tab++ = 0xff;
927 start += 8;
928 }
929 if (start < end) {
930 mask = ~(0xff << (end & 7));
931 *tab |= mask;
932 }
933 }
934}
935
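/* build the per-page code bitmap: one bit per byte of the page, set wherever
   the bytes of a TB lie; tb_invalidate_phys_page_fast() consults it to skip
   writes that cannot hit translated code */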
936static void build_page_bitmap(PageDesc *p)
937{
938 int n, tb_start, tb_end;
939 TranslationBlock *tb;
940
941 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
942 if (!p->code_bitmap)
943 return;
944 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
945
946 tb = p->first_tb;
947 while (tb != NULL) {
948 n = (long)tb & 3;
949 tb = (TranslationBlock *)((long)tb & ~3);
950 /* NOTE: this is subtle as a TB may span two physical pages */
951 if (n == 0) {
952 /* NOTE: tb_end may be after the end of the page, but
953 it is not a problem */
954 tb_start = tb->pc & ~TARGET_PAGE_MASK;
955 tb_end = tb_start + tb->size;
956 if (tb_end > TARGET_PAGE_SIZE)
957 tb_end = TARGET_PAGE_SIZE;
958 } else {
959 tb_start = 0;
960 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
961 }
962 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
963 tb = tb->page_next[n];
964 }
965}
966
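/* translate one block at pc/cs_base with the given flags and link it into the
   physical page tables; if the TB array or the code buffer is exhausted, the
   whole translation cache is flushed and the allocation retried */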
967TranslationBlock *tb_gen_code(CPUState *env,
968 target_ulong pc, target_ulong cs_base,
969 int flags, int cflags)
970{
971 TranslationBlock *tb;
972 uint8_t *tc_ptr;
973 target_ulong phys_pc, phys_page2, virt_page2;
974 int code_gen_size;
975
976 phys_pc = get_phys_addr_code(env, pc);
977 tb = tb_alloc(pc);
978 if (!tb) {
979 /* flush must be done */
980 tb_flush(env);
981 /* cannot fail at this point */
982 tb = tb_alloc(pc);
983 /* Don't forget to invalidate previous TB info. */
984 tb_invalidated_flag = 1;
985 }
986 tc_ptr = code_gen_ptr;
987 tb->tc_ptr = tc_ptr;
988 tb->cs_base = cs_base;
989 tb->flags = flags;
990 tb->cflags = cflags;
991 cpu_gen_code(env, tb, &code_gen_size);
992 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
993
994 /* check next page if needed */
995 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
996 phys_page2 = -1;
997 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
998 phys_page2 = get_phys_addr_code(env, virt_page2);
999 }
1000 tb_link_phys(tb, phys_pc, phys_page2);
1001 return tb;
1002}
1003
1004/* invalidate all TBs which intersect with the target physical page
1005 starting in range [start;end[. NOTE: start and end must refer to
1006 the same physical page. 'is_cpu_write_access' should be true if called
1007 from a real cpu write access: the virtual CPU will exit the current
1008 TB if code is modified inside this TB. */
1009void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1010 int is_cpu_write_access)
1011{
1012 int n, current_tb_modified, current_tb_not_found, current_flags;
1013 CPUState *env = cpu_single_env;
1014 PageDesc *p;
1015 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1016 target_ulong tb_start, tb_end;
1017 target_ulong current_pc, current_cs_base;
1018
1019 p = page_find(start >> TARGET_PAGE_BITS);
1020 if (!p)
1021 return;
1022 if (!p->code_bitmap &&
1023 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1024 is_cpu_write_access) {
1025 /* build code bitmap */
1026 build_page_bitmap(p);
1027 }
1028
1029 /* we remove all the TBs in the range [start, end[ */
1030 /* XXX: see if in some cases it could be faster to invalidate all the code */
1031 current_tb_not_found = is_cpu_write_access;
1032 current_tb_modified = 0;
1033 current_tb = NULL; /* avoid warning */
1034 current_pc = 0; /* avoid warning */
1035 current_cs_base = 0; /* avoid warning */
1036 current_flags = 0; /* avoid warning */
1037 tb = p->first_tb;
1038 while (tb != NULL) {
1039 n = (long)tb & 3;
1040 tb = (TranslationBlock *)((long)tb & ~3);
1041 tb_next = tb->page_next[n];
1042 /* NOTE: this is subtle as a TB may span two physical pages */
1043 if (n == 0) {
1044 /* NOTE: tb_end may be after the end of the page, but
1045 it is not a problem */
1046 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1047 tb_end = tb_start + tb->size;
1048 } else {
1049 tb_start = tb->page_addr[1];
1050 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1051 }
1052 if (!(tb_end <= start || tb_start >= end)) {
1053#ifdef TARGET_HAS_PRECISE_SMC
1054 if (current_tb_not_found) {
1055 current_tb_not_found = 0;
1056 current_tb = NULL;
1057 if (env->mem_io_pc) {
1058 /* now we have a real cpu fault */
1059 current_tb = tb_find_pc(env->mem_io_pc);
1060 }
1061 }
1062 if (current_tb == tb &&
1063 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1064 /* If we are modifying the current TB, we must stop
1065 its execution. We could be more precise by checking
1066 that the modification is after the current PC, but it
1067 would require a specialized function to partially
1068 restore the CPU state */
1069
1070 current_tb_modified = 1;
1071 cpu_restore_state(current_tb, env,
1072 env->mem_io_pc, NULL);
1073#if defined(TARGET_I386)
1074 current_flags = env->hflags;
1075 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1076 current_cs_base = (target_ulong)env->segs[R_CS].base;
1077 current_pc = current_cs_base + env->eip;
1078#else
1079#error unsupported CPU
1080#endif
1081 }
1082#endif /* TARGET_HAS_PRECISE_SMC */
1083 /* we need to do that to handle the case where a signal
1084 occurs while doing tb_phys_invalidate() */
1085 saved_tb = NULL;
1086 if (env) {
1087 saved_tb = env->current_tb;
1088 env->current_tb = NULL;
1089 }
1090 tb_phys_invalidate(tb, -1);
1091 if (env) {
1092 env->current_tb = saved_tb;
1093 if (env->interrupt_request && env->current_tb)
1094 cpu_interrupt(env, env->interrupt_request);
1095 }
1096 }
1097 tb = tb_next;
1098 }
1099#if !defined(CONFIG_USER_ONLY)
1100 /* if no code remaining, no need to continue to use slow writes */
1101 if (!p->first_tb) {
1102 invalidate_page_bitmap(p);
1103 if (is_cpu_write_access) {
1104 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1105 }
1106 }
1107#endif
1108#ifdef TARGET_HAS_PRECISE_SMC
1109 if (current_tb_modified) {
1110 /* we generate a block containing just the instruction
1111 modifying the memory. It will ensure that it cannot modify
1112 itself */
1113 env->current_tb = NULL;
1114 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1115 cpu_resume_from_signal(env, NULL);
1116 }
1117#endif
1118}
1119
1120
1121/* len must be <= 8 and start must be a multiple of len */
1122static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1123{
1124 PageDesc *p;
1125 int offset, b;
1126#if 0
1127 if (1) {
1128 if (loglevel) {
1129 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1130 cpu_single_env->mem_io_vaddr, len,
1131 cpu_single_env->eip,
1132 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1133 }
1134 }
1135#endif
1136 p = page_find(start >> TARGET_PAGE_BITS);
1137 if (!p)
1138 return;
1139 if (p->code_bitmap) {
1140 offset = start & ~TARGET_PAGE_MASK;
1141 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1142 if (b & ((1 << len) - 1))
1143 goto do_invalidate;
1144 } else {
1145 do_invalidate:
1146 tb_invalidate_phys_page_range(start, start + len, 1);
1147 }
1148}
1149
1150
1151#if !defined(CONFIG_SOFTMMU)
1152static void tb_invalidate_phys_page(target_phys_addr_t addr,
1153 unsigned long pc, void *puc)
1154{
1155 int n, current_flags, current_tb_modified;
1156 target_ulong current_pc, current_cs_base;
1157 PageDesc *p;
1158 TranslationBlock *tb, *current_tb;
1159#ifdef TARGET_HAS_PRECISE_SMC
1160 CPUState *env = cpu_single_env;
1161#endif
1162
1163 addr &= TARGET_PAGE_MASK;
1164 p = page_find(addr >> TARGET_PAGE_BITS);
1165 if (!p)
1166 return;
1167 tb = p->first_tb;
1168 current_tb_modified = 0;
1169 current_tb = NULL;
1170 current_pc = 0; /* avoid warning */
1171 current_cs_base = 0; /* avoid warning */
1172 current_flags = 0; /* avoid warning */
1173#ifdef TARGET_HAS_PRECISE_SMC
1174 if (tb && pc != 0) {
1175 current_tb = tb_find_pc(pc);
1176 }
1177#endif
1178 while (tb != NULL) {
1179 n = (long)tb & 3;
1180 tb = (TranslationBlock *)((long)tb & ~3);
1181#ifdef TARGET_HAS_PRECISE_SMC
1182 if (current_tb == tb &&
1183 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1184 /* If we are modifying the current TB, we must stop
1185 its execution. We could be more precise by checking
1186 that the modification is after the current PC, but it
1187 would require a specialized function to partially
1188 restore the CPU state */
1189
1190 current_tb_modified = 1;
1191 cpu_restore_state(current_tb, env, pc, puc);
1192#if defined(TARGET_I386)
1193 current_flags = env->hflags;
1194 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1195 current_cs_base = (target_ulong)env->segs[R_CS].base;
1196 current_pc = current_cs_base + env->eip;
1197#else
1198#error unsupported CPU
1199#endif
1200 }
1201#endif /* TARGET_HAS_PRECISE_SMC */
1202 tb_phys_invalidate(tb, addr);
1203 tb = tb->page_next[n];
1204 }
1205 p->first_tb = NULL;
1206#ifdef TARGET_HAS_PRECISE_SMC
1207 if (current_tb_modified) {
1208 /* we generate a block containing just the instruction
1209 modifying the memory. It will ensure that it cannot modify
1210 itself */
1211 env->current_tb = NULL;
1212 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1213 cpu_resume_from_signal(env, puc);
1214 }
1215#endif
1216}
1217#endif
1218
1219/* add the tb in the target page and protect it if necessary */
1220static inline void tb_alloc_page(TranslationBlock *tb,
1221 unsigned int n, target_ulong page_addr)
1222{
1223 PageDesc *p;
1224 TranslationBlock *last_first_tb;
1225
1226 tb->page_addr[n] = page_addr;
1227 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1228 tb->page_next[n] = p->first_tb;
1229 last_first_tb = p->first_tb;
1230 p->first_tb = (TranslationBlock *)((long)tb | n);
1231 invalidate_page_bitmap(p);
1232
1233#if defined(TARGET_HAS_SMC) || 1
1234
1235#if defined(CONFIG_USER_ONLY)
1236 if (p->flags & PAGE_WRITE) {
1237 target_ulong addr;
1238 PageDesc *p2;
1239 int prot;
1240
1241 /* force the host page as non writable (writes will have a
1242 page fault + mprotect overhead) */
1243 page_addr &= qemu_host_page_mask;
1244 prot = 0;
1245 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1246 addr += TARGET_PAGE_SIZE) {
1247
1248 p2 = page_find (addr >> TARGET_PAGE_BITS);
1249 if (!p2)
1250 continue;
1251 prot |= p2->flags;
1252 p2->flags &= ~PAGE_WRITE;
1253 page_get_flags(addr);
1254 }
1255 mprotect(g2h(page_addr), qemu_host_page_size,
1256 (prot & PAGE_BITS) & ~PAGE_WRITE);
1257#ifdef DEBUG_TB_INVALIDATE
1258 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1259 page_addr);
1260#endif
1261 }
1262#else
1263 /* if some code is already present, then the pages are already
1264 protected. So we handle the case where only the first TB is
1265 allocated in a physical page */
1266 if (!last_first_tb) {
1267 tlb_protect_code(page_addr);
1268 }
1269#endif
1270
1271#endif /* TARGET_HAS_SMC */
1272}
1273
1274/* Allocate a new translation block. Flush the translation buffer if
1275 too many translation blocks or too much generated code. */
1276TranslationBlock *tb_alloc(target_ulong pc)
1277{
1278 TranslationBlock *tb;
1279
1280 if (nb_tbs >= code_gen_max_blocks ||
1281 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1282 return NULL;
1283 tb = &tbs[nb_tbs++];
1284 tb->pc = pc;
1285 tb->cflags = 0;
1286 return tb;
1287}
1288
1289void tb_free(TranslationBlock *tb)
1290{
1291 /* In practice this is mostly used for single-use temporary TBs.
1292 Ignore the hard cases and just back up if this TB happens to
1293 be the last one generated. */
1294 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1295 code_gen_ptr = tb->tc_ptr;
1296 nb_tbs--;
1297 }
1298}
1299
1300/* add a new TB and link it to the physical page tables. phys_page2 is
1301 (-1) to indicate that only one page contains the TB. */
1302void tb_link_phys(TranslationBlock *tb,
1303 target_ulong phys_pc, target_ulong phys_page2)
1304{
1305 unsigned int h;
1306 TranslationBlock **ptb;
1307
1308 /* Grab the mmap lock to stop another thread invalidating this TB
1309 before we are done. */
1310 mmap_lock();
1311 /* add in the physical hash table */
1312 h = tb_phys_hash_func(phys_pc);
1313 ptb = &tb_phys_hash[h];
1314 tb->phys_hash_next = *ptb;
1315 *ptb = tb;
1316
1317 /* add in the page list */
1318 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1319 if (phys_page2 != -1)
1320 tb_alloc_page(tb, 1, phys_page2);
1321 else
1322 tb->page_addr[1] = -1;
1323
1324 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1325 tb->jmp_next[0] = NULL;
1326 tb->jmp_next[1] = NULL;
1327
1328 /* init original jump addresses */
1329 if (tb->tb_next_offset[0] != 0xffff)
1330 tb_reset_jump(tb, 0);
1331 if (tb->tb_next_offset[1] != 0xffff)
1332 tb_reset_jump(tb, 1);
1333
1334#ifdef DEBUG_TB_CHECK
1335 tb_page_check();
1336#endif
1337 mmap_unlock();
1338}
1339
1340/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1341 tb[1].tc_ptr. Return NULL if not found */
1342TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1343{
1344 int m_min, m_max, m;
1345 unsigned long v;
1346 TranslationBlock *tb;
1347
1348 if (nb_tbs <= 0)
1349 return NULL;
1350 if (tc_ptr < (unsigned long)code_gen_buffer ||
1351 tc_ptr >= (unsigned long)code_gen_ptr)
1352 return NULL;
1353 /* binary search (cf Knuth) */
1354 m_min = 0;
1355 m_max = nb_tbs - 1;
1356 while (m_min <= m_max) {
1357 m = (m_min + m_max) >> 1;
1358 tb = &tbs[m];
1359 v = (unsigned long)tb->tc_ptr;
1360 if (v == tc_ptr)
1361 return tb;
1362 else if (tc_ptr < v) {
1363 m_max = m - 1;
1364 } else {
1365 m_min = m + 1;
1366 }
1367 }
1368 return &tbs[m_max];
1369}
1370
1371static void tb_reset_jump_recursive(TranslationBlock *tb);
1372
1373static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1374{
1375 TranslationBlock *tb1, *tb_next, **ptb;
1376 unsigned int n1;
1377
1378 tb1 = tb->jmp_next[n];
1379 if (tb1 != NULL) {
1380 /* find head of list */
1381 for(;;) {
1382 n1 = (long)tb1 & 3;
1383 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1384 if (n1 == 2)
1385 break;
1386 tb1 = tb1->jmp_next[n1];
1387 }
1388 /* we are now sure that tb jumps to tb1 */
1389 tb_next = tb1;
1390
1391 /* remove tb from the jmp_first list */
1392 ptb = &tb_next->jmp_first;
1393 for(;;) {
1394 tb1 = *ptb;
1395 n1 = (long)tb1 & 3;
1396 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1397 if (n1 == n && tb1 == tb)
1398 break;
1399 ptb = &tb1->jmp_next[n1];
1400 }
1401 *ptb = tb->jmp_next[n];
1402 tb->jmp_next[n] = NULL;
1403
1404 /* suppress the jump to next tb in generated code */
1405 tb_reset_jump(tb, n);
1406
1407 /* suppress jumps in the tb on which we could have jumped */
1408 tb_reset_jump_recursive(tb_next);
1409 }
1410}
1411
1412static void tb_reset_jump_recursive(TranslationBlock *tb)
1413{
1414 tb_reset_jump_recursive2(tb, 0);
1415 tb_reset_jump_recursive2(tb, 1);
1416}
1417
1418#if defined(TARGET_HAS_ICE)
1419static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1420{
1421 target_ulong addr, pd;
1422 ram_addr_t ram_addr;
1423 PhysPageDesc *p;
1424
1425 addr = cpu_get_phys_page_debug(env, pc);
1426 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1427 if (!p) {
1428 pd = IO_MEM_UNASSIGNED;
1429 } else {
1430 pd = p->phys_offset;
1431 }
1432 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1433 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1434}
1435#endif
1436
1437/* Add a watchpoint. */
1438int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1439{
1440 int i;
1441
1442 for (i = 0; i < env->nb_watchpoints; i++) {
1443 if (addr == env->watchpoint[i].vaddr)
1444 return 0;
1445 }
1446 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1447 return -1;
1448
1449 i = env->nb_watchpoints++;
1450 env->watchpoint[i].vaddr = addr;
1451 env->watchpoint[i].type = type;
1452 tlb_flush_page(env, addr);
1453 /* FIXME: This flush is needed because of the hack to make memory ops
1454 terminate the TB. It can be removed once the proper IO trap and
1455 re-execute bits are in. */
1456 tb_flush(env);
1457 return i;
1458}
1459
1460/* Remove a watchpoint. */
1461int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1462{
1463 int i;
1464
1465 for (i = 0; i < env->nb_watchpoints; i++) {
1466 if (addr == env->watchpoint[i].vaddr) {
1467 env->nb_watchpoints--;
1468 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1469 tlb_flush_page(env, addr);
1470 return 0;
1471 }
1472 }
1473 return -1;
1474}
1475
1476/* Remove all watchpoints. */
1477void cpu_watchpoint_remove_all(CPUState *env) {
1478 int i;
1479
1480 for (i = 0; i < env->nb_watchpoints; i++) {
1481 tlb_flush_page(env, env->watchpoint[i].vaddr);
1482 }
1483 env->nb_watchpoints = 0;
1484}
1485
1486/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1487 breakpoint is reached */
1488int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1489{
1490#if defined(TARGET_HAS_ICE)
1491 int i;
1492
1493 for(i = 0; i < env->nb_breakpoints; i++) {
1494 if (env->breakpoints[i] == pc)
1495 return 0;
1496 }
1497
1498 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1499 return -1;
1500 env->breakpoints[env->nb_breakpoints++] = pc;
1501
1502 breakpoint_invalidate(env, pc);
1503 return 0;
1504#else
1505 return -1;
1506#endif
1507}
1508
1509/* remove all breakpoints */
1510void cpu_breakpoint_remove_all(CPUState *env) {
1511#if defined(TARGET_HAS_ICE)
1512 int i;
1513 for(i = 0; i < env->nb_breakpoints; i++) {
1514 breakpoint_invalidate(env, env->breakpoints[i]);
1515 }
1516 env->nb_breakpoints = 0;
1517#endif
1518}
1519
1520/* remove a breakpoint */
1521int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1522{
1523#if defined(TARGET_HAS_ICE)
1524 int i;
1525 for(i = 0; i < env->nb_breakpoints; i++) {
1526 if (env->breakpoints[i] == pc)
1527 goto found;
1528 }
1529 return -1;
1530 found:
1531 env->nb_breakpoints--;
1532 if (i < env->nb_breakpoints)
1533 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1534
1535 breakpoint_invalidate(env, pc);
1536 return 0;
1537#else
1538 return -1;
1539#endif
1540}
1541
1542/* enable or disable single step mode. EXCP_DEBUG is returned by the
1543 CPU loop after each instruction */
1544void cpu_single_step(CPUState *env, int enabled)
1545{
1546#if defined(TARGET_HAS_ICE)
1547 if (env->singlestep_enabled != enabled) {
1548 env->singlestep_enabled = enabled;
1549 /* must flush all the translated code to avoid inconsistencies */
1550 /* XXX: only flush what is necessary */
1551 tb_flush(env);
1552 }
1553#endif
1554}
1555
1556#ifndef VBOX
1557/* enable or disable low-level logging */
1558void cpu_set_log(int log_flags)
1559{
1560 loglevel = log_flags;
1561 if (loglevel && !logfile) {
1562 logfile = fopen(logfilename, "w");
1563 if (!logfile) {
1564 perror(logfilename);
1565 _exit(1);
1566 }
1567#if !defined(CONFIG_SOFTMMU)
1568 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1569 {
1570 static uint8_t logfile_buf[4096];
1571 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1572 }
1573#else
1574 setvbuf(logfile, NULL, _IOLBF, 0);
1575#endif
1576 }
1577}
1578
1579void cpu_set_log_filename(const char *filename)
1580{
1581 logfilename = strdup(filename);
1582}
1583#endif /* !VBOX */
1584
1585/* mask must never be zero, except for A20 change call */
1586void cpu_interrupt(CPUState *env, int mask)
1587{
1588#if !defined(USE_NPTL)
1589 TranslationBlock *tb;
1590 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1591#endif
1592 int old_mask;
1593
1594 old_mask = env->interrupt_request;
1595#ifdef VBOX
1596 VM_ASSERT_EMT(env->pVM);
1597 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1598#else /* !VBOX */
1599 /* FIXME: This is probably not threadsafe. A different thread could
1600 be in the middle of a read-modify-write operation. */
1601 env->interrupt_request |= mask;
1602#endif /* !VBOX */
1603#if defined(USE_NPTL)
1604 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1605 problem and hope the cpu will stop of its own accord. For userspace
1606 emulation this often isn't actually as bad as it sounds. Often
1607 signals are used primarily to interrupt blocking syscalls. */
1608#else
1609 if (use_icount) {
1610 env->icount_decr.u16.high = 0xffff;
1611#ifndef CONFIG_USER_ONLY
1612 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1613 an async event happened and we need to process it. */
1614 if (!can_do_io(env)
1615 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1616 cpu_abort(env, "Raised interrupt while not in I/O function");
1617 }
1618#endif
1619 } else {
1620 tb = env->current_tb;
1621 /* if the cpu is currently executing code, we must unlink it and
1622 all the potentially executing TB */
1623 if (tb && !testandset(&interrupt_lock)) {
1624 env->current_tb = NULL;
1625 tb_reset_jump_recursive(tb);
1626 resetlock(&interrupt_lock);
1627 }
1628 }
1629#endif
1630}
1631
1632void cpu_reset_interrupt(CPUState *env, int mask)
1633{
1634#ifdef VBOX
1635 /*
1636 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1637 * for future changes!
1638 */
1639 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1640#else /* !VBOX */
1641 env->interrupt_request &= ~mask;
1642#endif /* !VBOX */
1643}
1644
1645#ifndef VBOX
1646CPULogItem cpu_log_items[] = {
1647 { CPU_LOG_TB_OUT_ASM, "out_asm",
1648 "show generated host assembly code for each compiled TB" },
1649 { CPU_LOG_TB_IN_ASM, "in_asm",
1650 "show target assembly code for each compiled TB" },
1651 { CPU_LOG_TB_OP, "op",
1652 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1653#ifdef TARGET_I386
1654 { CPU_LOG_TB_OP_OPT, "op_opt",
1655 "show micro ops after optimization for each compiled TB" },
1656#endif
1657 { CPU_LOG_INT, "int",
1658 "show interrupts/exceptions in short format" },
1659 { CPU_LOG_EXEC, "exec",
1660 "show trace before each executed TB (lots of logs)" },
1661 { CPU_LOG_TB_CPU, "cpu",
1662 "show CPU state before bloc translation" },
1663#ifdef TARGET_I386
1664 { CPU_LOG_PCALL, "pcall",
1665 "show protected mode far calls/returns/exceptions" },
1666#endif
1667#ifdef DEBUG_IOPORT
1668 { CPU_LOG_IOPORT, "ioport",
1669 "show all i/o ports accesses" },
1670#endif
1671 { 0, NULL, NULL },
1672};
1673
1674static int cmp1(const char *s1, int n, const char *s2)
1675{
1676 if (strlen(s2) != n)
1677 return 0;
1678 return memcmp(s1, s2, n) == 0;
1679}
1680
1681/* takes a comma-separated list of log masks. Returns 0 on error. */
1682int cpu_str_to_log_mask(const char *str)
1683{
1684 CPULogItem *item;
1685 int mask;
1686 const char *p, *p1;
1687
1688 p = str;
1689 mask = 0;
1690 for(;;) {
1691 p1 = strchr(p, ',');
1692 if (!p1)
1693 p1 = p + strlen(p);
1694 if(cmp1(p,p1-p,"all")) {
1695 for(item = cpu_log_items; item->mask != 0; item++) {
1696 mask |= item->mask;
1697 }
1698 } else {
1699 for(item = cpu_log_items; item->mask != 0; item++) {
1700 if (cmp1(p, p1 - p, item->name))
1701 goto found;
1702 }
1703 return 0;
1704 }
1705 found:
1706 mask |= item->mask;
1707 if (*p1 != ',')
1708 break;
1709 p = p1 + 1;
1710 }
1711 return mask;
1712}
1713#endif /* !VBOX */
1714
1715#ifndef VBOX /* VBOX: we have our own routine. */
1716void cpu_abort(CPUState *env, const char *fmt, ...)
1717{
1718 va_list ap;
1719
1720 va_start(ap, fmt);
1721 fprintf(stderr, "qemu: fatal: ");
1722 vfprintf(stderr, fmt, ap);
1723 fprintf(stderr, "\n");
1724#ifdef TARGET_I386
1725 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1726#else
1727 cpu_dump_state(env, stderr, fprintf, 0);
1728#endif
1729 va_end(ap);
1730 abort();
1731}
1732#endif /* !VBOX */
1733
1734#ifndef VBOX
1735CPUState *cpu_copy(CPUState *env)
1736{
1737 CPUState *new_env = cpu_init(env->cpu_model_str);
1738 /* preserve chaining and index */
1739 CPUState *next_cpu = new_env->next_cpu;
1740 int cpu_index = new_env->cpu_index;
1741 memcpy(new_env, env, sizeof(CPUState));
1742 new_env->next_cpu = next_cpu;
1743 new_env->cpu_index = cpu_index;
1744 return new_env;
1745}
1746#endif
1747
1748#if !defined(CONFIG_USER_ONLY)
1749
1750static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1751{
1752 unsigned int i;
1753
1754 /* Discard jump cache entries for any tb which might potentially
1755 overlap the flushed page. */
1756 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1757 memset (&env->tb_jmp_cache[i], 0,
1758 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1759
1760 i = tb_jmp_cache_hash_page(addr);
1761 memset (&env->tb_jmp_cache[i], 0,
1762 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1763
1764#ifdef VBOX
1765 /* inform raw mode about TLB page flush */
1766 remR3FlushPage(env, addr);
1767#endif /* VBOX */
1768}
1769
1770/* NOTE: if flush_global is true, also flush global entries (not
1771 implemented yet) */
1772void tlb_flush(CPUState *env, int flush_global)
1773{
1774 int i;
1775
1776#if defined(DEBUG_TLB)
1777 printf("tlb_flush:\n");
1778#endif
1779 /* must reset current TB so that interrupts cannot modify the
1780 links while we are modifying them */
1781 env->current_tb = NULL;
1782
1783 for(i = 0; i < CPU_TLB_SIZE; i++) {
1784 env->tlb_table[0][i].addr_read = -1;
1785 env->tlb_table[0][i].addr_write = -1;
1786 env->tlb_table[0][i].addr_code = -1;
1787 env->tlb_table[1][i].addr_read = -1;
1788 env->tlb_table[1][i].addr_write = -1;
1789 env->tlb_table[1][i].addr_code = -1;
1790#if (NB_MMU_MODES >= 3)
1791 env->tlb_table[2][i].addr_read = -1;
1792 env->tlb_table[2][i].addr_write = -1;
1793 env->tlb_table[2][i].addr_code = -1;
1794#if (NB_MMU_MODES == 4)
1795 env->tlb_table[3][i].addr_read = -1;
1796 env->tlb_table[3][i].addr_write = -1;
1797 env->tlb_table[3][i].addr_code = -1;
1798#endif
1799#endif
1800 }
1801
1802 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1803
1804#ifdef VBOX
1805 /* inform raw mode about TLB flush */
1806 remR3FlushTLB(env, flush_global);
1807#endif
1808#ifdef USE_KQEMU
1809 if (env->kqemu_enabled) {
1810 kqemu_flush(env, flush_global);
1811 }
1812#endif
1813 tlb_flush_count++;
1814}
1815
1816static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1817{
1818 if (addr == (tlb_entry->addr_read &
1819 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1820 addr == (tlb_entry->addr_write &
1821 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1822 addr == (tlb_entry->addr_code &
1823 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1824 tlb_entry->addr_read = -1;
1825 tlb_entry->addr_write = -1;
1826 tlb_entry->addr_code = -1;
1827 }
1828}
1829
1830void tlb_flush_page(CPUState *env, target_ulong addr)
1831{
1832 int i;
1833
1834#if defined(DEBUG_TLB)
1835 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1836#endif
1837 /* must reset current TB so that interrupts cannot modify the
1838 links while we are modifying them */
1839 env->current_tb = NULL;
1840
1841 addr &= TARGET_PAGE_MASK;
1842 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1843 tlb_flush_entry(&env->tlb_table[0][i], addr);
1844 tlb_flush_entry(&env->tlb_table[1][i], addr);
1845#if (NB_MMU_MODES >= 3)
1846 tlb_flush_entry(&env->tlb_table[2][i], addr);
1847#if (NB_MMU_MODES == 4)
1848 tlb_flush_entry(&env->tlb_table[3][i], addr);
1849#endif
1850#endif
1851
1852 tlb_flush_jmp_cache(env, addr);
1853
1854#ifdef USE_KQEMU
1855 if (env->kqemu_enabled) {
1856 kqemu_flush_page(env, addr);
1857 }
1858#endif
1859}
1860
1861/* update the TLBs so that writes to code in the virtual page 'addr'
1862 can be detected */
1863static void tlb_protect_code(ram_addr_t ram_addr)
1864{
1865 cpu_physical_memory_reset_dirty(ram_addr,
1866 ram_addr + TARGET_PAGE_SIZE,
1867 CODE_DIRTY_FLAG);
1868#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1869 /** @todo Retest this? This function has changed... */
1870 remR3ProtectCode(cpu_single_env, ram_addr);
1871#endif
1872}
1873
1874/* update the TLB so that writes in physical page 'phys_addr' are no longer
1875 tested for self modifying code */
1876static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1877 target_ulong vaddr)
1878{
1879#ifdef VBOX
1880 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1881#endif
1882 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1883}
1884
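/* if this TLB entry maps RAM inside [start, start + length), downgrade its
   write handler to IO_MEM_NOTDIRTY so that the next store takes the slow path
   and marks the page dirty again */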
1885static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1886 unsigned long start, unsigned long length)
1887{
1888 unsigned long addr;
1889 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1890 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1891 if ((addr - start) < length) {
1892 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1893 }
1894 }
1895}
1896
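/* clear the given dirty flag bits for every page in [start, end) and adjust
   all CPU TLBs so that subsequent writes to these pages fault into the
   notdirty handler */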
1897void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1898 int dirty_flags)
1899{
1900 CPUState *env;
1901 unsigned long length, start1;
1902 int i, mask, len;
1903 uint8_t *p;
1904
1905 start &= TARGET_PAGE_MASK;
1906 end = TARGET_PAGE_ALIGN(end);
1907
1908 length = end - start;
1909 if (length == 0)
1910 return;
1911 len = length >> TARGET_PAGE_BITS;
1912#ifdef USE_KQEMU
1913 /* XXX: should not depend on cpu context */
1914 env = first_cpu;
1915 if (env->kqemu_enabled) {
1916 ram_addr_t addr;
1917 addr = start;
1918 for(i = 0; i < len; i++) {
1919 kqemu_set_notdirty(env, addr);
1920 addr += TARGET_PAGE_SIZE;
1921 }
1922 }
1923#endif
1924 mask = ~dirty_flags;
1925 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1926#ifdef VBOX
1927 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1928#endif
1929 for(i = 0; i < len; i++)
1930 p[i] &= mask;
1931
1932 /* we modify the TLB cache so that the dirty bit will be set again
1933 when accessing the range */
1934#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1935 start1 = start;
1936#elif !defined(VBOX)
1937 start1 = start + (unsigned long)phys_ram_base;
1938#else
1939 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
1940#endif
1941 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1942 for(i = 0; i < CPU_TLB_SIZE; i++)
1943 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1944 for(i = 0; i < CPU_TLB_SIZE; i++)
1945 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1946#if (NB_MMU_MODES >= 3)
1947 for(i = 0; i < CPU_TLB_SIZE; i++)
1948 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1949#if (NB_MMU_MODES == 4)
1950 for(i = 0; i < CPU_TLB_SIZE; i++)
1951 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1952#endif
1953#endif
1954 }
1955}
1956
1957#ifndef VBOX
1958int cpu_physical_memory_set_dirty_tracking(int enable)
1959{
1960 in_migration = enable;
1961 return 0;
1962}
1963
1964int cpu_physical_memory_get_dirty_tracking(void)
1965{
1966 return in_migration;
1967}
1968#endif
1969
1970static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1971{
1972 ram_addr_t ram_addr;
1973
1974 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1975 /* RAM case */
1976#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
1977 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1978#elif !defined(VBOX)
1979 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1980 tlb_entry->addend - (unsigned long)phys_ram_base;
1981#else
1982 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
1983#endif
1984 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1985 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1986 }
1987 }
1988}
1989
1990/* update the TLB according to the current state of the dirty bits */
1991void cpu_tlb_update_dirty(CPUState *env)
1992{
1993 int i;
1994 for(i = 0; i < CPU_TLB_SIZE; i++)
1995 tlb_update_dirty(&env->tlb_table[0][i]);
1996 for(i = 0; i < CPU_TLB_SIZE; i++)
1997 tlb_update_dirty(&env->tlb_table[1][i]);
1998#if (NB_MMU_MODES >= 3)
1999 for(i = 0; i < CPU_TLB_SIZE; i++)
2000 tlb_update_dirty(&env->tlb_table[2][i]);
2001#if (NB_MMU_MODES == 4)
2002 for(i = 0; i < CPU_TLB_SIZE; i++)
2003 tlb_update_dirty(&env->tlb_table[3][i]);
2004#endif
2005#endif
2006}
2007
2008static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2009{
2010 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2011 tlb_entry->addr_write = vaddr;
2012}
2013
2014
2015/* update the TLB corresponding to virtual page vaddr and phys addr
2016 addr so that it is no longer dirty */
2017static inline void tlb_set_dirty(CPUState *env,
2018 unsigned long addr, target_ulong vaddr)
2019{
2020 int i;
2021
2022 addr &= TARGET_PAGE_MASK;
2023 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2024 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2025 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2026#if (NB_MMU_MODES >= 3)
2027 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2028#if (NB_MMU_MODES == 4)
2029 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2030#endif
2031#endif
2032}
2033
2034/* add a new TLB entry. At most one entry for a given virtual address
2035 is permitted. Return 0 if OK or 2 if the page could not be mapped
2036 (can only happen in non-SOFTMMU mode for I/O pages or pages
2037 conflicting with the host address space). */
2038int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2039 target_phys_addr_t paddr, int prot,
2040 int mmu_idx, int is_softmmu)
2041{
2042 PhysPageDesc *p;
2043 unsigned long pd;
2044 unsigned int index;
2045 target_ulong address;
2046 target_ulong code_address;
2047 target_phys_addr_t addend;
2048 int ret;
2049 CPUTLBEntry *te;
2050 int i;
2051 target_phys_addr_t iotlb;
2052
2053 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2054 if (!p) {
2055 pd = IO_MEM_UNASSIGNED;
2056 } else {
2057 pd = p->phys_offset;
2058 }
2059#if defined(DEBUG_TLB)
2060 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2061 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2062#endif
2063
2064 ret = 0;
2065 address = vaddr;
2066 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2067 /* IO memory case (romd handled later) */
2068 address |= TLB_MMIO;
2069 }
2070#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2071 addend = pd & TARGET_PAGE_MASK;
2072#elif !defined(VBOX)
2073 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2074#else
2075 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
2076#endif
2077 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2078 /* Normal RAM. */
2079 iotlb = pd & TARGET_PAGE_MASK;
2080 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2081 iotlb |= IO_MEM_NOTDIRTY;
2082 else
2083 iotlb |= IO_MEM_ROM;
2084 } else {
2085 /* IO handlers are currently passed a physical address.
2086 It would be nice to pass an offset from the base address
2087 of that region. This would avoid having to special case RAM,
2088 and avoid full address decoding in every device.
2089 We can't use the high bits of pd for this because
2090 IO_MEM_ROMD uses these as a ram address. */
2091 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2092 }
2093
2094 code_address = address;
2095 /* Make accesses to pages with watchpoints go via the
2096 watchpoint trap routines. */
2097 for (i = 0; i < env->nb_watchpoints; i++) {
2098 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2099 iotlb = io_mem_watch + paddr;
2100 /* TODO: The memory case can be optimized by not trapping
2101 reads of pages with a write breakpoint. */
2102 address |= TLB_MMIO;
2103 }
2104 }
2105
2106 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2107 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2108 te = &env->tlb_table[mmu_idx][index];
2109 te->addend = addend - vaddr;
2110 if (prot & PAGE_READ) {
2111 te->addr_read = address;
2112 } else {
2113 te->addr_read = -1;
2114 }
2115
2116 if (prot & PAGE_EXEC) {
2117 te->addr_code = code_address;
2118 } else {
2119 te->addr_code = -1;
2120 }
2121 if (prot & PAGE_WRITE) {
2122 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2123 (pd & IO_MEM_ROMD)) {
2124 /* Write access calls the I/O callback. */
2125 te->addr_write = address | TLB_MMIO;
2126 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2127 !cpu_physical_memory_is_dirty(pd)) {
2128 te->addr_write = address | TLB_NOTDIRTY;
2129 } else {
2130 te->addr_write = address;
2131 }
2132 } else {
2133 te->addr_write = -1;
2134 }
2135#ifdef VBOX
2136 /* inform raw mode about TLB page change */
2137 remR3FlushPage(env, vaddr);
2138#endif
2139 return ret;
2140}
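
/* A self-contained sketch (separate demo program, not part of exec.c) of the
 * two invariants tlb_set_page_exec establishes for a RAM page: the TLB slot
 * is a direct-mapped hash of the virtual page number, and the stored addend
 * is chosen so that host pointer = guest virtual address + addend.  This is
 * illustrative only; the real softmmu fast path, access flags and I/O cases
 * are omitted, and all DEMO_* names are assumptions. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_BITS 12
#define DEMO_PAGE_SIZE (1u << DEMO_PAGE_BITS)
#define DEMO_PAGE_MASK (~(uintptr_t)(DEMO_PAGE_SIZE - 1))
#define DEMO_TLB_SIZE  256                  /* must be a power of two */

typedef struct {
    uintptr_t vaddr_page;                   /* tag: guest virtual page */
    uintptr_t addend;                       /* host page base - guest page base */
} demo_tlb_entry;

static demo_tlb_entry demo_tlb[DEMO_TLB_SIZE];

static void demo_tlb_set_page(uintptr_t vaddr, uint8_t *host_page_base)
{
    unsigned index = (vaddr >> DEMO_PAGE_BITS) & (DEMO_TLB_SIZE - 1);
    demo_tlb[index].vaddr_page = vaddr & DEMO_PAGE_MASK;
    demo_tlb[index].addend = (uintptr_t)host_page_base - (vaddr & DEMO_PAGE_MASK);
}

static uint8_t *demo_tlb_translate(uintptr_t vaddr)
{
    unsigned index = (vaddr >> DEMO_PAGE_BITS) & (DEMO_TLB_SIZE - 1);
    if (demo_tlb[index].vaddr_page != (vaddr & DEMO_PAGE_MASK))
        return NULL;                        /* miss: the real code would refill the entry */
    return (uint8_t *)(vaddr + demo_tlb[index].addend);
}

int main(void)
{
    uint8_t *ram = malloc(DEMO_PAGE_SIZE);
    uintptr_t guest_vaddr = 0x7fff2000;     /* page-aligned guest address */
    if (!ram)
        return 1;
    ram[0x34] = 0x5a;
    demo_tlb_set_page(guest_vaddr, ram);
    printf("guest 0x%lx -> 0x%02x\n",
           (unsigned long)(guest_vaddr + 0x34),
           *demo_tlb_translate(guest_vaddr + 0x34));
    free(ram);
    return 0;
}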
2141
2142/* called from signal handler: invalidate the code and unprotect the
2143 page. Return TRUE if the fault was successfully handled. */
2144int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2145{
2146#if !defined(CONFIG_SOFTMMU)
2147 VirtPageDesc *vp;
2148
2149#if defined(DEBUG_TLB)
2150 printf("page_unprotect: addr=0x%08x\n", addr);
2151#endif
2152 addr &= TARGET_PAGE_MASK;
2153
2154 /* if it is not mapped, no need to worry here */
2155 if (addr >= MMAP_AREA_END)
2156 return 0;
2157 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2158 if (!vp)
2159 return 0;
2160 /* NOTE: in this case, validate_tag is _not_ tested as it
2161 validates only the code TLB */
2162 if (vp->valid_tag != virt_valid_tag)
2163 return 0;
2164 if (!(vp->prot & PAGE_WRITE))
2165 return 0;
2166#if defined(DEBUG_TLB)
2167 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2168 addr, vp->phys_addr, vp->prot);
2169#endif
2170 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2171 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2172 (unsigned long)addr, vp->prot);
2173 /* set the dirty bit */
2174 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2175 /* flush the code inside */
2176 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2177 return 1;
2178#elif defined(VBOX)
2179 addr &= TARGET_PAGE_MASK;
2180
2181 /* if it is not mapped, no need to worry here */
2182 if (addr >= MMAP_AREA_END)
2183 return 0;
2184 return 1;
2185#else
2186 return 0;
2187#endif
2188}
2189
2190#else
2191
2192void tlb_flush(CPUState *env, int flush_global)
2193{
2194}
2195
2196void tlb_flush_page(CPUState *env, target_ulong addr)
2197{
2198}
2199
2200int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2201 target_phys_addr_t paddr, int prot,
2202 int is_user, int is_softmmu)
2203{
2204 return 0;
2205}
2206
2207#ifndef VBOX
2208/* dump memory mappings */
2209void page_dump(FILE *f)
2210{
2211 unsigned long start, end;
2212 int i, j, prot, prot1;
2213 PageDesc *p;
2214
2215 fprintf(f, "%-8s %-8s %-8s %s\n",
2216 "start", "end", "size", "prot");
2217 start = -1;
2218 end = -1;
2219 prot = 0;
2220 for(i = 0; i <= L1_SIZE; i++) {
2221 if (i < L1_SIZE)
2222 p = l1_map[i];
2223 else
2224 p = NULL;
2225 for(j = 0;j < L2_SIZE; j++) {
2226 if (!p)
2227 prot1 = 0;
2228 else
2229 prot1 = p[j].flags;
2230 if (prot1 != prot) {
2231 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2232 if (start != -1) {
2233 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2234 start, end, end - start,
2235 prot & PAGE_READ ? 'r' : '-',
2236 prot & PAGE_WRITE ? 'w' : '-',
2237 prot & PAGE_EXEC ? 'x' : '-');
2238 }
2239 if (prot1 != 0)
2240 start = end;
2241 else
2242 start = -1;
2243 prot = prot1;
2244 }
2245 if (!p)
2246 break;
2247 }
2248 }
2249}
2250#endif /* !VBOX */
2251
2252int page_get_flags(target_ulong address)
2253{
2254 PageDesc *p;
2255
2256 p = page_find(address >> TARGET_PAGE_BITS);
2257 if (!p)
2258 return 0;
2259 return p->flags;
2260}
2261
2262/* modify the flags of a page and invalidate the code if
2263 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2264 depending on PAGE_WRITE */
2265void page_set_flags(target_ulong start, target_ulong end, int flags)
2266{
2267 PageDesc *p;
2268 target_ulong addr;
2269
2270 start = start & TARGET_PAGE_MASK;
2271 end = TARGET_PAGE_ALIGN(end);
2272 if (flags & PAGE_WRITE)
2273 flags |= PAGE_WRITE_ORG;
2274#ifdef VBOX
2275 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2276#endif
2277 spin_lock(&tb_lock);
2278 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2279 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2280 /* if the write protection is set, then we invalidate the code
2281 inside */
2282 if (!(p->flags & PAGE_WRITE) &&
2283 (flags & PAGE_WRITE) &&
2284 p->first_tb) {
2285 tb_invalidate_phys_page(addr, 0, NULL);
2286 }
2287 p->flags = flags;
2288 }
2289 spin_unlock(&tb_lock);
2290}
2291
2292/* called from signal handler: invalidate the code and unprotect the
2293 page. Return TRUE if the fault was successfully handled. */
2294int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2295{
2296 unsigned int page_index, prot, pindex;
2297 PageDesc *p, *p1;
2298 target_ulong host_start, host_end, addr;
2299
2300 host_start = address & qemu_host_page_mask;
2301 page_index = host_start >> TARGET_PAGE_BITS;
2302 p1 = page_find(page_index);
2303 if (!p1)
2304 return 0;
2305 host_end = host_start + qemu_host_page_size;
2306 p = p1;
2307 prot = 0;
2308 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2309 prot |= p->flags;
2310 p++;
2311 }
2312 /* if the page was really writable, then we change its
2313 protection back to writable */
2314 if (prot & PAGE_WRITE_ORG) {
2315 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2316 if (!(p1[pindex].flags & PAGE_WRITE)) {
2317 mprotect((void *)g2h(host_start), qemu_host_page_size,
2318 (prot & PAGE_BITS) | PAGE_WRITE);
2319 p1[pindex].flags |= PAGE_WRITE;
2320 /* and since the content will be modified, we must invalidate
2321 the corresponding translated code. */
2322 tb_invalidate_phys_page(address, pc, puc);
2323#ifdef DEBUG_TB_CHECK
2324 tb_invalidate_check(address);
2325#endif
2326 return 1;
2327 }
2328 }
2329 return 0;
2330}
2331
2332/* call this function when system calls directly modify a memory area */
2333/* ??? This should be redundant now that we have lock_user. */
2334void page_unprotect_range(target_ulong data, target_ulong data_size)
2335{
2336 target_ulong start, end, addr;
2337
2338 start = data;
2339 end = start + data_size;
2340 start &= TARGET_PAGE_MASK;
2341 end = TARGET_PAGE_ALIGN(end);
2342 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2343 page_unprotect(addr, 0, NULL);
2344 }
2345}
2346
2347static inline void tlb_set_dirty(CPUState *env,
2348 unsigned long addr, target_ulong vaddr)
2349{
2350}
2351#endif /* defined(CONFIG_USER_ONLY) */
2352
2353/* register physical memory. 'size' must be a multiple of the target
2354 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2355 io memory page */
2356void cpu_register_physical_memory(target_phys_addr_t start_addr,
2357 unsigned long size,
2358 unsigned long phys_offset)
2359{
2360 target_phys_addr_t addr, end_addr;
2361 PhysPageDesc *p;
2362 CPUState *env;
2363
2364 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2365 end_addr = start_addr + size;
2366 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2367 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2368 p->phys_offset = phys_offset;
2369#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2370 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2371 (phys_offset & IO_MEM_ROMD))
2372#else
2373 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2374 || (phys_offset & IO_MEM_ROMD)
2375 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2376#endif
2377
2378 phys_offset += TARGET_PAGE_SIZE;
2379 }
2380
2381 /* since each CPU stores ram addresses in its TLB cache, we must
2382 reset the modified entries */
2383 /* XXX: slow ! */
2384 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2385 tlb_flush(env, 1);
2386 }
2387}
2388
2389/* XXX: temporary until new memory mapping API */
2390uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2391{
2392 PhysPageDesc *p;
2393
2394 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2395 if (!p)
2396 return IO_MEM_UNASSIGNED;
2397 return p->phys_offset;
2398}
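
/* A self-contained sketch (separate demo program, not part of exec.c) of the
 * idea behind cpu_register_physical_memory/cpu_get_physical_page_desc: every
 * guest physical page gets a phys_offset whose low bits identify RAM, ROM or
 * an I/O zone and whose high bits give the offset inside that region.  A flat
 * array stands in for the real multi-level phys_page table, only plain RAM
 * advances the offset here, and all DEMO_* names and values are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_BITS  12
#define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_BITS)
#define DEMO_PAGE_MASK  (~(uint32_t)(DEMO_PAGE_SIZE - 1))
#define DEMO_NB_PAGES   64
#define DEMO_IO_MEM_RAM         0x0u        /* illustrative type codes */
#define DEMO_IO_MEM_UNASSIGNED  0x2u

static uint32_t demo_phys_offset[DEMO_NB_PAGES];

static void demo_register(uint32_t start, uint32_t size, uint32_t phys_offset)
{
    uint32_t addr;
    for (addr = start; addr != start + size; addr += DEMO_PAGE_SIZE) {
        demo_phys_offset[addr >> DEMO_PAGE_BITS] = phys_offset;
        if ((phys_offset & ~DEMO_PAGE_MASK) == DEMO_IO_MEM_RAM)
            phys_offset += DEMO_PAGE_SIZE;  /* RAM: offset advances per page */
    }
}

int main(void)
{
    unsigned i;
    for (i = 0; i < DEMO_NB_PAGES; i++)
        demo_phys_offset[i] = DEMO_IO_MEM_UNASSIGNED;
    demo_register(0x00010000, 4 * DEMO_PAGE_SIZE, 0x0 | DEMO_IO_MEM_RAM);
    printf("page at 0x00012000 -> phys_offset 0x%08x\n",
           demo_phys_offset[0x00012000 >> DEMO_PAGE_BITS]);
    return 0;
}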
2399
2400static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2401{
2402#ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem read 0x%08x\n", (int)addr);
2404#endif
2405 return 0;
2406}
2407
2408static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2409{
2410#ifdef DEBUG_UNASSIGNED
2411 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2412#endif
2413}
2414
2415static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2416 unassigned_mem_readb,
2417 unassigned_mem_readb,
2418 unassigned_mem_readb,
2419};
2420
2421static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2422 unassigned_mem_writeb,
2423 unassigned_mem_writeb,
2424 unassigned_mem_writeb,
2425};
2426
2427static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2428{
2429 unsigned long ram_addr;
2430 int dirty_flags;
2431#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2432 ram_addr = addr;
2433#elif !defined(VBOX)
2434 ram_addr = addr - (unsigned long)phys_ram_base;
2435#else
2436 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2437#endif
2438#ifdef VBOX
2439 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2440 dirty_flags = 0xff;
2441 else
2442#endif /* VBOX */
2443 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2444 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2445#if !defined(CONFIG_USER_ONLY)
2446 tb_invalidate_phys_page_fast(ram_addr, 1);
2447# ifdef VBOX
2448 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2449 dirty_flags = 0xff;
2450 else
2451# endif /* VBOX */
2452 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2453#endif
2454 }
2455 stb_p((uint8_t *)(long)addr, val);
2456#ifdef USE_KQEMU
2457 if (cpu_single_env->kqemu_enabled &&
2458 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2459 kqemu_modify_page(cpu_single_env, ram_addr);
2460#endif
2461 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2462#ifdef VBOX
2463 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2464#endif /* !VBOX */
2465 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2466 /* we remove the notdirty callback only if the code has been
2467 flushed */
2468 if (dirty_flags == 0xff)
2469 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2470}
2471
2472static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2473{
2474 unsigned long ram_addr;
2475 int dirty_flags;
2476#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2477 ram_addr = addr;
2478#elif !defined(VBOX)
2479 ram_addr = addr - (unsigned long)phys_ram_base;
2480#else
2481 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2482#endif
2483#ifdef VBOX
2484 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2485 dirty_flags = 0xff;
2486 else
2487#endif /* VBOX */
2488 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2489 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2490#if !defined(CONFIG_USER_ONLY)
2491 tb_invalidate_phys_page_fast(ram_addr, 2);
2492# ifdef VBOX
2493 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2494 dirty_flags = 0xff;
2495 else
2496# endif /* VBOX */
2497 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2498#endif
2499 }
2500 stw_p((uint8_t *)(long)addr, val);
2501#ifdef USE_KQEMU
2502 if (cpu_single_env->kqemu_enabled &&
2503 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2504 kqemu_modify_page(cpu_single_env, ram_addr);
2505#endif
2506 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2507#ifdef VBOX
2508 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2509#endif
2510 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2511 /* we remove the notdirty callback only if the code has been
2512 flushed */
2513 if (dirty_flags == 0xff)
2514 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2515}
2516
2517static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2518{
2519 unsigned long ram_addr;
2520 int dirty_flags;
2521#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2522 ram_addr = addr;
2523#elif !defined(VBOX)
2524 ram_addr = addr - (unsigned long)phys_ram_base;
2525#else
2526 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2527#endif
2528#ifdef VBOX
2529 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2530 dirty_flags = 0xff;
2531 else
2532#endif /* VBOX */
2533 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2534 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2535#if !defined(CONFIG_USER_ONLY)
2536 tb_invalidate_phys_page_fast(ram_addr, 4);
2537# ifdef VBOX
2538 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2539 dirty_flags = 0xff;
2540 else
2541# endif /* VBOX */
2542 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2543#endif
2544 }
2545 stl_p((uint8_t *)(long)addr, val);
2546#ifdef USE_KQEMU
2547 if (cpu_single_env->kqemu_enabled &&
2548 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2549 kqemu_modify_page(cpu_single_env, ram_addr);
2550#endif
2551 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2552#ifdef VBOX
2553 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2554#endif
2555 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2556 /* we remove the notdirty callback only if the code has been
2557 flushed */
2558 if (dirty_flags == 0xff)
2559 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2560}
2561
2562static CPUReadMemoryFunc *error_mem_read[3] = {
2563 NULL, /* never used */
2564 NULL, /* never used */
2565 NULL, /* never used */
2566};
2567
2568static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2569 notdirty_mem_writeb,
2570 notdirty_mem_writew,
2571 notdirty_mem_writel,
2572};
2573
2574static void io_mem_init(void)
2575{
2576 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2577 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2578 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2579#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
2580 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2581 io_mem_nb = 6;
2582#else
2583 io_mem_nb = 5;
2584#endif
2585
2586#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2587 /* alloc dirty bits array */
2588 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2589 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2590#endif /* !VBOX */
2591}
2592
2593/* mem_read and mem_write are arrays of functions containing the
2594 function to access byte (index 0), word (index 1) and dword (index
2595 2). All functions must be supplied. If io_index is non-zero, the
2596 corresponding I/O zone is modified. If it is zero, a new I/O zone is
2597 allocated. The return value can be used with
2598 cpu_register_physical_memory(). (-1) is returned on error. */
2599int cpu_register_io_memory(int io_index,
2600 CPUReadMemoryFunc **mem_read,
2601 CPUWriteMemoryFunc **mem_write,
2602 void *opaque)
2603{
2604 int i;
2605
2606 if (io_index <= 0) {
2607 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2608 return -1;
2609 io_index = io_mem_nb++;
2610 } else {
2611 if (io_index >= IO_MEM_NB_ENTRIES)
2612 return -1;
2613 }
2614
2615 for(i = 0;i < 3; i++) {
2616 io_mem_read[io_index][i] = mem_read[i];
2617 io_mem_write[io_index][i] = mem_write[i];
2618 }
2619 io_mem_opaque[io_index] = opaque;
2620 return io_index << IO_MEM_SHIFT;
2621}
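
/* A self-contained sketch (separate demo program, not the real API) of the
 * dispatch scheme cpu_register_io_memory sets up above: each registered I/O
 * zone gets a row of three read and three write callbacks, indexed by access
 * size (0 = byte, 1 = word, 2 = dword), and accesses are routed through those
 * function pointers.  The toy "latch" device and all demo_* names are
 * illustrative assumptions. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_IO_NB_ENTRIES 8

typedef uint32_t (*demo_read_fn)(void *opaque, uint32_t addr);
typedef void (*demo_write_fn)(void *opaque, uint32_t addr, uint32_t val);

static demo_read_fn  demo_io_read[DEMO_IO_NB_ENTRIES][3];
static demo_write_fn demo_io_write[DEMO_IO_NB_ENTRIES][3];
static void *demo_io_opaque[DEMO_IO_NB_ENTRIES];
static int demo_io_nb = 1;                  /* slot 0 reserved, as a stand-in for RAM */

/* Allocate a new zone and install the callbacks; returns the zone index. */
static int demo_register_io(demo_read_fn *rd, demo_write_fn *wr, void *opaque)
{
    int io_index, i;
    if (demo_io_nb >= DEMO_IO_NB_ENTRIES)
        return -1;
    io_index = demo_io_nb++;
    for (i = 0; i < 3; i++) {
        demo_io_read[io_index][i] = rd[i];
        demo_io_write[io_index][i] = wr[i];
    }
    demo_io_opaque[io_index] = opaque;
    return io_index;
}

/* A toy device: one 32-bit latch register. */
static uint32_t latch_read(void *opaque, uint32_t addr)
{ (void)addr; return *(uint32_t *)opaque; }
static void latch_write(void *opaque, uint32_t addr, uint32_t val)
{ (void)addr; *(uint32_t *)opaque = val; }

int main(void)
{
    uint32_t latch = 0;
    demo_read_fn  rd[3] = { latch_read, latch_read, latch_read };
    demo_write_fn wr[3] = { latch_write, latch_write, latch_write };
    int io_index = demo_register_io(rd, wr, &latch);

    demo_io_write[io_index][2](demo_io_opaque[io_index], 0x0, 0xdeadbeef);
    printf("latch = 0x%08x\n", demo_io_read[io_index][2](demo_io_opaque[io_index], 0x0));
    return 0;
}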
2622
2623CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2624{
2625 return io_mem_write[io_index >> IO_MEM_SHIFT];
2626}
2627
2628CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2629{
2630 return io_mem_read[io_index >> IO_MEM_SHIFT];
2631}
2632
2633/* physical memory access (slow version, mainly for debug) */
2634#if defined(CONFIG_USER_ONLY)
2635void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2636 int len, int is_write)
2637{
2638 int l, flags;
2639 target_ulong page;
2640 void * p;
2641
2642 while (len > 0) {
2643 page = addr & TARGET_PAGE_MASK;
2644 l = (page + TARGET_PAGE_SIZE) - addr;
2645 if (l > len)
2646 l = len;
2647 flags = page_get_flags(page);
2648 if (!(flags & PAGE_VALID))
2649 return;
2650 if (is_write) {
2651 if (!(flags & PAGE_WRITE))
2652 return;
2653 p = lock_user(addr, len, 0);
2654 memcpy(p, buf, len);
2655 unlock_user(p, addr, len);
2656 } else {
2657 if (!(flags & PAGE_READ))
2658 return;
2659 p = lock_user(addr, len, 1);
2660 memcpy(buf, p, len);
2661 unlock_user(p, addr, 0);
2662 }
2663 len -= l;
2664 buf += l;
2665 addr += l;
2666 }
2667}
2668
2669#else
2670void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2671 int len, int is_write)
2672{
2673 int l, io_index;
2674 uint8_t *ptr;
2675 uint32_t val;
2676 target_phys_addr_t page;
2677 unsigned long pd;
2678 PhysPageDesc *p;
2679
2680 while (len > 0) {
2681 page = addr & TARGET_PAGE_MASK;
2682 l = (page + TARGET_PAGE_SIZE) - addr;
2683 if (l > len)
2684 l = len;
2685 p = phys_page_find(page >> TARGET_PAGE_BITS);
2686 if (!p) {
2687 pd = IO_MEM_UNASSIGNED;
2688 } else {
2689 pd = p->phys_offset;
2690 }
2691
2692 if (is_write) {
2693 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2694 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2695 /* XXX: could force cpu_single_env to NULL to avoid
2696 potential bugs */
2697 if (l >= 4 && ((addr & 3) == 0)) {
2698 /* 32 bit write access */
2699#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2700 val = ldl_p(buf);
2701#else
2702 val = *(const uint32_t *)buf;
2703#endif
2704 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2705 l = 4;
2706 } else if (l >= 2 && ((addr & 1) == 0)) {
2707 /* 16 bit write access */
2708#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2709 val = lduw_p(buf);
2710#else
2711 val = *(const uint16_t *)buf;
2712#endif
2713 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2714 l = 2;
2715 } else {
2716 /* 8 bit write access */
2717#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2718 val = ldub_p(buf);
2719#else
2720 val = *(const uint8_t *)buf;
2721#endif
2722 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2723 l = 1;
2724 }
2725 } else {
2726 unsigned long addr1;
2727 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2728 /* RAM case */
2729#ifdef VBOX
2730 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
2731#else
2732 ptr = phys_ram_base + addr1;
2733 memcpy(ptr, buf, l);
2734#endif
2735 if (!cpu_physical_memory_is_dirty(addr1)) {
2736 /* invalidate code */
2737 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2738 /* set dirty bit */
2739#ifdef VBOX
2740 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2741#endif
2742 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2743 (0xff & ~CODE_DIRTY_FLAG);
2744 }
2745 }
2746 } else {
2747 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2748 !(pd & IO_MEM_ROMD)) {
2749 /* I/O case */
2750 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2751 if (l >= 4 && ((addr & 3) == 0)) {
2752 /* 32 bit read access */
2753 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2754#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2755 stl_p(buf, val);
2756#else
2757 *(uint32_t *)buf = val;
2758#endif
2759 l = 4;
2760 } else if (l >= 2 && ((addr & 1) == 0)) {
2761 /* 16 bit read access */
2762 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2763#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2764 stw_p(buf, val);
2765#else
2766 *(uint16_t *)buf = val;
2767#endif
2768 l = 2;
2769 } else {
2770 /* 8 bit read access */
2771 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2772#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2773 stb_p(buf, val);
2774#else
2775 *(uint8_t *)buf = val;
2776#endif
2777 l = 1;
2778 }
2779 } else {
2780 /* RAM case */
2781#ifdef VBOX
2782 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
2783#else
2784 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2785 (addr & ~TARGET_PAGE_MASK);
2786 memcpy(buf, ptr, l);
2787#endif
2788 }
2789 }
2790 len -= l;
2791 buf += l;
2792 addr += l;
2793 }
2794}
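
/* A self-contained sketch (separate demo program, not part of exec.c) of the
 * chunking loop used by cpu_physical_memory_rw above: a transfer is split so
 * that each piece stays inside one target page, letting every page take the
 * RAM or MMIO path independently.  DEMO_* names are illustrative assumptions. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_MASK (~(uint32_t)(DEMO_PAGE_SIZE - 1))

/* Split [addr, addr+len) into pieces that never cross a page boundary. */
static void demo_chunk_transfer(uint32_t addr, int len)
{
    while (len > 0) {
        uint32_t page = addr & DEMO_PAGE_MASK;
        int l = (int)((page + DEMO_PAGE_SIZE) - addr);   /* bytes left in this page */
        if (l > len)
            l = len;
        printf("page 0x%08x: offset 0x%03x, %d bytes\n",
               (unsigned)page, (unsigned)(addr & ~DEMO_PAGE_MASK), l);
        len -= l;
        addr += l;
    }
}

int main(void)
{
    demo_chunk_transfer(0x00000ff8, 16);   /* crosses one page boundary */
    return 0;
}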
2795
2796#ifndef VBOX
2797/* used for ROM loading : can write in RAM and ROM */
2798void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2799 const uint8_t *buf, int len)
2800{
2801 int l;
2802 uint8_t *ptr;
2803 target_phys_addr_t page;
2804 unsigned long pd;
2805 PhysPageDesc *p;
2806
2807 while (len > 0) {
2808 page = addr & TARGET_PAGE_MASK;
2809 l = (page + TARGET_PAGE_SIZE) - addr;
2810 if (l > len)
2811 l = len;
2812 p = phys_page_find(page >> TARGET_PAGE_BITS);
2813 if (!p) {
2814 pd = IO_MEM_UNASSIGNED;
2815 } else {
2816 pd = p->phys_offset;
2817 }
2818
2819 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2820 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2821 !(pd & IO_MEM_ROMD)) {
2822 /* do nothing */
2823 } else {
2824 unsigned long addr1;
2825 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2826 /* ROM/RAM case */
2827 ptr = phys_ram_base + addr1;
2828 memcpy(ptr, buf, l);
2829 }
2830 len -= l;
2831 buf += l;
2832 addr += l;
2833 }
2834}
2835#endif /* !VBOX */
2836
2837
2838/* warning: addr must be aligned */
2839uint32_t ldl_phys(target_phys_addr_t addr)
2840{
2841 int io_index;
2842 uint8_t *ptr;
2843 uint32_t val;
2844 unsigned long pd;
2845 PhysPageDesc *p;
2846
2847 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2848 if (!p) {
2849 pd = IO_MEM_UNASSIGNED;
2850 } else {
2851 pd = p->phys_offset;
2852 }
2853
2854 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2855 !(pd & IO_MEM_ROMD)) {
2856 /* I/O case */
2857 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2858 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2859 } else {
2860 /* RAM case */
2861#ifndef VBOX
2862 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2863 (addr & ~TARGET_PAGE_MASK);
2864 val = ldl_p(ptr);
2865#else
2866 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2867#endif
2868 }
2869 return val;
2870}
2871
2872/* warning: addr must be aligned */
2873uint64_t ldq_phys(target_phys_addr_t addr)
2874{
2875 int io_index;
2876 uint8_t *ptr;
2877 uint64_t val;
2878 unsigned long pd;
2879 PhysPageDesc *p;
2880
2881 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2882 if (!p) {
2883 pd = IO_MEM_UNASSIGNED;
2884 } else {
2885 pd = p->phys_offset;
2886 }
2887
2888 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2889 !(pd & IO_MEM_ROMD)) {
2890 /* I/O case */
2891 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2892#ifdef TARGET_WORDS_BIGENDIAN
2893 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2894 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2895#else
2896 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2897 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2898#endif
2899 } else {
2900 /* RAM case */
2901#ifndef VBOX
2902 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2903 (addr & ~TARGET_PAGE_MASK);
2904 val = ldq_p(ptr);
2905#else
2906 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2907#endif
2908 }
2909 return val;
2910}
2911
2912/* XXX: optimize */
2913uint32_t ldub_phys(target_phys_addr_t addr)
2914{
2915 uint8_t val;
2916 cpu_physical_memory_read(addr, &val, 1);
2917 return val;
2918}
2919
2920/* XXX: optimize */
2921uint32_t lduw_phys(target_phys_addr_t addr)
2922{
2923 uint16_t val;
2924 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2925 return tswap16(val);
2926}
2927
2928/* warning: addr must be aligned. The RAM page is not marked as dirty
2929 and the code inside is not invalidated. It is useful if the dirty
2930 bits are used to track modified PTEs */
2931void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2932{
2933 int io_index;
2934 uint8_t *ptr;
2935 unsigned long pd;
2936 PhysPageDesc *p;
2937
2938 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2939 if (!p) {
2940 pd = IO_MEM_UNASSIGNED;
2941 } else {
2942 pd = p->phys_offset;
2943 }
2944
2945 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2946 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2947 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2948 } else {
2949#ifndef VBOX
2950 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2951 (addr & ~TARGET_PAGE_MASK);
2952 stl_p(ptr, val);
2953#else
2954 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2955#endif
2956 }
2957}
2958
2959/* warning: addr must be aligned */
2960void stl_phys(target_phys_addr_t addr, uint32_t val)
2961{
2962 int io_index;
2963 uint8_t *ptr;
2964 unsigned long pd;
2965 PhysPageDesc *p;
2966
2967 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2968 if (!p) {
2969 pd = IO_MEM_UNASSIGNED;
2970 } else {
2971 pd = p->phys_offset;
2972 }
2973
2974 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2975 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2976 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2977 } else {
2978 unsigned long addr1;
2979 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2980 /* RAM case */
2981#ifndef VBOX
2982 ptr = phys_ram_base + addr1;
2983 stl_p(ptr, val);
2984#else
2985 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
2986#endif
2987 if (!cpu_physical_memory_is_dirty(addr1)) {
2988 /* invalidate code */
2989 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2990 /* set dirty bit */
2991#ifdef VBOX
2992 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2993#endif
2994 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2995 (0xff & ~CODE_DIRTY_FLAG);
2996 }
2997 }
2998}
2999
3000/* XXX: optimize */
3001void stb_phys(target_phys_addr_t addr, uint32_t val)
3002{
3003 uint8_t v = val;
3004 cpu_physical_memory_write(addr, &v, 1);
3005}
3006
3007/* XXX: optimize */
3008void stw_phys(target_phys_addr_t addr, uint32_t val)
3009{
3010 uint16_t v = tswap16(val);
3011 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3012}
3013
3014/* XXX: optimize */
3015void stq_phys(target_phys_addr_t addr, uint64_t val)
3016{
3017 val = tswap64(val);
3018 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3019}
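
/* A self-contained sketch (separate demo program) of the kind of byte
 * swapping lduw_phys/stw_phys/stq_phys rely on via tswap16/tswap64 when the
 * target and host endianness differ.  These demo_bswap helpers are
 * illustrative, not the real tswap implementations. */
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_bswap16(uint16_t v)
{
    return (uint16_t)((v >> 8) | (v << 8));
}

static uint64_t demo_bswap64(uint64_t v)
{
    /* Swap bytes within 16-bit units, then 16-bit halves within 32-bit
       units, then the two 32-bit halves. */
    v = ((v & 0x00ff00ff00ff00ffULL) << 8)  | ((v >> 8)  & 0x00ff00ff00ff00ffULL);
    v = ((v & 0x0000ffff0000ffffULL) << 16) | ((v >> 16) & 0x0000ffff0000ffffULL);
    return (v << 32) | (v >> 32);
}

int main(void)
{
    printf("0x%04x -> 0x%04x\n", 0x1234u, (unsigned)demo_bswap16(0x1234));
    printf("0x%016llx -> 0x%016llx\n",
           0x0102030405060708ULL,
           (unsigned long long)demo_bswap64(0x0102030405060708ULL));
    return 0;
}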
3020
3021#endif
3022
3023#ifndef VBOX
3024/* virtual memory access for debug */
3025int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3026 uint8_t *buf, int len, int is_write)
3027{
3028 int l;
3029 target_ulong page, phys_addr;
3030
3031 while (len > 0) {
3032 page = addr & TARGET_PAGE_MASK;
3033 phys_addr = cpu_get_phys_page_debug(env, page);
3034 /* if no physical page mapped, return an error */
3035 if (phys_addr == -1)
3036 return -1;
3037 l = (page + TARGET_PAGE_SIZE) - addr;
3038 if (l > len)
3039 l = len;
3040 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3041 buf, l, is_write);
3042 len -= l;
3043 buf += l;
3044 addr += l;
3045 }
3046 return 0;
3047}
3048
3049void dump_exec_info(FILE *f,
3050 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3051{
3052 int i, target_code_size, max_target_code_size;
3053 int direct_jmp_count, direct_jmp2_count, cross_page;
3054 TranslationBlock *tb;
3055
3056 target_code_size = 0;
3057 max_target_code_size = 0;
3058 cross_page = 0;
3059 direct_jmp_count = 0;
3060 direct_jmp2_count = 0;
3061 for(i = 0; i < nb_tbs; i++) {
3062 tb = &tbs[i];
3063 target_code_size += tb->size;
3064 if (tb->size > max_target_code_size)
3065 max_target_code_size = tb->size;
3066 if (tb->page_addr[1] != -1)
3067 cross_page++;
3068 if (tb->tb_next_offset[0] != 0xffff) {
3069 direct_jmp_count++;
3070 if (tb->tb_next_offset[1] != 0xffff) {
3071 direct_jmp2_count++;
3072 }
3073 }
3074 }
3075 /* XXX: avoid using doubles ? */
3076 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3077 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3078 nb_tbs ? target_code_size / nb_tbs : 0,
3079 max_target_code_size);
3080 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3081 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3082 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3083 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3084 cross_page,
3085 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3086 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3087 direct_jmp_count,
3088 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3089 direct_jmp2_count,
3090 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3091 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3092 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3093 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3094}
3095#endif /* !VBOX */
3096
3097#if !defined(CONFIG_USER_ONLY)
3098
3099#define MMUSUFFIX _cmmu
3100#define GETPC() NULL
3101#define env cpu_single_env
3102#define SOFTMMU_CODE_ACCESS
3103
3104#define SHIFT 0
3105#include "softmmu_template.h"
3106
3107#define SHIFT 1
3108#include "softmmu_template.h"
3109
3110#define SHIFT 2
3111#include "softmmu_template.h"
3112
3113#define SHIFT 3
3114#include "softmmu_template.h"
3115
3116#undef env
3117
3118#endif