VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 37675

Last change on this file since 37675 was 37675, checked in by vboxsync, 13 years ago

rem: Synced with v0.12.5.

  • Property svn:eol-style set to native
File size: 123.3 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#include "qemu-common.h"
56#include "tcg.h"
57#ifndef VBOX
58#include "hw/hw.h"
59#endif
60#include "osdep.h"
61#include "kvm.h"
62#if defined(CONFIG_USER_ONLY)
63#include <qemu.h>
64#endif
65
66//#define DEBUG_TB_INVALIDATE
67//#define DEBUG_FLUSH
68//#define DEBUG_TLB
69//#define DEBUG_UNASSIGNED
70
71/* make various TB consistency checks */
72//#define DEBUG_TB_CHECK
73//#define DEBUG_TLB_CHECK
74
75//#define DEBUG_IOPORT
76//#define DEBUG_SUBPAGE
77
78#if !defined(CONFIG_USER_ONLY)
79/* TB consistency checks only implemented for usermode emulation. */
80#undef DEBUG_TB_CHECK
81#endif
82
83#define SMC_BITMAP_USE_THRESHOLD 10
84
85#if defined(TARGET_SPARC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 41
87#elif defined(TARGET_SPARC)
88#define TARGET_PHYS_ADDR_SPACE_BITS 36
89#elif defined(TARGET_ALPHA)
90#define TARGET_PHYS_ADDR_SPACE_BITS 42
91#define TARGET_VIRT_ADDR_SPACE_BITS 42
92#elif defined(TARGET_PPC64)
93#define TARGET_PHYS_ADDR_SPACE_BITS 42
94#elif defined(TARGET_X86_64)
95#define TARGET_PHYS_ADDR_SPACE_BITS 42
96#elif defined(TARGET_I386)
97#define TARGET_PHYS_ADDR_SPACE_BITS 36
98#else
99#define TARGET_PHYS_ADDR_SPACE_BITS 32
100#endif
101
102static TranslationBlock *tbs;
103int code_gen_max_blocks;
104TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
105static int nb_tbs;
106/* any access to the tbs or the page table must use this lock */
107spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
108
109#ifndef VBOX
110#if defined(__arm__) || defined(__sparc_v9__)
111/* The prologue must be reachable with a direct jump. ARM and Sparc64
112 have limited branch ranges (possibly also PPC) so place it in a
113 section close to code segment. */
114#define code_gen_section \
115 __attribute__((__section__(".gen_code"))) \
116 __attribute__((aligned (32)))
117#elif defined(_WIN32)
118/* Maximum alignment for Win32 is 16. */
119#define code_gen_section \
120 __attribute__((aligned (16)))
121#else
122#define code_gen_section \
123 __attribute__((aligned (32)))
124#endif
125
126uint8_t code_gen_prologue[1024] code_gen_section;
127#else /* VBOX */
128extern uint8_t* code_gen_prologue;
129#endif /* VBOX */
130static uint8_t *code_gen_buffer;
131static unsigned long code_gen_buffer_size;
132/* threshold to flush the translated code buffer */
133static unsigned long code_gen_buffer_max_size;
134uint8_t *code_gen_ptr;
135
136#ifndef VBOX
137#if !defined(CONFIG_USER_ONLY)
138int phys_ram_fd;
139uint8_t *phys_ram_dirty;
140static int in_migration;
141
142typedef struct RAMBlock {
143 uint8_t *host;
144 ram_addr_t offset;
145 ram_addr_t length;
146 struct RAMBlock *next;
147} RAMBlock;
148
149static RAMBlock *ram_blocks;
150/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
151 then we can no longer assume contiguous ram offsets, and external uses
152 of this variable will break. */
153ram_addr_t last_ram_offset;
154#endif
155#else /* VBOX */
156/* we have memory ranges (the high PC-BIOS mapping) which
157 cause some pages to fall outside the dirty map here. */
158RTGCPHYS phys_ram_dirty_size;
159uint8_t *phys_ram_dirty;
160#endif /* VBOX */
161
162CPUState *first_cpu;
163/* current CPU in the current thread. It is only valid inside
164 cpu_exec() */
165CPUState *cpu_single_env;
166/* 0 = Do not count executed instructions.
167 1 = Precise instruction counting.
168 2 = Adaptive rate instruction counting. */
169int use_icount = 0;
170/* Current instruction counter. While executing translated code this may
171 include some instructions that have not yet been executed. */
172int64_t qemu_icount;
173
174typedef struct PageDesc {
175 /* list of TBs intersecting this ram page */
176 TranslationBlock *first_tb;
177 /* in order to optimize self modifying code, we count the number
178 of lookups we do to a given page to use a bitmap */
179 unsigned int code_write_count;
180 uint8_t *code_bitmap;
181#if defined(CONFIG_USER_ONLY)
182 unsigned long flags;
183#endif
184} PageDesc;
185
186typedef struct PhysPageDesc {
187 /* offset in host memory of the page + io_index in the low bits */
188 ram_addr_t phys_offset;
189 ram_addr_t region_offset;
190} PhysPageDesc;
191
192#define L2_BITS 10
193#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
194/* XXX: this is a temporary hack for alpha target.
195 * In the future, this is to be replaced by a multi-level table
196 * to actually be able to handle the complete 64 bits address space.
197 */
198#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
199#else
200#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
201#endif
202#ifdef VBOX
203#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
204#endif
205
206#ifdef VBOX
207#define L0_SIZE (1 << L0_BITS)
208#endif
209#define L1_SIZE (1 << L1_BITS)
210#define L2_SIZE (1 << L2_BITS)
211
212unsigned long qemu_real_host_page_size;
213unsigned long qemu_host_page_bits;
214unsigned long qemu_host_page_size;
215unsigned long qemu_host_page_mask;
216
217/* XXX: for system emulation, it could just be an array */
218#ifndef VBOX
219static PageDesc *l1_map[L1_SIZE];
220static PhysPageDesc **l1_phys_map;
221#else
222static unsigned l0_map_max_used = 0;
223static PageDesc **l0_map[L0_SIZE];
224static void **l0_phys_map[L0_SIZE];
225#endif
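/* Illustrative sketch of how a page index is split across the map levels
   declared above, assuming 4 KiB target pages (TARGET_PAGE_BITS == 12) and
   the L2_BITS == 10 defined earlier:

       target_ulong index = addr >> TARGET_PAGE_BITS;
       unsigned i2 = index & (L2_SIZE - 1);              // slot inside a PageDesc block
       unsigned i1 = (index >> L2_BITS) & (L1_SIZE - 1); // slot inside an l1_map
   #ifdef VBOX
       unsigned i0 = index >> (L1_BITS + L2_BITS);       // slot inside l0_map
   #endif

   The VBox build adds the extra L0 level because TARGET_PHYS_ADDR_SPACE_BITS
   may exceed 32, so physical pages above 4 GiB still get a table slot.
   page_l1_map() and phys_page_find_alloc() below perform this arithmetic. */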
226
227#if !defined(CONFIG_USER_ONLY)
228static void io_mem_init(void);
229
230/* io memory support */
231CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
232CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
233void *io_mem_opaque[IO_MEM_NB_ENTRIES];
234static char io_mem_used[IO_MEM_NB_ENTRIES];
235static int io_mem_watch;
236#endif
237
238#ifndef VBOX
239/* log support */
240static const char *logfilename = "/tmp/qemu.log";
241#endif /* !VBOX */
242FILE *logfile;
243int loglevel;
244#ifndef VBOX
245static int log_append = 0;
246#endif
247
248/* statistics */
249#ifndef VBOX
250static int tlb_flush_count;
251static int tb_flush_count;
252static int tb_phys_invalidate_count;
253#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
254uint32_t tlb_flush_count;
255uint32_t tb_flush_count;
256uint32_t tb_phys_invalidate_count;
257#endif /* VBOX */
258
259#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
260typedef struct subpage_t {
261 target_phys_addr_t base;
262 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
263 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
264 void *opaque[TARGET_PAGE_SIZE][2][4];
265 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
266} subpage_t;
267
268#ifndef VBOX
269#ifdef _WIN32
270static void map_exec(void *addr, long size)
271{
272 DWORD old_protect;
273 VirtualProtect(addr, size,
274 PAGE_EXECUTE_READWRITE, &old_protect);
275
276}
277#else
278static void map_exec(void *addr, long size)
279{
280 unsigned long start, end, page_size;
281
282 page_size = getpagesize();
283 start = (unsigned long)addr;
284 start &= ~(page_size - 1);
285
286 end = (unsigned long)addr + size;
287 end += page_size - 1;
288 end &= ~(page_size - 1);
289
290 mprotect((void *)start, end - start,
291 PROT_READ | PROT_WRITE | PROT_EXEC);
292}
293#endif
294#else /* VBOX */
295static void map_exec(void *addr, long size)
296{
297 RTMemProtect(addr, size,
298 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
299}
300#endif /* VBOX */
301
302static void page_init(void)
303{
304 /* NOTE: we can always suppose that qemu_host_page_size >=
305 TARGET_PAGE_SIZE */
306#ifdef VBOX
307 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
308 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
309 qemu_real_host_page_size = PAGE_SIZE;
310#else /* !VBOX */
311#ifdef _WIN32
312 {
313 SYSTEM_INFO system_info;
314
315 GetSystemInfo(&system_info);
316 qemu_real_host_page_size = system_info.dwPageSize;
317 }
318#else
319 qemu_real_host_page_size = getpagesize();
320#endif
321#endif /* !VBOX */
322 if (qemu_host_page_size == 0)
323 qemu_host_page_size = qemu_real_host_page_size;
324 if (qemu_host_page_size < TARGET_PAGE_SIZE)
325 qemu_host_page_size = TARGET_PAGE_SIZE;
326 qemu_host_page_bits = 0;
327#ifndef VBOX
328 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
329#else
330 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
331#endif
332 qemu_host_page_bits++;
333 qemu_host_page_mask = ~(qemu_host_page_size - 1);
334#ifndef VBOX
335 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
336 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
337#endif
338
339#ifdef VBOX
340 /* We use other means to set reserved bit on our pages */
341#else /* !VBOX */
342#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
343 {
344 long long startaddr, endaddr;
345 FILE *f;
346 int n;
347
348 mmap_lock();
349 last_brk = (unsigned long)sbrk(0);
350 f = fopen("/proc/self/maps", "r");
351 if (f) {
352 do {
353 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
354 if (n == 2) {
355 startaddr = MIN(startaddr,
356 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
357 endaddr = MIN(endaddr,
358 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
359 page_set_flags(startaddr & TARGET_PAGE_MASK,
360 TARGET_PAGE_ALIGN(endaddr),
361 PAGE_RESERVED);
362 }
363 } while (!feof(f));
364 fclose(f);
365 }
366 mmap_unlock();
367 }
368#endif
369#endif /* !VBOX */
370}
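/* Worked example for the bookkeeping done in page_init() above, assuming a
   4 KiB host page and a 4 KiB target page (purely illustrative):

       qemu_real_host_page_size = 4096;
       qemu_host_page_size      = 4096;           // max(host page, TARGET_PAGE_SIZE)
       qemu_host_page_bits      = 12;             // smallest n with (1 << n) >= 4096
       qemu_host_page_mask      = ~(4096UL - 1);  // ...FFFFF000

   On hosts with larger pages (e.g. 8 KiB) the mask widens accordingly, which
   is what lets code such as the mprotect() path in tb_alloc_page() round
   guest ranges up to whole host pages. */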
371
372static inline PageDesc **page_l1_map(target_ulong index)
373{
374#ifndef VBOX
375#if TARGET_LONG_BITS > 32
376 /* Host memory outside guest VM. For 32-bit targets we have already
377 excluded high addresses. */
378 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
379 return NULL;
380#endif
381 return &l1_map[index >> L2_BITS];
382#else /* VBOX */
383 PageDesc **l1_map;
384 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
385 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
386 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
387 NULL);
388 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
389 if (RT_UNLIKELY(!l1_map))
390 {
391 unsigned i0 = index >> (L1_BITS + L2_BITS);
392 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
393 if (RT_UNLIKELY(!l1_map))
394 return NULL;
395 if (i0 >= l0_map_max_used)
396 l0_map_max_used = i0 + 1;
397 }
398 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
399#endif /* VBOX */
400}
401
402static inline PageDesc *page_find_alloc(target_ulong index)
403{
404 PageDesc **lp, *p;
405 lp = page_l1_map(index);
406 if (!lp)
407 return NULL;
408
409 p = *lp;
410 if (!p) {
411 /* allocate if not found */
412#if defined(CONFIG_USER_ONLY)
413 size_t len = sizeof(PageDesc) * L2_SIZE;
414 /* Don't use qemu_malloc because it may recurse. */
415 p = mmap(NULL, len, PROT_READ | PROT_WRITE,
416 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
417 *lp = p;
418 if (h2g_valid(p)) {
419 unsigned long addr = h2g(p);
420 page_set_flags(addr & TARGET_PAGE_MASK,
421 TARGET_PAGE_ALIGN(addr + len),
422 PAGE_RESERVED);
423 }
424#else
425 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
426 *lp = p;
427#endif
428 }
429 return p + (index & (L2_SIZE - 1));
430}
431
432static inline PageDesc *page_find(target_ulong index)
433{
434 PageDesc **lp, *p;
435 lp = page_l1_map(index);
436 if (!lp)
437 return NULL;
438
439 p = *lp;
440 if (!p) {
441 return NULL;
442 }
443 return p + (index & (L2_SIZE - 1));
444}
445
446static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
447{
448 void **lp, **p;
449 PhysPageDesc *pd;
450
451#ifndef VBOX
452 p = (void **)l1_phys_map;
453#if TARGET_PHYS_ADDR_SPACE_BITS > 32
454
455#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
456#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
457#endif
458 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
459 p = *lp;
460 if (!p) {
461 /* allocate if not found */
462 if (!alloc)
463 return NULL;
464 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
465 memset(p, 0, sizeof(void *) * L1_SIZE);
466 *lp = p;
467 }
468#endif
469#else /* VBOX */
470 /* level 0 lookup and lazy allocation of level 1 map. */
471 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
472 return NULL;
473 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
474 if (RT_UNLIKELY(!p)) {
475 if (!alloc)
476 return NULL;
477 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
478 memset(p, 0, sizeof(void **) * L1_SIZE);
479 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
480 }
481
482 /* level 1 lookup and lazy allocation of level 2 map. */
483#endif /* VBOX */
484 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
485 pd = *lp;
486 if (!pd) {
487 int i;
488 /* allocate if not found */
489 if (!alloc)
490 return NULL;
491 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
492 *lp = pd;
493 for (i = 0; i < L2_SIZE; i++) {
494 pd[i].phys_offset = IO_MEM_UNASSIGNED;
495 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
496 }
497 }
498 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
499}
500
501static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
502{
503 return phys_page_find_alloc(index, 0);
504}
505
506#if !defined(CONFIG_USER_ONLY)
507static void tlb_protect_code(ram_addr_t ram_addr);
508static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
509 target_ulong vaddr);
510#define mmap_lock() do { } while(0)
511#define mmap_unlock() do { } while(0)
512#endif
513
514#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
515 most of the code in raw or hwacc mode. */
516#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
517#else /* !VBOX */
518#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
519#endif /* !VBOX */
520
521#if defined(CONFIG_USER_ONLY)
522/* Currently it is not recommended to allocate big chunks of data in
523 user mode. It will change when a dedicated libc is used */
524#define USE_STATIC_CODE_GEN_BUFFER
525#endif
526
527#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
528# error "VBox allocates codegen buffer dynamically"
529#endif
530
531#ifdef USE_STATIC_CODE_GEN_BUFFER
532static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
533#endif
534
535static void code_gen_alloc(unsigned long tb_size)
536{
537#ifdef USE_STATIC_CODE_GEN_BUFFER
538 code_gen_buffer = static_code_gen_buffer;
539 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
540 map_exec(code_gen_buffer, code_gen_buffer_size);
541#else
542# ifdef VBOX
543 /* We cannot use phys_ram_size here, as it's 0 now,
544 * it only gets initialized once the RAM registration callback
545 * (REMR3NotifyPhysRamRegister()) has been called.
546 */
547 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
548# else /* !VBOX */
549 code_gen_buffer_size = tb_size;
550 if (code_gen_buffer_size == 0) {
551#if defined(CONFIG_USER_ONLY)
552 /* in user mode, phys_ram_size is not meaningful */
553 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
554#else
555 /* XXX: needs adjustments */
556 code_gen_buffer_size = (unsigned long)(ram_size / 4);
557#endif
558 }
559 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
560 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
561# endif /* !VBOX */
562 /* The code gen buffer location may have constraints depending on
563 the host cpu and OS */
564# ifdef VBOX
565 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
566
567 if (!code_gen_buffer) {
568 LogRel(("REM: failed allocate codegen buffer %lld\n",
569 code_gen_buffer_size));
570 return;
571 }
572# else /* !VBOX */
573#if defined(__linux__)
574 {
575 int flags;
576 void *start = NULL;
577
578 flags = MAP_PRIVATE | MAP_ANONYMOUS;
579#if defined(__x86_64__)
580 flags |= MAP_32BIT;
581 /* Cannot map more than that */
582 if (code_gen_buffer_size > (800 * 1024 * 1024))
583 code_gen_buffer_size = (800 * 1024 * 1024);
584#elif defined(__sparc_v9__)
585 // Map the buffer below 2G, so we can use direct calls and branches
586 flags |= MAP_FIXED;
587 start = (void *) 0x60000000UL;
588 if (code_gen_buffer_size > (512 * 1024 * 1024))
589 code_gen_buffer_size = (512 * 1024 * 1024);
590#elif defined(__arm__)
591 /* Map the buffer below 32M, so we can use direct calls and branches */
592 flags |= MAP_FIXED;
593 start = (void *) 0x01000000UL;
594 if (code_gen_buffer_size > 16 * 1024 * 1024)
595 code_gen_buffer_size = 16 * 1024 * 1024;
596#endif
597 code_gen_buffer = mmap(start, code_gen_buffer_size,
598 PROT_WRITE | PROT_READ | PROT_EXEC,
599 flags, -1, 0);
600 if (code_gen_buffer == MAP_FAILED) {
601 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
602 exit(1);
603 }
604 }
605#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
606 {
607 int flags;
608 void *addr = NULL;
609 flags = MAP_PRIVATE | MAP_ANONYMOUS;
610#if defined(__x86_64__)
611 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
612 * 0x40000000 is free */
613 flags |= MAP_FIXED;
614 addr = (void *)0x40000000;
615 /* Cannot map more than that */
616 if (code_gen_buffer_size > (800 * 1024 * 1024))
617 code_gen_buffer_size = (800 * 1024 * 1024);
618#endif
619 code_gen_buffer = mmap(addr, code_gen_buffer_size,
620 PROT_WRITE | PROT_READ | PROT_EXEC,
621 flags, -1, 0);
622 if (code_gen_buffer == MAP_FAILED) {
623 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
624 exit(1);
625 }
626 }
627#else
628 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
629 map_exec(code_gen_buffer, code_gen_buffer_size);
630#endif
631# endif /* !VBOX */
632#endif /* !USE_STATIC_CODE_GEN_BUFFER */
633#ifndef VBOX /** @todo r=bird: why are we different? */
634 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
635#else
636 map_exec(code_gen_prologue, _1K);
637#endif
638 code_gen_buffer_max_size = code_gen_buffer_size -
639 code_gen_max_block_size();
640 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
641 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
642}
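/* Sizing note (illustrative): code_gen_buffer_max_size deliberately leaves
   room for one maximally sized translation block, so the overflow check made
   later in tb_alloc(),

       if (nb_tbs >= code_gen_max_blocks ||
           (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
           return NULL;   // caller flushes everything and retries

   can never hand out a block that would run past the end of code_gen_buffer. */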
643
644/* Must be called before using the QEMU cpus. 'tb_size' is the size
645 (in bytes) allocated to the translation buffer. Zero means default
646 size. */
647void cpu_exec_init_all(unsigned long tb_size)
648{
649 cpu_gen_init();
650 code_gen_alloc(tb_size);
651 code_gen_ptr = code_gen_buffer;
652 page_init();
653#if !defined(CONFIG_USER_ONLY)
654 io_mem_init();
655#endif
656}
657
658#ifndef VBOX
659#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
660
661static void cpu_common_pre_save(void *opaque)
662{
663 CPUState *env = opaque;
664
665 cpu_synchronize_state(env);
666}
667
668static int cpu_common_pre_load(void *opaque)
669{
670 CPUState *env = opaque;
671
672 cpu_synchronize_state(env);
673 return 0;
674}
675
676static int cpu_common_post_load(void *opaque, int version_id)
677{
678 CPUState *env = opaque;
679
680 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
681 version_id is increased. */
682 env->interrupt_request &= ~0x01;
683 tlb_flush(env, 1);
684
685 return 0;
686}
687
688static const VMStateDescription vmstate_cpu_common = {
689 .name = "cpu_common",
690 .version_id = 1,
691 .minimum_version_id = 1,
692 .minimum_version_id_old = 1,
693 .pre_save = cpu_common_pre_save,
694 .pre_load = cpu_common_pre_load,
695 .post_load = cpu_common_post_load,
696 .fields = (VMStateField []) {
697 VMSTATE_UINT32(halted, CPUState),
698 VMSTATE_UINT32(interrupt_request, CPUState),
699 VMSTATE_END_OF_LIST()
700 }
701};
702#endif
703
704CPUState *qemu_get_cpu(int cpu)
705{
706 CPUState *env = first_cpu;
707
708 while (env) {
709 if (env->cpu_index == cpu)
710 break;
711 env = env->next_cpu;
712 }
713
714 return env;
715}
716
717#endif /* !VBOX */
718
719void cpu_exec_init(CPUState *env)
720{
721 CPUState **penv;
722 int cpu_index;
723
724#if defined(CONFIG_USER_ONLY)
725 cpu_list_lock();
726#endif
727 env->next_cpu = NULL;
728 penv = &first_cpu;
729 cpu_index = 0;
730 while (*penv != NULL) {
731 penv = &(*penv)->next_cpu;
732 cpu_index++;
733 }
734 env->cpu_index = cpu_index;
735 env->numa_node = 0;
736 QTAILQ_INIT(&env->breakpoints);
737 QTAILQ_INIT(&env->watchpoints);
738 *penv = env;
739#ifndef VBOX
740#if defined(CONFIG_USER_ONLY)
741 cpu_list_unlock();
742#endif
743#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
744 vmstate_register(cpu_index, &vmstate_cpu_common, env);
745 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
746 cpu_save, cpu_load, env);
747#endif
748#endif /* !VBOX */
749}
750
751static inline void invalidate_page_bitmap(PageDesc *p)
752{
753 if (p->code_bitmap) {
754 qemu_free(p->code_bitmap);
755 p->code_bitmap = NULL;
756 }
757 p->code_write_count = 0;
758}
759
760/* set to NULL all the 'first_tb' fields in all PageDescs */
761static void page_flush_tb(void)
762{
763 int i, j;
764 PageDesc *p;
765#ifdef VBOX
766 int k;
767#endif
768
769#ifdef VBOX
770 k = l0_map_max_used;
771 while (k-- > 0) {
772 PageDesc **l1_map = l0_map[k];
773 if (l1_map) {
774#endif
775 for(i = 0; i < L1_SIZE; i++) {
776 p = l1_map[i];
777 if (p) {
778 for(j = 0; j < L2_SIZE; j++) {
779 p->first_tb = NULL;
780 invalidate_page_bitmap(p);
781 p++;
782 }
783 }
784 }
785#ifdef VBOX
786 }
787 }
788#endif
789}
790
791/* flush all the translation blocks */
792/* XXX: tb_flush is currently not thread safe */
793void tb_flush(CPUState *env1)
794{
795 CPUState *env;
796#ifdef VBOX
797 STAM_PROFILE_START(&env1->StatTbFlush, a);
798#endif
799#if defined(DEBUG_FLUSH)
800 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
801 (unsigned long)(code_gen_ptr - code_gen_buffer),
802 nb_tbs, nb_tbs > 0 ?
803 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
804#endif
805 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
806 cpu_abort(env1, "Internal error: code buffer overflow\n");
807
808 nb_tbs = 0;
809
810 for(env = first_cpu; env != NULL; env = env->next_cpu) {
811 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
812 }
813
814 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
815 page_flush_tb();
816
817 code_gen_ptr = code_gen_buffer;
818 /* XXX: flush processor icache at this point if cache flush is
819 expensive */
820 tb_flush_count++;
821#ifdef VBOX
822 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
823#endif
824}
825
826#ifdef DEBUG_TB_CHECK
827
828static void tb_invalidate_check(target_ulong address)
829{
830 TranslationBlock *tb;
831 int i;
832 address &= TARGET_PAGE_MASK;
833 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
834 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
835 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
836 address >= tb->pc + tb->size)) {
837 printf("ERROR invalidate: address=" TARGET_FMT_lx
838 " PC=%08lx size=%04x\n",
839 address, (long)tb->pc, tb->size);
840 }
841 }
842 }
843}
844
845/* verify that all the pages have correct rights for code */
846static void tb_page_check(void)
847{
848 TranslationBlock *tb;
849 int i, flags1, flags2;
850
851 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
852 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
853 flags1 = page_get_flags(tb->pc);
854 flags2 = page_get_flags(tb->pc + tb->size - 1);
855 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
856 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
857 (long)tb->pc, tb->size, flags1, flags2);
858 }
859 }
860 }
861}
862
863#endif
864
865/* invalidate one TB */
866static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
867 int next_offset)
868{
869 TranslationBlock *tb1;
870 for(;;) {
871 tb1 = *ptb;
872 if (tb1 == tb) {
873 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
874 break;
875 }
876 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
877 }
878}
879
880static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
881{
882 TranslationBlock *tb1;
883 unsigned int n1;
884
885 for(;;) {
886 tb1 = *ptb;
887 n1 = (long)tb1 & 3;
888 tb1 = (TranslationBlock *)((long)tb1 & ~3);
889 if (tb1 == tb) {
890 *ptb = tb1->page_next[n1];
891 break;
892 }
893 ptb = &tb1->page_next[n1];
894 }
895}
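/* Note on the pointer tagging used above and in the jump lists below: a page
   list entry is the TB pointer OR'ed with the page slot number in its two low
   bits (0 or 1 = which tb->page_addr[] entry this page is; the value 2 is
   reserved as the jmp_first list terminator).  Decoding an entry therefore
   looks like this (illustrative):

       TranslationBlock *entry = p->first_tb;
       unsigned n = (long)entry & 3;                                  // 0 or 1
       TranslationBlock *tb = (TranslationBlock *)((long)entry & ~3); // real pointer

   This works because TranslationBlock structures are at least 4-byte aligned,
   leaving the two low pointer bits free for the tag. */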
896
897static inline void tb_jmp_remove(TranslationBlock *tb, int n)
898{
899 TranslationBlock *tb1, **ptb;
900 unsigned int n1;
901
902 ptb = &tb->jmp_next[n];
903 tb1 = *ptb;
904 if (tb1) {
905 /* find tb(n) in circular list */
906 for(;;) {
907 tb1 = *ptb;
908 n1 = (long)tb1 & 3;
909 tb1 = (TranslationBlock *)((long)tb1 & ~3);
910 if (n1 == n && tb1 == tb)
911 break;
912 if (n1 == 2) {
913 ptb = &tb1->jmp_first;
914 } else {
915 ptb = &tb1->jmp_next[n1];
916 }
917 }
918 /* now we can suppress tb(n) from the list */
919 *ptb = tb->jmp_next[n];
920
921 tb->jmp_next[n] = NULL;
922 }
923}
924
925/* reset the jump entry 'n' of a TB so that it is not chained to
926 another TB */
927static inline void tb_reset_jump(TranslationBlock *tb, int n)
928{
929 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
930}
931
932void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
933{
934 CPUState *env;
935 PageDesc *p;
936 unsigned int h, n1;
937 target_phys_addr_t phys_pc;
938 TranslationBlock *tb1, *tb2;
939
940 /* remove the TB from the hash list */
941 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
942 h = tb_phys_hash_func(phys_pc);
943 tb_remove(&tb_phys_hash[h], tb,
944 offsetof(TranslationBlock, phys_hash_next));
945
946 /* remove the TB from the page list */
947 if (tb->page_addr[0] != page_addr) {
948 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
949 tb_page_remove(&p->first_tb, tb);
950 invalidate_page_bitmap(p);
951 }
952 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
953 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
954 tb_page_remove(&p->first_tb, tb);
955 invalidate_page_bitmap(p);
956 }
957
958 tb_invalidated_flag = 1;
959
960 /* remove the TB from the hash list */
961 h = tb_jmp_cache_hash_func(tb->pc);
962 for(env = first_cpu; env != NULL; env = env->next_cpu) {
963 if (env->tb_jmp_cache[h] == tb)
964 env->tb_jmp_cache[h] = NULL;
965 }
966
967 /* suppress this TB from the two jump lists */
968 tb_jmp_remove(tb, 0);
969 tb_jmp_remove(tb, 1);
970
971 /* suppress any remaining jumps to this TB */
972 tb1 = tb->jmp_first;
973 for(;;) {
974 n1 = (long)tb1 & 3;
975 if (n1 == 2)
976 break;
977 tb1 = (TranslationBlock *)((long)tb1 & ~3);
978 tb2 = tb1->jmp_next[n1];
979 tb_reset_jump(tb1, n1);
980 tb1->jmp_next[n1] = NULL;
981 tb1 = tb2;
982 }
983 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
984
985 tb_phys_invalidate_count++;
986}
987
988#ifdef VBOX
989
990void tb_invalidate_virt(CPUState *env, uint32_t eip)
991{
992# if 1
993 tb_flush(env);
994# else
995 uint8_t *cs_base, *pc;
996 unsigned int flags, h, phys_pc;
997 TranslationBlock *tb, **ptb;
998
999 flags = env->hflags;
1000 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1001 cs_base = env->segs[R_CS].base;
1002 pc = cs_base + eip;
1003
1004 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
1005 flags);
1006
1007 if(tb)
1008 {
1009# ifdef DEBUG
1010 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1011# endif
1012 tb_invalidate(tb);
1013 //Note: this will leak TBs, but the whole cache will be flushed
1014 // when it happens too often
1015 tb->pc = 0;
1016 tb->cs_base = 0;
1017 tb->flags = 0;
1018 }
1019# endif
1020}
1021
1022# ifdef VBOX_STRICT
1023/**
1024 * Gets the page offset.
1025 */
1026unsigned long get_phys_page_offset(target_ulong addr)
1027{
1028 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1029 return p ? p->phys_offset : 0;
1030}
1031# endif /* VBOX_STRICT */
1032
1033#endif /* VBOX */
1034
1035static inline void set_bits(uint8_t *tab, int start, int len)
1036{
1037 int end, mask, end1;
1038
1039 end = start + len;
1040 tab += start >> 3;
1041 mask = 0xff << (start & 7);
1042 if ((start & ~7) == (end & ~7)) {
1043 if (start < end) {
1044 mask &= ~(0xff << (end & 7));
1045 *tab |= mask;
1046 }
1047 } else {
1048 *tab++ |= mask;
1049 start = (start + 8) & ~7;
1050 end1 = end & ~7;
1051 while (start < end1) {
1052 *tab++ = 0xff;
1053 start += 8;
1054 }
1055 if (start < end) {
1056 mask = ~(0xff << (end & 7));
1057 *tab |= mask;
1058 }
1059 }
1060}
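/* Usage sketch for set_bits() (illustrative): build_page_bitmap() below uses
   it to record, one bit per guest code byte, which parts of a page are
   covered by translated code.  For example

       set_bits(bitmap, 10, 4);

   sets bits 10..13, i.e. ORs 0x3c into bitmap[1]. */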
1061
1062static void build_page_bitmap(PageDesc *p)
1063{
1064 int n, tb_start, tb_end;
1065 TranslationBlock *tb;
1066
1067 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
1068
1069 tb = p->first_tb;
1070 while (tb != NULL) {
1071 n = (long)tb & 3;
1072 tb = (TranslationBlock *)((long)tb & ~3);
1073 /* NOTE: this is subtle as a TB may span two physical pages */
1074 if (n == 0) {
1075 /* NOTE: tb_end may be after the end of the page, but
1076 it is not a problem */
1077 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1078 tb_end = tb_start + tb->size;
1079 if (tb_end > TARGET_PAGE_SIZE)
1080 tb_end = TARGET_PAGE_SIZE;
1081 } else {
1082 tb_start = 0;
1083 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1084 }
1085 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1086 tb = tb->page_next[n];
1087 }
1088}
1089
1090TranslationBlock *tb_gen_code(CPUState *env,
1091 target_ulong pc, target_ulong cs_base,
1092 int flags, int cflags)
1093{
1094 TranslationBlock *tb;
1095 uint8_t *tc_ptr;
1096 target_ulong phys_pc, phys_page2, virt_page2;
1097 int code_gen_size;
1098
1099 phys_pc = get_phys_addr_code(env, pc);
1100 tb = tb_alloc(pc);
1101 if (!tb) {
1102 /* flush must be done */
1103 tb_flush(env);
1104 /* cannot fail at this point */
1105 tb = tb_alloc(pc);
1106 /* Don't forget to invalidate previous TB info. */
1107 tb_invalidated_flag = 1;
1108 }
1109 tc_ptr = code_gen_ptr;
1110 tb->tc_ptr = tc_ptr;
1111 tb->cs_base = cs_base;
1112 tb->flags = flags;
1113 tb->cflags = cflags;
1114 cpu_gen_code(env, tb, &code_gen_size);
1115 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1116
1117 /* check next page if needed */
1118 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1119 phys_page2 = -1;
1120 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1121 phys_page2 = get_phys_addr_code(env, virt_page2);
1122 }
1123 tb_link_phys(tb, phys_pc, phys_page2);
1124 return tb;
1125}
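/* Usage note (illustrative): the 'cflags' argument carries the CF_* flags,
   including the instruction count field.  The self-modifying-code paths
   further down regenerate a block limited to a single guest instruction with

       tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);

   so the instruction that patched the code cannot invalidate its own block
   again while it is still executing. */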
1126
1127/* invalidate all TBs which intersect with the target physical page
1128 starting in range [start;end[. NOTE: start and end must refer to
1129 the same physical page. 'is_cpu_write_access' should be true if called
1130 from a real cpu write access: the virtual CPU will exit the current
1131 TB if code is modified inside this TB. */
1132void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1133 int is_cpu_write_access)
1134{
1135 TranslationBlock *tb, *tb_next, *saved_tb;
1136 CPUState *env = cpu_single_env;
1137 target_ulong tb_start, tb_end;
1138 PageDesc *p;
1139 int n;
1140#ifdef TARGET_HAS_PRECISE_SMC
1141 int current_tb_not_found = is_cpu_write_access;
1142 TranslationBlock *current_tb = NULL;
1143 int current_tb_modified = 0;
1144 target_ulong current_pc = 0;
1145 target_ulong current_cs_base = 0;
1146 int current_flags = 0;
1147#endif /* TARGET_HAS_PRECISE_SMC */
1148
1149 p = page_find(start >> TARGET_PAGE_BITS);
1150 if (!p)
1151 return;
1152 if (!p->code_bitmap &&
1153 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1154 is_cpu_write_access) {
1155 /* build code bitmap */
1156 build_page_bitmap(p);
1157 }
1158
1159 /* we remove all the TBs in the range [start, end[ */
1160 /* XXX: see if in some cases it could be faster to invalidate all the code */
1161 tb = p->first_tb;
1162 while (tb != NULL) {
1163 n = (long)tb & 3;
1164 tb = (TranslationBlock *)((long)tb & ~3);
1165 tb_next = tb->page_next[n];
1166 /* NOTE: this is subtle as a TB may span two physical pages */
1167 if (n == 0) {
1168 /* NOTE: tb_end may be after the end of the page, but
1169 it is not a problem */
1170 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1171 tb_end = tb_start + tb->size;
1172 } else {
1173 tb_start = tb->page_addr[1];
1174 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1175 }
1176 if (!(tb_end <= start || tb_start >= end)) {
1177#ifdef TARGET_HAS_PRECISE_SMC
1178 if (current_tb_not_found) {
1179 current_tb_not_found = 0;
1180 current_tb = NULL;
1181 if (env->mem_io_pc) {
1182 /* now we have a real cpu fault */
1183 current_tb = tb_find_pc(env->mem_io_pc);
1184 }
1185 }
1186 if (current_tb == tb &&
1187 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1188 /* If we are modifying the current TB, we must stop
1189 its execution. We could be more precise by checking
1190 that the modification is after the current PC, but it
1191 would require a specialized function to partially
1192 restore the CPU state */
1193
1194 current_tb_modified = 1;
1195 cpu_restore_state(current_tb, env,
1196 env->mem_io_pc, NULL);
1197 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1198 &current_flags);
1199 }
1200#endif /* TARGET_HAS_PRECISE_SMC */
1201 /* we need to do that to handle the case where a signal
1202 occurs while doing tb_phys_invalidate() */
1203 saved_tb = NULL;
1204 if (env) {
1205 saved_tb = env->current_tb;
1206 env->current_tb = NULL;
1207 }
1208 tb_phys_invalidate(tb, -1);
1209 if (env) {
1210 env->current_tb = saved_tb;
1211 if (env->interrupt_request && env->current_tb)
1212 cpu_interrupt(env, env->interrupt_request);
1213 }
1214 }
1215 tb = tb_next;
1216 }
1217#if !defined(CONFIG_USER_ONLY)
1218 /* if no code remaining, no need to continue to use slow writes */
1219 if (!p->first_tb) {
1220 invalidate_page_bitmap(p);
1221 if (is_cpu_write_access) {
1222 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1223 }
1224 }
1225#endif
1226#ifdef TARGET_HAS_PRECISE_SMC
1227 if (current_tb_modified) {
1228 /* we generate a block containing just the instruction
1229 modifying the memory. It will ensure that it cannot modify
1230 itself */
1231 env->current_tb = NULL;
1232 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1233 cpu_resume_from_signal(env, NULL);
1234 }
1235#endif
1236}
1237
1238/* len must be <= 8 and start must be a multiple of len */
1239static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1240{
1241 PageDesc *p;
1242 int offset, b;
1243#if 0
1244 if (1) {
1245 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1246 cpu_single_env->mem_io_vaddr, len,
1247 cpu_single_env->eip,
1248 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1249 }
1250#endif
1251 p = page_find(start >> TARGET_PAGE_BITS);
1252 if (!p)
1253 return;
1254 if (p->code_bitmap) {
1255 offset = start & ~TARGET_PAGE_MASK;
1256 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1257 if (b & ((1 << len) - 1))
1258 goto do_invalidate;
1259 } else {
1260 do_invalidate:
1261 tb_invalidate_phys_page_range(start, start + len, 1);
1262 }
1263}
1264
1265#if !defined(CONFIG_SOFTMMU)
1266static void tb_invalidate_phys_page(target_phys_addr_t addr,
1267 unsigned long pc, void *puc)
1268{
1269 TranslationBlock *tb;
1270 PageDesc *p;
1271 int n;
1272#ifdef TARGET_HAS_PRECISE_SMC
1273 TranslationBlock *current_tb = NULL;
1274 CPUState *env = cpu_single_env;
1275 int current_tb_modified = 0;
1276 target_ulong current_pc = 0;
1277 target_ulong current_cs_base = 0;
1278 int current_flags = 0;
1279#endif
1280
1281 addr &= TARGET_PAGE_MASK;
1282 p = page_find(addr >> TARGET_PAGE_BITS);
1283 if (!p)
1284 return;
1285 tb = p->first_tb;
1286#ifdef TARGET_HAS_PRECISE_SMC
1287 if (tb && pc != 0) {
1288 current_tb = tb_find_pc(pc);
1289 }
1290#endif
1291 while (tb != NULL) {
1292 n = (long)tb & 3;
1293 tb = (TranslationBlock *)((long)tb & ~3);
1294#ifdef TARGET_HAS_PRECISE_SMC
1295 if (current_tb == tb &&
1296 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1297 /* If we are modifying the current TB, we must stop
1298 its execution. We could be more precise by checking
1299 that the modification is after the current PC, but it
1300 would require a specialized function to partially
1301 restore the CPU state */
1302
1303 current_tb_modified = 1;
1304 cpu_restore_state(current_tb, env, pc, puc);
1305 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1306 &current_flags);
1307 }
1308#endif /* TARGET_HAS_PRECISE_SMC */
1309 tb_phys_invalidate(tb, addr);
1310 tb = tb->page_next[n];
1311 }
1312 p->first_tb = NULL;
1313#ifdef TARGET_HAS_PRECISE_SMC
1314 if (current_tb_modified) {
1315 /* we generate a block containing just the instruction
1316 modifying the memory. It will ensure that it cannot modify
1317 itself */
1318 env->current_tb = NULL;
1319 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1320 cpu_resume_from_signal(env, puc);
1321 }
1322#endif
1323}
1324#endif
1325
1326/* add the tb in the target page and protect it if necessary */
1327static inline void tb_alloc_page(TranslationBlock *tb,
1328 unsigned int n, target_ulong page_addr)
1329{
1330 PageDesc *p;
1331 TranslationBlock *last_first_tb;
1332
1333 tb->page_addr[n] = page_addr;
1334 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1335 tb->page_next[n] = p->first_tb;
1336 last_first_tb = p->first_tb;
1337 p->first_tb = (TranslationBlock *)((long)tb | n);
1338 invalidate_page_bitmap(p);
1339
1340#if defined(TARGET_HAS_SMC) || 1
1341
1342#if defined(CONFIG_USER_ONLY)
1343 if (p->flags & PAGE_WRITE) {
1344 target_ulong addr;
1345 PageDesc *p2;
1346 int prot;
1347
1348 /* force the host page as non writable (writes will have a
1349 page fault + mprotect overhead) */
1350 page_addr &= qemu_host_page_mask;
1351 prot = 0;
1352 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1353 addr += TARGET_PAGE_SIZE) {
1354
1355 p2 = page_find (addr >> TARGET_PAGE_BITS);
1356 if (!p2)
1357 continue;
1358 prot |= p2->flags;
1359 p2->flags &= ~PAGE_WRITE;
1360 page_get_flags(addr);
1361 }
1362 mprotect(g2h(page_addr), qemu_host_page_size,
1363 (prot & PAGE_BITS) & ~PAGE_WRITE);
1364#ifdef DEBUG_TB_INVALIDATE
1365 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1366 page_addr);
1367#endif
1368 }
1369#else
1370 /* if some code is already present, then the pages are already
1371 protected. So we handle the case where only the first TB is
1372 allocated in a physical page */
1373 if (!last_first_tb) {
1374 tlb_protect_code(page_addr);
1375 }
1376#endif
1377
1378#endif /* TARGET_HAS_SMC */
1379}
1380
1381/* Allocate a new translation block. Flush the translation buffer if
1382 too many translation blocks or too much generated code. */
1383TranslationBlock *tb_alloc(target_ulong pc)
1384{
1385 TranslationBlock *tb;
1386
1387 if (nb_tbs >= code_gen_max_blocks ||
1388#ifndef VBOX
1389 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1390#else
1391 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1392#endif
1393 return NULL;
1394 tb = &tbs[nb_tbs++];
1395 tb->pc = pc;
1396 tb->cflags = 0;
1397 return tb;
1398}
1399
1400void tb_free(TranslationBlock *tb)
1401{
1402 /* In practice this is mostly used for single-use temporary TBs.
1403 Ignore the hard cases and just back up if this TB happens to
1404 be the last one generated. */
1405 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1406 code_gen_ptr = tb->tc_ptr;
1407 nb_tbs--;
1408 }
1409}
1410
1411/* add a new TB and link it to the physical page tables. phys_page2 is
1412 (-1) to indicate that only one page contains the TB. */
1413void tb_link_phys(TranslationBlock *tb,
1414 target_ulong phys_pc, target_ulong phys_page2)
1415{
1416 unsigned int h;
1417 TranslationBlock **ptb;
1418
1419 /* Grab the mmap lock to stop another thread invalidating this TB
1420 before we are done. */
1421 mmap_lock();
1422 /* add in the physical hash table */
1423 h = tb_phys_hash_func(phys_pc);
1424 ptb = &tb_phys_hash[h];
1425 tb->phys_hash_next = *ptb;
1426 *ptb = tb;
1427
1428 /* add in the page list */
1429 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1430 if (phys_page2 != -1)
1431 tb_alloc_page(tb, 1, phys_page2);
1432 else
1433 tb->page_addr[1] = -1;
1434
1435 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1436 tb->jmp_next[0] = NULL;
1437 tb->jmp_next[1] = NULL;
1438
1439 /* init original jump addresses */
1440 if (tb->tb_next_offset[0] != 0xffff)
1441 tb_reset_jump(tb, 0);
1442 if (tb->tb_next_offset[1] != 0xffff)
1443 tb_reset_jump(tb, 1);
1444
1445#ifdef DEBUG_TB_CHECK
1446 tb_page_check();
1447#endif
1448 mmap_unlock();
1449}
1450
1451/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1452 tb[1].tc_ptr. Return NULL if not found */
1453TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1454{
1455 int m_min, m_max, m;
1456 unsigned long v;
1457 TranslationBlock *tb;
1458
1459 if (nb_tbs <= 0)
1460 return NULL;
1461 if (tc_ptr < (unsigned long)code_gen_buffer ||
1462 tc_ptr >= (unsigned long)code_gen_ptr)
1463 return NULL;
1464 /* binary search (cf Knuth) */
1465 m_min = 0;
1466 m_max = nb_tbs - 1;
1467 while (m_min <= m_max) {
1468 m = (m_min + m_max) >> 1;
1469 tb = &tbs[m];
1470 v = (unsigned long)tb->tc_ptr;
1471 if (v == tc_ptr)
1472 return tb;
1473 else if (tc_ptr < v) {
1474 m_max = m - 1;
1475 } else {
1476 m_min = m + 1;
1477 }
1478 }
1479 return &tbs[m_max];
1480}
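/* Usage sketch (illustrative; 'host_pc' stands for a host code address taken
   from a fault handler or I/O callback): tb_find_pc() maps an address inside
   the translation buffer back to its TB, which the callers in this file then
   combine with cpu_restore_state() to recover the guest CPU state:

       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb)
           cpu_restore_state(tb, env, host_pc, NULL);
*/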
1481
1482static void tb_reset_jump_recursive(TranslationBlock *tb);
1483
1484static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1485{
1486 TranslationBlock *tb1, *tb_next, **ptb;
1487 unsigned int n1;
1488
1489 tb1 = tb->jmp_next[n];
1490 if (tb1 != NULL) {
1491 /* find head of list */
1492 for(;;) {
1493 n1 = (long)tb1 & 3;
1494 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1495 if (n1 == 2)
1496 break;
1497 tb1 = tb1->jmp_next[n1];
1498 }
1499 /* we are now sure that tb jumps to tb1 */
1500 tb_next = tb1;
1501
1502 /* remove tb from the jmp_first list */
1503 ptb = &tb_next->jmp_first;
1504 for(;;) {
1505 tb1 = *ptb;
1506 n1 = (long)tb1 & 3;
1507 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1508 if (n1 == n && tb1 == tb)
1509 break;
1510 ptb = &tb1->jmp_next[n1];
1511 }
1512 *ptb = tb->jmp_next[n];
1513 tb->jmp_next[n] = NULL;
1514
1515 /* suppress the jump to next tb in generated code */
1516 tb_reset_jump(tb, n);
1517
1518 /* suppress jumps in the tb on which we could have jumped */
1519 tb_reset_jump_recursive(tb_next);
1520 }
1521}
1522
1523static void tb_reset_jump_recursive(TranslationBlock *tb)
1524{
1525 tb_reset_jump_recursive2(tb, 0);
1526 tb_reset_jump_recursive2(tb, 1);
1527}
1528
1529#if defined(TARGET_HAS_ICE)
1530static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1531{
1532 target_phys_addr_t addr;
1533 target_ulong pd;
1534 ram_addr_t ram_addr;
1535 PhysPageDesc *p;
1536
1537 addr = cpu_get_phys_page_debug(env, pc);
1538 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1539 if (!p) {
1540 pd = IO_MEM_UNASSIGNED;
1541 } else {
1542 pd = p->phys_offset;
1543 }
1544 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1545 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1546}
1547#endif
1548
1549/* Add a watchpoint. */
1550int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1551 int flags, CPUWatchpoint **watchpoint)
1552{
1553 target_ulong len_mask = ~(len - 1);
1554 CPUWatchpoint *wp;
1555
1556 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1557 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1558 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1559 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1560#ifndef VBOX
1561 return -EINVAL;
1562#else
1563 return VERR_INVALID_PARAMETER;
1564#endif
1565 }
1566 wp = qemu_malloc(sizeof(*wp));
1567
1568 wp->vaddr = addr;
1569 wp->len_mask = len_mask;
1570 wp->flags = flags;
1571
1572 /* keep all GDB-injected watchpoints in front */
1573 if (flags & BP_GDB)
1574 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1575 else
1576 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1577
1578 tlb_flush_page(env, addr);
1579
1580 if (watchpoint)
1581 *watchpoint = wp;
1582 return 0;
1583}
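/* Usage sketch (illustrative; the address is arbitrary): insert a 4-byte
   GDB-style watchpoint and later drop it again by reference:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB, &wp) == 0)
           cpu_watchpoint_remove_by_ref(env, wp);

   The length must be a power of two (1, 2, 4 or 8) and the address must be
   aligned to it, otherwise the insert fails with the error code above. */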
1584
1585/* Remove a specific watchpoint. */
1586int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1587 int flags)
1588{
1589 target_ulong len_mask = ~(len - 1);
1590 CPUWatchpoint *wp;
1591
1592 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1593 if (addr == wp->vaddr && len_mask == wp->len_mask
1594 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1595 cpu_watchpoint_remove_by_ref(env, wp);
1596 return 0;
1597 }
1598 }
1599#ifndef VBOX
1600 return -ENOENT;
1601#else
1602 return VERR_NOT_FOUND;
1603#endif
1604}
1605
1606/* Remove a specific watchpoint by reference. */
1607void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1608{
1609 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1610
1611 tlb_flush_page(env, watchpoint->vaddr);
1612
1613 qemu_free(watchpoint);
1614}
1615
1616/* Remove all matching watchpoints. */
1617void cpu_watchpoint_remove_all(CPUState *env, int mask)
1618{
1619 CPUWatchpoint *wp, *next;
1620
1621 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1622 if (wp->flags & mask)
1623 cpu_watchpoint_remove_by_ref(env, wp);
1624 }
1625}
1626
1627/* Add a breakpoint. */
1628int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1629 CPUBreakpoint **breakpoint)
1630{
1631#if defined(TARGET_HAS_ICE)
1632 CPUBreakpoint *bp;
1633
1634 bp = qemu_malloc(sizeof(*bp));
1635
1636 bp->pc = pc;
1637 bp->flags = flags;
1638
1639 /* keep all GDB-injected breakpoints in front */
1640 if (flags & BP_GDB)
1641 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1642 else
1643 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1644
1645 breakpoint_invalidate(env, pc);
1646
1647 if (breakpoint)
1648 *breakpoint = bp;
1649 return 0;
1650#else
1651 return -ENOSYS;
1652#endif
1653}
1654
1655/* Remove a specific breakpoint. */
1656int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1657{
1658#if defined(TARGET_HAS_ICE)
1659 CPUBreakpoint *bp;
1660
1661 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1662 if (bp->pc == pc && bp->flags == flags) {
1663 cpu_breakpoint_remove_by_ref(env, bp);
1664 return 0;
1665 }
1666 }
1667# ifndef VBOX
1668 return -ENOENT;
1669# else
1670 return VERR_NOT_FOUND;
1671# endif
1672#else
1673 return -ENOSYS;
1674#endif
1675}
1676
1677/* Remove a specific breakpoint by reference. */
1678void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1679{
1680#if defined(TARGET_HAS_ICE)
1681 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1682
1683 breakpoint_invalidate(env, breakpoint->pc);
1684
1685 qemu_free(breakpoint);
1686#endif
1687}
1688
1689/* Remove all matching breakpoints. */
1690void cpu_breakpoint_remove_all(CPUState *env, int mask)
1691{
1692#if defined(TARGET_HAS_ICE)
1693 CPUBreakpoint *bp, *next;
1694
1695 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1696 if (bp->flags & mask)
1697 cpu_breakpoint_remove_by_ref(env, bp);
1698 }
1699#endif
1700}
1701
1702/* enable or disable single step mode. EXCP_DEBUG is returned by the
1703 CPU loop after each instruction */
1704void cpu_single_step(CPUState *env, int enabled)
1705{
1706#if defined(TARGET_HAS_ICE)
1707 if (env->singlestep_enabled != enabled) {
1708 env->singlestep_enabled = enabled;
1709 if (kvm_enabled())
1710 kvm_update_guest_debug(env, 0);
1711 else {
1712 /* must flush all the translated code to avoid inconsistencies */
1713 /* XXX: only flush what is necessary */
1714 tb_flush(env);
1715 }
1716 }
1717#endif
1718}
1719
1720#ifndef VBOX
1721
1722/* enable or disable low-level logging */
1723void cpu_set_log(int log_flags)
1724{
1725 loglevel = log_flags;
1726 if (loglevel && !logfile) {
1727 logfile = fopen(logfilename, log_append ? "a" : "w");
1728 if (!logfile) {
1729 perror(logfilename);
1730 _exit(1);
1731 }
1732#if !defined(CONFIG_SOFTMMU)
1733 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1734 {
1735 static char logfile_buf[4096];
1736 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1737 }
1738#elif !defined(_WIN32)
1739 /* Win32 doesn't support line-buffering and requires size >= 2 */
1740 setvbuf(logfile, NULL, _IOLBF, 0);
1741#endif
1742 log_append = 1;
1743 }
1744 if (!loglevel && logfile) {
1745 fclose(logfile);
1746 logfile = NULL;
1747 }
1748}
1749
1750void cpu_set_log_filename(const char *filename)
1751{
1752 logfilename = strdup(filename);
1753 if (logfile) {
1754 fclose(logfile);
1755 logfile = NULL;
1756 }
1757 cpu_set_log(loglevel);
1758}
1759
1760#endif /* !VBOX */
1761
1762static void cpu_unlink_tb(CPUState *env)
1763{
1764#if defined(CONFIG_USE_NPTL)
1765 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1766 problem and hope the cpu will stop of its own accord. For userspace
1767 emulation this often isn't actually as bad as it sounds. Often
1768 signals are used primarily to interrupt blocking syscalls. */
1769#else
1770 TranslationBlock *tb;
1771 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1772
1773 tb = env->current_tb;
1774 /* if the cpu is currently executing code, we must unlink it and
1775 all the potentially executing TB */
1776 if (tb && !testandset(&interrupt_lock)) {
1777 env->current_tb = NULL;
1778 tb_reset_jump_recursive(tb);
1779 resetlock(&interrupt_lock);
1780 }
1781#endif
1782}
1783
1784/* mask must never be zero, except for A20 change call */
1785void cpu_interrupt(CPUState *env, int mask)
1786{
1787 int old_mask;
1788
1789 old_mask = env->interrupt_request;
1790#ifndef VBOX
1791 env->interrupt_request |= mask;
1792#else /* VBOX */
1793 VM_ASSERT_EMT(env->pVM);
1794 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1795#endif /* VBOX */
1796
1797#ifndef VBOX
1798#ifndef CONFIG_USER_ONLY
1799 /*
1800 * If called from iothread context, wake the target cpu in
1801 * case it's halted.
1802 */
1803 if (!qemu_cpu_self(env)) {
1804 qemu_cpu_kick(env);
1805 return;
1806 }
1807#endif
1808#endif /* !VBOX */
1809
1810 if (use_icount) {
1811 env->icount_decr.u16.high = 0xffff;
1812#ifndef CONFIG_USER_ONLY
1813 if (!can_do_io(env)
1814 && (mask & ~old_mask) != 0) {
1815 cpu_abort(env, "Raised interrupt while not in I/O function");
1816 }
1817#endif
1818 } else {
1819 cpu_unlink_tb(env);
1820 }
1821}
1822
1823void cpu_reset_interrupt(CPUState *env, int mask)
1824{
1825#ifdef VBOX
1826 /*
1827 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1828 * for future changes!
1829 */
1830 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1831#else /* !VBOX */
1832 env->interrupt_request &= ~mask;
1833#endif /* !VBOX */
1834}
1835
1836void cpu_exit(CPUState *env)
1837{
1838 env->exit_request = 1;
1839 cpu_unlink_tb(env);
1840}
1841
1842#ifndef VBOX
1843const CPULogItem cpu_log_items[] = {
1844 { CPU_LOG_TB_OUT_ASM, "out_asm",
1845 "show generated host assembly code for each compiled TB" },
1846 { CPU_LOG_TB_IN_ASM, "in_asm",
1847 "show target assembly code for each compiled TB" },
1848 { CPU_LOG_TB_OP, "op",
1849 "show micro ops for each compiled TB" },
1850 { CPU_LOG_TB_OP_OPT, "op_opt",
1851 "show micro ops "
1852#ifdef TARGET_I386
1853 "before eflags optimization and "
1854#endif
1855 "after liveness analysis" },
1856 { CPU_LOG_INT, "int",
1857 "show interrupts/exceptions in short format" },
1858 { CPU_LOG_EXEC, "exec",
1859 "show trace before each executed TB (lots of logs)" },
1860 { CPU_LOG_TB_CPU, "cpu",
1861 "show CPU state before block translation" },
1862#ifdef TARGET_I386
1863 { CPU_LOG_PCALL, "pcall",
1864 "show protected mode far calls/returns/exceptions" },
1865 { CPU_LOG_RESET, "cpu_reset",
1866 "show CPU state before CPU resets" },
1867#endif
1868#ifdef DEBUG_IOPORT
1869 { CPU_LOG_IOPORT, "ioport",
1870 "show all i/o ports accesses" },
1871#endif
1872 { 0, NULL, NULL },
1873};
1874
1875static int cmp1(const char *s1, int n, const char *s2)
1876{
1877 if (strlen(s2) != n)
1878 return 0;
1879 return memcmp(s1, s2, n) == 0;
1880}
1881
1882/* takes a comma separated list of log masks. Return 0 if error. */
1883int cpu_str_to_log_mask(const char *str)
1884{
1885 const CPULogItem *item;
1886 int mask;
1887 const char *p, *p1;
1888
1889 p = str;
1890 mask = 0;
1891 for(;;) {
1892 p1 = strchr(p, ',');
1893 if (!p1)
1894 p1 = p + strlen(p);
1895 if(cmp1(p,p1-p,"all")) {
1896 for(item = cpu_log_items; item->mask != 0; item++) {
1897 mask |= item->mask;
1898 }
1899 } else {
1900 for(item = cpu_log_items; item->mask != 0; item++) {
1901 if (cmp1(p, p1 - p, item->name))
1902 goto found;
1903 }
1904 return 0;
1905 }
1906 found:
1907 mask |= item->mask;
1908 if (*p1 != ',')
1909 break;
1910 p = p1 + 1;
1911 }
1912 return mask;
1913}
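/* Usage sketch (illustrative): the argument is the comma separated list
   typically taken from the command line, e.g.

       int mask = cpu_str_to_log_mask("in_asm,exec");
       // mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC); 0 would mean a bad name
       cpu_set_log(mask);
*/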
1914#endif /* !VBOX */
1915
1916#ifndef VBOX /* VBOX: we have our own routine. */
1917void cpu_abort(CPUState *env, const char *fmt, ...)
1918{
1919 va_list ap;
1920 va_list ap2;
1921
1922 va_start(ap, fmt);
1923 va_copy(ap2, ap);
1924 fprintf(stderr, "qemu: fatal: ");
1925 vfprintf(stderr, fmt, ap);
1926 fprintf(stderr, "\n");
1927#ifdef TARGET_I386
1928 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1929#else
1930 cpu_dump_state(env, stderr, fprintf, 0);
1931#endif
1932 if (qemu_log_enabled()) {
1933 qemu_log("qemu: fatal: ");
1934 qemu_log_vprintf(fmt, ap2);
1935 qemu_log("\n");
1936#ifdef TARGET_I386
1937 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1938#else
1939 log_cpu_state(env, 0);
1940#endif
1941 qemu_log_flush();
1942 qemu_log_close();
1943 }
1944 va_end(ap2);
1945 va_end(ap);
1946 abort();
1947}
1948#endif /* !VBOX */
1949
1950#ifndef VBOX /* not needed */
1951CPUState *cpu_copy(CPUState *env)
1952{
1953 CPUState *new_env = cpu_init(env->cpu_model_str);
1954 CPUState *next_cpu = new_env->next_cpu;
1955 int cpu_index = new_env->cpu_index;
1956#if defined(TARGET_HAS_ICE)
1957 CPUBreakpoint *bp;
1958 CPUWatchpoint *wp;
1959#endif
1960
1961 memcpy(new_env, env, sizeof(CPUState));
1962
1963 /* Preserve chaining and index. */
1964 new_env->next_cpu = next_cpu;
1965 new_env->cpu_index = cpu_index;
1966
1967 /* Clone all break/watchpoints.
1968 Note: Once we support ptrace with hw-debug register access, make sure
1969 BP_CPU break/watchpoints are handled correctly on clone. */
1970 QTAILQ_INIT(&env->breakpoints);
1971 QTAILQ_INIT(&env->watchpoints);
1972#if defined(TARGET_HAS_ICE)
1973 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1974 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1975 }
1976 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1977 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1978 wp->flags, NULL);
1979 }
1980#endif
1981
1982 return new_env;
1983}
1984#endif /* !VBOX */
1985
1986#if !defined(CONFIG_USER_ONLY)
1987
1988static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1989{
1990 unsigned int i;
1991
1992 /* Discard jump cache entries for any tb which might potentially
1993 overlap the flushed page. */
1994 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1995 memset (&env->tb_jmp_cache[i], 0,
1996 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1997
1998 i = tb_jmp_cache_hash_page(addr);
1999 memset (&env->tb_jmp_cache[i], 0,
2000 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
2001
2002#ifdef VBOX
2003 /* inform raw mode about TLB page flush */
2004 remR3FlushPage(env, addr);
2005#endif /* VBOX */
2006}
2007
2008static CPUTLBEntry s_cputlb_empty_entry = {
2009 .addr_read = -1,
2010 .addr_write = -1,
2011 .addr_code = -1,
2012 .addend = -1,
2013};
2014
2015/* NOTE: if flush_global is true, also flush global entries (not
2016 implemented yet) */
2017void tlb_flush(CPUState *env, int flush_global)
2018{
2019 int i;
2020
2021#if defined(DEBUG_TLB)
2022 printf("tlb_flush:\n");
2023#endif
2024 /* must reset current TB so that interrupts cannot modify the
2025 links while we are modifying them */
2026 env->current_tb = NULL;
2027
2028 for(i = 0; i < CPU_TLB_SIZE; i++) {
2029 int mmu_idx;
2030 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2031 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
2032 }
2033 }
2034
2035 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
2036
2037#ifdef VBOX
2038 /* inform raw mode about TLB flush */
2039 remR3FlushTLB(env, flush_global);
2040#endif
2041 tlb_flush_count++;
2042}
2043
2044static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2045{
2046 if (addr == (tlb_entry->addr_read &
2047 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2048 addr == (tlb_entry->addr_write &
2049 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2050 addr == (tlb_entry->addr_code &
2051 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2052 *tlb_entry = s_cputlb_empty_entry;
2053 }
2054}
2055
2056void tlb_flush_page(CPUState *env, target_ulong addr)
2057{
2058 int i;
2059 int mmu_idx;
2060
2061#if defined(DEBUG_TLB)
2062 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2063#endif
2064 /* must reset current TB so that interrupts cannot modify the
2065 links while we are modifying them */
2066 env->current_tb = NULL;
2067
2068 addr &= TARGET_PAGE_MASK;
2069 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2070 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2071 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2072
2073 tlb_flush_jmp_cache(env, addr);
2074}
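/* Typical callers (outside this file, sketch): the target MMU code calls
 * tlb_flush() when the whole mapping changes (e.g. a CR3 reload or paging
 * mode switch on x86) and tlb_flush_page() for single-page invalidations
 * such as INVLPG. */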
2075
2076/* update the TLBs so that writes to code in the virtual page 'addr'
2077 can be detected */
2078static void tlb_protect_code(ram_addr_t ram_addr)
2079{
2080 cpu_physical_memory_reset_dirty(ram_addr,
2081 ram_addr + TARGET_PAGE_SIZE,
2082 CODE_DIRTY_FLAG);
2083#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2084 /** @todo Retest this? This function has changed... */
2085 remR3ProtectCode(cpu_single_env, ram_addr);
2086#endif
2087}
2088
2089/* update the TLB so that writes in the physical page 'ram_addr' are no longer
2090 tested for self-modifying code */
2091static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2092 target_ulong vaddr)
2093{
2094#ifdef VBOX
2095 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2096#endif
2097 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2098}
2099
2100static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2101 unsigned long start, unsigned long length)
2102{
2103 unsigned long addr;
2104
2105#ifdef VBOX
2106 if (start & 3)
2107 return;
2108#endif
2109 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2110 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2111 if ((addr - start) < length) {
2112 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2113 }
2114 }
2115}
2116
2117/* Note: start and end must be within the same ram block. */
2118void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2119 int dirty_flags)
2120{
2121 CPUState *env;
2122 unsigned long length, start1;
2123 int i, mask, len;
2124 uint8_t *p;
2125
2126 start &= TARGET_PAGE_MASK;
2127 end = TARGET_PAGE_ALIGN(end);
2128
2129 length = end - start;
2130 if (length == 0)
2131 return;
2132 len = length >> TARGET_PAGE_BITS;
2133 mask = ~dirty_flags;
2134 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2135#ifdef VBOX
2136 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2137#endif
2138 for(i = 0; i < len; i++)
2139 p[i] &= mask;
2140
2141 /* we modify the TLB cache so that the dirty bit will be set again
2142 when accessing the range */
2143#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2144 start1 = start;
2145#elif !defined(VBOX)
2146 start1 = (unsigned long)qemu_get_ram_ptr(start);
2147 /* Check that we don't span multiple blocks - this breaks the
2148 address comparisons below. */
2149 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2150 != (end - 1) - start) {
2151 abort();
2152 }
2153#else
2154 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2155#endif
2156
2157 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2158 int mmu_idx;
2159 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2160 for(i = 0; i < CPU_TLB_SIZE; i++)
2161 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2162 start1, length);
2163 }
2164 }
2165}
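/* Usage sketch: dirty-page consumers such as the VGA refresh code or live
 * migration read the dirty bitmap and then call, e.g.,
 *     cpu_physical_memory_reset_dirty(start, start + len, VGA_DIRTY_FLAG);
 * to clear their flag; the TLB rewrite above re-arms the TLB_NOTDIRTY slow
 * path so the next guest write sets the bit again. (VGA_DIRTY_FLAG is the
 * flag name assumed from the matching QEMU headers.) */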
2166
2167#ifndef VBOX
2168int cpu_physical_memory_set_dirty_tracking(int enable)
2169{
2170 in_migration = enable;
2171 if (kvm_enabled()) {
2172 return kvm_set_migration_log(enable);
2173 }
2174 return 0;
2175}
2176
2177int cpu_physical_memory_get_dirty_tracking(void)
2178{
2179 return in_migration;
2180}
2181#endif /* !VBOX */
2182
2183int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2184 target_phys_addr_t end_addr)
2185{
2186#ifndef VBOX
2187 int ret = 0;
2188
2189 if (kvm_enabled())
2190 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
2191 return ret;
2192#else
2193 return 0;
2194#endif
2195}
2196
2197#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2198DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2199#else
2200static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2201#endif
2202{
2203 ram_addr_t ram_addr;
2204 void *p;
2205
2206 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2207#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2208 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2209#elif !defined(VBOX)
2210 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2211 + tlb_entry->addend);
2212 ram_addr = qemu_ram_addr_from_host(p);
2213#else
2214 Assert(phys_addend != -1);
2215 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2216#endif
2217 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2218 tlb_entry->addr_write |= TLB_NOTDIRTY;
2219 }
2220 }
2221}
2222
2223/* update the TLB according to the current state of the dirty bits */
2224void cpu_tlb_update_dirty(CPUState *env)
2225{
2226 int i;
2227 int mmu_idx;
2228 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2229 for(i = 0; i < CPU_TLB_SIZE; i++)
2230#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2231 tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
2232#else
2233 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2234#endif
2235 }
2236}
2237
2238static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2239{
2240 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2241 tlb_entry->addr_write = vaddr;
2242}
2243
2244/* update the TLB corresponding to virtual page vaddr
2245 so that it is no longer dirty */
2246static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2247{
2248 int i;
2249 int mmu_idx;
2250
2251 vaddr &= TARGET_PAGE_MASK;
2252 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2253 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2254 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2255}
2256
2257/* add a new TLB entry. At most one entry for a given virtual address
2258 is permitted. Return 0 if OK or 2 if the page could not be mapped
2259 (can only happen in non SOFTMMU mode for I/O pages or pages
2260 conflicting with the host address space). */
2261int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2262 target_phys_addr_t paddr, int prot,
2263 int mmu_idx, int is_softmmu)
2264{
2265 PhysPageDesc *p;
2266 unsigned long pd;
2267 unsigned int index;
2268 target_ulong address;
2269 target_ulong code_address;
2270 target_phys_addr_t addend;
2271 int ret;
2272 CPUTLBEntry *te;
2273 CPUWatchpoint *wp;
2274 target_phys_addr_t iotlb;
2275#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2276 int read_mods = 0, write_mods = 0, code_mods = 0;
2277#endif
2278
2279 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2280 if (!p) {
2281 pd = IO_MEM_UNASSIGNED;
2282 } else {
2283 pd = p->phys_offset;
2284 }
2285#if defined(DEBUG_TLB)
2286 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2287 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2288#endif
2289
2290 ret = 0;
2291 address = vaddr;
2292 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2293 /* IO memory case (romd handled later) */
2294 address |= TLB_MMIO;
2295 }
2296#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2297 addend = pd & TARGET_PAGE_MASK;
2298#elif !defined(VBOX)
2299 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2300#else
2301 /** @todo this is racing the phys_page_find call above since it may register
2302 * a new chunk of memory... */
2303 addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
2304#endif
2305
2306 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2307 /* Normal RAM. */
2308 iotlb = pd & TARGET_PAGE_MASK;
2309 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2310 iotlb |= IO_MEM_NOTDIRTY;
2311 else
2312 iotlb |= IO_MEM_ROM;
2313 } else {
2314 /* IO handlers are currently passed a physical address.
2315 It would be nice to pass an offset from the base address
2316 of that region. This would avoid having to special case RAM,
2317 and avoid full address decoding in every device.
2318 We can't use the high bits of pd for this because
2319 IO_MEM_ROMD uses these as a ram address. */
2320 iotlb = (pd & ~TARGET_PAGE_MASK);
2321 if (p) {
2322 iotlb += p->region_offset;
2323 } else {
2324 iotlb += paddr;
2325 }
2326 }
2327
2328 code_address = address;
2329#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2330
2331 if (addend & 0x3)
2332 {
2333 if (addend & 0x2)
2334 {
2335 /* catch write */
2336 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2337 write_mods |= TLB_MMIO;
2338 }
2339 else if (addend & 0x1)
2340 {
2341 /* catch all */
2342 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2343 {
2344 read_mods |= TLB_MMIO;
2345 write_mods |= TLB_MMIO;
2346 code_mods |= TLB_MMIO;
2347 }
2348 }
2349 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2350 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2351 addend &= ~(target_ulong)0x3;
2352 }
2353
2354#endif
2355 /* Make accesses to pages with watchpoints go via the
2356 watchpoint trap routines. */
2357 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2358 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2359 iotlb = io_mem_watch + paddr;
2360 /* TODO: The memory case can be optimized by not trapping
2361 reads of pages with a write breakpoint. */
2362 address |= TLB_MMIO;
2363 }
2364 }
2365
2366 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2367 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2368 te = &env->tlb_table[mmu_idx][index];
2369 te->addend = addend - vaddr;
2370 if (prot & PAGE_READ) {
2371 te->addr_read = address;
2372 } else {
2373 te->addr_read = -1;
2374 }
2375
2376 if (prot & PAGE_EXEC) {
2377 te->addr_code = code_address;
2378 } else {
2379 te->addr_code = -1;
2380 }
2381 if (prot & PAGE_WRITE) {
2382 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2383 (pd & IO_MEM_ROMD)) {
2384 /* Write access calls the I/O callback. */
2385 te->addr_write = address | TLB_MMIO;
2386 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2387 !cpu_physical_memory_is_dirty(pd)) {
2388 te->addr_write = address | TLB_NOTDIRTY;
2389 } else {
2390 te->addr_write = address;
2391 }
2392 } else {
2393 te->addr_write = -1;
2394 }
2395
2396#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2397 if (prot & PAGE_READ)
2398 te->addr_read |= read_mods;
2399 if (prot & PAGE_EXEC)
2400 te->addr_code |= code_mods;
2401 if (prot & PAGE_WRITE)
2402 te->addr_write |= write_mods;
2403
2404 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
2405#endif
2406
2407#ifdef VBOX
2408 /* inform raw mode about TLB page change */
2409 remR3FlushPage(env, vaddr);
2410#endif
2411 return ret;
2412}
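/* Caller sketch: the per-target MMU fault handler (cpu_x86_handle_mmu_fault()
 * and friends in this QEMU generation) walks the guest page tables and then
 * installs the resolved translation, typically via the tlb_set_page()
 * wrapper, roughly:
 *     tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
 *                       mmu_idx, is_softmmu);
 * The caller names are assumptions taken from the surrounding QEMU sources. */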
2413
2414#else
2415
2416void tlb_flush(CPUState *env, int flush_global)
2417{
2418}
2419
2420void tlb_flush_page(CPUState *env, target_ulong addr)
2421{
2422}
2423
2424int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2425 target_phys_addr_t paddr, int prot,
2426 int mmu_idx, int is_softmmu)
2427{
2428 return 0;
2429}
2430
2431#ifndef VBOX
2432
2433/*
2434 * Walks guest process memory "regions" one by one
2435 * and calls callback function 'fn' for each region.
2436 */
2437int walk_memory_regions(void *priv,
2438 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2439{
2440 unsigned long start, end;
2441 PageDesc *p = NULL;
2442 int i, j, prot, prot1;
2443 int rc = 0;
2444
2445 start = end = -1;
2446 prot = 0;
2447
2448 for (i = 0; i <= L1_SIZE; i++) {
2449 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2450 for (j = 0; j < L2_SIZE; j++) {
2451 prot1 = (p == NULL) ? 0 : p[j].flags;
2452 /*
2453 * A "region" is one contiguous chunk of memory
2454 * that has the same protection flags set.
2455 */
2456 if (prot1 != prot) {
2457 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2458 if (start != -1) {
2459 rc = (*fn)(priv, start, end, prot);
2460 /* callback can stop iteration by returning != 0 */
2461 if (rc != 0)
2462 return (rc);
2463 }
2464 if (prot1 != 0)
2465 start = end;
2466 else
2467 start = -1;
2468 prot = prot1;
2469 }
2470 if (p == NULL)
2471 break;
2472 }
2473 }
2474 return (rc);
2475}
2476
2477static int dump_region(void *priv, unsigned long start,
2478 unsigned long end, unsigned long prot)
2479{
2480 FILE *f = (FILE *)priv;
2481
2482 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2483 start, end, end - start,
2484 ((prot & PAGE_READ) ? 'r' : '-'),
2485 ((prot & PAGE_WRITE) ? 'w' : '-'),
2486 ((prot & PAGE_EXEC) ? 'x' : '-'));
2487
2488 return (0);
2489}
2490
2491/* dump memory mappings */
2492void page_dump(FILE *f)
2493{
2494 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2495 "start", "end", "size", "prot");
2496 walk_memory_regions(f, dump_region);
2497}
2498
2499#endif /* !VBOX */
2500
2501int page_get_flags(target_ulong address)
2502{
2503 PageDesc *p;
2504
2505 p = page_find(address >> TARGET_PAGE_BITS);
2506 if (!p)
2507 return 0;
2508 return p->flags;
2509}
2510
2511/* modify the flags of a page and invalidate the code if
2512 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2513 depending on PAGE_WRITE */
2514void page_set_flags(target_ulong start, target_ulong end, int flags)
2515{
2516 PageDesc *p;
2517 target_ulong addr;
2518
2519 /* mmap_lock should already be held. */
2520 start = start & TARGET_PAGE_MASK;
2521 end = TARGET_PAGE_ALIGN(end);
2522 if (flags & PAGE_WRITE)
2523 flags |= PAGE_WRITE_ORG;
2524#ifdef VBOX
2525 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2526#endif
2527 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2528 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2529 /* We may be called for host regions that are outside guest
2530 address space. */
2531 if (!p)
2532 return;
2533 /* if the write protection is set, then we invalidate the code
2534 inside */
2535 if (!(p->flags & PAGE_WRITE) &&
2536 (flags & PAGE_WRITE) &&
2537 p->first_tb) {
2538 tb_invalidate_phys_page(addr, 0, NULL);
2539 }
2540 p->flags = flags;
2541 }
2542}
2543
2544int page_check_range(target_ulong start, target_ulong len, int flags)
2545{
2546 PageDesc *p;
2547 target_ulong end;
2548 target_ulong addr;
2549
2550 if (start + len < start)
2551 /* we've wrapped around */
2552 return -1;
2553
2554 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2555 start = start & TARGET_PAGE_MASK;
2556
2557 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2558 p = page_find(addr >> TARGET_PAGE_BITS);
2559 if( !p )
2560 return -1;
2561 if( !(p->flags & PAGE_VALID) )
2562 return -1;
2563
2564 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2565 return -1;
2566 if (flags & PAGE_WRITE) {
2567 if (!(p->flags & PAGE_WRITE_ORG))
2568 return -1;
2569 /* unprotect the page if it was put read-only because it
2570 contains translated code */
2571 if (!(p->flags & PAGE_WRITE)) {
2572 if (!page_unprotect(addr, 0, NULL))
2573 return -1;
2574 }
2575 return 0;
2576 }
2577 }
2578 return 0;
2579}
2580
2581/* called from signal handler: invalidate the code and unprotect the
2582 page. Return TRUE if the fault was successfully handled. */
2583int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2584{
2585 unsigned int page_index, prot, pindex;
2586 PageDesc *p, *p1;
2587 target_ulong host_start, host_end, addr;
2588
2589 /* Technically this isn't safe inside a signal handler. However we
2590 know this only ever happens in a synchronous SEGV handler, so in
2591 practice it seems to be ok. */
2592 mmap_lock();
2593
2594 host_start = address & qemu_host_page_mask;
2595 page_index = host_start >> TARGET_PAGE_BITS;
2596 p1 = page_find(page_index);
2597 if (!p1) {
2598 mmap_unlock();
2599 return 0;
2600 }
2601 host_end = host_start + qemu_host_page_size;
2602 p = p1;
2603 prot = 0;
2604 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2605 prot |= p->flags;
2606 p++;
2607 }
2608 /* if the page was really writable, then we change its
2609 protection back to writable */
2610 if (prot & PAGE_WRITE_ORG) {
2611 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2612 if (!(p1[pindex].flags & PAGE_WRITE)) {
2613 mprotect((void *)g2h(host_start), qemu_host_page_size,
2614 (prot & PAGE_BITS) | PAGE_WRITE);
2615 p1[pindex].flags |= PAGE_WRITE;
2616 /* and since the content will be modified, we must invalidate
2617 the corresponding translated code. */
2618 tb_invalidate_phys_page(address, pc, puc);
2619#ifdef DEBUG_TB_CHECK
2620 tb_invalidate_check(address);
2621#endif
2622 mmap_unlock();
2623 return 1;
2624 }
2625 }
2626 mmap_unlock();
2627 return 0;
2628}
2629
2630static inline void tlb_set_dirty(CPUState *env,
2631 unsigned long addr, target_ulong vaddr)
2632{
2633}
2634#endif /* defined(CONFIG_USER_ONLY) */
2635
2636#if !defined(CONFIG_USER_ONLY)
2637
2638static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2639 ram_addr_t memory, ram_addr_t region_offset);
2640static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2641 ram_addr_t orig_memory, ram_addr_t region_offset);
2642#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2643 need_subpage) \
2644 do { \
2645 if (addr > start_addr) \
2646 start_addr2 = 0; \
2647 else { \
2648 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2649 if (start_addr2 > 0) \
2650 need_subpage = 1; \
2651 } \
2652 \
2653 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2654 end_addr2 = TARGET_PAGE_SIZE - 1; \
2655 else { \
2656 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2657 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2658 need_subpage = 1; \
2659 } \
2660 } while (0)
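/* Subpages let a single target page be split among several handlers:
 * CHECK_SUBPAGE computes the sub-range [start_addr2, end_addr2] of the
 * current page that the new registration actually covers and sets
 * need_subpage when that range is smaller than a full page. */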
2661
2662/* register physical memory.
2663 For RAM, 'size' must be a multiple of the target page size.
2664 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2665 io memory page. The address used when calling the IO function is
2666 the offset from the start of the region, plus region_offset. Both
2667 start_addr and region_offset are rounded down to a page boundary
2668 before calculating this offset. This should not be a problem unless
2669 the low bits of start_addr and region_offset differ. */
2670void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2671 ram_addr_t size,
2672 ram_addr_t phys_offset,
2673 ram_addr_t region_offset)
2674{
2675 target_phys_addr_t addr, end_addr;
2676 PhysPageDesc *p;
2677 CPUState *env;
2678 ram_addr_t orig_size = size;
2679 void *subpage;
2680
2681 if (kvm_enabled())
2682 kvm_set_phys_mem(start_addr, size, phys_offset);
2683
2684 if (phys_offset == IO_MEM_UNASSIGNED) {
2685 region_offset = start_addr;
2686 }
2687 region_offset &= TARGET_PAGE_MASK;
2688 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2689 end_addr = start_addr + (target_phys_addr_t)size;
2690 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2691 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2692 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2693 ram_addr_t orig_memory = p->phys_offset;
2694 target_phys_addr_t start_addr2, end_addr2;
2695 int need_subpage = 0;
2696
2697 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2698 need_subpage);
2699 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2700 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2701 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2702 &p->phys_offset, orig_memory,
2703 p->region_offset);
2704 } else {
2705 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2706 >> IO_MEM_SHIFT];
2707 }
2708 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2709 region_offset);
2710 p->region_offset = 0;
2711 } else {
2712 p->phys_offset = phys_offset;
2713 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2714 (phys_offset & IO_MEM_ROMD))
2715 phys_offset += TARGET_PAGE_SIZE;
2716 }
2717 } else {
2718 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2719 p->phys_offset = phys_offset;
2720 p->region_offset = region_offset;
2721 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2722 (phys_offset & IO_MEM_ROMD)) {
2723 phys_offset += TARGET_PAGE_SIZE;
2724 } else {
2725 target_phys_addr_t start_addr2, end_addr2;
2726 int need_subpage = 0;
2727
2728 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2729 end_addr2, need_subpage);
2730
2731 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2732 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2733 &p->phys_offset, IO_MEM_UNASSIGNED,
2734 addr & TARGET_PAGE_MASK);
2735 subpage_register(subpage, start_addr2, end_addr2,
2736 phys_offset, region_offset);
2737 p->region_offset = 0;
2738 }
2739 }
2740 }
2741 region_offset += TARGET_PAGE_SIZE;
2742 }
2743
2744 /* since each CPU stores ram addresses in its TLB cache, we must
2745 reset the modified entries */
2746 /* XXX: slow ! */
2747 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2748 tlb_flush(env, 1);
2749 }
2750}
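/* Registration sketch (illustrative, not from this file): board and device
 * init code usually goes through the cpu_register_physical_memory() wrapper,
 * assumed here to pass region_offset == 0, e.g.
 *     ram_addr_t off = qemu_ram_alloc(0x100000);
 *     cpu_register_physical_memory(0x00000000, 0x100000, off | IO_MEM_RAM);
 * or, for MMIO, it passes the index returned by cpu_register_io_memory()
 * instead of a RAM offset. */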
2751
2752/* XXX: temporary until new memory mapping API */
2753ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2754{
2755 PhysPageDesc *p;
2756
2757 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2758 if (!p)
2759 return IO_MEM_UNASSIGNED;
2760 return p->phys_offset;
2761}
2762
2763#ifndef VBOX
2764void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2765{
2766 if (kvm_enabled())
2767 kvm_coalesce_mmio_region(addr, size);
2768}
2769
2770void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2771{
2772 if (kvm_enabled())
2773 kvm_uncoalesce_mmio_region(addr, size);
2774}
2775
2776ram_addr_t qemu_ram_alloc(ram_addr_t size)
2777{
2778 RAMBlock *new_block;
2779
2780 size = TARGET_PAGE_ALIGN(size);
2781 new_block = qemu_malloc(sizeof(*new_block));
2782
2783#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2784 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2785 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2786 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2787#else
2788 new_block->host = qemu_vmalloc(size);
2789#endif
2790#ifdef MADV_MERGEABLE
2791 madvise(new_block->host, size, MADV_MERGEABLE);
2792#endif
2793 new_block->offset = last_ram_offset;
2794 new_block->length = size;
2795
2796 new_block->next = ram_blocks;
2797 ram_blocks = new_block;
2798
2799 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2800 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2801 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2802 0xff, size >> TARGET_PAGE_BITS);
2803
2804 last_ram_offset += size;
2805
2806 if (kvm_enabled())
2807 kvm_setup_guest_memory(new_block->host, size);
2808
2809 return new_block->offset;
2810}
2811
2812void qemu_ram_free(ram_addr_t addr)
2813{
2814 /* TODO: implement this. */
2815}
2816
2817/* Return a host pointer to ram allocated with qemu_ram_alloc.
2818 With the exception of the softmmu code in this file, this should
2819 only be used for local memory (e.g. video ram) that the device owns,
2820 and knows it isn't going to access beyond the end of the block.
2821
2822 It should not be used for general purpose DMA.
2823 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2824 */
2825void *qemu_get_ram_ptr(ram_addr_t addr)
2826{
2827 RAMBlock *prev;
2828 RAMBlock **prevp;
2829 RAMBlock *block;
2830
2831 prev = NULL;
2832 prevp = &ram_blocks;
2833 block = ram_blocks;
2834 while (block && (block->offset > addr
2835 || block->offset + block->length <= addr)) {
2836 if (prev)
2837 prevp = &prev->next;
2838 prev = block;
2839 block = block->next;
2840 }
2841 if (!block) {
2842 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2843 abort();
2844 }
2845 /* Move this entry to the start of the list. */
2846 if (prev) {
2847 prev->next = block->next;
2848 block->next = *prevp;
2849 *prevp = block;
2850 }
2851 return block->host + (addr - block->offset);
2852}
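/* Usage sketch: a device that owns the block, e.g. a display adapter, may
 * cache a host pointer to its own video RAM:
 *     s->vram_ptr = qemu_get_ram_ptr(s->vram_offset);
 * where vram_offset came from qemu_ram_alloc(); guest-driven DMA should use
 * cpu_physical_memory_map()/cpu_physical_memory_rw() instead, as noted above.
 * (s->vram_ptr and s->vram_offset are hypothetical field names.) */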
2853
2854/* Some of the softmmu routines need to translate from a host pointer
2855 (typically a TLB entry) back to a ram offset. */
2856ram_addr_t qemu_ram_addr_from_host(void *ptr)
2857{
2858 RAMBlock *prev;
2859 RAMBlock **prevp;
2860 RAMBlock *block;
2861 uint8_t *host = ptr;
2862
2863 prev = NULL;
2864 prevp = &ram_blocks;
2865 block = ram_blocks;
2866 while (block && (block->host > host
2867 || block->host + block->length <= host)) {
2868 if (prev)
2869 prevp = &prev->next;
2870 prev = block;
2871 block = block->next;
2872 }
2873 if (!block) {
2874 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2875 abort();
2876 }
2877 return block->offset + (host - block->host);
2878}
2879
2880#endif /* !VBOX */
2881
2882static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2883{
2884#ifdef DEBUG_UNASSIGNED
2885 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2886#endif
2887#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2888 do_unassigned_access(addr, 0, 0, 0, 1);
2889#endif
2890 return 0;
2891}
2892
2893static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2894{
2895#ifdef DEBUG_UNASSIGNED
2896 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2897#endif
2898#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2899 do_unassigned_access(addr, 0, 0, 0, 2);
2900#endif
2901 return 0;
2902}
2903
2904static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2905{
2906#ifdef DEBUG_UNASSIGNED
2907 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2908#endif
2909#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2910 do_unassigned_access(addr, 0, 0, 0, 4);
2911#endif
2912 return 0;
2913}
2914
2915static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2916{
2917#ifdef DEBUG_UNASSIGNED
2918 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2919#endif
2920#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2921 do_unassigned_access(addr, 1, 0, 0, 1);
2922#endif
2923}
2924
2925static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2926{
2927#ifdef DEBUG_UNASSIGNED
2928 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2929#endif
2930#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2931 do_unassigned_access(addr, 1, 0, 0, 2);
2932#endif
2933}
2934
2935static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2936{
2937#ifdef DEBUG_UNASSIGNED
2938 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2939#endif
2940#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2941 do_unassigned_access(addr, 1, 0, 0, 4);
2942#endif
2943}
2944
2945static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2946 unassigned_mem_readb,
2947 unassigned_mem_readw,
2948 unassigned_mem_readl,
2949};
2950
2951static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2952 unassigned_mem_writeb,
2953 unassigned_mem_writew,
2954 unassigned_mem_writel,
2955};
2956
2957static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2958 uint32_t val)
2959{
2960 int dirty_flags;
2961#ifdef VBOX
2962 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2963 dirty_flags = 0xff;
2964 else
2965#endif /* VBOX */
2966 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2967 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2968#if !defined(CONFIG_USER_ONLY)
2969 tb_invalidate_phys_page_fast(ram_addr, 1);
2970# ifdef VBOX
2971 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2972 dirty_flags = 0xff;
2973 else
2974# endif /* VBOX */
2975 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2976#endif
2977 }
2978#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2979 remR3PhysWriteU8(ram_addr, val);
2980#else
2981 stb_p(qemu_get_ram_ptr(ram_addr), val);
2982#endif
2983#ifdef CONFIG_KQEMU
2984 if (cpu_single_env->kqemu_enabled &&
2985 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2986 kqemu_modify_page(cpu_single_env, ram_addr);
2987#endif
2988 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2989#ifdef VBOX
2990 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2991#endif /* VBOX */
2992 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2993 /* we remove the notdirty callback only if the code has been
2994 flushed */
2995 if (dirty_flags == 0xff)
2996 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2997}
2998
2999static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3000 uint32_t val)
3001{
3002 int dirty_flags;
3003#ifdef VBOX
3004 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3005 dirty_flags = 0xff;
3006 else
3007#endif /* VBOX */
3008 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3009 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3010#if !defined(CONFIG_USER_ONLY)
3011 tb_invalidate_phys_page_fast(ram_addr, 2);
3012# ifdef VBOX
3013 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3014 dirty_flags = 0xff;
3015 else
3016# endif /* VBOX */
3017 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3018#endif
3019 }
3020#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3021 remR3PhysWriteU16(ram_addr, val);
3022#else
3023 stw_p(qemu_get_ram_ptr(ram_addr), val);
3024#endif
3025 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3026#ifdef VBOX
3027 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3028#endif
3029 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
3030 /* we remove the notdirty callback only if the code has been
3031 flushed */
3032 if (dirty_flags == 0xff)
3033 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3034}
3035
3036static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3037 uint32_t val)
3038{
3039 int dirty_flags;
3040#ifdef VBOX
3041 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3042 dirty_flags = 0xff;
3043 else
3044#endif /* VBOX */
3045 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3046 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3047#if !defined(CONFIG_USER_ONLY)
3048 tb_invalidate_phys_page_fast(ram_addr, 4);
3049# ifdef VBOX
3050 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3051 dirty_flags = 0xff;
3052 else
3053# endif /* VBOX */
3054 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3055#endif
3056 }
3057#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3058 remR3PhysWriteU32(ram_addr, val);
3059#else
3060 stl_p(qemu_get_ram_ptr(ram_addr), val);
3061#endif
3062 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3063#ifdef VBOX
3064 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3065#endif
3066 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
3067 /* we remove the notdirty callback only if the code has been
3068 flushed */
3069 if (dirty_flags == 0xff)
3070 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3071}
3072
3073static CPUReadMemoryFunc * const error_mem_read[3] = {
3074 NULL, /* never used */
3075 NULL, /* never used */
3076 NULL, /* never used */
3077};
3078
3079static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3080 notdirty_mem_writeb,
3081 notdirty_mem_writew,
3082 notdirty_mem_writel,
3083};
3084
3085/* Generate a debug exception if a watchpoint has been hit. */
3086static void check_watchpoint(int offset, int len_mask, int flags)
3087{
3088 CPUState *env = cpu_single_env;
3089 target_ulong pc, cs_base;
3090 TranslationBlock *tb;
3091 target_ulong vaddr;
3092 CPUWatchpoint *wp;
3093 int cpu_flags;
3094
3095 if (env->watchpoint_hit) {
3096 /* We re-entered the check after replacing the TB. Now raise
3097 * the debug interrupt so that it will trigger after the
3098 * current instruction. */
3099 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3100 return;
3101 }
3102 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3103 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3104 if ((vaddr == (wp->vaddr & len_mask) ||
3105 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3106 wp->flags |= BP_WATCHPOINT_HIT;
3107 if (!env->watchpoint_hit) {
3108 env->watchpoint_hit = wp;
3109 tb = tb_find_pc(env->mem_io_pc);
3110 if (!tb) {
3111 cpu_abort(env, "check_watchpoint: could not find TB for "
3112 "pc=%p", (void *)env->mem_io_pc);
3113 }
3114 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3115 tb_phys_invalidate(tb, -1);
3116 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3117 env->exception_index = EXCP_DEBUG;
3118 } else {
3119 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3120 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3121 }
3122 cpu_resume_from_signal(env, NULL);
3123 }
3124 } else {
3125 wp->flags &= ~BP_WATCHPOINT_HIT;
3126 }
3127 }
3128}
3129
3130/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3131 so these check for a hit then pass through to the normal out-of-line
3132 phys routines. */
3133static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3134{
3135 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3136 return ldub_phys(addr);
3137}
3138
3139static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3140{
3141 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3142 return lduw_phys(addr);
3143}
3144
3145static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3146{
3147 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3148 return ldl_phys(addr);
3149}
3150
3151static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3152 uint32_t val)
3153{
3154 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3155 stb_phys(addr, val);
3156}
3157
3158static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3159 uint32_t val)
3160{
3161 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3162 stw_phys(addr, val);
3163}
3164
3165static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3166 uint32_t val)
3167{
3168 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3169 stl_phys(addr, val);
3170}
3171
3172static CPUReadMemoryFunc * const watch_mem_read[3] = {
3173 watch_mem_readb,
3174 watch_mem_readw,
3175 watch_mem_readl,
3176};
3177
3178static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3179 watch_mem_writeb,
3180 watch_mem_writew,
3181 watch_mem_writel,
3182};
3183
3184static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3185 unsigned int len)
3186{
3187 uint32_t ret;
3188 unsigned int idx;
3189
3190 idx = SUBPAGE_IDX(addr);
3191#if defined(DEBUG_SUBPAGE)
3192 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3193 mmio, len, addr, idx);
3194#endif
3195 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3196 addr + mmio->region_offset[idx][0][len]);
3197
3198 return ret;
3199}
3200
3201static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3202 uint32_t value, unsigned int len)
3203{
3204 unsigned int idx;
3205
3206 idx = SUBPAGE_IDX(addr);
3207#if defined(DEBUG_SUBPAGE)
3208 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3209 mmio, len, addr, idx, value);
3210#endif
3211 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3212 addr + mmio->region_offset[idx][1][len],
3213 value);
3214}
3215
3216static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3217{
3218#if defined(DEBUG_SUBPAGE)
3219 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3220#endif
3221
3222 return subpage_readlen(opaque, addr, 0);
3223}
3224
3225static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3226 uint32_t value)
3227{
3228#if defined(DEBUG_SUBPAGE)
3229 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3230#endif
3231 subpage_writelen(opaque, addr, value, 0);
3232}
3233
3234static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3235{
3236#if defined(DEBUG_SUBPAGE)
3237 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3238#endif
3239
3240 return subpage_readlen(opaque, addr, 1);
3241}
3242
3243static void subpage_writew (void *opaque, target_phys_addr_t addr,
3244 uint32_t value)
3245{
3246#if defined(DEBUG_SUBPAGE)
3247 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3248#endif
3249 subpage_writelen(opaque, addr, value, 1);
3250}
3251
3252static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3253{
3254#if defined(DEBUG_SUBPAGE)
3255 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3256#endif
3257
3258 return subpage_readlen(opaque, addr, 2);
3259}
3260
3261static void subpage_writel (void *opaque,
3262 target_phys_addr_t addr, uint32_t value)
3263{
3264#if defined(DEBUG_SUBPAGE)
3265 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3266#endif
3267 subpage_writelen(opaque, addr, value, 2);
3268}
3269
3270static CPUReadMemoryFunc * const subpage_read[] = {
3271 &subpage_readb,
3272 &subpage_readw,
3273 &subpage_readl,
3274};
3275
3276static CPUWriteMemoryFunc * const subpage_write[] = {
3277 &subpage_writeb,
3278 &subpage_writew,
3279 &subpage_writel,
3280};
3281
3282static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3283 ram_addr_t memory, ram_addr_t region_offset)
3284{
3285 int idx, eidx;
3286 unsigned int i;
3287
3288 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3289 return -1;
3290 idx = SUBPAGE_IDX(start);
3291 eidx = SUBPAGE_IDX(end);
3292#if defined(DEBUG_SUBPAGE)
3293 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3294 mmio, start, end, idx, eidx, memory);
3295#endif
3296 memory >>= IO_MEM_SHIFT;
3297 for (; idx <= eidx; idx++) {
3298 for (i = 0; i < 4; i++) {
3299 if (io_mem_read[memory][i]) {
3300 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3301 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3302 mmio->region_offset[idx][0][i] = region_offset;
3303 }
3304 if (io_mem_write[memory][i]) {
3305 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3306 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3307 mmio->region_offset[idx][1][i] = region_offset;
3308 }
3309 }
3310 }
3311
3312 return 0;
3313}
3314
3315static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3316 ram_addr_t orig_memory, ram_addr_t region_offset)
3317{
3318 subpage_t *mmio;
3319 int subpage_memory;
3320
3321 mmio = qemu_mallocz(sizeof(subpage_t));
3322
3323 mmio->base = base;
3324 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3325#if defined(DEBUG_SUBPAGE)
3326 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3327 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3328#endif
3329 *phys = subpage_memory | IO_MEM_SUBPAGE;
3330 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3331 region_offset);
3332
3333 return mmio;
3334}
3335
3336static int get_free_io_mem_idx(void)
3337{
3338 int i;
3339
3340 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3341 if (!io_mem_used[i]) {
3342 io_mem_used[i] = 1;
3343 return i;
3344 }
3345
3346 return -1;
3347}
3348
3349/* mem_read and mem_write are arrays of functions containing the
3350 function to access byte (index 0), word (index 1) and dword (index
3351 2). Functions can be omitted with a NULL function pointer.
3352 If io_index is positive, the corresponding io zone is
3353 modified. If it is zero or negative, a new io zone is allocated. The
3354 return value can be used with cpu_register_physical_memory(). (-1) is
3355 returned on error. */
3356static int cpu_register_io_memory_fixed(int io_index,
3357 CPUReadMemoryFunc * const *mem_read,
3358 CPUWriteMemoryFunc * const *mem_write,
3359 void *opaque)
3360{
3361 int i, subwidth = 0;
3362
3363 if (io_index <= 0) {
3364 io_index = get_free_io_mem_idx();
3365 if (io_index == -1)
3366 return io_index;
3367 } else {
3368 io_index >>= IO_MEM_SHIFT;
3369 if (io_index >= IO_MEM_NB_ENTRIES)
3370 return -1;
3371 }
3372
3373 for(i = 0;i < 3; i++) {
3374 if (!mem_read[i] || !mem_write[i])
3375 subwidth = IO_MEM_SUBWIDTH;
3376 io_mem_read[io_index][i] = mem_read[i];
3377 io_mem_write[io_index][i] = mem_write[i];
3378 }
3379 io_mem_opaque[io_index] = opaque;
3380 return (io_index << IO_MEM_SHIFT) | subwidth;
3381}
3382
3383int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3384 CPUWriteMemoryFunc * const *mem_write,
3385 void *opaque)
3386{
3387 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3388}
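/* MMIO registration sketch (hypothetical device and handler names):
 *     static CPUReadMemoryFunc * const mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *     int idx = cpu_register_io_memory(mydev_read, mydev_write, s);
 *     cpu_register_physical_memory(base, 0x1000, idx);
 * If any width's handler is NULL the returned value carries IO_MEM_SUBWIDTH,
 * which forces registration through the subpage machinery above. */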
3389
3390void cpu_unregister_io_memory(int io_table_address)
3391{
3392 int i;
3393 int io_index = io_table_address >> IO_MEM_SHIFT;
3394
3395 for (i=0;i < 3; i++) {
3396 io_mem_read[io_index][i] = unassigned_mem_read[i];
3397 io_mem_write[io_index][i] = unassigned_mem_write[i];
3398 }
3399 io_mem_opaque[io_index] = NULL;
3400 io_mem_used[io_index] = 0;
3401}
3402
3403static void io_mem_init(void)
3404{
3405 int i;
3406
3407 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3408 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3409 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3410 for (i=0; i<5; i++)
3411 io_mem_used[i] = 1;
3412
3413 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3414 watch_mem_write, NULL);
3415}
3416
3417#endif /* !defined(CONFIG_USER_ONLY) */
3418
3419/* physical memory access (slow version, mainly for debug) */
3420#if defined(CONFIG_USER_ONLY)
3421void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3422 int len, int is_write)
3423{
3424 int l, flags;
3425 target_ulong page;
3426 void * p;
3427
3428 while (len > 0) {
3429 page = addr & TARGET_PAGE_MASK;
3430 l = (page + TARGET_PAGE_SIZE) - addr;
3431 if (l > len)
3432 l = len;
3433 flags = page_get_flags(page);
3434 if (!(flags & PAGE_VALID))
3435 return;
3436 if (is_write) {
3437 if (!(flags & PAGE_WRITE))
3438 return;
3439 /* XXX: this code should not depend on lock_user */
3440 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3441 /* FIXME - should this return an error rather than just fail? */
3442 return;
3443 memcpy(p, buf, l);
3444 unlock_user(p, addr, l);
3445 } else {
3446 if (!(flags & PAGE_READ))
3447 return;
3448 /* XXX: this code should not depend on lock_user */
3449 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3450 /* FIXME - should this return an error rather than just fail? */
3451 return;
3452 memcpy(buf, p, l);
3453 unlock_user(p, addr, 0);
3454 }
3455 len -= l;
3456 buf += l;
3457 addr += l;
3458 }
3459}
3460
3461#else
3462void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3463 int len, int is_write)
3464{
3465 int l, io_index;
3466 uint8_t *ptr;
3467 uint32_t val;
3468 target_phys_addr_t page;
3469 unsigned long pd;
3470 PhysPageDesc *p;
3471
3472 while (len > 0) {
3473 page = addr & TARGET_PAGE_MASK;
3474 l = (page + TARGET_PAGE_SIZE) - addr;
3475 if (l > len)
3476 l = len;
3477 p = phys_page_find(page >> TARGET_PAGE_BITS);
3478 if (!p) {
3479 pd = IO_MEM_UNASSIGNED;
3480 } else {
3481 pd = p->phys_offset;
3482 }
3483
3484 if (is_write) {
3485 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3486 target_phys_addr_t addr1 = addr;
3487 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3488 if (p)
3489 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3490 /* XXX: could force cpu_single_env to NULL to avoid
3491 potential bugs */
3492 if (l >= 4 && ((addr1 & 3) == 0)) {
3493 /* 32 bit write access */
3494#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3495 val = ldl_p(buf);
3496#else
3497 val = *(const uint32_t *)buf;
3498#endif
3499 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3500 l = 4;
3501 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3502 /* 16 bit write access */
3503#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3504 val = lduw_p(buf);
3505#else
3506 val = *(const uint16_t *)buf;
3507#endif
3508 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3509 l = 2;
3510 } else {
3511 /* 8 bit write access */
3512#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3513 val = ldub_p(buf);
3514#else
3515 val = *(const uint8_t *)buf;
3516#endif
3517 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3518 l = 1;
3519 }
3520 } else {
3521 unsigned long addr1;
3522 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3523 /* RAM case */
3524#ifdef VBOX
3525 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3526#else
3527 ptr = qemu_get_ram_ptr(addr1);
3528 memcpy(ptr, buf, l);
3529#endif
3530 if (!cpu_physical_memory_is_dirty(addr1)) {
3531 /* invalidate code */
3532 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3533 /* set dirty bit */
3534#ifdef VBOX
3535 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3536#endif
3537 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3538 (0xff & ~CODE_DIRTY_FLAG);
3539 }
3540 }
3541 } else {
3542 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3543 !(pd & IO_MEM_ROMD)) {
3544 target_phys_addr_t addr1 = addr;
3545 /* I/O case */
3546 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3547 if (p)
3548 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3549 if (l >= 4 && ((addr1 & 3) == 0)) {
3550 /* 32 bit read access */
3551 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3552#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3553 stl_p(buf, val);
3554#else
3555 *(uint32_t *)buf = val;
3556#endif
3557 l = 4;
3558 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3559 /* 16 bit read access */
3560 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3561#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3562 stw_p(buf, val);
3563#else
3564 *(uint16_t *)buf = val;
3565#endif
3566 l = 2;
3567 } else {
3568 /* 8 bit read access */
3569 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3570#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3571 stb_p(buf, val);
3572#else
3573 *(uint8_t *)buf = val;
3574#endif
3575 l = 1;
3576 }
3577 } else {
3578 /* RAM case */
3579#ifdef VBOX
3580 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3581#else
3582 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3583 (addr & ~TARGET_PAGE_MASK);
3584 memcpy(buf, ptr, l);
3585#endif
3586 }
3587 }
3588 len -= l;
3589 buf += l;
3590 addr += l;
3591 }
3592}
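/* Usage sketch: most callers go through the cpu_physical_memory_read()/
 * cpu_physical_memory_write() wrappers (also used further down in this
 * file), e.g. fetching a guest descriptor table entry:
 *     uint8_t desc[8];
 *     cpu_physical_memory_read(gdt_base + (selector & ~7), desc, 8);
 * (gdt_base and selector are hypothetical.) The access is split per page and
 * dispatched to RAM or to the registered I/O callbacks according to the
 * page's phys_offset. */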
3593
3594#ifndef VBOX
3595
3596/* used for ROM loading: can write in RAM and ROM */
3597void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3598 const uint8_t *buf, int len)
3599{
3600 int l;
3601 uint8_t *ptr;
3602 target_phys_addr_t page;
3603 unsigned long pd;
3604 PhysPageDesc *p;
3605
3606 while (len > 0) {
3607 page = addr & TARGET_PAGE_MASK;
3608 l = (page + TARGET_PAGE_SIZE) - addr;
3609 if (l > len)
3610 l = len;
3611 p = phys_page_find(page >> TARGET_PAGE_BITS);
3612 if (!p) {
3613 pd = IO_MEM_UNASSIGNED;
3614 } else {
3615 pd = p->phys_offset;
3616 }
3617
3618 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3619 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3620 !(pd & IO_MEM_ROMD)) {
3621 /* do nothing */
3622 } else {
3623 unsigned long addr1;
3624 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3625 /* ROM/RAM case */
3626 ptr = qemu_get_ram_ptr(addr1);
3627 memcpy(ptr, buf, l);
3628 }
3629 len -= l;
3630 buf += l;
3631 addr += l;
3632 }
3633}
3634
3635typedef struct {
3636 void *buffer;
3637 target_phys_addr_t addr;
3638 target_phys_addr_t len;
3639} BounceBuffer;
3640
3641static BounceBuffer bounce;
3642
3643typedef struct MapClient {
3644 void *opaque;
3645 void (*callback)(void *opaque);
3646 QLIST_ENTRY(MapClient) link;
3647} MapClient;
3648
3649static QLIST_HEAD(map_client_list, MapClient) map_client_list
3650 = QLIST_HEAD_INITIALIZER(map_client_list);
3651
3652void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3653{
3654 MapClient *client = qemu_malloc(sizeof(*client));
3655
3656 client->opaque = opaque;
3657 client->callback = callback;
3658 QLIST_INSERT_HEAD(&map_client_list, client, link);
3659 return client;
3660}
3661
3662void cpu_unregister_map_client(void *_client)
3663{
3664 MapClient *client = (MapClient *)_client;
3665
3666 QLIST_REMOVE(client, link);
3667 qemu_free(client);
3668}
3669
3670static void cpu_notify_map_clients(void)
3671{
3672 MapClient *client;
3673
3674 while (!QLIST_EMPTY(&map_client_list)) {
3675 client = QLIST_FIRST(&map_client_list);
3676 client->callback(client->opaque);
3677 cpu_unregister_map_client(client);
3678 }
3679}
3680
3681/* Map a physical memory region into a host virtual address.
3682 * May map a subset of the requested range, given by and returned in *plen.
3683 * May return NULL if resources needed to perform the mapping are exhausted.
3684 * Use only for reads OR writes - not for read-modify-write operations.
3685 * Use cpu_register_map_client() to know when retrying the map operation is
3686 * likely to succeed.
3687 */
3688void *cpu_physical_memory_map(target_phys_addr_t addr,
3689 target_phys_addr_t *plen,
3690 int is_write)
3691{
3692 target_phys_addr_t len = *plen;
3693 target_phys_addr_t done = 0;
3694 int l;
3695 uint8_t *ret = NULL;
3696 uint8_t *ptr;
3697 target_phys_addr_t page;
3698 unsigned long pd;
3699 PhysPageDesc *p;
3700 unsigned long addr1;
3701
3702 while (len > 0) {
3703 page = addr & TARGET_PAGE_MASK;
3704 l = (page + TARGET_PAGE_SIZE) - addr;
3705 if (l > len)
3706 l = len;
3707 p = phys_page_find(page >> TARGET_PAGE_BITS);
3708 if (!p) {
3709 pd = IO_MEM_UNASSIGNED;
3710 } else {
3711 pd = p->phys_offset;
3712 }
3713
3714 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3715 if (done || bounce.buffer) {
3716 break;
3717 }
3718 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3719 bounce.addr = addr;
3720 bounce.len = l;
3721 if (!is_write) {
3722 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3723 }
3724 ptr = bounce.buffer;
3725 } else {
3726 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3727 ptr = qemu_get_ram_ptr(addr1);
3728 }
3729 if (!done) {
3730 ret = ptr;
3731 } else if (ret + done != ptr) {
3732 break;
3733 }
3734
3735 len -= l;
3736 addr += l;
3737 done += l;
3738 }
3739 *plen = done;
3740 return ret;
3741}
3742
3743/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3744 * Will also mark the memory as dirty if is_write == 1. access_len gives
3745 * the amount of memory that was actually read or written by the caller.
3746 */
3747void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3748 int is_write, target_phys_addr_t access_len)
3749{
3750 if (buffer != bounce.buffer) {
3751 if (is_write) {
3752 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3753 while (access_len) {
3754 unsigned l;
3755 l = TARGET_PAGE_SIZE;
3756 if (l > access_len)
3757 l = access_len;
3758 if (!cpu_physical_memory_is_dirty(addr1)) {
3759 /* invalidate code */
3760 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3761 /* set dirty bit */
3762 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3763 (0xff & ~CODE_DIRTY_FLAG);
3764 }
3765 addr1 += l;
3766 access_len -= l;
3767 }
3768 }
3769 return;
3770 }
3771 if (is_write) {
3772 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3773 }
3774 qemu_vfree(bounce.buffer);
3775 bounce.buffer = NULL;
3776 cpu_notify_map_clients();
3777}
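/* DMA sketch (hypothetical caller): map for write, fill, then unmap with the
 * length actually produced so dirty bits get updated:
 *     target_phys_addr_t len = size;
 *     void *p = cpu_physical_memory_map(gpa, &len, 1);   (1 = is_write)
 *     if (p) {
 *         ... device stores up to 'len' bytes into p ...
 *         cpu_physical_memory_unmap(p, len, 1, bytes_written);
 *     }
 * When the target range is MMIO a single global bounce buffer is used, so
 * only one such mapping can be outstanding at a time. */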
3778
3779#endif /* !VBOX */
3780
3781/* warning: addr must be aligned */
3782uint32_t ldl_phys(target_phys_addr_t addr)
3783{
3784 int io_index;
3785 uint8_t *ptr;
3786 uint32_t val;
3787 unsigned long pd;
3788 PhysPageDesc *p;
3789
3790 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3791 if (!p) {
3792 pd = IO_MEM_UNASSIGNED;
3793 } else {
3794 pd = p->phys_offset;
3795 }
3796
3797 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3798 !(pd & IO_MEM_ROMD)) {
3799 /* I/O case */
3800 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3801 if (p)
3802 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3803 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3804 } else {
3805 /* RAM case */
3806#ifndef VBOX
3807 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3808 (addr & ~TARGET_PAGE_MASK);
3809 val = ldl_p(ptr);
3810#else
3811 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3812#endif
3813 }
3814 return val;
3815}
3816
3817/* warning: addr must be aligned */
3818uint64_t ldq_phys(target_phys_addr_t addr)
3819{
3820 int io_index;
3821 uint8_t *ptr;
3822 uint64_t val;
3823 unsigned long pd;
3824 PhysPageDesc *p;
3825
3826 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3827 if (!p) {
3828 pd = IO_MEM_UNASSIGNED;
3829 } else {
3830 pd = p->phys_offset;
3831 }
3832
3833 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3834 !(pd & IO_MEM_ROMD)) {
3835 /* I/O case */
3836 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3837 if (p)
3838 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3839#ifdef TARGET_WORDS_BIGENDIAN
3840 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3841 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3842#else
3843 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3844 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3845#endif
3846 } else {
3847 /* RAM case */
3848#ifndef VBOX
3849 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3850 (addr & ~TARGET_PAGE_MASK);
3851 val = ldq_p(ptr);
3852#else
3853 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3854#endif
3855 }
3856 return val;
3857}
3858
3859/* XXX: optimize */
3860uint32_t ldub_phys(target_phys_addr_t addr)
3861{
3862 uint8_t val;
3863 cpu_physical_memory_read(addr, &val, 1);
3864 return val;
3865}
3866
3867/* XXX: optimize */
3868uint32_t lduw_phys(target_phys_addr_t addr)
3869{
3870 uint16_t val;
3871 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3872 return tswap16(val);
3873}
3874
3875/* warning: addr must be aligned. The ram page is not marked as dirty
3876 and the code inside is not invalidated. It is useful if the dirty
3877 bits are used to track modified PTEs */
3878void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3879{
3880 int io_index;
3881 uint8_t *ptr;
3882 unsigned long pd;
3883 PhysPageDesc *p;
3884
3885 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3886 if (!p) {
3887 pd = IO_MEM_UNASSIGNED;
3888 } else {
3889 pd = p->phys_offset;
3890 }
3891
3892 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3893 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3894 if (p)
3895 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3896 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3897 } else {
3898#ifndef VBOX
3899 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3900 ptr = qemu_get_ram_ptr(addr1);
3901 stl_p(ptr, val);
3902#else
3903 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3904#endif
3905
3906#ifndef VBOX
3907 if (unlikely(in_migration)) {
3908 if (!cpu_physical_memory_is_dirty(addr1)) {
3909 /* invalidate code */
3910 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3911 /* set dirty bit */
3912 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3913 (0xff & ~CODE_DIRTY_FLAG);
3914 }
3915 }
3916#endif /* !VBOX */
3917 }
3918}
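
/* Illustrative usage sketch (kept under #if 0, not built, and not taken from
   this file): a target's page table walker could set the accessed bit of a
   guest PTE with stl_phys_notdirty() so that the write neither marks the RAM
   page dirty nor invalidates translated code.  The helper name and the 0x20
   accessed-bit mask are placeholders (0x20 matches the x86 PG_ACCESSED_MASK
   convention). */
#if 0
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & 0x20)) {
        pte |= 0x20;
        stl_phys_notdirty(pte_addr, pte);   /* no dirty/SMC bookkeeping */
    }
}
#endif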
3919
3920void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3921{
3922 int io_index;
3923 uint8_t *ptr;
3924 unsigned long pd;
3925 PhysPageDesc *p;
3926
3927 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3928 if (!p) {
3929 pd = IO_MEM_UNASSIGNED;
3930 } else {
3931 pd = p->phys_offset;
3932 }
3933
3934 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3935 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3936 if (p)
3937 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3938#ifdef TARGET_WORDS_BIGENDIAN
3939 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3940 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3941#else
3942 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3943 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3944#endif
3945 } else {
3946#ifndef VBOX
3947 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3948 (addr & ~TARGET_PAGE_MASK);
3949 stq_p(ptr, val);
3950#else
3951 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3952#endif
3953 }
3954}
3955
3956/* warning: addr must be aligned */
3957void stl_phys(target_phys_addr_t addr, uint32_t val)
3958{
3959 int io_index;
3960 uint8_t *ptr;
3961 unsigned long pd;
3962 PhysPageDesc *p;
3963
3964 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3965 if (!p) {
3966 pd = IO_MEM_UNASSIGNED;
3967 } else {
3968 pd = p->phys_offset;
3969 }
3970
3971 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3972 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3973 if (p)
3974 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3975 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3976 } else {
3977 unsigned long addr1;
3978 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3979 /* RAM case */
3980#ifndef VBOX
3981 ptr = qemu_get_ram_ptr(addr1);
3982 stl_p(ptr, val);
3983#else
3984 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3985#endif
3986 if (!cpu_physical_memory_is_dirty(addr1)) {
3987 /* invalidate code */
3988 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3989 /* set dirty bit */
3990#ifdef VBOX
3991 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3992#endif
3993 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3994 (0xff & ~CODE_DIRTY_FLAG);
3995 }
3996 }
3997}
3998
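/* The three helpers below simply funnel through cpu_physical_memory_write():
   tswap16()/tswap64() first convert the value from host to guest byte order,
   and the generic write path then handles RAM vs. MMIO as well as the
   dirty/SMC tracking. */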
3999/* XXX: optimize */
4000void stb_phys(target_phys_addr_t addr, uint32_t val)
4001{
4002 uint8_t v = val;
4003 cpu_physical_memory_write(addr, &v, 1);
4004}
4005
4006/* XXX: optimize */
4007void stw_phys(target_phys_addr_t addr, uint32_t val)
4008{
4009 uint16_t v = tswap16(val);
4010 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
4011}
4012
4013/* XXX: optimize */
4014void stq_phys(target_phys_addr_t addr, uint64_t val)
4015{
4016 val = tswap64(val);
4017 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4018}
4019
4020#endif
4021
4022#ifndef VBOX
4023/* virtual memory access for debug (includes writing to ROM) */
4024int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4025 uint8_t *buf, int len, int is_write)
4026{
4027 int l;
4028 target_phys_addr_t phys_addr;
4029 target_ulong page;
4030
4031 while (len > 0) {
4032 page = addr & TARGET_PAGE_MASK;
4033 phys_addr = cpu_get_phys_page_debug(env, page);
4034 /* if no physical page mapped, return an error */
4035 if (phys_addr == -1)
4036 return -1;
4037 l = (page + TARGET_PAGE_SIZE) - addr;
4038 if (l > len)
4039 l = len;
4040 phys_addr += (addr & ~TARGET_PAGE_MASK);
4041#if !defined(CONFIG_USER_ONLY)
4042 if (is_write)
4043 cpu_physical_memory_write_rom(phys_addr, buf, l);
4044 else
4045#endif
4046 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4047 len -= l;
4048 buf += l;
4049 addr += l;
4050 }
4051 return 0;
4052}
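
/* Usage sketch (kept under #if 0, not built, and not part of the original
   source): a debug front end could read a 32-bit value from guest virtual
   memory like this.  "env" is assumed to be the CPU whose page tables should
   be used for the translation, and the helper name is made up for
   illustration. */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *pvalue)
{
    uint8_t buf[4];
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;                  /* no physical page mapped at vaddr */
    *pvalue = ldl_p(buf);           /* interpret in guest byte order */
    return 0;
}
#endif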
4053#endif /* !VBOX */
4054
4055/* in deterministic execution mode, instructions that perform device I/O
4056   must be at the end of the TB */
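/* The recovery works by locating the TB that contains the host return
   address, rolling the CPU state back to the I/O instruction, counting how
   many guest instructions of the TB have already executed, and then
   regenerating a TB with CF_LAST_IO that ends right after that instruction
   before resuming execution. */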
4057void cpu_io_recompile(CPUState *env, void *retaddr)
4058{
4059 TranslationBlock *tb;
4060 uint32_t n, cflags;
4061 target_ulong pc, cs_base;
4062 uint64_t flags;
4063
4064 tb = tb_find_pc((unsigned long)retaddr);
4065 if (!tb) {
4066 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4067 retaddr);
4068 }
4069 n = env->icount_decr.u16.low + tb->icount;
4070 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4071 /* Calculate how many instructions had been executed before the fault
4072 occurred. */
4073 n = n - env->icount_decr.u16.low;
4074 /* Generate a new TB ending on the I/O insn. */
4075 n++;
4076 /* On MIPS and SH, delay slot instructions can only be restarted if
4077 they were already the first instruction in the TB. If this is not
4078 the first instruction in a TB then re-execute the preceding
4079 branch. */
4080#if defined(TARGET_MIPS)
4081 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4082 env->active_tc.PC -= 4;
4083 env->icount_decr.u16.low++;
4084 env->hflags &= ~MIPS_HFLAG_BMASK;
4085 }
4086#elif defined(TARGET_SH4)
4087 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4088 && n > 1) {
4089 env->pc -= 2;
4090 env->icount_decr.u16.low++;
4091 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4092 }
4093#endif
4094 /* This should never happen. */
4095 if (n > CF_COUNT_MASK)
4096 cpu_abort(env, "TB too big during recompile");
4097
4098 cflags = n | CF_LAST_IO;
4099 pc = tb->pc;
4100 cs_base = tb->cs_base;
4101 flags = tb->flags;
4102 tb_phys_invalidate(tb, -1);
4103 /* FIXME: In theory this could raise an exception. In practice
4104 we have already translated the block once so it's probably ok. */
4105 tb_gen_code(env, pc, cs_base, flags, cflags);
4106 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4107 the first in the TB) then we end up generating a whole new TB and
4108 repeating the fault, which is horribly inefficient.
4109 Better would be to execute just this insn uncached, or generate a
4110 second new TB. */
4111 cpu_resume_from_signal(env, NULL);
4112}
4113
4114#ifndef VBOX
4115void dump_exec_info(FILE *f,
4116 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4117{
4118 int i, target_code_size, max_target_code_size;
4119 int direct_jmp_count, direct_jmp2_count, cross_page;
4120 TranslationBlock *tb;
4121
4122 target_code_size = 0;
4123 max_target_code_size = 0;
4124 cross_page = 0;
4125 direct_jmp_count = 0;
4126 direct_jmp2_count = 0;
4127 for(i = 0; i < nb_tbs; i++) {
4128 tb = &tbs[i];
4129 target_code_size += tb->size;
4130 if (tb->size > max_target_code_size)
4131 max_target_code_size = tb->size;
4132 if (tb->page_addr[1] != -1)
4133 cross_page++;
4134 if (tb->tb_next_offset[0] != 0xffff) {
4135 direct_jmp_count++;
4136 if (tb->tb_next_offset[1] != 0xffff) {
4137 direct_jmp2_count++;
4138 }
4139 }
4140 }
4141 /* XXX: avoid using doubles ? */
4142 cpu_fprintf(f, "Translation buffer state:\n");
4143 cpu_fprintf(f, "gen code size %ld/%ld\n",
4144 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4145 cpu_fprintf(f, "TB count %d/%d\n",
4146 nb_tbs, code_gen_max_blocks);
4147 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4148 nb_tbs ? target_code_size / nb_tbs : 0,
4149 max_target_code_size);
4150 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4151 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4152 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4153 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4154 cross_page,
4155 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4156 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4157 direct_jmp_count,
4158 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4159 direct_jmp2_count,
4160 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4161 cpu_fprintf(f, "\nStatistics:\n");
4162 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4163 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4164 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4165 tcg_dump_info(f, cpu_fprintf);
4166}
4167#endif /* !VBOX */
4168
4169#if !defined(CONFIG_USER_ONLY)
4170
4171#define MMUSUFFIX _cmmu
4172#define GETPC() NULL
4173#define env cpu_single_env
4174#define SOFTMMU_CODE_ACCESS
4175
4176#define SHIFT 0
4177#include "softmmu_template.h"
4178
4179#define SHIFT 1
4180#include "softmmu_template.h"
4181
4182#define SHIFT 2
4183#include "softmmu_template.h"
4184
4185#define SHIFT 3
4186#include "softmmu_template.h"
4187
4188#undef env
4189
4190#endif