VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@14437

Last change on this file since 14437 was 14346, checked in by vboxsync, 16 years ago

Implemented check for monitored page accesses, fixing TSS out of sync problem with VA in TLB. Enabled VA in TLB by default in new REM

  • Property svn:eol-style set to native
File size: 110.6 KB
 
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#include "config.h"
#ifndef VBOX
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#else /* VBOX */
# include <stdlib.h>
# include <stdio.h>
# include <iprt/alloc.h>
# include <iprt/string.h>
# include <iprt/param.h>
# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
#endif /* VBOX */

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#ifndef VBOX
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;

#else /* VBOX */
extern uint8_t* code_gen_prologue;
#endif /* VBOX */

static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#ifndef VBOX
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif
#else /* VBOX */
RTGCPHYS phys_ram_size;
/* we have memory ranges (the high PC-BIOS mapping) which
   causes some pages to fall outside the dirty map here. */
uint32_t phys_ram_dirty_size;
#endif /* VBOX */
#if !defined(VBOX)
uint8_t *phys_ram_base;
#endif
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;
/* Current instruction counter. While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

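/* Editor's illustration (not in the original file): with the usual
   TARGET_PAGE_BITS = 12 and L2_BITS = 10, L1_BITS = 32 - 10 - 12 = 10,
   so a 32-bit address splits into a 10-bit L1 index, a 10-bit L2 index
   and a 12-bit page offset.  For address 0x12345678:

       index = 0x12345678 >> TARGET_PAGE_BITS;   // 0x12345
       l1    = index >> L2_BITS;                 // 0x48
       l2    = index & (L2_SIZE - 1);            // 0x345
*/
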
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

#ifndef VBOX
/* log support */
static const char *logfilename = "/tmp/qemu.log";
#endif /* !VBOX */
FILE *logfile;
int loglevel;
#ifndef VBOX
static int log_append = 0;
#endif

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
#ifndef VBOX
static int tb_phys_invalidate_count;
#endif /* !VBOX */

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;


#ifndef VBOX
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
#else // VBOX
static void map_exec(void *addr, long size)
{
    RTMemProtect(addr, size,
                 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
}
#endif
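
/* Editor's note (not in the original file): map_exec() changes the
   protection of an existing allocation in place, it does not allocate.
   A minimal sketch, with a hypothetical buffer name:

       static uint8_t jit_buf[4096];
       map_exec(jit_buf, sizeof(jit_buf));   // jit_buf may now hold code

   The POSIX variant above rounds the range out to whole host pages
   because mprotect() only operates at page granularity. */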

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef VBOX
    RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
                 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    qemu_real_host_page_size = PAGE_SIZE;
#else /* !VBOX */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
#endif /* !VBOX */

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
#ifndef VBOX
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
#else
    while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
#endif
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#ifdef VBOX
    /* We use other means to set reserved bit on our pages */
#else
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
#endif
}

#ifndef VBOX
static inline PageDesc **page_l1_map(target_ulong index)
#else
DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
#endif
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM. For 32-bit targets we have already
       excluded high addresses. */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

#ifndef VBOX
static inline PageDesc *page_find_alloc(target_ulong index)
#else
DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
#endif
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse. */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}
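
/* Editor's note (not in the original file): the L2 array of PageDescs is
   allocated lazily on first touch.  In user mode it is mapped with mmap()
   rather than qemu_malloc() because the allocator itself could fault and
   recurse into page_find_alloc(); the h2g()/page_set_flags() step then
   marks the freshly mapped host pages PAGE_RESERVED so the guest cannot
   map over them. */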

#ifndef VBOX
static inline PageDesc *page_find(target_ulong index)
#else
DECLINLINE(PageDesc *) page_find(target_ulong index)
#endif
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
    pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
    if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
        remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
    return pd;
#else
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
#endif
}

#ifndef VBOX
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
#else
DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
#endif
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#ifdef VBOX
/** @todo nike: isn't 32M too much ? */
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* VBox allocates codegen buffer dynamically */
#ifndef VBOX
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#ifdef VBOX
    code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);

    if (!code_gen_buffer) {
        LogRel(("REM: failed allocate codegen buffer %lld\n",
                code_gen_buffer_size));
        return;
    }
#else //!VBOX
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
#endif /* !VBOX */
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
#ifndef VBOX
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
#else
    map_exec(code_gen_prologue, _1K);
#endif

    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
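
/* Editor's note (not in the original file): code_gen_buffer_max_size is a
   flush threshold, not the buffer end.  Reserving one maximal block
   (code_gen_max_block_size()) below the real end guarantees that a TB
   whose translation starts just under the threshold still fits; e.g. if
   the maximum block size were 64 KB, a 32 MB buffer would be flushed once
   the write pointer passes 32 MB - 64 KB. */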

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#ifndef VBOX
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif
#endif //!VBOX

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
#ifndef VBOX
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
#endif // !VBOX
}

#ifndef VBOX
static inline void invalidate_page_bitmap(PageDesc *p)
#else
DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
#endif
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
#endif // DEBUG_TB_CHECK

/* invalidate one TB */
#ifndef VBOX
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
#else
DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                           int next_offset)
#endif
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

#ifndef VBOX
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
#else
DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
#endif
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
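
/* Editor's note (not in the original file): the lists above keep a 2-bit
   tag in the low bits of each TranslationBlock pointer (TBs are at least
   4-byte aligned, so the bits are free):

       n1  = (long)tb1 & 3;                          // extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // recover the pointer

   In page lists the tag is the page slot (0 or 1) the TB occupies on that
   page; in jump lists 0/1 select jmp_next[n], and 2 marks the list head. */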

#ifndef VBOX
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
#else
DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
#endif
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
#ifndef VBOX
static inline void tb_reset_jump(TranslationBlock *tb, int n)
#else
DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
#endif
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

#ifndef VBOX
    tb_phys_invalidate_count++;
#endif
}


#ifdef VBOX
void tb_invalidate_virt(CPUState *env, uint32_t eip)
{
# if 1
    tb_flush(env);
# else
    uint8_t *cs_base, *pc;
    unsigned int flags, h, phys_pc;
    TranslationBlock *tb, **ptb;

    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + eip;

    tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                 flags);

    if(tb)
    {
# ifdef DEBUG
        printf("invalidating TB (%08X) at %08X\n", tb, eip);
# endif
        tb_invalidate(tb);
        //Note: this will leak TBs, but the whole cache will be flushed
        //      when it happens too often
        tb->pc = 0;
        tb->cs_base = 0;
        tb->flags = 0;
    }
# endif
}

# ifdef VBOX_STRICT
/**
 * Gets the page offset.
 */
unsigned long get_phys_page_offset(target_ulong addr)
{
    PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
    return p ? p->phys_offset : 0;
}
# endif /* VBOX_STRICT */
#endif /* VBOX */

#ifndef VBOX
static inline void set_bits(uint8_t *tab, int start, int len)
#else
DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
#endif
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
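
/* Editor's worked example (not in the original file): set_bits() marks
   the bit range [start, start + len) in a byte-indexed bitmap.  For
   set_bits(tab, 3, 7) the range is bits 3..9, which crosses a byte
   boundary: tab[0] |= 0xff << 3 (= 0xf8) sets bits 3..7, and
   tab[1] |= ~(0xff << 2) (= 0x03) sets bits 8..9. */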

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
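
/* Editor's note (not in the original file): a TB is contiguous in guest
   virtual memory but may straddle a page boundary and thus cover two
   discontiguous physical pages.  E.g. pc = 0x7ffe with tb->size = 8 ends
   at 0x8005, so virt_page2 = 0x8000 differs from pc & TARGET_PAGE_MASK =
   0x7000 and the TB gets linked on both pages (page_addr[0] and [1]). */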

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}


/* len must be <= 8 and start must be a multiple of len */
#ifndef VBOX
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
#else
DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
#endif
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}


#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
#ifndef VBOX
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
#else
DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
                               unsigned int n, target_ulong page_addr)
#endif
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
#ifndef VBOX
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
#else
        (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
#endif
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
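
/* Editor's note (not in the original file): the binary search works
   because tbs[] is filled in allocation order and code_gen_ptr only grows
   between flushes, so tc_ptr values increase with the index.  On a miss
   the loop exits with tbs[m_max] being the last TB starting at or below
   tc_ptr, i.e. the block containing that host address. */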

static void tb_reset_jump_recursive(TranslationBlock *tb);

#ifndef VBOX
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
#else
DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
#endif
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

#ifndef VBOX
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
#endif /* !VBOX */

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
#ifdef VBOX
    VM_ASSERT_EMT(env->pVM);
    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
#else /* !VBOX */
    /* FIXME: This is probably not threadsafe. A different thread could
       be in the middle of a read-modify-write operation. */
    env->interrupt_request |= mask;
#endif /* !VBOX */
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
           an async event happened and we need to process it. */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
#ifdef VBOX
    /*
     * Note: the current implementation can be executed by another thread without problems; make sure this remains true
     *       for future changes!
     */
    ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
#else /* !VBOX */
    env->interrupt_request &= ~mask;
#endif /* !VBOX */
}

#ifndef VBOX
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
#endif /* !VBOX */

#ifndef VBOX /* VBOX: we have our own routine. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
#endif /* !VBOX */

#ifndef VBOX
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#endif

#if !defined(CONFIG_USER_ONLY)

#ifndef VBOX
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
#else
DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
#endif
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

#ifdef VBOX
    /* inform raw mode about TLB page flush */
    remR3FlushPage(env, addr);
#endif /* VBOX */
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef VBOX
    /* inform raw mode about TLB flush */
    remR3FlushTLB(env, flush_global);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

#ifndef VBOX
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
#else
DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
#endif
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
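
/* Editor's worked example (not in the original file): the TLB is a
   direct-mapped array indexed by guest page number modulo CPU_TLB_SIZE.
   Assuming TARGET_PAGE_BITS = 12 and CPU_TLB_SIZE = 256, addr =
   0x12345678 belongs to page 0x12345 and lands in slot 0x12345 & 0xff =
   0x45, so a single entry per MMU mode has to be compared and flushed. */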
1940
1941/* update the TLBs so that writes to code in the virtual page 'addr'
1942 can be detected */
1943static void tlb_protect_code(ram_addr_t ram_addr)
1944{
1945 cpu_physical_memory_reset_dirty(ram_addr,
1946 ram_addr + TARGET_PAGE_SIZE,
1947 CODE_DIRTY_FLAG);
1948#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1949 /** @todo Retest this? This function has changed... */
1950 remR3ProtectCode(cpu_single_env, ram_addr);
1951#endif
1952}
1953
1954/* update the TLB so that writes in physical page 'phys_addr' are no longer
1955 tested for self modifying code */
1956static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1957 target_ulong vaddr)
1958{
1959#ifdef VBOX
1960 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1961#endif
1962 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1963}
1964
1965#ifndef VBOX
1966static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1967 unsigned long start, unsigned long length)
1968#else
1969DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1970 unsigned long start, unsigned long length)
1971#endif
1972{
1973 unsigned long addr;
1974 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1975 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1976 if ((addr - start) < length) {
1977 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1978 }
1979 }
1980}
1981
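/* Clear the given dirty flag bits for every page in [start, end) and patch
   each CPU's TLB write entries so the next store to the range goes through
   the IO_MEM_NOTDIRTY slow path and sets the bits again. */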
1982void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1983 int dirty_flags)
1984{
1985 CPUState *env;
1986 unsigned long length, start1;
1987 int i, mask, len;
1988 uint8_t *p;
1989
1990 start &= TARGET_PAGE_MASK;
1991 end = TARGET_PAGE_ALIGN(end);
1992
1993 length = end - start;
1994 if (length == 0)
1995 return;
1996 len = length >> TARGET_PAGE_BITS;
1997#ifdef USE_KQEMU
1998 /* XXX: should not depend on cpu context */
1999 env = first_cpu;
2000 if (env->kqemu_enabled) {
2001 ram_addr_t addr;
2002 addr = start;
2003 for(i = 0; i < len; i++) {
2004 kqemu_set_notdirty(env, addr);
2005 addr += TARGET_PAGE_SIZE;
2006 }
2007 }
2008#endif
2009 mask = ~dirty_flags;
2010 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2011#ifdef VBOX
2012 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2013#endif
2014 for(i = 0; i < len; i++)
2015 p[i] &= mask;
2016
2017 /* we modify the TLB cache so that the dirty bit will be set again
2018 when accessing the range */
2019#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2020 start1 = start;
2021#elif !defined(VBOX)
2022 start1 = start + (unsigned long)phys_ram_base;
2023#else
2024 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
2025#endif
2026 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2027 for(i = 0; i < CPU_TLB_SIZE; i++)
2028 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2029 for(i = 0; i < CPU_TLB_SIZE; i++)
2030 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2031#if (NB_MMU_MODES >= 3)
2032 for(i = 0; i < CPU_TLB_SIZE; i++)
2033 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2034#if (NB_MMU_MODES == 4)
2035 for(i = 0; i < CPU_TLB_SIZE; i++)
2036 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2037#endif
2038#endif
2039 }
2040}
2041
2042#ifndef VBOX
2043int cpu_physical_memory_set_dirty_tracking(int enable)
2044{
2045 in_migration = enable;
2046 return 0;
2047}
2048
2049int cpu_physical_memory_get_dirty_tracking(void)
2050{
2051 return in_migration;
2052}
2053#endif
2054
2055#ifndef VBOX
2056static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2057#else
2058DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry)
2059#endif
2060{
2061 ram_addr_t ram_addr;
2062
2063 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2064 /* RAM case */
2065#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2066 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2067#elif !defined(VBOX)
2068 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2069 tlb_entry->addend - (unsigned long)phys_ram_base;
2070#else
2071 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void*)((tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend));
2072#endif
2073 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2074 tlb_entry->addr_write |= TLB_NOTDIRTY;
2075 }
2076 }
2077}
2078
2079/* update the TLB according to the current state of the dirty bits */
2080void cpu_tlb_update_dirty(CPUState *env)
2081{
2082 int i;
2083 for(i = 0; i < CPU_TLB_SIZE; i++)
2084 tlb_update_dirty(&env->tlb_table[0][i]);
2085 for(i = 0; i < CPU_TLB_SIZE; i++)
2086 tlb_update_dirty(&env->tlb_table[1][i]);
2087#if (NB_MMU_MODES >= 3)
2088 for(i = 0; i < CPU_TLB_SIZE; i++)
2089 tlb_update_dirty(&env->tlb_table[2][i]);
2090#if (NB_MMU_MODES == 4)
2091 for(i = 0; i < CPU_TLB_SIZE; i++)
2092 tlb_update_dirty(&env->tlb_table[3][i]);
2093#endif
2094#endif
2095}
2096
2097#ifndef VBOX
2098static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2099#else
2100DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2101#endif
2102{
2103 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2104 tlb_entry->addr_write = vaddr;
2105}
2106
2107
2108/* update the TLB corresponding to virtual page vaddr and phys addr
2109 addr so that it is no longer dirty */
2110#ifndef VBOX
2111static inline void tlb_set_dirty(CPUState *env,
2112 unsigned long addr, target_ulong vaddr)
2113#else
2114DECLINLINE(void) tlb_set_dirty(CPUState *env,
2115 unsigned long addr, target_ulong vaddr)
2116#endif
2117{
2118 int i;
2119
2120 addr &= TARGET_PAGE_MASK;
2121 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2122 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2123 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2124#if (NB_MMU_MODES >= 3)
 2125 tlb_set_dirty1(&env->tlb_table[2][i], addr);
 2126#if (NB_MMU_MODES == 4)
 2127 tlb_set_dirty1(&env->tlb_table[3][i], addr);
2128#endif
2129#endif
2130}
2131
2132/* add a new TLB entry. At most one entry for a given virtual address
2133 is permitted. Return 0 if OK or 2 if the page could not be mapped
 2134 (can only happen in non-SOFTMMU mode for I/O pages or pages
2135 conflicting with the host address space). */
2136int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2137 target_phys_addr_t paddr, int prot,
2138 int mmu_idx, int is_softmmu)
2139{
2140 PhysPageDesc *p;
2141 unsigned long pd;
2142 unsigned int index;
2143 target_ulong address;
2144 target_ulong code_address;
2145 target_phys_addr_t addend;
2146 int ret;
2147 CPUTLBEntry *te;
2148 int i;
2149 target_phys_addr_t iotlb;
2150
2151 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2152 if (!p) {
2153 pd = IO_MEM_UNASSIGNED;
2154 } else {
2155 pd = p->phys_offset;
2156 }
2157#if defined(DEBUG_TLB)
2158 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2159 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2160#endif
2161
2162 ret = 0;
2163 address = vaddr;
2164 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2165 /* IO memory case (romd handled later) */
2166 address |= TLB_MMIO;
2167 }
2168#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2169 addend = pd & TARGET_PAGE_MASK;
2170#elif !defined(VBOX)
2171 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2172#else
2173 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
2174#endif
2175 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2176 /* Normal RAM. */
2177 iotlb = pd & TARGET_PAGE_MASK;
2178 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2179 iotlb |= IO_MEM_NOTDIRTY;
2180 else
2181 iotlb |= IO_MEM_ROM;
2182 } else {
 2183 /* IO handlers are currently passed a physical address.
2184 It would be nice to pass an offset from the base address
2185 of that region. This would avoid having to special case RAM,
2186 and avoid full address decoding in every device.
2187 We can't use the high bits of pd for this because
2188 IO_MEM_ROMD uses these as a ram address. */
2189 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2190 }
2191
2192 code_address = address;
2193 /* Make accesses to pages with watchpoints go via the
2194 watchpoint trap routines. */
2195 for (i = 0; i < env->nb_watchpoints; i++) {
2196 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2197 iotlb = io_mem_watch + paddr;
2198 /* TODO: The memory case can be optimized by not trapping
2199 reads of pages with a write breakpoint. */
2200 address |= TLB_MMIO;
2201 }
2202 }
2203
2204#ifdef VBOX
2205# if !defined(REM_PHYS_ADDR_IN_TLB)
2206 if (remR3IsMonitored(env, vaddr & TARGET_PAGE_MASK))
2207 {
2208 address |= TLB_MMIO;
 2209 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr + env->pVM->rem.s.iHandlerMemType;
2210 }
2211# endif
2212#endif
2213
2214 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2215 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2216 te = &env->tlb_table[mmu_idx][index];
2217 te->addend = addend - vaddr;
2218 if (prot & PAGE_READ) {
2219 te->addr_read = address;
2220 } else {
2221 te->addr_read = -1;
2222 }
2223
2224 if (prot & PAGE_EXEC) {
2225 te->addr_code = code_address;
2226 } else {
2227 te->addr_code = -1;
2228 }
2229 if (prot & PAGE_WRITE) {
2230 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2231 (pd & IO_MEM_ROMD)) {
2232 /* Write access calls the I/O callback. */
2233 te->addr_write = address | TLB_MMIO;
2234 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2235 !cpu_physical_memory_is_dirty(pd)) {
2236 te->addr_write = address | TLB_NOTDIRTY;
2237 } else {
2238 te->addr_write = address;
2239 }
2240 } else {
2241 te->addr_write = -1;
2242 }
2243#ifdef VBOX
2244 /* inform raw mode about TLB page change */
2245 remR3FlushPage(env, vaddr);
2246#endif
2247 return ret;
2248}
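/* Illustrative call site (a sketch only; the exact arguments come from the
   per-target MMU fault handler that resolved the guest page walk):
       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK, prot, mmu_idx, is_softmmu);
 */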
2249#if 0
2250/* called from signal handler: invalidate the code and unprotect the
 2251 page. Return TRUE if the fault was successfully handled. */
2252int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2253{
2254#if !defined(CONFIG_SOFTMMU)
2255 VirtPageDesc *vp;
2256
2257#if defined(DEBUG_TLB)
2258 printf("page_unprotect: addr=0x%08x\n", addr);
2259#endif
2260 addr &= TARGET_PAGE_MASK;
2261
2262 /* if it is not mapped, no need to worry here */
2263 if (addr >= MMAP_AREA_END)
2264 return 0;
2265 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2266 if (!vp)
2267 return 0;
2268 /* NOTE: in this case, validate_tag is _not_ tested as it
2269 validates only the code TLB */
2270 if (vp->valid_tag != virt_valid_tag)
2271 return 0;
2272 if (!(vp->prot & PAGE_WRITE))
2273 return 0;
2274#if defined(DEBUG_TLB)
2275 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2276 addr, vp->phys_addr, vp->prot);
2277#endif
2278 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2279 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2280 (unsigned long)addr, vp->prot);
2281 /* set the dirty bit */
2282 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2283 /* flush the code inside */
2284 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2285 return 1;
2286#elif defined(VBOX)
2287 addr &= TARGET_PAGE_MASK;
2288
2289 /* if it is not mapped, no need to worry here */
2290 if (addr >= MMAP_AREA_END)
2291 return 0;
2292 return 1;
2293#else
2294 return 0;
2295#endif
2296}
2297#endif /* 0 */
2298
2299#else
2300
2301void tlb_flush(CPUState *env, int flush_global)
2302{
2303}
2304
2305void tlb_flush_page(CPUState *env, target_ulong addr)
2306{
2307}
2308
2309int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2310 target_phys_addr_t paddr, int prot,
2311 int mmu_idx, int is_softmmu)
2312{
2313 return 0;
2314}
2315
2316#ifndef VBOX
2317/* dump memory mappings */
2318void page_dump(FILE *f)
2319{
2320 unsigned long start, end;
2321 int i, j, prot, prot1;
2322 PageDesc *p;
2323
2324 fprintf(f, "%-8s %-8s %-8s %s\n",
2325 "start", "end", "size", "prot");
2326 start = -1;
2327 end = -1;
2328 prot = 0;
2329 for(i = 0; i <= L1_SIZE; i++) {
2330 if (i < L1_SIZE)
2331 p = l1_map[i];
2332 else
2333 p = NULL;
2334 for(j = 0;j < L2_SIZE; j++) {
2335 if (!p)
2336 prot1 = 0;
2337 else
2338 prot1 = p[j].flags;
2339 if (prot1 != prot) {
2340 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2341 if (start != -1) {
2342 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2343 start, end, end - start,
2344 prot & PAGE_READ ? 'r' : '-',
2345 prot & PAGE_WRITE ? 'w' : '-',
2346 prot & PAGE_EXEC ? 'x' : '-');
2347 }
2348 if (prot1 != 0)
2349 start = end;
2350 else
2351 start = -1;
2352 prot = prot1;
2353 }
2354 if (!p)
2355 break;
2356 }
2357 }
2358}
2359#endif /* !VBOX */
2360
2361int page_get_flags(target_ulong address)
2362{
2363 PageDesc *p;
2364
2365 p = page_find(address >> TARGET_PAGE_BITS);
2366 if (!p)
2367 return 0;
2368 return p->flags;
2369}
2370
2371/* modify the flags of a page and invalidate the code if
 2372 necessary. The flag PAGE_WRITE_ORG is set automatically
2373 depending on PAGE_WRITE */
2374void page_set_flags(target_ulong start, target_ulong end, int flags)
2375{
2376 PageDesc *p;
2377 target_ulong addr;
2378
2379 start = start & TARGET_PAGE_MASK;
2380 end = TARGET_PAGE_ALIGN(end);
2381 if (flags & PAGE_WRITE)
2382 flags |= PAGE_WRITE_ORG;
2383#ifdef VBOX
2384 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2385#endif
2386 spin_lock(&tb_lock);
2387 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2388 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2389 /* if the write protection is set, then we invalidate the code
2390 inside */
2391 if (!(p->flags & PAGE_WRITE) &&
2392 (flags & PAGE_WRITE) &&
2393 p->first_tb) {
2394 tb_invalidate_phys_page(addr, 0, NULL);
2395 }
2396 p->flags = flags;
2397 }
2398 spin_unlock(&tb_lock);
2399}
2400
2401int page_check_range(target_ulong start, target_ulong len, int flags)
2402{
2403 PageDesc *p;
2404 target_ulong end;
2405 target_ulong addr;
2406
 2407 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2408 start = start & TARGET_PAGE_MASK;
2409
 2410 if (end < start)
 2411 /* we've wrapped around */
 2412 return -1;
 2413 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
 2414 p = page_find(addr >> TARGET_PAGE_BITS);
 2415 if (!p)
 2416 return -1;
 2417 if (!(p->flags & PAGE_VALID))
2418 return -1;
2419
2420 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2421 return -1;
2422 if (flags & PAGE_WRITE) {
2423 if (!(p->flags & PAGE_WRITE_ORG))
2424 return -1;
2425 /* unprotect the page if it was put read-only because it
2426 contains translated code */
2427 if (!(p->flags & PAGE_WRITE)) {
2428 if (!page_unprotect(addr, 0, NULL))
2429 return -1;
2430 }
2432 }
2433 }
2434 return 0;
2435}
2436
2437/* called from signal handler: invalidate the code and unprotect the
 2438 page. Return TRUE if the fault was successfully handled. */
2439int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2440{
2441 unsigned int page_index, prot, pindex;
2442 PageDesc *p, *p1;
2443 target_ulong host_start, host_end, addr;
2444
2445 /* Technically this isn't safe inside a signal handler. However we
2446 know this only ever happens in a synchronous SEGV handler, so in
2447 practice it seems to be ok. */
2448 mmap_lock();
2449
2450 host_start = address & qemu_host_page_mask;
2451 page_index = host_start >> TARGET_PAGE_BITS;
2452 p1 = page_find(page_index);
2453 if (!p1) {
2454 mmap_unlock();
2455 return 0;
2456 }
2457 host_end = host_start + qemu_host_page_size;
2458 p = p1;
2459 prot = 0;
2460 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2461 prot |= p->flags;
2462 p++;
2463 }
2464 /* if the page was really writable, then we change its
2465 protection back to writable */
2466 if (prot & PAGE_WRITE_ORG) {
2467 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2468 if (!(p1[pindex].flags & PAGE_WRITE)) {
2469 mprotect((void *)g2h(host_start), qemu_host_page_size,
2470 (prot & PAGE_BITS) | PAGE_WRITE);
2471 p1[pindex].flags |= PAGE_WRITE;
2472 /* and since the content will be modified, we must invalidate
2473 the corresponding translated code. */
2474 tb_invalidate_phys_page(address, pc, puc);
2475#ifdef DEBUG_TB_CHECK
2476 tb_invalidate_check(address);
2477#endif
2478 mmap_unlock();
2479 return 1;
2480 }
2481 }
2482 mmap_unlock();
2483 return 0;
2484}
2485
2486static inline void tlb_set_dirty(CPUState *env,
2487 unsigned long addr, target_ulong vaddr)
2488{
2489}
2490#endif /* defined(CONFIG_USER_ONLY) */
2491
2492#if !defined(CONFIG_USER_ONLY)
2493static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2494 ram_addr_t memory);
2495static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2496 ram_addr_t orig_memory);
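/* Compute the sub-range [start_addr2, end_addr2] that the registration
   covers inside the page at 'addr'; need_subpage is set to 1 when the page
   is only partially covered and therefore needs subpage dispatch. */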
2497#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2498 need_subpage) \
2499 do { \
2500 if (addr > start_addr) \
2501 start_addr2 = 0; \
2502 else { \
2503 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2504 if (start_addr2 > 0) \
2505 need_subpage = 1; \
2506 } \
2507 \
2508 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2509 end_addr2 = TARGET_PAGE_SIZE - 1; \
2510 else { \
2511 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2512 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2513 need_subpage = 1; \
2514 } \
2515 } while (0)
2516
2517
2518/* register physical memory. 'size' must be a multiple of the target
2519 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2520 io memory page */
2521void cpu_register_physical_memory(target_phys_addr_t start_addr,
2522 unsigned long size,
2523 unsigned long phys_offset)
2524{
2525 target_phys_addr_t addr, end_addr;
2526 PhysPageDesc *p;
2527 CPUState *env;
2528 ram_addr_t orig_size = size;
2529 void *subpage;
2530
2531#ifdef USE_KQEMU
2532 /* XXX: should not depend on cpu context */
2533 env = first_cpu;
2534 if (env->kqemu_enabled) {
2535 kqemu_set_phys_mem(start_addr, size, phys_offset);
2536 }
2537#endif
2538 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2539 end_addr = start_addr + (target_phys_addr_t)size;
2540 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2541 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2542 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2543 ram_addr_t orig_memory = p->phys_offset;
2544 target_phys_addr_t start_addr2, end_addr2;
2545 int need_subpage = 0;
2546
2547 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2548 need_subpage);
2549 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2550 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2551 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2552 &p->phys_offset, orig_memory);
2553 } else {
2554 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2555 >> IO_MEM_SHIFT];
2556 }
2557 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2558 } else {
2559 p->phys_offset = phys_offset;
2560#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2561 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2562 (phys_offset & IO_MEM_ROMD))
2563#else
2564 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2565 || (phys_offset & IO_MEM_ROMD)
2566 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2567#endif
2568 phys_offset += TARGET_PAGE_SIZE;
2569 }
2570 } else {
2571 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2572 p->phys_offset = phys_offset;
2573#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2574 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2575 (phys_offset & IO_MEM_ROMD))
2576#else
2577 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2578 || (phys_offset & IO_MEM_ROMD)
2579 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2580#endif
2581 phys_offset += TARGET_PAGE_SIZE;
2582 else {
2583 target_phys_addr_t start_addr2, end_addr2;
2584 int need_subpage = 0;
2585
2586 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2587 end_addr2, need_subpage);
2588
2589 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2590 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2591 &p->phys_offset, IO_MEM_UNASSIGNED);
2592 subpage_register(subpage, start_addr2, end_addr2,
2593 phys_offset);
2594 }
2595 }
2596 }
2597 }
2598 /* since each CPU stores ram addresses in its TLB cache, we must
2599 reset the modified entries */
2600 /* XXX: slow ! */
2601 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2602 tlb_flush(env, 1);
2603 }
2604}
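/* Illustrative use from machine setup code (names and addresses are
   hypothetical): register guest RAM, then a one-page MMIO region whose
   handle came from cpu_register_io_memory():
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(mmio_base, TARGET_PAGE_SIZE, mmio_index);
 */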
2605
2606/* XXX: temporary until new memory mapping API */
2607uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2608{
2609 PhysPageDesc *p;
2610
2611 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2612 if (!p)
2613 return IO_MEM_UNASSIGNED;
2614 return p->phys_offset;
2615}
2616
2617#ifndef VBOX
2618/* XXX: better than nothing */
2619ram_addr_t qemu_ram_alloc(ram_addr_t size)
2620{
2621 ram_addr_t addr;
2622 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2623 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2624 (uint64_t)size, (uint64_t)phys_ram_size);
2625 abort();
2626 }
2627 addr = phys_ram_alloc_offset;
2628 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2629 return addr;
2630}
2631
2632void qemu_ram_free(ram_addr_t addr)
2633{
2634}
2635#endif
2636
2637
2638static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2639{
2640#ifdef DEBUG_UNASSIGNED
2641 printf("Unassigned mem read 0x%08x\n", (int)addr);
2642#endif
2643#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2644 do_unassigned_access(addr, 0, 0, 0, 1);
2645#endif
2646 return 0;
2647}
2648
2649static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2650{
2651#ifdef DEBUG_UNASSIGNED
2652 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2653#endif
2654#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2655 do_unassigned_access(addr, 0, 0, 0, 2);
2656#endif
2657 return 0;
2658}
2659
2660static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2661{
2662#ifdef DEBUG_UNASSIGNED
2663 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2664#endif
2665#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2666 do_unassigned_access(addr, 0, 0, 0, 4);
2667#endif
2668 return 0;
2669}
2670
2671static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2672{
2673#ifdef DEBUG_UNASSIGNED
2674 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2675#endif
2676}
2677
2678static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2679{
2680#ifdef DEBUG_UNASSIGNED
2681 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2682#endif
2683#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2684 do_unassigned_access(addr, 1, 0, 0, 2);
2685#endif
2686}
2687
2688static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2689{
2690#ifdef DEBUG_UNASSIGNED
2691 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2692#endif
2693#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2694 do_unassigned_access(addr, 1, 0, 0, 4);
2695#endif
2696}
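/* Handler tables are indexed by log2 of the access size:
   0 = byte, 1 = word, 2 = long. */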
2697static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2698 unassigned_mem_readb,
2699 unassigned_mem_readw,
2700 unassigned_mem_readl,
2701};
2702
2703static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2704 unassigned_mem_writeb,
2705 unassigned_mem_writew,
2706 unassigned_mem_writel,
2707};
2708
2709static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2710{
2711 unsigned long ram_addr;
2712 int dirty_flags;
2713#if defined(VBOX)
2714 ram_addr = addr;
 2715#else
2716 ram_addr = addr - (unsigned long)phys_ram_base;
2717#endif
2718#ifdef VBOX
2719 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2720 dirty_flags = 0xff;
2721 else
2722#endif /* VBOX */
2723 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2724 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2725#if !defined(CONFIG_USER_ONLY)
2726 tb_invalidate_phys_page_fast(ram_addr, 1);
2727# ifdef VBOX
2728 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2729 dirty_flags = 0xff;
2730 else
2731# endif /* VBOX */
2732 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2733#endif
2734 }
2735#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2736 remR3PhysWriteU8(addr, val);
2737#else
2738 stb_p((uint8_t *)(long)addr, val);
2739#endif
2740#ifdef USE_KQEMU
2741 if (cpu_single_env->kqemu_enabled &&
2742 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2743 kqemu_modify_page(cpu_single_env, ram_addr);
2744#endif
2745 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2746#ifdef VBOX
2747 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
 2748#endif /* VBOX */
2749 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2750 /* we remove the notdirty callback only if the code has been
2751 flushed */
2752 if (dirty_flags == 0xff)
2753 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2754}
2755
2756static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2757{
2758 unsigned long ram_addr;
2759 int dirty_flags;
2760#if defined(VBOX)
2761 ram_addr = addr;
2762#else
2763 ram_addr = addr - (unsigned long)phys_ram_base;
2764#endif
2765#ifdef VBOX
2766 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2767 dirty_flags = 0xff;
2768 else
2769#endif /* VBOX */
2770 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2771 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2772#if !defined(CONFIG_USER_ONLY)
2773 tb_invalidate_phys_page_fast(ram_addr, 2);
2774# ifdef VBOX
2775 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2776 dirty_flags = 0xff;
2777 else
2778# endif /* VBOX */
2779 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2780#endif
2781 }
2782#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2783 remR3PhysWriteU16(addr, val);
2784#else
2785 stw_p((uint8_t *)(long)addr, val);
2786#endif
2787
2788#ifdef USE_KQEMU
2789 if (cpu_single_env->kqemu_enabled &&
2790 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2791 kqemu_modify_page(cpu_single_env, ram_addr);
2792#endif
2793 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2794#ifdef VBOX
2795 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2796#endif
2797 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2798 /* we remove the notdirty callback only if the code has been
2799 flushed */
2800 if (dirty_flags == 0xff)
2801 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2802}
2803
2804static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2805{
2806 unsigned long ram_addr;
2807 int dirty_flags;
2808#if defined(VBOX)
2809 ram_addr = addr;
2810#else
2811 ram_addr = addr - (unsigned long)phys_ram_base;
2812#endif
2813#ifdef VBOX
2814 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2815 dirty_flags = 0xff;
2816 else
2817#endif /* VBOX */
2818 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2819 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2820#if !defined(CONFIG_USER_ONLY)
2821 tb_invalidate_phys_page_fast(ram_addr, 4);
2822# ifdef VBOX
2823 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2824 dirty_flags = 0xff;
2825 else
2826# endif /* VBOX */
2827 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2828#endif
2829 }
2830#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2831 remR3PhysWriteU32(addr, val);
2832#else
2833 stl_p((uint8_t *)(long)addr, val);
2834#endif
2835#ifdef USE_KQEMU
2836 if (cpu_single_env->kqemu_enabled &&
2837 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2838 kqemu_modify_page(cpu_single_env, ram_addr);
2839#endif
2840 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2841#ifdef VBOX
2842 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2843#endif
2844 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2845 /* we remove the notdirty callback only if the code has been
2846 flushed */
2847 if (dirty_flags == 0xff)
2848 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2849}
2850
2851static CPUReadMemoryFunc *error_mem_read[3] = {
2852 NULL, /* never used */
2853 NULL, /* never used */
2854 NULL, /* never used */
2855};
2856
2857static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2858 notdirty_mem_writeb,
2859 notdirty_mem_writew,
2860 notdirty_mem_writel,
2861};
2862
2863
2864/* Generate a debug exception if a watchpoint has been hit. */
2865static void check_watchpoint(int offset, int flags)
2866{
2867 CPUState *env = cpu_single_env;
2868 target_ulong vaddr;
2869 int i;
2870
2871 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2872 for (i = 0; i < env->nb_watchpoints; i++) {
2873 if (vaddr == env->watchpoint[i].vaddr
2874 && (env->watchpoint[i].type & flags)) {
2875 env->watchpoint_hit = i + 1;
2876 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2877 break;
2878 }
2879 }
2880}
2881
2882/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2883 so these check for a hit then pass through to the normal out-of-line
2884 phys routines. */
2885static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2886{
2887 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2888 return ldub_phys(addr);
2889}
2890
2891static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2892{
2893 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2894 return lduw_phys(addr);
2895}
2896
2897static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2898{
2899 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2900 return ldl_phys(addr);
2901}
2902
2903static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2904 uint32_t val)
2905{
2906 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2907 stb_phys(addr, val);
2908}
2909
2910static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2911 uint32_t val)
2912{
2913 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2914 stw_phys(addr, val);
2915}
2916
2917static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2918 uint32_t val)
2919{
2920 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2921 stl_phys(addr, val);
2922}
2923
2924static CPUReadMemoryFunc *watch_mem_read[3] = {
2925 watch_mem_readb,
2926 watch_mem_readw,
2927 watch_mem_readl,
2928};
2929
2930static CPUWriteMemoryFunc *watch_mem_write[3] = {
2931 watch_mem_writeb,
2932 watch_mem_writew,
2933 watch_mem_writel,
2934};
2935
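/* A subpage_t splits a single target page into regions with distinct I/O
   handlers; SUBPAGE_IDX(addr - mmio->base) selects the region and 'len'
   (0 = byte, 1 = word, 2 = long) selects the access size. */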
2936static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2937 unsigned int len)
2938{
2939 uint32_t ret;
2940 unsigned int idx;
2941
2942 idx = SUBPAGE_IDX(addr - mmio->base);
2943#if defined(DEBUG_SUBPAGE)
2944 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2945 mmio, len, addr, idx);
2946#endif
2947 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2948
2949 return ret;
2950}
2951
2952static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2953 uint32_t value, unsigned int len)
2954{
2955 unsigned int idx;
2956
2957 idx = SUBPAGE_IDX(addr - mmio->base);
2958#if defined(DEBUG_SUBPAGE)
2959 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2960 mmio, len, addr, idx, value);
2961#endif
2962 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2963}
2964
2965static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2966{
2967#if defined(DEBUG_SUBPAGE)
2968 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2969#endif
2970
2971 return subpage_readlen(opaque, addr, 0);
2972}
2973
2974static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2975 uint32_t value)
2976{
2977#if defined(DEBUG_SUBPAGE)
2978 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2979#endif
2980 subpage_writelen(opaque, addr, value, 0);
2981}
2982
2983static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2984{
2985#if defined(DEBUG_SUBPAGE)
2986 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2987#endif
2988
2989 return subpage_readlen(opaque, addr, 1);
2990}
2991
2992static void subpage_writew (void *opaque, target_phys_addr_t addr,
2993 uint32_t value)
2994{
2995#if defined(DEBUG_SUBPAGE)
2996 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2997#endif
2998 subpage_writelen(opaque, addr, value, 1);
2999}
3000
3001static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3002{
3003#if defined(DEBUG_SUBPAGE)
3004 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3005#endif
3006
3007 return subpage_readlen(opaque, addr, 2);
3008}
3009
3010static void subpage_writel (void *opaque,
3011 target_phys_addr_t addr, uint32_t value)
3012{
3013#if defined(DEBUG_SUBPAGE)
3014 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3015#endif
3016 subpage_writelen(opaque, addr, value, 2);
3017}
3018
3019static CPUReadMemoryFunc *subpage_read[] = {
3020 &subpage_readb,
3021 &subpage_readw,
3022 &subpage_readl,
3023};
3024
3025static CPUWriteMemoryFunc *subpage_write[] = {
3026 &subpage_writeb,
3027 &subpage_writew,
3028 &subpage_writel,
3029};
3030
3031static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3032 ram_addr_t memory)
3033{
3034 int idx, eidx;
3035 unsigned int i;
3036
3037 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3038 return -1;
3039 idx = SUBPAGE_IDX(start);
3040 eidx = SUBPAGE_IDX(end);
3041#if defined(DEBUG_SUBPAGE)
3042 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3043 mmio, start, end, idx, eidx, memory);
3044#endif
3045 memory >>= IO_MEM_SHIFT;
3046 for (; idx <= eidx; idx++) {
3047 for (i = 0; i < 4; i++) {
3048 if (io_mem_read[memory][i]) {
3049 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3050 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3051 }
3052 if (io_mem_write[memory][i]) {
3053 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3054 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3055 }
3056 }
3057 }
3058
3059 return 0;
3060}
3061
3062static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3063 ram_addr_t orig_memory)
3064{
3065 subpage_t *mmio;
3066 int subpage_memory;
3067
3068 mmio = qemu_mallocz(sizeof(subpage_t));
3069 if (mmio != NULL) {
3070 mmio->base = base;
3071 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3072#if defined(DEBUG_SUBPAGE)
3073 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3074 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3075#endif
3076 *phys = subpage_memory | IO_MEM_SUBPAGE;
3077 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3078 }
3079
3080 return mmio;
3081}
3082
3083static void io_mem_init(void)
3084{
3085 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3086 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3087 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3088#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
3089 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3090 io_mem_nb = 6;
3091#else
3092 io_mem_nb = 5;
3093#endif
3094
3095 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3096 watch_mem_write, NULL);
3097
3098#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3099 /* alloc dirty bits array */
3100 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3101 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3102#endif /* !VBOX */
3103}
3104
3105/* mem_read and mem_write are arrays of functions containing the
3106 function to access byte (index 0), word (index 1) and dword (index
3107 2). Functions can be omitted with a NULL function pointer. The
3108 registered functions may be modified dynamically later.
 3109 If io_index is non-zero, the corresponding io zone is
3110 modified. If it is zero, a new io zone is allocated. The return
3111 value can be used with cpu_register_physical_memory(). (-1) is
 3112 returned on error. */
3113int cpu_register_io_memory(int io_index,
3114 CPUReadMemoryFunc **mem_read,
3115 CPUWriteMemoryFunc **mem_write,
3116 void *opaque)
3117{
3118 int i, subwidth = 0;
3119
3120 if (io_index <= 0) {
3121 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3122 return -1;
3123 io_index = io_mem_nb++;
3124 } else {
3125 if (io_index >= IO_MEM_NB_ENTRIES)
3126 return -1;
3127 }
3128
3129 for(i = 0;i < 3; i++) {
3130 if (!mem_read[i] || !mem_write[i])
3131 subwidth = IO_MEM_SUBWIDTH;
3132 io_mem_read[io_index][i] = mem_read[i];
3133 io_mem_write[io_index][i] = mem_write[i];
3134 }
3135 io_mem_opaque[io_index] = opaque;
3136 return (io_index << IO_MEM_SHIFT) | subwidth;
3137}
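/* Illustrative registration sequence (device names hypothetical):
       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
       cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
 */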
3138
3139CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3140{
3141 return io_mem_write[io_index >> IO_MEM_SHIFT];
3142}
3143
3144CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3145{
3146 return io_mem_read[io_index >> IO_MEM_SHIFT];
3147}
3148#endif /* !defined(CONFIG_USER_ONLY) */
3149
3150/* physical memory access (slow version, mainly for debug) */
3151#if defined(CONFIG_USER_ONLY)
3152void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3153 int len, int is_write)
3154{
3155 int l, flags;
3156 target_ulong page;
3157 void * p;
3158
3159 while (len > 0) {
3160 page = addr & TARGET_PAGE_MASK;
3161 l = (page + TARGET_PAGE_SIZE) - addr;
3162 if (l > len)
3163 l = len;
3164 flags = page_get_flags(page);
3165 if (!(flags & PAGE_VALID))
3166 return;
3167 if (is_write) {
3168 if (!(flags & PAGE_WRITE))
3169 return;
3170 /* XXX: this code should not depend on lock_user */
3171 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3172 /* FIXME - should this return an error rather than just fail? */
3173 return;
 3174 memcpy(p, buf, l);
 3175 unlock_user(p, addr, l);
3176 } else {
3177 if (!(flags & PAGE_READ))
3178 return;
3179 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3180 /* FIXME - should this return an error rather than just fail? */
3181 return;
 3182 memcpy(buf, p, l);
3183 unlock_user(p, addr, 0);
3184 }
3185 len -= l;
3186 buf += l;
3187 addr += l;
3188 }
3189}
3190
3191#else
3192void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3193 int len, int is_write)
3194{
3195 int l, io_index;
3196 uint8_t *ptr;
3197 uint32_t val;
3198 target_phys_addr_t page;
3199 unsigned long pd;
3200 PhysPageDesc *p;
3201
3202 while (len > 0) {
3203 page = addr & TARGET_PAGE_MASK;
3204 l = (page + TARGET_PAGE_SIZE) - addr;
3205 if (l > len)
3206 l = len;
3207 p = phys_page_find(page >> TARGET_PAGE_BITS);
3208 if (!p) {
3209 pd = IO_MEM_UNASSIGNED;
3210 } else {
3211 pd = p->phys_offset;
3212 }
3213
3214 if (is_write) {
3215 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3216 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3217 /* XXX: could force cpu_single_env to NULL to avoid
3218 potential bugs */
3219 if (l >= 4 && ((addr & 3) == 0)) {
3220 /* 32 bit write access */
3221#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3222 val = ldl_p(buf);
3223#else
3224 val = *(const uint32_t *)buf;
3225#endif
3226 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3227 l = 4;
3228 } else if (l >= 2 && ((addr & 1) == 0)) {
3229 /* 16 bit write access */
3230#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3231 val = lduw_p(buf);
3232#else
3233 val = *(const uint16_t *)buf;
3234#endif
3235 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3236 l = 2;
3237 } else {
3238 /* 8 bit write access */
3239#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3240 val = ldub_p(buf);
3241#else
3242 val = *(const uint8_t *)buf;
3243#endif
3244 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3245 l = 1;
3246 }
3247 } else {
3248 unsigned long addr1;
3249 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3250 /* RAM case */
3251#ifdef VBOX
3252 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3253#else
3254 ptr = phys_ram_base + addr1;
3255 memcpy(ptr, buf, l);
3256#endif
3257 if (!cpu_physical_memory_is_dirty(addr1)) {
3258 /* invalidate code */
3259 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3260 /* set dirty bit */
3261#ifdef VBOX
3262 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3263#endif
3264 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3265 (0xff & ~CODE_DIRTY_FLAG);
3266 }
3267 }
3268 } else {
3269 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3270 !(pd & IO_MEM_ROMD)) {
3271 /* I/O case */
3272 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3273 if (l >= 4 && ((addr & 3) == 0)) {
3274 /* 32 bit read access */
3275 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3276#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3277 stl_p(buf, val);
3278#else
3279 *(uint32_t *)buf = val;
3280#endif
3281 l = 4;
3282 } else if (l >= 2 && ((addr & 1) == 0)) {
3283 /* 16 bit read access */
3284 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3285#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3286 stw_p(buf, val);
3287#else
3288 *(uint16_t *)buf = val;
3289#endif
3290 l = 2;
3291 } else {
3292 /* 8 bit read access */
3293 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3294#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3295 stb_p(buf, val);
3296#else
3297 *(uint8_t *)buf = val;
3298#endif
3299 l = 1;
3300 }
3301 } else {
3302 /* RAM case */
3303#ifdef VBOX
3304 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3305#else
3306 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3307 (addr & ~TARGET_PAGE_MASK);
3308 memcpy(buf, ptr, l);
3309#endif
3310 }
3311 }
3312 len -= l;
3313 buf += l;
3314 addr += l;
3315 }
3316}
3317
3318#ifndef VBOX
 3319/* used for ROM loading: can write in RAM and ROM */
3320void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3321 const uint8_t *buf, int len)
3322{
3323 int l;
3324 uint8_t *ptr;
3325 target_phys_addr_t page;
3326 unsigned long pd;
3327 PhysPageDesc *p;
3328
3329 while (len > 0) {
3330 page = addr & TARGET_PAGE_MASK;
3331 l = (page + TARGET_PAGE_SIZE) - addr;
3332 if (l > len)
3333 l = len;
3334 p = phys_page_find(page >> TARGET_PAGE_BITS);
3335 if (!p) {
3336 pd = IO_MEM_UNASSIGNED;
3337 } else {
3338 pd = p->phys_offset;
3339 }
3340
3341 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3342 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3343 !(pd & IO_MEM_ROMD)) {
3344 /* do nothing */
3345 } else {
3346 unsigned long addr1;
3347 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3348 /* ROM/RAM case */
3349 ptr = phys_ram_base + addr1;
3350 memcpy(ptr, buf, l);
3351 }
3352 len -= l;
3353 buf += l;
3354 addr += l;
3355 }
3356}
3357#endif /* !VBOX */
3358
3359
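/* The ld*_phys/st*_phys helpers below are aligned physical-memory accessors:
   MMIO pages are dispatched to the registered I/O handlers, while RAM pages
   are accessed directly (via remR3Phys* in the VBOX build). */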
3360/* warning: addr must be aligned */
3361uint32_t ldl_phys(target_phys_addr_t addr)
3362{
3363 int io_index;
3364 uint8_t *ptr;
3365 uint32_t val;
3366 unsigned long pd;
3367 PhysPageDesc *p;
3368
3369 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3370 if (!p) {
3371 pd = IO_MEM_UNASSIGNED;
3372 } else {
3373 pd = p->phys_offset;
3374 }
3375
3376 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3377 !(pd & IO_MEM_ROMD)) {
3378 /* I/O case */
3379 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3380 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3381 } else {
3382 /* RAM case */
3383#ifndef VBOX
3384 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3385 (addr & ~TARGET_PAGE_MASK);
3386 val = ldl_p(ptr);
3387#else
3388 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3389#endif
3390 }
3391 return val;
3392}
3393
3394/* warning: addr must be aligned */
3395uint64_t ldq_phys(target_phys_addr_t addr)
3396{
3397 int io_index;
3398 uint8_t *ptr;
3399 uint64_t val;
3400 unsigned long pd;
3401 PhysPageDesc *p;
3402
3403 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3404 if (!p) {
3405 pd = IO_MEM_UNASSIGNED;
3406 } else {
3407 pd = p->phys_offset;
3408 }
3409
3410 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3411 !(pd & IO_MEM_ROMD)) {
3412 /* I/O case */
3413 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3414#ifdef TARGET_WORDS_BIGENDIAN
3415 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3416 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3417#else
3418 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3419 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3420#endif
3421 } else {
3422 /* RAM case */
3423#ifndef VBOX
3424 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3425 (addr & ~TARGET_PAGE_MASK);
3426 val = ldq_p(ptr);
3427#else
3428 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3429#endif
3430 }
3431 return val;
3432}
3433
3434/* XXX: optimize */
3435uint32_t ldub_phys(target_phys_addr_t addr)
3436{
3437 uint8_t val;
3438 cpu_physical_memory_read(addr, &val, 1);
3439 return val;
3440}
3441
3442/* XXX: optimize */
3443uint32_t lduw_phys(target_phys_addr_t addr)
3444{
3445 uint16_t val;
3446 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3447 return tswap16(val);
3448}
3449
3450/* warning: addr must be aligned. The ram page is not masked as dirty
3451 and the code inside is not invalidated. It is useful if the dirty
3452 bits are used to track modified PTEs */
3453void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3454{
3455 int io_index;
3456 uint8_t *ptr;
3457 unsigned long pd;
3458 PhysPageDesc *p;
3459
3460 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3461 if (!p) {
3462 pd = IO_MEM_UNASSIGNED;
3463 } else {
3464 pd = p->phys_offset;
3465 }
3466
3467 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3468 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3469 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3470 } else {
3471#ifndef VBOX
 3472 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
 3473 ptr = phys_ram_base + addr1;
 3474 stl_p(ptr, val);
3475#else
3476 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3477#endif
3478#ifndef VBOX
3479 if (unlikely(in_migration)) {
3480 if (!cpu_physical_memory_is_dirty(addr1)) {
3481 /* invalidate code */
3482 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3483 /* set dirty bit */
3484 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3485 (0xff & ~CODE_DIRTY_FLAG);
3486 }
3487 }
3488#endif
3489 }
3490}
3491
3492void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3493{
3494 int io_index;
3495 uint8_t *ptr;
3496 unsigned long pd;
3497 PhysPageDesc *p;
3498
3499 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3500 if (!p) {
3501 pd = IO_MEM_UNASSIGNED;
3502 } else {
3503 pd = p->phys_offset;
3504 }
3505
3506 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3507 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3508#ifdef TARGET_WORDS_BIGENDIAN
3509 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3510 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3511#else
3512 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3513 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3514#endif
3515 } else {
3516#ifndef VBOX
3517 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3518 (addr & ~TARGET_PAGE_MASK);
3519 stq_p(ptr, val);
3520#else
3521 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3522#endif
3523 }
3524}
3525
3526
3527/* warning: addr must be aligned */
3528void stl_phys(target_phys_addr_t addr, uint32_t val)
3529{
3530 int io_index;
3531 uint8_t *ptr;
3532 unsigned long pd;
3533 PhysPageDesc *p;
3534
3535 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3536 if (!p) {
3537 pd = IO_MEM_UNASSIGNED;
3538 } else {
3539 pd = p->phys_offset;
3540 }
3541
3542 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3543 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3544 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3545 } else {
3546 unsigned long addr1;
3547 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3548 /* RAM case */
3549#ifndef VBOX
3550 ptr = phys_ram_base + addr1;
3551 stl_p(ptr, val);
3552#else
3553 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3554#endif
3555 if (!cpu_physical_memory_is_dirty(addr1)) {
3556 /* invalidate code */
3557 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3558 /* set dirty bit */
3559#ifdef VBOX
3560 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3561#endif
3562 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3563 (0xff & ~CODE_DIRTY_FLAG);
3564 }
3565 }
3566}
3567
3568/* XXX: optimize */
3569void stb_phys(target_phys_addr_t addr, uint32_t val)
3570{
3571 uint8_t v = val;
3572 cpu_physical_memory_write(addr, &v, 1);
3573}
3574
3575/* XXX: optimize */
3576void stw_phys(target_phys_addr_t addr, uint32_t val)
3577{
3578 uint16_t v = tswap16(val);
3579 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3580}
3581
3582/* XXX: optimize */
3583void stq_phys(target_phys_addr_t addr, uint64_t val)
3584{
3585 val = tswap64(val);
3586 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3587}
3588
3589#endif
3590
3591/* virtual memory access for debug */
3592int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3593 uint8_t *buf, int len, int is_write)
3594{
3595 int l;
3596 target_ulong page, phys_addr;
3597
3598 while (len > 0) {
3599 page = addr & TARGET_PAGE_MASK;
3600 phys_addr = cpu_get_phys_page_debug(env, page);
3601 /* if no physical page mapped, return an error */
3602 if (phys_addr == -1)
3603 return -1;
3604 l = (page + TARGET_PAGE_SIZE) - addr;
3605 if (l > len)
3606 l = len;
3607 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3608 buf, l, is_write);
3609 len -= l;
3610 buf += l;
3611 addr += l;
3612 }
3613 return 0;
3614}
3615
3616/* in deterministic execution mode, instructions doing device I/Os
3617 must be at the end of the TB */
3618void cpu_io_recompile(CPUState *env, void *retaddr)
3619{
3620 TranslationBlock *tb;
3621 uint32_t n, cflags;
3622 target_ulong pc, cs_base;
3623 uint64_t flags;
3624
3625 tb = tb_find_pc((unsigned long)retaddr);
3626 if (!tb) {
3627 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3628 retaddr);
3629 }
3630 n = env->icount_decr.u16.low + tb->icount;
3631 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3632 /* Calculate how many instructions had been executed before the fault
3633 occurred. */
3634 n = n - env->icount_decr.u16.low;
3635 /* Generate a new TB ending on the I/O insn. */
3636 n++;
3637 /* On MIPS and SH, delay slot instructions can only be restarted if
3638 they were already the first instruction in the TB. If this is not
3639 the first instruction in a TB then re-execute the preceding
3640 branch. */
3641#if defined(TARGET_MIPS)
3642 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3643 env->active_tc.PC -= 4;
3644 env->icount_decr.u16.low++;
3645 env->hflags &= ~MIPS_HFLAG_BMASK;
3646 }
3647#elif defined(TARGET_SH4)
3648 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3649 && n > 1) {
3650 env->pc -= 2;
3651 env->icount_decr.u16.low++;
3652 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3653 }
3654#endif
3655 /* This should never happen. */
3656 if (n > CF_COUNT_MASK)
3657 cpu_abort(env, "TB too big during recompile");
3658
3659 cflags = n | CF_LAST_IO;
3660 pc = tb->pc;
3661 cs_base = tb->cs_base;
3662 flags = tb->flags;
3663 tb_phys_invalidate(tb, -1);
3664 /* FIXME: In theory this could raise an exception. In practice
3665 we have already translated the block once so it's probably ok. */
3666 tb_gen_code(env, pc, cs_base, flags, cflags);
3667 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3668 the first in the TB) then we end up generating a whole new TB and
3669 repeating the fault, which is horribly inefficient.
3670 Better would be to execute just this insn uncached, or generate a
3671 second new TB. */
3672 cpu_resume_from_signal(env, NULL);
3673}
3674
3675#ifndef VBOX
3676void dump_exec_info(FILE *f,
3677 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3678{
3679 int i, target_code_size, max_target_code_size;
3680 int direct_jmp_count, direct_jmp2_count, cross_page;
3681 TranslationBlock *tb;
3682
3683 target_code_size = 0;
3684 max_target_code_size = 0;
3685 cross_page = 0;
3686 direct_jmp_count = 0;
3687 direct_jmp2_count = 0;
3688 for(i = 0; i < nb_tbs; i++) {
3689 tb = &tbs[i];
3690 target_code_size += tb->size;
3691 if (tb->size > max_target_code_size)
3692 max_target_code_size = tb->size;
3693 if (tb->page_addr[1] != -1)
3694 cross_page++;
3695 if (tb->tb_next_offset[0] != 0xffff) {
3696 direct_jmp_count++;
3697 if (tb->tb_next_offset[1] != 0xffff) {
3698 direct_jmp2_count++;
3699 }
3700 }
3701 }
3702 /* XXX: avoid using doubles ? */
3703 cpu_fprintf(f, "Translation buffer state:\n");
3704 cpu_fprintf(f, "gen code size %ld/%ld\n",
3705 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3706 cpu_fprintf(f, "TB count %d/%d\n",
3707 nb_tbs, code_gen_max_blocks);
3708 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3709 nb_tbs ? target_code_size / nb_tbs : 0,
3710 max_target_code_size);
3711 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3712 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3713 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3714 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3715 cross_page,
3716 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3717 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3718 direct_jmp_count,
3719 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3720 direct_jmp2_count,
3721 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3722 cpu_fprintf(f, "\nStatistics:\n");
3723 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3724 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3725 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3726 tcg_dump_info(f, cpu_fprintf);
3727}
3728#endif /* !VBOX */
3729
3730#if !defined(CONFIG_USER_ONLY)
3731
3732#define MMUSUFFIX _cmmu
3733#define GETPC() NULL
3734#define env cpu_single_env
3735#define SOFTMMU_CODE_ACCESS
3736
3737#define SHIFT 0
3738#include "softmmu_template.h"
3739
3740#define SHIFT 1
3741#include "softmmu_template.h"
3742
3743#define SHIFT 2
3744#include "softmmu_template.h"
3745
3746#define SHIFT 3
3747#include "softmmu_template.h"
3748
3749#undef env
3750
3751#endif