VirtualBox

source: vbox/trunk/src/recompiler/exec-all.h@ 14025

Last change on this file since 14025 was 11982, checked in by vboxsync, 16 years ago

All: license header changes for 2.0 (OSE headers, add Sun GPL/LGPL disclaimer)

  • Property svn:eol-style set to native
File size: 20.3 KB
 
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

/* allow seeing translation results - the slowdown should be negligible, so we leave it enabled */
#ifndef VBOX
#define DEBUG_DISAS
#endif

#ifdef VBOX
# include <VBox/tm.h>
# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
# ifndef LOG_GROUP
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/log.h>
# include "REMInternal.h"
# include <VBox/vm.h>
#endif /* VBOX */

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
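
/*
 * Illustrative example (editor addition, not part of the original header):
 * glue() pastes two tokens after macro expansion, and stringify() turns a
 * token into a string literal after expansion. Assuming a hypothetical
 * DATA_SIZE macro:
 *
 *     #define DATA_SIZE 4
 *     glue(ldl, _code)       expands to the identifier ldl_code
 *     stringify(DATA_SIZE)   expands to "4" (tostring alone would give "DATA_SIZE")
 *
 * The two-level xglue/tostring indirection is what forces the arguments to be
 * expanded before pasting or stringizing.
 */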

#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute__((regparm(n)))
#else
#define REGPARM(n)
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}

#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024) /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (16 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per-code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
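
/*
 * Worked example (editor addition): on a generic softmmu host the defaults
 * above give CODE_GEN_BUFFER_SIZE = 16 MiB and CODE_GEN_AVG_BLOCK_SIZE = 128
 * bytes, so
 *
 *     CODE_GEN_MAX_BLOCKS = (16 * 1024 * 1024) / 128 = 131072
 *
 * translation block descriptors can be allocated before tb_flush() is forced.
 */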

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif
#ifdef VBOX /* bird: not safe in next step because of threading & cpu_interrupt. */
#undef USE_DIRECT_JUMP
#endif /* VBOX */

typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */
#ifdef VBOX
#define CF_RAW_MODE    0x0010 /* block was generated in raw mode */
#endif

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
# if defined(VBOX) && defined(RT_OS_DARWIN) && defined(RT_ARCH_AMD64)
#  error "First 4GB aren't reachable. jmp dword [tb_next] won't work."
# endif
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
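
/*
 * Illustrative sketch (editor addition, not in the original source):
 * jmp_first/jmp_next form a circular list whose links carry a tag in the two
 * least significant pointer bits; TranslationBlock objects are sufficiently
 * aligned that these bits are otherwise zero. Decoding a tagged link:
 *
 *     long tag = (long)link & 3;    0 or 1 means "continue via tb->jmp_next[tag]",
 *                                   2 means we are back at the owning jmp_first
 *     TranslationBlock *tb = (TranslationBlock *)((long)link & ~3);
 *
 * This matches how tb_add_jump() below stores (long)tb | n into jmp_first.
 */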

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
            (tmp & TB_JMP_ADDR_MASK));
}
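
/*
 * Background note (editor addition): the jump-cache hash splits the slot
 * index into two fields. The low bits come straight from the in-page offset
 * of pc (masked with TB_JMP_ADDR_MASK); the high bits come from the page
 * number, folded by pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) so
 * that nearby pages land in different buckets. tb_jmp_cache_hash_page()
 * computes only the page field, which lets a whole page's cache entries be
 * found and invalidated together.
 */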

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
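
/*
 * Worked example (editor addition, hypothetical addresses): x86 near jumps
 * encode a rel32 displacement relative to the end of the 4-byte operand, so
 * patching a jump whose operand sits at jmp_addr = 0x1000 to reach
 * addr = 0x2000 stores 0x2000 - (0x1000 + 4) = 0xffc. x86 keeps instruction
 * fetch coherent with data writes, hence no explicit icache flush is needed,
 * unlike the PowerPC variant above.
 */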
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
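
/*
 * Usage sketch (editor addition, hypothetical variables): a typical caller
 * chains the block that just executed to its successor so the next run falls
 * through without returning to the execution loop:
 *
 *     tb_add_jump(prev_tb, branch_taken ? 1 : 0, next_tb);
 *
 * Unlinking later walks the circular list rooted at next_tb->jmp_first,
 * using the low two bits of each link as described in the struct comment.
 */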

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
# ifdef VBOX /* bird: GCC4 (and MinGW 3.4.x?) will remove the two unused static
              variables. I've added a dummy __asm__ statement which references
              the two variables to prevent this. */
#  if __GNUC__ >= 4
#   define GOTO_TB(opname, tbparam, n)\
    do {\
        static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
        static void __attribute__((unused)) *__op_label ## n \
            __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
        __asm__ ("" : : "m" (__op_label ## n), "m" (dummy ## n));\
        goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\
    label ## n: ;\
    dummy_label ## n: ;\
    } while (0)
#  else
#   define GOTO_TB(opname, tbparam, n)\
    do {\
        static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
        static void __attribute__((unused)) *__op_label ## n \
            __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
        goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\
    label ## n: ;\
    dummy_label ## n: ;\
    } while (0)
#  endif
# else /* !VBOX */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)
# endif /* !VBOX */

#endif
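
/*
 * Background note (editor addition): the portable GOTO_TB variant relies on
 * GCC's "labels as values" extension: &&label yields the label's address and
 * goto *expr jumps to a runtime-computed address, which is how one
 * translation block tail-jumps into its patched successor without a call.
 * A minimal standalone sketch of the mechanism:
 *
 *     void *target = &&done;    label address, GCC extension
 *     goto *target;             indirect jump through a data pointer
 *     done: ;
 *
 * Storing &&label ## n in the asm-named __op_label variable is what lets the
 * translator find and rewrite the jump slot from outside the function.
 */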

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0:   lwarx %0,0,%1\n"
        "     xor. %0,%3,%0\n"
        "     bne 1f\n"
        "     stwcx. %2,0,%1\n"
        "     bne- 0b\n"
        "1:   "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif
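
/*
 * Semantics note (editor addition): lock; cmpxchgl compares *p with EAX
 * (readval, preloaded with 0). If they match, *p is atomically set to 1 and
 * readval stays 0, i.e. the lock was acquired; otherwise EAX receives the
 * old nonzero value of *p, signalling the lock was already held. Either way
 * the return value answers "was the lock taken before this call?".
 */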

#ifdef __x86_64__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:  mov 1,%2\n"
                          "    ldl_l %0,%1\n"
                          "    stl_c %2,%1\n"
                          "    beq %2,1f\n"
                          ".subsection 2\n"
                          "1:  br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc","memory");
    return ret;
}
#endif

#ifdef __ia64
#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
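
/*
 * Usage sketch (editor addition, hypothetical lock variable): the lock is a
 * plain int that is 0 when free. In CONFIG_USER_ONLY builds spin_lock()
 * really spins on testandset(); in the system build the operations compile
 * away because that configuration does not need them.
 *
 *     static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *     spin_lock(&my_lock);
 *     ... critical section ...
 *     spin_unlock(&my_lock);
 */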

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
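
/*
 * Background note (editor addition): repeatedly including softmmu_header.h
 * with a different DATA_SIZE each time is QEMU's C "template" idiom. Each
 * pass stamps out size-specific code-fetch accessors whose names are built
 * with glue() from the access width and MEMSUFFIX, yielding ldub_code,
 * lduw_code, ldl_code and ldq_code for sizes 1, 2, 4 and 8 respectively;
 * get_phys_addr_code() below relies on the generated ldub_code.
 */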

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
# ifdef VBOX
target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry);
#  if !defined(REM_PHYS_ADDR_IN_TLB)
target_ulong remR3HCVirt2GCPhys(void *env, void *addr);
#  endif
# endif
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
# ifdef VBOX
        /* deal with non-MMIO access handlers. */
        return remR3PhysGetPhysicalAddressCode(env, addr, &env->tlb_table[is_user][index]);
# else
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
# endif
    }
# if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    return addr + env->tlb_table[is_user][index].addend;
# elif defined(VBOX)
    return remR3HCVirt2GCPhys(env, (void *)(addr + env->tlb_table[is_user][index].addend));
# else
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
# endif
}
#endif
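
/*
 * Walkthrough (editor addition): on the fast path the code TLB entry for
 * addr is already valid and the plain QEMU case returns
 *
 *     addr + tlb_table[is_user][index].addend - (unsigned long)phys_ram_base
 *
 * i.e. an offset into guest RAM rather than a true physical address, as
 * NOTE2 above says. On a miss, ldub_code(addr) is issued purely for its side
 * effect of refilling the TLB entry (possibly raising a guest exception);
 * the loaded byte itself is discarded.
 */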


#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif