Changeset 17274 in vbox for trunk/src/recompiler_new
- Timestamp: Mar 3, 2009, 1:16:06 PM (16 years ago)
- Location: trunk/src/recompiler_new
- Files: 6 edited
trunk/src/recompiler_new/VBoxRecompiler.c
(r17251 → r17274)

 5072 5072     {
 5073 5073     }
trunk/src/recompiler_new/cpu-defs.h
(r13559 → r17274)

Apart from the hunk below, every change in this file is whitespace-only: trailing spaces are stripped from the header comment, the "#ifndef TARGET_PHYS_ADDR_BITS" guard, the EXCP_INTERRUPT define, the CPUTLBEntry addr_read/addr_write/addr_code members and dummy padding array, and the cpu_model_str declaration. The functional hunk adds a physical-address addends table, parallel to tlb_table, to CPU_COMMON:

 174 174     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                \
 175 175     target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];             \
     176     /** addends for HVA -> GPA translations */                        \
     177     VBOX_ONLY(target_phys_addr_t phys_addends[NB_MMU_MODES][CPU_TLB_SIZE]); \
 176 178     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];         \
 177 179     /* buffer for temporaries in the code generator */                \
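Why a separate parallel table rather than a new CPUTLBEntry member? The entry's dummy[] array (visible in the diff context) pads each entry to 1 << CPU_TLB_ENTRY_BITS bytes, keeping the entry size a power of two so index arithmetic can be done with shifts; growing the struct would disturb that layout. The following is a minimal compilable sketch of the idea, not VirtualBox code: the type, sizes, and field widths are all invented for illustration.

    #include <assert.h>
    #include <stdint.h>

    #define NB_MODES   2
    #define TLB_SIZE   256
    #define ENTRY_BITS 5                /* each entry padded to 1 << 5 = 32 bytes */

    /* Shaped like CPUTLBEntry: the pad[] member rounds the struct up to a
       power-of-two size, so "index * sizeof(entry)" reduces to a shift. */
    typedef struct TlbEntryLike {
        uint32_t addr_read, addr_write, addr_code;
        uint32_t addend;                /* narrowed to 32 bits for the sketch */
        uint8_t  pad[(1 << ENTRY_BITS) - 4 * sizeof(uint32_t)];
    } TlbEntryLike;

    /* Growing the entry would upset that padding, so the new per-entry datum
       lives in a parallel table addressed by the same (mode, index) pair. */
    static TlbEntryLike tlb_table[NB_MODES][TLB_SIZE];
    static uint64_t     phys_addends[NB_MODES][TLB_SIZE];

    int main(void)
    {
        assert(sizeof(TlbEntryLike) == (1u << ENTRY_BITS));
        tlb_table[1][7].addr_read = 0x1000;
        phys_addends[1][7] = 0x7f31c000u - 0x1000u;   /* same slot, side table */
        return 0;
    }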
trunk/src/recompiler_new/exec-all.h
(r16339 → r17274)

 418 418     return addr + env1->tlb_table[mmu_idx][page_index].addend;
 419 419 # elif defined(VBOX)
 420         return remR3HCVirt2GCPhys(env1, (void *)(uintptr_t)(addr + env1->tlb_table[mmu_idx][page_index].addend));
     420     Assert(env1->phys_addends[mmu_idx][page_index] != -1);
     421     /** @todo: nike: will remove this assert along with remR3HCVirt2GCPhys() soon */
     422     Assert(remR3HCVirt2GCPhys(env1, (void *)(uintptr_t)(addr + env1->tlb_table[mmu_idx][page_index].addend)) == addr + env1->phys_addends[mmu_idx][page_index]);
     423     return addr + env1->phys_addends[mmu_idx][page_index];
 421 424 # else
 422 425     return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
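The pair of Assert() calls is a deliberate migration pattern: the new phys_addends path supplies the result, while the old remR3HCVirt2GCPhys() path is kept briefly as an oracle that must agree, and the @todo marks both asserts and the oracle for later removal. Below is a generic, self-contained sketch of that pattern; the function names are invented placeholders, not VBox APIs.

    #include <assert.h>

    /* Old, trusted path (slated for removal, like remR3HCVirt2GCPhys()). */
    static unsigned long legacy_translate(unsigned long x) { return x * 2u + 1u; }

    /* New, cheaper path that is replacing it. */
    static unsigned long fast_translate(unsigned long x) { return (x << 1) + 1u; }

    /* Transitional wrapper: the new result is returned, but debug builds
       cross-check it against the old implementation until the oracle and
       the assert are deleted together. */
    static unsigned long translate(unsigned long x)
    {
        unsigned long r = fast_translate(x);
        assert(r == legacy_translate(x));
        return r;
    }

    int main(void)
    {
        return translate(21) == 43 ? 0 : 1;
    }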
trunk/src/recompiler_new/exec.c
(r15761 → r17274)

 1849 1849 {
 1850 1850     int i;
 1851
 1852 1851 #if defined(DEBUG_TLB)
 1853 1852     printf("tlb_flush:\n");
 …
 1864 1863     env->tlb_table[1][i].addr_write = -1;
 1865 1864     env->tlb_table[1][i].addr_code = -1;
      1865 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
      1866     env->phys_addends[0][i] = -1;
      1867     env->phys_addends[1][i] = -1;
      1868 #endif
 1866 1869 #if (NB_MMU_MODES >= 3)
 1867 1870     env->tlb_table[2][i].addr_read = -1;
 1868 1871     env->tlb_table[2][i].addr_write = -1;
 1869 1872     env->tlb_table[2][i].addr_code = -1;
      1873 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
      1874     env->phys_addends[2][i] = -1;
      1875 #endif
 1870 1876 #if (NB_MMU_MODES == 4)
 1871 1877     env->tlb_table[3][i].addr_read = -1;
 1872 1878     env->tlb_table[3][i].addr_write = -1;
 1873 1879     env->tlb_table[3][i].addr_code = -1;
      1880 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
      1881     env->phys_addends[3][i] = -1;
      1882 #endif
 1874 1883 #endif
 1875 1884 #endif
 …
 2058 2067 #endif
 2059 2068
 2060      #ifndef VBOX
      2069 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
      2070 DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
      2071 #else
 2061 2072 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 2062      #else
 2063      DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry)
 2064 2073 #endif
 …
 2074 2083         tlb_entry->addend - (unsigned long)phys_ram_base;
 2075 2084 #else
 2076          ram_addr = remR3HCVirt2GCPhys(first_cpu, (void*)((tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend));
      2085     Assert(phys_addend != -1);
      2086     ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
      2087
      2088     /** @todo: nike: will remove this assert along with remR3HCVirt2GCPhys() soon */
      2089     Assert(ram_addr == remR3HCVirt2GCPhys(first_cpu, (void*)((tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend)));
 2077 2090 #endif
 2078 2091     if (!cpu_physical_memory_is_dirty(ram_addr)) {
 …
 2086 2099 {
 2087 2100     int i;
      2101 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
      2102     for(i = 0; i < CPU_TLB_SIZE; i++)
      2103         tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
      2104     for(i = 0; i < CPU_TLB_SIZE; i++)
      2105         tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
      2106 #if (NB_MMU_MODES >= 3)
      2107     for(i = 0; i < CPU_TLB_SIZE; i++)
      2108         tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
      2109 #if (NB_MMU_MODES == 4)
      2110     for(i = 0; i < CPU_TLB_SIZE; i++)
      2111         tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
      2112 #endif
      2113 #endif
      2114 #else /* VBOX */
 2088 2115     for(i = 0; i < CPU_TLB_SIZE; i++)
 2089 2116         tlb_update_dirty(&env->tlb_table[0][i]);
 …
 2098 2125 #endif
 2099 2126 #endif
      2127 #endif /* VBOX */
 2100 2128 }
 …
 2278 2306     if (prot & PAGE_WRITE)
 2279 2307         te->addr_write |= write_mods;
      2308
      2309     env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
 2280 2310 #endif
 2281 2311
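Taken together, these hunks give the addends table a complete lifecycle: tlb_flush() poisons every slot with -1, tlb_set_page() stores (physical page minus page-aligned virtual address), and a lookup recovers the physical address with one addition guarded by an Assert. Here is a self-contained sketch of that lifecycle under invented sizes and a 32-bit toy address space; it mirrors the changeset's logic but is not its code.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TLB_SIZE       8
    #define PAGE_MASK      0xfffff000u
    #define ADDEND_INVALID ((uint32_t)-1)   /* same sentinel the changeset uses */

    static uint32_t phys_addends[TLB_SIZE];

    /* tlb_flush analogue: mark every addend invalid. */
    static void flush(void)
    {
        for (int i = 0; i < TLB_SIZE; i++)
            phys_addends[i] = ADDEND_INVALID;
    }

    /* tlb_set_page analogue, mirroring
       "env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr"
       (vaddr is masked here for safety; in tlb_set_page it is already aligned). */
    static void fill(int slot, uint32_t vaddr, uint32_t pd)
    {
        phys_addends[slot] = (pd & PAGE_MASK) - (vaddr & PAGE_MASK);
    }

    /* get_phys_addr_code analogue: one add, guarded like the new Assert. */
    static uint32_t lookup(int slot, uint32_t addr)
    {
        assert(phys_addends[slot] != ADDEND_INVALID);
        return addr + phys_addends[slot];   /* unsigned wrap-around is intended */
    }

    int main(void)
    {
        flush();
        fill(3, 0x00401000u, 0x7f31c000u);  /* page 0x401000 -> phys 0x7f31c000 */
        printf("0x%08x\n", (unsigned)lookup(3, 0x00401abcu));  /* 0x7f31cabc */
        return 0;
    }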
trunk/src/recompiler_new/osdep.h
(r16455 → r17274)

 12 12
 13 13 #include "config.h"
    14
    15 #define VBOX_ONLY(x) x
 14 16
 15 17 #ifndef _MSC_VER
 …
 49 51
 50 52 #include <stdarg.h>
    53
    54 #define VBOX_ONLY(x)
 51 55
 52 56 #define qemu_snprintf snprintf /* bird */
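The two definitions, apparently landing in the VBox and non-VBox halves of the header respectively, implement the standard conditional-member idiom: VBOX_ONLY(x) expands to its argument in a VirtualBox build and to nothing otherwise, which is what lets cpu-defs.h declare phys_addends inside CPU_COMMON without the field leaking into upstream configurations. A minimal sketch of the idiom, with an invented struct and field names:

    #include <stdio.h>

    #define VBOX 1   /* pretend this is the VirtualBox configuration */

    /* Same shape as the two osdep.h definitions: expand to the argument
       in a VBox build, to nothing otherwise. */
    #ifdef VBOX
    # define VBOX_ONLY(x) x
    #else
    # define VBOX_ONLY(x)
    #endif

    typedef struct Example {
        int shared_field;             /* present in every configuration     */
        VBOX_ONLY(int vbox_field;)    /* compiled only when VBOX is defined */
    } Example;

    int main(void)
    {
        printf("sizeof(Example) = %zu\n", sizeof(Example));
        return 0;
    }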
trunk/src/recompiler_new/target-i386/cpu.h
(r14425 → r17274)

Every hunk in this file is whitespace-only: trailing spaces are stripped from roughly forty lines, including the file header comment, the eflags mask defines (CC_C, CC_P, TF_MASK, IF_MASK, DF_MASK, NT_MASK, AC_MASK), the HF2_HIF_MASK and CPUID_VENDOR_AMD_2 defines, the MSR_IA32_SYSENTER block, several VBox alignment-padding members and #endif lines in CPUX86State and CPUX86State_Ver16, the CPU_RAW_RING0 comment, the cpu_x86_load_seg_cache() definition and its hflags computation, and the cpu_x86_signal_handler(), cpu_rdmsr() and cpu_wrmsr() prototypes. No functional change.