Changeset 46442 in vbox

- Timestamp: 2013-06-07 02:34:39 PM (11 years ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
```diff
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46441)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46442)
@@ -792,4 +792,6 @@
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
  */
 static int hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
@@ -843,3 +845,65 @@
 
 /**
+ * Loads the guest MSRs into the VMCB.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    /* Guest Sysenter MSRs. */
+    pVmcb->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
+    pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
+    pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
+
+    /* Guest EFER MSR. */
+    /* AMD-V requires guest EFER.SVME to be set. Weird.
+       See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks". */
+    pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
+
+    /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit from guest EFER otherwise AMD-V expects amd64 shadow paging. */
+    if (!CPUMIsGuestInLongModeEx(pCtx))
+        pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
+}
+
+
+/**
+ * Sets up the appropriate function to run guest code.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    if (CPUMIsGuestInLongModeEx(pCtx))
+    {
+#ifndef VBOX_ENABLE_64_BITS_GUESTS
+        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
+#endif
+        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
+#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
+        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
+#else
+        /* 64-bit host or hybrid host. */
+        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
+#endif
+    }
+    else
+    {
+        /* Guest is not in long mode, use the 32-bit handler. */
+        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Loads the guest state.
@@ -849,4 +913,6 @@
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
  */
 VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
@@ -862,9 +928,30 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
-    int rc = hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
+    int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pMixedCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+    rc = hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestSegmentRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    rc
-    /* -XXX- todo */
+    hmR0SvmLoadGuestMsrs(pVCpu, pCtx);
+
+    /* Guest RIP, RSP, RFLAGS, CPL. */
+    pVmcb->guest.u64RIP    = pCtx->rip;
+    pVmcb->guest.u64RSP    = pCtx->rsp;
+    pVmcb->guest.u64RFlags = pCtx->eflags.u32;
+    pVmcb->guest.u8CPL     = pCtx->ss.Attr.n.u2Dpl;
+
+    /* Guest RAX (VMRUN uses RAX as an implicit parameter). */
+    pVmcb->guest.u64RAX = pCtx->rax;
+
+    rc = hmR0SvmSetupVMRunHandler(pVCpu, pMixedCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+
+    /* Clear any unused and reserved bits. */
+    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_SYSENTER_CS_MSR
+                                      | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
+                                      | HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
+
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
```
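The interesting rule in the new hmR0SvmLoadGuestMsrs() is the EFER handling: AMD-V's consistency checks reject a VMCB whose guest EFER lacks the SVME bit, and leaving LME set for a guest that is not in long mode would make AMD-V expect amd64 shadow paging. A standalone sketch of that rule follows; it is illustrative only, not VirtualBox code. The helper name vmcbGuestEfer and the stdio harness are invented for the example, and the EFER bit positions are taken from the AMD64 architecture manuals.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_LME  (UINT64_C(1) << 8)   /* Long Mode Enable */
#define EFER_SVME (UINT64_C(1) << 12)  /* Secure Virtual Machine Enable */

/* Compute the EFER value to place in the VMCB for a guest whose EFER
   image is uGuestEfer. */
static uint64_t vmcbGuestEfer(uint64_t uGuestEfer, bool fGuestInLongMode)
{
    uint64_t uEfer = uGuestEfer | EFER_SVME;    /* AMD-V requires SVME set. */
    if (!fGuestInLongMode)
        uEfer &= ~EFER_LME;                     /* No amd64 shadow paging. */
    return uEfer;
}

int main(void)
{
    /* A 32-bit guest with a stale LME bit: SVME is forced on, LME dropped. */
    printf("VMCB EFER = %#llx\n",
           (unsigned long long)vmcbGuestEfer(EFER_LME, false));
    return 0;
}
```

In the changeset itself the same two steps are expressed with the IPRT constants MSR_K6_EFER_SVME and MSR_K6_EFER_LME.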
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
```diff
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r46440)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r46442)
@@ -6686,6 +6686,7 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    /* Clear the currently unused reserved bits. */
-    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_VMX_RESERVED1
+    /* Clear any unused and reserved bits. */
+    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_CR2
+                                      | HM_CHANGED_VMX_RESERVED1
                                       | HM_CHANGED_VMX_RESERVED2);
 
```
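The VT-x side picks up the matching change: the newly introduced HM_CHANGED_GUEST_CR2 bit is cleared together with the reserved bits once guest-state loading finishes, presumably because this loader does not sync CR2 through the VMCS. A minimal sketch of the dirty-flag pattern at work, with invented names rather than the VirtualBox API:

```c
#include <stdint.h>
#include <stdio.h>

#define CHANGED_GUEST_RIP   (1u << 0)
#define CHANGED_GUEST_CR2   (1u << 4)
#define CHANGED_RESERVED1   (1u << 21)
#define CHANGED_RESERVED2   (1u << 22)

static uint32_t g_fContextUseFlags;   /* "this guest state is dirty" bits */

static void loadGuestState(void)
{
    if (g_fContextUseFlags & CHANGED_GUEST_RIP)
    {
        /* ...sync RIP into the VMCS here... */
        g_fContextUseFlags &= ~CHANGED_GUEST_RIP;
    }

    /* Bits this loader never syncs (CR2, reserved) are cleared wholesale
       so a trailing "everything clean" assertion can hold. */
    g_fContextUseFlags &= ~(  CHANGED_GUEST_CR2
                            | CHANGED_RESERVED1
                            | CHANGED_RESERVED2);
}

int main(void)
{
    g_fContextUseFlags = CHANGED_GUEST_RIP | CHANGED_GUEST_CR2;
    loadGuestState();
    printf("flags after load: %#x\n", g_fContextUseFlags);   /* prints 0 */
    return 0;
}
```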
trunk/src/VBox/VMM/include/HMInternal.h
```diff
--- trunk/src/VBox/VMM/include/HMInternal.h (r46441)
+++ trunk/src/VBox/VMM/include/HMInternal.h (r46442)
@@ -110,33 +110,34 @@
 # define HM_CHANGED_GUEST_RFLAGS               RT_BIT(2)
 # define HM_CHANGED_GUEST_CR0                  RT_BIT(3)
-# define HM_CHANGED_GUEST_CR3                  RT_BIT(4)
-# define HM_CHANGED_GUEST_CR4                  RT_BIT(5)
-# define HM_CHANGED_GUEST_GDTR                 RT_BIT(6)
-# define HM_CHANGED_GUEST_IDTR                 RT_BIT(7)
-# define HM_CHANGED_GUEST_LDTR                 RT_BIT(8)
-# define HM_CHANGED_GUEST_TR                   RT_BIT(9)
-# define HM_CHANGED_GUEST_SEGMENT_REGS         RT_BIT(10)
-# define HM_CHANGED_GUEST_DEBUG                RT_BIT(11)
-# define HM_CHANGED_GUEST_SYSENTER_CS_MSR      RT_BIT(12)
-# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR     RT_BIT(13)
-# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR     RT_BIT(14)
+# define HM_CHANGED_GUEST_CR2                  RT_BIT(4)
+# define HM_CHANGED_GUEST_CR3                  RT_BIT(5)
+# define HM_CHANGED_GUEST_CR4                  RT_BIT(6)
+# define HM_CHANGED_GUEST_GDTR                 RT_BIT(7)
+# define HM_CHANGED_GUEST_IDTR                 RT_BIT(8)
+# define HM_CHANGED_GUEST_LDTR                 RT_BIT(9)
+# define HM_CHANGED_GUEST_TR                   RT_BIT(10)
+# define HM_CHANGED_GUEST_SEGMENT_REGS         RT_BIT(11)
+# define HM_CHANGED_GUEST_DEBUG                RT_BIT(12)
+# define HM_CHANGED_GUEST_SYSENTER_CS_MSR      RT_BIT(13)
+# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR     RT_BIT(14)
+# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR     RT_BIT(15)
 /* VT-x specific state. */
-# define HM_CHANGED_VMX_GUEST_AUTO_MSRS        RT_BIT(15)
-# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE   RT_BIT(16)
-# define HM_CHANGED_VMX_GUEST_APIC_STATE       RT_BIT(17)
-# define HM_CHANGED_VMX_ENTRY_CTLS             RT_BIT(18)
-# define HM_CHANGED_VMX_EXIT_CTLS              RT_BIT(19)
-# define HM_CHANGED_VMX_RESERVED1              RT_BIT(20)
-# define HM_CHANGED_VMX_RESERVED2              RT_BIT(21)
+# define HM_CHANGED_VMX_GUEST_AUTO_MSRS        RT_BIT(16)
+# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE   RT_BIT(17)
+# define HM_CHANGED_VMX_GUEST_APIC_STATE       RT_BIT(18)
+# define HM_CHANGED_VMX_ENTRY_CTLS             RT_BIT(19)
+# define HM_CHANGED_VMX_EXIT_CTLS              RT_BIT(20)
+# define HM_CHANGED_VMX_RESERVED1              RT_BIT(21)
+# define HM_CHANGED_VMX_RESERVED2              RT_BIT(22)
 /* AMD-V specific state. */
-# define HM_CHANGED_SVM_INTERCEPT_VECTORS      RT_BIT(15)
-# define HM_CHANGED_SVM_IOPM_MSRPM_BITMAPS     RT_BIT(16)
-# define HM_CHANGED_SVM_GUEST_ASID             RT_BIT(17)
-# define HM_CHANGED_SVM_GUEST_TPR              RT_BIT(18)
-# define HM_CHANGED_SVM_GUEST_NP               RT_BIT(19)
-# define HM_CHANGED_SVM_LBR                    RT_BIT(20)
-# define HM_CHANGED_SVM_AVIC                   RT_BIT(21)
-
-# define HM_CHANGED_HOST_CONTEXT               RT_BIT(22)
+# define HM_CHANGED_SVM_INTERCEPT_VECTORS      RT_BIT(16)
+# define HM_CHANGED_SVM_IOPM_MSRPM_BITMAPS     RT_BIT(17)
+# define HM_CHANGED_SVM_GUEST_ASID             RT_BIT(18)
+# define HM_CHANGED_SVM_GUEST_TPR              RT_BIT(19)
+# define HM_CHANGED_SVM_GUEST_NP               RT_BIT(20)
+# define HM_CHANGED_SVM_LBR                    RT_BIT(21)
+# define HM_CHANGED_SVM_AVIC                   RT_BIT(22)
+
+# define HM_CHANGED_HOST_CONTEXT               RT_BIT(23)
 # define HM_CHANGED_ALL_GUEST                  (  HM_CHANGED_GUEST_RIP \
                                                 | HM_CHANGED_GUEST_RSP \
```
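Note that after the renumbering the VT-x group and the AMD-V group still share bit positions 16 through 22. That is safe because a given host CPU runs either VT-x or AMD-V, never both, so the two sets of flags cannot coexist in fContextUseFlags. A compile-time sketch of the layout; RT_BIT is reproduced here as the usual 1U << bit IPRT macro, and the static_assert harness is illustrative rather than part of the tree:

```c
#include <assert.h>

/* RT_BIT mirrors the IPRT macro of the same name. */
#define RT_BIT(bit) (1U << (bit))

/* Generic guest-state bits end at 15; the technology-specific groups
   then reuse bits 16-22 because a VCPU uses VT-x or AMD-V, not both. */
#define HM_CHANGED_GUEST_CR2               RT_BIT(4)   /* new in r46442 */
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR  RT_BIT(15)  /* last generic bit */
#define HM_CHANGED_VMX_RESERVED2           RT_BIT(22)  /* last VT-x bit */
#define HM_CHANGED_SVM_AVIC                RT_BIT(22)  /* last AMD-V bit */
#define HM_CHANGED_HOST_CONTEXT            RT_BIT(23)  /* shared again */

/* Inserting CR2 at bit 4 shifted every later flag up by one; the highest
   flag must still fit in the 32-bit fContextUseFlags field. */
static_assert(HM_CHANGED_HOST_CONTEXT <= RT_BIT(31), "flag overflow");

int main(void) { return 0; }
```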