儲存庫 vbox 的更動 55129
- 時間戳記:
- 2015-4-8 上午11:31:47 (10 年 以前)
- 位置:
- trunk
- 檔案:
-
- 修改 15 筆資料
圖例:
- 未更動
- 新增
- 刪除
-
trunk/include/VBox/vmm/gim.h
r55118 r55129 175 175 VMM_INT_DECL(int) GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx); 176 176 VMM_INT_DECL(int) GIMXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis); 177 VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVM pVM);177 VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVMCPU pVCpu); 178 178 VMM_INT_DECL(VBOXSTRICTRC) GIMReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue); 179 179 VMM_INT_DECL(VBOXSTRICTRC) GIMWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue); -
trunk/include/VBox/vmm/hm.h
r55118 r55129 144 144 VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping); 145 145 VMM_INT_DECL(bool) HMSetSingleInstruction(PVMCPU pVCpu, bool fEnable); 146 VMM_INT_DECL(void) HMHypercallsEnable(PVM pVM);147 VMM_INT_DECL(void) HMHypercallsDisable(PVM pVM);146 VMM_INT_DECL(void) HMHypercallsEnable(PVMCPU pVCpu); 147 VMM_INT_DECL(void) HMHypercallsDisable(PVMCPU pVCpu); 148 148 149 149 #ifndef IN_RC -
trunk/include/VBox/vmm/vmm.h
r55118 r55129 269 269 VMM_INT_DECL(void) VMMTrashVolatileXMMRegs(void); 270 270 VMM_INT_DECL(int) VMMPatchHypercall(PVM pVM, void *pvBuf, size_t cbBuf, size_t *pcbWritten); 271 VMM_INT_DECL(void) VMMHypercallsEnable(PVM pVM);272 VMM_INT_DECL(void) VMMHypercallsDisable(PVM pVM);271 VMM_INT_DECL(void) VMMHypercallsEnable(PVMCPU pVCpu); 272 VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu); 273 273 274 274 -
trunk/src/VBox/VMM/VMMAll/GIMAll.cpp
r55118 r55129 149 149 * 150 150 * @returns true if needed, false otherwise. 151 * @param pVM Pointer to the VM. 152 */ 153 VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVM pVM) 154 { 151 * @param pVCpu Pointer to the VMCPU. 152 */ 153 VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVMCPU pVCpu) 154 { 155 PVM pVM = pVCpu->CTX_SUFF(pVM); 155 156 if (!GIMIsEnabled(pVM)) 156 157 return false; … … 159 160 { 160 161 case GIMPROVIDERID_KVM: 161 return gimKvmShouldTrapXcptUD(pV M);162 return gimKvmShouldTrapXcptUD(pVCpu); 162 163 163 164 default: -
trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp
r55118 r55129 324 324 * For raw-mode VMs, this function will always return true. See gimR3KvmInit(). 325 325 * 326 * @param pVM Pointer to the VM. 327 */ 328 VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVM pVM) 329 { 326 * @param pVCpu Pointer to the VMCPU. 327 */ 328 VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVMCPU pVCpu) 329 { 330 PVM pVM = pVCpu->CTX_SUFF(pVM); 330 331 return pVM->gim.s.u.Kvm.fTrapXcptUD; 331 332 } -
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r55118 r55129 510 510 * Notifies HM that paravirtualized hypercalls are now enabled. 511 511 * 512 * @param pV M Pointer to the VM.513 */ 514 VMM_INT_DECL(void) HMHypercallsEnable(PVM pVM)515 { 516 pV M->hm.s.fHypercallsEnabled = true;512 * @param pVCpu Pointer to the VMCPU. 513 */ 514 VMM_INT_DECL(void) HMHypercallsEnable(PVMCPU pVCpu) 515 { 516 pVCpu->hm.s.fHypercallsEnabled = true; 517 517 } 518 518 … … 521 521 * Notifies HM that paravirtualized hypercalls are now disabled. 522 522 * 523 * @param pVM Pointer to the VM. 524 */ 525 VMM_INT_DECL(void) HMHypercallsDisable(PVM pVM) 526 { 527 pVM->hm.s.fHypercallsEnabled = false; 528 } 529 523 * @param pVCpu Pointer to the VMCPU. 524 */ 525 VMM_INT_DECL(void) HMHypercallsDisable(PVMCPU pVCpu) 526 { 527 pVCpu->hm.s.fHypercallsEnabled = false; 528 } 529 530 531 /** 532 * Notifies HM that GIM provider wants to trap #UD. 533 * 534 * @param pVCpu Pointer to the VMCPU. 535 */ 536 VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu) 537 { 538 pVCpu->hm.s.fGIMTrapXcptUD = true; 539 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 540 } 541 542 543 /** 544 * Notifies HM that GIM provider no longer wants to trap #UD. 545 * 546 * @param pVCpu Pointer to the VMCPU. 547 */ 548 VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu) 549 { 550 pVCpu->hm.s.fGIMTrapXcptUD = false; 551 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 552 } 553 -
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
r55118 r55129 439 439 * Notifies VMM that paravirtualized hypercalls are now enabled. 440 440 * 441 * @param pV M Pointer to the VM.442 */ 443 VMM_INT_DECL(void) VMMHypercallsEnable(PVM pVM)441 * @param pVCpu Pointer to the VMCPU. 442 */ 443 VMM_INT_DECL(void) VMMHypercallsEnable(PVMCPU pVCpu) 444 444 { 445 445 /* If there is anything to do for raw-mode, do it here. */ 446 446 #ifndef IN_RC 447 if (HMIsEnabled(pV M))448 HMHypercallsEnable(pV M);447 if (HMIsEnabled(pVCpu->CTX_SUFF(pVM))) 448 HMHypercallsEnable(pVCpu); 449 449 #endif 450 450 } … … 454 454 * Notifies VMM that paravirtualized hypercalls are now disabled. 455 455 * 456 * @param pV M Pointer to the VM.457 */ 458 VMM_INT_DECL(void) VMMHypercallsDisable(PVM pVM)456 * @param pVCpu Pointer to the VMCPU. 457 */ 458 VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu) 459 459 { 460 460 /* If there is anything to do for raw-mode, do it here. */ 461 461 #ifndef IN_RC 462 if (HMIsEnabled(pV M))463 HMHypercallsDisable(pV M);464 #endif 465 } 466 462 if (HMIsEnabled(pVCpu->CTX_SUFF(pVM))) 463 HMHypercallsDisable(pVCpu); 464 #endif 465 } 466 -
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r55118 r55129 1214 1214 pVM->hm.s.uMaxAsid = g_HvmR0.uMaxAsid; 1215 1215 1216 pVM->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVM);1217 1218 1216 if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */ 1219 1217 { … … 1229 1227 { 1230 1228 PVMCPU pVCpu = &pVM->aCpus[i]; 1231 pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID; 1232 pVCpu->hm.s.idLastCpu = NIL_RTCPUID; 1229 pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID; 1230 pVCpu->hm.s.idLastCpu = NIL_RTCPUID; 1231 pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu); 1233 1232 1234 1233 /* We'll aways increment this the first time (host uses ASID 0). */ -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r55118 r55129 670 670 Assert(pVM->hm.s.svm.fSupported); 671 671 672 uint32_t const fGimXcptIntercepts = pVM->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;673 672 for (VMCPUID i = 0; i < pVM->cCpus; i++) 674 673 { … … 787 786 788 787 /* Apply the exceptions intercepts needed by the GIM provider. */ 789 pVmcb->ctrl.u32InterceptException |= fGimXcptIntercepts; 788 if (pVCpu->hm.s.fGIMTrapXcptUD) 789 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_UD); 790 790 791 791 /* … … 1634 1634 1635 1635 /** 1636 * Loads the exception interrupts required for guest execution in the VMCB. 1637 * 1638 * @returns VBox status code. 1639 * @param pVCpu Pointer to the VMCPU. 1640 * @param pVmcb Pointer to the VM control block. 1641 * @param pCtx Pointer to the guest-CPU context. 1642 */ 1643 static int hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx) 1644 { 1645 int rc = VINF_SUCCESS; 1646 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS)) 1647 { 1648 if (pVCpu->hm.s.fGIMTrapXcptUD) 1649 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_UD); 1650 else 1651 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_UD); 1652 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 1653 } 1654 return rc; 1655 } 1656 1657 1658 /** 1636 1659 * Sets up the appropriate function to run guest code. 1637 1660 * … … 1816 1839 rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx); 1817 1840 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 1841 1842 rc = hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx); 1843 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestXcptIntercepts! 
rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 1818 1844 1819 1845 rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx); … … 5008 5034 else if (rc == VERR_NOT_FOUND) 5009 5035 { 5010 PVM pVM = pVCpu->CTX_SUFF(pVM); 5011 if (pVM->hm.s.fHypercallsEnabled) 5036 if (pVCpu->hm.s.fHypercallsEnabled) 5012 5037 { 5013 5038 rc = GIMHypercall(pVCpu, pCtx); … … 5227 5252 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 5228 5253 5229 PVM pVM = pVCpu->CTX_SUFF(pVM); 5230 if (pVM->hm.s.fGIMTrapXcptUD) 5254 if (pVCpu->hm.s.fGIMTrapXcptUD) 5231 5255 GIMXcptUD(pVCpu, pCtx, NULL /* pDis */); 5232 5256 else -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r55118 r55129 2635 2635 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 2636 2636 2637 uint32_t u32XcptBitmap = 0;2637 uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0; 2638 2638 2639 2639 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */ … … 3554 3554 3555 3555 /** 3556 * Loads the exception intercepts required for guest execution in the VMCS. 3557 * 3558 * @returns VBox status code. 3559 * @param pVCpu Pointer to the VMCPU. 3560 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 3561 * out-of-sync. Make sure to update the required fields 3562 * before using them. 3563 */ 3564 static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3565 { 3566 NOREF(pMixedCtx); 3567 int rc = VINF_SUCCESS; 3568 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS)) 3569 { 3570 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */ 3571 if (pVCpu->hm.s.fGIMTrapXcptUD) 3572 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD); 3573 else 3574 { 3575 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 3576 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD); 3577 #endif 3578 } 3579 3580 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 3581 AssertRCReturn(rc, rc); 3582 3583 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 3584 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, 3585 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu))); 3586 } 3587 return rc; 3588 } 3589 3590 3591 /** 3556 3592 * Loads the guest's RIP into the guest-state area in the VMCS. 3557 3593 * … … 3779 3815 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK; 3780 3816 } 3817 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 3781 3818 3782 3819 if (fInterceptNM) … … 3823 3860 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. 
*/ 3824 3861 3825 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */3862 /* Write VT-x's view of the guest CR0 into the VMCS. */ 3826 3863 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0); 3827 AssertRCReturn(rc, rc);3828 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);3829 3864 AssertRCReturn(rc, rc); 3830 3865 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0, … … 4220 4255 if ( fInterceptDB 4221 4256 || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4257 { 4222 4258 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB); 4259 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 4260 } 4223 4261 else 4224 4262 { 4225 4263 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 4226 4264 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB); 4227 #endif 4228 } 4229 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 4230 AssertRCReturn(rc, rc); 4265 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 4266 #endif 4267 } 4231 4268 4232 4269 /* … … 8295 8332 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 8296 8333 8334 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx); 8335 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 8336 8297 8337 /* 8298 8338 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here). … … 8354 8394 #endif 8355 8395 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS); 8396 } 8397 8398 /* Loading CR0, debug state might have changed intercepts, update VMCS. 
*/ 8399 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS)) 8400 { 8401 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 8402 AssertRC(rc); 8403 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 8356 8404 } 8357 8405 … … 10243 10291 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall); 10244 10292 10245 PVM pVM = pVCpu->CTX_SUFF(pVM); 10246 if (pVM->hm.s.fHypercallsEnabled) 10293 if (pVCpu->hm.s.fHypercallsEnabled) 10247 10294 { 10248 10295 #if 0 … … 11405 11452 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 11406 11453 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB); 11407 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 11408 AssertRCReturn(rc, rc); 11454 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS); 11409 11455 #endif 11410 11456 } -
trunk/src/VBox/VMM/VMMR3/GIMHv.cpp
r55118 r55129 633 633 GIMR3Mmio2Unmap(pVM, pRegion); 634 634 Assert(!pRegion->fMapped); 635 VMMHypercallsDisable(pVM); 635 for (VMCPUID i = 0; i < pVM->cCpus; i++) 636 VMMHypercallsDisable(&pVM->aCpus[i]); 636 637 LogRel(("GIM: HyperV: Disabled Hypercall-page\n")); 637 638 return VINF_SUCCESS; … … 690 691 691 692 /* 692 * Notify VMM that hypercalls are now enabled .693 * Notify VMM that hypercalls are now enabled for all VCPUs. 693 694 */ 694 VMMHypercallsEnable(pVM); 695 for (VMCPUID i = 0; i < pVM->cCpus; i++) 696 VMMHypercallsEnable(&pVM->aCpus[i]); 695 697 696 698 LogRel(("GIM: HyperV: Enabled hypercalls at %#RGp\n", GCPhysHypercallPage)); -
trunk/src/VBox/VMM/VMMR3/GIMKvm.cpp
r55118 r55129 143 143 144 144 /* 145 * Setup #UD and hypercall behaviour. 146 */ 147 VMMHypercallsEnable(pVM); 145 * Setup hypercall and #UD handling. 146 */ 147 for (VMCPUID i = 0; i < pVM->cCpus; i++) 148 VMMHypercallsEnable(&pVM->aCpus[i]); 149 148 150 if (ASMIsAmdCpu()) 149 151 { … … 157 159 pKvm->uOpCodeNative = OP_VMCALL; 158 160 } 161 159 162 /* We always need to trap VMCALL/VMMCALL hypercall using #UDs for raw-mode VMs. */ 160 163 if (!HMIsEnabled(pVM)) -
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
r55118 r55129 610 610 rc = EMInterpretInstructionDisasState(pVCpu, &Cpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR); 611 611 } 612 else if (GIMShouldTrapXcptUD(pV M))612 else if (GIMShouldTrapXcptUD(pVCpu)) 613 613 { 614 614 LogFlow(("TRPMGCTrap06Handler: -> GIMXcptUD\n")); -
trunk/src/VBox/VMM/include/GIMKvmInternal.h
r55118 r55129 261 261 VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue); 262 262 VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue); 263 VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVM pVM);263 VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVMCPU pVCpu); 264 264 VMM_INT_DECL(int) gimKvmXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis); 265 265 -
trunk/src/VBox/VMM/include/HMInternal.h
r55118 r55129 164 164 #define HM_CHANGED_GUEST_EFER_MSR RT_BIT(16) 165 165 #define HM_CHANGED_GUEST_LAZY_MSRS RT_BIT(17) /* Shared */ 166 #define HM_CHANGED_GUEST_XCPT_INTERCEPTS RT_BIT(18) 166 167 /* VT-x specific state. */ 167 #define HM_CHANGED_VMX_GUEST_AUTO_MSRS RT_BIT(1 8)168 #define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT( 19)169 #define HM_CHANGED_VMX_GUEST_APIC_STATE RT_BIT(2 0)170 #define HM_CHANGED_VMX_ENTRY_CTLS RT_BIT(2 1)171 #define HM_CHANGED_VMX_EXIT_CTLS RT_BIT(2 2)168 #define HM_CHANGED_VMX_GUEST_AUTO_MSRS RT_BIT(19) 169 #define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(20) 170 #define HM_CHANGED_VMX_GUEST_APIC_STATE RT_BIT(21) 171 #define HM_CHANGED_VMX_ENTRY_CTLS RT_BIT(22) 172 #define HM_CHANGED_VMX_EXIT_CTLS RT_BIT(23) 172 173 /* AMD-V specific state. */ 173 #define HM_CHANGED_SVM_GUEST_APIC_STATE RT_BIT(1 8)174 #define HM_CHANGED_SVM_RESERVED1 RT_BIT( 19)175 #define HM_CHANGED_SVM_RESERVED2 RT_BIT(2 0)176 #define HM_CHANGED_SVM_RESERVED3 RT_BIT(2 1)177 #define HM_CHANGED_SVM_RESERVED4 RT_BIT(2 2)174 #define HM_CHANGED_SVM_GUEST_APIC_STATE RT_BIT(19) 175 #define HM_CHANGED_SVM_RESERVED1 RT_BIT(20) 176 #define HM_CHANGED_SVM_RESERVED2 RT_BIT(21) 177 #define HM_CHANGED_SVM_RESERVED3 RT_BIT(22) 178 #define HM_CHANGED_SVM_RESERVED4 RT_BIT(23) 178 179 179 180 #define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_CR0 \ … … 195 196 | HM_CHANGED_GUEST_EFER_MSR \ 196 197 | HM_CHANGED_GUEST_LAZY_MSRS \ 198 | HM_CHANGED_GUEST_XCPT_INTERCEPTS \ 197 199 | HM_CHANGED_VMX_GUEST_AUTO_MSRS \ 198 200 | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \ … … 201 203 | HM_CHANGED_VMX_EXIT_CTLS) 202 204 203 #define HM_CHANGED_HOST_CONTEXT RT_BIT(2 3)205 #define HM_CHANGED_HOST_CONTEXT RT_BIT(24) 204 206 205 207 /* Bits shared between host and guest. */ … … 343 345 /** Set when TPR patching is active. */ 344 346 bool fTPRPatchingActive; 345 /** Whether #UD needs to be intercepted (required by certain GIM providers). */ 346 bool fGIMTrapXcptUD; 347 /** Whether paravirt. 
hypercalls are enabled. */ 348 bool fHypercallsEnabled; 349 bool u8Alignment[1]; 347 bool u8Alignment[3]; 350 348 351 349 /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */ … … 584 582 /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */ 585 583 bool fPreloadGuestFpu; 584 585 /** Whether #UD needs to be intercepted (required by certain GIM providers). */ 586 bool fGIMTrapXcptUD; 587 /** Whether paravirt. hypercalls are enabled. */ 588 bool fHypercallsEnabled; 589 uint8_t u8Alignment0[6]; 586 590 587 591 /** World switch exit counter. */
注意:
請參閱 TracChangeset 說明頁面,
以瞭解如何使用更動檢視器