Changeset 45379 in vbox for trunk/src/VBox
- Timestamp:
- 2013-04-05 02:46:04 PM (12 years ago)
- Files:
- 1 file edited
Legend:
- Unmodified (context)
- Added (+)
- Removed (-)
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45352)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45379)
@@ -1790 +1790 @@
     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
     rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, 0xffffffffffffffffULL);
+
+    /* Setup debug controls */
+    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);                /** @todo think about this. */
+    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);     /** @todo Intel spec. 26.6.3 think about this */
     AssertRCReturn(rc, rc);
     return rc;
@@ -2932 +2936 @@
         return VINF_SUCCESS;

+#ifdef DEBUG
     /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
     if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
@@ -2940 +2945 @@
         Assert((pCtx->dr[7] & 0x400) == 0x400);  /* bit 10 is reserved (MB1). */
     }
+#endif

     int rc = VERR_INTERNAL_ERROR_5;
@@ -3014 +3020 @@
     /* The guest's view of its DR7 is unblemished. */
     rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
-
-    /* Setup other debug controls */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);                /** @todo think about this. */
-    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);     /** @todo Intel spec. 26.6.3 think about this */
-    AssertRCReturn(rc, rc);

     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
@@ -4534 +4535 @@

 /**
- * Saves, if necessary, any event that occurred during event delivery as a
- * pending VMX event to handle before the next VM-entry or to be translated as a
- * TRPM event in the case of exiting to ring-3.
+ * Handle a condition that occurred while delivering an event through the guest
+ * IDT.
  *
  * @returns VBox status code (informational error codes included).
- * @retval VINF_SUCCESS if we should continue handling VM-exits.
+ * @retval VINF_SUCCESS if we should continue handling the VM-exit.
  * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
  *         continue execution of the guest which will delivery the #DF.
@@ -4552 +4552 @@
  *
  * @remarks No-long-jump zone!!!
- * @remarks Called unconditionally after every VM-exit.
- *
- */
-static int hmR0VmxSavePendingEventDueToEventDelivery(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
-{
-
-    Assert(pVCpu);
-    Assert(pVmxTransient);
-
-    if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed))  /* Don't bother with pending events if the VM-entry itself failed. */
-        return VINF_SUCCESS;
-
+ */
+static int hmR0VmxCheckExitDueToEventDelivery(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
+{
     int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);
@@ -5064 +5055 @@

     /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
-    if (   pVM->hm.s.fNestedPaging
-        && CPUMIsGuestPagingEnabledEx(pMixedCtx))
-    {
-        if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
+    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
+    {
+        if (   pVM->hm.s.fNestedPaging
+            && CPUMIsGuestPagingEnabledEx(pMixedCtx))
         {
             rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
@@ -5093 +5084 @@
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
             }
-
-            pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
-        }
-    }
-    else
+        }
         pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
+    }
     return rc;
 }
@@ -5535 +5523 @@
     AssertRC(rc);

-    /* Restore debug registers if necessary and resync on next R0 re-entry. */
+    /* Restore FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /* Restore debug registers if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestDebugStateActive(pVCpu))
     {
         CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
+        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     }
@@ -5544 +5541 @@
     {
         CPUMR0LoadHostDebugState(pVM, pVCpu);
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     }

@@ -6219 +6216 @@
     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));

-    /* Currently we always atleast reload CR0 (longjmps included because of FPU state sharing). */
-    Log(("LdGstFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
+    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);

     /* Determine real-on-v86 mode. */
@@ -6266 +6262 @@
               ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
                pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
 }
@@ -6382 +6380 @@
     /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
     Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
-    int rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
+    int rc;
+    if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_INTR_STATE)
+        rc = hmR0VmxLoadGuestIntrState(pVM, pVCpu, pMixedCtx);
+    else
+        rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
     AssertRC(rc);
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
@@ -6444 +6446 @@
     {
         Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatInGC, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);

         ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
@@ -6450 +6452 @@
     pVCpu->hm.s.vmx.fUpdatedGuestState = 0;     /* Exits/longjmps to ring-3 requires saving the guest state. */
     pVmxTransient->fVmcsFieldsRead = 0;         /* Transient fields need to be read from the VMCS. */
-    pVmxTransient->fVectoringPF = false;        /* Clear the vectoring page-fault flag. */
+    pVmxTransient->fVectoringPF = false;        /* Vectoring page-fault needs to be determined later. */

     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
@@ -6528 +6530 @@
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);

     VMXTRANSIENT VmxTransient;
@@ -6543 +6544 @@

         /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
         rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
         if (rc != VINF_SUCCESS)
@@ -6566 +6568 @@
         if (RT_UNLIKELY(rc != VINF_SUCCESS))        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
         {
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
             hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
             return rc;
         }

-        /* Handle any exception that caused a VM-exit while delivering an event to the guest. */
-        rc = hmR0VmxSavePendingEventDueToEventDelivery(pVM, pVCpu, pCtx, &VmxTransient);
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-        {
-            /* Handle VM-exits. */
-            AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("Invalid VM-exit %#x\n", VmxTransient.uExitReason));
-            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
-            rc = (*s_apfnVMExitHandlers[VmxTransient.uExitReason])(pVM, pVCpu, pCtx, &VmxTransient);
-            if (rc != VINF_SUCCESS)
-                break;
-            else if (cLoops > pVM->hm.s.cMaxResumeLoops)
-            {
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
-                rc = VINF_EM_RAW_INTERRUPT;
-                break;
-            }
-        }
-        else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        /* Handle the VM-exit. */
+        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
+        rc = (*s_apfnVMExitHandlers[VmxTransient.uExitReason])(pVM, pVCpu, pCtx, &VmxTransient);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        if (rc != VINF_SUCCESS)
             break;
-        /* else continue guest execution for (VINF_VMX_DOUBLE_FAULT) */
-    }
-
+        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
+            rc = VINF_EM_RAW_INTERRUPT;
+            break;
+        }
+    }
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     if (rc == VERR_EM_INTERPRETER)
         rc = VINF_EM_RAW_EMULATE_INSTR;
@@ -6644 +6642 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
-    return VINF_EM_RAW_INTERRUPT;
+    return VINF_SUCCESS;
 }
@@ -6655 +6653 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     int rc = hmR0VmxReadExitIntrInfoVmcs(pVmxTransient);
-    rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-    rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);
@@ -6668 +6664 @@
     uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
     Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(uExitIntrInfo));
+
+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;

     uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
@@ -6707 +6710 @@
         Assert(pVM->hm.s.vmx.pRealModeTSS);
         Assert(PDMVmmDevHeapIsEnabled(pVM));
+        rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+        rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVmxTransient);
+        AssertRCReturn(rc, rc);
         rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
                                     VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
@@ -7788 +7794 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();

+    /* Check if this task-switch occurred while delivery an event through the guest IDT. */
+    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
+    AssertRCReturn(rc, rc);
+    if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
+    {
+        rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
+        AssertRCReturn(rc, rc);
+        if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
+        {
+            uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
+            if (   uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
+                && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
+                && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
+            {
+                /* Save it as a pending event while will be converted to a TRPM event on the way out to ring-3. */
+                pVCpu->hm.s.Event.fPending = true;
+                pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
+                rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
+                AssertRCReturn(rc, rc);
+                if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringErrorCode))
+                    pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
+                else
+                    pVCpu->hm.s.Event.u32ErrCode = 0;
+            }
+        }
+    }
     /** @todo Emulate task switch someday, currently just going back to ring-3 for
      *        emulation. */
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
     return VERR_EM_INTERPRETER;
@@ -7819 +7850 @@
     int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);

+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
+#if 0
     /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
      *        just sync the whole thing. */
-    rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+#else
+    /* Aggressive state sync. for now. */
+    rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);
@@ -7840 +7885 @@
             GCPhys &= PAGE_BASE_GC_MASK;
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
-            Log(("ApicAccess %RGp %#x\n", GCPhys, VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
             VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
                                                   CPUMCTX2CORE(pMixedCtx), GCPhys);
             rc = VBOXSTRICTRC_VAL(rc2);
+            Log(("ApicAccess %RGp %#x\n", GCPhys, VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
             if (   rc == VINF_SUCCESS
                 || rc == VERR_PAGE_TABLE_NOT_PRESENT
@@ -7957 +8002 @@
     Assert(pVM->hm.s.fNestedPaging);

+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
     RTGCPHYS GCPhys = 0;
-    int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
+    rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
     AssertRCReturn(rc, rc);

+#if 0
     rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better? */
+#else
+    /* Aggressive state sync. for now. */
+    rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);
-
-    Log(("EPT misconfig at %#RX64 RIP=%#RX64\n", GCPhys, pMixedCtx->rip));

     /*
@@ -7974 +8031 @@
      */
     VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
+    Log(("EPT misconfig at %#RX64 RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
     rc = VBOXSTRICTRC_VAL(rc2);
-    Log(("EPT misconfig rc=%d\n", rc));
     if (   rc == VINF_SUCCESS
         || rc == VERR_PAGE_TABLE_NOT_PRESENT
@@ -7997 +8054 @@
     Assert(pVM->hm.s.fNestedPaging);

+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
     RTGCPHYS GCPhys = 0;
-    int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
-    rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
-    rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);    /** @todo can we do better? */
+    rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
+    rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
+#if 0
+    rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);    /** @todo Can we do better? */
+#else
+    /* Aggressive state sync. for now. */
+    rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);
@@ -8178 +8249 @@
     {
         Assert(CPUMIsGuestFPUStateActive(pVCpu));
-
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
@@ -8444 +8514 @@
     VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();

-    /* Re-inject the exception into the guest. This cannot be a double-fault condition which are handled in
-       hmR0VmxSavePendingEventDueToEventDelivery(). */
+    /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
+       hmR0VmxCheckExitDueToEventDelivery(). */
     int rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
                                     VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),