VirtualBox

Changeset 45379 in vbox for trunk/src/VBox


Timestamp:
Apr 5, 2013, 2:46:04 PM (12 years ago)
Author:
vboxsync
Message:

VMM/VMMR0: HM bits, optimizations, be more aggressive with saving state for hot exit paths. Do less on every exit, moved pending event delivery to only the required exits.
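In outline: before this changeset, the inner run loop called hmR0VmxSavePendingEventDueToEventDelivery() unconditionally after every VM-exit; afterwards, the renamed hmR0VmxCheckExitDueToEventDelivery() is called only from the handlers of exits that can actually interrupt event delivery through the guest IDT (exceptions/NMIs, task switches, APIC accesses, and EPT violations/misconfigurations), as the diff below shows. A minimal C sketch of that dispatch pattern follows; the types and status codes are simplified stand-ins for illustration, not the real VBox API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for VBox status codes and the VMXTRANSIENT exit state. */
    enum { STS_SUCCESS = 0, STS_DOUBLE_FAULT = 1 };
    typedef struct { uint32_t uIdtVectoringInfo; } XFER;

    /* Bit 31 of the IDT-vectoring info field is the "valid" bit (Intel SDM). */
    static bool idtVectoringValid(const XFER *pXfer)
    {
        return (pXfer->uIdtVectoringInfo & UINT32_C(0x80000000)) != 0;
    }

    /* Cheap early-out: most exits did not interrupt event delivery, so the
     * common path is a single flag test instead of per-exit bookkeeping. */
    static int checkExitDueToEventDelivery(XFER *pXfer)
    {
        if (!idtVectoringValid(pXfer))
            return STS_SUCCESS;
        /* Here the real code saves the interrupted event as a pending event
         * (re-injected later or converted to a TRPM event on the way out to
         * ring-3), or detects a double-fault condition. */
        return STS_DOUBLE_FAULT;
    }

    /* Only exit handlers that can race with event delivery call the check;
     * all other exit handlers skip it entirely. */
    static int exitEptViolation(XFER *pXfer)
    {
        int rc = checkExitDueToEventDelivery(pXfer);
        if (rc == STS_DOUBLE_FAULT)
            return STS_SUCCESS;   /* re-enter the guest; it takes the #DF */
        /* ...handle the EPT violation itself... */
        return STS_SUCCESS;
    }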

Files:
1 edited

Legend:

  (no prefix) Unmodified
  + Added
  - Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45352)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45379)

@@ -1790 +1790 @@
     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
     rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, 0xffffffffffffffffULL);
+
+    /* Setup debug controls */
+    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);                /** @todo think about this. */
+    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS,  0);    /** @todo Intel spec. 26.6.3 think about this */
     AssertRCReturn(rc, rc);
     return rc;

@@ -2932 +2936 @@
         return VINF_SUCCESS;

+#ifdef DEBUG
     /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
     if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
@@ -2940 +2945 @@
         Assert((pCtx->dr[7] & 0x400) == 0x400);              /* bit 10 is reserved (MB1). */
     }
+#endif

     int rc                = VERR_INTERNAL_ERROR_5;

@@ -3014 +3020 @@
     /* The guest's view of its DR7 is unblemished. */
     rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
-
-    /* Setup other debug controls */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);                /** @todo think about this. */
-    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS,  0);    /** @todo Intel spec. 26.6.3 think about this */
-    AssertRCReturn(rc, rc);

     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;

@@ -4534 +4535 @@

 /**
- * Saves, if necessary, any event that occurred during event delivery as a
- * pending VMX event to handle before the next VM-entry or to be translated as a
- * TRPM event in the case of exiting to ring-3.
+ * Handle a condition that occurred while delivering an event through the guest
+ * IDT.
  *
  * @returns VBox status code (informational error codes included).
- * @retval VINF_SUCCESS if we should continue handling VM-exits.
+ * @retval VINF_SUCCESS if we should continue handling the VM-exit.
  * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
  *         continue execution of the guest which will delivery the #DF.
@@ -4552 +4552 @@
  *
  * @remarks No-long-jump zone!!!
- * @remarks Called unconditionally after every VM-exit.
- *
- */
-static int hmR0VmxSavePendingEventDueToEventDelivery(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
-{
-
-    Assert(pVCpu);
-    Assert(pVmxTransient);
-
-    if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed))     /* Don't bother with pending events if the VM-entry itself failed. */
-        return VINF_SUCCESS;
-
+ */
+static int hmR0VmxCheckExitDueToEventDelivery(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
+{
     int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);

@@ -5064 +5055 @@

     /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
-    if (   pVM->hm.s.fNestedPaging
-        && CPUMIsGuestPagingEnabledEx(pMixedCtx))
-    {
-        if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
+    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
+    {
+        if (   pVM->hm.s.fNestedPaging
+            && CPUMIsGuestPagingEnabledEx(pMixedCtx))
         {
             rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
@@ -5093 +5084 @@
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
             }
-
-            pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
-        }
-    }
-    else
+        }
         pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
+    }
     return rc;
 }

@@ -5535 +5523 @@
     AssertRC(rc);

-    /* Restore debug registers if necessary and resync on next R0 re-entry. */
+    /* Restore FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /* Restore debug registers if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestDebugStateActive(pVCpu))
     {
         CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
+        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     }

@@ -5544 +5541 @@
     {
         CPUMR0LoadHostDebugState(pVM, pVCpu);
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     }

@@ -6219 +6216 @@
     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));

-    /* Currently we always atleast reload CR0 (longjmps included because of FPU state sharing). */
-    Log(("LdGstFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
+    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);

     /* Determine real-on-v86 mode. */

@@ -6266 +6262 @@
              ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
               pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
 }

@@ -6382 +6380 @@
     /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
     Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
-    int rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
+    int rc;
+    if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_INTR_STATE)
+        rc = hmR0VmxLoadGuestIntrState(pVM, pVCpu, pMixedCtx);
+    else
+        rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
     AssertRC(rc);
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));

@@ -6444 +6446 @@
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatInGC, x);
+    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);

     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */

@@ -6450 +6452 @@
     pVCpu->hm.s.vmx.fUpdatedGuestState = 0;                     /* Exits/longjmps to ring-3 requires saving the guest state. */
     pVmxTransient->fVmcsFieldsRead     = 0;                     /* Transient fields need to be read from the VMCS. */
-    pVmxTransient->fVectoringPF        = false;                 /* Clear the vectoring page-fault flag. */
+    pVmxTransient->fVectoringPF        = false;                 /* Vectoring page-fault needs to be determined later. */

     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))

@@ -6528 +6530 @@
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);

     VMXTRANSIENT VmxTransient;

@@ -6543 +6544 @@

         /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
         rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
         if (rc != VINF_SUCCESS)

@@ -6566 +6568 @@
         if (RT_UNLIKELY(rc != VINF_SUCCESS))        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
         {
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
             hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
             return rc;
         }

-        /* Handle any exception that caused a VM-exit while delivering an event to the guest.  */
-        rc = hmR0VmxSavePendingEventDueToEventDelivery(pVM, pVCpu, pCtx, &VmxTransient);
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-        {
-            /* Handle VM-exits. */
-            AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("Invalid VM-exit %#x\n", VmxTransient.uExitReason));
-            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
-            rc = (*s_apfnVMExitHandlers[VmxTransient.uExitReason])(pVM, pVCpu, pCtx, &VmxTransient);
-            if (rc != VINF_SUCCESS)
-                break;
-            else if (cLoops > pVM->hm.s.cMaxResumeLoops)
-            {
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
-                rc = VINF_EM_RAW_INTERRUPT;
-                break;
-            }
-        }
-        else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        /* Handle the VM-exit. */
+        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
+        rc = (*s_apfnVMExitHandlers[VmxTransient.uExitReason])(pVM, pVCpu, pCtx, &VmxTransient);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        if (rc != VINF_SUCCESS)
             break;
-        /* else continue guest execution for (VINF_VMX_DOUBLE_FAULT) */
-    }
-
+        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
+            rc = VINF_EM_RAW_INTERRUPT;
+            break;
+        }
+    }
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     if (rc == VERR_EM_INTERPRETER)
         rc = VINF_EM_RAW_EMULATE_INSTR;

@@ -6644 +6642 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
-    return VINF_EM_RAW_INTERRUPT;
+    return VINF_SUCCESS;
 }

@@ -6655 +6653 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     int rc = hmR0VmxReadExitIntrInfoVmcs(pVmxTransient);
-    rc     = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-    rc    |= hmR0VmxReadExitIntrErrorCodeVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);

@@ -6668 +6664 @@
     uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
     Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(uExitIntrInfo));
+
+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;

     uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);

@@ -6707 +6710 @@
                         Assert(pVM->hm.s.vmx.pRealModeTSS);
                         Assert(PDMVmmDevHeapIsEnabled(pVM));
+                        rc     = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+                        rc    |= hmR0VmxReadExitIntrErrorCodeVmcs(pVmxTransient);
+                        AssertRCReturn(rc, rc);
                         rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
                                                     VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),

@@ -7788 +7794 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();

+    /* Check if this task-switch occurred while delivery an event through the guest IDT. */
+    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
+    AssertRCReturn(rc, rc);
+    if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
+    {
+        rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
+        AssertRCReturn(rc, rc);
+        if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
+        {
+            uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
+            if (   uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
+                && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
+                && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
+            {
+                /* Save it as a pending event while will be converted to a TRPM event on the way out to ring-3. */
+                pVCpu->hm.s.Event.fPending = true;
+                pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
+                rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
+                AssertRCReturn(rc, rc);
+                if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringErrorCode))
+                    pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
+                else
+                    pVCpu->hm.s.Event.u32ErrCode = 0;
+            }
+        }
+    }
     /** @todo Emulate task switch someday, currently just going back to ring-3 for
      *        emulation. */
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
     return VERR_EM_INTERPRETER;

@@ -7819 +7850 @@
     int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);

+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
+#if 0
     /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
      *   just sync the whole thing. */
-    rc    |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+#else
+    /* Aggressive state sync. for now. */
+    rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);

@@ -7840 +7885 @@
             GCPhys &= PAGE_BASE_GC_MASK;
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
-            Log(("ApicAccess %RGp %#x\n", GCPhys, VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
             VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
                                                   CPUMCTX2CORE(pMixedCtx), GCPhys);
             rc = VBOXSTRICTRC_VAL(rc2);
+            Log(("ApicAccess %RGp %#x\n", GCPhys, VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
             if (   rc == VINF_SUCCESS
                 || rc == VERR_PAGE_TABLE_NOT_PRESENT

@@ -7957 +8002 @@
     Assert(pVM->hm.s.fNestedPaging);

+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
     RTGCPHYS GCPhys = 0;
-    int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
+    rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
     AssertRCReturn(rc, rc);

+#if 0
     rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better?  */
+#else
+    /* Aggressive state sync. for now. */
+    rc  = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);
-
-    Log(("EPT misconfig at %#RX64 RIP=%#RX64\n", GCPhys, pMixedCtx->rip));

     /*
@@ -7974 +8031 @@
      */
     VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
+    Log(("EPT misconfig at %#RX64 RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
     rc = VBOXSTRICTRC_VAL(rc2);
-    Log(("EPT misconfig rc=%d\n",  rc));
     if (   rc == VINF_SUCCESS
         || rc == VERR_PAGE_TABLE_NOT_PRESENT

@@ -7997 +8054 @@
     Assert(pVM->hm.s.fNestedPaging);

+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
     RTGCPHYS GCPhys = 0;
-    int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
-    rc    |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
-    rc    |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);  /** @todo can we do better? */
+    rc  = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
+    rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
+#if 0
+    rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better?  */
+#else
+    /* Aggressive state sync. for now. */
+    rc  = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);

@@ -8178 +8249 @@
     {
         Assert(CPUMIsGuestFPUStateActive(pVCpu));
-
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);

@@ -8444 +8514 @@
     VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();

-    /* Re-inject the exception into the guest. This cannot be a double-fault condition which are handled in
-       hmR0VmxSavePendingEventDueToEventDelivery(). */
+    /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
+       hmR0VmxCheckExitDueToEventDelivery(). */
     int rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
                                     VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),