VirtualBox

Timestamp:
2011-06-08 3:15:11 PM (14 years ago)
Author:
vboxsync
Message:

VMM/HM: more paranoid poking code.
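What makes the poking code "more paranoid": the spin-wait in the renamed hmR0PokeCpu now reads hwaccm.s.fCheckedTLBFlush and hwaccm.s.cWorldSwitchExits through ASMAtomicUoReadBool/ASMAtomicUoReadU32 rather than plain field accesses, so the compiler cannot cache the values across iterations of the loop, and the open-coded poke logic at the TLB-shootdown call sites is consolidated into a single hmPokeCpuForTlbFlush helper (see the diff below). As a rough, self-contained sketch of that poke-and-spin pattern, with C11 atomics standing in for the IPRT ASMAtomicUoRead* wrappers and made-up names (TargetCpu, sendPoke, pokeAndWait) rather than VirtualBox APIs:

/* Sketch only: C11 atomics replace the IPRT ASMAtomicUoRead* wrappers, and
 * TargetCpu/sendPoke/pokeAndWait are illustrative names, not VirtualBox APIs. */
#include <stdatomic.h>

typedef struct TargetCpu
{
    atomic_bool fInGuestMode;       /* set while the CPU is executing guest code */
    atomic_uint cWorldSwitchExits;  /* incremented on every guest-to-host exit   */
} TargetCpu;

void sendPoke(TargetCpu *pCpu);     /* platform "kick", e.g. an IPI; asynchronous */

static void pokeAndWait(TargetCpu *pCpu)
{
    /* Sample the exit counter before poking; any change afterwards means the
       target has taken at least one exit and will re-check shared state. */
    unsigned const cExits = atomic_load_explicit(&pCpu->cWorldSwitchExits,
                                                 memory_order_relaxed);
    sendPoke(pCpu);

    /* The poke is asynchronous, so spin until the target has left guest mode
       or has world-switched.  The atomic loads keep the compiler from hoisting
       the reads out of the loop and spinning on stale values. */
    while (   atomic_load_explicit(&pCpu->fInGuestMode, memory_order_relaxed)
           && atomic_load_explicit(&pCpu->cWorldSwitchExits,
                                   memory_order_relaxed) == cExits)
        /* ideally a CPU pause hint here, like ASMNopPause() in the patch */;
}

Sampling the exit counter before sending the poke is what keeps the wait bounded: the loop ends either when the target leaves guest mode or when it has gone through at least one world switch since the sample, whichever is observed first.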

Files:
1 edited

Legend:

  unchanged
+ added
- removed
  • trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp

r35346 → r37386

@@ -97,4 +97,5 @@
 
 #ifdef IN_RING0
+
 /**
  * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
@@ -107,17 +108,17 @@
 
 /**
- * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED
- *
- */
-void hwaccmMpPokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
-{
-    uint32_t cWorldSwitchExit = pVCpu->hwaccm.s.cWorldSwitchExit;
-
-    Assert(idHostCpu == pVCpu->idHostCpu);
+ * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
+ */
+static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
+{
+    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
+    Assert(idHostCpu == pVCpu->idHostCpu);  /** @todo bogus assertion subject to races? */
 
     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatPoke, x);
     int rc = RTMpPokeCpu(idHostCpu);
     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatPoke, x);
-    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall back to a less efficient implementation (broadcast). */
+
+    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
+       back to a less efficient implementation (broadcast). */
     if (rc == VERR_NOT_SUPPORTED)
     {
@@ -134,10 +135,9 @@
             STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);
 
-        /* Spin until the VCPU has switched back. */
-        while (     pVCpu->hwaccm.s.fCheckedTLBFlush
-               &&   cWorldSwitchExit == pVCpu->hwaccm.s.cWorldSwitchExit)
-        {
+        /* Spin until the VCPU has switched back (poking is async). */
+        while (   ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush)
+               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits))
             ASMNopPause();
-        }
+
         if (rc == VINF_SUCCESS)
             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
@@ -146,7 +146,38 @@
     }
 }
+
+#endif /* IN_RING0 */
+#ifndef IN_RC
+
+/**
+ * Poke an EMT so it can perform the appropriate TLB shootdowns.
+ *
+ * @param   pVCpu               The handle of the virtual CPU to poke.
+ * @param   fAccountFlushStat   Whether to account the call to
+ *                              StatTlbShootdownFlush or StatTlbShootdown.
+ */
+static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
+{
+    if (ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush))
+    {
+        if (fAccountFlushStat)
+            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush);
+        else
+            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
+#ifdef IN_RING0
+        RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
+        if (idHostCpu != NIL_RTCPUID)
+        {
+            hmR0PokeCpu(pVCpu, idHostCpu);
+        }
+#else
+        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
 #endif
-
-#ifndef IN_RC
+    }
+    else
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
+}
+
+
 /**
  * Invalidates a guest page on all VCPUs.
@@ -159,5 +190,4 @@
 {
     VMCPUID idCurCpu = VMMGetCpuId(pVM);
-
     STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hwaccm.s.StatFlushPage);
 
@@ -166,28 +196,15 @@
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-        /* Nothing to do if a TLB flush is already pending; the VCPU should have already been poked if it were active */
+        /* Nothing to do if a TLB flush is already pending; the VCPU should
+           have already been poked if it were active. */
        if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
             continue;
 
         if (pVCpu->idCpu == idCurCpu)
-        {
             HWACCMInvalidatePage(pVCpu, GCPtr);
-        }
         else
         {
             hwaccmQueueInvlPage(pVCpu, GCPtr);
-            if (pVCpu->hwaccm.s.fCheckedTLBFlush)
-            {
-                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
-#ifdef IN_RING0
-                RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
-                if (idHostCpu != NIL_RTCPUID)
-                    hwaccmMpPokeCpu(pVCpu, idHostCpu);
-#else
-                VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
-#endif
-            }
-            else
-                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
+            hmPokeCpuForTlbFlush(pVCpu, false /*fAccountFlushStat*/);
         }
     }
@@ -216,29 +233,18 @@
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-        /* Nothing to do if a TLB flush is already pending; the VCPU should have already been poked if it were active */
-        if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
-            continue;
-
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
-        if (idThisCpu == idCpu)
-            continue;
-
-        if (pVCpu->hwaccm.s.fCheckedTLBFlush)
+        /* Nothing to do if a TLB flush is already pending; the VCPU should
+           have already been poked if it were active. */
+        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
         {
-            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush);
-#ifdef IN_RING0
-            RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
-            if (idHostCpu != NIL_RTCPUID)
-                hwaccmMpPokeCpu(pVCpu, idHostCpu);
-#else
-            VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
-#endif
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
+            if (idThisCpu != idCpu)
+                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
         }
-        else
-            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBManual);
-    }
-    return VINF_SUCCESS;
-}
-#endif
+    }
+
+    return VINF_SUCCESS;
+}
+
+#endif /* !IN_RC */
 
 /**
@@ -293,33 +299,20 @@
 
             if (idThisCpu == idCpu)
+                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
+            else
             {
-                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
-                continue;
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
+                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
             }
-
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
-            if (pVCpu->hwaccm.s.fCheckedTLBFlush)
-            {
-                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush);
-# ifdef IN_RING0
-                RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
-                if (idHostCpu != NIL_RTCPUID)
-                    hwaccmMpPokeCpu(pVCpu, idHostCpu);
-# else
-                VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
-# endif
-            }
-            else
-                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBManual);
         }
         return VINF_SUCCESS;
     }
 
+    /* AMD-V doesn't support invalidation with guest physical addresses; see
+       comment in SVMR0InvalidatePhysPage. */
     Assert(pVM->hwaccm.s.svm.fSupported);
-    /* AMD-V doesn't support invalidation with guest physical addresses; see comment in SVMR0InvalidatePhysPage. */
+#endif
+
     HWACCMFlushTLBOnAllVCpus(pVM);
-#else
-    HWACCMFlushTLBOnAllVCpus(pVM);
-#endif
     return VINF_SUCCESS;
 }
@@ -336,2 +329,3 @@
     return !!pVCpu->hwaccm.s.Event.fPending;
 }
+
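
The hunks above only mention the VERR_NOT_SUPPORTED fallback in comments (the dummy RTMpOnSpecific handler and the "less efficient implementation (broadcast)"); the fallback body itself lies outside the changed lines. A minimal sketch of that try-the-cheap-poke-first shape, using hypothetical helpers (tryDirectPoke, crossCallNop) and status values instead of the real IPRT calls:

/* Sketch only: the helpers and status codes below are stand-ins, not IPRT APIs. */
enum { POKE_SUCCESS = 0, POKE_NOT_SUPPORTED = -1 };

int  tryDirectPoke(unsigned idHostCpu);  /* targeted IPI-style poke; may be unsupported     */
void crossCallNop(unsigned idHostCpu);   /* heavier cross-call running a do-nothing handler */

/* Prefer the cheap targeted poke; where the platform cannot do that, schedule
 * a no-op handler on the target CPU instead, which is still enough to force
 * it out of guest mode. */
static int pokeHostCpu(unsigned idHostCpu)
{
    int rc = tryDirectPoke(idHostCpu);
    if (rc == POKE_NOT_SUPPORTED)
    {
        crossCallNop(idHostCpu);
        rc = POKE_SUCCESS;
    }
    return rc;
}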