VirtualBox

Timestamp:
2007-04-19 09:43:29 PM (18 years ago)
Author:
vboxsync
Message:

Implementing timer synchronous virtual clock.

Files:
1 edited

Legend:

Unmodified (leading space)
Added (leading +)
Removed (leading -)
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

--- trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r2082)
+++ trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r2248)
 #ifdef IN_RING3
 # include <VBox/rem.h>
+# include <iprt/thread.h>
 #endif
 #include "TMInternal.h"
     
  * @param   pVM     The VM handle.
  */
-uint64_t tmVirtualGetRawNonNormal(PVM pVM)
+static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
 {
     /*
     
 
 /**
- * Gets the current TMCLOCK_VIRTUAL time
- *
- * @returns The timestamp.
- * @param   pVM     VM handle.
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-TMDECL(uint64_t) TMVirtualGet(PVM pVM)
-{
-    return TMVirtualGetEx(pVM, true /* check timers */);
-}
-
-
-/**
- * Gets the current TMCLOCK_VIRTUAL time
- *
- * @returns The timestamp.
- * @param   pVM             VM handle.
- * @param   fCheckTimers    Check timers or not
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
+ * Inlined version of tmVirtualGetEx.
+ */
+DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
 {
     uint64_t u64;
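The hunk above folds the two exported getters into a single inlined worker; the next hunk re-adds the TMDECL entry points as thin wrappers around it. A minimal standalone sketch of that wrapper-over-inline-worker pattern, with invented names and a stubbed clock read standing in for the real TM internals:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Shared body: both exported getters compile down to this inlined worker. */
    static inline uint64_t clockGetWorker(bool fCheckTimers)
    {
        uint64_t u64 = 42;    /* stub: a real clock read would go here */
        if (fCheckTimers)
        {
            /* ... expired-timer check would go here ... */
        }
        return u64;
    }

    /* Exported wrappers: the plain getter merely fixes the flag. */
    uint64_t clockGet(void)                { return clockGetWorker(true /* check timers */); }
    uint64_t clockGetEx(bool fCheckTimers) { return clockGetWorker(fCheckTimers); }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)clockGet());
        return 0;
    }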
     
 
 /**
+ * Gets the current TMCLOCK_VIRTUAL time
+ *
+ * @returns The timestamp.
+ * @param   pVM     VM handle.
+ *
+ * @remark  While the flow of time will never go backwards, the speed of the
+ *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
+ *          influenced by power saving (SpeedStep, PowerNow!), while the former
+ *          makes use of TSC and kernel timers.
+ */
+TMDECL(uint64_t) TMVirtualGet(PVM pVM)
+{
+    return TMVirtualGetEx(pVM, true /* check timers */);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL time
+ *
+ * @returns The timestamp.
+ * @param   pVM             VM handle.
+ * @param   fCheckTimers    Check timers or not
+ *
+ * @remark  While the flow of time will never go backwards, the speed of the
+ *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
+ *          influenced by power saving (SpeedStep, PowerNow!), while the former
+ *          makes use of TSC and kernel timers.
+ */
+TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
+{
+    return tmVirtualGet(pVM, fCheckTimers);
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL_SYNC time.
  *
  * @returns The timestamp.
  * @param   pVM     VM handle.
- */
-TMDECL(uint64_t) TMVirtualGetSync(PVM pVM)
-{
+ * @thread  EMT.
+ */
+TMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
+{
+    VM_ASSERT_EMT(pVM);
+
     uint64_t u64;
     if (pVM->tm.s.fVirtualSyncTicking)
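The re-added @remark promises that the returned time never goes backwards even though its rate varies with the quality of RTTimeNanoTS and the TSC. The changeset does not show how that is enforced; one common way to provide such a guarantee, sketched here with C11 atomics and invented names, is to clamp every reading against the last value handed out:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t g_u64Last;  /* last timestamp handed out */

    /* Return u64Now, but never less than what any caller already saw. */
    static uint64_t monotonicGet(uint64_t u64Now)
    {
        uint64_t u64Prev = atomic_load(&g_u64Last);
        for (;;)
        {
            if (u64Now <= u64Prev)
                return u64Prev;     /* source jittered backwards: re-serve the old value */
            if (atomic_compare_exchange_weak(&g_u64Last, &u64Prev, u64Now))
                return u64Now;      /* we advanced the clock */
            /* u64Prev was reloaded by the failed exchange; retry. */
        }
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)monotonicGet(100));
        printf("%llu\n", (unsigned long long)monotonicGet(90)); /* still 100 */
        return 0;
    }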
     
 
         /*
-         * Do TMVirtualGet() to get the current TMCLOCK_VIRTUAL time.
+         * Query the virtual clock and do the usual expired timer check.
          */
         Assert(pVM->tm.s.fVirtualTicking);
     
          *
          * The catch-up adjusting work by us decrementing the offset by a percentage of
-         * the time elapsed since the previous TMVritualGetSync call. We take some simple
-         * precautions against racing other threads here, but assume that this isn't going
-         * to be much of a problem since calls to this function is unlikely from threads
-         * other than the EMT.
+         * the time elapsed since the previous TMVirtualGetSync call.
          *
          * It's possible to get a very long or even negative interval between two read
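The comment above states the catch-up rule: every read shaves a fixed percentage of the elapsed virtual time off the accumulated offset. A standalone sketch of that arithmetic, with plain C in place of ASMMult2xU32RetU64/ASMDivU64ByU32RetU32 and invented example numbers: at 25% with a read every 10 ms, a 100 ms lag drains in about 400 ms.

    #include <stdint.h>
    #include <stdio.h>

    /* One catch-up step: subtract u32Pct% of the elapsed time from the lag. */
    static uint64_t catchUpStep(uint64_t u64Offset, uint64_t u64Delta, uint32_t u32Pct)
    {
        uint64_t u64Sub = u64Delta * u32Pct / 100;
        return u64Offset > u64Sub ? u64Offset - u64Sub : 0; /* 0 == fully caught up */
    }

    int main(void)
    {
        uint64_t u64Offset = 100000000; /* 100 ms of lag, in nanoseconds */
        unsigned cReads = 0;
        while (u64Offset != 0)
        {
            u64Offset = catchUpStep(u64Offset, 10000000 /* 10 ms between reads */, 25);
            cReads++;
        }
        printf("caught up after %u reads (~%u ms)\n", cReads, cReads * 10);
        return 0;
    }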
     
             const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
             uint64_t u64Delta = u64 - u64Prev;
-            if (!(u64Delta >> 32))
+            if (RT_LIKELY(!(u64Delta >> 32)))
             {
-                uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchupPercentage),
+                uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage),
                                                        100);
-                if (u32Sub < (uint32_t)u64Delta)
+                if (u64Offset > u32Sub)
                 {
-                    const uint64_t u64NewOffset = u64Offset - u32Sub;
-                    if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev))
-                        ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncOffset, u64NewOffset, u64Offset);
-                    u64Offset = u64NewOffset;
+                    u64Offset -= u32Sub;
+                    ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, u64Offset);
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                 }
                 else
                 {
                     /* we've completely caught up. */
-                    if (    ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev)
-                        &&  ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0, u64Offset))
-                        ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                    u64Offset = 0;
+                    ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0);
+                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                 }
             }
             else
             {
-                /* Update the previous TMVirtualGetSync time it's not a negative delta. */
-                if (!(u64Delta >> 63))
-                    ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev);
-                Log(("TMVirtualGetSync: u64Delta=%VRU64\n", u64Delta));
+                /* More than 4 seconds since last time (or negative), ignore it. */
+                if (!(u64Delta & RT_BIT_64(63)))
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
+                Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
             }
         }
 
         /*
-         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.
-         * The current approach will not let us pass any expired timer.
+         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
+         * approach is to never pass the head timer. So, when we do stop the clock and
+         * set the timer pending flag.
          */
         u64 -= u64Offset;
-        if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64)
+        const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
+        if (u64 >= u64Expire)
         {
+            u64 = u64Expire;
+            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
+            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
             if (!VM_FF_ISSET(pVM, VM_FF_TIMER))
             {
     
 #endif
             }
-            const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
-            if (u64Expire < u64)
-                u64 = u64Expire;
         }
     }
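The rewritten tail never lets the synchronous clock pass the head timer: once the computed time reaches u64Expire the value is clamped, the clock is frozen there, and the timer force-flag is raised so EMT services the queue. A condensed sketch of that control flow (struct and field names invented; the real code uses atomic stores and VM_FF_SET):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct SYNCCLOCK
    {
        uint64_t u64Sync;        /* frozen value while not ticking */
        uint64_t u64Expire;      /* head timer's expiration time   */
        bool     fTicking;       /* is the clock running?          */
        bool     fTimerPending;  /* "service the timers" flag      */
    } SYNCCLOCK;

    /* Never return a time past the head timer; stop the clock at its expiry. */
    static uint64_t syncClockGet(SYNCCLOCK *pClock, uint64_t u64Virtual, uint64_t u64Offset)
    {
        if (!pClock->fTicking)
            return pClock->u64Sync;          /* stopped: hand out the frozen value */
        uint64_t u64 = u64Virtual - u64Offset;
        if (u64 >= pClock->u64Expire)
        {
            u64 = pClock->u64Expire;         /* clamp to the head timer */
            pClock->u64Sync = u64;
            pClock->fTicking = false;        /* stop the clock ...                */
            pClock->fTimerPending = true;    /* ... and ask EMT to run the timers */
        }
        return u64;
    }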
     
 
 /**
+ * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
+ *
+ * @return  The current lag.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
+{
+    return pVM->tm.s.u64VirtualSyncOffset;
+}
+
+
+/**
+ * Get the current catch-up percent.
+ *
+ * @return  The current catch-up percent. 0 means running at the same speed as the virtual clock.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
+{
+    if (pVM->tm.s.fVirtualSyncCatchUp)
+        return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
+    return 0;
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL frequency.
  *
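The two new getters expose the catch-up state read-only: the lag is simply the current offset, and the percentage is nonzero only while catch-up is active. A hypothetical status read-out over the same two fields (the struct is invented; in the source they live in pVM->tm.s):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct TMSTATE   /* invented snapshot of the fields the getters read */
    {
        uint64_t u64VirtualSyncOffset;            /* current lag in nanoseconds */
        uint32_t u32VirtualSyncCatchUpPercentage;
        int      fVirtualSyncCatchUp;
    } TMSTATE;

    static void reportLag(const TMSTATE *pTm)
    {
        uint32_t u32Pct = pTm->fVirtualSyncCatchUp ? pTm->u32VirtualSyncCatchUpPercentage : 0;
        printf("virtual-sync lag: %llu ns, catching up at %u%%\n",
               (unsigned long long)pTm->u64VirtualSyncOffset, (unsigned)u32Pct);
    }

    int main(void)
    {
        TMSTATE Tm = { 100000000 /* 100 ms */, 25, 1 };
        reportLag(&Tm);
        return 0;
    }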
     
 }
 
-
-//#define TM_CONTINUOUS_TIME
 
 /**
     
     }
 
-#ifndef TM_CONTINUOUS_TIME
     AssertFailed();
     return VERR_INTERNAL_ERROR;
-#else
-    return VINF_SUCCESS;
-#endif
 }
 
     
     if (pVM->tm.s.fVirtualTicking)
     {
-#ifndef TM_CONTINUOUS_TIME
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
         pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
         pVM->tm.s.fVirtualSyncTicking = false;
         pVM->tm.s.fVirtualTicking = false;
-#endif
         return VINF_SUCCESS;
     }
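With the TM_CONTINUOUS_TIME escape hatch removed, pausing always snapshots the current virtual time into u64Virtual and clears both ticking flags, and resuming rebuilds the offset from that snapshot. A minimal offset-based pause/resume sketch (invented names, single-threaded simplification of the scheme):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VCLOCK
    {
        uint64_t u64Offset;   /* subtracted from the raw source while running */
        uint64_t u64Virtual;  /* frozen value while paused                    */
        bool     fTicking;
    } VCLOCK;

    static uint64_t vclockGet(const VCLOCK *pClock, uint64_t u64Raw)
    {
        return pClock->fTicking ? u64Raw - pClock->u64Offset : pClock->u64Virtual;
    }

    /* Pause: snapshot the current virtual time and stop ticking. */
    static void vclockPause(VCLOCK *pClock, uint64_t u64Raw)
    {
        pClock->u64Virtual = u64Raw - pClock->u64Offset;
        pClock->fTicking   = false;
    }

    /* Resume: recompute the offset so the clock continues from the snapshot. */
    static void vclockResume(VCLOCK *pClock, uint64_t u64Raw)
    {
        pClock->u64Offset = u64Raw - pClock->u64Virtual;
        pClock->fTicking  = true;
    }

    int main(void)
    {
        VCLOCK Clock = { 0, 0, true };
        vclockPause(&Clock, 1000);              /* virtual time frozen at 1000 */
        vclockResume(&Clock, 5000);             /* 4000 raw ticks are skipped  */
        printf("%llu\n", (unsigned long long)vclockGet(&Clock, 6000)); /* 2000 */
        return 0;
    }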