Changeset 2248 in vbox
- Timestamp:
  2007-04-19 09:43:29 PM (18 years ago)
- Location:
  trunk
- Files:
  7 edited
trunk/include/VBox/tm.h (r2074 → r2248)

 
 /**
+ * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
+ *
+ * @return  The current lag.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM);
+
+/**
+ * Get the current catch-up percent.
+ *
+ * @return  The current catch0up percent. 0 means running at the same speed as the virtual clock.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM);
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL frequency.
+ *
+ * @returns The freqency.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint64_t) TMVirtualGetFreq(PVM pVM);
+
+/**
  * Gets the current TMCLOCK_VIRTUAL_SYNC time.
  *
  * @returns The timestamp.
  * @param   pVM     VM handle.
- *
- */
-TMDECL(uint64_t) TMVirtualGetSync(PVM pVM);
-
-/**
- * Gets the current TMCLOCK_VIRTUAL frequency.
- *
- * @returns The freqency.
- * @param   pVM     VM handle.
- */
-TMDECL(uint64_t) TMVirtualGetFreq(PVM pVM);
+ * @thread  EMT.
+ */
+TMDECL(uint64_t) TMVirtualSyncGet(PVM pVM);
 
 /**
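The three getters above make the new sync clock observable from outside TM. As a rough illustration (not part of the changeset; the probe function is hypothetical, and it must run on the EMT since TMVirtualSyncGet asserts that), reading all of them side by side:

    /* Hypothetical EMT-only probe of the new API; Log() is IPRT's regular
       logging macro and %RU64/%RU32 its unsigned integer formats. */
    static void tmLogSyncClock(PVM pVM)
    {
        uint64_t u64Virtual = TMVirtualGet(pVM);               /* TMCLOCK_VIRTUAL, in ns */
        uint64_t u64Sync    = TMVirtualSyncGet(pVM);           /* TMCLOCK_VIRTUAL_SYNC, in ns */
        uint64_t u64Lag     = TMVirtualSyncGetLag(pVM);        /* how far sync trails virtual */
        uint32_t u32Pct     = TMVirtualSyncGetCatchUpPct(pVM); /* 0 unless catching up */
        Log(("sync clock: virtual=%RU64 sync=%RU64 lag=%RU64 catch-up=%RU32%%\n",
             u64Virtual, u64Sync, u64Lag, u32Pct));
    }

While the sync clock is ticking, u64Sync + u64Lag equals u64Virtual, and u32Pct tells how aggressively the gap is being closed.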
trunk/include/VBox/vm.h (r1832 → r2248)

         struct TM   s;
 #endif
-        char        padding[768];      /* multiple of 32 */
+        char        padding[1056];     /* multiple of 32 */
     } tm;
 
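The padding exists so the VM structure keeps one stable layout even for code that cannot see TMInternal.h; it grows here because struct TM gained the catch-up state below. A hedged sketch of the compile-time checks such a padding scheme implies (AssertCompile is IPRT's static assertion; these exact checks are illustrative, not from the changeset):

    AssertCompile(sizeof(TM) <= 1056);  /* the real struct must fit inside the padding */
    AssertCompile(1056 % 32 == 0);      /* preserve the documented multiple-of-32 rule */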
trunk/src/VBox/VMM/TM.cpp (r2105 → r2248)

 static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser);
 static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue);
+static void tmR3TimerQueueRunVirtualSync(PVM pVM);
+static uint64_t tmR3TimerQueueRunVirtualSyncGiveup(PVM pVM, uint64_t offNew);
 static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
…
 {
     case TMCLOCK_VIRTUAL:      return TMVirtualGet(pVM);
-    case TMCLOCK_VIRTUAL_SYNC: return TMVirtualGetSync(pVM);
+    case TMCLOCK_VIRTUAL_SYNC: return TMVirtualSyncGet(pVM);
     case TMCLOCK_REAL:         return TMRealGet(pVM);
     case TMCLOCK_TSC:          return TMCpuTickGet(pVM);
…
 
     /*
+     * Get our CFGM node, create it if necessary.
+     */
+    PCFGMNODE pCfgHandle = CFGMR3GetChild(CFGMR3GetRoot(pVM), "TM");
+    if (!pCfgHandle)
+    {
+        rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "TM", &pCfgHandle);
+        AssertRCReturn(rc, rc);
+    }
+
+    /*
      * Determin the TSC configuration and frequency.
      */
     /* mode */
-    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "TSCVirtualized", &pVM->tm.s.fTSCVirtualized);
+    rc = CFGMR3QueryBool(pCfgHandle, "TSCVirtualized", &pVM->tm.s.fTSCVirtualized);
     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
         pVM->tm.s.fTSCVirtualized = true; /* trap rdtsc */
…
 
     /* source */
-    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "UseRealTSC", &pVM->tm.s.fTSCTicking);
+    rc = CFGMR3QueryBool(pCfgHandle, "UseRealTSC", &pVM->tm.s.fTSCTicking);
     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
         pVM->tm.s.fTSCUseRealTSC = false; /* use virtual time */
…
 
     /* frequency */
-    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "TSCTicksPerSecond", &pVM->tm.s.cTSCTicksPerSecond);
+    rc = CFGMR3QueryU64(pCfgHandle, "TSCTicksPerSecond", &pVM->tm.s.cTSCTicksPerSecond);
     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
     {
…
 
     /*
-     * Register saved state.
-     */
-    rc = SSMR3RegisterInternal(pVM, "tm", 1, TM_SAVED_STATE_VERSION, sizeof(uint64_t) * 8,
-                               NULL, tmR3Save, NULL,
-                               NULL, tmR3Load, NULL);
-    if (VBOX_FAILURE(rc))
-        return rc;
+     * Configure the timer synchronous virtual time.
+     */
+    rc = CFGMR3QueryU32(pCfgHandle, "ScheduleSlack", &pVM->tm.s.u32VirtualSyncScheduleSlack);
+    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+        pVM->tm.s.u32VirtualSyncScheduleSlack = 250000; /* 0.25ms (ASSUMES virtual time is nanoseconds) */
+    else if (VBOX_FAILURE(rc))
+        return VMSetError(pVM, rc, RT_SRC_POS,
+                          N_("Configuration error: Failed to querying 32-bit integer value \"ScheduleSlack\". (%Vrc)"), rc);
+
+    rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStopThreshold", &pVM->tm.s.u64VirtualSyncCatchUpStopThreshold);
+    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+        pVM->tm.s.u64VirtualSyncCatchUpStopThreshold = 500000; /* 0.5ms */
+    else if (VBOX_FAILURE(rc))
+        return VMSetError(pVM, rc, RT_SRC_POS,
+                          N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpStopThreshold\". (%Vrc)"), rc);
+
+    rc = CFGMR3QueryU64(pCfgHandle, "CatchUpGiveUpThreshold", &pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold);
+    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+        pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold = 1500000000; /* 1.5 sec */
+    else if (VBOX_FAILURE(rc))
+        return VMSetError(pVM, rc, RT_SRC_POS,
+                          N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpGiveUpThreshold\". (%Vrc)"), rc);
+
+
+#define TM_CFG_PERIOD(iPeriod, DefStart, DefPct) \
+    do \
+    { \
+        uint64_t u64; \
+        rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStartThreshold" #iPeriod, &u64); \
+        if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
+            u64 = (DefStart); \
+        else if (VBOX_FAILURE(rc)) \
+            return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpThreshold" #iPeriod "\". (%Vrc)"), rc); \
+        if (    (iPeriod > 0 && u64 <= pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod - 1].u64Start) \
+            ||  u64 >= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) \
+            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %RU64\n"), u64); \
+        pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u64Start = u64; \
+        rc = CFGMR3QueryU32(pCfgHandle, "CatchUpPrecentage" #iPeriod, &pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage); \
+        if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
+            pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage = (DefPct); \
+        else if (VBOX_FAILURE(rc)) \
+            return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 32-bit integer value \"CatchUpPrecentage" #iPeriod "\". (%Vrc)"), rc); \
+    } while (0)
+    TM_CFG_PERIOD(0,   25000000,  25); /*   25ms at 1.25x */
+    TM_CFG_PERIOD(1,   75000000,  50); /*   75ms at 1.50x */
+    TM_CFG_PERIOD(2,  100000000,  75); /*   75ms at 1.75x */
+    TM_CFG_PERIOD(3,  150000000, 100); /*  150ms at 2x */
+    TM_CFG_PERIOD(4,  400000000, 200); /*  400ms at 3x */
+    TM_CFG_PERIOD(5,  800000000, 300); /*  800ms at 4x */
+    TM_CFG_PERIOD(6, 1200000000, 400); /* 1200ms at 6x */
+    TM_CFG_PERIOD(7, 1400000000, 500); /* 1400ms at 8x */
+    AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 8);
+#undef TM_CFG_PERIOD
 
     /*
      * Setup the warp drive.
      */
-    rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage);
+    rc = CFGMR3QueryU32(pCfgHandle, "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage);
+    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+        rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage); /* legacy */
     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
         pVM->tm.s.u32VirtualWarpDrivePercentage = 100;
…
      */
     uint32_t u32Millies;
-    rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "TimerMillies", &u32Millies);
+    rc = CFGMR3QueryU32(pCfgHandle, "TimerMillies", &u32Millies);
     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
         u32Millies = 10;
…
     pVM->tm.s.u32TimerMillies = u32Millies;
 
+    /*
+     * Register saved state.
+     */
+    rc = SSMR3RegisterInternal(pVM, "tm", 1, TM_SAVED_STATE_VERSION, sizeof(uint64_t) * 8,
+                               NULL, tmR3Save, NULL,
+                               NULL, tmR3Load, NULL);
+    if (VBOX_FAILURE(rc))
+        return rc;
+
 #ifdef VBOX_WITH_STATISTICS
     /*
…
 
     STAM_REG(pVM, &pVM->tm.s.StatTimerCallbackSetFF,STAMTYPE_COUNTER, "/TM/CallbackSetFF", STAMUNIT_OCCURENCES, "The number of times the timer callback set FF.");
+
+
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRun,                  STAMTYPE_COUNTER,     "/TM/VirtualSync/Run",                  STAMUNIT_OCCURENCES,          "Times the virtual sync timer queue was considered.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunRestart,           STAMTYPE_COUNTER,     "/TM/VirtualSync/Run/Restarts",         STAMUNIT_OCCURENCES,          "Times the clock was restarted after a run.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStop,              STAMTYPE_COUNTER,     "/TM/VirtualSync/Run/Stop",             STAMUNIT_OCCURENCES,          "Times the clock was stopped when calculating the current time before examining the timers.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStoppedAlready,    STAMTYPE_COUNTER,     "/TM/VirtualSync/Run/StoppedAlready",   STAMUNIT_OCCURENCES,          "Times the clock was already stopped elsewhere (TMVirtualSyncGet).");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunSlack,             STAMTYPE_PROFILE,     "/TM/VirtualSync/Run/Slack",            STAMUNIT_NS_PER_OCCURENCE,    "The scheduling slack. (Catch-up handed out when running timers.)");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp,               STAMTYPE_COUNTER,     "/TM/VirtualSync/GiveUp",               STAMUNIT_OCCURENCES,          "Times the catch-up was abandoned.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting, STAMTYPE_COUNTER,     "/TM/VirtualSync/GiveUpBeforeStarting", STAMUNIT_OCCURENCES,          "Times the catch-up was abandoned before even starting. (Typically debugging++.)");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup,              STAMTYPE_PROFILE_ADV, "/TM/VirtualSync/CatchUp",              STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");
+    for (unsigned i = 0; i < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods); i++)
+    {
+        STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage, STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_PCT,        "The catch-up percentage.",       "/TM/VirtualSync/%u", i);
+        STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i],            STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times adjusted to this period.", "/TM/VirtualSync/%u/Adjust", i);
+        STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i],           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times started in this period.",  "/TM/VirtualSync/%u/Initial", i);
+        STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start,      STAMTYPE_U64,     STAMVISIBILITY_ALWAYS, STAMUNIT_NS,         "Start of this period (lag).",    "/TM/VirtualSync/%u/Start", i);
+    }
+
 #endif /* VBOX_WITH_STATISTICS */
…
     /* the virtual timer synchronous clock. */
     pVM->tm.s.fVirtualSyncTicking = false;
-    SSMR3GetU64(pSSM, &pVM->tm.s.u64VirtualSync);
     uint64_t u64;
+    SSMR3GetU64(pSSM, &u64);
+    pVM->tm.s.u64VirtualSync = u64;
     SSMR3GetU64(pSSM, &u64);
     pVM->tm.s.u64VirtualSyncOffset = u64;
…
 
 
-/** @todo doc */
-static int tmr3TimerCreate(PVM pVM, TMCLOCK enmClock, const char *pszDesc, PPTMTIMERHC ppTimer)
+/**
+ * Internal TMR3TimerCreate worker.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   enmClock    The timer clock.
+ * @param   pszDesc     The timer description.
+ * @param   ppTimer     Where to store the timer pointer on success.
+ */
+static int tmr3TimerCreate(PVM pVM, TMCLOCK enmClock, const char *pszDesc, PPTMTIMERR3 ppTimer)
 {
     VM_ASSERT_EMT(pVM);
…
     pTimer->enmClock = enmClock;
     pTimer->pVMR3 = pVM;
-    pTimer->pVMR0 = (PVMR0)pVM->pVMHC; /// @todo pTimer->pVMR0 = pVM->pVMR0;
+    pTimer->pVMR0 = pVM->pVMR0;
     pTimer->pVMGC = pVM->pVMGC;
     pTimer->enmState = TMTIMERSTATE_STOPPED;
…
 
 /**
- * Checks if a queue has a pending timer.
- *
- * @returns true if it has a pending timer.
- * @returns false is no pending timer.
+ * Checks if the sync queue has one or more expired timers.
+ *
+ * @returns true / false.
  *
  * @param   pVM         The VM handle.
  * @param   enmClock    The queue.
  */
-DECLINLINE(bool) tmR3HasPending(PVM pVM, TMCLOCK enmClock)
+DECLINLINE(bool) tmR3HasExpiredTimer(PVM pVM, TMCLOCK enmClock)
 {
     const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[enmClock].u64Expire;
…
 
 /**
+ * Checks for expired timers in all the queues.
+ *
+ * @returns true / false.
+ * @param   pVM     The VM handle.
+ */
+DECLINLINE(bool) tmR3AnyExpiredTimers(PVM pVM)
+{
+    /*
+     * Combine the time calculation for the first two since we're not on EMT
+     * TMVirtualSyncGet only permits EMT.
+     */
+    uint64_t u64Now = TMVirtualGet(pVM);
+    if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64Now)
+        return true;
+    u64Now = pVM->tm.s.fVirtualSyncTicking
+           ? u64Now - pVM->tm.s.u64VirtualSyncOffset
+           : pVM->tm.s.u64VirtualSync;
+    if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now)
+        return true;
+
+    /*
+     * The remaining timers.
+     */
+    if (tmR3HasExpiredTimer(pVM, TMCLOCK_REAL))
+        return true;
+    if (tmR3HasExpiredTimer(pVM, TMCLOCK_TSC))
+        return true;
+    return false;
+}
+
+
+/**
  * Schedulation timer callback.
  *
  * @param   pTimer      Timer handle.
  * @param   pvUser      VM handle.
+ * @thread  Timer thread.
+ *
  * @remark  We cannot do the scheduling and queues running from a timer handler
  *          since it's not executing in EMT, and even if it was it would be async
…
         || pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule
         || pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offSchedule
-        || tmR3HasPending(pVM, TMCLOCK_VIRTUAL_SYNC)
-        || tmR3HasPending(pVM, TMCLOCK_VIRTUAL)
-        || tmR3HasPending(pVM, TMCLOCK_REAL)
-        || tmR3HasPending(pVM, TMCLOCK_TSC)
+        || tmR3AnyExpiredTimers(pVM)
         )
     && !VM_FF_ISSET(pVM, VM_FF_TIMER)
…
     AssertCompile(TMCLOCK_MAX == 4);
 
+    /* TMCLOCK_VIRTUAL_SYNC */
+    STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesSchedule, s1);
+    tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
+    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s1);
+    STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesRun, r1);
+    tmR3TimerQueueRunVirtualSync(pVM);
+    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r1);
+
     /* TMCLOCK_VIRTUAL */
-    STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesSchedule, s1);
+    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s1);
     tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
-    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s1);
-    STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesRun, r1);
-    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
-    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r1);
-
-    /* TMCLOCK_VIRTUAL_SYNC */
-    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s1);
-    tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s2);
     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r1);
-    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
+    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r2);
+
+#if 0 /** @todo if ever used, remove this and fix the stam prefixes on TMCLOCK_REAL below. */
+    /* TMCLOCK_TSC */
+    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
+    tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
+    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s3);
+    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
+    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
+    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r3);
+#endif
 
     /* TMCLOCK_REAL */
     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
     tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
-    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s3);
+    STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesSchedule, s3);
     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
     tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
-    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r3);
-
-    /* TMCLOCK_TSC */
-    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s3);
-    tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
-    STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesSchedule, s3);
-    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r3);
-    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesRun, r3);
 
…
     if (!pNext)
         return;
-    /** @todo deal with the VIRTUAL_SYNC pausing and catch calcs ++ */
-    uint64_t u64Now = tmClock(pVM, pQueue->enmClock);
+    const uint64_t u64Now = tmClock(pVM, pQueue->enmClock);
     while (pNext && pNext->u64Expire <= u64Now)
     {
…
         }
     } /* run loop */
+}
+
+
+/**
+ * Schedules and runs any pending times in the timer queue for the
+ * synchronous virtual clock.
+ *
+ * This scheduling is a bit different from the other queues as it need
+ * to implement the special requirements of the timer synchronous virtual
+ * clock, thus this 2nd queue run funcion.
+ *
+ * @param   pVM     The VM to run the timers for.
+ */
+static void tmR3TimerQueueRunVirtualSync(PVM pVM)
+{
+    PTMTIMERQUEUE const pQueue = &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC];
+    VM_ASSERT_EMT(pVM);
+
+    /*
+     * Any timers?
+     */
+    PTMTIMER pNext = TMTIMER_GET_HEAD(pQueue);
+    if (RT_UNLIKELY(!pNext))
+    {
+        Assert(pVM->tm.s.fVirtualSyncTicking || !pVM->tm.s.fVirtualTicking);
+        return;
+    }
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRun);
+
+    /*
+     * Calculate the time frame for which we will dispatch timers.
+     *
+     * We use a time frame ranging from the current sync time (which is most likely the
+     * same as the head timer) and some configurable period (250000ns) up towards the
+     * current virtual time. This period might also need to be restricted by the catch-up
+     * rate so frequent calls to this function won't accelerate the time too much, however
+     * this will be implemented at a later point.
+     *
+     * Without this frame we would 1) having to run timers much more frequently
+     * and 2) lag behind at a steady rate.
+     */
+    const uint64_t u64VirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
+    uint64_t u64Now;
+    uint64_t u64Max;
+    if (!pVM->tm.s.fVirtualSyncTicking)
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStoppedAlready);
+        u64Now = pVM->tm.s.u64VirtualSync;
+        Assert(u64Now >= pNext->u64Expire);
+
+        u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
+        if (u64Max > u64VirtualNow)
+            u64Max = u64VirtualNow;
+    }
+    else
+    {
+        /* Calc now. */
+        uint64_t off = pVM->tm.s.u64VirtualSyncOffset;
+        if (pVM->tm.s.fVirtualSyncCatchUp)
+        {
+            const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
+            uint64_t u64Delta = u64VirtualNow - u64Prev;
+            if (RT_LIKELY(!(u64Delta >> 32)))
+            {
+                uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage),
+                                                       100);
+                if (off > u32Sub)
+                    off -= u32Sub;
+                else
+                    off = 0;
+            }
+        }
+        u64Now = u64VirtualNow - off;
+
+        /* Check if stopped by expired timer and calc the frame end. */
+        if (u64Now <= pNext->u64Expire)
+        {
+            if (pVM->tm.s.u64VirtualSyncOffset <= pVM->tm.s.u32VirtualSyncScheduleSlack)
+                u64Max = pVM->tm.s.u64VirtualSyncOffset;
+            else
+                u64Max = pVM->tm.s.u32VirtualSyncScheduleSlack;
+        }
+        else
+        {
+            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
+            u64Now = pNext->u64Expire;
+            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64Now);
+            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
+
+            u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
+            if (u64Max > u64VirtualNow)
+                u64Max = u64VirtualNow;
+        }
+    }
+
+    /*
+     * Process the expired timers moving the clock along as we progress.
+     */
+    while (pNext && pNext->u64Expire <= u64Max)
+    {
+        PTMTIMER pTimer = pNext;
+        pNext = TMTIMER_GET_NEXT(pTimer);
+        Log2(("tmR3TimerQueueRun: pTimer=%p:{.enmState=%s, .enmClock=%d, .enmType=%d, u64Expire=%llx (now=%llx) .pszDesc=%s}\n",
+              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
+        bool fRc;
+        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
+        if (fRc)
+        {
+            Assert(!pTimer->offScheduleNext); /* this can trigger falsely */
+
+            /* unlink */
+            const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
+            if (pPrev)
+                TMTIMER_SET_NEXT(pPrev, pNext);
+            else
+            {
+                TMTIMER_SET_HEAD(pQueue, pNext);
+                pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
+            }
+            if (pNext)
+                TMTIMER_SET_PREV(pNext, pPrev);
+            pTimer->offNext = 0;
+            pTimer->offPrev = 0;
+
+            /* advance the clock */
+            ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncTicking, false);
+            ASMAtomicXchgU64(&pVM->tm.s.u64Virtual, pTimer->u64Expire);
+
+            /* fire */
+            switch (pTimer->enmType)
+            {
+                case TMTIMERTYPE_DEV:       pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer); break;
+                case TMTIMERTYPE_DRV:       pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer); break;
+                case TMTIMERTYPE_INTERNAL:  pTimer->u.Internal.pfnTimer(pVM, pTimer, pTimer->u.Internal.pvUser); break;
+                case TMTIMERTYPE_EXTERNAL:  pTimer->u.External.pfnTimer(pTimer->u.External.pvUser); break;
+                default:
+                    AssertMsgFailed(("Invalid timer type %d (%s)\n", pTimer->enmType, pTimer->pszDesc));
+                    break;
+            }
+
+            /* change the state if it wasn't changed already in the handler. */
+            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
+            Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
+        }
+    } /* run loop */
+
+    /*
+     * Restart the clock if it was stopped to serve any timers,
+     * and start/adjust catch-up if necessary.
+     */
+    if (    !pVM->tm.s.fVirtualSyncTicking
+        &&  pVM->tm.s.fVirtualTicking)
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunRestart);
+
+        const uint64_t u64VirtualNow2 = TMVirtualGetEx(pVM, false /* don't check timers */);
+        Assert(u64VirtualNow2 >= u64VirtualNow);
+        const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
+        STAM_STATS({
+            if (offSlack)
+            {
+                PSTAMPROFILE p = &pVM->tm.s.StatVirtualSyncRunSlack;
+                p->cPeriods++;
+                p->cTicks += offSlack;
+                if (p->cTicksMax < offSlack) p->cTicksMax = offSlack;
+                if (p->cTicksMin > offSlack) p->cTicksMax = offSlack;
+            }
+        });
+
+        /* Let the time run a little bit while we were busy running timers(?). */
+        uint64_t u64Elapsed;
+#define MAX_ELAPSED 15000 /*ns*/
+        if (offSlack > MAX_ELAPSED)
+            u64Elapsed = 0;
+        else
+        {
+            u64Elapsed = u64VirtualNow2 - u64VirtualNow;
+            if (u64Elapsed > MAX_ELAPSED)
+                u64Elapsed = MAX_ELAPSED;
+            u64Elapsed = u64Elapsed > offSlack ? u64Elapsed - offSlack : 0;
+        }
+#undef MAX_ELAPSED
+
+        /* Calc the current offset. */
+        uint64_t offNew = u64VirtualNow2 - pVM->tm.s.u64VirtualSync - u64Elapsed;
+
+        /* Deal with starting, adjusting and stopping catchup. */
+        if (pVM->tm.s.fVirtualSyncCatchUp)
+        {
+            if (offNew <= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold)
+            {
+                /* stop */
+                STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
+                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+            }
+            else if (offNew <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
+            {
+                /* adjust */
+                unsigned i = 0;
+                while (     i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
+                       &&   offNew >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
+                    i++;
+                if (pVM->tm.s.u32VirtualSyncCatchUpPercentage < pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage)
+                {
+                    STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
+                    ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+                }
+                pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;
+            }
+            else
+            {
+                /* give up */
+                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp);
+                offNew = tmR3TimerQueueRunVirtualSyncGiveup(pVM, offNew);
+            }
+        }
+        else if (offNew >= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start)
+        {
+            if (offNew <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
+            {
+                /* start */
+                STAM_PROFILE_ADV_START(&pVM->tm.s.StatVirtualSyncCatchup, c);
+                unsigned i = 0;
+                while (     i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
+                       &&   offNew >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
+                    i++;
+                STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]);
+                ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
+            }
+            else
+            {
+                /* not bother */
+                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
+                offNew = tmR3TimerQueueRunVirtualSyncGiveup(pVM, offNew);
+            }
+        }
+
+        /* Update the offset and start the clock. */
+        ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, offNew);
+        ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, true);
+    }
+}
+
+
+/**
+ * Give up the chase.
+ *
+ * Not quite sure how to let the devices know about this, but somehow they will have
+ * to (quietly) drop interrupts en masse and not cause any interrupt storms...
+ *
+ * @returns New offset.
+ *
+ * @param   pVM     The VM handle.
+ * @param   offNew  The current offset.
+ */
+static uint64_t tmR3TimerQueueRunVirtualSyncGiveup(PVM pVM, uint64_t offNew)
+{
+    /** @todo deal with this. */
+    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+    return 0;
 }
…
 
     /* virtual sync */
-    u64 = TMVirtualGetSync(pVM);
+    u64 = TMVirtualSyncGet(pVM);
     pHlp->pfnPrintf(pHlp,
                     "VirtSync: %#RX64 (%RU64) %s%s",
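The adjust and start branches above both walk aVirtualSyncCatchUpPeriods for the last period whose u64Start lies at or below the current lag. A standalone sketch of that selection with hypothetical names (the real code indexes pVM->tm.s.aVirtualSyncCatchUpPeriods in place):

    #include <stdint.h>

    struct CatchUpPeriod { uint64_t u64Start; uint32_t u32Percentage; };

    /* Pick the catch-up percentage for a given lag; both values in nanoseconds. */
    static uint32_t pickCatchUpPct(const struct CatchUpPeriod *paPeriods, unsigned cPeriods,
                                   uint64_t offLag)
    {
        unsigned i = 0;
        while (i + 1 < cPeriods && offLag >= paPeriods[i + 1].u64Start)
            i++;
        return paPeriods[i].u32Percentage;
    }

With the TM_CFG_PERIOD defaults, a 200ms lag lands in period #3 (start 150ms) and yields 100%, i.e. the sync clock runs at double speed until the lag drops under CatchUpStopThreshold (0.5ms) or grows past CatchUpGiveUpThreshold (1.5s).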
trunk/src/VBox/VMM/TMInternal.h (r1057 → r2248)

     bool fVirtualWarpDrive;
     /** Virtual timer synchronous time ticking enabled indicator (bool). (TMCLOCK_VIRTUAL_SYNC) */
-    bool fVirtualSyncTicking;
+    bool volatile fVirtualSyncTicking;
     /** Virtual timer synchronous time catch-up active. */
     bool volatile fVirtualSyncCatchUp;
…
     uint64_t volatile u64VirtualSyncCatchUpPrev;
     /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared. */
-    uint64_t u64VirtualSync;
-    /** How many percent faster the clock should advance when catch-up is active. */
-    uint32_t u32VirtualSyncCatchupPercentage;
+    uint64_t volatile u64VirtualSync;
+    /** The current catch-up percentage. */
+    uint32_t volatile u32VirtualSyncCatchUpPercentage;
+    /** How much slack when processing timers. */
+    uint32_t u32VirtualSyncScheduleSlack;
     /** When to stop catch-up. */
-    uint32_t u32VirtualSyncCatchupStopThreashold;
-    /** When to start catch-up. */
-    uint64_t u64VirtualSyncCatchupStartTreashold;
+    uint64_t u64VirtualSyncCatchUpStopThreshold;
     /** When to give up catch-up. */
-    uint64_t u64VirtualSyncCatchupGiveUpTreashold;
+    uint64_t u64VirtualSyncCatchUpGiveUpThreshold;
+    /** @def TM_MAX_CATCHUP_PERIODS
+     * The number of catchup rates. */
+#define TM_MAX_CATCHUP_PERIODS 8
+    /** The agressivness of the catch-up relative to how far we've lagged behind.
+     * The idea is to have increasing catch-up percentage as the lag increases. */
+    struct TMCATCHUPPERIOD
+    {
+        uint64_t u64Start;      /**< When this period starts. (u64VirtualSyncOffset). */
+        uint32_t u32Percentage; /**< The catch-up percent to apply. */
+        uint32_t u32Alignment;  /**< Structure alignment */
+    } aVirtualSyncCatchUpPeriods[TM_MAX_CATCHUP_PERIODS];
 
     /** Timer queues for the different clock types - R3 Ptr */
…
     STAMPROFILE StatScheduleOneR3;
     STAMCOUNTER StatScheduleSetFF;
-    /** @} */
+    STAMCOUNTER StatPostponedR3;
+    STAMCOUNTER StatPostponedR0;
+    STAMCOUNTER StatPostponedGC;
+    /** @} */
+    /** Read the time
+     * @{ */
     STAMCOUNTER StatVirtualGet;
     STAMCOUNTER StatVirtualGetSync;
     STAMCOUNTER StatVirtualPause;
     STAMCOUNTER StatVirtualResume;
+    /* @} */
     /** TMTimerPoll
      * @{ */
…
     STAMPROFILE StatTimerStopR3;
     /** @} */
-    /**
-     * @{ */
-    STAMCOUNTER StatPostponedR3;
-    STAMCOUNTER StatPostponedR0;
-    STAMCOUNTER StatPostponedGC;
+    /** VirtualSync - Running and Catching Up
+     * @{ */
+    STAMCOUNTER StatVirtualSyncRun;
+    STAMCOUNTER StatVirtualSyncRunRestart;
+    STAMPROFILE StatVirtualSyncRunSlack;
+    STAMCOUNTER StatVirtualSyncRunStop;
+    STAMCOUNTER StatVirtualSyncRunStoppedAlready;
+    STAMCOUNTER StatVirtualSyncGiveUp;
+    STAMCOUNTER StatVirtualSyncGiveUpBeforeStarting;
+    STAMPROFILEADV StatVirtualSyncCatchup;
+    STAMCOUNTER aStatVirtualSyncCatchupInitial[TM_MAX_CATCHUP_PERIODS];
+    STAMCOUNTER aStatVirtualSyncCatchupAdjust[TM_MAX_CATCHUP_PERIODS];
     /** @} */
     /** The timer callback. */
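For reference, the defaults that TM.cpp pours into this array can be written out as a plain initializer (an illustrative transcription, not code from the changeset; u64Start is the lag in nanoseconds at which the period kicks in):

    static const struct TMCATCHUPPERIOD s_aDefaultPeriods[TM_MAX_CATCHUP_PERIODS] =
    {   /* u64Start,  u32Percentage, u32Alignment */
        {   25000000,  25, 0 },  /*   25ms lag -> 1.25x */
        {   75000000,  50, 0 },  /*   75ms lag -> 1.50x */
        {  100000000,  75, 0 },  /*  100ms lag -> 1.75x */
        {  150000000, 100, 0 },  /*  150ms lag -> 2x    */
        {  400000000, 200, 0 },  /*  400ms lag -> 3x    */
        {  800000000, 300, 0 },  /*  800ms lag -> 4x    */
        { 1200000000, 400, 0 },  /* 1200ms lag -> 6x    */
        { 1400000000, 500, 0 },  /* 1400ms lag -> 8x    */
    };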
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r1057 → r2248)

         return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualGet(pVM));
     case TMCLOCK_VIRTUAL_SYNC:
-        return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualGetSync(pVM));
+        return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualSyncGet(pVM));
     case TMCLOCK_REAL:
         AssertCompile(TMCLOCK_FREQ_REAL == 1000);
…
             break;
         case TMCLOCK_VIRTUAL_SYNC:
-            u64 = TMVirtualSyncGet(pVM);
+            u64 = TMVirtualSyncGet(pVM);
             break;
         case TMCLOCK_REAL:
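Both converted call sites rely on the same scaling trick: TMCLOCK_FREQ_VIRTUAL is 1,000,000,000 (the virtual clocks count nanoseconds), so milliseconds are scaled up before being added to the current clock reading. A minimal sketch of the TMCLOCK_VIRTUAL_SYNC arm (hypothetical helper name, EMT context assumed):

    /* Absolute TMCLOCK_VIRTUAL_SYNC expiration for "cMillies from now":
       10ms becomes 10,000,000ns on top of the current sync time. */
    static uint64_t tmExampleSyncExpire(PVM pVM, uint32_t cMillies)
    {
        return cMillies * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualSyncGet(pVM);
    }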
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r2082 → r2248)

 #ifdef IN_RING3
 # include <VBox/rem.h>
+# include <iprt/thread.h>
 #endif
 #include "TMInternal.h"
…
  * @param   pVM     The VM handle.
  */
-uint64_t tmVirtualGetRawNonNormal(PVM pVM)
+static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
 {
     /*
…
 
 /**
- * Gets the current TMCLOCK_VIRTUAL time
- *
- * @returns The timestamp.
- * @param   pVM     VM handle.
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-TMDECL(uint64_t) TMVirtualGet(PVM pVM)
-{
-    return TMVirtualGetEx(pVM, true /* check timers */);
-}
-
-
-/**
- * Gets the current TMCLOCK_VIRTUAL time
- *
- * @returns The timestamp.
- * @param   pVM     VM handle.
- * @param   fCheckTimers    Check timers or not
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
+ * Inlined version of tmVirtualGetEx.
+ */
+DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
 {
     uint64_t u64;
…
 
 /**
+ * Gets the current TMCLOCK_VIRTUAL time
+ *
+ * @returns The timestamp.
+ * @param   pVM     VM handle.
+ *
+ * @remark  While the flow of time will never go backwards, the speed of the
+ *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
+ *          influenced by power saving (SpeedStep, PowerNow!), while the former
+ *          makes use of TSC and kernel timers.
+ */
+TMDECL(uint64_t) TMVirtualGet(PVM pVM)
+{
+    return TMVirtualGetEx(pVM, true /* check timers */);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL time
+ *
+ * @returns The timestamp.
+ * @param   pVM     VM handle.
+ * @param   fCheckTimers    Check timers or not
+ *
+ * @remark  While the flow of time will never go backwards, the speed of the
+ *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
+ *          influenced by power saving (SpeedStep, PowerNow!), while the former
+ *          makes use of TSC and kernel timers.
+ */
+TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
+{
+    return tmVirtualGet(pVM, fCheckTimers);
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL_SYNC time.
  *
  * @returns The timestamp.
  * @param   pVM     VM handle.
- */
-TMDECL(uint64_t) TMVirtualGetSync(PVM pVM)
-{
+ * @thread  EMT.
+ */
+TMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
+{
+    VM_ASSERT_EMT(pVM);
+
     uint64_t u64;
     if (pVM->tm.s.fVirtualSyncTicking)
…
 
         /*
-         * Do TMVirtualGet() to get the current TMCLOCK_VIRTUAL time.
+         * Query the virtual clock and do the usual expired timer check.
          */
         Assert(pVM->tm.s.fVirtualTicking);
…
          *
          * The catch-up adjusting work by us decrementing the offset by a percentage of
-         * the time elapsed since the previous TMVritualGetSync call. We take some simple
-         * precautions against racing other threads here, but assume that this isn't going
-         * to be much of a problem since calls to this function is unlikely from threads
-         * other than the EMT.
+         * the time elapsed since the previous TMVirtualGetSync call.
          *
          * It's possible to get a very long or even negative interval between two read
…
             const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
             uint64_t u64Delta = u64 - u64Prev;
-            if (!(u64Delta >> 32))
+            if (RT_LIKELY(!(u64Delta >> 32)))
             {
-                uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchupPercentage),
+                uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage),
                                                        100);
-                if (u32Sub < (uint32_t)u64Delta)
+                if (u64Offset > u32Sub)
                 {
-                    const uint64_t u64NewOffset = u64Offset - u32Sub;
-                    if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev))
-                        ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncOffset, u64NewOffset, u64Offset);
-                    u64Offset = u64NewOffset;
+                    u64Offset -= u32Sub;
+                    ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, u64Offset);
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                 }
                 else
                 {
                     /* we've completely caught up. */
-                    if (    ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev)
-                        &&  ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0, u64Offset))
-                        ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                    u64Offset = 0;
+                    ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0);
+                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                 }
             }
             else
             {
-                /* Update the previous TMVirtualGetSync time it's not a negative delta. */
-                if (!(u64Delta >> 63))
-                    ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev);
-                Log(("TMVirtualGetSync: u64Delta=%VRU64\n", u64Delta));
+                /* More than 4 seconds since last time (or negative), ignore it. */
+                if (!(u64Delta & RT_BIT_64(63)))
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
+                Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
             }
         }
 
         /*
-         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.
-         * The current approach will not let us pass any expired timer.
+         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
+         * approach is to never pass the head timer. So, when we do stop the clock and
+         * set the the timer pending flag.
          */
         u64 -= u64Offset;
-        if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64)
+        const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
+        if (u64 >= u64Expire)
         {
+            u64 = u64Expire;
+            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
+            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
             if (!VM_FF_ISSET(pVM, VM_FF_TIMER))
             {
…
 #endif
             }
-            const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
-            if (u64Expire < u64)
-                u64 = u64Expire;
         }
     }
…
 
 /**
+ * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
+ *
+ * @return  The current lag.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
+{
+    return pVM->tm.s.u64VirtualSyncOffset;
+}
+
+
+/**
+ * Get the current catch-up percent.
+ *
+ * @return  The current catch0up percent. 0 means running at the same speed as the virtual clock.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
+{
+    if (pVM->tm.s.fVirtualSyncCatchUp)
+        return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
+    return 0;
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL frequency.
  *
…
 }
 
-
-//#define TM_CONTINUOUS_TIME
 
 /**
…
     }
 
-#ifndef TM_CONTINUOUS_TIME
     AssertFailed();
     return VERR_INTERNAL_ERROR;
-#else
-    return VINF_SUCCESS;
-#endif
 }
…
     if (pVM->tm.s.fVirtualTicking)
     {
-#ifndef TM_CONTINUOUS_TIME
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
         pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
         pVM->tm.s.fVirtualSyncTicking = false;
         pVM->tm.s.fVirtualTicking = false;
-#endif
         return VINF_SUCCESS;
     }
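The heart of the rewritten catch-up path is a fixed-point step: on each read the offset shrinks by elapsed × percentage / 100. A worked example in plain C, equivalent to the ASMMult2xU32RetU64/ASMDivU64ByU32RetU32 pair above (hypothetical helper, made-up numbers):

    /* 2ms of virtual time elapsed since u64VirtualSyncCatchUpPrev at the
       period #0 default rate of 25%. */
    static uint32_t tmExampleCatchUpSub(void)
    {
        uint64_t u64Delta = 2000000;                /* ns since the previous read */
        uint32_t u32Pct   = 25;                     /* u32VirtualSyncCatchUpPercentage */
        return (uint32_t)(u64Delta * u32Pct / 100); /* 500000ns knocked off the lag */
    }

So at 25% the sync clock covered 2.5ms of guest time in 2ms of virtual time (1.25x); the !(u64Delta >> 32) guard simply skips the adjustment when the delta is too large for the 32-bit multiply.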
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp (r1480 → r2248)

     GEN_CHECK_OFF(TM, fVirtualTicking);
     GEN_CHECK_OFF(TM, fVirtualWarpDrive);
+    GEN_CHECK_OFF(TM, fVirtualSyncTicking);
+    GEN_CHECK_OFF(TM, fVirtualSyncCatchUp);
     GEN_CHECK_OFF(TM, u32VirtualWarpDrivePercentage);
     GEN_CHECK_OFF(TM, u64VirtualOffset);
     GEN_CHECK_OFF(TM, u64Virtual);
     GEN_CHECK_OFF(TM, u64VirtualWarpDriveStart);
+    GEN_CHECK_OFF(TM, u64VirtualSyncOffset);
+    GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpPrev);
     GEN_CHECK_OFF(TM, u64VirtualSync);
-    GEN_CHECK_OFF(TM, u32VirtualSyncCatchupPercentage);
-    GEN_CHECK_OFF(TM, u32VirtualSyncCatchupStopThreashold);
-    GEN_CHECK_OFF(TM, u64VirtualSyncCatchupStartTreashold);
-    GEN_CHECK_OFF(TM, u64VirtualSyncCatchupGiveUpTreashold);
+    GEN_CHECK_OFF(TM, u32VirtualSyncCatchUpPercentage);
+    GEN_CHECK_OFF(TM, u32VirtualSyncScheduleSlack);
+    GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpStopThreshold);
+    GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpGiveUpThreshold);
+    GEN_CHECK_OFF(TM, aVirtualSyncCatchUpPeriods);
+    GEN_CHECK_OFF(TM, aVirtualSyncCatchUpPeriods[0].u64Start);
+    GEN_CHECK_OFF(TM, aVirtualSyncCatchUpPeriods[0].u32Percentage);
+    GEN_CHECK_OFF(TM, aVirtualSyncCatchUpPeriods[1].u64Start);
+    GEN_CHECK_OFF(TM, aVirtualSyncCatchUpPeriods[1].u32Percentage);
     GEN_CHECK_OFF(TM, pTimer);
     GEN_CHECK_OFF(TM, u32TimerMillies);
…
     GEN_CHECK_OFF(TM, paTimerQueuesGC);
     GEN_CHECK_OFF(TM, StatDoQueues);
+    GEN_CHECK_OFF(TM, StatTimerCallbackSetFF);
     GEN_CHECK_SIZE(TMTIMER);
     GEN_CHECK_OFF(TMTIMER, u64Expire);
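Every field added to struct TM must also be listed here, because the generated offset tables are compared between the GC and HC compilations to catch layout drift. The gist, as a hedged sketch with a hypothetical macro (the real GEN_CHECK_OFF machinery is generated and more involved):

    #include <stdio.h>
    #include <stddef.h>

    /* Emit "Type.member = offset" so the lists from two builds can be diffed. */
    #define MY_CHECK_OFF(type, member) \
        printf(#type "." #member " = %u\n", (unsigned)offsetof(type, member))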