Changeset 2248 in vbox for trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
Timestamp:
- 2007-04-19, 9:43:29 PM (18 years ago)
Files:
- 1 file edited
Legend:
- Unmodified (leading space)
- Added (leading +)
- Removed (leading -)
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
--- trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r2082)
+++ trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r2248)

 #ifdef IN_RING3
 # include <VBox/rem.h>
+# include <iprt/thread.h>
 #endif
 #include "TMInternal.h"
…
  * @param   pVM     The VM handle.
  */
-uint64_t tmVirtualGetRawNonNormal(PVM pVM)
+static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
 {
     /*
…

 /**
- * Gets the current TMCLOCK_VIRTUAL time
- *
- * @returns The timestamp.
- * @param   pVM     VM handle.
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-TMDECL(uint64_t) TMVirtualGet(PVM pVM)
-{
-    return TMVirtualGetEx(pVM, true /* check timers */);
-}
-
-
-/**
- * Gets the current TMCLOCK_VIRTUAL time
- *
- * @returns The timestamp.
- * @param   pVM             VM handle.
- * @param   fCheckTimers    Check timers or not
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
+ * Inlined version of tmVirtualGetEx.
+ */
+DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
 {
     uint64_t u64;
…

 /**
+ * Gets the current TMCLOCK_VIRTUAL time
+ *
+ * @returns The timestamp.
+ * @param   pVM     VM handle.
+ *
+ * @remark  While the flow of time will never go backwards, the speed of the
+ *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
+ *          influenced by power saving (SpeedStep, PowerNow!), while the former
+ *          makes use of TSC and kernel timers.
+ */
+TMDECL(uint64_t) TMVirtualGet(PVM pVM)
+{
+    return TMVirtualGetEx(pVM, true /* check timers */);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL time
+ *
+ * @returns The timestamp.
+ * @param   pVM             VM handle.
+ * @param   fCheckTimers    Check timers or not
+ *
+ * @remark  While the flow of time will never go backwards, the speed of the
+ *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
+ *          influenced by power saving (SpeedStep, PowerNow!), while the former
+ *          makes use of TSC and kernel timers.
+ */
+TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
+{
+    return tmVirtualGet(pVM, fCheckTimers);
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL_SYNC time.
  *
  * @returns The timestamp.
  * @param   pVM     VM handle.
- */
-TMDECL(uint64_t) TMVirtualGetSync(PVM pVM)
-{
+ * @thread  EMT.
+ */
+TMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
+{
+    VM_ASSERT_EMT(pVM);
+
     uint64_t u64;
     if (pVM->tm.s.fVirtualSyncTicking)
…

         /*
-         * Do TMVirtualGet() to get the current TMCLOCK_VIRTUAL time.
+         * Query the virtual clock and do the usual expired timer check.
          */
         Assert(pVM->tm.s.fVirtualTicking);
…
          *
          * The catch-up adjusting work by us decrementing the offset by a percentage of
-         * the time elapsed since the previous TMVritualGetSync call. We take some simple
-         * precautions against racing other threads here, but assume that this isn't going
-         * to be much of a problem since calls to this function is unlikely from threads
-         * other than the EMT.
+         * the time elapsed since the previous TMVirtualGetSync call.
          *
          * It's possible to get a very long or even negative interval between two read
…
             const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
             uint64_t u64Delta = u64 - u64Prev;
-            if (!(u64Delta >> 32))
+            if (RT_LIKELY(!(u64Delta >> 32)))
             {
-                uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchupPercentage),
+                uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage),
                                                        100);
-                if (u32Sub < (uint32_t)u64Delta)
+                if (u64Offset > u32Sub)
                 {
-                    const uint64_t u64NewOffset = u64Offset - u32Sub;
-                    if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev))
-                        ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncOffset, u64NewOffset, u64Offset);
-                    u64Offset = u64NewOffset;
+                    u64Offset -= u32Sub;
+                    ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, u64Offset);
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                 }
                 else
                 {
                     /* we've completely caught up. */
-                    if (    ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev)
-                        &&  ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0, u64Offset))
-                        ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                    u64Offset = 0;
+                    ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0);
+                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                 }
             }
             else
             {
-                /* Update the previous TMVirtualGetSync time it's not a negative delta. */
-                if (!(u64Delta >> 63))
-                    ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64, u64Prev);
-                Log(("TMVirtualGetSync: u64Delta=%VRU64\n", u64Delta));
+                /* More than 4 seconds since last time (or negative), ignore it. */
+                if (!(u64Delta & RT_BIT_64(63)))
+                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
+                Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
             }
         }

         /*
-         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.
-         * The current approach will not let us pass any expired timer.
+         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
+         * approach is to never pass the head timer. So, when we do stop the clock and
+         * set the the timer pending flag.
          */
         u64 -= u64Offset;
-        if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64)
+        const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
+        if (u64 >= u64Expire)
         {
+            u64 = u64Expire;
+            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
+            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
             if (!VM_FF_ISSET(pVM, VM_FF_TIMER))
             {
…
 #endif
             }
-            const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
-            if (u64Expire < u64)
-                u64 = u64Expire;
         }
     }
…

 /**
+ * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
+ *
+ * @return  The current lag.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
+{
+    return pVM->tm.s.u64VirtualSyncOffset;
+}
+
+
+/**
+ * Get the current catch-up percent.
+ *
+ * @return  The current catch0up percent. 0 means running at the same speed as the virtual clock.
+ * @param   pVM     VM handle.
+ */
+TMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
+{
+    if (pVM->tm.s.fVirtualSyncCatchUp)
+        return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
+    return 0;
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL frequency.
  *
…
 }

-
-//#define TM_CONTINUOUS_TIME

 /**
…
     }

-#ifndef TM_CONTINUOUS_TIME
     AssertFailed();
     return VERR_INTERNAL_ERROR;
-#else
-    return VINF_SUCCESS;
-#endif
 }

…
     if (pVM->tm.s.fVirtualTicking)
     {
-#ifndef TM_CONTINUOUS_TIME
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
         pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
         pVM->tm.s.fVirtualSyncTicking = false;
         pVM->tm.s.fVirtualTicking = false;
-#endif
         return VINF_SUCCESS;
     }
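For readers following the catch-up change in TMVirtualSyncGet above: the revised code shrinks the virtual-sync offset (the guest's accumulated lag) by u32VirtualSyncCatchUpPercentage percent of the virtual time elapsed since the previous query, and clears fVirtualSyncCatchUp once the offset would reach zero. The standalone sketch below illustrates only that arithmetic; it is not VBox code, and the 25% rate, the starting lag, and the sample query times are made-up values.

#include <stdint.h>
#include <stdio.h>

/* Illustration of the catch-up arithmetic used in TMVirtualSyncGet after this
 * changeset.  All concrete values are hypothetical, not taken from VBox. */
int main(void)
{
    uint64_t u64Offset = 2500000;                /* current lag in ns (made up)           */
    uint32_t u32Pct    = 25;                     /* catch-up percentage (made up)         */
    uint64_t u64Prev   = 0;                      /* virtual time at the previous query    */
    int      fCatchUp  = 1;                      /* mirrors fVirtualSyncCatchUp           */
    uint64_t au64Query[] = { 3000000, 7000000, 12000000 };  /* sample query times in ns   */

    for (unsigned i = 0; i < sizeof(au64Query) / sizeof(au64Query[0]); i++)
    {
        uint64_t u64Delta = au64Query[i] - u64Prev;
        /* Same formula as the patch: subtract u32Pct percent of the elapsed time. */
        uint64_t u64Sub   = u64Delta * u32Pct / 100;
        if (u64Offset > u64Sub)
            u64Offset -= u64Sub;                 /* still behind, keep catching up        */
        else
        {
            u64Offset = 0;                       /* caught up; the patch also clears      */
            fCatchUp  = 0;                       /* fVirtualSyncCatchUp at this point     */
        }
        u64Prev = au64Query[i];
        printf("virtual=%llu ns  lag=%llu ns  catching-up=%d\n",
               (unsigned long long)au64Query[i], (unsigned long long)u64Offset, fCatchUp);
    }
    return 0;
}

The real function additionally clamps the returned time to the head timer's expiry (u64Expire), stops the synchronous clock, and raises VM_FF_TIMER, so the TMCLOCK_VIRTUAL_SYNC clock never runs past a pending timer.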