Changeset 48199 in vbox

Timestamp: 2013-08-30 3:05:23 PM (11 years ago)
Files: 1 edited
trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp
r45310 → r48199

(Unified diff, whitespace-only changes ignored: the code enclosed by the new #if/#ifdef guards is re-indented one level in r48199 but is otherwise unchanged, so it appears below as context. Runs the changeset viewer skipped are marked with "…".)

Hunk 1 (line 110):

     AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
     AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
-# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

Hunk 2 (old line 230 / new line 230, contended shared enter):

             }
 
-#if defined(IN_RING3)
+#if defined(IN_RING3) || defined(IN_RING0)
+# ifdef IN_RING0
+            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
+                && ASMIntAreEnabled())
+# endif
+            {
                 /*
                  * Add ourselves to the queue and wait for the direction to change.
                  */
                 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
                 c++;
                 Assert(c < RTCSRW_CNT_MASK / 2);
 
                 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                 cWait++;
                 Assert(cWait <= c);
                 Assert(cWait < RTCSRW_CNT_MASK / 2);
 
                 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
 
                 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                 {
                     for (uint32_t iLoop = 0; ; iLoop++)
                     {
                         int rc;
+# ifdef IN_RING3
 # if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                         rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                                    RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                         if (RT_SUCCESS(rc))
 # else
                         RTTHREAD hThreadSelf = RTThreadSelf();
                         RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
 # endif
+# endif
                         {
                             do
                                 rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                   (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                                   RT_INDEFINITE_WAIT);
                             while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
+# ifdef IN_RING3
                             RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
+# endif
                             if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                 return VERR_SEM_DESTROYED;
                         }
                         if (RT_FAILURE(rc))
                         {
                             /* Decrement the counts and return the error. */
                             for (;;)
                             {
                                 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                                 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                                 c--;
                                 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                                 cWait--;
                                 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                                 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                                 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                     break;
                             }
                             return rc;
                         }
 
                         Assert(pThis->s.Core.fNeedReset);
                         u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                         if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                             break;
                         AssertMsg(iLoop < 1, ("%u\n", iLoop));
                     }
 
                     /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                     for (;;)
                     {
                         u64OldState = u64State;
 
                         cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                         Assert(cWait > 0);
                         cWait--;
                         u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                         u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
 
                         if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                         {
                             if (cWait == 0)
                             {
                                 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                                 {
                                     int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                    (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                     AssertRCReturn(rc, rc);
                                 }
                             }
                             break;
                         }
                         u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                     }
 
 # if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                     if (!fNoVal)
                         RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
 # endif
                     break;
                 }
-
-#else
+            }
+#endif /* IN_RING3 || IN_RING3 */
+#ifndef IN_RING3
+# ifdef IN_RING0
+            else
+# endif
+            {
                 /*
                  * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
                  * back to ring-3 and do it there or return rcBusy.
                  */
                 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                 if (rcBusy == VINF_SUCCESS)
                 {
                     PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
                     PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
                     /** @todo Should actually do this in via VMMR0.cpp instead of going all the way
                      *        back to ring-3. Goes for both kind of crit sects. */
                     return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
                 }
                 return rcBusy;
-#endif
+            }
+#endif /* !IN_RING3 */
         }

Hunk 3 (old line 558 / new line 573, shared leave):

         else
         {
-#if defined(IN_RING3)
+#if defined(IN_RING3) || defined(IN_RING0)
+# ifdef IN_RING0
+            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
+                && ASMIntAreEnabled())
+# endif
+            {
                 /* Reverse the direction and signal the writer threads. */
                 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                 {
                     int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                     AssertRC(rc);
                     break;
                 }
-#else
+            }
+#endif /* IN_RING3 || IN_RING0 */
+#ifndef IN_RING3
+# ifdef IN_RING0
+            else
+# endif
+            {
                 /* Queue the exit request (ring-3). */
                 PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
                 PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
                 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                 break;
+            }
 #endif
         }

Hunk 4 (old line 762 / new line 789, exclusive enter):

     STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
 
-#if defined(IN_RING3)
-    if (!fTryOnly)
-    {
+#if defined(IN_RING3) || defined(IN_RING0)
+    if (   !fTryOnly
+# ifdef IN_RING0
+        && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
+        && ASMIntAreEnabled()
+# endif
+       )
+    {
+
         /*
          * Wait for our turn.
 …
         {
             int rc;
-# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+# ifdef IN_RING3
+#  ifdef PDMCRITSECTRW_STRICT
             if (hThreadSelf == NIL_RTTHREAD)
                 hThreadSelf = RTThreadSelfAutoAdopt();
 …
                                  RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
             if (RT_SUCCESS(rc))
 #  else
             RTTHREAD hThreadSelf = RTThreadSelf();
             RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
+#  endif
 # endif
             {
 …
                                          RT_INDEFINITE_WAIT);
                 while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
+# ifdef IN_RING3
                 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
+# endif
                 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                     return VERR_SEM_DESTROYED;
 …
     }
     else
-#endif /* IN_RING3 */
+#endif /* IN_RING3 || IN_RING0 */
     {
 #ifdef IN_RING3
 …
         if (rcBusy == VINF_SUCCESS)
         {
+            Assert(!fTryOnly);
             PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
             PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
 …

Hunk 5 (old line 1052 / new line 1090, exclusive leave):

      * Update the state.
      */
-#if defined(IN_RING3)
+#if defined(IN_RING3) || defined(IN_RING0)
+# ifdef IN_RING0
+    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
+        && ASMIntAreEnabled())
+# endif
+    {
         ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
         STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
         ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);
 
         for (;;)
         {
             uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
             uint64_t u64OldState = u64State;
 
             uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
             Assert(c > 0);
             c--;
 
             if (   c > 0
                 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
             {
                 /* Don't change the direction, wake up the next writer if any. */
                 u64State &= ~RTCSRW_CNT_WR_MASK;
                 u64State |= c << RTCSRW_CNT_WR_SHIFT;
                 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                 {
                     if (c > 0)
                     {
                         int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                         AssertRC(rc);
                     }
                     break;
                 }
             }
             else
             {
                 /* Reverse the direction and signal the reader threads. */
                 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                 {
                     Assert(!pThis->s.Core.fNeedReset);
                     ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                     int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                     AssertRC(rc);
                     break;
                 }
             }
 
             ASMNopPause();
             if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                 return VERR_SEM_DESTROYED;
         }
-#else
+    }
+#endif /* IN_RING3 || IN_RING0 */
+#ifndef IN_RING3
+# ifdef IN_RING0
+    else
+# endif
+    {
         /*
          * We cannot call neither SUPSemEventSignal nor SUPSemEventMultiSignal,
          * so queue the exit request (ring-3).
          */
         PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
         PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
         uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
         LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
         AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
         pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
         STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
         STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
+    }
 #endif
 }
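Every hunk above applies the same guard: code that previously compiled only for ring-3, where blocking on the SUPSemEvent* semaphores is always allowed, is now also compiled for ring-0 but executed there only when RTThreadPreemptIsEnabled(NIL_RTTHREAD) and ASMIntAreEnabled() report that the thread may sleep; otherwise the pre-existing non-ring-3 fallback (defer to ring-3 via VMMRZCallRing3 or a queued leave, or return rcBusy) is kept. Below is a minimal, self-contained C sketch of that control flow, not VirtualBox code; every name in it (can_block, wait_on_event, defer_to_ring3, the two booleans) is a hypothetical stand-in for the real calls named above.

/*
 * Sketch of the "block only if this context may sleep" pattern from the
 * changeset.  The context queries and fallbacks are hypothetical stand-ins
 * for RTThreadPreemptIsEnabled()/ASMIntAreEnabled() and the ring-3 deferral.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { CTX_RING3, CTX_RING0 } lockctx_t;

/* Stand-ins for the real context queries. */
static bool preemption_enabled = true;   /* RTThreadPreemptIsEnabled(NIL_RTTHREAD) */
static bool interrupts_enabled = true;   /* ASMIntAreEnabled() */

static bool can_block(lockctx_t ctx)
{
    /* Ring-3 may always sleep; ring-0 only when preemptible with interrupts on. */
    return ctx == CTX_RING3 || (preemption_enabled && interrupts_enabled);
}

static int wait_on_event(void)            /* stands in for the semaphore wait */
{
    puts("blocking on the event semaphore");
    return 0;                             /* success */
}

static int defer_to_ring3(void)           /* stands in for the ring-3 deferral */
{
    puts("deferring the operation to ring-3");
    return 0;
}

/* Contended acquisition: block if the context allows it, otherwise defer or fail. */
static int enter_contended(lockctx_t ctx, int rcBusy)
{
    if (can_block(ctx))
        return wait_on_event();
    if (rcBusy == 0)                      /* caller accepts waiting indirectly */
        return defer_to_ring3();
    return rcBusy;                        /* caller asked not to wait */
}

int main(void)
{
    enter_contended(CTX_RING3, 0);        /* blocks */
    preemption_enabled = false;
    enter_contended(CTX_RING0, 0);        /* defers to ring-3 */
    enter_contended(CTX_RING0, -1);       /* returns the busy status */
    return 0;
}

The design point the diff illustrates is that the blocking path and the deferral path now stay compiled side by side for ring-0, and the choice between them moves from compile time (#if defined(IN_RING3)) to run time.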