Changeset 18291 in vbox

- Timestamp: 2009-03-26 05:11:07 AM (16 years ago)
- Location: trunk
- Files: 7 edited
Legend:
- unchanged (context lines, prefixed with a space)
- added (prefixed with +)
- removed (prefixed with -)
trunk/include/VBox/pgm.h
r18143 → r18291

 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
 VMMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable);
-VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc);
+/** @name PGMR3MapPT flags.
+ * @{ */
+/** The mapping may be unmapped later. The default is permanent mappings. */
+#define PGMR3MAPPT_FLAGS_UNMAPPABLE     RT_BIT(0)
+/** @} */
+VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc);
 VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr);
 VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM);
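The fFlags parameter added above is what later allows a mapping to be removed again: per the new @remarks in PGMMap.cpp further down, PGMR3UnmapPT may only be used on mappings created with PGMR3MAPPT_FLAGS_UNMAPPABLE, while passing 0 keeps the old permanent behaviour. A minimal caller sketch follows (the helper name, guest address, size and description string are hypothetical, not part of this changeset):

    /* Hypothetical usage sketch of the new PGMR3MapPT signature. */
    static int exampleMapTemporaryPT(PVM pVM, PFNPGMRELOCATE pfnRelocate, void *pvUser)
    {
        RTGCPTR  GCPtr = 0xa0000000;    /* example: 4 MB aligned guest address */
        uint32_t cb    = 4U*_1M;        /* example: size within the 2 MB..64 MB range */

        /* PGMR3MAPPT_FLAGS_UNMAPPABLE makes the mapping removable later;
           passing 0 (the default) creates a permanent mapping. */
        int rc = PGMR3MapPT(pVM, GCPtr, cb, PGMR3MAPPT_FLAGS_UNMAPPABLE,
                            pfnRelocate, pvUser, "Example unmappable mapping");
        if (RT_SUCCESS(rc))
        {
            /* ... use the mapping ... */
            rc = PGMR3UnmapPT(pVM, GCPtr); /* only valid for unmappable mappings */
        }
        return rc;
    }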
trunk/src/VBox/VMM/MMHyper.cpp
r18286 → r18291

 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
     pVM->mm.s.cbHyperArea -= _4M;
-int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea,
+int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
                     mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
 if (RT_FAILURE(rc))
trunk/src/VBox/VMM/PGM.cpp
r18203 → r18291

 pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID;
 pVM->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
+pVM->pgm.s.GCPtrPrevRamRangeMapping = MM_HYPER_AREA_ADDRESS;
 pVM->pgm.s.fA20Enabled = true;
 pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
…
 if (pVM->pgm.s.pRamRangesR3)
 {
-    pVM->pgm.s.pRamRangesRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pRamRangesR3);
-    for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur->pNextR3; pCur = pCur->pNextR3)
-        pCur->pNextRC = MMHyperR3ToRC(pVM, pCur->pNextR3);
+    /* Update the pSelfRC pointers and relink them. */
+    for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
+        if (!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING))
+            pCur->pSelfRC = MMHyperCCToRC(pVM, pCur);
+    pgmR3PhysRelinkRamRanges(pVM);
 }
trunk/src/VBox/VMM/PGMInternal.h
r18230 → r18291

 #endif

-/** Size of the GCPtrConflict array in PGMMAPPING. */
+/** Size of the GCPtrConflict array in PGMMAPPING.
+ * @remarks Must be a power of two. */
 #define PGMMAPPING_CONFLICT_MAX 8

…
 R3PTRTYPE(const char *) pszDesc;
 /** Last 8 addresses that caused conflicts. */
-RTGCPTR GCPtrConflict[PGMMAPPING_CONFLICT_MAX];
+RTGCPTR aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
 /** Number of conflicts for this hypervisor mapping. */
 uint32_t cConflicts;
…
 typedef struct PGMRAMRANGE
 {
+    /** Start of the range. Page aligned. */
+    RTGCPHYS GCPhys;
+    /** Size of the range. (Page aligned of course). */
+    RTGCPHYS cb;
     /** Pointer to the next RAM range - for R3. */
     R3PTRTYPE(struct PGMRAMRANGE *) pNextR3;
…
     /** Pointer to the next RAM range - for RC. */
     RCPTRTYPE(struct PGMRAMRANGE *) pNextRC;
-    /** Pointer alignment. */
-    RTRCPTR RCPtrAlignment;
-    /** Start of the range. Page aligned. */
-    RTGCPHYS GCPhys;
+    /** PGM_RAM_RANGE_FLAGS_* flags. */
+    uint32_t fFlags;
     /** Last address in the range (inclusive). Page aligned (-1). */
     RTGCPHYS GCPhysLast;
-    /** Size of the range. (Page aligned of course). */
-    RTGCPHYS cb;
-    /** MM_RAM_* flags */
-    uint32_t fFlags;
-    uint32_t u32Alignment; /**< alignment. */
+    /** Start of the HC mapping of the range. This is only used for MMIO2. */
+    R3PTRTYPE(void *) pvR3;
 #ifndef VBOX_WITH_NEW_PHYS_CODE
     /** R3 virtual lookup ranges for chunks.
…
 # endif
 #endif
-    /** Start of the HC mapping of the range. This is only used for MMIO2. */
-    R3PTRTYPE(void *) pvR3;
     /** The range description. */
     R3PTRTYPE(const char *) pszDesc;
+    /** Pointer to self - R0 pointer. */
+    R0PTRTYPE(struct PGMRAMRANGE *) pSelfR0;
+    /** Pointer to self - RC pointer. */
+    RCPTRTYPE(struct PGMRAMRANGE *) pSelfRC;
     /** Padding to make aPage aligned on sizeof(PGMPAGE). */
-#ifdef VBOX_WITH_NEW_PHYS_CODE
-    uint32_t au32Reserved[2];
-#elif HC_ARCH_BITS == 32
-    uint32_t au32Reserved[1];
-#endif
-
+#if HC_ARCH_BITS == (defined(VBOX_WITH_NEW_PHYS_CODE) ? 64 : 32)
+    uint32_t u32Alignment2;
+#endif
     /** Array of physical guest page tracking structures. */
     PGMPAGE aPages[1];
…
 typedef PGMRAMRANGE *PPGMRAMRANGE;

-#ifndef VBOX_WITH_NEW_PHYS_CODE
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+/** @name PGMRAMRANGE::fFlags
+ * @{ */
+/** The RAM range is floating around as an independent guest mapping. */
+#define PGM_RAM_RANGE_FLAGS_FLOATING    RT_BIT(20)
+/** @} */
+#else
 /** Return hc ptr corresponding to the ram range and physical offset */
 #define PGMRAMRANGE_GETHCPTR(pRam, off) \
…
 /** RC pointer corresponding to PGM::pRamRangesR3. */
 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC;
-/** The configured RAM size. */
+/** The configured RAM size.
+ * @remarks Do NOT use this, it's too small to hold the whole stuff.
+ * @todo Remove with VBOX_WITH_NEW_PHYS_CODE! */
 RTUINT cbRamSize;

…
 /** Base address (GC) of fixed mapping */
 RTGCPTR GCPtrMappingFixed;
-#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
-    uint32_t u32Padding0; /**< alignment padding. */
-#endif
+/** The address of the previous RAM range mapping. */
+RTGCPTR GCPtrPrevRamRangeMapping;

 /** @name Intermediate Context
…
 #ifdef IN_RING3
 int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
+void pgmR3PhysRelinkRamRanges(PVM pVM);
 int pgmR3PhysRamReset(PVM pVM);
 int pgmR3PhysRomReset(PVM pVM);
…
 #endif

-void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE);
 void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
+void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
+int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
 int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
-int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);

 int pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
trunk/src/VBox/VMM/PGMMap.cpp
r17898 → r18291

  * @param GCPtr Virtual Address. (Page table aligned!)
  * @param cb Size of the range. Must be a 4MB aligned!
+ * @param fFlags PGMR3MAPPT_FLAGS_UNMAPPABLE or 0.
  * @param pfnRelocate Relocation callback function.
  * @param pvUser User argument to the callback.
  * @param pszDesc Pointer to description string. This must not be freed.
  */
-VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
-{
-    LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, pfnRelocate, pvUser, pszDesc));
+VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
+{
+    LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d fFlags=%#x pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, fFlags, pfnRelocate, pvUser, pszDesc));
     AssertMsg(pVM->pgm.s.pInterPD, ("Paging isn't initialized, init order problems!\n"));
…
      * Validate input.
      */
+    Assert(!fFlags || fFlags == PGMR3MAPPT_FLAGS_UNMAPPABLE);
     if (cb < _2M || cb > 64 * _1M)
     {
…
      */
     PPGMMAPPING pNew;
-    int rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM, (void **)&pNew);
+    int rc;
+    if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
+        rc = MMHyperAlloc( pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
+    else
+        rc = MMR3HyperAllocOnceNoRel(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
     if (RT_FAILURE(rc))
         return rc;
…
      */
     uint8_t *pbPTs;
-    rc = MMHyperAlloc(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM, (void **)&pbPTs);
+    if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
+        rc = MMHyperAlloc( pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
+    else
+        rc = MMR3HyperAllocOnceNoRel(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
     if (RT_FAILURE(rc))
     {
…
  * @param pVM VM Handle.
  * @param GCPtr Virtual Address. (Page table aligned!)
+ *
+ * @remarks Don't call this without passing PGMR3MAPPT_FLAGS_UNMAPPABLE to
+ *          PGMR3MapPT or you'll burn in the heap.
  */
 VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
…

 /*
  * Mark the mappings as fixed (using fake values) and disabled.
  */
 pVM->pgm.s.fDisableMappings = true;
…
 unsigned i = pMap->cPTs;

-pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE);
+pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE, false /*fDeactivateCR3*/);

 iOldPDE += i;
…
 }

+
 /**
  * Checks if a new mapping address wasn't previously used and caused a clash with guest mappings.
…
 bool pgmR3MapIsKnownConflictAddress(PPGMMAPPING pMapping, RTGCPTR GCPtr)
 {
-    for (unsigned i =0; i<RT_ELEMENTS(pMapping->GCPtrConflict); i++)
-    {
-        if (GCPtr == pMapping->GCPtrConflict[i])
+    for (unsigned i = 0; i < RT_ELEMENTS(pMapping->aGCPtrConflicts); i++)
+    {
+        if (GCPtr == pMapping->aGCPtrConflicts[i])
             return true;
     }
     return false;
 }
+

 /**
…
 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);

-pMapping->GCPtrConflict[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
+pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
 pMapping->cConflicts++;
…
 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);

-pMapping->GCPtrConflict[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
+pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
 pMapping->cConflicts++;
…
 }

+
 /**
  * Read memory from the guest mappings.
trunk/src/VBox/VMM/PGMPhys.cpp
r18266 → r18291

 /**
+ * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
+ *
+ * Called when anything was relocated.
+ *
+ * @param pVM Pointer to the shared VM structure.
+ */
+void pgmR3PhysRelinkRamRanges(PVM pVM)
+{
+    PPGMRAMRANGE pCur;
+
+#ifdef VBOX_STRICT
+    for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
+    {
+        Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
+        Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
+        Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
+        Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
+        Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
+        Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
+        for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesR3; pCur2; pCur2 = pCur2->pNextR3)
+            Assert(   pCur2 == pCur
+                   || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
+    }
+#endif
+
+    pCur = pVM->pgm.s.pRamRangesR3;
+    if (pCur)
+    {
+        pVM->pgm.s.pRamRangesR0 = pCur->pSelfR0;
+        pVM->pgm.s.pRamRangesRC = pCur->pSelfRC;
+
+        for (; pCur->pNextR3; pCur = pCur->pNextR3)
+        {
+            pCur->pNextR0 = pCur->pNextR3->pSelfR0;
+            pCur->pNextRC = pCur->pNextR3->pSelfRC;
+        }
+
+        Assert(pCur->pNextR0 == NIL_RTR0PTR);
+        Assert(pCur->pNextRC == NIL_RTRCPTR);
+    }
+    else
+    {
+        Assert(pVM->pgm.s.pRamRangesR0 == NIL_RTR0PTR);
+        Assert(pVM->pgm.s.pRamRangesRC == NIL_RTRCPTR);
+    }
+}
+
+
+/**
  * Links a new RAM range into the list.
…
 {
     AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
+    Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
+    Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));

     pgmLock(pVM);
…
     PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
     pNew->pNextR3 = pRam;
-    pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
-    pNew->pNextRC = pRam ? MMHyperCCToRC(pVM, pRam) : NIL_RTRCPTR;
+    pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
+    pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;

     if (pPrev)
     {
         pPrev->pNextR3 = pNew;
-        pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
-        pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
+        pPrev->pNextR0 = pNew->pSelfR0;
+        pPrev->pNextRC = pNew->pSelfRC;
     }
     else
     {
         pVM->pgm.s.pRamRangesR3 = pNew;
-        pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
-        pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
+        pVM->pgm.s.pRamRangesR0 = pNew->pSelfR0;
+        pVM->pgm.s.pRamRangesRC = pNew->pSelfRC;
     }
…
 {
     Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
+    Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
+    Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));

     pgmLock(pVM);
…
     {
         pPrev->pNextR3 = pNext;
-        pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
-        pPrev->pNextRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
+        pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
+        pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
     }
     else
     {
         Assert(pVM->pgm.s.pRamRangesR3 == pRam);
         pVM->pgm.s.pRamRangesR3 = pNext;
-        pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
-        pVM->pgm.s.pRamRangesRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
+        pVM->pgm.s.pRamRangesR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
+        pVM->pgm.s.pRamRangesRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
     }
…
 static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
 {
+    pgmLock(pVM);
+
     /* find prev. */
     PPGMRAMRANGE pPrev = NULL;
…

     pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
+
+    pgmUnlock(pVM);
 }

…
 }
 #endif /* VBOX_WITH_NEW_PHYS_CODE */
+
+
+/**
+ * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
+ *
+ * @param pVM           The VM handle.
+ * @param pNew          The new RAM range.
+ * @param GCPhys        The address of the RAM range.
+ * @param GCPhysLast    The last address of the RAM range.
+ * @param RCPtrNew      The RC address if the range is floating. NIL_RTRCPTR
+ *                      if in HMA.
+ * @param R0PtrNew      Ditto for R0.
+ * @param pszDesc       The description.
+ * @param pPrev         The previous RAM range (for linking).
+ */
+static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
+                                         RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
+{
+    /*
+     * Initialize the range.
+     */
+    pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
+    pNew->pSelfRC = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
+    pNew->GCPhys = GCPhys;
+    pNew->GCPhysLast = GCPhysLast;
+    pNew->cb = GCPhysLast - GCPhys + 1;
+    pNew->pszDesc = pszDesc;
+    pNew->fFlags = RCPtrNew != NIL_RTR0PTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
+    pNew->pvR3 = NULL;
+
+    uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
+    RTGCPHYS iPage = cPages;
+    while (iPage-- > 0)
+        PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
+
+    /* Update the page count stats. */
+    pVM->pgm.s.cZeroPages += cPages;
+    pVM->pgm.s.cAllPages  += cPages;
+
+    /*
+     * Link it.
+     */
+    pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
+}
+
+
+/**
+ * Relocate a floating RAM range.
+ *
+ * @copydoc FNPGMRELOCATE.
+ */
+static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
+{
+    PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
+    Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
+    Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);
+
+    switch (enmMode)
+    {
+        case PGMRELOCATECALL_SUGGEST:
+            return true;
+        case PGMRELOCATECALL_RELOCATE:
+        {
+            /* Update myself and then relink all the ranges. */
+            pgmLock(pVM);
+            pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
+            pgmR3PhysRelinkRamRanges(pVM);
+            pgmUnlock(pVM);
+            return true;
+        }
+
+        default:
+            AssertFailedReturn(false);
+    }
+}
+
+
+/**
+ * PGMR3PhysRegisterRam worker that registers a high chunk.
+ *
+ * @returns VBox status code.
+ * @param pVM           The VM handle.
+ * @param GCPhys        The address of the RAM.
+ * @param cRamPages     The number of RAM pages to register.
+ * @param cbChunk       The size of the PGMRAMRANGE guest mapping.
+ * @param iChunk        The chunk number.
+ * @param pszDesc       The RAM range description.
+ * @param ppPrev        Previous RAM range pointer. In/Out.
+ */
+static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
+                                         uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
+                                         PPGMRAMRANGE *ppPrev)
+{
+    const char *pszDescChunk = iChunk == 0
+                             ? pszDesc
+                             : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
+    AssertReturn(pszDescChunk, VERR_NO_MEMORY);
+
+    /*
+     * Allocate memory for the new chunk.
+     */
+    size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
+    PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
+    AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
+    RTR0PTR R0PtrChunk = NIL_RTR0PTR;
+    void *pvChunk = NULL;
+    int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+                              VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
+#else
+                              NULL,
+#endif
+                              paChunkPages);
+    if (RT_SUCCESS(rc))
+    {
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+        if (!VMMIsHwVirtExtForced(pVM))
+            R0PtrChunk = NIL_RTR0PTR;
+#else
+        R0PtrChunk = (uintptr_t)pvChunk;
+#endif
+        memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
+
+        PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
+
+        /*
+         * Create a mapping and map the pages into it.
+         * We push these in below the HMA.
+         */
+        RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
+        rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
+        if (RT_SUCCESS(rc))
+        {
+            pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
+
+            RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
+            RTGCPTR GCPtrPage = GCPtrChunk;
+            for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
+                rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
+            if (RT_SUCCESS(rc))
+            {
+                /*
+                 * Ok, init and link the range.
+                 */
+                pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
+                                             (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
+                *ppPrev = pNew;
+            }
+        }
+
+        if (RT_FAILURE(rc))
+            SUPR3PageFreeEx(pvChunk, cChunkPages);
+    }
+
+    RTMemTmpFree(paChunkPages);
+    return rc;
+}

…
         return rc;

-    /*
-     * Allocate RAM range.
-     */
-    const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
-    PPGMRAMRANGE pNew;
-    rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
-    AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
-
-    /*
-     * Initialize the range.
-     */
-    pNew->GCPhys = GCPhys;
-    pNew->GCPhysLast = GCPhysLast;
-    pNew->pszDesc = pszDesc;
-    pNew->cb = cb;
-    pNew->fFlags = 0;
-
-    pNew->pvR3 = NULL;
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+    if (    GCPhys >= _4G
+        &&  cPages > 256)
+    {
+        /*
+         * The PGMRAMRANGE structures for the high memory can get very big.
+         * In order to avoid SUPR3PageAllocEx allocation failures due to the
+         * allocation size limit there and also to avoid being unable to find
+         * guest mapping space for them, we split this memory up into 4MB in
+         * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
+         * mode.
+         *
+         * The first and last page of each mapping are guard pages and marked
+         * not-present. So, we've got 4186112 and 16769024 bytes available for
+         * the PGMRAMRANGE structure.
+         *
+         * Note! The sizes used here will influence the saved state.
+         */
+        uint32_t cbChunk;
+        uint32_t cPagesPerChunk;
+        if (VMMIsHwVirtExtForced(pVM))
+        {
+            cbChunk = 16U*_1M;
+            cPagesPerChunk = 1048048; /* max ~1048059 */
+            AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
+        }
+        else
+        {
+            cbChunk = 4U*_1M;
+            cPagesPerChunk = 261616; /* max ~261627 */
+            AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616 < 4U*_1M - PAGE_SIZE * 2);
+        }
+        AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
+
+        RTGCPHYS cPagesLeft  = cPages;
+        RTGCPHYS GCPhysChunk = GCPhys;
+        uint32_t iChunk      = 0;
+        while (cPagesLeft > 0)
+        {
+            uint32_t cPagesInChunk = cPagesLeft;
+            if (cPagesInChunk > cPagesPerChunk)
+                cPagesInChunk = cPagesPerChunk;
+
+            rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
+            AssertRCReturn(rc, rc);
+
+            /* advance */
+            GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
+            cPagesLeft  -= cPagesInChunk;
+            iChunk++;
+        }
+    }
+    else
+#endif
+    {
+        /*
+         * Allocate, initialize and link the new RAM range.
+         */
+        const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
+        PPGMRAMRANGE pNew;
+        rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
+        AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
+
 #ifndef VBOX_WITH_NEW_PHYS_CODE
-    pNew->paChunkR3Ptrs = NULL;
-
-    /* Allocate memory for chunk to HC ptr lookup array. */
-    rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
-    AssertRCReturn(rc, rc);
-    pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
-
+        /* Allocate memory for chunk to HC ptr lookup array. */
+        pNew->paChunkR3Ptrs = NULL;
+        rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
+        AssertRCReturn(rc, rc);
+        pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
 #endif
-    RTGCPHYS iPage = cPages;
-    while (iPage-- > 0)
-        PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
-
-    /* Update the page count stats. */
-    pVM->pgm.s.cZeroPages += cPages;
-    pVM->pgm.s.cAllPages += cPages;
-
-    /*
-     * Insert the new RAM range.
-     */
-    pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
+
+        pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
+    }

     /*
…
     for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
     {
-        uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
+        uint32_t iPage = pRam->cb >> PAGE_SHIFT;
+        AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
+
 #ifdef VBOX_WITH_NEW_PHYS_CODE
         if (!pVM->pgm.s.fRamPreAlloc)
…

     /* Initialize the range. */
+    pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
+    pNew->pSelfRC = MMHyperCCToRC(pVM, pNew);
     pNew->GCPhys = GCPhys;
     pNew->GCPhysLast = GCPhysLast;
+    pNew->cb = cb;
     pNew->pszDesc = pszDesc;
-    pNew->cb = cb;
-    pNew->fFlags = 0; /* Some MMIO flag here? */
+    pNew->fFlags = 0; /** @todo add some kind of ad-hoc flag? */

     pNew->pvR3 = NULL;
…

     const uint32_t cPages = cb >> PAGE_SHIFT;
-    AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
+    AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
     AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);

…
     if (RT_SUCCESS(rc))
     {
         pNew->pDevInsR3 = pDevIns;
         pNew->pvR3 = pvPages;
         //pNew->pNext = NULL;
         //pNew->fMapped = false;
         //pNew->fOverlapping = false;
         pNew->iRegion = iRegion;
+        pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
+        pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
         pNew->RamRange.GCPhys = NIL_RTGCPHYS;
         pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
         pNew->RamRange.pszDesc = pszDesc;
         pNew->RamRange.cb = cb;
-        //pNew->RamRange.fFlags = 0;
-
-        pNew->RamRange.pvR3 = pvPages; ///@todo remove this [new phys code]
+        //pNew->RamRange.fFlags = 0; /// @todo MMIO2 flag?
+
+        pNew->RamRange.pvR3 = pvPages;
 #ifndef VBOX_WITH_NEW_PHYS_CODE
-        pNew->RamRange.paChunkR3Ptrs = NULL; ///@todo remove this [new phys code]
+        pNew->RamRange.paChunkR3Ptrs = NULL;
 #endif

…
     {
         AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
-                              ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
-                               GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
-                              VERR_PGM_RAM_CONFLICT);
+                              ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
+                               pRam->GCPhys + (RTGCPTR)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT,
+                               pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
         Assert(PGM_PAGE_IS_ZERO(pPage));
         pPage++;
…
     if (!fRamExists)
     {
+        pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
+        pRamNew->pSelfRC = MMHyperCCToRC(pVM, pRamNew);
         pRamNew->GCPhys = GCPhys;
         pRamNew->GCPhysLast = GCPhysLast;
+        pRamNew->cb = cb;
         pRamNew->pszDesc = pszDesc;
-        pRamNew->cb = cb;
         pRamNew->fFlags = 0;
         pRamNew->pvR3 = NULL;
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r17667 → r18291

 case PGMMODE_PAE_NX:
 {
-    PX86PDPT pShwPdpt;
-    PX86PDPAE pShwPaePd;
-    const unsigned iPdPt = iNewPDE / 256;
-    unsigned iPDE = iNewPDE * 2 % 512;
-
-    pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
+    const unsigned iPdPt = iNewPDE / 256;
+    unsigned iPDE = iNewPDE * 2 % 512;
+    PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
     Assert(pShwPdpt);
 #ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
     PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
 #endif
-    pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
+    PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
     if (!pShwPaePd)
     {
…
         pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
     }
 #ifdef VBOX_STRICT
     else
     if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
     {
…
         AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
     }
 #endif
     if (    pShwPaePd->a[iPDE].n.u1Present
         &&  !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
…
 }

+
 /**
  * Clears all PDEs involved with the mapping in the shadow page table.
  *
- * @param pVM         The VM handle.
- * @param pShwPageCR3 CR3 root page
- * @param pMap        Pointer to the mapping in question.
- * @param iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
- */
-void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
-{
-    Log(("pgmMapClearShadowPDEs old pde %x (cPTs=%x) (mappings enabled %d)\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s)));
+ * @param pVM            The VM handle.
+ * @param pShwPageCR3    CR3 root page
+ * @param pMap           Pointer to the mapping in question.
+ * @param iOldPDE        The index of the 32-bit PDE corresponding to the base of the mapping.
+ * @param fDeactivateCR3 Set if it's pgmMapDeactivateCR3 calling.
+ */
+void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
+{
+    Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));

     if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
…
 case PGMMODE_PAE_NX:
 {
-    PX86PDPT pShwPdpt = NULL;
-    PX86PDPAE pShwPaePd = NULL;
-
-    const unsigned iPdpt = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
-    unsigned iPDE = iOldPDE * 2 % 512;
-    pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
-    pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
+    const unsigned iPdpt = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
+    unsigned iPDE = iOldPDE * 2 % 512;
+    PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
+    PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

     /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
-    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
-
+    if (fDeactivateCR3)
+        pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
+    else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
+    {
+        /* See if there are any other mappings here. This is suboptimal code. */
+        pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
+        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
+            if (    pCur != pMap
+                &&  (   (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
+                     || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
+            {
+                pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
+                break;
+            }
+    }
     if (pCurrentShwPdpt)
     {
…
  * @param iPDE The index of the 32-bit PDE corresponding to the base of the mapping.
  */
-void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
+static void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
 {
     Assert(pShwPageCR3);

-    unsigned i = pMap->cPTs;
+    uint32_t i = pMap->cPTs;
     PGMMODE enmShadowMode = PGMGetShadowMode(pVM);

…
     iPDE--;

     switch (enmShadowMode)
     {
         case PGMMODE_32_BIT:
…

             AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
-                      ("Expected %x vs %x\n", pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT)));
+                      ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
+                       pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
+                       iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
             break;
         }
…
         case PGMMODE_PAE_NX:
         {
-            PX86PDPT pPdpt = NULL;
-            PX86PDPAE pShwPaePd = NULL;
-
-            const unsigned iPD = iPDE / 256; /* iPDE * 2 / 512; iPDE is in 4 MB pages */
-            unsigned iPaePDE = iPDE * 2 % 512;
-            pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
-            pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
+            const unsigned iPD = iPDE / 256; /* iPDE * 2 / 512; iPDE is in 4 MB pages */
+            unsigned iPaePDE = iPDE * 2 % 512;
+            PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
+            PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
             AssertFatal(pShwPaePd);

             AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
-                      ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPaePT0)));
+                      ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
+                       pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
+                       iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));

             iPaePDE++;
…
             AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
-                      ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPaePT1)));
-
-            Assert(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING);
+                      ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
+                       pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
+                       iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
+
+            AssertMsg(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING,
+                      ("%RX64; iPD=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
+                       pPdpt->a[iPD].u,
+                       iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
             break;
         }
…
 }

+
 /**
  * Check the hypervisor mappings in the active CR3.
…

 #ifndef IN_RING0
+
 /**
  * Apply the hypervisor mappings to the active CR3.
…
         return VINF_SUCCESS;

-    /* @note A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
-    Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));
+    /* Note. A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
+    Log4(("pgmMapActivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

     Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));
…
     {
         unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
-
         pgmMapSetShadowPDEs(pVM, pCur, iPDE);
     }
…

     Assert(pShwPageCR3);
+    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

     /*
…
     {
         unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
-
-        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
+        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
     }
     return VINF_SUCCESS;
 }
+

…
 }

+
 /**
  * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
…
     Assert(enmGuestMode <= PGMMODE_PAE_NX);

-    /*
-     * Iterate mappings.
-     */
     if (enmGuestMode == PGMMODE_32_BIT)
     {
…
         Assert(pPD);

-        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
+        /*
+         * Iterate mappings.
+         */
+        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
         {
-            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
-            unsigned iPT = pCur->cPTs;
+            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
+            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+            unsigned iPT = pCur->cPTs;
             while (iPT-- > 0)
             {
                 if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
-                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
+                    &&  (   pVM->fRawR0Enabled
+                         || pPD->a[iPDE + iPT].n.u1User))
                 {
                     STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
…
                     Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                          " iPDE=%#x iPT=%#x PDE=%RGp.\n",
                          (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                          iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                     int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                     AssertRCReturn(rc, rc);
-
-                    /*
-                     * Update pCur.
-                     */
-                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
-                    while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
-                        pCur = pCur->CTX_SUFF(pNext);
                     break;
 #else
                     Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                          " iPDE=%#x iPT=%#x PDE=%RGp.\n",
                          (iPT + iPDE) << X86_PD_SHIFT,
                          iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                     return VINF_PGM_SYNC_CR3;
 #endif
                 }
             }
-            if (!pCur)
-                break;
+            pCur = pNext;
         }
     }
…
              || enmGuestMode == PGMMODE_PAE_NX)
     {
-        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
+        /*
+         * Iterate mappings.
+         */
+        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
         {
-            RTGCPTR GCPtr = pCur->GCPtr;
-
-            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
+            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
+            RTGCPTR GCPtr = pCur->GCPtr;
+            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
             while (iPT-- > 0)
             {
…
                     Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                          " PDE=%016RX64.\n",
                          GCPtr, pCur->pszDesc, Pde.u));
                     int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                     AssertRCReturn(rc, rc);
-
-                    /*
-                     * Update pCur.
-                     */
-                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
-                    while (pCur && pCur->GCPtr < GCPtr)
-                        pCur = pCur->CTX_SUFF(pNext);
                     break;
 #else
                     Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                          " PDE=%016RX64.\n",
                          GCPtr, Pde.u));
                     return VINF_PGM_SYNC_CR3;
 #endif
                 }
                 GCPtr += (1 << X86_PD_PAE_SHIFT);
             }
-            if (!pCur)
-                break;
+            pCur = pNext;
         }
     }
…
         AssertFailed();

+    Assert(!PGMMapHasConflicts(pVM));
     return VINF_SUCCESS;
 }