Changeset 13991 in vbox

- Timestamp:
  2008-11-10 10:36:38 AM (16 years ago)
- Location:
  trunk
- Files:
  10 edited

Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/pgm.h
r13970 → r13991

 VMMDECL(int)    PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags);
 VMMDECL(int)    PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
-VMMDECL(int)    PGMShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
-VMMDECL(int)    PGMShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
-VMMDECL(int)    PGMShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
-VMMDECL(int)    PGMShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
-VMMDECL(int)    PGMShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
 VMMDECL(int)    PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
 VMMDECL(bool)   PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr);
trunk/src/VBox/VMM/PGM.cpp
r13937 → r13991

     pVM->pgm.s.apHCPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM);
     AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[2] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[3]);
-    pVM->pgm.s.pHCPaePDPT = (PX86PDPT)MMR3PageAllocLow(pVM);
-    pVM->pgm.s.pHCNestedRoot = MMR3PageAllocLow(pVM);
+    pVM->pgm.s.pHCPaePDPT = (PX86PDPT)MMR3PageAllocLow(pVM);
+//#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+//    pVM->pgm.s.pShwPaePdptR0 = (uintptr_t)pVM->pgm.s.pShwPaePdptR3;
+//#endif
+    pVM->pgm.s.pShwNestedRootR3 = MMR3PageAllocLow(pVM);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    pVM->pgm.s.pShwNestedRootR0 = (uintptr_t)pVM->pgm.s.pShwNestedRootR3;
+#endif
 
     if (    !pVM->pgm.s.pHC32BitPD
…
         ||  !pVM->pgm.s.apHCPaePDs[3]
         ||  !pVM->pgm.s.pHCPaePDPT
-        ||  !pVM->pgm.s.pHCNestedRoot)
+        ||  !pVM->pgm.s.pShwNestedRootR3)
     {
         AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
…
     pVM->pgm.s.aHCPhysPaePDs[3] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[3]);
     pVM->pgm.s.HCPhysPaePDPT    = MMPage2Phys(pVM, pVM->pgm.s.pHCPaePDPT);
-    pVM->pgm.s.HCPhysNestedRoot = MMPage2Phys(pVM, pVM->pgm.s.pHCNestedRoot);
+    pVM->pgm.s.HCPhysNestedRoot = MMPage2Phys(pVM, pVM->pgm.s.pShwNestedRootR3);
 
     /*
…
     ASMMemZero32(pVM->pgm.s.pHC32BitPD, PAGE_SIZE);
     ASMMemZero32(pVM->pgm.s.pHCPaePDPT, PAGE_SIZE);
-    ASMMemZero32(pVM->pgm.s.pHCNestedRoot, PAGE_SIZE);
+    ASMMemZero32(pVM->pgm.s.pShwNestedRootR3, PAGE_SIZE);
     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apHCPaePDs); i++)
     {
trunk/src/VBox/VMM/PGMInternal.h
r13969 → r13991

     RTRCPTR                     alignment5; /**< structure size alignment. */
 #endif
-    /** The Page Map Level 4 table - HCPtr. */
-#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
-    R3PTRTYPE(PX86PML4)         pHCPaePML4;
-#else
-    R3R0PTRTYPE(PX86PML4)       pHCPaePML4;
+    /** The Page Map Level 4 table - R3 Ptr. */
+    R3PTRTYPE(PX86PML4)         pShwPaePml4R3;
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    /** The Page Map Level 4 table - R0 Ptr. */
+    R0PTRTYPE(PX86PML4)         pShwPaePml4R0;
 #endif
     /** The Physical Address (HC) of the Page Map Level 4 table. */
…
     R3R0PTRTYPE(PPGMPOOLPAGE)   pHCShwAmd64CR3;
 #endif
-
     /** @}*/
 
     /** @name Nested Shadow Paging
      * @{ */
-    /** Root table; format depends on the host paging mode (AMD-V) or EPT */
-#if 0///@todo def VBOX_WITH_2X_4GB_ADDR_SPACE
-    R3PTRTYPE(void *)           pHCNestedRoot;
-#else
-    R3R0PTRTYPE(void *)         pHCNestedRoot;
+    /** Root table; format depends on the host paging mode (AMD-V) or EPT - R3 pointer. */
+    RTR3PTR                     pShwNestedRootR3;
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    /** Root table; format depends on the host paging mode (AMD-V) or EPT - R0 pointer. */
+    RTR0PTR                     pShwNestedRootR0;
 #endif
     /** The Physical Address (HC) of the nested paging root. */
     RTHCPHYS                    HCPhysNestedRoot;
+    /** @} */
 
     /** @name Function pointers for Shadow paging.
…
  * @returns Pointer to the PML4 entry.
  * @param   pPGM        Pointer to the PGM instance data.
- * @param   iPml4e      The index.
+ * @param   iPml4       The index.
  */
-DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4e)
+DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4)
 {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
…
     int rc = PGMDynMapGCPage(PGM2VM(pPGM), pPGM->GCPhysCR3, (void **)pGuestPml4);
     AssertRCReturn(rc, NULL);
-    return &pGuestPml4->a[iPml4e];
+    return &pGuestPml4->a[iPml4];
 #else
     Assert(pPGM->CTX_SUFF(pGstAmd64PML4));
-    return &pPGM->CTX_SUFF(pGstAmd64PML4)->a[iPml4e];
+    return &pPGM->CTX_SUFF(pGstAmd64PML4)->a[iPml4];
 #endif
 }
…
  * @returns The PML4 entry.
  * @param   pPGM        Pointer to the PGM instance data.
- * @param   iPml4e      The index.
+ * @param   iPml4       The index.
  */
-DECLINLINE(X86PGPAEUINT) pgmGstGetLongModePML4E(PPGM pPGM, unsigned int iPml4e)
+DECLINLINE(X86PGPAEUINT) pgmGstGetLongModePML4E(PPGM pPGM, unsigned int iPml4)
 {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
…
     int rc = PGMDynMapGCPage(PGM2VM(pPGM), pPGM->GCPhysCR3, (void **)pGuestPml4);
     AssertRCReturn(rc, 0);
-    return pGuestPml4->a[iPml4e].u;
+    return pGuestPml4->a[iPml4].u;
 #else
     Assert(pPGM->CTX_SUFF(pGstAmd64PML4));
-    return pPGM->CTX_SUFF(pGstAmd64PML4)->a[iPml4e].u;
+    return pPGM->CTX_SUFF(pGstAmd64PML4)->a[iPml4].u;
 #endif
 }
…
 {
     PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4e];
+    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
     if (pPml4e->n.u1Present)
     {
…
 {
     PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4e];
+    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
     if (pPml4e->n.u1Present)
     {
…
 {
     PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    if (pGuestPml4->a[iPml4e].n.u1Present)
+    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    if (pGuestPml4->a[iPml4].n.u1Present)
     {
         PCX86PDPT pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp);
+        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
         AssertRCReturn(rc, 0);
…
 {
     PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    if (pGuestPml4->a[iPml4e].n.u1Present)
+    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    if (pGuestPml4->a[iPml4].n.u1Present)
     {
         PCX86PDPT pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp);
+        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
         AssertRCReturn(rc, NULL);
…
 {
     PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4e];
+    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
     if (pPml4e->n.u1Present)
     {
…
     }
 
+#ifndef IN_RC
+
+
+/**
+ * Gets the shadow page map level-4 pointer.
+ *
+ * @returns Pointer to the shadow PML4.
+ * @param   pPGM        Pointer to the PGM instance data.
+ */
+DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGM pPGM)
+{
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_R0
+    PX86PML4 pShwPml4;
+    Assert(pPGM->HCPhysPaePML4 != 0 && pPGM->HCPhysPaePML4 != NIL_RTHCPHYS);
+    int rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysPaePML4, &pShwPml4);
+    AssertRCReturn(rc, 0);
+    return pShwPml4;
+#else
+    Assert(pPGM->CTX_SUFF(pShwPaePml4));
+    return pPGM->CTX_SUFF(pShwPaePml4);
+#endif
+}
+
+
+/**
+ * Gets the shadow page map level-4 entry for the specified address.
+ *
+ * @returns The entry.
+ * @param   pPGM        Pointer to the PGM instance data.
+ * @param   GCPtr       The address.
+ */
+DECLINLINE(X86PGPAEUINT) pgmShwGetLongModePML4E(PPGM pPGM, RTGCPTR GCPtr)
+{
+    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_R0
+    PCX86PML4 pShwPml4;
+    Assert(pPGM->HCPhysPaePML4 != 0 && pPGM->HCPhysPaePML4 != NIL_RTHCPHYS);
+    int rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysPaePML4, &pShwPml4);
+    AssertRCReturn(rc, 0);
+    return pShwPml4->a[iPml4].u;
+# else
+    Assert(pPGM->CTX_SUFF(pShwPaePml4));
+    return pPGM->CTX_SUFF(pShwPaePml4)->a[iPml4].u;
+# endif
+}
+
+
+/**
+ * Gets the pointer to the specified shadow page map level-4 entry.
+ *
+ * @returns The entry.
+ * @param   pPGM        Pointer to the PGM instance data.
+ * @param   iPml4       The PML4 index.
+ */
+DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4)
+{
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_R0
+    PX86PML4 pShwPml4;
+    Assert(pPGM->HCPhysPaePML4 != 0 && pPGM->HCPhysPaePML4 != NIL_RTHCPHYS);
+    int rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysPaePML4, &pShwPml4);
+    AssertRCReturn(rc, 0);
+    return &pShwPml4->a[iPml4];
+# else
+    Assert(pPGM->CTX_SUFF(pShwPaePml4));
+    return &pPGM->CTX_SUFF(pShwPaePml4)->a[iPml4];
+# endif
+}
+
+#endif /* IN_RC */
 
 /**
…
 {
     PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    if (pGuestPml4->a[iPml4e].n.u1Present)
+    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    if (pGuestPml4->a[iPml4].n.u1Present)
     {
         PCX86PDPT pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp);
+        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
         AssertRCReturn(rc, NULL);
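The PGMInternal.h hunks above replace the single R3R0PTRTYPE members with an explicit ring-3/ring-0 pointer pair (pShwPaePml4R3/R0, pShwNestedRootR3/R0) and read them back through CTX_SUFF(). A minimal sketch of that context-suffix pattern is shown below; the DEMOPGM structure, DEMO_CTX_SUFF macro and demoGetNestedRoot function are simplified stand-ins, not the actual VBox definitions.

/* Simplified illustration: one pointer per CPU context, and a CTX_SUFF-style
 * macro picks the member that is valid for the context the file is compiled
 * for.  When ring-0 shares the ring-3 mapping (no separate 4GB address space),
 * the R0 member is simply seeded with the R3 value, as the PGM.cpp hunk above
 * does for pShwNestedRoot. */
typedef struct DEMOPGM
{
    void *pShwNestedRootR3;   /* mapping valid in ring-3 */
    void *pShwNestedRootR0;   /* mapping valid in ring-0 */
} DEMOPGM;

#ifdef IN_RING0
# define DEMO_CTX_SUFF(pState, Member) ((pState)->Member##R0)
#else
# define DEMO_CTX_SUFF(pState, Member) ((pState)->Member##R3)
#endif

/* Returns whichever mapping is valid for the current compilation context. */
static void *demoGetNestedRoot(DEMOPGM *pState)
{
    return DEMO_CTX_SUFF(pState, pShwNestedRoot);
}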
trunk/src/VBox/VMM/PGMPool.cpp
r13824 → r13991

     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].Core.Key    = NIL_RTHCPHYS;
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].GCPhys      = NIL_RTGCPHYS;
-    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3    = pVM->pgm.s.pHCPaePDPT; /* not used */
+    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3    = pVM->pgm.s.pHCPaePDPT; /* not used - isn't it wrong as well? */
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].enmKind     = PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4;
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].idx         = PGMPOOL_IDX_AMD64_CR3;
…
     pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].Core.Key  = NIL_RTHCPHYS;
     pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].GCPhys    = NIL_RTGCPHYS;
-    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].pvPageR3  = pVM->pgm.s.pHCNestedRoot;
+    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].pvPageR3  = pVM->pgm.s.pShwNestedRootR3;
     pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].enmKind   = PGMPOOLKIND_ROOT_NESTED;
     pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].idx       = PGMPOOL_IDX_NESTED_ROOT;
trunk/src/VBox/VMM/PGMShw.h
r13936 → r13991

     Assert(HWACCMIsNestedPagingActive(pVM));
 
-    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVM->pgm.s.pHCNestedRoot, pVM->pgm.s.HCPhysNestedRoot));
+    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVM->pgm.s.pShwNestedRootR3, pVM->pgm.s.HCPhysNestedRoot));
     /* In non-nested mode we allocate the PML4 page on-demand; in nested mode we just use our fixed nested paging root. */
-    pVM->pgm.s.pHCPaePML4 = (PX86PML4)pVM->pgm.s.pHCNestedRoot;
+    pVM->pgm.s.pShwPaePml4R3 = (R3PTRTYPE(PX86PML4))pVM->pgm.s.pShwNestedRootR3;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    pVM->pgm.s.pShwPaePml4R0 = (R0PTRTYPE(PX86PML4))pVM->pgm.s.pShwNestedRootR0;
+# endif
     pVM->pgm.s.HCPhysPaePML4 = pVM->pgm.s.HCPhysNestedRoot;
 #endif
…
 #if PGM_SHW_TYPE == PGM_TYPE_NESTED
     Assert(HWACCMIsNestedPagingActive(pVM));
-    pVM->pgm.s.pHCPaePML4 = 0;
+    pVM->pgm.s.pShwPaePml4R3 = 0;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    pVM->pgm.s.pShwPaePml4R0 = 0;
+# endif
     pVM->pgm.s.HCPhysPaePML4 = 0;
     Log(("Leave nested shadow paging mode\n"));
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r13969 → r13991

 *   Internal Functions                                                         *
 *******************************************************************************/
+DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
+DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
+DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
+DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
+DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
+
 
 /*
…
  * @param   pGstPdpe    Guest PDPT entry
  * @param   ppPD        Receives address of page directory
- */
-VMMDECL(int) PGMShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
+ * @remarks Unused.
+ */
+DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
 {
     PPGM pPGM = &pVM->pgm.s;
…
     }
     /* The PD was cached or created; hook it up now. */
-    pPdpe->u |=   pShwPage->Core.Key
-              |   (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
+    pPdpe->u |= pShwPage->Core.Key
+             | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
 
     *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
…
  * @param   ppPdpt      Receives address of pdpt
  * @param   ppPD        Receives address of page directory
- */
-VMMDECL(int) PGMShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
+ * @remarks Unused.
+ */
+DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
 {
     PPGM pPGM = &pVM->pgm.s;
…
  * @param   ppPD        Receives address of page directory
  */
-VMMDECL(int) PGMShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
-{
-    PPGM pPGM = &pVM->pgm.s;
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
-    PX86PML4E pPml4e;
+DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
+{
+    PPGM           pPGM          = &pVM->pgm.s;
+    PPGMPOOL       pPool         = pPGM->CTX_SUFF(pPool);
+    const unsigned iPml4         = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    PX86PML4E      pPml4e        = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
+    bool           fNestedPaging = HWACCMIsNestedPagingActive(pVM);
     PPGMPOOLPAGE pShwPage;
     X86PML4E Pml4eGst;
     int rc;
-    bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
-
-    Assert(pVM->pgm.s.pHCPaePML4);
 
     /* Allocate page directory pointer table if not present. */
-    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
     if (    !pPml4e->n.u1Present
         &&  !(pPml4e->u & X86_PML4E_PG_MASK))
     {
         Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
-
         if (!fNestedPaging)
         {
…
          * are fine.)
          */
         Assert(pVM->pgm.s.pHCShwAmd64CR3);
-        Pml4eGst.u = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4e);
+        Pml4eGst.u = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4);
 
         rc = pgmPoolAlloc(pVM, Pml4eGst.u & X86_PML4E_PG_MASK,
-                          PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage);
+                          PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4, &pShwPage);
     }
     else
         rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(63) /* hack: make the address unique */,
-                          PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
+                          PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
 
     if (rc == VERR_PGM_POOL_FLUSHED)
…
         /** @todo why are we looking up the guest PDPTE here? Isn't pGstPdpe
          *        trustworthy? */
-        Pml4eGst.u = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4e);
+        Pml4eGst.u = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4);
         PX86PDPT pPdptGst;
         rc = PGM_GCPHYS_2_PTR(pVM, Pml4eGst.u & X86_PML4E_PG_MASK, &pPdptGst);
…
 
 /**
- * Gets the SHADOW page directory pointer for the specified address.
+ * Gets the SHADOW page directory pointer for the specified address (long mode).
  *
  * @returns VBox status.
…
  * @param   ppPD        Receives address of page directory
  */
-VMMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
-{
-    PPGM pPGM = &pVM->pgm.s;
-    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
-    PX86PML4E pPml4e;
-    PPGMPOOLPAGE pShwPage;
-
-    AssertReturn(pVM->pgm.s.pHCPaePML4, VERR_INTERNAL_ERROR);
-
-    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
+DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
+{
+    PPGM           pPGM   = &pVM->pgm.s;
+    const unsigned iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    PCX86PML4E     pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
+    AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
+    if (ppPml4e)
+        *ppPml4e = (PX86PML4E)pPml4e;
     if (!pPml4e->n.u1Present)
         return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
 
-    pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
+    PPGMPOOL     pPool    = pPGM->CTX_SUFF(pPool);
+    PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
     AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
 
-    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
-    PX86PDPE pPdpe = &pPdpt->a[iPdPt];
-
-    *ppPdpt = pPdpt;
-    if (!pPdpe->n.u1Present)
+    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+    PCX86PDPT      pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
+    if (!pPdpt->a[iPdPt].n.u1Present)
         return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
 
-    pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
+    pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
     AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
…
  * @param   ppPD        Receives address of page directory
  */
-VMMDECL(int) PGMShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
-{
-    PPGM pPGM = &pVM->pgm.s;
-    const unsigned iPml4e = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
-    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
-    PEPTPML4 pPml4 = (PEPTPML4)pPGM->pHCNestedRoot;
+DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
+{
+    PPGM pPGM = &pVM->pgm.s;
+    const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
+    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
+    PEPTPML4 pPml4;
     PEPTPML4E pPml4e;
     PPGMPOOLPAGE pShwPage;
…
 
     Assert(HWACCMIsNestedPagingActive(pVM));
+
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_R0
+    rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysNestedRoot, &pPml4);
+    AssertRCReturn(rc);
+# else
+    pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
+# endif
     Assert(pPml4);
 
     /* Allocate page directory pointer table if not present. */
-    pPml4e = &pPml4->a[iPml4e];
+    pPml4e = &pPml4->a[iPml4];
     if (    !pPml4e->n.u1Present
         &&  !(pPml4e->u & EPT_PML4E_PG_MASK))
     {
         Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
 
-        rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PML4E_PG_MASK) + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
+        rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PML4E_PG_MASK) + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
         if (rc == VERR_PGM_POOL_FLUSHED)
         {
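The table walks above repeatedly derive the PML4 and PDPT indices with expressions such as (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK. The sketch below shows that index math in isolation; the DEMO_* constants are the standard x86-64 long-mode values (9 index bits per level, 4 KB pages) assumed for illustration, not constants taken from this changeset.

/* Decompose a 64-bit guest address into its four paging-level indices plus the
 * page offset, the same arithmetic the hunks above apply per level. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PML4_SHIFT 39
#define DEMO_PDPT_SHIFT 30
#define DEMO_PD_SHIFT   21
#define DEMO_PT_SHIFT   12
#define DEMO_IDX_MASK   0x1ff   /* 512 entries per table */

int main(void)
{
    uint64_t GCPtr = UINT64_C(0x00007f1234567890);   /* arbitrary guest address */
    unsigned iPml4 = (unsigned)((GCPtr >> DEMO_PML4_SHIFT) & DEMO_IDX_MASK);
    unsigned iPdpt = (unsigned)((GCPtr >> DEMO_PDPT_SHIFT) & DEMO_IDX_MASK);
    unsigned iPd   = (unsigned)((GCPtr >> DEMO_PD_SHIFT)   & DEMO_IDX_MASK);
    unsigned iPt   = (unsigned)((GCPtr >> DEMO_PT_SHIFT)   & DEMO_IDX_MASK);
    printf("PML4=%u PDPT=%u PD=%u PT=%u offset=0x%03llx\n",
           iPml4, iPdpt, iPd, iPt, (unsigned long long)(GCPtr & 0xfff));
    return 0;
}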
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r13937 → r13991

 # endif
 
-    rc = PGMShwSyncLongModePDPtr(pVM, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncLongModePDPtr(pVM, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
     PEPTPD pPDDst;
 
-    rc = PGMShwGetEPTPDPtr(pVM, pvFault, NULL, &pPDDst);
+    rc = pgmShwGetEPTPDPtr(pVM, pvFault, NULL, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
 # else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
     /* PML4 */
-    AssertReturn(pVM->pgm.s.pHCPaePML4, VERR_INTERNAL_ERROR);
-
-    const unsigned iPml4e  = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    AssertReturn(pVM->pgm.s.pShwPaePml4R3, VERR_INTERNAL_ERROR);
+
+    const unsigned iPml4   = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
     const unsigned iPdpte  = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
     const unsigned iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     PX86PDPAE      pPDDst;
     PX86PDPT       pPdptDst;
-    PX86PML4E      pPml4eDst = &pVM->pgm.s.pHCPaePML4->a[iPml4e];
-    rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
+    PX86PML4E      pPml4eDst;
+    rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, &pPml4eDst, &pPdptDst, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
             LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                      GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
-            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
+            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4);
             pPml4eDst->u = 0;
             STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
…
             LogFlow(("InvalidatePage: Out-of-sync PML4E at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                      GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
-            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
+            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4);
             pPml4eDst->u = 0;
             STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
…
             LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
                      GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
-            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
+            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4);
             pPml4eDst->u = 0;
             STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
…
     PX86PDPT pPdptDst;
 
-    int rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
+    int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     AssertRCSuccessReturn(rc, rc);
     Assert(pPDDst && pPdptDst);
…
     PX86PDPT pPdptDst;
 
-    int rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
+    int rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     AssertRCSuccessReturn(rc, rc);
     Assert(pPDDst && pPdptDst);
…
     EPTPDE PdeDst;
 
-    int rc = PGMShwGetEPTPDPtr(pVM, GCPtrPage, NULL, &pPDDst);
+    int rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, NULL, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
     PX86PDPAE pPDDst;
     PX86PDPT pPdptDst;
-    rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
+    rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     AssertRCSuccessReturn(rc, rc);
     Assert(pPDDst);
…
     PX86PDPAE pPDDst;
     PX86PDPT pPdptDst;
-    rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
+    rc = pgmShwGetLongModePDPtr(pVM, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     AssertRCSuccessReturn(rc, rc);
     Assert(pPDDst);
…
     PEPTPDPT pPdptDst;
 
-    rc = PGMShwGetEPTPDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
+    rc = pgmShwGetEPTPDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
 # endif
 
-    int rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
+    int rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
 # endif
 
-    rc = PGMShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
+    rc = pgmShwSyncLongModePDPtr(pVM, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
     }
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
-    for (uint64_t iPml4e = 0; iPml4e < X86_PG_PAE_ENTRIES; iPml4e++)
+    for (uint64_t iPml4 = 0; iPml4 < X86_PG_PAE_ENTRIES; iPml4++)
     {
         PPGMPOOLPAGE pShwPdpt = NULL;
…
         RTGCPHYS GCPhysPdptSrc;
 
-        pPml4eSrc = &pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPml4e];
-        pPml4eDst = &pVM->pgm.s.CTXMID(p,PaePML4)->a[iPml4e];
+        pPml4eSrc = &pVM->pgm.s.CTXSUFF(pGstPaePML4)->a[iPml4];
+        pPml4eDst = &pVM->pgm.s.CTXMID(p,PaePML4)->a[iPml4];
 
         /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
…
             /* Free it. */
             LogFlow(("SyncCR3: Out-of-sync PML4E (GCPhys) GCPtr=%RX64 %RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
-                     (uint64_t)iPml4e << X86_PML4_SHIFT, pShwPdpt->GCPhys, GCPhysPdptSrc, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
-            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
+                     (uint64_t)iPml4 << X86_PML4_SHIFT, pShwPdpt->GCPhys, GCPhysPdptSrc, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
+            pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4);
             pPml4eDst->u = 0;
             continue;
…
             PX86PDPAE pPDDst;
             PX86PDEPAE pPDEDst;
-            RTGCPTR GCPtr = (iPml4e << X86_PML4_SHIFT) || (iPdpte << X86_PDPT_SHIFT);
+            RTGCPTR GCPtr = (iPml4 << X86_PML4_SHIFT) || (iPdpte << X86_PDPT_SHIFT);
             PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
 
-            int rc = PGMShwGetLongModePDPtr(pVM, GCPtr, &pPdptDst, &pPDDst);
+            int rc = pgmShwGetLongModePDPtr(pVM, GCPtr, NULL, &pPdptDst, &pPDDst);
             if (rc != VINF_SUCCESS)
             {
…
                 /* Free it. */
                 LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%RX64 %RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
-                         ((uint64_t)iPml4e << X86_PML4_SHIFT) + ((uint64_t)iPdpte << X86_PDPT_SHIFT), pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
+                         ((uint64_t)iPml4 << X86_PML4_SHIFT) + ((uint64_t)iPdpte << X86_PDPT_SHIFT), pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
 
                 /* Mark it as not present if there's no hypervisor mapping present. (bit flipped at the top of Trap0eHandler) */
…
 
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
-    unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-
-    for (; iPml4e < X86_PG_PAE_ENTRIES; iPml4e++)
+    unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+
+    for (; iPml4 < X86_PG_PAE_ENTRIES; iPml4++)
     {
         PPGMPOOLPAGE pShwPdpt = NULL;
…
         RTGCPHYS GCPhysPdptSrc;
 
-        pPml4eSrc = pgmGstGetLongModePML4EPtr(&pVM->pgm.s, iPml4e);
-        pPml4eDst = &pVM->pgm.s.CTXMID(p,PaePML4)->a[iPml4e];
+        pPml4eSrc = pgmGstGetLongModePML4EPtr(&pVM->pgm.s, iPml4);
+        pPml4eDst = pgmShwGetLongModePML4EPtr(&pVM->pgm.s, iPml4);
 
         /* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
…
         if (GCPhysPdptSrc != pShwPdpt->GCPhys)
         {
-            AssertMsgFailed(("Physical address doesn't match! iPml4e %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4e, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
+            AssertMsgFailed(("Physical address doesn't match! iPml4 %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
             GCPtr += _2M * UINT64_C(512) * UINT64_C(512);
             cErrors++;
…
             PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
 
-            rc = PGMShwGetLongModePDPtr(pVM, GCPtr, &pPdptDst, &pPDDst);
+            rc = pgmShwGetLongModePDPtr(pVM, GCPtr, NULL, &pPdptDst, &pPDDst);
             if (rc != VINF_SUCCESS)
             {
…
             {
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
-                AssertMsgFailed(("Physical address doesn't match! iPml4e %d iPdpte %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4e, iPdpte, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
+                AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpte %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpte, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
 # else
                 AssertMsgFailed(("Physical address doesn't match! iPdpte %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpte, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r13937 → r13991

         pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
         pVM->pgm.s.pHCShwAmd64CR3 = 0;
-        pVM->pgm.s.pHCPaePML4 = 0;
+        pVM->pgm.s.pShwPaePml4R3 = 0;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+        pVM->pgm.s.pShwPaePml4R0 = 0;
+# endif
         pVM->pgm.s.HCPhysPaePML4 = 0;
     }
…
         goto l_try_again;
     }
-    pVM->pgm.s.pHCPaePML4 = (PX86PML4)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
+    pVM->pgm.s.pShwPaePml4R3 = (R3PTRTYPE(PX86PML4))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    pVM->pgm.s.pShwPaePml4R0 = (R0PTRTYPE(PX86PML4))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
+# endif
     pVM->pgm.s.HCPhysPaePML4 = pVM->pgm.s.pHCShwAmd64CR3->Core.Key;
 }
…
     if (!HWACCMIsNestedPagingActive(pVM))
     {
-        pVM->pgm.s.pHCPaePML4 = 0;
+        pVM->pgm.s.pShwPaePml4R3 = 0;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+        pVM->pgm.s.pShwPaePml4R0 = 0;
+# endif
         pVM->pgm.s.HCPhysPaePML4 = 0;
         if (pVM->pgm.s.pHCShwAmd64CR3)
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r13823 → r13991

 
     /* PML4 */
-    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    X86PML4E Pml4e = CTXMID(pVM->pgm.s.p,PaePML4)->a[iPml4];
+    X86PML4E Pml4e;
+    Pml4e.u = pgmShwGetLongModePML4E(&pVM->pgm.s, GCPtr);
     if (!Pml4e.n.u1Present)
         return VERR_PAGE_TABLE_NOT_PRESENT;
…
     EPTPDE Pde;
 
-    int rc = PGMShwGetEPTPDPtr(pVM, GCPtr, NULL, &pPDDst);
+    int rc = pgmShwGetEPTPDPtr(pVM, GCPtr, NULL, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
…
     X86PDEPAE Pde;
     /* PML4 */
-    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    X86PML4E Pml4e = CTXMID(pVM->pgm.s.p,PaePML4)->a[iPml4];
+    X86PML4E Pml4e;
+    Pml4e.u = pgmShwGetLongModePML4E(&pVM->pgm.s, GCPtr);
     if (!Pml4e.n.u1Present)
         return VERR_PAGE_TABLE_NOT_PRESENT;
…
     EPTPDE Pde;
 
-    rc = PGMShwGetEPTPDPtr(pVM, GCPtr, NULL, &pPDDst);
+    rc = pgmShwGetEPTPDPtr(pVM, GCPtr, NULL, &pPDDst);
     if (rc != VINF_SUCCESS)
     {
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r13933 → r13991

     GEN_CHECK_OFF(PGM, pGCPaePDPT);
     GEN_CHECK_OFF(PGM, HCPhysPaePDPT);
-    GEN_CHECK_OFF(PGM, pHCPaePML4);
+    GEN_CHECK_OFF(PGM, pShwPaePml4R3);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+    GEN_CHECK_OFF(PGM, pShwPaePml4R0);
+#endif
     GEN_CHECK_OFF(PGM, HCPhysPaePML4);
     GEN_CHECK_OFF(PGM, pfnR3ShwRelocate);