VirtualBox

儲存庫 vbox 的更動 18291


忽略:
時間戳記:
2009-3-26 上午05:11:07 (16 年 以前)
作者:
vboxsync
訊息:

PGM: Map PGMRAMRANGES above 4GB outside HMA (see defect). Changed PGMR3MapPT to take a flag indicating whether PGMR3UnmapPT will be used; this way we can select a more optimal allocation function for the ram ranges. PGMMapResolveConflicts: Walk the list correctly after reloc. pgmMapClearShadowPDEs: Don't clear PGM_PLXFLAGS_MAPPING when we shouldn't (odd PAE cases).

位置:
trunk
檔案:
修改 7 筆資料

圖例:

未更動
新增
刪除
  • trunk/include/VBox/pgm.h

    r18143 r18291  
    511511#endif /* !VBOX_WITH_NEW_PHYS_CODE */
    512512VMMDECL(void)       PGMR3PhysSetA20(PVM pVM, bool fEnable);
    513 VMMR3DECL(int)      PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc);
     513/** @name PGMR3MapPT flags.
     514 * @{ */
     515/** The mapping may be unmapped later. The default is permanent mappings. */
     516#define PGMR3MAPPT_FLAGS_UNMAPPABLE     RT_BIT(0)
     517/** @} */
     518VMMR3DECL(int)      PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc);
    514519VMMR3DECL(int)      PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr);
    515520VMMR3DECL(int)      PGMR3FinalizeMappings(PVM pVM);
  • trunk/src/VBox/VMM/MMHyper.cpp

    r18286 r18291  
    160160    while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
    161161        pVM->mm.s.cbHyperArea -= _4M;
    162     int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea,
     162    int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
    163163                        mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
    164164    if (RT_FAILURE(rc))
  • trunk/src/VBox/VMM/PGM.cpp

    r18203 r18291  
    11851185    pVM->pgm.s.enmHostMode      = SUPPAGINGMODE_INVALID;
    11861186    pVM->pgm.s.GCPhysCR3        = NIL_RTGCPHYS;
     1187    pVM->pgm.s.GCPtrPrevRamRangeMapping = MM_HYPER_AREA_ADDRESS;
    11871188    pVM->pgm.s.fA20Enabled      = true;
    11881189    pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
     
    19261927    if (pVM->pgm.s.pRamRangesR3)
    19271928    {
    1928         pVM->pgm.s.pRamRangesRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pRamRangesR3);
    1929         for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur->pNextR3; pCur = pCur->pNextR3)
    1930             pCur->pNextRC = MMHyperR3ToRC(pVM, pCur->pNextR3);
     1929        /* Update the pSelfRC pointers and relink them. */
     1930        for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
     1931            if (!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING))
     1932                pCur->pSelfRC = MMHyperCCToRC(pVM, pCur);
     1933        pgmR3PhysRelinkRamRanges(pVM);
    19311934    }
    19321935
  • trunk/src/VBox/VMM/PGMInternal.h

    r18230 r18291  
    364364#endif
    365365
    366 /** Size of the GCPtrConflict array in PGMMAPPING. */
     366/** Size of the GCPtrConflict array in PGMMAPPING.
     367 * @remarks Must be a power of two. */
    367368#define PGMMAPPING_CONFLICT_MAX         8
    368369
     
    395396    R3PTRTYPE(const char *)             pszDesc;
    396397    /** Last 8 addresses that caused conflicts. */
    397     RTGCPTR                             GCPtrConflict[PGMMAPPING_CONFLICT_MAX];
     398    RTGCPTR                             aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
    398399    /** Number of conflicts for this hypervisor mapping. */
    399400    uint32_t                            cConflicts;
     
    10151016typedef struct PGMRAMRANGE
    10161017{
     1018    /** Start of the range. Page aligned. */
     1019    RTGCPHYS                            GCPhys;
     1020    /** Size of the range. (Page aligned of course). */
     1021    RTGCPHYS                            cb;
    10171022    /** Pointer to the next RAM range - for R3. */
    10181023    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
     
    10211026    /** Pointer to the next RAM range - for RC. */
    10221027    RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    1023     /** Pointer alignment. */
    1024     RTRCPTR                             RCPtrAlignment;
    1025     /** Start of the range. Page aligned. */
    1026     RTGCPHYS                            GCPhys;
     1028    /** PGM_RAM_RANGE_FLAGS_* flags. */
     1029    uint32_t                            fFlags;
    10271030    /** Last address in the range (inclusive). Page aligned (-1). */
    10281031    RTGCPHYS                            GCPhysLast;
    1029     /** Size of the range. (Page aligned of course). */
    1030     RTGCPHYS                            cb;
    1031     /** MM_RAM_* flags */
    1032     uint32_t                            fFlags;
    1033     uint32_t                            u32Alignment; /**< alignment. */
     1032    /** Start of the HC mapping of the range. This is only used for MMIO2. */
     1033    R3PTRTYPE(void *)                   pvR3;
    10341034#ifndef VBOX_WITH_NEW_PHYS_CODE
    10351035    /** R3 virtual lookup ranges for chunks.
     
    10421042# endif
    10431043#endif
    1044     /** Start of the HC mapping of the range. This is only used for MMIO2. */
    1045     R3PTRTYPE(void *)                   pvR3;
    10461044    /** The range description. */
    10471045    R3PTRTYPE(const char *)             pszDesc;
    1048 
     1046    /** Pointer to self - R0 pointer. */
     1047    R0PTRTYPE(struct PGMRAMRANGE *)     pSelfR0;
     1048    /** Pointer to self - RC pointer. */
     1049    RCPTRTYPE(struct PGMRAMRANGE *)     pSelfRC;
    10491050    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
    1050 #ifdef VBOX_WITH_NEW_PHYS_CODE
    1051     uint32_t                            au32Reserved[2];
    1052 #elif HC_ARCH_BITS == 32
    1053     uint32_t                            au32Reserved[1];
    1054 #endif
    1055 
     1051#if HC_ARCH_BITS == (defined(VBOX_WITH_NEW_PHYS_CODE) ? 64 : 32)
     1052    uint32_t                            u32Alignment2;
     1053#endif
    10561054    /** Array of physical guest page tracking structures. */
    10571055    PGMPAGE                             aPages[1];
     
    10601058typedef PGMRAMRANGE *PPGMRAMRANGE;
    10611059
    1062 #ifndef VBOX_WITH_NEW_PHYS_CODE
     1060#ifdef VBOX_WITH_NEW_PHYS_CODE
     1061/** @name PGMRAMRANGE::fFlags
     1062 * @{ */
     1063/** The RAM range is floating around as an independent guest mapping. */
     1064#define PGM_RAM_RANGE_FLAGS_FLOATING        RT_BIT(20)
     1065/** @} */
     1066#else
    10631067/** Return hc ptr corresponding to the ram range and physical offset */
    10641068#define PGMRAMRANGE_GETHCPTR(pRam, off) \
     
    23802384    /** RC pointer corresponding to PGM::pRamRangesR3. */
    23812385    RCPTRTYPE(PPGMRAMRANGE)         pRamRangesRC;
    2382     /** The configured RAM size. */
     2386    /** The configured RAM size.
     2387     * @remarks Do NOT use this, it's too small to hold the whole stuff.
     2388     * @todo    Remove with VBOX_WITH_NEW_PHYS_CODE! */
    23832389    RTUINT                          cbRamSize;
    23842390
     
    24282434    /** Base address (GC) of fixed mapping */
    24292435    RTGCPTR                         GCPtrMappingFixed;
    2430 #if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    2431     uint32_t                        u32Padding0; /**< alignment padding. */
    2432 #endif
    2433 
     2436    /** The address of the previous RAM range mapping. */
     2437    RTGCPTR                         GCPtrPrevRamRangeMapping;
    24342438
    24352439    /** @name Intermediate Context
     
    28892893#ifdef IN_RING3
    28902894int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
     2895void            pgmR3PhysRelinkRamRanges(PVM pVM);
    28912896int             pgmR3PhysRamReset(PVM pVM);
    28922897int             pgmR3PhysRomReset(PVM pVM);
     
    29292934#endif
    29302935
    2931 void            pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE);
    29322936void            pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
     2937void            pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
     2938int             pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
    29332939int             pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
    2934 int             pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
    29352940
    29362941int             pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
  • trunk/src/VBox/VMM/PGMMap.cpp

    r17898 r18291  
    5353 * @param   GCPtr           Virtual Address. (Page table aligned!)
    5454 * @param   cb              Size of the range. Must be a 4MB aligned!
     55 * @param   fFlags          PGMR3MAPPT_FLAGS_UNMAPPABLE or 0.
    5556 * @param   pfnRelocate     Relocation callback function.
    5657 * @param   pvUser          User argument to the callback.
    5758 * @param   pszDesc         Pointer to description string. This must not be freed.
    5859 */
    59 VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
    60 {
    61     LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, pfnRelocate, pvUser, pszDesc));
     60VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
     61{
     62    LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d fFlags=%#x pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, fFlags, pfnRelocate, pvUser, pszDesc));
    6263    AssertMsg(pVM->pgm.s.pInterPD, ("Paging isn't initialized, init order problems!\n"));
    6364
     
    6566     * Validate input.
    6667     */
     68    Assert(!fFlags || fFlags == PGMR3MAPPT_FLAGS_UNMAPPABLE);
    6769    if (cb < _2M || cb > 64 * _1M)
    6870    {
     
    130132     */
    131133    PPGMMAPPING pNew;
    132     int rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM, (void **)&pNew);
     134    int rc;
     135    if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
     136        rc = MMHyperAlloc(           pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
     137    else
     138        rc = MMR3HyperAllocOnceNoRel(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew);
    133139    if (RT_FAILURE(rc))
    134140        return rc;
     
    146152     */
    147153    uint8_t *pbPTs;
    148     rc = MMHyperAlloc(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM, (void **)&pbPTs);
     154    if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE)
     155        rc = MMHyperAlloc(           pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
     156    else
     157        rc = MMR3HyperAllocOnceNoRel(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs);
    149158    if (RT_FAILURE(rc))
    150159    {
     
    216225 * @param   pVM     VM Handle.
    217226 * @param   GCPtr   Virtual Address. (Page table aligned!)
     227 *
     228 * @remarks Don't call this without passing PGMR3MAPPT_FLAGS_UNMAPPABLE to
     229 *          PGMR3MapPT or you'll burn in the heap.
    218230 */
    219231VMMR3DECL(int)  PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
     
    648660
    649661    /*
    650      * Mark the mappings as fixed (using fake values) and disabled. 
     662     * Mark the mappings as fixed (using fake values) and disabled.
    651663     */
    652664    pVM->pgm.s.fDisableMappings  = true;
     
    900912    unsigned i = pMap->cPTs;
    901913
    902     pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE);
     914    pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE, false /*fDeactivateCR3*/);
    903915
    904916    iOldPDE += i;
     
    10701082}
    10711083
     1084
    10721085/**
    10731086 * Checks if a new mapping address wasn't previously used and caused a clash with guest mappings.
     
    10791092bool pgmR3MapIsKnownConflictAddress(PPGMMAPPING pMapping, RTGCPTR GCPtr)
    10801093{
    1081     for (unsigned i=0; i<RT_ELEMENTS(pMapping->GCPtrConflict); i++)
    1082     {
    1083         if (GCPtr == pMapping->GCPtrConflict[i])
     1094    for (unsigned i = 0; i < RT_ELEMENTS(pMapping->aGCPtrConflicts); i++)
     1095    {
     1096        if (GCPtr == pMapping->aGCPtrConflicts[i])
    10841097            return true;
    10851098    }
    10861099    return false;
    10871100}
     1101
    10881102
    10891103/**
     
    11021116    STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
    11031117
    1104     pMapping->GCPtrConflict[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
     1118    pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
    11051119    pMapping->cConflicts++;
    11061120
     
    11751189    STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
    11761190
    1177     pMapping->GCPtrConflict[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
     1191    pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
    11781192    pMapping->cConflicts++;
    11791193
     
    12441258}
    12451259
     1260
    12461261/**
    12471262 * Read memory from the guest mappings.
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r18266 r18291  
    572572
    573573/**
     574 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
     575 *
     576 * Called when anything was relocated.
     577 *
     578 * @param   pVM         Pointer to the shared VM structure.
     579 */
     580void pgmR3PhysRelinkRamRanges(PVM pVM)
     581{
     582    PPGMRAMRANGE pCur;
     583
     584#ifdef VBOX_STRICT
     585    for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
     586    {
     587        Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
     588        Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
     589        Assert((pCur->GCPhys     & PAGE_OFFSET_MASK) == 0);
     590        Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
     591        Assert((pCur->cb         & PAGE_OFFSET_MASK) == 0);
     592        Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
     593        for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesR3; pCur2; pCur2 = pCur2->pNextR3)
     594            Assert(   pCur2 == pCur
     595                   || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
     596    }
     597#endif
     598
     599    pCur = pVM->pgm.s.pRamRangesR3;
     600    if (pCur)
     601    {
     602        pVM->pgm.s.pRamRangesR0 = pCur->pSelfR0;
     603        pVM->pgm.s.pRamRangesRC = pCur->pSelfRC;
     604
     605        for (; pCur->pNextR3; pCur = pCur->pNextR3)
     606        {
     607            pCur->pNextR0 = pCur->pNextR3->pSelfR0;
     608            pCur->pNextRC = pCur->pNextR3->pSelfRC;
     609        }
     610
     611        Assert(pCur->pNextR0 == NIL_RTR0PTR);
     612        Assert(pCur->pNextRC == NIL_RTRCPTR);
     613    }
     614    else
     615    {
     616        Assert(pVM->pgm.s.pRamRangesR0 == NIL_RTR0PTR);
     617        Assert(pVM->pgm.s.pRamRangesRC == NIL_RTRCPTR);
     618    }
     619}
     620
     621
     622/**
    574623 * Links a new RAM range into the list.
    575624 *
     
    581630{
    582631    AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
     632    Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
     633    Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));
    583634
    584635    pgmLock(pVM);
     
    586637    PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
    587638    pNew->pNextR3 = pRam;
    588     pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
    589     pNew->pNextRC = pRam ? MMHyperCCToRC(pVM, pRam) : NIL_RTRCPTR;
     639    pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
     640    pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;
    590641
    591642    if (pPrev)
    592643    {
    593644        pPrev->pNextR3 = pNew;
    594         pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
    595         pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
     645        pPrev->pNextR0 = pNew->pSelfR0;
     646        pPrev->pNextRC = pNew->pSelfRC;
    596647    }
    597648    else
    598649    {
    599650        pVM->pgm.s.pRamRangesR3 = pNew;
    600         pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
    601         pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
     651        pVM->pgm.s.pRamRangesR0 = pNew->pSelfR0;
     652        pVM->pgm.s.pRamRangesRC = pNew->pSelfRC;
    602653    }
    603654
     
    616667{
    617668    Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
     669    Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
     670    Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));
    618671
    619672    pgmLock(pVM);
     
    623676    {
    624677        pPrev->pNextR3 = pNext;
    625         pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
    626         pPrev->pNextRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
     678        pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
     679        pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
    627680    }
    628681    else
     
    630683        Assert(pVM->pgm.s.pRamRangesR3 == pRam);
    631684        pVM->pgm.s.pRamRangesR3 = pNext;
    632         pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
    633         pVM->pgm.s.pRamRangesRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
     685        pVM->pgm.s.pRamRangesR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
     686        pVM->pgm.s.pRamRangesRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
    634687    }
    635688
     
    646699static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
    647700{
     701    pgmLock(pVM);
     702
    648703    /* find prev. */
    649704    PPGMRAMRANGE pPrev = NULL;
     
    657712
    658713    pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
     714
     715    pgmUnlock(pVM);
    659716}
    660717
     
    702759}
    703760#endif /* VBOX_WITH_NEW_PHYS_CODE */
     761
     762
     763/**
     764 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
     765 *
     766 * @param   pVM             The VM handle.
     767 * @param   pNew            The new RAM range.
     768 * @param   GCPhys          The address of the RAM range.
     769 * @param   GCPhysLast      The last address of the RAM range.
     770 * @param   RCPtrNew        The RC address if the range is floating. NIL_RTRCPTR
     771 *                          if in HMA.
     772 * @param   R0PtrNew        Ditto for R0.
     773 * @param   pszDesc         The description.
     774 * @param   pPrev           The previous RAM range (for linking).
     775 */
     776static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
     777                                         RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
     778{
     779    /*
     780     * Initialize the range.
     781     */
     782    pNew->pSelfR0       = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
     783    pNew->pSelfRC       = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
     784    pNew->GCPhys        = GCPhys;
     785    pNew->GCPhysLast    = GCPhysLast;
     786    pNew->cb            = GCPhysLast - GCPhys + 1;
     787    pNew->pszDesc       = pszDesc;
     788    pNew->fFlags        = RCPtrNew != NIL_RTR0PTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
     789    pNew->pvR3          = NULL;
     790
     791    uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
     792    RTGCPHYS iPage = cPages;
     793    while (iPage-- > 0)
     794        PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
     795
     796    /* Update the page count stats. */
     797    pVM->pgm.s.cZeroPages += cPages;
     798    pVM->pgm.s.cAllPages  += cPages;
     799
     800    /*
     801     * Link it.
     802     */
     803    pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
     804}
     805
     806
     807/**
     808 * Relocate a floating RAM range.
     809 *
     810 * @copydoc FNPGMRELOCATE.
     811 */
     812static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
     813{
     814    PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
     815    Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
     816    Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE);
     817
     818    switch (enmMode)
     819    {
     820        case PGMRELOCATECALL_SUGGEST:
     821            return true;
     822        case PGMRELOCATECALL_RELOCATE:
     823        {
     824            /* Update myself and then relink all the ranges. */
     825            pgmLock(pVM);
     826            pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
     827            pgmR3PhysRelinkRamRanges(pVM);
     828            pgmUnlock(pVM);
     829            return true;
     830        }
     831
     832        default:
     833            AssertFailedReturn(false);
     834    }
     835}
     836
     837
     838/**
     839 * PGMR3PhysRegisterRam worker that registers a high chunk.
     840 *
     841 * @returns VBox status code.
     842 * @param   pVM             The VM handle.
     843 * @param   GCPhys          The address of the RAM.
     844 * @param   cRamPages       The number of RAM pages to register.
     845 * @param   cbChunk         The size of the PGMRAMRANGE guest mapping.
     846 * @param   iChunk          The chunk number.
     847 * @param   pszDesc         The RAM range description.
     848 * @param   ppPrev          Previous RAM range pointer. In/Out.
     849 */
     850static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
     851                                         uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
     852                                         PPGMRAMRANGE *ppPrev)
     853{
     854    const char *pszDescChunk = iChunk == 0
     855                             ? pszDesc
     856                             : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
     857    AssertReturn(pszDescChunk, VERR_NO_MEMORY);
     858
     859    /*
     860     * Allocate memory for the new chunk.
     861     */
     862    size_t const cChunkPages  = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
     863    PSUPPAGE     paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
     864    AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
     865    RTR0PTR      R0PtrChunk   = NIL_RTR0PTR;
     866    void        *pvChunk      = NULL;
     867    int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
     868#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     869                              VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
     870#else
     871                              NULL,
     872#endif
     873                              paChunkPages);
     874    if (RT_SUCCESS(rc))
     875    {
     876#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     877        if (!VMMIsHwVirtExtForced(pVM))
     878            R0PtrChunk = NIL_RTR0PTR;
     879#else
     880        R0PtrChunk = (uintptr_t)pvChunk;
     881#endif
     882        memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
     883
     884        PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
     885
     886        /*
     887         * Create a mapping and map the pages into it.
     888         * We push these in below the HMA.
     889         */
     890        RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
     891        rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
     892        if (RT_SUCCESS(rc))
     893        {
     894            pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
     895
     896            RTGCPTR const   GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
     897            RTGCPTR         GCPtrPage  = GCPtrChunk;
     898            for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
     899                rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
     900            if (RT_SUCCESS(rc))
     901            {
     902                /*
     903                 * Ok, init and link the range.
     904                 */
     905                pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
     906                                             (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
     907                *ppPrev = pNew;
     908            }
     909        }
     910
     911        if (RT_FAILURE(rc))
     912            SUPR3PageFreeEx(pvChunk, cChunkPages);
     913    }
     914
     915    RTMemTmpFree(paChunkPages);
     916    return rc;
     917}
    704918
    705919
     
    759973        return rc;
    760974
    761     /*
    762      * Allocate RAM range.
    763      */
    764     const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
    765     PPGMRAMRANGE pNew;
    766     rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
    767     AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
    768 
    769     /*
    770      * Initialize the range.
    771      */
    772     pNew->GCPhys        = GCPhys;
    773     pNew->GCPhysLast    = GCPhysLast;
    774     pNew->pszDesc       = pszDesc;
    775     pNew->cb            = cb;
    776     pNew->fFlags        = 0;
    777 
    778     pNew->pvR3          = NULL;
     975#ifdef VBOX_WITH_NEW_PHYS_CODE
     976    if (    GCPhys >= _4G
     977        &&  cPages > 256)
     978    {
     979        /*
     980         * The PGMRAMRANGE structures for the high memory can get very big.
     981         * In order to avoid SUPR3PageAllocEx allocation failures due to the
     982         * allocation size limit there and also to avoid being unable to find
     983         * guest mapping space for them, we split this memory up into 4MB in
     984         * (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
     985         * mode.
     986         *
     987         * The first and last page of each mapping are guard pages and marked
     988         * not-present. So, we've got 4186112 and 16769024 bytes available for
     989         * the PGMRAMRANGE structure.
     990         *
     991         * Note! The sizes used here will influence the saved state.
     992         */
     993        uint32_t cbChunk;
     994        uint32_t cPagesPerChunk;
     995        if (VMMIsHwVirtExtForced(pVM))
     996        {
     997            cbChunk = 16U*_1M;
     998            cPagesPerChunk = 1048048; /* max ~1048059 */
     999            AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
     1000        }
     1001        else
     1002        {
     1003            cbChunk = 4U*_1M;
     1004            cPagesPerChunk = 261616; /* max ~261627 */
     1005            AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 261616  <  4U*_1M - PAGE_SIZE * 2);
     1006        }
     1007        AssertRelease(RT_UOFFSETOF(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
     1008
     1009        RTGCPHYS cPagesLeft  = cPages;
     1010        RTGCPHYS GCPhysChunk = GCPhys;
     1011        uint32_t iChunk      = 0;
     1012        while (cPagesLeft > 0)
     1013        {
     1014            uint32_t cPagesInChunk = cPagesLeft;
     1015            if (cPagesInChunk > cPagesPerChunk)
     1016                cPagesInChunk = cPagesPerChunk;
     1017
     1018            rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
     1019            AssertRCReturn(rc, rc);
     1020
     1021            /* advance */
     1022            GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
     1023            cPagesLeft  -= cPagesInChunk;
     1024            iChunk++;
     1025        }
     1026    }
     1027    else
     1028#endif
     1029    {
     1030        /*
     1031         * Allocate, initialize and link the new RAM range.
     1032         */
     1033        const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
     1034        PPGMRAMRANGE pNew;
     1035        rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
     1036        AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
     1037
    7791038#ifndef VBOX_WITH_NEW_PHYS_CODE
    780     pNew->paChunkR3Ptrs = NULL;
    781 
    782     /* Allocate memory for chunk to HC ptr lookup array. */
    783     rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
    784     AssertRCReturn(rc, rc);
    785     pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
    786 
     1039        /* Allocate memory for chunk to HC ptr lookup array. */
     1040        pNew->paChunkR3Ptrs = NULL;
     1041        rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
     1042        AssertRCReturn(rc, rc);
     1043        pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
    7871044#endif
    788     RTGCPHYS iPage = cPages;
    789     while (iPage-- > 0)
    790         PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
    791 
    792     /* Update the page count stats. */
    793     pVM->pgm.s.cZeroPages += cPages;
    794     pVM->pgm.s.cAllPages  += cPages;
    795 
    796     /*
    797      * Insert the new RAM range.
    798      */
    799     pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
     1045
     1046        pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
     1047    }
    8001048
    8011049    /*
     
    8371085    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
    8381086    {
    839         uint32_t    iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
     1087        uint32_t iPage = pRam->cb >> PAGE_SHIFT;
     1088        AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
     1089
    8401090#ifdef VBOX_WITH_NEW_PHYS_CODE
    8411091        if (!pVM->pgm.s.fRamPreAlloc)
     
    10671317
    10681318        /* Initialize the range. */
     1319        pNew->pSelfR0       = MMHyperCCToR0(pVM, pNew);
     1320        pNew->pSelfRC       = MMHyperCCToRC(pVM, pNew);
    10691321        pNew->GCPhys        = GCPhys;
    10701322        pNew->GCPhysLast    = GCPhysLast;
     1323        pNew->cb            = cb;
    10711324        pNew->pszDesc       = pszDesc;
    1072         pNew->cb            = cb;
    1073         pNew->fFlags        = 0; /* Some MMIO flag here? */
     1325        pNew->fFlags        = 0; /** @todo add some kind of ad-hoc flag? */
    10741326
    10751327        pNew->pvR3          = NULL;
     
    12901542
    12911543    const uint32_t cPages = cb >> PAGE_SHIFT;
    1292     AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
     1544    AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
    12931545    AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
    12941546
     
    13181570        if (RT_SUCCESS(rc))
    13191571        {
    1320             pNew->pDevInsR3 = pDevIns;
    1321             pNew->pvR3 = pvPages;
    1322             //pNew->pNext = NULL;
    1323             //pNew->fMapped = false;
    1324             //pNew->fOverlapping = false;
    1325             pNew->iRegion = iRegion;
    1326             pNew->RamRange.GCPhys = NIL_RTGCPHYS;
    1327             pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
    1328             pNew->RamRange.pszDesc = pszDesc;
    1329             pNew->RamRange.cb = cb;
    1330             //pNew->RamRange.fFlags = 0;
    1331 
    1332             pNew->RamRange.pvR3 = pvPages;       ///@todo remove this [new phys code]
     1572            pNew->pDevInsR3             = pDevIns;
     1573            pNew->pvR3                  = pvPages;
     1574            //pNew->pNext               = NULL;
     1575            //pNew->fMapped             = false;
     1576            //pNew->fOverlapping        = false;
     1577            pNew->iRegion               = iRegion;
     1578            pNew->RamRange.pSelfR0      = MMHyperCCToR0(pVM, &pNew->RamRange);
     1579            pNew->RamRange.pSelfRC      = MMHyperCCToRC(pVM, &pNew->RamRange);
     1580            pNew->RamRange.GCPhys       = NIL_RTGCPHYS;
     1581            pNew->RamRange.GCPhysLast   = NIL_RTGCPHYS;
     1582            pNew->RamRange.pszDesc      = pszDesc;
     1583            pNew->RamRange.cb           = cb;
     1584            //pNew->RamRange.fFlags     = 0; /// @todo MMIO2 flag?
     1585
     1586            pNew->RamRange.pvR3         = pvPages;
    13331587#ifndef VBOX_WITH_NEW_PHYS_CODE
    1334             pNew->RamRange.paChunkR3Ptrs = NULL; ///@todo remove this [new phys code]
     1588            pNew->RamRange.paChunkR3Ptrs = NULL;
    13351589#endif
    13361590
     
    18802134        {
    18812135            AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
    1882                                   ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
    1883                                    GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
    1884                                   VERR_PGM_RAM_CONFLICT);
     2136                                  ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
     2137                                   pRam->GCPhys + (RTGCPTR)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT,
     2138                                   pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
    18852139            Assert(PGM_PAGE_IS_ZERO(pPage));
    18862140            pPage++;
     
    19442198            if (!fRamExists)
    19452199            {
     2200                pRamNew->pSelfR0       = MMHyperCCToR0(pVM, pRamNew);
     2201                pRamNew->pSelfRC       = MMHyperCCToRC(pVM, pRamNew);
    19462202                pRamNew->GCPhys        = GCPhys;
    19472203                pRamNew->GCPhysLast    = GCPhysLast;
     2204                pRamNew->cb            = cb;
    19482205                pRamNew->pszDesc       = pszDesc;
    1949                 pRamNew->cb            = cb;
    19502206                pRamNew->fFlags        = 0;
    19512207                pRamNew->pvR3          = NULL;
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r17667 r18291  
    268268            case PGMMODE_PAE_NX:
    269269            {
    270                 PX86PDPT  pShwPdpt;
    271                 PX86PDPAE pShwPaePd;
    272                 const unsigned iPdPt = iNewPDE / 256;
    273                 unsigned iPDE = iNewPDE * 2 % 512;
    274 
    275                 pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
     270                const unsigned  iPdPt     = iNewPDE / 256;
     271                unsigned        iPDE      = iNewPDE * 2 % 512;
     272                PX86PDPT        pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    276273                Assert(pShwPdpt);
    277274#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
    278275                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
    279276#endif
    280                 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
     277                PX86PDPAE       pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
    281278                if (!pShwPaePd)
    282279                {
     
    312309                    pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
    313310                }
    314 # ifdef VBOX_STRICT
    315                 else 
     311#ifdef VBOX_STRICT
     312                else
    316313                if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
    317314                {
     
    321318                    AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
    322319                }
    323 # endif
     320#endif
    324321                if (    pShwPaePd->a[iPDE].n.u1Present
    325322                    &&  !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
     
    364361}
    365362
     363
    366364/**
    367365 * Clears all PDEs involved with the mapping in the shadow page table.
    368366 *
    369  * @param   pVM         The VM handle.
    370  * @param   pShwPageCR3 CR3 root page
    371  * @param   pMap        Pointer to the mapping in question.
    372  * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
    373  */
    374 void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
    375 {
    376     Log(("pgmMapClearShadowPDEs old pde %x (cPTs=%x) (mappings enabled %d)\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s)));
     367 * @param   pVM             The VM handle.
     368 * @param   pShwPageCR3     CR3 root page
     369 * @param   pMap            Pointer to the mapping in question.
     370 * @param   iOldPDE         The index of the 32-bit PDE corresponding to the base of the mapping.
     371 * @param   fDeactivateCR3  Set if it's pgmMapDeactivateCR3 calling.
     372 */
     373void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
     374{
     375    Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));
    377376
    378377    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
     
    415414            case PGMMODE_PAE_NX:
    416415            {
    417                 PX86PDPT  pShwPdpt = NULL;
    418                 PX86PDPAE pShwPaePd = NULL;
    419 
    420                 const unsigned iPdpt = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
    421                 unsigned iPDE = iOldPDE * 2 % 512;
    422                 pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
    423                 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
     416                const unsigned  iPdpt     = iOldPDE / 256;      /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
     417                unsigned        iPDE      = iOldPDE * 2 % 512;
     418                PX86PDPT        pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
     419                PX86PDPAE       pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
    424420
    425421                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
    426                 pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
    427 
     422                if (fDeactivateCR3)
     423                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
     424                else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
     425                {
     426                    /* See if there are any other mappings here. This is suboptimal code. */
     427                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
     428                    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
     429                        if (    pCur != pMap
     430                            &&  (   (pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt
     431                                 || (pCur->GCPtrLast >> X86_PDPT_SHIFT) == iPdpt))
     432                        {
     433                            pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
     434                            break;
     435                        }
     436                }
    428437                if (pCurrentShwPdpt)
    429438                {
     
    474483 * @param   iPDE        The index of the 32-bit PDE corresponding to the base of the mapping.
    475484 */
    476 void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
     485static void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
    477486{
    478487    Assert(pShwPageCR3);
    479488
    480     unsigned i = pMap->cPTs;
     489    uint32_t i = pMap->cPTs;
    481490    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);
    482491
     
    486495        iPDE--;
    487496
    488         switch(enmShadowMode)
     497        switch (enmShadowMode)
    489498        {
    490499            case PGMMODE_32_BIT:
     
    494503
    495504                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
    496                           ("Expected %x vs %x\n", pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT)));
     505                          ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
     506                           pShw32BitPd->a[iPDE].u,  (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
     507                           iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
    497508                break;
    498509            }
     
    501512            case PGMMODE_PAE_NX:
    502513            {
    503                 PX86PDPT  pPdpt = NULL;
    504                 PX86PDPAE pShwPaePd = NULL;
    505 
    506                 const unsigned iPD = iPDE / 256;         /* iPDE * 2 / 512; iPDE is in 4 MB pages */
    507                 unsigned iPaePDE = iPDE * 2 % 512;
    508                 pPdpt     = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
    509                 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
     514                const unsigned  iPD       = iPDE / 256;         /* iPDE * 2 / 512; iPDE is in 4 MB pages */
     515                unsigned        iPaePDE   = iPDE * 2 % 512;
     516                PX86PDPT        pPdpt     = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
     517                PX86PDPAE       pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
    510518                AssertFatal(pShwPaePd);
    511519
    512520                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
    513                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPaePT0)));
     521                          ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
     522                           pShwPaePd->a[iPDE].u,     (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
     523                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
    514524
    515525                iPaePDE++;
     
    517527
    518528                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
    519                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPaePT1)));
    520 
    521                 Assert(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING);
     529                          ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
     530                           pShwPaePd->a[iPDE].u,     (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
     531                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
     532
     533                AssertMsg(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING,
     534                          ("%RX64; iPD=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
     535                           pPdpt->a[iPD].u,
     536                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
    522537                break;
    523538            }
     
    530545}
    531546
     547
    532548/**
    533549 * Check the hypervisor mappings in the active CR3.
     
    558574
    559575#ifndef IN_RING0
     576
    560577/**
    561578 * Apply the hypervisor mappings to the active CR3.
     
    573590        return VINF_SUCCESS;
    574591
    575     /* @note A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    576     Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));
     592    /* Note. A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
     593    Log4(("pgmMapActivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));
    577594
    578595    Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));
     
    584601    {
    585602        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
    586 
    587603        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    588604    }
     
    607623
    608624    Assert(pShwPageCR3);
     625    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));
    609626
    610627    /*
     
    614631    {
    615632        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
    616 
    617         pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
     633        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
    618634    }
    619635    return VINF_SUCCESS;
    620636}
     637
    621638
    622639/**
     
    711728}
    712729
     730
    713731/**
    714732 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
     
    728746    Assert(enmGuestMode <= PGMMODE_PAE_NX);
    729747
    730     /*
    731      * Iterate mappings.
    732      */
    733748    if (enmGuestMode == PGMMODE_32_BIT)
    734749    {
     
    739754        Assert(pPD);
    740755
    741         for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
     756        /*
     757         * Iterate mappings.
     758         */
     759        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
    742760        {
    743             unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
    744             unsigned iPT = pCur->cPTs;
     761            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
     762            unsigned    iPDE  = pCur->GCPtr >> X86_PD_SHIFT;
     763            unsigned    iPT   = pCur->cPTs;
    745764            while (iPT-- > 0)
    746765            {
    747766                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
    748                     &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
     767                    &&  (   pVM->fRawR0Enabled
     768                         || pPD->a[iPDE + iPT].n.u1User))
    749769                {
    750770                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
     
    753773                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
    754774                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
    755                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
    756                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
     775                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
     776                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
    757777                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
    758778                    AssertRCReturn(rc, rc);
    759 
    760                     /*
    761                      * Update pCur.
    762                      */
    763                     pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    764                     while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
    765                         pCur = pCur->CTX_SUFF(pNext);
    766779                    break;
    767780#else
    768781                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
    769782                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
    770                         (iPT + iPDE) << X86_PD_SHIFT,
    771                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
     783                         (iPT + iPDE) << X86_PD_SHIFT,
     784                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
    772785                    return VINF_PGM_SYNC_CR3;
    773786#endif
    774787                }
    775788            }
    776             if (!pCur)
    777                 break;
     789            pCur = pNext;
    778790        }
    779791    }
     
    781793             || enmGuestMode == PGMMODE_PAE_NX)
    782794    {
    783         for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
     795        /*
     796         * Iterate mappings.
     797         */
     798        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur;)
    784799        {
    785             RTGCPTR   GCPtr = pCur->GCPtr;
    786 
    787             unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
     800            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
     801            RTGCPTR     GCPtr = pCur->GCPtr;
     802            unsigned    iPT  = pCur->cb >> X86_PD_PAE_SHIFT;
    788803            while (iPT-- > 0)
    789804            {
     
    797812                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
    798813                         "                        PDE=%016RX64.\n",
    799                         GCPtr, pCur->pszDesc, Pde.u));
     814                         GCPtr, pCur->pszDesc, Pde.u));
    800815                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
    801816                    AssertRCReturn(rc, rc);
    802 
    803                     /*
    804                      * Update pCur.
    805                      */
    806                     pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    807                     while (pCur && pCur->GCPtr < GCPtr)
    808                         pCur = pCur->CTX_SUFF(pNext);
    809817                    break;
    810818#else
    811819                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
    812820                         "                        PDE=%016RX64.\n",
    813                         GCPtr, Pde.u));
     821                         GCPtr, Pde.u));
    814822                    return VINF_PGM_SYNC_CR3;
    815823#endif
     
    817825                GCPtr += (1 << X86_PD_PAE_SHIFT);
    818826            }
    819             if (!pCur)
    820                 break;
     827            pCur = pNext;
    821828        }
    822829    }
     
    824831        AssertFailed();
    825832
     833    Assert(!PGMMapHasConflicts(pVM));
    826834    return VINF_SUCCESS;
    827835}
注意: 瀏覽 TracChangeset 來幫助您使用更動檢視器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette