VirtualBox Changeset r4591

Timestamp:
2007-09-07 01:26:43 AM (17 years ago)
Author:
vboxsync
Message:

PGMPhysGCPhys2CCPtr + PGMPhysGCPhys2CCPtrRelease. Started on the NEW_PHYS_CODE.
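
The new pair is meant for short-term access: map a guest page into the current context, touch it, and release it again as soon as possible so the mapping-cache slot is freed. The sketch below is illustrative only and not part of this changeset; the helper name pgmSketchWriteGuestPage and the assumption that the write stays within a single page are the editor's, and the usual VBox/PGM headers are assumed to be included.

/* Illustrative sketch only (not part of r4591): the intended map/modify/release
   pattern for the new API pair. The helper name is hypothetical. */
#include <string.h>

static int pgmSketchWriteGuestPage(PVM pVM, RTGCPHYS GCPhys, const void *pvSrc, size_t cb)
{
    void *pv;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv);     /* map the page; pv includes the page offset of GCPhys */
    if (VBOX_SUCCESS(rc))
    {
        memcpy(pv, pvSrc, cb);                          /* short-term access only; cb must not cross the page boundary */
        PGMPhysGCPhys2CCPtrRelease(pVM, GCPhys, pv);    /* release ASAP to free the mapping cache entry */
    }
    return rc;
}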

Files:
1 edited

  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r4563 → r4591
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}


#ifdef NEW_PHYS_CODE


/**
 * Replace a zero or shared page with a new page that we can write to.
 *
 * @returns VBox status.
 * @todo    Define the return values and propagate them up the call tree.
 *
 * @param   pVM         The VM address.
 * @param   pPage       The physical page tracking structure.
 * @param   GCPhys      The address of the page.
 *
 * @remarks Called from within the PGM critical section.
 */
int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
 *
 * @param   pVM         The VM address.
 * @param   pPage       The physical page tracking structure.
 * @param   GCPhys      The address of the page.
 *
 * @remarks Called from within the PGM critical section.
 */
int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    switch (pPage->u2State)
    {
        case PGM_PAGE_STATE_WRITE_MONITORED:
            pPage->fWrittenTo = true;
            pPage->u2State = PGM_PAGE_STATE_WRITE_ALLOCATED;
            /* fall thru */
        case PGM_PAGE_STATE_ALLOCATED:
            return VINF_SUCCESS;

        /*
         * Zero pages can be dummy pages for MMIO or reserved memory,
         * so we need to check the flags before joining cause with
         * shared page replacement.
         */
        case PGM_PAGE_STATE_ZERO:
            if (    PGM_PAGE_IS_MMIO(pPage)
                ||  PGM_PAGE_IS_RESERVED(pPage))
                return VERR_PGM_PHYS_PAGE_RESERVED;
            /* fall thru */
        case PGM_PAGE_STATE_SHARED:
            return pgmPhysAllocPage(pVM, pPage, GCPhys);

        default:
            /* Don't fall off the end of a non-void function on unexpected states. */
            AssertMsgFailed(("Unexpected page state %d for %VGp\n", pPage->u2State, GCPhys));
            return VERR_INTERNAL_ERROR;
    }
}


#ifdef IN_RING3

/**
 * Tree enumeration callback for dealing with age rollover.
 * It will perform a simple compression of the current age.
 */
static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    /* ASSUMES iNow == 4 */
    PPGMCHUNKR3MAPPING pChunk = (PPGMCHUNKR3MAPPING)pNode;
    if (pChunk->iAge >= UINT32_C(0xffffff00))
        pChunk->iAge = 3;
    else if (pChunk->iAge >= UINT32_C(0xfffff000))
        pChunk->iAge = 2;
    else if (pChunk->iAge)
        pChunk->iAge = 1;
    return 0;
}


/**
 * Tree enumeration callback that updates the chunks that have
 * been used since the last ageing.
 */
static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    PPGMCHUNKR3MAPPING pChunk = (PPGMCHUNKR3MAPPING)pNode;
    if (!pChunk->iAge)
    {
        PVM pVM = (PVM)pvUser;
        RTAvllU32Remove(&pVM->pgm.s.R3ChunkTlb.pAgeTree, pChunk->AgeCore.Key);
        pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.R3ChunkTlb.iNow;
        RTAvllU32Insert(&pVM->pgm.s.R3ChunkTlb.pAgeTree, &pChunk->AgeCore);
    }

    return 0;
}


/**
 * Performs ageing of the ring-3 chunk mappings.
 *
 * @param   pVM         The VM handle.
 */
PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
{
    pVM->pgm.s.R3ChunkMap.AgeingCountdown = RT_MIN(pVM->pgm.s.R3ChunkMap.cMax / 4, 1024);
    pVM->pgm.s.R3ChunkMap.iNow++;
    if (pVM->pgm.s.R3ChunkMap.iNow == 0)
    {
        pVM->pgm.s.R3ChunkMap.iNow = 20;
        RTAvlU32DoWithAll(&pVM->pgm.s.R3ChunkMap.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
    }
    RTAvlU32DoWithAll(&pVM->pgm.s.R3ChunkMap.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
}


/**
 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
 */
typedef struct PGMR3PHYSCHUNKUNMAPCB
{
    PVM                 pVM;            /**< The VM handle. */
    PPGMR3CHUNKMAP      pChunk;         /**< The chunk to unmap. */
} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;


/**
 * Callback used to find the mapping that's been unused for
 * the longest time.
 */
static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
{
    do
    {
        PPGMR3CHUNKMAP pChunk = (PPGMR3CHUNKMAP)((uint8_t *)pNode - RT_OFFSETOF(PGMR3CHUNKMAP, AgeCore));
        if (    pChunk->iAge
            &&  !pChunk->cRefs)
        {
            /*
             * Check that it's not in any of the TLBs.
             */
            PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
            for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.R3ChunkTlb->aEntries); i++)
                if (pVM->pgm.s.R3ChunkTlb->aEntries[i].pChunk == pChunk)
                {
                    pChunk = NULL;
                    break;
                }
            if (pChunk)
                for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.CTXSUFF(PhysTlb)->aEntries); i++)
                    if (pVM->pgm.s.CTXSUFF(PhysTlb)->aEntries[i].pChunk == pChunk)
                    {
                        pChunk = NULL;
                        break;
                    }
            if (pChunk)
            {
                ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
                return 1; /* done */
            }
        }

        /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
        pNode = pNode->pList;
    } while (pNode);
    return 0;
}


/**
 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
 *
 * The candidate will not be part of any TLBs, so no need to flush
 * anything afterwards.
 *
 * @returns Chunk id.
 * @param   pVM         The VM handle.
 */
int pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
{
    /*
     * Do tree ageing first?
     */
    if (pVM->pgm.s.R3ChunkMap.AgeingCountdown-- == 0)
        PGMR3PhysChunkAgeing(pVM);

    /*
     * Enumerate the age tree starting with the left most node.
     */
    PGMR3PHYSCHUNKUNMAPCB Args;
    Args.pVM = pVM;
    Args.pChunk = NULL;
    if (RTAvllU32DoWithAll(&pVM->pgm.s.R3ChunkMap.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
        return Args.pChunk->idChunk;
    return INT32_MAX;
}


/**
 * Maps the given chunk into the ring-3 mapping cache.
 *
 * This will call ring-0.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   idChunk     The chunk in question.
 * @param   ppChunk     Where to store the chunk tracking structure.
 *
 * @remarks Called from within the PGM critical section.
 */
int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAPPING ppChunk)
{
    /*
     * Allocate a new tracking structure first.
     */
#if 0 /* for later when we've got a separate mapping method for ring-0. */
    PPGMCHUNKR3MAPPING pChunk = (PPGMCHUNKR3MAPPING)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
#else
    PPGMCHUNKR3MAPPING pChunk = (PPGMCHUNKR3MAPPING)MMHyperAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
#endif
    AssertReturn(pChunk, VERR_NO_MEMORY);
    pChunk->Core.Key = idChunk;
    pChunk->pv = NULL;
    pChunk->cRefs = 0;
    pChunk->iAge = 0;

    /*
     * Request the ring-0 part to map the chunk in question and if
     * necessary unmap another one to make space in the mapping cache.
     */
    PGMMAPCHUNKREQ Req;
    Req.pvR3 = NULL;
    Req.idChunkMap = idChunk;
    Req.idChunkUnmap = INT32_MAX;
    if (pVM->pgm.s.R3ChunkMap.c >= pVM->pgm.s.R3ChunkMap.cMax)
        Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
    /** @todo SUPCallVMMR0Ex needs to support in+out or similar.  */
    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_MAP_CHUNK, &Req, sizeof(Req));
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Update the tree.
         */
        /* insert the new one. */
        AssertPtr(Req.pvR3);
        pChunk->pv = Req.pvR3;
        bool fRc = RTAvlU32Insert(&pVM->pgm.s.R3ChunkMap.Tree, &pChunk->Core);
        AssertRelease(fRc);
        pVM->pgm.s.R3ChunkMap.c++;

        /* remove the unmapped one. */
        if (Req.idChunkUnmap != INT32_MAX)
        {
            PPGMCHUNKR3MAPPING pUnmappedChunk = (PPGMCHUNKR3MAPPING)RTAvlU32Remove(&pVM->pgm.s.R3ChunkMap.Tree, Req.idChunkUnmap);
            AssertRelease(pUnmappedChunk);
            pUnmappedChunk->pv = NULL;
            pUnmappedChunk->Core.Key = INT32_MAX;
#if 0 /* for later when we've got a separate mapping method for ring-0. */
            MMR3HeapFree(pUnmappedChunk);
#else
            MMHyperFree(pVM, pUnmappedChunk);
#endif
            pVM->pgm.s.R3ChunkMap.c--;
        }
    }
    else
    {
        AssertRC(rc);
#if 0 /* for later when we've got a separate mapping method for ring-0. */
        MMR3HeapFree(pChunk);
#else
        MMHyperFree(pVM, pChunk);
#endif
        pChunk = NULL;
    }

    *ppChunk = pChunk;
    return rc;
}
#endif /* IN_RING3 */


/**
 * Maps a page into the current virtual address space so it can be accessed.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
 *
 * @param   pVM         The VM address.
 * @param   pPage       The physical page tracking structure.
 * @param   GCPhys      The address of the page.
 * @param   ppMap       Where to store the address of the mapping tracking structure.
 * @param   ppv         Where to store the mapping address of the page. The page
 *                      offset is masked off!
 *
 * @remarks Called from within the PGM critical section.
 */
int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
{
#ifdef IN_GC
    /*
     * Just some sketchy GC code.
     */
    *ppMap = NULL;
    RTHCPHYS HCPhys = pPage->HCPhys & PGM_HCPHYS_PAGE_MASK;
    Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
    return PGMGCDynMapHCPage(pVM, HCPhys, ppv);

#else /* IN_RING3 || IN_RING0 */

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk         The Chunk ID.
 */
#define PGM_R3CHUNKTLB_IDX(idChunk)     ( (idChunk) & (PGM_R3CHUNKTLB_ENTRIES - 1) )

    /*
     * Find/make Chunk TLB entry for the mapping chunk.
     */
    PPGMR3CHUNK pChunk;
    const uint32_t idChunk = PGM_PAGE_GET_PAGEID(pPage) >> XXX_CHUNKID_SHIFT;
    PPGMR3CHUNKTLBE pTlbe = &pVM->pgm.s.R3ChunkTlb.aEntries[PGM_R3CHUNKTLB_IDX(idChunk)];
    if (pTlbe->idChunk == idChunk)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.StatR3ChunkTlbHits);
        pChunk = pTlbe->pChunk;
    }
    else
    {
        STAM_COUNTER_INC(&pVM->pgm.s.StatR3ChunkTlbMisses);

        /*
         * Find the chunk, map it if necessary.
         */
        pChunk = (PPGMR3CHUNK)RTAvlU32Get(&pVM->pgm.s.R3ChunkMap.Tree, idChunk);
        if (!pChunk)
        {
#ifdef IN_RING0
            int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
            AssertRCReturn(rc, rc);
            pChunk = (PPGMR3CHUNK)RTAvlU32Get(&pVM->pgm.s.R3ChunkMap.Tree, idChunk);
            Assert(pChunk);
#else
            int rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
            if (VBOX_FAILURE(rc))
                return rc;
#endif
        }

        /*
         * Enter it into the Chunk TLB.
         */
        pTlbe->idChunk = idChunk;
        pTlbe->pChunk = pChunk;
        pChunk->iAge = 0;
    }

    const uint32_t iPage = PGM_PAGE_GET_PAGEID(pPage) & ((UINT32_C(1) << XXX_CHUNKID_SHIFT) - 1); /* page index within the chunk */
    *ppv = (uint8_t *)pChunk->pv + (iPage << PAGE_SHIFT);
    *ppMap = pChunk;
    return VINF_SUCCESS;
#endif /* IN_RING3 || IN_RING0 */
}


/**
 * Calculates the index of a guest page in the Physical TLB.
 * @returns Physical TLB index.
 * @param   GCPhys      The guest physical address.
 */
#define PGM_R3PHYSTLB_IDX(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGM_R3PHYSTLB_ENTRIES - 1) )

#if defined(IN_RING3) || defined(IN_RING0)
# define PGM_PHYSTLB_IDX(GCPhys)   PGM_R3PHYSTLB_IDX(GCPhys)
# define PGMPHYSTLBE PGMR3PHYSTLBE
#else /* IN_GC */
# define PGM_PHYSTLB_IDX(GCPhys)   PGM_GCPHYSTLB_IDX(GCPhys)
# define PGMPHYSTLBE PGMGCPHYSTLBE
#endif


/**
 * Load a guest page into the ring-3 physical TLB.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 * @param   pPGM        The PGM instance pointer.
 * @param   GCPhys      The guest physical address in question.
 */
int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
{
    STAM_COUNTER_INC(&pPGM->StatR3PhysTlbMisses);

    /*
     * Find the ram range.
     * 99.8% of requests are expected to be in the first range.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = CTXSUFF(pRam->pNext);
            if (!pRam)
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }

    /*
     * Map the page.
     * Make a special case for the zero page as it is kind of special.
     */
    PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
    PPGMR3PHYSTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PHYSTLB_IDX(GCPhys)];
    if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO)
    {
        void *pv;
        PPGMPAGEMAP pMap;
        int rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pMap, &pv);
        if (VBOX_FAILURE(rc))
            return rc;
        pTlbe->pMap = pMap;
        pTlbe->pv = pv;
    }
    else
    {
        Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
        pTlbe->pMap = NULL;
        pTlbe->pv = pPGM->pvZeroPgR3;
    }
    pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK; /* the TLB lookup compares against the masked address */
    pTlbe->pPage = pPage;
    return VINF_SUCCESS;
}


/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 * @param   pPgm        The PGM instance handle.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPgm, RTGCPHYS GCPhys, PPPGMPHYSTLBE ppTlbe)
{
    int rc;
    PPGMPHYSTLBE pTlbe = &pPgm->CTXSUFF(PhysTlb).aEntries[PGM_PHYSTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPgm->StatR3PhysTlbHits);
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlb(pPgm, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}


#endif /* NEW_PHYS_CODE */


/**
 * Requests the mapping of a guest page into the current context.
 *
 * This API should only be used for very short term, as it will consume
 * scarce resources (R0 and GC) in the mapping cache. When you're done
 * with the page, call PGMPhysGCPhys2CCPtrRelease() ASAP to release it.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         The VM handle.
 * @param   GCPhys      The guest physical address of the page that should be mapped.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 *
 * @remark  Avoid calling this API from within critical sections (other than
 *          the PGM one) because of the deadlock risk.
 */
PGMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv)
{
#ifdef NEW_PHYS_CODE
    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);

# ifdef IN_GC
    /* Until a physical TLB is implemented for GC, let PGMGCDynMapGCPageEx handle it. */
    rc = PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
    pgmUnlock(pVM);
    return rc;

# else
    /*
     * Query the Physical TLB entry for the page (may fail).
     */
    PPGMPHYSTLBE pTlbe;
    rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
    if (RT_SUCCESS(rc))
    {
        /*
         * If the page is shared, the zero page, or being write monitored
         * it must be converted to a page that's writable if possible.
         */
        PPGMPAGE pPage = pTlbe->pPage;
        if (RT_UNLIKELY(pPage->u2State != PGM_PAGE_STATE_ALLOCATED))
            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
        if (RT_SUCCESS(rc))
        {
            /*
             * Now, just perform the locking and calculate the return address.
             */
            PPGMPAGEMAP pMap = pTlbe->pMap;
            pMap->cRefs++;
            if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
                if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
                {
                    AssertMsgFailed(("%VGp is entering permanent locked state!\n", GCPhys));
                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
                }

            *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
        }
    }

    pgmUnlock(pVM);
    return rc;

# endif /* IN_RING3 || IN_RING0 */

#else
    /*
     * Temporary fallback code.
     */
# ifdef IN_GC
    return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
# else
    return PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1, ppv);
# endif
#endif
}


/**
 * Release the mapping of a guest page.
 *
 * This is the counterpart to PGMPhysGCPhys2CCPtr.
 *
 * @param   pVM         The VM handle.
 * @param   GCPhys      The address that was mapped using PGMPhysGCPhys2CCPtr.
 * @param   pv          The address that PGMPhysGCPhys2CCPtr returned.
 */
PGMDECL(void) PGMPhysGCPhys2CCPtrRelease(PVM pVM, RTGCPHYS GCPhys, void *pv)
{
#ifdef NEW_PHYS_CODE
# ifdef IN_GC
    /* currently nothing to do here. */
/* --- postponed
#elif defined(IN_RING0)
*/

# else  /* IN_RING3 */
    pgmLock(pVM);

    /*
     * Try the Physical TLB cache.
     * There's a high likelihood that this will work out since it's a short-term lock.
     */
    PPGMR3PHYSTLBE pTlbe = &pVM->pgm.s.R3PhysTlb.aEntries[PGM_R3PHYSTLB_IDX(GCPhys)];
    if (RT_LIKELY(pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK)))
    {
        PPGMPAGE pPage = pTlbe->pPage;
        Assert(PGM_PAGE_IS_NORMAL(pPage));
        Assert(pPage->cLocks >= 1);
        if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
            pPage->cLocks--;

        PPGMR3CHUNK pChunk = pTlbe->pMap;
        Assert(pChunk->cRefs >= 1);
        pChunk->cRefs--;
        pChunk->iAge = 0;
    }
    else
    {
        /*
         * Find the page and unlock it.
         */
        PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (RT_UNLIKELY(off >= pRam->cb))
        {
            do
            {
                pRam = CTXSUFF(pRam->pNext);
                AssertReleaseMsg(pRam, ("GCPhys=%RGp\n", GCPhys));
                off = GCPhys - pRam->GCPhys;
            } while (off >= pRam->cb);
        }
        PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
        Assert(PGM_PAGE_IS_NORMAL(pPage));
        Assert(pPage->cLocks >= 1);
        if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
            pPage->cLocks--;

        /*
         * Now find the chunk mapping and unlock it.
         */
        PPGMR3CHUNK pChunk;
        const uint32_t idChunk = PGM_PAGE_GET_PAGEID(pPage) >> XXX_CHUNKID_SHIFT;
        PPGMR3CHUNKTLBE pChunkTlbe = &pVM->pgm.s.R3ChunkTlb.aEntries[PGM_R3CHUNKTLB_IDX(idChunk)];
        if (pChunkTlbe->idChunk == idChunk)
            pChunk = pChunkTlbe->pChunk;
        else
        {
            pChunk = (PPGMR3CHUNK)RTAvlU32Get(&pVM->pgm.s.R3ChunkMap.Tree, idChunk);
            AssertReleaseMsg(pChunk, ("GCPhys=%RGp\n", GCPhys));
            pChunk->iAge = 0;
        }
        Assert(pChunk->cRefs >= 1);
        pChunk->cRefs--;
    }

    pgmUnlock(pVM);
# endif /* IN_RING3 */
#else
    NOREF(pVM);
    NOREF(GCPhys);
    NOREF(pv);
#endif
}

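
Both of the caches introduced above are simple direct-mapped TLBs: PGM_R3CHUNKTLB_IDX hashes the chunk id and PGM_R3PHYSTLB_IDX hashes the page frame number with a bitwise AND, which only behaves as a cheap modulo when the entry count is a power of two. The standalone sketch below illustrates that indexing; the SKETCH_* constants are hypothetical stand-ins for the real PGM_R3CHUNKTLB_ENTRIES / PGM_R3PHYSTLB_ENTRIES values defined elsewhere in PGM.

/* Standalone illustration of the direct-mapped TLB indexing used above.
   The SKETCH_* constants are hypothetical; the real entry counts live in PGM. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_CHUNKTLB_ENTRIES  32   /* must be a power of two for the AND-as-modulo trick */
#define SKETCH_PHYSTLB_ENTRIES   64   /* must be a power of two */
#define SKETCH_PAGE_SHIFT        12   /* 4 KB pages */

static unsigned sketchChunkTlbIdx(uint32_t idChunk)
{
    return idChunk & (SKETCH_CHUNKTLB_ENTRIES - 1);
}

static unsigned sketchPhysTlbIdx(uint64_t GCPhys)
{
    return (unsigned)((GCPhys >> SKETCH_PAGE_SHIFT) & (SKETCH_PHYSTLB_ENTRIES - 1));
}

int main(void)
{
    /* Chunk ids 32 apart and pages 64 pages apart land in the same slot and evict each other. */
    printf("chunk 5 -> slot %u, chunk 37 -> slot %u\n", sketchChunkTlbIdx(5), sketchChunkTlbIdx(37));
    printf("GCPhys 0x1000 -> slot %u, GCPhys 0x41000 -> slot %u\n",
           sketchPhysTlbIdx(UINT64_C(0x1000)), sketchPhysTlbIdx(UINT64_C(0x41000)));
    return 0;
}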