VirtualBox

Changeset 18230 in vbox


Timestamp:
2009-03-25 01:13:09 AM (16 years ago)
Author:
vboxsync
Message:

PGM,IOM: Implemented MMIO2 aliases for MMIO pages. This involved some forgotten changes to the MMIO implementation too - changing the page type to MMIO, freeing the backing RAM page.
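
As a rough illustration of the page-state transition this changeset implements (an MMIO page is aliased to an MMIO2 page for direct access, and later reset back to an MMIO/ZERO page), here is a small self-contained C model. PageModel and its fields are simplified stand-ins for PGMPAGE and the PGM_PAGE_SET_* accessors used in the PGMAllHandler.cpp hunk below; it is an illustrative sketch, not VirtualBox code.

    /* Toy model of the alias/reset transitions; not VBox code. */
    #include <stdint.h>
    #include <stdio.h>

    typedef enum { TYPE_MMIO, TYPE_MMIO2, TYPE_MMIO2_ALIAS_MMIO } PageType;
    typedef enum { STATE_ZERO, STATE_ALLOCATED } PageState;
    typedef enum { HNDL_STATE_ALL, HNDL_STATE_DISABLED } HandlerState;

    typedef struct {
        uint64_t     HCPhys;   /* host physical address backing the page */
        uint32_t     idPage;   /* allocator page id; 0 plays the role of NIL here */
        PageType     enmType;
        PageState    enmState;
        HandlerState enmHndl;
    } PageModel;

    /* Mirrors PGMHandlerPhysicalPageAlias: point the MMIO page at the MMIO2
       backing and disable the physical access handler for it. */
    static void aliasPage(PageModel *pPage, const PageModel *pMmio2)
    {
        pPage->HCPhys   = pMmio2->HCPhys;
        pPage->idPage   = pMmio2->idPage;
        pPage->enmType  = TYPE_MMIO2_ALIAS_MMIO;
        pPage->enmState = STATE_ALLOCATED;
        pPage->enmHndl  = HNDL_STATE_DISABLED;
    }

    /* Mirrors pgmHandlerPhysicalResetAliasedPage: back to a zero-backed MMIO
       page with the handler catching all accesses again. */
    static void resetAliasedPage(PageModel *pPage, uint64_t HCPhysZeroPg)
    {
        pPage->HCPhys   = HCPhysZeroPg;
        pPage->idPage   = 0;
        pPage->enmType  = TYPE_MMIO;
        pPage->enmState = STATE_ZERO;
        pPage->enmHndl  = HNDL_STATE_ALL;
    }

    int main(void)
    {
        const uint64_t HCPhysZeroPg = 0x1000;  /* placeholder zero-page address */
        PageModel mmio  = { HCPhysZeroPg, 0, TYPE_MMIO,  STATE_ZERO,      HNDL_STATE_ALL };
        PageModel mmio2 = { 0xabcd000,   42, TYPE_MMIO2, STATE_ALLOCATED, HNDL_STATE_DISABLED };

        aliasPage(&mmio, &mmio2);
        printf("aliased: type=%d HCPhys=%#llx\n", mmio.enmType, (unsigned long long)mmio.HCPhys);
        resetAliasedPage(&mmio, HCPhysZeroPg);
        printf("reset:   type=%d HCPhys=%#llx\n", mmio.enmType, (unsigned long long)mmio.HCPhys);
        return 0;
    }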

Location:
trunk
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/err.h

    r17775 r18230  
    433433/** No CR3 root shadow page table.. */
    434434#define VERR_PGM_NO_CR3_SHADOW_ROOT             (-1636)
     435/** Trying to free a page with an invalid Page ID. */
     436#define VERR_PGM_PHYS_INVALID_PAGE_ID           (-1637)
     437/** PGMPhysWrite/Read hit a handler in Ring-0 or raw-mode context. */
     438#define VERR_PGM_PHYS_WR_HIT_HANDLER            (-1638)
    435439/** Trying to free a page that isn't RAM. */
    436 #define VERR_PGM_PHYS_NOT_RAM                   (-1637)
    437 /** Trying to free a page with an invalid Page ID. */
    438 #define VERR_PGM_PHYS_INVALID_PAGE_ID           (-1638)
    439 /** PGMPhysWrite/Read hit a handler in Ring-0 or raw-mode context. */
    440 #define VERR_PGM_PHYS_WR_HIT_HANDLER            (-1639)
     440#define VERR_PGM_PHYS_NOT_RAM                   (-1639)
     441/** Not ROM page. */
     442#define VERR_PGM_PHYS_NOT_ROM                   (-1640)
     443/** Not MMIO page. */
     444#define VERR_PGM_PHYS_NOT_MMIO                  (-1641)
     445/** Not MMIO2 page. */
     446#define VERR_PGM_PHYS_NOT_MMIO2                 (-1642)
     447/** Already aliased to a different page. */
     448#define VERR_PGM_HANDLER_ALREADY_ALIASED        (-1643)
     449/** Already aliased to the same page. */
     450#define VINF_PGM_HANDLER_ALREADY_ALIASED        (1643)
    441451/** @} */
    442452
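
The new codes follow the IPRT convention that informational statuses are positive and errors negative, so a caller of PGMHandlerPhysicalPageAlias can treat VINF_PGM_HANDLER_ALREADY_ALIASED as success. A minimal, self-contained illustration of that convention (the numeric values are copied from the hunk above; MODEL_RT_SUCCESS is a local stand-in for IPRT's RT_SUCCESS macro):

    #include <stdio.h>

    #define VERR_PGM_PHYS_NOT_MMIO2          (-1642)  /* error: not an MMIO2 page */
    #define VINF_PGM_HANDLER_ALREADY_ALIASED (1643)   /* info: already aliased to the same page */
    #define MODEL_RT_SUCCESS(rc)             ((rc) >= 0)

    static void checkAliasStatus(int rc)
    {
        if (!MODEL_RT_SUCCESS(rc))
            printf("aliasing failed: rc=%d\n", rc);
        else if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED)
            printf("page was already aliased to this MMIO2 page, nothing to do\n");
        else
            printf("page aliased\n");
    }

    int main(void)
    {
        checkAliasStatus(VINF_PGM_HANDLER_ALREADY_ALIASED);
        checkAliasStatus(VERR_PGM_PHYS_NOT_MMIO2);
        checkAliasStatus(0); /* VINF_SUCCESS */
        return 0;
    }
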
  • trunk/src/VBox/VMM/PGMInternal.h

    r18143 r18230  
    28672867void            pgmR3HandlerPhysicalUpdateAll(PVM pVM);
    28682868bool            pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
     2869void            pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
    28692870int             pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
    28702871DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r18205 r18230  
    660660
    661661
     662#ifdef VBOX_WITH_NEW_PHYS_CODE
     663/**
     664 * Frees a range of pages, replacing them with ZERO pages of the specified type.
     665 *
     666 * @returns VBox status code.
     667 * @param   pVM         The VM handle.
     668 * @param   pRam        The RAM range in which the pages resides.
     669 * @param   GCPhys      The address of the first page.
     670 * @param   GCPhysLast  The address of the last page.
     671 * @param   uType       The page type to replace then with.
     672 */
     673static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
     674{
     675    uint32_t            cPendingPages = 0;
     676    PGMMFREEPAGESREQ    pReq;
     677    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
     678    AssertLogRelRCReturn(rc, rc);
     679
     680    /* Itegerate the pages. */
     681    PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
     682    uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
     683    while (cPagesLeft-- > 0)
     684    {
     685        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
     686        AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
     687
     688        PGM_PAGE_SET_TYPE(pPageDst, uType);
     689
     690        GCPhys += PAGE_SIZE;
     691        pPageDst++;
     692    }
     693
     694    if (cPendingPages)
     695    {
     696        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
     697        AssertLogRelRCReturn(rc, rc);
     698    }
     699    GMMR3FreePagesCleanup(pReq);
     700
     701    return rc;
     702}
     703#endif /* VBOX_WITH_NEW_PHYS_CODE */
     704
     705
    662706/**
    663707 * Sets up a range RAM.
     
    809853                            AssertLogRelRCReturn(rc, rc);
    810854                        }
     855                        break;
     856
     857                    case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
     858                        pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
    811859                        break;
    812860
     
    871919#endif /* VBOX_WITH_NEW_PHYS_CODE */
    872920
     921                    case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
     922                        pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
     923                        break;
     924
    873925                    case PGMPAGETYPE_MMIO2:
    874926                    case PGMPAGETYPE_ROM_SHADOW:
     
    9821034    PPGMRAMRANGE pNew;
    9831035    if (fRamExists)
     1036    {
    9841037        pNew = NULL;
     1038#ifdef VBOX_WITH_NEW_PHYS_CODE
     1039        /*
     1040         * Make all the pages in the range MMIO/ZERO pages, freeing any
     1041         * RAM pages currently mapped here. This might not be 100% correct
     1042         * for PCI memory, but we're doing the same thing for MMIO2 pages.
     1043         */
     1044        rc = pgmLock(pVM);
     1045        if (RT_SUCCESS(rc))
     1046        {
     1047            rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
     1048            pgmUnlock(pVM);
     1049        }
     1050        AssertRCReturn(rc, rc);
     1051#endif
     1052    }
    9851053    else
    9861054    {
     
    10671135    if (RT_SUCCESS(rc))
    10681136    {
    1069         RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    1070         PPGMRAMRANGE pRamPrev = NULL;
    1071         PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
     1137        RTGCPHYS        GCPhysLast = GCPhys + (cb - 1);
     1138        PPGMRAMRANGE    pRamPrev    = NULL;
     1139        PPGMRAMRANGE    pRam        = pVM->pgm.s.pRamRangesR3;
    10721140        while (pRam && GCPhysLast >= pRam->GCPhys)
    10731141        {
    1074             /*if (    GCPhysLast >= pRam->GCPhys
    1075                 &&  GCPhys     <= pRam->GCPhysLast) - later */
     1142            /** @todo We're being a bit too careful here. rewrite. */
    10761143            if (    GCPhysLast == pRam->GCPhysLast
    10771144                &&  GCPhys     == pRam->GCPhys)
     
    10821149                 * See if all the pages are dead MMIO pages.
    10831150                 */
     1151                uint32_t const  cPages   = cb >> PAGE_SHIFT;
    10841152                bool            fAllMMIO = true;
    1085                 PPGMPAGE        pPage    = &pRam->aPages[0];
    1086                 uint32_t const  cPages   = cb >> PAGE_SHIFT;
     1153                uint32_t        iPage    = 0;
    10871154                uint32_t        cLeft    = cPages;
    10881155                while (cLeft-- > 0)
    10891156                {
     1157                    PPGMPAGE    pPage    = &pRam->aPages[iPage];
    10901158                    if (    PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
    10911159                        /*|| not-out-of-action later */)
    10921160                    {
    10931161                        fAllMMIO = false;
     1162#ifdef VBOX_WITH_NEW_PHYS_CODE
    10941163                        Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
     1164                        AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
     1165#endif
    10951166                        break;
    10961167                    }
     
    10981169                    pPage++;
    10991170                }
    1100 
    1101                 /*
    1102                  * Unlink it and free if it's all MMIO.
    1103                  */
    11041171                if (fAllMMIO)
    11051172                {
     1173                    /*
     1174                     * Ad-hoc range, unlink and free it.
     1175                     */
    11061176                    Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
    11071177                         GCPhys, GCPhysLast, pRam->pszDesc));
     
    11131183                    pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
    11141184                    MMHyperFree(pVM, pRam);
     1185                    break;
     1186                }
     1187            }
     1188
     1189#ifdef VBOX_WITH_NEW_PHYS_CODE
     1190            /*
     1191             * Range match? It will all be within one range (see PGMAllHandler.cpp).
     1192             */
     1193            if (    GCPhysLast >= pRam->GCPhys
     1194                &&  GCPhys     <= pRam->GCPhysLast)
     1195            {
     1196                Assert(GCPhys     >= pRam->GCPhys);
     1197                Assert(GCPhysLast <= pRam->GCPhysLast);
     1198
     1199                /*
     1200                 * Turn the pages back into RAM pages.
     1201                 */
     1202                uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
     1203                uint32_t cLeft = cb >> PAGE_SHIFT;
     1204                while (cLeft--)
     1205                {
     1206                    PPGMPAGE pPage = &pRam->aPages[iPage];
     1207                    AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
     1208                    AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
     1209                    if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
     1210                        PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_RAM);
    11151211                }
    11161212                break;
    11171213            }
     1214#endif
    11181215
    11191216            /* next */
     
    14521549    if (fRamExists)
    14531550    {
     1551/** @todo use pgmR3PhysFreePageRange here. */
    14541552        uint32_t            cPendingPages = 0;
    14551553        PGMMFREEPAGESREQ    pReq;
     
    30883186                    for (uint32_t iPage = 0; iPage < cPages; iPage++)
    30893187                        if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
    3090                             LogRel(("PGM: Used by %RGp %R{pgmpage} (%s)\n",
     3188                            LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
    30913189                                    pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
    30923190                }
  • trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp

    r17533 r18230  
    17641764#ifndef IN_RC
    17651765/**
    1766  * Modify an existing MMIO region page; map to another guest physical region and change the access flags
     1766 * Mapping an MMIO2 page in place of an MMIO page for direct access.
     1767 *
     1768 * (This is a special optimization used by the VGA device.)
    17671769 *
    17681770 * @returns VBox status code.
    17691771 *
    17701772 * @param   pVM             The virtual machine.
    1771  * @param   GCPhys          Physical address that's part of the MMIO region to be changed.
    1772  * @param   GCPhysRemapped  Remapped address.
    1773  * @param   fPageFlags      Page flags to set (typically X86_PTE_RW).
     1773 * @param   GCPhys          The address of the MMIO page to be changed.
     1774 * @param   GCPhysRemapped  The address of the MMIO2 page.
     1775 * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
     1776 *                          for the time being.
    17741777 */
    17751778VMMDECL(int) IOMMMIOModifyPage(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
    17761779{
    1777     Assert(fPageFlags == (X86_PTE_RW | X86_PTE_P));
    1778 
    17791780    Log(("IOMMMIOModifyPage %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
     1781
     1782    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    17801783
    17811784    /* This currently only works in real mode, protected mode without paging or with nested paging. */
     
    17861789
    17871790    /*
    1788      * Lookup the current context range node and statistics.
     1791     * Lookup the context range node the page belongs to.
    17891792     */
    17901793    PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
    17911794    AssertMsgReturn(pRange,
    17921795                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
    1793                     VERR_INTERNAL_ERROR);
    1794 
    1795     GCPhys         &= ~(RTGCPHYS)0xfff;
    1796     GCPhysRemapped &= ~(RTGCPHYS)0xfff;
     1796                    VERR_IOM_MMIO_RANGE_NOT_FOUND);
     1797    Assert((pRange->GCPhys       & PAGE_OFFSET_MASK) == 0);
     1798    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
     1799
     1800    /*
     1801     * Do the aliasing; page align the addresses since PGM is picky.
     1802     */
     1803    GCPhys         &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
     1804    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    17971805
    17981806    int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
    17991807    AssertRCReturn(rc, rc);
    18001808
    1801 #ifdef VBOX_STRICT
     1809    /*
     1810     * Modify the shadow page table. Since it's an MMIO page it won't be present and we
     1811     * can simply prefetch it.
     1812     *
     1813     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     1814     */
     1815#ifndef VBOX_WITH_NEW_PHYS_CODE /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
     1816# ifdef VBOX_STRICT
    18021817    uint64_t fFlags;
    18031818    RTHCPHYS HCPhys;
    18041819    rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    18051820    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    1806 #endif
    1807 
    1808     /* @note this is a NOP in the EPT case; we'll just let it fault again to resync the page. */
     1821# endif
     1822#endif
    18091823    rc = PGMPrefetchPage(pVM, (RTGCPTR)GCPhys);
    18101824    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
     
    18211835 * @param   GCPhys          Physical address that's part of the MMIO region to be reset.
    18221836 */
    1823 VMMDECL(int)  IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
     1837VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
    18241838{
    18251839    Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
     
    18321846
    18331847    /*
    1834      * Lookup the current context range node and statistics.
     1848     * Lookup the context range node the page belongs to.
    18351849     */
    18361850    PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
    18371851    AssertMsgReturn(pRange,
    18381852                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
    1839                     VERR_INTERNAL_ERROR);
    1840 
    1841     /* Reset the entire range by clearing all shadow page table entries. */
     1853                    VERR_IOM_MMIO_RANGE_NOT_FOUND);
     1854
     1855    /*
     1856     * Call PGM to do the job work.
     1857     *
     1858     * After the call, all the pages should be non-present... unless there is
     1859     * a page pool flush pending (unlikely).
     1860     */
    18421861    int rc = PGMHandlerPhysicalReset(pVM, pRange->GCPhys);
    18431862    AssertRC(rc);
    18441863
    18451864#ifdef VBOX_STRICT
    1846     uint32_t cb = pRange->cb;
    1847 
    1848     GCPhys = pRange->GCPhys;
    1849 
    1850     while (cb)
    1851     {
    1852 
    1853         uint64_t fFlags;
    1854         RTHCPHYS HCPhys;
    1855         rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    1856         Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    1857         cb     -= PAGE_SIZE;
    1858         GCPhys += PAGE_SIZE;
    1859     }
    1860 #endif
    1861     return VINF_SUCCESS;
     1865    if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     1866    {
     1867        uint32_t cb = pRange->cb;
     1868        GCPhys = pRange->GCPhys;
     1869        while (cb)
     1870        {
     1871            uint64_t fFlags;
     1872            RTHCPHYS HCPhys;
     1873            rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
     1874            Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
     1875            cb     -= PAGE_SIZE;
     1876            GCPhys += PAGE_SIZE;
     1877        }
     1878    }
     1879#endif
     1880    return rc;
    18621881}
    18631882#endif /* !IN_RC */
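
Per the reworked doc comments, a device (the comment names VGA) maps an MMIO2 page over an MMIO page with IOMMMIOModifyPage and later tears all such aliases down with IOMMMIOResetRegion. A minimal sketch of that call pattern, assuming the VMM headers of this era of the tree (VBox/vm.h, VBox/iom.h, iprt/x86.h); GCPhysMmio and GCPhysMmio2 are hypothetical addresses and the snippet is not buildable outside the VirtualBox tree:

    /* Sketch only: assumes the VirtualBox VMM build environment. */
    #include <VBox/vm.h>
    #include <VBox/iom.h>
    #include <iprt/x86.h>

    /* Alias one MMIO page to its MMIO2 backing for direct guest access. */
    static int deviceEnableDirectAccess(PVM pVM, RTGCPHYS GCPhysMmio, RTGCPHYS GCPhysMmio2)
    {
        /* fPageFlags must currently be exactly X86_PTE_RW | X86_PTE_P. */
        return IOMMMIOModifyPage(pVM, GCPhysMmio, GCPhysMmio2, X86_PTE_RW | X86_PTE_P);
    }

    /* Restore the whole region to handler-backed MMIO/ZERO pages. */
    static int deviceDisableDirectAccess(PVM pVM, RTGCPHYS GCPhysMmio)
    {
        return IOMMMIOResetRegion(pVM, GCPhysMmio);
    }
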
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    r17509 r18230  
    216216     */
    217217    bool            fFlushTLBs = false;
    218     int             rc = VINF_SUCCESS;
    219     const unsigned  uState = pgmHandlerPhysicalCalcState(pCur);
    220     RTUINT          cPages = pCur->cPages;
    221     RTUINT          i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
     218    int             rc         = VINF_SUCCESS;
     219    const unsigned  uState     = pgmHandlerPhysicalCalcState(pCur);
     220    uint32_t        cPages    = pCur->cPages;
     221    uint32_t        i          = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
    222222    for (;;)
    223223    {
     
    235235                return rc2;
    236236        }
     237
    237238#endif /* !VBOX_WITH_NEW_PHYS_CODE */
     239        PPGMPAGE pPage = &pRam->aPages[i];
     240#ifdef VBOX_WITH_NEW_PHYS_CODE
     241        AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage),
     242                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << PAGE_SHIFT), pPage));
     243#endif
    238244
    239245        /* Only do upgrades. */
    240         PPGMPAGE pPage = &pRam->aPages[i];
    241246        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
    242247        {
    243248            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
     249#ifndef VBOX_WITH_NEW_PHYS_CODE
    244250            Assert(PGM_PAGE_GET_HCPHYS(pPage));
     251#endif
    245252
    246253            int rc2 = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
     
    415422
    416423
     424#ifdef VBOX_WITH_NEW_PHYS_CODE
     425/**
     426 * Resets an aliased page.
     427 *
     428 * @param   pVM         The VM.
     429 * @param   pPage       The page.
     430 * @param   GCPhysPage  The page address in case it comes in handy.
     431 */
     432void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
     433{
     434    Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO);
     435    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
     436
     437    /*
     438     * Flush any shadow page table references *first*.
     439     */
     440    bool fFlushTLBs = false;
     441    int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
     442    AssertLogRelRCReturnVoid(rc);
     443    if (rc == VINF_PGM_GCPHYS_ALIASED)
     444    {
     445        pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
     446        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     447        rc = VINF_PGM_SYNC_CR3;
     448    }
     449# ifdef IN_RC
     450    else if (fFlushTLBs)
     451        PGM_INVL_GUEST_TLBS();
     452# else
     453    HWACCMFlushTLB(pVM);
     454# endif
     455    pVM->pgm.s.fPhysCacheFlushPending = true;
     456
     457    /*
     458     * Make it an MMIO/Zero page.
     459     */
     460    PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
     461    PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO);
     462    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
     463    PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
     464    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
     465
     466    NOREF(GCPhysPage);
     467}
     468#endif
     469
     470
    417471/**
    418472 * Resets ram range flags.
     
    441495        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
    442496        if (RT_SUCCESS(rc))
     497        {
     498#ifdef VBOX_WITH_NEW_PHYS_CODE
     499            /* Reset MMIO2 for MMIO pages to MMIO, since this aliasing is our business.
     500               (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.)  */
     501            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
     502                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys);
     503            AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
     504#endif
    443505            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
     506        }
    444507        else
    445508            AssertRC(rc);
     
    767830 * page access handler region.
    768831 *
    769  * This is used in pair with PGMHandlerPhysicalPageTempOff().
     832 * This is used in pair with PGMHandlerPhysicalPageTempOff() or
     833 * PGMHandlerPhysicalPageAlias().
    770834 *
    771835 * @returns VBox status code.
    772836 * @param   pVM         VM Handle
    773  * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
     837 * @param   GCPhys      The start address of the handler regions, i.e. what you
     838 *                      passed to PGMR3HandlerPhysicalRegister(),
     839 *                      PGMHandlerPhysicalRegisterEx() or
     840 *                      PGMHandlerPhysicalModify().
    774841 */
    775842VMMDECL(int)  PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
     
    791858            case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
    792859            case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
    793             case PGMPHYSHANDLERTYPE_MMIO: /* @note Only use when clearing aliased mmio ranges! */
     860            case PGMPHYSHANDLERTYPE_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
    794861            {
    795                 /*
    796                  * Set the flags and flush shadow PT entries.
    797                  */
    798                 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset));
     862                STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset)); /**@Todo move out of switch */
    799863                PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
    800864                Assert(pRam);
    801                 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
    802                 if (rc == VINF_PGM_GCPHYS_ALIASED)
     865                Assert(pRam->GCPhys     <= pCur->Core.Key);
     866                Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
     867
     868#ifdef VBOX_WITH_NEW_PHYS_CODE
     869                if (pCur->enmType == PGMPHYSHANDLERTYPE_MMIO)
    803870                {
    804                     pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
    805                     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     871                    /*
     872                     * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
     873                     * This could probably be optimized a bit wrt to flushing, but I'm too lazy
     874                     * to do that now...
     875                     */
     876                    PPGMPAGE    pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT];
     877                    uint32_t    cLeft = pCur->cPages;
     878                    while (cLeft-- > 0)
     879                    {
     880                        if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
     881                            pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT));
     882                        Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
     883                        pPage++;
     884                    }
    806885                }
    807                 pVM->pgm.s.fPhysCacheFlushPending = true;
    808                 HWACCMFlushTLB(pVM);
     886                else
     887#endif
     888                {
     889                    /*
     890                     * Set the flags and flush shadow PT entries.
     891                     */
     892                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
     893                    if (rc == VINF_PGM_GCPHYS_ALIASED)
     894                    {
     895                        pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
     896                        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     897                    }
     898                    pVM->pgm.s.fPhysCacheFlushPending = true;
     899                    HWACCMFlushTLB(pVM);
     900                }
    809901
    810902                rc = VINF_SUCCESS;
     
    843935 *
    844936 * @returns VBox status code.
    845  * @param   pVM         VM Handle
    846  * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
    847  *                      This must be a fully page aligned range or we risk messing up other
    848  *                      handlers installed for the start and end pages.
    849  * @param   GCPhysPage  Physical address of the page to turn off access monitoring for.
     937 * @param   pVM                 VM Handle
     938 * @param   GCPhys              The start address of the access handler. This
     939 *                              must be a fully page aligned range or we risk
     940 *                              messing up other handlers installed for the
     941 *                              start and end pages.
     942 * @param   GCPhysPage          The physical address of the page to turn off
     943 *                              access monitoring for.
    850944 */
    851945VMMDECL(int)  PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
     
    891985
    892986/**
    893  * Temporarily turns off the access monitoring of a page within an MMIO
    894  * access handler region and remaps it to another guest physical region.
    895  *
    896  * Use this when no further \#PFs are required for that page. Be aware that
    897  * a page directory sync might reset the flags, and turn on access monitoring
    898  * for the page.
    899  *
    900  * The caller must do required page table modifications.
     987 * Replaces an MMIO page with an MMIO2 page.
     988 *
     989 * This is a worker for IOMMMIOModifyPage that works in a similar way to
     990 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
     991 * backing, the caller must provide a replacement page. For various reasons the
     992 * replacement page must be an MMIO2 page.
     993 *
     994 * The caller must do required page table modifications. You can get away
     995 * without making any modifations since it's an MMIO page, the cost is an extra
     996 * \#PF which will the resync the page.
     997 *
     998 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
     999 *
     1000 * The caller may still get handler callback even after this call and must be
     1001 * able to deal correctly with such calls. The reason for these callbacks are
     1002 * either that we're executing in the recompiler (which doesn't know about this
     1003 * arrangement) or that we've been restored from saved state (where we won't
     1004 * save the change).
    9011005 *
    9021006 * @returns VBox status code.
    903  * @param   pVM                 VM Handle
    904  * @param   GCPhys              Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
    905  *                              This must be a fully page aligned range or we risk messing up other
    906  *                              handlers installed for the start and end pages.
    907  * @param   GCPhysPage          Physical address of the page to turn off access monitoring for.
    908  * @param   GCPhysPageRemap     Physical address of the page that serves as backing memory.
     1007 * @param   pVM                 The VM handle
     1008 * @param   GCPhys              The start address of the access handler. This
     1009 *                              must be a fully page aligned range or we risk
     1010 *                              messing up other handlers installed for the
     1011 *                              start and end pages.
     1012 * @param   GCPhysPage          The physical address of the page to turn off
     1013 *                              access monitoring for.
     1014 * @param   GCPhysPageRemap     The physical address of the MMIO2 page that
     1015 *                              serves as backing memory.
     1016 *
     1017 * @remark  May cause a page pool flush if used on a page that is already
     1018 *          aliased.
     1019 *
     1020 * @note    This trick does only work reliably if the two pages are never ever
     1021 *          mapped in the same page table. If they are the page pool code will
     1022 *          be confused should either of them be flushed. See the special case
     1023 *          of zero page aliasing mentioned in #3170.
     1024 *
    9091025 */
    9101026VMMDECL(int)  PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
    9111027{
    9121028    /*
    913      * Validate the range.
     1029     * Lookup and validate the range.
    9141030     */
    9151031    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
     
    9191035                      &&  GCPhysPage <= pCur->Core.KeyLast))
    9201036        {
    921             Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
    922             Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
    923 
    9241037            AssertReturn(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, VERR_ACCESS_DENIED);
    925             /** @todo r=bird: This totally breaks the new PGMPAGE management. Will probably
    926              *        have to require that the current page is the zero page... Require
    927              *        GCPhysPageRemap to be a MMIO2 page might help matters because those
    928              *        pages aren't managed dynamically (at least not yet).
    929              *        VBOX_WITH_NEW_PHYS_CODE TODO!
    930              *
    931              * A solution to this would be to temporarily change the page into a MMIO2 one
    932              * and record that we've changed it. Only the physical page address would
    933              * need to be copied over. The aliased page would have to be MMIO2 ofc, since
    934              * RAM or ROM pages would require write sharing which is something we don't
    935              * intend to implement just yet...
     1038            AssertReturn(!(pCur->Core.Key & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
     1039            AssertReturn((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, VERR_INVALID_PARAMETER);
     1040
     1041            /*
     1042             * Get and validate the two pages.
    9361043             */
    937 
    938             /*
    939              * Note! This trick does only work reliably if the two pages are never ever
    940              *       mapped in the same page table. If they are the page pool code will
    941              *       be confused should either of them be flushed. See the special case
    942              *       of zero page aliasing mentioned in #3170.
    943              */
    944 
    9451044            PPGMPAGE pPageRemap;
    9461045            int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPageRemap, &pPageRemap);
    9471046            AssertRCReturn(rc, rc);
    948 
    949             /*
    950              * Change the page status.
    951              */
     1047#ifdef VBOX_WITH_NEW_PHYS_CODE
     1048            AssertMsgReturn(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
     1049                            ("GCPhysPageRemap=%RGp %R[pgmpage]\n", GCPhysPageRemap, pPageRemap),
     1050                            VERR_PGM_PHYS_NOT_MMIO2);
     1051#endif
     1052
    9521053            PPGMPAGE pPage;
    9531054            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
    9541055            AssertRCReturn(rc, rc);
    955 
    956             /* Do the actual remapping here. This page now serves as an alias for the backing memory specified. */
    9571056#ifdef VBOX_WITH_NEW_PHYS_CODE
    958             AssertReleaseFailed(); /** @todo see todo above! */
     1057            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
     1058            {
     1059                AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
     1060                                ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
     1061                                VERR_PGM_PHYS_NOT_MMIO2);
     1062                if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPage))
     1063                    return VINF_PGM_HANDLER_ALREADY_ALIASED;
     1064
     1065                /*
     1066                 * The page is already mapped as some other page, reset it
     1067                 * to an MMIO/ZERO page before doing the new mapping.
     1068                 */
     1069                Log(("PGMHandlerPhysicalPageAlias: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
     1070                     GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
     1071                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage);
     1072            }
     1073            Assert(PGM_PAGE_IS_ZERO(pPage));
     1074#endif
     1075
     1076            /*
     1077             * Do the actual remapping here.
     1078             * This page now serves as an alias for the backing memory specified.
     1079             */
     1080            LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
     1081                     GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
     1082#ifdef VBOX_WITH_NEW_PHYS_CODE
     1083            PGM_PAGE_SET_HCPHYS(pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
     1084            PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
     1085            PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
     1086            PGM_PAGE_SET_PAGEID(pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
     1087            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
     1088            LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
    9591089#else
    9601090            pPage->HCPhys = pPageRemap->HCPhys;
    9611091            PGM_PAGE_SET_TRACKING(pPage, 0);
    962 #endif
    963 
    964             LogFlow(("PGMHandlerPhysicalPageAlias %RGp alias for %RGp (%R[pgmpage]) -> %R[pgmpage]\n",
    965                      GCPhysPage, GCPhysPageRemap, pPageRemap, pPage));
    9661092            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
     1093#endif
     1094
    9671095#ifndef IN_RC
    9681096            HWACCMInvalidatePhysPage(pVM, GCPhysPage);
     
    9811109
    9821110
     1111#if 0/**@todo delete this. */
    9831112/**
    9841113 * Turns access monitoring of a page within a monitored
     
    10351164    return VERR_PGM_HANDLER_NOT_FOUND;
    10361165}
     1166#endif
    10371167
    10381168
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r18207 r18230  
    668668             * them, that would also avoid this mess. It would actually be kind of
    669669             * elegant... */
    670             AssertFailedReturn(VERR_INTERNAL_ERROR);
     670            AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR);
    671671        }
    672672        else