Changeset 18230 in vbox
- Timestamp:
- 2009-03-25 01:13:09 AM (16 years ago)
- Location:
- trunk
- Files:
- 6 edited
trunk/include/VBox/err.h
--- include/VBox/err.h (r17775)
+++ include/VBox/err.h (r18230)

@@ -433 +433 @@
 /** No CR3 root shadow page table.. */
 #define VERR_PGM_NO_CR3_SHADOW_ROOT             (-1636)
+/** Trying to free a page with an invalid Page ID. */
+#define VERR_PGM_PHYS_INVALID_PAGE_ID           (-1637)
+/** PGMPhysWrite/Read hit a handler in Ring-0 or raw-mode context. */
+#define VERR_PGM_PHYS_WR_HIT_HANDLER            (-1638)
 /** Trying to free a page that isn't RAM. */
-#define VERR_PGM_PHYS_NOT_RAM                   (-1637)
-/** Trying to free a page with an invalid Page ID. */
-#define VERR_PGM_PHYS_INVALID_PAGE_ID           (-1638)
-/** PGMPhysWrite/Read hit a handler in Ring-0 or raw-mode context. */
-#define VERR_PGM_PHYS_WR_HIT_HANDLER            (-1639)
+#define VERR_PGM_PHYS_NOT_RAM                   (-1639)
+/** Not ROM page. */
+#define VERR_PGM_PHYS_NOT_ROM                   (-1640)
+/** Not MMIO page. */
+#define VERR_PGM_PHYS_NOT_MMIO                  (-1641)
+/** Not MMIO2 page. */
+#define VERR_PGM_PHYS_NOT_MMIO2                 (-1642)
+/** Already aliased to a different page. */
+#define VERR_PGM_HANDLER_ALREADY_ALIASED        (-1643)
+/** Already aliased to the same page. */
+#define VINF_PGM_HANDLER_ALREADY_ALIASED        (1643)
 /** @} */
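A note on the convention these additions rely on: VBox status codes are negative for errors (VERR_*) and positive for informational successes (VINF_*), which is why the new pair can share the magnitude 1643. A minimal, hypothetical sketch of a caller telling them apart; the helper name is made up, only VBox/err.h and the two status codes are from this changeset:

    #include <VBox/err.h>

    /* Returns 1 when an aliasing call is effectively done, 0 otherwise. */
    static int sampleAliasRcIsDone(int rc)
    {
        /* VINF_PGM_HANDLER_ALREADY_ALIASED (1643) is a success status; its
           VERR_ twin (-1643) is a failure and needs a reset before retrying. */
        return rc == VINF_SUCCESS
            || rc == VINF_PGM_HANDLER_ALREADY_ALIASED;
    }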
trunk/src/VBox/VMM/PGMInternal.h
--- src/VBox/VMM/PGMInternal.h (r18143)
+++ src/VBox/VMM/PGMInternal.h (r18230)

@@ -2867 +2867 @@
 void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
 bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
+void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
 int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
 DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
trunk/src/VBox/VMM/PGMPhys.cpp
--- src/VBox/VMM/PGMPhys.cpp (r18205)
+++ src/VBox/VMM/PGMPhys.cpp (r18230)

@@ -660 +660 @@
 
 
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+/**
+ * Frees a range of pages, replacing them with ZERO pages of the specified type.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pRam        The RAM range in which the pages reside.
+ * @param   GCPhys      The address of the first page.
+ * @param   GCPhysLast  The address of the last page.
+ * @param   uType       The page type to replace them with.
+ */
+static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
+{
+    uint32_t            cPendingPages = 0;
+    PGMMFREEPAGESREQ    pReq;
+    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+    AssertLogRelRCReturn(rc, rc);
+
+    /* Iterate the pages. */
+    PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
+    uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
+    while (cPagesLeft-- > 0)
+    {
+        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
+        AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
+
+        PGM_PAGE_SET_TYPE(pPageDst, uType);
+
+        GCPhys += PAGE_SIZE;
+        pPageDst++;
+    }
+
+    if (cPendingPages)
+    {
+        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
+        AssertLogRelRCReturn(rc, rc);
+    }
+    GMMR3FreePagesCleanup(pReq);
+
+    return rc;
+}
+#endif /* VBOX_WITH_NEW_PHYS_CODE */
+
+
 /**
  * Sets up a RAM range.

@@ -809 +853 @@
                     AssertLogRelRCReturn(rc, rc);
                 }
+                break;
+
+            case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                 break;

@@ -871 +919 @@
 #endif /* VBOX_WITH_NEW_PHYS_CODE */
 
+                case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+                    pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
+                    break;
+
                 case PGMPAGETYPE_MMIO2:
                 case PGMPAGETYPE_ROM_SHADOW:

@@ -982 +1034 @@
     PPGMRAMRANGE pNew;
     if (fRamExists)
+    {
         pNew = NULL;
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+        /*
+         * Make all the pages in the range MMIO/ZERO pages, freeing any
+         * RAM pages currently mapped here. This might not be 100% correct
+         * for PCI memory, but we're doing the same thing for MMIO2 pages.
+         */
+        rc = pgmLock(pVM);
+        if (RT_SUCCESS(rc))
+        {
+            rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
+            pgmUnlock(pVM);
+        }
+        AssertRCReturn(rc, rc);
+#endif
+    }
     else
     {

@@ -1067 +1135 @@
     if (RT_SUCCESS(rc))
     {
-        RTGCPHYS GCPhysLast= GCPhys + (cb - 1);
-        PPGMRAMRANGE pRamPrev= NULL;
-        PPGMRAMRANGE pRam= pVM->pgm.s.pRamRangesR3;
+        RTGCPHYS        GCPhysLast = GCPhys + (cb - 1);
+        PPGMRAMRANGE    pRamPrev   = NULL;
+        PPGMRAMRANGE    pRam       = pVM->pgm.s.pRamRangesR3;
         while (pRam && GCPhysLast >= pRam->GCPhys)
         {
-            /*if (    GCPhysLast >= pRam->GCPhys
-                   && GCPhys     <= pRam->GCPhysLast) - later */
+            /** @todo We're being a bit too careful here. rewrite. */
             if (    GCPhysLast == pRam->GCPhysLast
                 &&  GCPhys     == pRam->GCPhys)

@@ -1082 +1149 @@
                  * See if all the pages are dead MMIO pages.
                  */
+                uint32_t const  cPages   = cb >> PAGE_SHIFT;
                 bool            fAllMMIO = true;
-                PPGMPAGE        pPage    = &pRam->aPages[0];
-                uint32_t const  cPages   = cb >> PAGE_SHIFT;
+                uint32_t        iPage    = 0;
                 uint32_t        cLeft    = cPages;
                 while (cLeft-- > 0)
                 {
+                    PPGMPAGE    pPage    = &pRam->aPages[iPage];
                     if (    PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
                         /*|| not-out-of-action later */)
                     {
                         fAllMMIO = false;
+#ifdef VBOX_WITH_NEW_PHYS_CODE
                         Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
+                        AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
+#endif
                         break;
                     }

@@ -1098 +1169 @@
                     pPage++;
                 }
-
-                /*
-                 * Unlink it and free if it's all MMIO.
-                 */
                 if (fAllMMIO)
                 {
+                    /*
+                     * Ad-hoc range, unlink and free it.
+                     */
                     Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
                          GCPhys, GCPhysLast, pRam->pszDesc));

@@ -1113 +1183 @@
                     pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
                     MMHyperFree(pVM, pRam);
+                    break;
+                }
+            }
+
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+            /*
+             * Range match? It will all be within one range (see PGMAllHandler.cpp).
+             */
+            if (    GCPhysLast >= pRam->GCPhys
+                &&  GCPhys     <= pRam->GCPhysLast)
+            {
+                Assert(GCPhys     >= pRam->GCPhys);
+                Assert(GCPhysLast <= pRam->GCPhysLast);
+
+                /*
+                 * Turn the pages back into RAM pages.
+                 */
+                uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
+                uint32_t cLeft = cb >> PAGE_SHIFT;
+                while (cLeft--)
+                {
+                    PPGMPAGE pPage = &pRam->aPages[iPage];
+                    AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
+                    AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
+                    if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
+                        PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_RAM);
                 }
                 break;
             }
+#endif
 
             /* next */

@@ -1452 +1549 @@
     if (fRamExists)
     {
+        /** @todo use pgmR3PhysFreePageRange here. */
         uint32_t            cPendingPages = 0;
         PGMMFREEPAGESREQ    pReq;

@@ -3088 +3186 @@
             for (uint32_t iPage = 0; iPage < cPages; iPage++)
                 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
-                    LogRel(("PGM: Used by %RGp %R{pgmpage} (%s)\n",
+                    LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
                             pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
         }
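The new helper batches its GMM calls: prepare a request sized to PGMPHYS_FREE_PAGE_BATCH_SIZE, let pgmPhysFreePage queue each page (flushing full batches as it goes), then perform whatever is still pending and clean up. Below is a generic, self-contained restatement of that flow, not code from the changeset; every sample* name and the SAMPLEFREEREQ type are hypothetical stand-ins, only the shape mirrors pgmR3PhysFreePageRange:

    /* Hypothetical batch request; stands in for GMMFREEPAGESREQ. */
    typedef struct SAMPLEFREEREQ { unsigned cMaxPages; unsigned cPending; } SAMPLEFREEREQ;

    static int samplePerform(SAMPLEFREEREQ *pReq)
    {
        /* The real code issues one bulk hypercall here. */
        pReq->cPending = 0;
        return 0;
    }

    static int sampleQueuePage(SAMPLEFREEREQ *pReq, unsigned idPage)
    {
        (void)idPage;                            /* record the page id in the request */
        if (++pReq->cPending >= pReq->cMaxPages) /* flush when the batch fills up */
            return samplePerform(pReq);
        return 0;
    }

    static int sampleFreeRange(unsigned idFirstPage, unsigned cPages)
    {
        SAMPLEFREEREQ Req = { 2048, 0 };         /* the "prepare" step */
        int rc = 0;
        while (cPages-- > 0 && rc == 0)
            rc = sampleQueuePage(&Req, idFirstPage++);
        if (rc == 0 && Req.cPending)             /* "perform" the tail of the batch */
            rc = samplePerform(&Req);
        return rc;                               /* "cleanup" would release Req here */
    }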
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
--- src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r17533)
+++ src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r18230)

@@ -1764 +1764 @@
 #ifndef IN_RC
 /**
- * Modify an existing MMIO region page; map to another guest physical region and change the access flags
+ * Maps an MMIO2 page in place of an MMIO page for direct access.
+ *
+ * (This is a special optimization used by the VGA device.)
  *
  * @returns VBox status code.
  *
  * @param   pVM             The virtual machine.
- * @param   GCPhys          Physical address that's part of the MMIO region to be changed.
- * @param   GCPhysRemapped  Remapped address.
- * @param   fPageFlags      Page flags to set (typically X86_PTE_RW).
+ * @param   GCPhys          The address of the MMIO page to be changed.
+ * @param   GCPhysRemapped  The address of the MMIO2 page.
+ * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
+ *                          for the time being.
  */
 VMMDECL(int) IOMMMIOModifyPage(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
 {
-    Assert(fPageFlags == (X86_PTE_RW | X86_PTE_P));
-
     Log(("IOMMMIOModifyPage %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
+
+    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
 
     /* This currently only works in real mode, protected mode without paging or with nested paging. */

@@ -1786 +1789 @@
 
     /*
-     * Lookup the current context range node and statistics.
+     * Lookup the context range node the page belongs to.
      */
     PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
     AssertMsgReturn(pRange,
                     ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
-                    VERR_INTERNAL_ERROR);
-
-    GCPhys &= ~(RTGCPHYS)0xfff;
-    GCPhysRemapped &= ~(RTGCPHYS)0xfff;
+                    VERR_IOM_MMIO_RANGE_NOT_FOUND);
+    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
+    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
+
+    /*
+     * Do the aliasing; page align the addresses since PGM is picky.
+     */
+    GCPhys         &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
+    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
 
     int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
     AssertRCReturn(rc, rc);
 
-#ifdef VBOX_STRICT
+    /*
+     * Modify the shadow page table. Since it's an MMIO page it won't be present and we
+     * can simply prefetch it.
+     *
+     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
+     */
+#ifndef VBOX_WITH_NEW_PHYS_CODE /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
+# ifdef VBOX_STRICT
     uint64_t fFlags;
     RTHCPHYS HCPhys;
     rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
     Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
-#endif
-
-    /* @note this is a NOP in the EPT case; we'll just let it fault again to resync the page. */
+# endif
+#endif
     rc = PGMPrefetchPage(pVM, (RTGCPTR)GCPhys);
     Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);

@@ -1821 +1835 @@
  * @param   GCPhys          Physical address that's part of the MMIO region to be reset.
  */
 VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
 {
     Log(("IOMMMIOResetRegion %RGp\n", GCPhys));

@@ -1832 +1846 @@
 
     /*
-     * Lookup the current context range node and statistics.
+     * Lookup the context range node the page belongs to.
      */
     PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
     AssertMsgReturn(pRange,
                     ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
-                    VERR_INTERNAL_ERROR);
-
-    /* Reset the entire range by clearing all shadow page table entries. */
+                    VERR_IOM_MMIO_RANGE_NOT_FOUND);
+
+    /*
+     * Call PGM to do the work.
+     *
+     * After the call, all the pages should be non-present... unless there is
+     * a page pool flush pending (unlikely).
+     */
     int rc = PGMHandlerPhysicalReset(pVM, pRange->GCPhys);
     AssertRC(rc);
 
 #ifdef VBOX_STRICT
-    uint32_t cb = pRange->cb;
-    GCPhys = pRange->GCPhys;
-    while (cb)
-    {
-        uint64_t fFlags;
-        RTHCPHYS HCPhys;
-        rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
-        Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
-        cb -= PAGE_SIZE;
-        GCPhys += PAGE_SIZE;
-    }
-#endif
-    return VINF_SUCCESS;
+    if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
+    {
+        uint32_t cb = pRange->cb;
+        GCPhys = pRange->GCPhys;
+        while (cb)
+        {
+            uint64_t fFlags;
+            RTHCPHYS HCPhys;
+            rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
+            Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
+            cb -= PAGE_SIZE;
+            GCPhys += PAGE_SIZE;
+        }
+    }
+#endif
+    return rc;
 }
 #endif /* !IN_RC */
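Taken together, the two functions give a device a simple enable/disable pattern. A hedged usage sketch, not from the changeset: the wrapper names and addresses are hypothetical, while the calls, the flag restriction, and the "any address in the range" behaviour are those documented above (include paths assumed from the 2009 tree layout):

    #include <VBox/iom.h>   /* IOMMMIOModifyPage, IOMMMIOResetRegion (assumed path) */
    #include <VBox/x86.h>   /* X86_PTE_P, X86_PTE_RW (assumed path) */

    /* Alias one MMIO page to an MMIO2 page so the guest can touch it directly. */
    static int sampleEnableDirectAccess(PVM pVM, RTGCPHYS GCPhysMmioPage, RTGCPHYS GCPhysMmio2Page)
    {
        /* Only (X86_PTE_RW | X86_PTE_P) is accepted for the time being. */
        return IOMMMIOModifyPage(pVM, GCPhysMmioPage, GCPhysMmio2Page, X86_PTE_RW | X86_PTE_P);
    }

    /* Undo it for the whole range; any address inside the range identifies it. */
    static int sampleDisableDirectAccess(PVM pVM, RTGCPHYS GCPhysMmioPage)
    {
        return IOMMMIOResetRegion(pVM, GCPhysMmioPage);
    }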
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
--- src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r17509)
+++ src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r18230)

@@ -216 +216 @@
  */
     bool fFlushTLBs = false;
-    int rc = VINF_SUCCESS;
-    const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
-    RTUINT cPages = pCur->cPages;
-    RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
+    int             rc      = VINF_SUCCESS;
+    const unsigned  uState  = pgmHandlerPhysicalCalcState(pCur);
+    uint32_t        cPages  = pCur->cPages;
+    uint32_t        i       = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
     for (;;)
     {

@@ -235 +235 @@
                 return rc2;
             }
 
 #endif /* !VBOX_WITH_NEW_PHYS_CODE */
+        PPGMPAGE pPage = &pRam->aPages[i];
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+        AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage),
+                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << PAGE_SHIFT), pPage));
+#endif
 
         /* Only do upgrades. */
-        PPGMPAGE pPage = &pRam->aPages[i];
         if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
         {
             PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
+#ifndef VBOX_WITH_NEW_PHYS_CODE
             Assert(PGM_PAGE_GET_HCPHYS(pPage));
+#endif
 
             int rc2 = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);

@@ -415 +422 @@
 
 
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+/**
+ * Resets an aliased page.
+ *
+ * @param   pVM         The VM.
+ * @param   pPage       The page.
+ * @param   GCPhysPage  The page address in case it comes in handy.
+ */
+void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
+{
+    Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO);
+    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
+
+    /*
+     * Flush any shadow page table references *first*.
+     */
+    bool fFlushTLBs = false;
+    int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
+    AssertLogRelRCReturnVoid(rc);
+    if (rc == VINF_PGM_GCPHYS_ALIASED)
+    {
+        pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
+        rc = VINF_PGM_SYNC_CR3;
+    }
+# ifdef IN_RC
+    else if (fFlushTLBs)
+        PGM_INVL_GUEST_TLBS();
+# else
+    HWACCMFlushTLB(pVM);
+# endif
+    pVM->pgm.s.fPhysCacheFlushPending = true;
+
+    /*
+     * Make it an MMIO/Zero page.
+     */
+    PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
+    PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO);
+    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
+    PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
+    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
+
+    NOREF(GCPhysPage);
+}
+#endif
+
+
 /**
  * Resets ram range flags.

@@ -441 +495 @@
         int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
         if (RT_SUCCESS(rc))
+        {
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+            /* Reset aliased MMIO2-for-MMIO pages back to MMIO, since this aliasing
+               is our business. (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
+            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
+                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys);
+            AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
+#endif
             PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
+        }
         else
             AssertRC(rc);

@@ -767 +830 @@
  * page access handler region.
  *
- * This is used together with PGMHandlerPhysicalPageTempOff().
+ * This is used together with PGMHandlerPhysicalPageTempOff() or
+ * PGMHandlerPhysicalPageAlias().
  *
  * @returns VBox status code.
  * @param   pVM     VM Handle
- * @param   GCPhys  Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
+ * @param   GCPhys  The start address of the handler region, i.e. what you
+ *                  passed to PGMR3HandlerPhysicalRegister(),
+ *                  PGMHandlerPhysicalRegisterEx() or
+ *                  PGMHandlerPhysicalModify().
  */
 VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)

@@ -791 +858 @@
         case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
         case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
-        case PGMPHYSHANDLERTYPE_MMIO: /* @note Only use when clearing aliased mmio ranges! */
+        case PGMPHYSHANDLERTYPE_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
         {
-            /*
-             * Set the flags and flush shadow PT entries.
-             */
-            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset));
+            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
             PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
             Assert(pRam);
-            rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
-            if (rc == VINF_PGM_GCPHYS_ALIASED)
-            {
-                pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
-                VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-            }
-            pVM->pgm.s.fPhysCacheFlushPending = true;
-            HWACCMFlushTLB(pVM);
+            Assert(pRam->GCPhys     <= pCur->Core.Key);
+            Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
+
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+            if (pCur->enmType == PGMPHYSHANDLERTYPE_MMIO)
+            {
+                /*
+                 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
+                 * This could probably be optimized a bit wrt flushing, but I'm too lazy
+                 * to do that now...
+                 */
+                PPGMPAGE pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT];
+                uint32_t cLeft = pCur->cPages;
+                while (cLeft-- > 0)
+                {
+                    if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
+                        pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT));
+                    Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
+                    pPage++;
+                }
+            }
+            else
+#endif
+            {
+                /*
+                 * Set the flags and flush shadow PT entries.
+                 */
+                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
+                if (rc == VINF_PGM_GCPHYS_ALIASED)
+                {
+                    pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+                    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
+                }
+                pVM->pgm.s.fPhysCacheFlushPending = true;
+                HWACCMFlushTLB(pVM);
+            }
 
             rc = VINF_SUCCESS;

@@ -843 +935 @@
  *
  * @returns VBox status code.
- * @param   pVM         VM Handle
- * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
- *                      This must be a fully page aligned range or we risk messing up other
- *                      handlers installed for the start and end pages.
- * @param   GCPhysPage  Physical address of the page to turn off access monitoring for.
+ * @param   pVM         VM Handle
+ * @param   GCPhys      The start address of the access handler. This
+ *                      must be a fully page aligned range or we risk
+ *                      messing up other handlers installed for the
+ *                      start and end pages.
+ * @param   GCPhysPage  The physical address of the page to turn off
+ *                      access monitoring for.
  */
 VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)

@@ -891 +985 @@
 
 /**
- * Temporarily turns off the access monitoring of a page within an MMIO
- * access handler region and remaps it to another guest physical region.
- *
- * Use this when no further \#PFs are required for that page. Be aware that
- * a page directory sync might reset the flags, and turn on access monitoring
- * for the page.
- *
- * The caller must do required page table modifications.
+ * Replaces an MMIO page with an MMIO2 page.
+ *
+ * This is a worker for IOMMMIOModifyPage that works in a similar way to
+ * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has
+ * no backing, the caller must provide a replacement page. For various reasons
+ * the replacement page must be an MMIO2 page.
+ *
+ * The caller must do required page table modifications. You can get away
+ * without making any modifications since it's an MMIO page, the cost is an
+ * extra \#PF which will then resync the page.
+ *
+ * Call PGMHandlerPhysicalReset() to restore the MMIO page.
+ *
+ * The caller may still get handler callbacks even after this call and must be
+ * able to deal correctly with such calls. The reason for these callbacks is
+ * either that we're executing in the recompiler (which doesn't know about this
+ * arrangement) or that we've been restored from saved state (where we won't
+ * save the change).
  *
  * @returns VBox status code.
- * @param   pVM             VM Handle
- * @param   GCPhys          Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
- *                          This must be a fully page aligned range or we risk messing up other
- *                          handlers installed for the start and end pages.
- * @param   GCPhysPage      Physical address of the page to turn off access monitoring for.
- * @param   GCPhysPageRemap Physical address of the page that serves as backing memory.
+ * @param   pVM             The VM handle
+ * @param   GCPhys          The start address of the access handler. This
+ *                          must be a fully page aligned range or we risk
+ *                          messing up other handlers installed for the
+ *                          start and end pages.
+ * @param   GCPhysPage      The physical address of the page to turn off
+ *                          access monitoring for.
+ * @param   GCPhysPageRemap The physical address of the MMIO2 page that
+ *                          serves as backing memory.
+ *
+ * @remark  May cause a page pool flush if used on a page that is already
+ *          aliased.
+ *
+ * @note    This trick does only work reliably if the two pages are never ever
+ *          mapped in the same page table. If they are the page pool code will
+ *          be confused should either of them be flushed. See the special case
+ *          of zero page aliasing mentioned in #3170.
  */
 VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
 {
     /*
-     * Validate the range.
+     * Lookup and validate the range.
      */
     PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);

@@ -919 +1035 @@
         if (RT_LIKELY(    GCPhysPage >= pCur->Core.Key
                       &&  GCPhysPage <= pCur->Core.KeyLast))
         {
-            Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
-            Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
-
             AssertReturn(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, VERR_ACCESS_DENIED);
-            /** @todo r=bird: This totally breaks the new PGMPAGE management. Will probably
-             *        have to require that the current page is the zero page... Require
-             *        GCPhysPageRemap to be a MMIO2 page might help matters because those
-             *        pages aren't managed dynamically (at least not yet).
-             *        VBOX_WITH_NEW_PHYS_CODE TODO!
-             *
-             * A solution to this would be to temporarily change the page into a MMIO2 one
-             * and record that we've changed it. Only the physical page address would
-             * need to be copied over. The aliased page would have to be MMIO2 ofc, since
-             * RAM or ROM pages would require write sharing which is something we don't
-             * intend to implement just yet...
-             */
-
-            /*
-             * Note! This trick does only work reliably if the two pages are never ever
-             *       mapped in the same page table. If they are the page pool code will
-             *       be confused should either of them be flushed. See the special case
-             *       of zero page aliasing mentioned in #3170.
-             */
-
+            AssertReturn(!(pCur->Core.Key & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+            AssertReturn((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, VERR_INVALID_PARAMETER);
+
+            /*
+             * Get and validate the two pages.
+             */
             PPGMPAGE pPageRemap;
             int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPageRemap, &pPageRemap);
             AssertRCReturn(rc, rc);
-
-            /*
-             * Change the page status.
-             */
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+            AssertMsgReturn(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
+                            ("GCPhysPageRemap=%RGp %R[pgmpage]\n", GCPhysPageRemap, pPageRemap),
+                            VERR_PGM_PHYS_NOT_MMIO2);
+#endif
+
             PPGMPAGE pPage;
             rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
             AssertRCReturn(rc, rc);
-
-            /* Do the actual remapping here. This page now serves as an alias for the backing memory specified. */
 #ifdef VBOX_WITH_NEW_PHYS_CODE
-            AssertReleaseFailed(); /** @todo see todo above! */
+            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
+            {
+                AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
+                                ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
+                                VERR_PGM_PHYS_NOT_MMIO2);
+                if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
+                    return VINF_PGM_HANDLER_ALREADY_ALIASED;
+
+                /*
+                 * The page is already mapped as some other page, reset it
+                 * to an MMIO/ZERO page before doing the new mapping.
+                 */
+                Log(("PGMHandlerPhysicalPageAlias: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
+                     GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
+                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage);
+            }
+            Assert(PGM_PAGE_IS_ZERO(pPage));
+#endif
+
+            /*
+             * Do the actual remapping here.
+             * This page now serves as an alias for the backing memory specified.
+             */
+            LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
+                     GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
+#ifdef VBOX_WITH_NEW_PHYS_CODE
+            PGM_PAGE_SET_HCPHYS(pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
+            PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
+            PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+            PGM_PAGE_SET_PAGEID(pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
+            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
+            LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
 #else
             pPage->HCPhys = pPageRemap->HCPhys;
             PGM_PAGE_SET_TRACKING(pPage, 0);
-#endif
-
-            LogFlow(("PGMHandlerPhysicalPageAlias %RGp alias for %RGp (%R[pgmpage]) -> %R[pgmpage]\n",
-                     GCPhysPage, GCPhysPageRemap, pPageRemap, pPage));
             PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
+#endif
 
 #ifndef IN_RC
             HWACCMInvalidatePhysPage(pVM, GCPhysPage);

@@ -981 +1109 @@
 
 
+#if 0 /** @todo delete this. */
 /**
  * Turns access monitoring of a page within a monitored

@@ -1035 +1164 @@
         return VERR_PGM_HANDLER_NOT_FOUND;
 }
+#endif
 
 
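For callers, the contract sketched by the new comments is: alias the page, treat the informational "already aliased to the same page" status as success, and use PGMHandlerPhysicalReset() when done. A hypothetical sketch follows; the wrapper name is invented, the calls and status codes are from this changeset, and it assumes the usual VMM headers (VBox/pgm.h, VBox/err.h) are in scope:

    /* Alias one MMIO page in a registered handler range to an MMIO2 page. */
    static int sampleAliasOnePage(PVM pVM, RTGCPHYS GCPhysHandler, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysMmio2)
    {
        int rc = PGMHandlerPhysicalPageAlias(pVM, GCPhysHandler, GCPhysPage, GCPhysMmio2);
        if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED)
            rc = VINF_SUCCESS;  /* the page already points at that MMIO2 page */
        /* A later PGMHandlerPhysicalReset(pVM, GCPhysHandler) turns all aliased
           pages in the range back into MMIO/ZERO pages (see the MMIO case above). */
        return rc;
    }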
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
--- src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r18207)
+++ src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r18230)

@@ -668 +668 @@
      * them, that would also avoid this mess. It would actually be kind of
      * elegant... */
-    AssertFailedReturn(VERR_INTERNAL_ERROR);
+    AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR);
 }
 else