儲存庫 vbox 的更動 9893
- 時間戳記:
- 2008-6-24 下午03:56:57 (16 年 以前)
- 位置:
- trunk/src/VBox/VMM/VMMAll
- 檔案:
-
- 修改 5 筆資料
圖例:
- 未更動
- 新增
- 刪除
-
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r9890 r9893 859 859 && !(pPdpe->u & X86_PDPE_PG_MASK)) 860 860 { 861 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e]; 861 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e]; 862 862 PX86PDPT pPdptGst; 863 863 rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst); -
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r9890 r9893 140 140 # if PGM_GST_TYPE == PGM_TYPE_PAE 141 141 /* Did we mark the PDPT as not present in SyncCR3? */ 142 unsigned iP DPTE= ((RTGCUINTPTR)pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;143 if (!pVM->pgm.s.CTXMID(p,PaePDPT)->a[iP DPTE].n.u1Present)144 pVM->pgm.s.CTXMID(p,PaePDPT)->a[iP DPTE].n.u1Present = 1;142 unsigned iPdpte = ((RTGCUINTPTR)pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK; 143 if (!pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present) 144 pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present = 1; 145 145 146 146 # endif … … 874 874 PX86PDE pPdeDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst]; 875 875 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 876 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; 877 const unsigned iPd Pte = (GCPtrPage >> X86_PDPT_SHIFT); /* no mask; flat index into the 2048 entry array. */876 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; /* no mask; flat index into the 2048 entry array. */ 877 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT); 878 878 PX86PDEPAE pPdeDst = &pVM->pgm.s.CTXMID(ap,PaePDs[0])->a[iPDDst]; 879 879 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); … … 883 883 884 884 const unsigned iPml4e = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK; 885 const unsigned iPd Pte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;885 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 886 886 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 887 887 PX86PDPAE pPDDst; … … 893 893 AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc)); 894 894 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePageSkipped)); 895 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)) 896 PGM_INVL_GUEST_TLBS(); 895 897 return VINF_SUCCESS; 896 898 } … … 898 900 899 901 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst]; 900 PX86PDPE pPdpeDst = &pPdptDst->a[iPdPte]; 902 PX86PDPE pPdpeDst = &pPdptDst->a[iPdpte]; 903 904 if (!pPdpeDst->n.u1Present) 905 
{ 906 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePageSkipped)); 907 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)) 908 PGM_INVL_GUEST_TLBS(); 909 return VINF_SUCCESS; 910 } 911 901 912 # endif 902 913 … … 932 943 # endif 933 944 934 const uint32_t cr4 = CPUMGetGuestCR4(pVM);935 945 # if PGM_GST_TYPE == PGM_TYPE_AMD64 936 946 const bool fIsBigPage = PdeSrc.b.u1Size; 937 947 # else 938 const bool fIsBigPage = PdeSrc.b.u1Size && ( cr4& X86_CR4_PSE);948 const bool fIsBigPage = PdeSrc.b.u1Size && (CPUMGetGuestCR4(pVM) & X86_CR4_PSE); 939 949 # endif 940 950 … … 950 960 && fIsBigPage 951 961 && PdeSrc.b.u1Global 952 && (cr4 & X86_CR4_PGE)953 962 ) 954 963 ) … … 963 972 964 973 # if PGM_GST_TYPE == PGM_TYPE_AMD64 974 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool); 975 965 976 /* Fetch the pgm pool shadow descriptor. */ 966 977 PPGMPOOLPAGE pShwPdpt = pgmPoolGetPageByHCPhys(pVM, pPml4eDst->u & X86_PML4E_PG_MASK); 967 978 Assert(pShwPdpt); 968 # endif969 970 # if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64971 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);972 979 973 980 /* Fetch the pgm pool shadow descriptor. */ 974 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPd Pte].u & SHW_PDPE_PG_MASK);981 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & SHW_PDPE_PG_MASK); 975 982 Assert(pShwPde); 976 # endif 977 978 # if PGM_GST_TYPE == PGM_TYPE_AMD64 979 Assert(pPml4eDst->n.u1Present && pPml4eDst->u & SHW_PDPT_MASK); 980 if (pPml4eSrc->n.u1Present) 981 { 982 if ( pPml4eSrc->n.u1User != pPml4eDst->n.u1User 983 || (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write)) 984 { 985 /* 986 * Mark not present so we can resync the PML4E when it's used. 
987 */ 988 LogFlow(("InvalidatePage: Out-of-sync PML4E at %VGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n", 989 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u)); 990 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pShwAmd64CR3->idx, iPml4e); 991 pPml4eDst->u = 0; 992 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync)); 993 PGM_INVL_GUEST_TLBS(); 994 } 995 else if (!pPml4eSrc->n.u1Accessed) 996 { 997 /* 998 * Mark not present so we can set the accessed bit. 999 */ 1000 LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %VGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n", 1001 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u)); 1002 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pShwAmd64CR3->idx, iPml4e); 1003 pPml4eDst->u = 0; 1004 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNAs)); 1005 PGM_INVL_GUEST_TLBS(); 1006 } 1007 } 1008 else 1009 { 1010 LogFlow(("InvalidatePage: Out-of-sync PML4E (P) at %VGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n", 1011 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u)); 1012 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pShwAmd64CR3->idx, iPml4e); 1013 pPml4eDst->u = 0; 1014 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNPs)); 1015 PGM_INVL_PG(GCPtrPage); 1016 return VINF_SUCCESS; 1017 } 983 984 Assert(pPml4eDst->n.u1Present && (pPml4eDst->u & SHW_PDPT_MASK)); 1018 985 RTGCPHYS GCPhysPdpt = pPml4eSrc->u & X86_PML4E_PG_MASK; 1019 986 1020 /* Check if the PML4 entry has changed. 
*/1021 if (pShwPdpt->GCPhys != GCPhysPdpt)1022 { 1023 LogFlow(("InvalidatePage: Out-of-sync PML4E ( GCPhys) at %VGv%VGp vs %VGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",987 if ( !pPml4eSrc->n.u1Present 988 || pShwPdpt->GCPhys != GCPhysPdpt) 989 { 990 LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %VGv GCPhys=%VGp vs %VGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n", 1024 991 GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u)); 1025 992 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pShwAmd64CR3->idx, iPml4e); 1026 993 pPml4eDst->u = 0; 1027 994 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNPs)); 1028 PGM_INVL_ PG(GCPtrPage);995 PGM_INVL_GUEST_TLBS(); 1029 996 return VINF_SUCCESS; 1030 997 } 1031 1032 998 if ( pPml4eSrc->n.u1User != pPml4eDst->n.u1User 999 || (!pPml4eSrc->n.u1Write && pPml4eDst->n.u1Write)) 1000 { 1001 /* 1002 * Mark not present so we can resync the PML4E when it's used. 1003 */ 1004 LogFlow(("InvalidatePage: Out-of-sync PML4E at %VGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n", 1005 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u)); 1006 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pShwAmd64CR3->idx, iPml4e); 1007 pPml4eDst->u = 0; 1008 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync)); 1009 PGM_INVL_GUEST_TLBS(); 1010 } 1011 else if (!pPml4eSrc->n.u1Accessed) 1012 { 1013 /* 1014 * Mark not present so we can set the accessed bit. 1015 */ 1016 LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %VGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n", 1017 GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u)); 1018 pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pShwAmd64CR3->idx, iPml4e); 1019 pPml4eDst->u = 0; 1020 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNAs)); 1021 PGM_INVL_GUEST_TLBS(); 1022 } 1023 1024 /* Check if the PDPT entry has changed. 
*/ 1033 1025 Assert(pPdpeDst->n.u1Present && pPdpeDst->u & SHW_PDPT_MASK); 1034 if (PdpeSrc.n.u1Present) 1035 { 1036 if ( PdpeSrc.lm.u1User != pPdpeDst->lm.u1User 1037 || (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write)) 1038 { 1039 /* 1040 * Mark not present so we can resync the PDPTE when it's used. 1041 */ 1042 LogFlow(("InvalidatePage: Out-of-sync PDPE at %VGv PdpeSrc=%RX64 PdpeDst=%RX64\n", 1043 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 1044 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdPte); 1045 pPdpeDst->u = 0; 1046 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync)); 1047 PGM_INVL_GUEST_TLBS(); 1048 } 1049 else if (!PdpeSrc.lm.u1Accessed) 1050 { 1051 /* 1052 * Mark not present so we can set the accessed bit. 1053 */ 1054 LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %VGv PdpeSrc=%RX64 PdpeDst=%RX64\n", 1055 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 1056 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdPte); 1057 pPdpeDst->u = 0; 1058 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNAs)); 1059 PGM_INVL_GUEST_TLBS(); 1060 } 1061 } 1062 else 1063 { 1064 LogFlow(("InvalidatePage: Out-of-sync PDPE (P) at %VGv PdpeSrc=%RX64 PdpeDst=%RX64\n", 1065 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 1066 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdPte); 1026 RTGCPHYS GCPhysPd = PdpeSrc.u & GST_PDPE_PG_MASK; 1027 if ( !PdpeSrc.n.u1Present 1028 || pShwPde->GCPhys != GCPhysPd) 1029 { 1030 LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %VGv GCPhys=%VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n", 1031 GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 1032 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpte); 1067 1033 pPdpeDst->u = 0; 1068 1034 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNPs)); 1069 PGM_INVL_ PG(GCPtrPage);1035 PGM_INVL_GUEST_TLBS(); 1070 1036 return VINF_SUCCESS; 1071 1037 } 1072 RTGCPHYS GCPhysPd = PdpeSrc.u & 
GST_PDPE_PG_MASK; 1073 1074 /* Check if the PDPT entry has changed. */ 1075 if (pShwPde->GCPhys != GCPhysPd) 1076 { 1077 LogFlow(("InvalidatePage: Out-of-sync PDPE (GCPhys) at %VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n", 1078 GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 1079 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdPte); 1038 if ( PdpeSrc.lm.u1User != pPdpeDst->lm.u1User 1039 || (!PdpeSrc.lm.u1Write && pPdpeDst->lm.u1Write)) 1040 { 1041 /* 1042 * Mark not present so we can resync the PDPTE when it's used. 1043 */ 1044 LogFlow(("InvalidatePage: Out-of-sync PDPE at %VGv PdpeSrc=%RX64 PdpeDst=%RX64\n", 1045 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 1046 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpte); 1080 1047 pPdpeDst->u = 0; 1081 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNPs)); 1082 PGM_INVL_PG(GCPtrPage); 1083 return VINF_SUCCESS; 1084 } 1085 # endif 1048 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDOutOfSync)); 1049 PGM_INVL_GUEST_TLBS(); 1050 } 1051 else if (!PdpeSrc.lm.u1Accessed) 1052 { 1053 /* 1054 * Mark not present so we can set the accessed bit. 
1055 */ 1056 LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %VGv PdpeSrc=%RX64 PdpeDst=%RX64\n", 1057 GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 1058 pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpte); 1059 pPdpeDst->u = 0; 1060 STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePagePDNAs)); 1061 PGM_INVL_GUEST_TLBS(); 1062 } 1063 # endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */ 1064 1065 # if PGM_GST_TYPE == PGM_TYPE_PAE 1066 1067 # endif 1068 1086 1069 1087 1070 /* … … 1108 1091 LogFlow(("InvalidatePage: Out-of-sync at %VGp PdeSrc=%RX64 PdeDst=%RX64\n", 1109 1092 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u)); 1110 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD641093 # if PGM_GST_TYPE == PGM_TYPE_AMD64 1111 1094 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1112 1095 # else … … 1124 1107 LogFlow(("InvalidatePage: Out-of-sync (A) at %VGp PdeSrc=%RX64 PdeDst=%RX64\n", 1125 1108 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u)); 1126 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD641109 # if PGM_GST_TYPE == PGM_TYPE_AMD64 1127 1110 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1128 1111 # else … … 1172 1155 LogFlow(("InvalidatePage: Out-of-sync at %VGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%VGp iPDDst=%#x\n", 1173 1156 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst)); 1174 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD641157 # if PGM_GST_TYPE == PGM_TYPE_AMD64 1175 1158 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1176 1159 # else … … 1218 1201 LogFlow(("InvalidatePage: Out-of-sync PD at %VGp PdeSrc=%RX64 PdeDst=%RX64\n", 1219 1202 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u)); 1220 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD641203 # if PGM_GST_TYPE == PGM_TYPE_AMD64 1221 1204 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1222 1205 # else … 
… 1235 1218 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING)) 1236 1219 { 1237 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD641220 # if PGM_GST_TYPE == PGM_TYPE_AMD64 1238 1221 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1239 1222 # else … … 1526 1509 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 1527 1510 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; 1528 const unsigned iPd Pte = (GCPtrPage >> X86_PDPT_SHIFT); /* no mask; flat index into the 2048 entry array. */1511 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT); /* no mask; flat index into the 2048 entry array. */ 1529 1512 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); 1530 1513 X86PDEPAE PdeDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst]; 1531 1514 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 1532 1515 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK); 1533 const unsigned iPd Pte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;1516 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 1534 1517 PX86PDPAE pPDDst; 1535 1518 X86PDEPAE PdeDst; … … 1546 1529 # if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64 1547 1530 /* Fetch the pgm pool shadow descriptor. 
*/ 1548 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPd Pte].u & X86_PDPE_PG_MASK);1531 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK); 1549 1532 Assert(pShwPde); 1550 1533 # endif … … 1764 1747 */ 1765 1748 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool); 1766 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD641749 # if PGM_GST_TYPE == PGM_TYPE_AMD64 1767 1750 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst); 1768 1751 # else … … 2232 2215 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 2233 2216 const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT; 2234 const unsigned iPd Pte = (GCPtrPage >> X86_PDPT_SHIFT);2217 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT); 2235 2218 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); 2236 2219 PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; 2237 2220 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 2238 2221 const unsigned iPml4e = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK; 2239 const unsigned iPd Pte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;2222 const unsigned iPdpte = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; 2240 2223 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 2241 2224 PX86PDPAE pPDDst; … … 2253 2236 SHWPDE PdeDst = *pPdeDst; 2254 2237 2255 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD642238 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2256 2239 /* Fetch the pgm pool shadow descriptor. 
*/ 2257 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPd Pte].u & X86_PDPE_PG_MASK);2240 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(pVM, pPdptDst->a[iPdpte].u & X86_PDPE_PG_MASK); 2258 2241 Assert(pShwPde); 2259 2242 # endif … … 2319 2302 GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2); 2320 2303 # endif 2321 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD642304 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2322 2305 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage); 2323 2306 # else … … 2332 2315 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT); 2333 2316 # endif 2334 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD642317 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2335 2318 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx, iPDDst, &pShwPage); 2336 2319 # else … … 3114 3097 # endif 3115 3098 # if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64 3116 for (uint64_t iP DPTE = 0; iPDPTE < GST_PDPE_ENTRIES; iPDPTE++)3099 for (uint64_t iPdpte = 0; iPdpte < GST_PDPE_ENTRIES; iPdpte++) 3117 3100 { 3118 3101 unsigned iPDSrc; 3119 PPGMPOOLPAGE pShwPde = NULL;3120 PX86PDPE pPdpeDst;3121 RTGCPHYS GCPhysPdeSrc;3122 3102 # if PGM_GST_TYPE == PGM_TYPE_PAE 3123 3103 PX86PDPAE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; 3124 PX86PDEPAE pPDEDst = &pPDPAE->a[iP DPTE* X86_PG_PAE_ENTRIES];3125 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iP DPTE<< X86_PDPT_SHIFT, &iPDSrc);3104 PX86PDEPAE pPDEDst = &pPDPAE->a[iPdpte * X86_PG_PAE_ENTRIES]; 3105 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpte << X86_PDPT_SHIFT, &iPDSrc); 3126 3106 PX86PDPT pPdptDst = pVM->pgm.s.CTXMID(p,PaePDPT); 3127 X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPDPTE]; 3128 # else 3107 X86PDPE PdpeSrc = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[iPdpte]; 3108 3109 if (pPDSrc == NULL) 3110 { 3111 /* PDPE not present */ 3112 if (pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present) 3113 { 3114 /* for each page 
directory entry */ 3115 for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++) 3116 { 3117 if ( pPDEDst[iPD].n.u1Present 3118 && !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING)) 3119 { 3120 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdpte * X86_PG_PAE_ENTRIES + iPD); 3121 pPDEDst[iPD].u = 0; 3122 } 3123 } 3124 } 3125 if (!(pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].u & PGM_PLXFLAGS_MAPPING)) 3126 pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPdpte].n.u1Present = 0; 3127 continue; 3128 } 3129 # else /* PGM_GST_TYPE != PGM_TYPE_PAE */ 3130 PPGMPOOLPAGE pShwPde = NULL; 3131 RTGCPHYS GCPhysPdeSrc; 3132 PX86PDPE pPdpeDst; 3129 3133 PX86PML4E pPml4eSrc; 3130 3134 X86PDPE PdpeSrc; … … 3132 3136 PX86PDPAE pPDDst; 3133 3137 PX86PDEPAE pPDEDst; 3134 RTGCUINTPTR GCPtr = (iPml4e << X86_PML4_SHIFT) || (iP DPTE<< X86_PDPT_SHIFT);3138 RTGCUINTPTR GCPtr = (iPml4e << X86_PML4_SHIFT) || (iPdpte << X86_PDPT_SHIFT); 3135 3139 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc); 3136 3140 … … 3146 3150 Assert(pPDDst); 3147 3151 pPDEDst = &pPDDst->a[0]; 3148 # endif3149 3152 Assert(iPDSrc == 0); 3150 3153 3151 pPdpeDst = &pPdptDst->a[iP DPTE];3154 pPdpeDst = &pPdptDst->a[iPdpte]; 3152 3155 3153 3156 /* Fetch the pgm pool shadow descriptor if the shadow pdpte is present. */ … … 3163 3166 { 3164 3167 /* Free it. 
*/ 3165 # if PGM_GST_TYPE == PGM_TYPE_AMD643166 3168 LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n", 3167 ((uint64_t)iPml4e << X86_PML4_SHIFT) + ((uint64_t)iPDPTE << X86_PDPT_SHIFT), pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 3168 # else 3169 LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n", 3170 (uint64_t)iPDPTE << X86_PDPT_SHIFT, pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 3171 # endif 3169 ((uint64_t)iPml4e << X86_PML4_SHIFT) + ((uint64_t)iPdpte << X86_PDPT_SHIFT), pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u)); 3172 3170 3173 3171 /* Mark it as not present if there's no hypervisor mapping present. (bit flipped at the top of Trap0eHandler) */ 3174 if (!(pPdpeDst->u & PGM_PLXFLAGS_MAPPING)) 3175 { 3176 pgmPoolFreeByPage(pPool, pShwPde, pShwPde->idx, iPDPTE); 3177 pPdpeDst->u = 0; 3178 continue; /* next guest PDPTE */ 3179 } 3172 Assert(!(pPdpeDst->u & PGM_PLXFLAGS_MAPPING)); 3173 pgmPoolFreeByPage(pPool, pShwPde, pShwPde->idx, iPdpte); 3174 pPdpeDst->u = 0; 3175 continue; /* next guest PDPTE */ 3180 3176 } 3181 # if PGM_GST_TYPE == PGM_TYPE_AMD643182 3177 /* Force an attribute sync. 
*/ 3183 3178 pPdpeDst->lm.u1User = PdpeSrc.lm.u1User; 3184 3179 pPdpeDst->lm.u1Write = PdpeSrc.lm.u1Write; 3185 3180 pPdpeDst->lm.u1NoExecute = PdpeSrc.lm.u1NoExecute; 3186 # endif3181 # endif /* PGM_GST_TYPE != PGM_TYPE_PAE */ 3187 3182 3188 3183 # else /* PGM_GST_TYPE != PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_AMD64 */ … … 3208 3203 */ 3209 3204 # if PGM_GST_TYPE == PGM_TYPE_PAE 3210 if (iPD + iP DPTE* X86_PG_PAE_ENTRIES == iPdNoMapping)3205 if (iPD + iPdpte * X86_PG_PAE_ENTRIES == iPdNoMapping) 3211 3206 # else 3212 3207 if (iPD == iPdNoMapping) … … 3227 3222 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT); 3228 3223 # elif PGM_GST_TYPE == PGM_TYPE_PAE 3229 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iP DPTE<< GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));3224 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpte << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT)); 3230 3225 # endif 3231 3226 if (VBOX_FAILURE(rc)) … … 3330 3325 else 3331 3326 { 3332 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD643327 # if PGM_GST_TYPE == PGM_TYPE_AMD64 3333 3328 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPdShw); 3334 3329 # else … … 3345 3340 } 3346 3341 # if PGM_GST_TYPE == PGM_TYPE_PAE 3347 else if (iPD + iP DPTE* X86_PG_PAE_ENTRIES != iPdNoMapping)3342 else if (iPD + iPdpte * X86_PG_PAE_ENTRIES != iPdNoMapping) 3348 3343 # else 3349 3344 else if (iPD != iPdNoMapping) … … 3361 3356 if (pPDEDst->n.u1Present) 3362 3357 { 3363 # if PGM_GST_TYPE == PGM_TYPE_ PAE || PGM_GST_TYPE == PGM_TYPE_AMD643358 # if PGM_GST_TYPE == PGM_TYPE_AMD64 3364 3359 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), pShwPde->idx, iPdShw); 3365 3360 # else … … 3404 3399 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT); 3405 3400 # elif PGM_GST_TYPE == PGM_TYPE_PAE 3406 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iP DPTE<< GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));3401 
int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpte << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT)); 3407 3402 # endif 3408 3403 if (VBOX_FAILURE(rc)) -
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r9890 r9893 530 530 PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool); 531 531 pgmPoolFreeByPage(pPool, pVM->pgm.s.pShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pShwAmd64CR3->GCPhys >> PAGE_SHIFT); 532 pVM->pgm.s.pShwAmd64CR3 = NULL; 532 533 } 533 534 -
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r9890 r9893 318 318 uShw.pPTPae->a[iShw].u = 0; 319 319 } 320 321 /* paranoia / a bit assumptive. */ 322 if ( pCpu 323 && (off & 7) 324 && (off & 7) + pgmPoolDisasWriteSize(pCpu) > sizeof(X86PTEPAE)) 325 { 326 AssertFailed(); 327 } 328 320 329 break; 321 330 } … … 422 431 if ( pCpu 423 432 && (off & 7) 424 && (off & 7) + pgmPoolDisasWriteSize(pCpu) > sizeof(X86P TEPAE))433 && (off & 7) + pgmPoolDisasWriteSize(pCpu) > sizeof(X86PDEPAE)) 425 434 { 426 const unsigned iShw2 = (off + pgmPoolDisasWriteSize(pCpu) - 1) / sizeof(X86P TEPAE);435 const unsigned iShw2 = (off + pgmPoolDisasWriteSize(pCpu) - 1) / sizeof(X86PDEPAE); 427 436 if ( iShw2 != iShw 428 437 && iShw2 < ELEMENTS(uShw.pPDPae->a) -
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r9858 r9893 80 80 # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK 81 81 # define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES*X86_PG_AMD64_PDPE_ENTRIES) 82 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PAE_PD 82 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PAE_PD /* do not use! exception is real mode & protected mode without paging. */ 83 83 #else /* 32 bits PAE mode */ 84 84 # define SHW_PDPT_SHIFT X86_PDPT_SHIFT
注意:
瀏覽 TracChangeset
來幫助您使用更動檢視器