儲存庫 vbox 的更動 17035
- 時間戳記:
- 2009-2-23 下午10:26:39 (16 年 以前)
- 位置:
- trunk
- 檔案:
-
- 修改 10 筆資料
圖例:
- 未更動
- 新增
- 刪除
-
trunk/include/VBox/cpum.h
r16857 r17035 622 622 VMMDECL(void) CPUMGetGuestGDTR(PVM pVM, PVBOXGDTR pGDTR); 623 623 VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVM pVM, uint16_t *pcbLimit); 624 VMMDECL(RTSEL) CPUMGetGuestTR(PVM pVM );624 VMMDECL(RTSEL) CPUMGetGuestTR(PVM pVM, PCPUMSELREGHID pHidden); 625 625 VMMDECL(RTSEL) CPUMGetGuestLDTR(PVM pVM); 626 626 VMMDECL(uint64_t) CPUMGetGuestCR0(PVM pVM); … … 661 661 VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM); 662 662 VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM); 663 VMMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM);664 663 VMMDECL(uint64_t) CPUMGetGuestEFER(PVM pVM); 665 664 VMMDECL(uint64_t) CPUMGetGuestMsr(PVM pVM, unsigned idMsr); -
trunk/include/VBox/selm.h
r13718 r17035 45 45 VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM); 46 46 VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP); 47 VMMDECL(void) SELMSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);48 47 VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp); 49 48 VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM); -
trunk/src/VBox/VMM/SELM.cpp
r17032 r17035 1430 1430 1431 1431 /** 1432 * Check if the TSS ring 0 stack selector and pointer were updated (for now) 1432 * Synchronize the shadowed fields in the TSS. 1433 * 1434 * At present we're shadowing the ring-0 stack selector & pointer, and the 1435 * interrupt redirection bitmap (if present). We take the lazy approach wrt to 1436 * REM and this function is called both if REM made any changes to the TSS or 1437 * loaded TR. 1433 1438 * 1434 1439 * @returns VBox status code. … … 1445 1450 } 1446 1451 1447 /** @todo r=bird: SELMR3SyncTSS should be VMMAll code.1448 * All the base, size, flags and stuff must be kept up to date in the CPUM tr register.1449 */1450 1452 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a); 1451 1452 Assert(!VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT));1453 1453 Assert(VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS)); 1454 1454 1455 1455 /* 1456 * TSS sync 1457 */ 1458 RTSEL SelTss = CPUMGetGuestTR(pVM); 1456 * Get TR and extract and store the basic info. 1457 * 1458 * Note! The TSS limit is not checked by the LTR code, so we 1459 * have to be a bit careful with it. We make sure cbTss 1460 * won't be zero if TR is valid and if it's NULL we'll 1461 * make sure cbTss is 0. 1462 */ 1463 CPUMSELREGHID trHid; 1464 RTSEL SelTss = CPUMGetGuestTR(pVM, &trHid); 1465 RTGCPTR GCPtrTss = trHid.u64Base; 1466 uint32_t cbTss = trHid.u32Limit; 1467 Assert( (SelTss & X86_SEL_MASK) 1468 || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */) 1469 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */)); 1459 1470 if (SelTss & X86_SEL_MASK) 1460 1471 { 1461 /** @todo r=bird: strictly speaking, this is wrong as we shouldn't bother with changes to 1462 * the TSS selector once its loaded. There are a bunch of this kind of problems (see Sander's 1463 * comment in the unzip defect) 1464 * The first part here should only be done when we're loading TR. 
The latter part which is 1465 * updating of the ss0:esp0 pair can be done by the access handler now since we can trap all 1466 * accesses, also REM ones. */ 1467 1468 /* 1469 * Guest TR is not NULL. 1470 */ 1471 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelTss >> X86_SEL_SHIFT]; 1472 RTGCPTR GCPtrTss = X86DESC_BASE(*pDesc); 1473 unsigned cbTss = X86DESC_LIMIT(*pDesc); 1474 if (pDesc->Gen.u1Granularity) 1475 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1476 cbTss++; 1477 pVM->selm.s.cbGuestTss = cbTss; 1478 pVM->selm.s.fGuestTss32Bit = pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL 1479 || pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY; 1480 1481 /* 1482 * Presently we monitor the TSS and the interrupt redirection bitmap if it's present. 1483 * We're assuming the guest is playing nice and that the bits we're monitoring won't 1484 * cross page boundraries. (The TSS core must be on a single page, while the bitmap 1485 * probably doesn't need to be.) 1486 */ 1487 if (cbTss > sizeof(VBOXTSS)) 1488 cbTss = sizeof(VBOXTSS); 1489 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT), 1490 ("GCPtrTss=%RGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss)); 1491 1492 // All system GDTs are marked not present above. That explains why this check fails. 1493 //if (pDesc->Gen.u1Present) 1494 /** @todo Handle only present TSS segments. */ 1495 { 1472 Assert(!(SelTss & X86_SEL_LDT)); 1473 Assert(trHid.Attr.n.u1DescType == 0); 1474 Assert( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY 1475 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY); 1476 if (++cbTss) 1477 cbTss = UINT32_MAX; 1478 } 1479 else 1480 { 1481 Assert( (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */) 1482 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */)); 1483 cbTss = 0; /* the reset case. 
*/ 1484 } 1485 pVM->selm.s.cbGuestTss = cbTss; 1486 pVM->selm.s.fGuestTss32Bit = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL 1487 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY; 1488 1489 /* 1490 * Presently we monitor the TSS and the interrupt redirection bitmap if it's present. 1491 * We're assuming the guest is playing nice and that the bits we're monitoring won't 1492 * cross page boundraries. (The TSS core must be on a single page, while the bitmap 1493 * probably doesn't need to be.) 1494 */ 1495 uint32_t cbMonitoredTss = cbTss > sizeof(VBOXTSS) ? sizeof(VBOXTSS) : cbTss; 1496 1497 /* 1498 * We're also completely uninterested in a 16-bit TSS. 1499 */ 1500 if ( trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL 1501 && trHid.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) 1502 cbMonitoredTss = 0; 1503 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbMonitoredTss - 1) >> PAGE_SHIFT) || !cbMonitoredTss, 1504 ("GCPtrTss=%RGv cbMonitoredTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbMonitoredTss)); 1505 1506 /* 1507 * Check for monitor changes and apply them. 1508 */ 1509 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss 1510 || cbMonitoredTss != pVM->selm.s.cbMonitoredGuestTss) 1511 { 1512 Log(("SELMR3SyncTSS: Guest's TSS is changed to pTss=%RGv cbMonitoredTss=%08X cbGuestTss=%#08x\n", 1513 GCPtrTss, cbMonitoredTss, pVM->selm.s.cbGuestTss)); 1514 1515 /* Release the old range first. */ 1516 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX) 1517 { 1518 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss); 1519 AssertRC(rc); 1520 } 1521 1522 /* Register the write handler if TS != 0. 
*/ 1523 if (cbMonitoredTss != 0) 1524 { 1525 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1, 1526 0, selmR3GuestTSSWriteHandler, 1527 "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler"); 1528 if (RT_FAILURE(rc)) 1529 { 1530 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a); 1531 return rc; 1532 } 1533 1534 /* Update saved Guest TSS info. */ 1535 pVM->selm.s.GCPtrGuestTss = GCPtrTss; 1536 pVM->selm.s.cbMonitoredGuestTss = cbMonitoredTss; 1537 pVM->selm.s.GCSelTss = SelTss; 1538 } 1539 else 1540 { 1541 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX; 1542 pVM->selm.s.cbMonitoredGuestTss = 0; 1543 pVM->selm.s.GCSelTss = 0; 1544 } 1545 } 1546 1547 /* 1548 * Update the ring 0 stack selector and base address. 1549 * (Reading up to and including offIoBitmap to save effort in the VME case.) 1550 */ 1551 bool fNoRing1Stack = true; 1552 if (cbMonitoredTss) 1553 { 1554 VBOXTSS Tss; 1555 rc = PGMPhysSimpleReadGCPtr(pVM, &Tss, GCPtrTss, RT_OFFSETOF(VBOXTSS, offIoBitmap) + sizeof(Tss.offIoBitmap)); 1556 if (RT_SUCCESS(rc)) 1557 { 1558 #ifdef LOG_ENABLED 1559 if (LogIsEnabled()) 1560 { 1561 uint32_t ssr0, espr0; 1562 SELMGetRing1Stack(pVM, &ssr0, &espr0); 1563 if ((ssr0 & ~1) != Tss.ss0 || espr0 != Tss.esp0) 1564 { 1565 RTGCPHYS GCPhys = NIL_RTGCPHYS; 1566 rc = PGMGstGetPage(pVM, GCPtrTss, NULL, &GCPhys); AssertRC(rc); 1567 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X from %04X:%08X; TSS Phys=%VGp)\n", 1568 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys)); 1569 AssertMsg(ssr0 != Tss.ss0, 1570 ("ring-1 leak into TSS.SS0! %04X:%08X from %04X:%08X; TSS Phys=%VGp)\n", 1571 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys)); 1572 } 1573 Log(("offIoBitmap=%#x\n", Tss.offIoBitmap)); 1574 } 1575 #endif /* LOG_ENABLED */ 1576 AssertMsg(!(Tss.ss0 & 3), ("ring-1 leak into TSS.SS0? 
%04X:%08X\n", Tss.ss0, Tss.esp0)); 1577 1578 1579 /* Update our TSS structure for the guest's ring 1 stack */ 1580 selmSetRing1Stack(pVM, Tss.ss0 | 1, Tss.esp0); 1581 pVM->selm.s.fSyncTSSRing0Stack = fNoRing1Stack = false; 1582 1496 1583 /* 1497 * Check if Guest's TSS is changed.1584 * Should we sync the virtual interrupt redirection bitmap as well? 1498 1585 */ 1499 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss 1500 || cbTss != pVM->selm.s.cbMonitoredGuestTss) 1586 if (CPUMGetGuestCR4(pVM) & X86_CR4_VME) 1501 1587 { 1502 Log(("SELMR3UpdateFromCPUM: Guest's TSS is changed to pTss=%08X cbTss=%08X cbGuestTss\n", GCPtrTss, cbTss, pVM->selm.s.cbGuestTss)); 1503 1504 /* 1505 * Validate it. 1506 */ 1507 if ( SelTss & X86_SEL_LDT 1508 || !cbTss 1509 || SelTss >= pVM->selm.s.GuestGdtr.cbGdt 1510 || pDesc->Gen.u1DescType 1511 || ( pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL 1512 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY 1513 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL 1514 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) ) 1588 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */ 1589 if (Tss.offIoBitmap < RT_OFFSETOF(VBOXTSS, IntRedirBitmap) + sizeof(Tss.IntRedirBitmap)) 1515 1590 { 1516 AssertMsgFailed(("Invalid Guest TSS %04x!\n", SelTss)); 1591 Log(("Invalid io bitmap offset detected (%x)!\n", Tss.offIoBitmap)); 1592 Tss.offIoBitmap = RT_OFFSETOF(VBOXTSS, IntRedirBitmap) + sizeof(Tss.IntRedirBitmap); 1517 1593 } 1518 else 1594 1595 uint32_t offRedirBitmap = Tss.offIoBitmap - sizeof(Tss.IntRedirBitmap); 1596 1597 /** @todo not sure how the partial case is handled; probably not allowed */ 1598 if (offRedirBitmap + sizeof(Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss) 1519 1599 { 1520 /* 1521 * [Re]Register write virtual handler for guest's TSS. 
1522 */ 1523 if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX) 1524 { 1525 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss); 1526 AssertRC(rc); 1527 } 1528 1529 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbTss - 1, 1530 0, selmR3GuestTSSWriteHandler, "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler"); 1531 if (RT_FAILURE(rc)) 1532 { 1533 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a); 1534 return rc; 1535 } 1536 1537 /* Update saved Guest TSS info. */ 1538 pVM->selm.s.GCPtrGuestTss = GCPtrTss; 1539 pVM->selm.s.cbMonitoredGuestTss = cbTss; 1540 pVM->selm.s.GCSelTss = SelTss; 1600 rc = PGMPhysSimpleReadGCPtr(pVM, &pVM->selm.s.Tss.IntRedirBitmap, GCPtrTss + offRedirBitmap, sizeof(Tss.IntRedirBitmap)); 1601 AssertRC(rc); 1602 Log2(("Redirection bitmap:\n")); 1603 Log2(("%.*Rhxd\n", sizeof(Tss.IntRedirBitmap), &pVM->selm.s.Tss.IntRedirBitmap)); 1541 1604 } 1542 1605 } 1543 1544 /* 1545 * Update the ring 0 stack selector and base address. 1546 * (Reading up to and including offIoBitmap to save effort in the VME case.) 1547 */ 1548 VBOXTSS Tss; 1549 rc = PGMPhysSimpleReadGCPtr(pVM, &Tss, GCPtrTss, RT_OFFSETOF(VBOXTSS, offIoBitmap) + sizeof(Tss.offIoBitmap)); 1550 if (RT_SUCCESS(rc)) 1551 { 1552 #ifdef LOG_ENABLED 1553 if (LogIsEnabled()) 1554 { 1555 uint32_t ssr0, espr0; 1556 SELMGetRing1Stack(pVM, &ssr0, &espr0); 1557 if ((ssr0 & ~1) != Tss.ss0 || espr0 != Tss.esp0) 1558 { 1559 RTGCPHYS GCPhys = NIL_RTGCPHYS; 1560 rc = PGMGstGetPage(pVM, GCPtrTss, NULL, &GCPhys); AssertRC(rc); 1561 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X from %04X:%08X; TSS Phys=%VGp)\n", 1562 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys)); 1563 AssertMsg(ssr0 != Tss.ss0, 1564 ("ring-1 leak into TSS.SS0! 
%04X:%08X from %04X:%08X; TSS Phys=%VGp)\n", 1565 Tss.ss0, Tss.esp0, (ssr0 & ~1), espr0, GCPhys)); 1566 } 1567 Log(("offIoBitmap=%#x\n", Tss.offIoBitmap)); 1568 } 1569 #endif /* LOG_ENABLED */ 1570 AssertMsg(!(Tss.ss0 & 3), ("ring-1 leak into TSS.SS0? %04X:%08X\n", Tss.ss0, Tss.esp0)); 1571 1572 1573 /* Update our TSS structure for the guest's ring 1 stack */ 1574 SELMSetRing1Stack(pVM, Tss.ss0 | 1, Tss.esp0); 1575 1576 /* 1577 * Should we sync the virtual interrupt redirection bitmap as well? 1578 */ 1579 if (CPUMGetGuestCR4(pVM) & X86_CR4_VME) 1580 { 1581 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */ 1582 if (Tss.offIoBitmap < RT_OFFSETOF(VBOXTSS, IntRedirBitmap) + sizeof(Tss.IntRedirBitmap)) 1583 { 1584 Log(("Invalid io bitmap offset detected (%x)!\n", Tss.offIoBitmap)); 1585 Tss.offIoBitmap = RT_OFFSETOF(VBOXTSS, IntRedirBitmap) + sizeof(Tss.IntRedirBitmap); 1586 } 1587 1588 uint32_t offRedirBitmap = Tss.offIoBitmap - sizeof(Tss.IntRedirBitmap); 1589 1590 /** @todo not sure how the partial case is handled; probably not allowed */ 1591 if (offRedirBitmap + sizeof(Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss) 1592 { 1593 rc = PGMPhysSimpleReadGCPtr(pVM, &pVM->selm.s.Tss.IntRedirBitmap, GCPtrTss + offRedirBitmap, sizeof(Tss.IntRedirBitmap)); 1594 AssertRC(rc); 1595 Log2(("Redirection bitmap:\n")); 1596 Log2(("%.*Rhxd\n", sizeof(Tss.IntRedirBitmap), &pVM->selm.s.Tss.IntRedirBitmap)); 1597 } 1598 } 1599 } 1600 else 1601 { 1602 /* Note: the ring 0 stack selector and base address are updated on demand in this case. */ 1603 1604 /** @todo handle these dependencies better! */ 1605 TRPMR3SetGuestTrapHandler(pVM, 0x2E, TRPM_INVALID_HANDLER); 1606 TRPMR3SetGuestTrapHandler(pVM, 0x80, TRPM_INVALID_HANDLER); 1607 pVM->selm.s.fSyncTSSRing0Stack = true; 1608 } 1609 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS); 1610 } 1611 } 1612 else /* Null TR means there's no TSS, has to be reloaded first, so clear the forced action. 
*/ 1613 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS); 1606 } 1607 } 1608 1609 /* 1610 * Flush the ring-1 stack and the direct syscall dispatching if we cannot obtain SS0:ESP0. 1611 */ 1612 if (fNoRing1Stack) 1613 { 1614 selmSetRing1Stack(pVM, 0 /* invalid SS */, 0); 1615 pVM->selm.s.fSyncTSSRing0Stack = cbMonitoredTss != 0; 1616 1617 /** @todo handle these dependencies better! */ 1618 TRPMR3SetGuestTrapHandler(pVM, 0x2E, TRPM_INVALID_HANDLER); 1619 TRPMR3SetGuestTrapHandler(pVM, 0x80, TRPM_INVALID_HANDLER); 1620 } 1621 1622 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS); 1614 1623 1615 1624 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a); … … 1646 1655 */ 1647 1656 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt; 1648 PX86DESC pGDTE = pVM->selm.s.paGdtR3;1649 PX86DESC pGDTEEnd = (PX86DESC)((uintptr_t)pGDTE + GDTR.cbGdt);1657 PX86DESC pGDTE = pVM->selm.s.paGdtR3; 1658 PX86DESC pGDTEEnd = (PX86DESC)((uintptr_t)pGDTE + GDTR.cbGdt); 1650 1659 while (pGDTE < pGDTEEnd) 1651 1660 { … … 1767 1776 return true; 1768 1777 1769 RTSEL SelTss = CPUMGetGuestTR(pVM); 1778 /* 1779 * Get TR and extract the basic info. 1780 */ 1781 CPUMSELREGHID trHid; 1782 RTSEL SelTss = CPUMGetGuestTR(pVM, &trHid); 1783 RTGCPTR GCPtrTss = trHid.u64Base; 1784 uint32_t cbTss = trHid.u32Limit; 1785 Assert( (SelTss & X86_SEL_MASK) 1786 || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */) 1787 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */)); 1770 1788 if (SelTss & X86_SEL_MASK) 1771 1789 { 1772 AssertMsg((SelTss & X86_SEL_MASK) == (pVM->selm.s.GCSelTss & X86_SEL_MASK), ("New TSS selector = %04X, old TSS selector = %04X\n", SelTss, pVM->selm.s.GCSelTss)); 1773 1774 /* 1775 * Guest TR is not NULL. 
1776 */ 1777 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelTss >> X86_SEL_SHIFT]; 1778 RTGCPTR GCPtrTss = X86DESC_BASE(*pDesc); 1779 unsigned cbTss = X86DESC_LIMIT(*pDesc); 1780 if (pDesc->Gen.u1Granularity) 1781 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1782 cbTss++; 1783 1784 /* 1785 * We only care about the Ring-0 ESP and SS values and the interrupt redirection bitmap. 1786 * See SELMR3SyncTSS for details. 1787 */ 1788 if (cbTss > sizeof(VBOXTSS)) 1789 cbTss = sizeof(VBOXTSS); 1790 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT), 1791 ("GCPtrTss=%RGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss)); 1792 1793 // All system GDTs are marked not present above. That explains why this check fails. 1794 //if (pDesc->Gen.u1Present) 1795 /** @todo Handle only present TSS segments. */ 1796 { 1797 /* 1798 * Check if Guest's TSS was changed. 1799 */ 1800 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss 1801 || cbTss != pVM->selm.s.cbMonitoredGuestTss) 1790 AssertReturn(!(SelTss & X86_SEL_LDT), false); 1791 AssertReturn(trHid.Attr.n.u1DescType == 0, false); 1792 AssertReturn( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY 1793 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY, 1794 false); 1795 if (++cbTss) 1796 cbTss = UINT32_MAX; 1797 } 1798 else 1799 { 1800 AssertReturn( (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */) 1801 || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */), 1802 false); 1803 cbTss = 0; /* the reset case. 
*/ 1804 } 1805 AssertMsgReturn(pVM->selm.s.cbGuestTss == cbTss, ("%#x %#x\n", pVM->selm.s.cbGuestTss, cbTss), false); 1806 AssertMsgReturn(pVM->selm.s.fGuestTss32Bit == ( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL 1807 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY), 1808 ("%RTbool u4Type=%d\n", pVM->selm.s.fGuestTss32Bit, trHid.Attr.n.u4Type), 1809 false); 1810 AssertMsgReturn( pVM->selm.s.GCSelTss == SelTss 1811 || (!pVM->selm.s.GCSelTss && !(SelTss & X86_SEL_LDT)), 1812 ("%#x %#x\n", pVM->selm.s.GCSelTss, SelTss), 1813 false); 1814 AssertMsgReturn( pVM->selm.s.GCPtrGuestTss == GCPtrTss 1815 || (pVM->selm.s.GCPtrGuestTss == RTRCPTR_MAX && !GCPtrTss), 1816 ("%#RGv %#RGv\n", pVM->selm.s.GCPtrGuestTss, GCPtrTss), 1817 false); 1818 1819 /* 1820 * Cap the TSS size, see SELMR3SyncTSS for details. 1821 */ 1822 uint32_t cbMonitoredTss = cbTss > sizeof(VBOXTSS) ? sizeof(VBOXTSS) : cbTss; 1823 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbMonitoredTss - 1) >> PAGE_SHIFT) || !cbMonitoredTss, 1824 ("GCPtrTss=%RGv cbMonitoredTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbMonitoredTss)); 1825 AssertMsgReturn(pVM->selm.s.cbMonitoredGuestTss == cbMonitoredTss, ("%#x %#x\n", pVM->selm.s.cbMonitoredGuestTss, cbMonitoredTss), false); 1826 1827 /* 1828 * Check SS0 and ESP0. 
1829 */ 1830 if ( cbMonitoredTss 1831 && !pVM->selm.s.fSyncTSSRing0Stack) 1832 { 1833 RTGCPTR GCPtrGuestTSS = pVM->selm.s.GCPtrGuestTss; 1834 uint32_t ESPR0; 1835 int rc = PGMPhysSimpleReadGCPtr(pVM, &ESPR0, GCPtrGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0)); 1836 if (RT_SUCCESS(rc)) 1837 { 1838 RTSEL SelSS0; 1839 rc = PGMPhysSimpleReadGCPtr(pVM, &SelSS0, GCPtrGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0)); 1840 AssertRCReturn(rc, false); 1841 1842 if ( ESPR0 != pVM->selm.s.Tss.esp1 1843 || SelSS0 != (pVM->selm.s.Tss.ss1 & ~1)) 1802 1844 { 1803 AssertMsgFailed(("Guest's TSS (Sel 0x%X) is changed from %RGv:%04x to %RGv:%04x\n", 1804 SelTss, pVM->selm.s.GCPtrGuestTss, pVM->selm.s.cbMonitoredGuestTss, 1805 GCPtrTss, cbTss)); 1845 RTGCPHYS GCPhys; 1846 rc = PGMGstGetPage(pVM, GCPtrGuestTSS, NULL, &GCPhys); AssertRC(rc); 1847 AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%RGv Phys=%RGp\n", 1848 (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, GCPtrGuestTSS, GCPhys)); 1849 return false; 1806 1850 } 1807 1851 } 1808 1809 if (!pVM->selm.s.fSyncTSSRing0Stack) 1810 { 1811 RTGCPTR GCPtrGuestTSS = pVM->selm.s.GCPtrGuestTss; 1812 uint32_t ESPR0; 1813 int rc = PGMPhysSimpleReadGCPtr(pVM, &ESPR0, GCPtrGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0)); 1814 if (RT_SUCCESS(rc)) 1815 { 1816 RTSEL SelSS0; 1817 rc = PGMPhysSimpleReadGCPtr(pVM, &SelSS0, GCPtrGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0)); 1818 if (RT_SUCCESS(rc)) 1819 { 1820 if ( ESPR0 == pVM->selm.s.Tss.esp1 1821 && SelSS0 == (pVM->selm.s.Tss.ss1 & ~1)) 1822 return true; 1823 1824 RTGCPHYS GCPhys; 1825 rc = PGMGstGetPage(pVM, GCPtrGuestTSS, NULL, &GCPhys); AssertRC(rc); 1826 AssertMsgFailed(("TSS out of sync!! 
(%04X:%08X vs %04X:%08X (guest)) Tss=%RGv Phys=%RGp\n", 1827 (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, GCPtrGuestTSS, GCPhys)); 1828 } 1829 else 1830 AssertRC(rc); 1831 } 1832 else 1833 /* Happens during early Windows XP boot when it is switching page tables. */ 1834 Assert(rc == VINF_SUCCESS || ((rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT) && !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF))); 1835 } 1836 } 1837 return false; 1852 else 1853 /* Happens during early Windows XP boot when it is switching page tables. */ 1854 AssertReturn(rc == VINF_SUCCESS || ((rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT) && !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF)), 1855 false); 1856 } 1857 else if (!cbMonitoredTss) 1858 { 1859 AssertMsgReturn(pVM->selm.s.Tss.ss1 == 0 && pVM->selm.s.Tss.esp1 == 0, ("%04x:%08x\n", pVM->selm.s.Tss.ss1, pVM->selm.s.Tss.esp1), false); 1860 AssertReturn(!pVM->selm.s.fSyncTSSRing0Stack, false); 1861 } 1862 return true; 1863 1838 1864 #else /* !VBOX_STRICT */ 1839 1865 NOREF(pVM); -
trunk/src/VBox/VMM/SELMInternal.h
r13577 r17035 187 187 VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange); 188 188 189 void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp); 190 189 191 __END_DECLS 190 191 #ifdef IN_RING3192 193 #endif194 192 195 193 /** @} */ -
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r16859 r17035 520 520 { 521 521 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM); 522 AssertMsgFailed(("Need to load the hidden bits too!\n")); 522 523 523 524 pCpumCpu->Guest.tr = tr; … … 888 889 889 890 890 VMMDECL(RTSEL) CPUMGetGuestTR(PVM pVM) 891 { 892 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM); 893 891 VMMDECL(RTSEL) CPUMGetGuestTR(PVM pVM, PCPUMSELREGHID pHidden) 892 { 893 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM); 894 if (pHidden) 895 *pHidden = pCpumCpu->Guest.trHid; 894 896 return pCpumCpu->Guest.tr; 895 897 } … … 1077 1079 1078 1080 return pCpumCpu->Guest.eflags.u32; 1079 }1080 1081 1082 VMMDECL(CPUMSELREGHID *) CPUMGetGuestTRHid(PVM pVM)1083 {1084 PCPUMCPU pCpumCpu = cpumGetCpumCpu(pVM);1085 1086 return &pCpumCpu->Guest.trHid;1087 1081 } 1088 1082 -
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r13832 r17035 929 929 /* Else compatibility or 32 bits mode. */ 930 930 return (pHiddenSel->Attr.n.u1DefBig) ? CPUMODE_32BIT : CPUMODE_16BIT; 931 932 931 } 933 932 … … 961 960 * 962 961 * @param pVM VM Handle. 963 * @param ss Ring1 SS register value. 962 * @param ss Ring1 SS register value. Pass 0 if invalid. 964 963 * @param esp Ring1 ESP register value. 965 964 */ 966 VMMDECL(void) SELMSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp) 967 { 965 void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp) 966 { 967 Assert((ss & 1) || esp == 0); 968 968 pVM->selm.s.Tss.ss1 = ss; 969 969 pVM->selm.s.Tss.esp1 = (uint32_t)esp; … … 974 974 /** 975 975 * Gets ss:esp for ring1 in main Hypervisor's TSS. 976 * 977 * Returns SS=0 if the ring-1 stack isn't valid. 976 978 * 977 979 * @returns VBox status code. … … 979 981 * @param pSS Ring1 SS register value. 980 982 * @param pEsp Ring1 ESP register value. 981 *982 * @todo Merge in the GC version of this, eliminating it - or move this to983 * SELM.cpp, making it SELMR3GetRing1Stack.984 983 */ 985 984 VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp) … … 1040 1039 # endif 1041 1040 /* Update our TSS structure for the guest's ring 1 stack */ 1042 SELMSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);1041 selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0); 1043 1042 pVM->selm.s.fSyncTSSRing0Stack = false; 1044 1043 } … … 1190 1189 VMMDECL(int) SELMGetTSSInfo(PVM pVM, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap) 1191 1190 { 1192 if (!CPUMAreHiddenSelRegsValid(pVM)) 1193 { 1194 /* 1195 * Do we have a valid TSS? 1196 */ 1197 if ( pVM->selm.s.GCSelTss == RTSEL_MAX 1198 || !pVM->selm.s.fGuestTss32Bit) 1199 return VERR_SELM_NO_TSS; 1200 1201 /* 1202 * Fill in return values. 
1203 */ 1204 *pGCPtrTss = (RTGCUINTPTR)pVM->selm.s.GCPtrGuestTss; 1205 *pcbTss = pVM->selm.s.cbGuestTss; 1206 if (pfCanHaveIOBitmap) 1207 *pfCanHaveIOBitmap = pVM->selm.s.fGuestTss32Bit; 1208 } 1209 else 1210 { 1211 CPUMSELREGHID *pHiddenTRReg; 1212 1213 pHiddenTRReg = CPUMGetGuestTRHid(pVM); 1214 1215 *pGCPtrTss = pHiddenTRReg->u64Base; 1216 *pcbTss = pHiddenTRReg->u32Limit; 1217 1218 if (pfCanHaveIOBitmap) 1219 *pfCanHaveIOBitmap = pHiddenTRReg->Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL 1220 || pHiddenTRReg->Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY; 1221 } 1191 /* 1192 * The TR hidden register is always valid. 1193 */ 1194 CPUMSELREGHID trHid; 1195 RTSEL tr = CPUMGetGuestTR(pVM, &trHid); 1196 if (!(tr & X86_SEL_MASK)) 1197 return VERR_SELM_NO_TSS; 1198 1199 *pGCPtrTss = trHid.u64Base; 1200 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */ 1201 if (pfCanHaveIOBitmap) 1202 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL 1203 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY; 1222 1204 return VINF_SUCCESS; 1223 1205 } -
trunk/src/VBox/VMM/VMMGC/SELMGC.cpp
r13840 r17035 134 134 135 135 /* Check if we change the LDT selector */ 136 if (Sel == CPUMGetGuestLDTR(pVM)) 136 if (Sel == CPUMGetGuestLDTR(pVM)) /** @todo this isn't correct in two(+) ways! 1. It shouldn't be done until the LDTR is reloaded. 2. It caused the next instruction to be emulated. */ 137 137 { 138 138 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT); … … 140 140 } 141 141 142 /* Or the TR selector */ 143 if (Sel == CPUMGetGuestTR(pVM)) 144 { 145 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS); 146 return VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT; 147 } 148 149 #ifdef VBOX_STRICT 142 #ifdef LOG_ENABLED 150 143 if (Sel == (pRegFrame->cs & X86_SEL_MASK)) 151 144 Log(("GDT write to selector in CS register %04X\n", pRegFrame->cs)); … … 400 393 } 401 394 402 403 /**404 * Gets ss:esp for ring1 in main Hypervisor's TSS.405 *406 * @returns VBox status code.407 * @param pVM VM Handle.408 * @param pSS Ring1 SS register value.409 * @param pEsp Ring1 ESP register value.410 */411 VMMRCDECL(int) SELMGCGetRing1Stack(PVM pVM, uint32_t *pSS, uint32_t *pEsp)412 {413 if (pVM->selm.s.fSyncTSSRing0Stack)414 {415 uint8_t * GCPtrGuestTss = (uint8_t *)(uintptr_t)pVM->selm.s.GCPtrGuestTss;416 bool fTriedAlready = false;417 int rc;418 VBOXTSS tss;419 420 Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);421 422 l_tryagain:423 rc = MMGCRamRead(pVM, &tss.ss0, GCPtrGuestTss + RT_OFFSETOF(VBOXTSS, ss0), sizeof(tss.ss0));424 rc |= MMGCRamRead(pVM, &tss.esp0, GCPtrGuestTss + RT_OFFSETOF(VBOXTSS, esp0), sizeof(tss.esp0));425 #ifdef DEBUG426 rc |= MMGCRamRead(pVM, &tss.offIoBitmap, GCPtrGuestTss + RT_OFFSETOF(VBOXTSS, offIoBitmap), sizeof(tss.offIoBitmap));427 #endif428 429 if (RT_FAILURE(rc))430 {431 if (!fTriedAlready)432 {433 /* Shadow page might be out of sync. 
Sync and try again */434 /** @todo might cross page boundary */435 fTriedAlready = true;436 rc = PGMPrefetchPage(pVM, (RTGCPTR)(uintptr_t)GCPtrGuestTss);437 if (rc != VINF_SUCCESS)438 return rc;439 goto l_tryagain;440 }441 AssertMsgFailed(("Unable to read TSS structure at %RRv\n", GCPtrGuestTss));442 return rc;443 }444 445 #ifdef LOG_ENABLED446 uint32_t ssr0 = pVM->selm.s.Tss.ss1;447 uint32_t espr0 = pVM->selm.s.Tss.esp1;448 ssr0 &= ~1;449 450 if (ssr0 != tss.ss0 || espr0 != tss.esp0)451 Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));452 453 Log(("offIoBitmap=%#x\n", tss.offIoBitmap));454 #endif455 /* Update our TSS structure for the guest's ring 1 stack */456 SELMSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);457 pVM->selm.s.fSyncTSSRing0Stack = false;458 }459 460 *pSS = pVM->selm.s.Tss.ss1;461 *pEsp = pVM->selm.s.Tss.esp1;462 463 return VINF_SUCCESS;464 } -
trunk/src/recompiler_new/VBoxRecompiler.c
r16455 r17035 68 68 extern void sync_seg(CPUX86State *env1, int seg_reg, int selector); 69 69 extern void sync_ldtr(CPUX86State *env1, int selector); 70 extern int sync_tr(CPUX86State *env1, int selector);71 70 72 71 #ifdef VBOX_STRICT … … 1721 1720 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags)); 1722 1721 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0 1723 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR1722 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR 1724 1723 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID)) 1725 1724 { … … 1783 1782 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base; 1784 1783 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit; 1785 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF; ;1784 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF; 1786 1785 } 1787 1786 else 1788 1787 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr); 1789 }1790 1791 if (fFlags & CPUM_CHANGED_TR)1792 {1793 if (fHiddenSelRegsValid)1794 {1795 pVM->rem.s.Env.tr.selector = pCtx->tr;1796 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;1797 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;1798 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;1799 }1800 else1801 sync_tr(&pVM->rem.s.Env, pCtx->tr);1802 1803 /** @note do_interrupt will fault if the busy flag is still set.... */1804 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;1805 1788 } 1806 1789 … … 1820 1803 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */ 1821 1804 } 1805 1806 /* 1807 * Sync TR unconditionally to make life simpler. 1808 */ 1809 pVM->rem.s.Env.tr.selector = pCtx->tr; 1810 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base; 1811 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit; 1812 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF; 1813 /* Note! do_interrupt will fault if the busy flag is still set... 
*/ 1814 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK; 1822 1815 1823 1816 /* … … 2161 2154 } 2162 2155 2163 if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector) 2164 { 2165 pCtx->ldtr = pVM->rem.s.Env.ldt.selector; 2156 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector 2157 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base 2158 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit 2159 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)) 2160 { 2161 pCtx->ldtr = pVM->rem.s.Env.ldt.selector; 2162 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base; 2163 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit; 2164 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF; 2166 2165 STAM_COUNTER_INC(&gStatREMLDTRChange); 2167 2166 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT); 2168 2167 } 2169 if (pCtx->tr != pVM->rem.s.Env.tr.selector) 2170 { 2171 pCtx->tr = pVM->rem.s.Env.tr.selector; 2168 2169 if ( pCtx->tr != pVM->rem.s.Env.tr.selector 2170 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base 2171 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit 2172 /* Qemu and AMD/Intel have different ideas about the busy flag ... */ 2173 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF 2174 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 2175 : 0) ) 2176 { 2177 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n", 2178 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u, 2179 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit, 2180 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? 
(pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0)); 2181 pCtx->tr = pVM->rem.s.Env.tr.selector; 2182 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base; 2183 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit; 2184 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF; 2185 if (pCtx->trHid.Attr.u) 2186 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8; 2172 2187 STAM_COUNTER_INC(&gStatREMTRChange); 2173 2188 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS); … … 2177 2192 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base; 2178 2193 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit; 2179 /* * @noteQEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */2194 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */ 2180 2195 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF; 2181 2196 … … 2199 2214 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit; 2200 2215 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF; 2201 2202 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;2203 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;2204 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;2205 2206 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;2207 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;2208 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;2209 2216 2210 2217 /* Sysenter MSR */ -
trunk/src/recompiler_new/target-i386/helper.h
r15034 r17035 43 43 DEF_HELPER(void, helper_lcall_real, (int new_cs, target_ulong new_eip1, 44 44 int shift, int next_eip)) 45 DEF_HELPER(void, helper_lcall_protected, (int new_cs, target_ulong new_eip, 45 DEF_HELPER(void, helper_lcall_protected, (int new_cs, target_ulong new_eip, 46 46 int shift, int next_eip_addend)) 47 47 DEF_HELPER(void, helper_iret_real, (int shift)) … … 114 114 DEF_HELPER(void, helper_svm_check_intercept_param, (uint32_t type, uint64_t param)) 115 115 DEF_HELPER(void, helper_vmexit, (uint32_t exit_code, uint64_t exit_info_1)) 116 DEF_HELPER(void, helper_svm_check_io, (uint32_t port, uint32_t param, 116 DEF_HELPER(void, helper_svm_check_io, (uint32_t port, uint32_t param, 117 117 uint32_t next_eip_addend)) 118 118 DEF_HELPER(void, helper_vmrun, (int aflag, int next_eip_addend)) … … 134 134 DEF_HELPER(void, helper_fildl_ST0, (int32_t val)) 135 135 DEF_HELPER(void, helper_fildll_ST0, (int64_t val)) 136 #ifndef VBOX 136 #ifndef VBOX 137 137 DEF_HELPER(uint32_t, helper_fsts_ST0, (void)) 138 138 DEF_HELPER(uint64_t, helper_fstl_ST0, (void)) … … 257 257 void sync_seg(CPUX86State *env1, int seg_reg, int selector); 258 258 void sync_ldtr(CPUX86State *env1, int selector); 259 int sync_tr(CPUX86State *env1, int selector);260 259 261 260 #endif -
trunk/src/recompiler_new/target-i386/op_helper.c
r16505 r17035 5861 5861 } 5862 5862 5863 /**5864 * Correctly loads a new tr selector.5865 *5866 * @param env1 CPU environment.5867 * @param selector Selector to load.5868 */5869 int sync_tr(CPUX86State *env1, int selector)5870 {5871 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */5872 SegmentCache *dt;5873 uint32_t e1, e2;5874 int index, type, entry_limit;5875 target_ulong ptr;5876 CPUX86State *saved_env = env;5877 env = env1;5878 5879 selector &= 0xffff;5880 if ((selector & 0xfffc) == 0) {5881 /* NULL selector case: invalid TR */5882 env->tr.base = 0;5883 env->tr.limit = 0;5884 env->tr.flags = 0;5885 } else {5886 if (selector & 0x4)5887 goto l_failure;5888 dt = &env->gdt;5889 index = selector & ~7;5890 #ifdef TARGET_X86_645891 if (env->hflags & HF_LMA_MASK)5892 entry_limit = 15;5893 else5894 #endif5895 entry_limit = 7;5896 if ((index + entry_limit) > dt->limit)5897 goto l_failure;5898 ptr = dt->base + index;5899 e1 = ldl_kernel(ptr);5900 e2 = ldl_kernel(ptr + 4);5901 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;5902 if ((e2 & DESC_S_MASK) /*||5903 (type != 1 && type != 9)*/)5904 goto l_failure;5905 if (!(e2 & DESC_P_MASK))5906 goto l_failure;5907 #ifdef TARGET_X86_645908 if (env->hflags & HF_LMA_MASK) {5909 uint32_t e3;5910 e3 = ldl_kernel(ptr + 8);5911 load_seg_cache_raw_dt(&env->tr, e1, e2);5912 env->tr.base |= (target_ulong)e3 << 32;5913 } else5914 #endif5915 {5916 load_seg_cache_raw_dt(&env->tr, e1, e2);5917 }5918 e2 |= DESC_TSS_BUSY_MASK;5919 stl_kernel(ptr + 4, e2);5920 }5921 env->tr.selector = selector;5922 5923 env = saved_env;5924 return 0;5925 l_failure:5926 AssertMsgFailed(("selector=%d\n", selector));5927 return -1;5928 }5929 5930 5931 5863 int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr, 5932 5864 uint32_t *esp_ptr, int dpl)
注意:
瀏覽 TracChangeset
來幫助您使用更動檢視器