Changeset 46310 in vbox
- Timestamp:
- 2013-05-29 12:52:37 PM (11 years ago)
- Location:
- trunk
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/vmm/hm_svm.h
r46304 → r46310

 /**
- * SVM Event injection structure.
+ * SVM Event injection structure (EVENTINJ and EXITINTINFO).
  */
 #pragma pack(1)
…
 /**
- * SVM Interrupt control structure.
+ * SVM Interrupt control structure (Virtual Interrupt Control).
  */
 #pragma pack(1)
…
         uint32_t u3Reserved     : 3;
         uint32_t u1VIrqMasking  : 1;
-        uint32_t u7Reserved2    : 7;
+        uint32_t u6Reserved     : 6;
+        uint32_t u1AvicEnable   : 1;
         uint32_t u8VIrqVector   : 8;
         uint32_t u24Reserved    : 24;
…
 /**
- * SVM IOIO exit structure.
+ * SVM IOIO exit structure (EXITINFO1 for IOIO intercepts).
  */
 #pragma pack(1)
…
     struct
     {
         uint32_t u1NestedPaging : 1;    /**< enabled/disabled */
     } n;
     uint64_t au64[1];
 } SVMNPCTRL;
+#pragma pack()
+
+/**
+ * SVM AVIC.
+ */
+#pragma pack(1)
+typedef union
+{
+    struct
+    {
+        uint32_t u12Reserved1  : 12;
+        uint64_t u40Addr       : 40;
+        uint32_t u12Reserved2  : 12;
+    } n;
+    uint64_t au64[1];
+} SVMAVIC;
+#pragma pack()
+
+/**
+ * SVM AVIC PHYSICAL_TABLE pointer.
+ */
+#pragma pack(1)
+typedef union
+{
+    struct
+    {
+        uint32_t u8LastGuestCoreId : 8;
+        uint32_t u4Reserved        : 4;
+        uint64_t u40Addr           : 40;
+        uint32_t u12Reserved       : 12;
+    } n;
+    uint64_t au64[1];
+} SVMAVICPHYS;
 #pragma pack()
…
     uint32_t u32InterceptCtrl2;
     /** Offset 0x14-0x3F - Reserved. */
-    uint8_t u8Reserved[0x3e - 0x14];
+    uint8_t u8Reserved[0x3c - 0x14];
+    /** Offset 0x3c - PAUSE filter threshold. */
+    uint16_t u16PauseFilterThreshold;
     /** Offset 0x3e - PAUSE intercept filter count. */
     uint16_t u16PauseFilterCount;
…
     /** Offset 0x90 - Nested Paging. */
     SVMNPCTRL NestedPaging;
-    /** Offset 0x98-0xA7 - Reserved. */
-    uint8_t u8Reserved2[0xA8-0x98];
+    /** Offset 0x98 - AVIC APIC BAR. */
+    SVMAVIC AvicBar;
+    /** Offset 0xA0-0xA7 - Reserved. */
+    uint8_t u8Reserved2[0xA8-0xA0];
     /** Offset 0xA8 - Event injection. */
     SVMEVENT EventInject;
…
     uint64_t u64LBRVirt;
     /** Offset 0xC0 - VMCB Clean Bits. */
-    uint64_t u64VMCBCleanBits;
+    uint64_t u64VmcbCleanBits;
     /** Offset 0xC8 - Next sequential instruction pointer. */
     uint64_t u64NextRIP;
…
     /** Offset 0xD1 - Number of bytes fetched. */
     uint8_t abInstr[15];
+    /** Offset 0xE0 - AVIC APIC_BACKING_PAGE pointer. */
+    SVMAVIC AvicBackingPagePtr;
+    /** Offset 0xE8-0xEF - Reserved. */
+    uint8_t u8Reserved3[0xF0 - 0xE8];
+    /** Offset 0xF0 - AVIC LOGICAL_TABLE pointer. */
+    SVMAVIC AvicLogicalTablePtr;
+    /** Offset 0xF8 - AVIC PHYSICAL_TABLE pointer. */
+    SVMAVICPHYS AvicPhysicalTablePtr;
 } ctrl;

-/** Offset 0xC0-0x3FF - Reserved. */
-uint8_t u8Reserved3[0x400-0xE0];
+/** Offset 0x100-0x3FF - Reserved. */
+uint8_t u8Reserved3[0x400-0x100];

 /** State Save Area. Starts at offset 0x400. */
…
 /** Pointer to the SVMVMCB structure. */
 typedef SVMVMCB *PSVMVMCB;
 AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptRdCRx, 0x000);
 AssertCompileMemberOffset(SVMVMCB, ctrl.u16PauseFilterCount, 0x03e);
 AssertCompileMemberOffset(SVMVMCB, ctrl.TLBCtrl, 0x058);
 AssertCompileMemberOffset(SVMVMCB, ctrl.ExitIntInfo, 0x088);
 AssertCompileMemberOffset(SVMVMCB, ctrl.EventInject, 0x0A8);
 AssertCompileMemberOffset(SVMVMCB, ctrl.abInstr, 0x0D1);
+AssertCompileMemberOffset(SVMVMCB, ctrl.AvicBackingPagePtr, 0x0E0);
+AssertCompileMemberOffset(SVMVMCB, ctrl.AvicLogicalTablePtr, 0x0F0);
+AssertCompileMemberOffset(SVMVMCB, ctrl.AvicPhysicalTablePtr, 0x0F8);
 AssertCompileMemberOffset(SVMVMCB, guest, 0x400);
 AssertCompileMemberOffset(SVMVMCB, guest.ES, 0x400);
 AssertCompileMemberOffset(SVMVMCB, guest.TR, 0x490);
 AssertCompileMemberOffset(SVMVMCB, guest.u64EFER, 0x4D0);
 AssertCompileMemberOffset(SVMVMCB, guest.u64CR4, 0x548);
 AssertCompileMemberOffset(SVMVMCB, guest.u64RIP, 0x578);
 AssertCompileMemberOffset(SVMVMCB, guest.u64RSP, 0x5D8);
 AssertCompileMemberOffset(SVMVMCB, guest.u64CR2, 0x640);
 AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved4, 0x4A0);
 AssertCompileMemberOffset(SVMVMCB, guest.u8CPL, 0x4CB);
 AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved6, 0x4D8);
 AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved7, 0x580);
 AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved9, 0x648);
 AssertCompileMemberOffset(SVMVMCB, guest.u64GPAT, 0x668);
 AssertCompileMemberOffset(SVMVMCB, guest.u64LASTEXCPTO, 0x690);
 AssertCompileMemberOffset(SVMVMCB, u8Reserved10, 0x698);
 AssertCompileSize(SVMVMCB, 0x1000);
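The new SVMAVIC / SVMAVICPHYS unions carve a 40-bit physical-address field out of a 64-bit VMCB word, with the low 12 bits reserved so the referenced page has to be 4 KB aligned. As a rough standalone illustration (not VirtualBox code: the MYAVIC union, the sample address, and the uniform uint64_t bit-field types are made up for this sketch), storing a page-aligned host-physical address amounts to writing its page-frame number into the 40-bit field:

/* sketch.c - assumes the 40-bit field holds bits 51:12 of a 4K-aligned physical address */
#include <stdint.h>
#include <stdio.h>

typedef union
{
    struct
    {
        uint64_t u12Reserved1 : 12;   /* bits 11:0  - zero, page must be 4K aligned */
        uint64_t u40Addr      : 40;   /* bits 51:12 - physical page-frame number    */
        uint64_t u12Reserved2 : 12;   /* bits 63:52 - reserved                      */
    } n;
    uint64_t au64[1];
} MYAVIC;

int main(void)
{
    uint64_t HCPhysBackingPage = UINT64_C(0x12345000);  /* hypothetical 4K-aligned address */
    MYAVIC   Avic = { { 0, 0, 0 } };

    Avic.n.u40Addr = HCPhysBackingPage >> 12;            /* store the page-frame number */
    printf("raw VMCB field = %#llx\n", (unsigned long long)Avic.au64[0]);
    return 0;
}

Compiled and run, this prints 0x12345000: the original address reappears in bits 51:12 of the raw 64-bit word, which is what the reserved/addr split in the header suggests the hardware expects.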
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r46304 → r46310

  *******************************************************************************/

+#ifdef DEBUG_ramshankar
+# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
+# define HMSVM_ALWAYS_TRAP_PF
+#endif

 /*******************************************************************************
…
 VMMR0DECL(int) SVMR0InitVM(PVM pVM)
 {
-    int rc;
+    int rc = VERR_INTERNAL_ERROR_5;

     /* Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch. */
…
     pVCpu->hm.s.svm.pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
-    pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0);
+    pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
     Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
     ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
…
     pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
-    pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0);
+    pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
     Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
     ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
…
     pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
-    pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0);
+    pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
     /* Set all bits to intercept all MSR accesses. */
     ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
…
+/**
+ * Sets up AMD-V for the specified VM.
+ * This function is only called once per-VM during initalization.
+ *
+ * @returns VBox status code.
+ * @param   pVM     Pointer to the VM.
+ */
+VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
+{
+    int rc = VINF_SUCCESS;
+
+    AssertReturn(pVM, VERR_INVALID_PARAMETER);
+    Assert(pVM->hm.s.svm.fSupported);
+
+    for (uint32_t i = 0; i < pVM->cCpus; i++)
+    {
+        PVMCPU   pVCpu = &pVM->aCpus[i];
+        PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
+
+        AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
+
+        /* Intercept traps. */
+#ifdef HMSVM_ALWAYS_TRAP_PF
+        pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
+#endif
+#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
+        pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_BP)
+                                           | RT_BIT(X86_XCPT_DB)
+                                           | RT_BIT(X86_XCPT_DE)
+                                           | RT_BIT(X86_XCPT_UD)
+                                           | RT_BIT(X86_XCPT_NP)
+                                           | RT_BIT(X86_XCPT_SS)
+                                           | RT_BIT(X86_XCPT_GP)
+                                           | RT_BIT(X86_XCPT_MF)
+                                           | RT_BIT(X86_XCPT_PF);
+#endif
+
+        /* -XXX- todo. */
+    }
+
+    return rc;
+}
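In the new SVMR0SetupVM, the debug-only HMSVM_ALWAYS_TRAP_* paths build the VMCB exception-intercept mask one bit per exception vector: RT_BIT(X86_XCPT_PF) sets bit 14 because #PF is vector 14, and so on for the other vectors. A minimal standalone sketch of the same bit-per-vector scheme follows (MY_BIT and the MY_XCPT_* constants are placeholders defined here for illustration, not the IPRT/VBox macros):

/* intercept_mask.c - standalone sketch of a per-vector exception intercept mask */
#include <stdint.h>
#include <stdio.h>

#define MY_BIT(n)   (UINT32_C(1) << (n))

#define MY_XCPT_DB  1    /* #DB - debug exception          */
#define MY_XCPT_BP  3    /* #BP - breakpoint               */
#define MY_XCPT_GP  13   /* #GP - general protection fault */
#define MY_XCPT_PF  14   /* #PF - page fault               */

int main(void)
{
    uint32_t u32InterceptException = 0;

    /* Trap debug, breakpoint, #GP and #PF in the guest, mirroring the
       always-trap debug paths added by this changeset. */
    u32InterceptException |= MY_BIT(MY_XCPT_DB)
                           | MY_BIT(MY_XCPT_BP)
                           | MY_BIT(MY_XCPT_GP)
                           | MY_BIT(MY_XCPT_PF);

    printf("exception intercept mask = %#x\n", u32InterceptException);
    return 0;
}

Running it prints 0x600a, the OR of bits 1, 3, 13 and 14.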
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r46304 → r46310

 Log(("ctrl.IntCtrl.u3Reserved        %x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
 Log(("ctrl.IntCtrl.u1VIrqMasking     %x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqMasking));
-Log(("ctrl.IntCtrl.u7Reserved2       %x\n", pVmcb->ctrl.IntCtrl.n.u7Reserved2));
+Log(("ctrl.IntCtrl.u6Reserved        %x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
 Log(("ctrl.IntCtrl.u8VIrqVector      %x\n", pVmcb->ctrl.IntCtrl.n.u8VIrqVector));
 Log(("ctrl.IntCtrl.u24Reserved       %x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
Note: See TracChangeset for help on using the changeset viewer.