VirtualBox

Changeset 36441 in vbox


Timestamp: 2011-03-25 21:11:56 (14 years ago)
Author: vboxsync
Message:

VMM: Sketched out where to do the initial I/O MMU setup. This adds a VMINITCOMPLETED_HWACCM and makes HWACCMR3InitFinalizeR0 private (invoked from HWACCMR3InitCompleted(,_RING0)).

Location: trunk
Files: 14 edited

Legend: unchanged lines are unmarked; added lines are prefixed with "+", removed lines with "-"; "…" marks unchanged regions elided between hunks.
  • trunk/include/VBox/rawpci.h

    r36436 → r36441

      /** @file
    -  * PDM - Pluggable Device Manager, raw PCI Devices. (VMM)
    +  * Raw PCI Devices (aka PCI pass-through). (VMM)
       */

    …

      typedef enum PCIRAWMEMINFOACTION
      {
    -     /* Pages mapped. */
    +     /** Pages mapped. */
          PCIRAW_MEMINFO_MAP,
    -     /* Pages unmapped. */
    +     /** Pages unmapped. */
          PCIRAW_MEMINFO_UNMAP,
          /** The usual 32-bit type blow up. */

    …

      typedef struct RAWPCIVM
      {
    -     /* Shall only be interpreted by the host PCI driver. */
    +     /** Shall only be interpreted by the host PCI driver. */
          RTR0PTR                     pDriverData;
    -     /* Callback called when mapping of host pages to the guest changes. */
    +     /** Callback called when mapping of host pages to the guest changes. */
          PFNRAWPCICONTIGPHYSMEMINFO  pfnContigMemInfo;
      } RAWPCIVM;
  • trunk/include/VBox/vmm/hwaccm.h

    r35361 → r36441

      VMMR3DECL(int)          HWACCMR3Init(PVM pVM);
      VMMR3_INT_DECL(int)     HWACCMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
    - VMMR3DECL(int)          HWACCMR3InitFinalizeR0(PVM pVM);
      VMMR3DECL(void)         HWACCMR3Relocate(PVM pVM);
      VMMR3DECL(int)          HWACCMR3Term(PVM pVM);
  • trunk/include/VBox/vmm/pgm.h

    r36196 → r36441

      VMMR0DECL(int)      PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu);
      VMMR0DECL(int)      PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu);
    + VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM);
      VMMR0DECL(int)      PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, uint32_t cRegions, PGMMSHAREDREGIONDESC pRegions);
      VMMR0DECL(int)      PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault);

    …

      VMMR3DECL(int)      PGMR3InitDynMap(PVM pVM);
      VMMR3DECL(int)      PGMR3InitFinalize(PVM pVM);
    + VMMR3DECL(int)      PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
      VMMR3DECL(void)     PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
      VMMR3DECL(void)     PGMR3ResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu);
  • trunk/include/VBox/vmm/vmapi.h

    r36041 → r36441

      typedef enum VMINITCOMPLETED
      {
    -     /** The Ring3 init is completed. */
    +     /** The ring-3 init is completed. */
          VMINITCOMPLETED_RING3 = 1,
    -     /** The Ring0 init is completed. */
    +     /** The ring-0 init is completed. */
          VMINITCOMPLETED_RING0,
    +     /** The hardware accelerated virtualization init is completed.
    +      * Used to make decisision depending on whether HWACCMIsEnabled(). */
    +     VMINITCOMPLETED_HWACCM,
          /** The GC init is completed. */
          VMINITCOMPLETED_GC
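
    For readers who want the end state rather than the delta, this is roughly how the enum reads after the changeset (reconstructed from the hunk above; the closing typedef name is assumed from the opener and the comments are abridged):

        typedef enum VMINITCOMPLETED
        {
            /** The ring-3 init is completed. */
            VMINITCOMPLETED_RING3 = 1,
            /** The ring-0 init is completed. */
            VMINITCOMPLETED_RING0,
            /** The hardware accelerated virtualization init is completed. */
            VMINITCOMPLETED_HWACCM,
            /** The GC init is completed. */
            VMINITCOMPLETED_GC
        } VMINITCOMPLETED;

    The new phase slots in between the ring-0 and GC phases, which is what lets PGM defer its IOMMU setup until HWACCMIsEnabled() and nested paging status are known (see the PGM.cpp hunk below).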
  • trunk/include/VBox/vmm/vmm.h

    r36437 → r36441

          /** Call PGMR0AllocateLargePage(). */
          VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
    +     /** Call PGMR0PhysSetupIommu(). */
    +     VMMR0_DO_PGM_PHYS_SETUP_IOMMU,

          /** Call GMMR0InitialReservation(). */
  • trunk/src/VBox/VMM/Makefile.kmk

    r35855 → r36441

      ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
       VMM_COMMON_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    + endif
    + ifdef VBOX_WITH_PCI_PASSTHROUGH
    +  VMM_COMMON_DEFS += VBOX_WITH_PCI_PASSTHROUGH
      endif
      # VMM_COMMON_DEFS += VBOX_WITH_NS_ACCOUNTING_STATS

    …

       endif
       ifdef VBOX_WITH_PCI_PASSTHROUGH
    -   VMMR0_DEFS    += VBOX_WITH_PCI_PASSTHROUGH IN_PCIRAW_R0
    +   VMMR0_DEFS    += IN_PCIRAW_R0
       endif
       VMMR0_DEFS.darwin = VMM_R0_SWITCH_STACK
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r36427 → r36441

      /*
    -  * Copyright (C) 2007 Oracle Corporation
    +  * Copyright (C) 2007-2011 Oracle Corporation
       *
       * This file is part of VirtualBox Open Source Edition (OSE), as

    …

                                      AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
                                      if (RT_LIKELY(paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST))
    -                                 {
                                          pPage->Private.pfn = paPages[iPage].HCPhysGCPhys >> PAGE_SHIFT;
    - #if 0 /* Not sure if this is the right place to tell pciraw about mappings. */
    -                                     if (PciRawIsEnabled(pGVM/pVM))
    -                                         PciRawR0NotifyGuestPageAssignment(pGVM, paPages[iPage].HCPhysGCPhys,
    -                                                                           gmmR0GetPageHCPhys(pGMM, paPages[iPage].idPage));
    - #endif
    -                                 }
                                      else if (paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE)
                                          pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp

    r35346 → r36441

    + #ifdef VBOX_WITH_PCI_PASSTHROUGH
    + /* Interface sketch.  The interface belongs to a global PCI pass-through
    +    manager.  It shall use the global VM handle, not the user VM handle to
    +    store the per-VM info (domain) since that is all ring-0 stuff, thus
    +    passing pGVM here.  I've tentitively prefixed the functions 'GPicRawR0',
    +    we can discuss the PciRaw code re-organtization when I'm back from
    +    vacation.
    +
    +    I've implemented the initial IOMMU set up below.  For things to work
    +    reliably, we will probably need add a whole bunch of checks and
    +    GPciRawR0GuestPageUpdate call to the PGM code.  For the present,
    +    assuming nested paging (enforced) and prealloc (enforced), no
    +    ballooning (check missing), page sharing (check missing) or live
    +    migration (check missing), it might work fine.  At least if some
    +    VM power-off hook is present and can tear down the IOMMU page tables. */
    +
    + /**
    +  * Tells the global PCI pass-through manager that we are about to set up the
    +  * guest page to host page mappings for the specfied VM.
    +  *
    +  * @returns VBox status code.
    +  *
    +  * @param   pGVM                The ring-0 VM structure.
    +  */
    + VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
    + {
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
    +  * Assigns a host page mapping for a guest page.
    +  *
    +  * This is only used when setting up the mappings, i.e. between
    +  * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
    +  *
    +  * @returns VBox status code.
    +  * @param   pGVM                The ring-0 VM structure.
    +  * @param   GCPhys              The address of the guest page (page aligned).
    +  * @param   HCPhys              The address of the host page (page aligned).
    +  */
    + VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
    + {
    +     AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    +     AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
    +  * Indicates that the specified guest page doesn't exists but doesn't have host
    +  * page mapping we trust PCI pass-through with.
    +  *
    +  * This is only used when setting up the mappings, i.e. between
    +  * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
    +  *
    +  * @returns VBox status code.
    +  * @param   pGVM                The ring-0 VM structure.
    +  * @param   GCPhys              The address of the guest page (page aligned).
    +  * @param   HCPhys              The address of the host page (page aligned).
    +  */
    + VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
    + {
    +     AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
    +  * Tells the global PCI pass-through manager that we have completed setting up
    +  * the guest page to host page mappings for the specfied VM.
    +  *
    +  * This complements GPciRawR0GuestPageBeginAssignments and will be called even
    +  * if some page assignment failed.
    +  *
    +  * @returns VBox status code.
    +  *
    +  * @param   pGVM                The ring-0 VM structure.
    +  */
    + VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
    + {
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
    +  * Tells the global PCI pass-through manager that a guest page mapping has
    +  * changed after the initial setup.
    +  *
    +  * @returns VBox status code.
    +  * @param   pGVM                The ring-0 VM structure.
    +  * @param   GCPhys              The address of the guest page (page aligned).
    +  * @param   HCPhys              The new host page address or NIL_RTHCPHYS if
    +  *                              now unassigned.
    +  */
    + VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
    + {
    +     AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    +     AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    +     return VINF_SUCCESS;
    + }
    +
    + #endif /* VBOX_WITH_PCI_PASSTHROUGH */
    +
    +
    + /**
    +  * Sets up the IOMMU when raw PCI device is enabled.
    +  *
    +  * @note    This is a hack that will probably be remodelled and refined later!
    +  *
    +  * @returns VBox status code.
    +  *
    +  * @param   pVM                 The VM handle.
    +  */
    + VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
    + {
    +     PGVM pGVM;
    +     int rc = GVMMR0ByVM(pVM, &pGVM);
    +     if (RT_FAILURE(rc))
    +         return rc;
    +
    + #ifdef VBOX_WITH_PCI_PASSTHROUGH
    +     if (pVM->pgm.s.fPciPassthrough)
    +     {
    +         /*
    +          * The Simplistic Approach - Enumerate all the pages and call tell the
    +          * IOMMU about each of them.
    +          */
    +         pgmLock(pVM);
    +         rc = GPciRawR0GuestPageBeginAssignments(pGVM);
    +         if (RT_SUCCESS(rc))
    +         {
    +             for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
    +             {
    +                 PPGMPAGE    pPage  = &pRam->aPages[0];
    +                 RTGCPHYS    GCPhys = pRam->GCPhys;
    +                 uint32_t    cLeft  = pRam->cb >> PAGE_SHIFT;
    +                 while (cLeft-- > 0)
    +                 {
    +                     /* Only expose pages that are 100% safe for now. */
    +                     if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
    +                         && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
    +                         && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
    +                         rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
    +                     else
    +                         rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);
    +
    +                     /* next */
    +                     pPage++;
    +                     GCPhys += PAGE_SIZE;
    +                 }
    +             }
    +
    +             int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
    +             if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
    +                 rc = rc2;
    +         }
    +         pgmUnlock(pVM);
    +     }
    +     else
    + #endif
    +         rc = VERR_NOT_SUPPORTED;
    +     return rc;
    + }
    +
    +
      /**
       * #PF Handler for nested paging.
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r36329 → r36441

                  return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

    +         case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
    +             if (idCpu != 0)
    +                 return VERR_INVALID_CPU_ID;
    +             return PGMR0PhysSetupIommu(pVM);
    +
              /*
               * GMM wrappers.
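
    For context, the ring-3 side of this new dispatcher case is PGMR3InitCompleted() in the PGM.cpp hunk further down; stripped of its surrounding checks, the call it makes looks like this (a minimal sketch, with purely illustrative argument comments):

        /* Ring-3, EMT(0): ask ring-0 to walk the RAM ranges and program the IOMMU. */
        int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_SETUP_IOMMU, 0 /* no argument */, NULL /* no request packet */);
        AssertRCReturn(rc, rc);

    The idCpu != 0 guard above mirrors the @thread EMT(0) note on PGMR3InitCompleted().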
  • trunk/src/VBox/VMM/VMMR3/HWACCM.cpp

    r35346 → r36441

      static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
      static int hwaccmR3InitCPU(PVM pVM);
    + static int hwaccmR3InitFinalizeR0(PVM pVM);
      static int hwaccmR3TermCPU(PVM pVM);

    …

          switch (enmWhat)
          {
    -     case VMINITCOMPLETED_RING3:
    -         return hwaccmR3InitCPU(pVM);
    -     default:
    -         return VINF_SUCCESS;
    +         case VMINITCOMPLETED_RING3:
    +             return hwaccmR3InitCPU(pVM);
    +         case VMINITCOMPLETED_RING0:
    +             return HWACCMR3InitFinalizeR0(pVM);
    +         default:
    +             return VINF_SUCCESS;
          }
      }

    …

       * @param   pVM         The VM handle.
       */
    - VMMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
    + static int hwaccmR3InitFinalizeR0(PVM pVM)
      {
          int rc;
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r36415 → r36441

              return rc;
          }
    +
    +     /*
    +      * Check for PCI pass-through.
    +      */
    +     rc = CFGMR3QueryBoolDef(pCfgPGM, "PciPassThrough", &pVM->pgm.s.fPciPassthrough, false);
    +     AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"PciPassThrough\", rc=%Rrc.\n", rc), rc);
    +     AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough || pVM->pgm.s.fRamPreAlloc, VERR_INVALID_PARAMETER);

      #ifdef VBOX_WITH_STATISTICS

    …

      /**
    +  * Init phase completed callback.
    +  *
    +  * @returns VBox status code.
    +  * @param   pVM                 The VM handle.
    +  * @param   enmWhat             What has been completed.
    +  * @thread  EMT(0)
    +  */
    + VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
    + {
    +     switch (enmWhat)
    +     {
    +         case VMINITCOMPLETED_HWACCM:
    + #ifdef VBOX_WITH_PCI_PASSTHROUGH
    +             if (pVM->pgm.s.fPciPassthrough)
    +             {
    +                 AssertLogRelReturn(pVM->pgm.s.fRamPreAlloc, VERR_INVALID_PARAMETER);
    +                 AssertLogRelReturn(HWACCMIsEnabled(pVM), VERR_INVALID_PARAMETER);
    +                 AssertLogRelReturn(HWACCMIsNestedPagingActive(pVM), VERR_INVALID_PARAMETER);
    +
    +                 /*
    +                  * Report assignments to the IOMMU (hope that's good enough for now).
    +                  */
    +                 if (pVM->pgm.s.fPciPassthrough)
    +                 {
    +                     int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_SETUP_IOMMU, 0, NULL);
    +                     AssertRCReturn(rc, rc);
    +                 }
    +             }
    + #else
    +             AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough, VERR_INTERNAL_ERROR_5);
    + #endif
    +             break;
    +
    +         default:
    +             /* shut up gcc */
    +             break;
    +     }
    +
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
       * Applies relocations to data and code managed by this component.
       *
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r36009 → r36441 (trailing-whitespace cleanup only)

                                  PGM_PAGE_SET_FT_DIRTY(pPage);
                              }
    - 
    +
                              pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                              break;
    - 
    +
                          case PGM_PAGE_STATE_SHARED:
                              AssertFailed();
                              break;
    - 
    +
                          case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
                          default:
  • trunk/src/VBox/VMM/VMMR3/VM.cpp

    r36437 → r36441

          if (RT_SUCCESS(rc))
              rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
    - 
    -     /** @todo Move this to the VMINITCOMPLETED_RING0 notification handler. */
          if (RT_SUCCESS(rc))
    -     {
    -         rc = HWACCMR3InitFinalizeR0(pVM);
    +         rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
    +
    +     /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
    +     if (RT_SUCCESS(rc))
              CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
    -     }

          LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));

    …

          if (RT_SUCCESS(rc))
              rc = HWACCMR3InitCompleted(pVM, enmWhat);
    +     if (RT_SUCCESS(rc))
    +         rc = PGMR3InitCompleted(pVM, enmWhat);
          return rc;
      }
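
    Pulling the VM.cpp, HWACCM.cpp and PGM.cpp hunks together, the ring-0 init path after this changeset runs the completion notifications in this order (a condensed sketch for orientation, not verbatim source):

        /* vmR3InitRing0, after r36441 (condensed): */
        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);      /* HWACCM runs hwaccmR3InitFinalizeR0() here. */
        if (RT_SUCCESS(rc))
            rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM); /* PGM programs the IOMMU here (PCI pass-through only). */
        if (RT_SUCCESS(rc))
            CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));

    vmR3InitDoCompleted() fans each phase out to HWACCMR3InitCompleted() and the new PGMR3InitCompleted(), which is how the IOMMU setup ends up gated on HWACCMIsEnabled() and nested paging being active.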
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r36013 → r36441

           * (Only used in strict builds.) */
          bool                            fNoMorePhysWrites;
    +     /** Set if PCI passthrough is enabled. */
    +     bool                            fPciPassthrough;
          /** Alignment padding that makes the next member start on a 8 byte boundary. */
    -     bool                            afAlignment1[3];
    +     bool                            afAlignment1[2];

          /** Indicates that PGMR3FinalizeMappings has been called and that further