VirtualBox

Changeset 41660 in vbox for trunk/src/VBox/Runtime/r0drv


Timestamp:
Jun 12, 2012, 8:08:17 AM (12 years ago)
Author:
vboxsync
Message:

IPRT/r0drv/Linux: make it work with Linux 3.5 kernels

Location:
trunk/src/VBox/Runtime/r0drv/linux
Files:
2 edited
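Background for the change: as of Linux 3.5 a module can no longer call do_mmap()/do_munmap() directly; kernels 3.4 and later provide vm_mmap()/vm_munmap() instead, which acquire mm->mmap_sem themselves rather than expecting the caller to hold it. The diff below therefore funnels every user-space map/unmap through two new helpers that hide the version split. A minimal sketch of that pattern, assuming a hypothetical helper name (compatMmapAnon is not part of the changeset):

    #include <linux/version.h>
    #include <linux/mm.h>
    #include <linux/mman.h>
    #include <linux/sched.h>

    /* Hypothetical helper showing the compatibility pattern this changeset
     * applies: map cb bytes of anonymous shared memory into the current
     * process.  vm_mmap() (3.4+) takes mmap_sem internally; the old
     * do_mmap() required the caller to hold it for writing. */
    static unsigned long compatMmapAnon(size_t cb, unsigned long fLnxProt)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        return vm_mmap(NULL /* file */, 0 /* addr */, cb, fLnxProt,
                       MAP_SHARED | MAP_ANONYMOUS, 0 /* offset */);
    #else
        unsigned long ulAddr;
        down_write(&current->mm->mmap_sem);
        ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
        up_write(&current->mm->mmap_sem);
        return ulAddr;
    #endif
    }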

  • trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c

    r41116 → r41660:

    @@ -101,4 +101,5 @@
     {
         /** @todo fix rtR0ProcessToLinuxTask!! */
    +    /** @todo many (all?) callers currently assume that we return 'current'! */
         return R0Process == RTR0ProcHandleSelf() ? current : NULL;
     }
     
    @@ -166,4 +167,105 @@
                 return fKernel ? MY_PAGE_KERNEL_EXEC    : PAGE_SHARED_EXEC;
         }
    +}
    +
    +
    +/**
    + * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativerMapUser that creates
    + * an empty user space mapping.
    + *
    + * We acquire the mmap_sem of the task!
    + *
    + * @returns Pointer to the mapping.
    + *          (void *)-1 on failure.
    + * @param   R3PtrFixed  (RTR3PTR)-1 if anywhere, otherwise a specific location.
    + * @param   cb          The size of the mapping.
    + * @param   uAlignment  The alignment of the mapping.
    + * @param   pTask       The Linux task to create this mapping in.
    + * @param   fProt       The RTMEM_PROT_* mask.
    + */
    +static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
    +{
    +    unsigned fLnxProt;
    +    unsigned long ulAddr;
    +
    +    Assert((pTask == current)); /* do_mmap */
    +
    +    /*
    +     * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
    +     */
    +    fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
    +    if (fProt == RTMEM_PROT_NONE)
    +        fLnxProt = PROT_NONE;
    +    else
    +    {
    +        fLnxProt = 0;
    +        if (fProt & RTMEM_PROT_READ)
    +            fLnxProt |= PROT_READ;
    +        if (fProt & RTMEM_PROT_WRITE)
    +            fLnxProt |= PROT_WRITE;
    +        if (fProt & RTMEM_PROT_EXEC)
    +            fLnxProt |= PROT_EXEC;
    +    }
    +
    +    if (R3PtrFixed != (RTR3PTR)-1)
    +    {
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
    +        ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
    +#else
    +        down_write(&pTask->mm->mmap_sem);
    +        ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
    +        up_write(&pTask->mm->mmap_sem);
    +#endif
    +    }
    +    else
    +    {
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
    +        ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
    +#else
    +        down_write(&pTask->mm->mmap_sem);
    +        ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
    +        up_write(&pTask->mm->mmap_sem);
    +#endif
    +        if (    !(ulAddr & ~PAGE_MASK)
    +            &&  (ulAddr & (uAlignment - 1)))
    +        {
    +            /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
    +             * up alignment gaps. This is of course complicated by fragmentation (which we might have cause
    +             * ourselves) and further by there begin two mmap strategies (top / bottom). */
    +            /* For now, just ignore uAlignment requirements... */
    +        }
    +    }
    +
    +
    +    if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
    +        return (void *)-1;
    +    return (void *)ulAddr;
    +}
    +
    +
    +/**
    + * Worker that destroys a user space mapping.
    + * Undoes what rtR0MemObjLinuxDoMmap did.
    + *
    + * We acquire the mmap_sem of the task!
    + *
    + * @param   pv          The ring-3 mapping.
    + * @param   cb          The size of the mapping.
    + * @param   pTask       The Linux task to destroy this mapping in.
    + */
    +static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTask)
    +{
    +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
    +    Assert(pTask == current);
    +    vm_munmap((unsigned long)pv, cb);
    +#elif defined(USE_RHEL4_MUNMAP)
    +    down_write(&pTask->mm->mmap_sem);
    +    do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
    +    up_write(&pTask->mm->mmap_sem);
    +#else
    +    down_write(&pTask->mm->mmap_sem);
    +    do_munmap(pTask->mm, (unsigned long)pv, cb);
    +    up_write(&pTask->mm->mmap_sem);
    +#endif
    +}
     }
     
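Worth noting in both helpers above is the failure convention: a successful mmap result is always page aligned, while a failure comes back as a small negative errno cast to unsigned long, which never is. Hence the ulAddr & ~PAGE_MASK test. The same check as a standalone predicate (a sketch; the function name is hypothetical):

    #include <linux/mm.h>   /* PAGE_MASK */

    /* Hypothetical predicate equivalent to the check used in
     * rtR0MemObjLinuxDoMmap: any bits set below PAGE_SHIFT mean the value
     * is an encoded -errno, not a mapping address. */
    static inline int rtR0MemObjLinuxMmapFailed(unsigned long ulAddr)
    {
        return (ulAddr & ~PAGE_MASK) != 0;
    }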
     
    @@ -424,5 +526,5 @@
     
     /**
    - * Undos what rtR0MemObjLinuxVMap() did.
    + * Undoes what rtR0MemObjLinuxVMap() did.
      *
      * @param   pMemLnx     The linux memory object.
     
    @@ -492,9 +594,5 @@
                     Assert(pTask);
                     if (pTask && pTask->mm)
    -                {
    -                    down_write(&pTask->mm->mmap_sem);
    -                    MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
    -                    up_write(&pTask->mm->mmap_sem);
    -                }
    +                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
                 }
                 else
     
    @@ -517,9 +615,5 @@
                     Assert(pTask);
                     if (pTask && pTask->mm)
    -                {
    -                    down_write(&pTask->mm->mmap_sem);
    -                    MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
    -                    up_write(&pTask->mm->mmap_sem);
    -                }
    +                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
                 }
                 else
     
    @@ -1120,60 +1214,4 @@
     
     
    -/**
    - * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativerMapUser that creates
    - * an empty user space mapping.
    - *
    - * The caller takes care of acquiring the mmap_sem of the task.
    - *
    - * @returns Pointer to the mapping.
    - *          (void *)-1 on failure.
    - * @param   R3PtrFixed  (RTR3PTR)-1 if anywhere, otherwise a specific location.
    - * @param   cb          The size of the mapping.
    - * @param   uAlignment  The alignment of the mapping.
    - * @param   pTask       The Linux task to create this mapping in.
    - * @param   fProt       The RTMEM_PROT_* mask.
    - */
    -static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
    -{
    -    unsigned fLnxProt;
    -    unsigned long ulAddr;
    -
    -    /*
    -     * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
    -     */
    -    fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
    -    if (fProt == RTMEM_PROT_NONE)
    -        fLnxProt = PROT_NONE;
    -    else
    -    {
    -        fLnxProt = 0;
    -        if (fProt & RTMEM_PROT_READ)
    -            fLnxProt |= PROT_READ;
    -        if (fProt & RTMEM_PROT_WRITE)
    -            fLnxProt |= PROT_WRITE;
    -        if (fProt & RTMEM_PROT_EXEC)
    -            fLnxProt |= PROT_EXEC;
    -    }
    -
    -    if (R3PtrFixed != (RTR3PTR)-1)
    -        ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
    -    else
    -    {
    -        ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
    -        if (    !(ulAddr & ~PAGE_MASK)
    -            &&  (ulAddr & (uAlignment - 1)))
    -        {
    -            /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
    -             * up alignment gaps. This is of course complicated by fragmentation (which we might have cause
    -             * ourselves) and further by there begin two mmap strategies (top / bottom). */
    -            /* For now, just ignore uAlignment requirements... */
    -        }
    -    }
    -    if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
    -        return (void *)-1;
    -    return (void *)ulAddr;
    -}
    -
    -
     DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
     {
     
    @@ -1193,7 +1231,5 @@
          * Let rtR0MemObjLinuxDoMmap do the difficult bits.
          */
    -    down_write(&pTask->mm->mmap_sem);
         pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
    -    up_write(&pTask->mm->mmap_sem);
         if (pv == (void *)-1)
             return VERR_NO_MEMORY;
     
    @@ -1202,7 +1238,5 @@
         if (!pMemLnx)
         {
    -        down_write(&pTask->mm->mmap_sem);
    -        MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, cb);
    -        up_write(&pTask->mm->mmap_sem);
    +        rtR0MemObjLinuxDoMunmap(pv, cb, pTask);
             return VERR_NO_MEMORY;
         }
     
    @@ -1391,5 +1425,4 @@
              */
             void *pv;
    -        down_write(&pTask->mm->mmap_sem);
             pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, pMemLnxToMap->Core.cb, uAlignment, pTask, fProt);
             if (pv != (void *)-1)
     
    @@ -1404,5 +1437,7 @@
                 size_t          iPage;
     
    -            rc = 0;
    +            down_write(&pTask->mm->mmap_sem);
    +
    +            rc = VINF_SUCCESS;
                 if (pMemLnxToMap->cPages)
                 {
     
    @@ -1486,11 +1521,12 @@
                     }
                 }
    -            if (!rc)
    -            {
    -                up_write(&pTask->mm->mmap_sem);
    +
    +            up_write(&pTask->mm->mmap_sem);
    +
    +            if (RT_SUCCESS(rc))
    +            {
     #ifdef VBOX_USE_PAE_HACK
                     __free_page(pDummyPage);
     #endif
    -
                     pMemLnx->Core.pv = pv;
                     pMemLnx->Core.u.Mapping.R0Process = R0Process;
     
    @@ -1502,7 +1538,6 @@
                  * Bail out.
                  */
    -            MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, pMemLnxToMap->Core.cb);
    -        }
    -        up_write(&pTask->mm->mmap_sem);
    +            rtR0MemObjLinuxDoMunmap(pv, pMemLnxToMap->Core.cb, pTask);
    +        }
             rtR0MemObjDelete(&pMemLnx->Core);
         }
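The last three hunks also tighten the error handling in rtR0MemObjNativeMapUser: the status now starts as VINF_SUCCESS rather than a bare 0, mmap_sem is released exactly once before the outcome is tested with RT_SUCCESS(), and the bail-out path goes through rtR0MemObjLinuxDoMunmap(). A condensed sketch of the resulting flow, assuming mapOnePage() stands in for the real per-page remapping loop (mapOnePage and mapUserSketch are illustrative names, not the changeset's code):

    #include <iprt/err.h>           /* VINF_SUCCESS, RT_SUCCESS */
    #include "the-linux-kernel.h"   /* down_write, up_write, PAGE_SHIFT */

    static int  mapOnePage(void *pv, size_t iPage);  /* hypothetical per-page worker */
    static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTask);

    static int mapUserSketch(void *pv, size_t cPages, struct task_struct *pTask)
    {
        int    rc = VINF_SUCCESS;           /* was 'rc = 0' before r41660 */
        size_t iPage;

        down_write(&pTask->mm->mmap_sem);
        for (iPage = 0; iPage < cPages && RT_SUCCESS(rc); iPage++)
            rc = mapOnePage(pv, iPage);     /* returns an IPRT status code */
        up_write(&pTask->mm->mmap_sem);     /* released exactly once */

        if (RT_SUCCESS(rc))
            return rc;
        rtR0MemObjLinuxDoMunmap(pv, cPages << PAGE_SHIFT, pTask); /* bail out */
        return rc;
    }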
  • trunk/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h

    r39841 → r41660:

    @@ -243,5 +243,5 @@
     # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
     #  ifdef VM_ACCOUNT
    -#   define MY_DO_MUNMAP(a,b,c) do_munmap(a, b, c, 0) /* should it be 1 or 0? */
    +#   define USE_RHEL4_MUNMAP
     #  endif
     # endif
     
    @@ -269,8 +269,4 @@
     # endif  /* !RT_ARCH_AMD64 */
     #endif /* !NO_REDHAT_HACKS */
    -
    -#ifndef MY_DO_MUNMAP
    -# define MY_DO_MUNMAP(a,b,c) do_munmap(a, b, c)
    -#endif
     
     #ifndef MY_CHANGE_PAGE_ATTR
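In effect the header now exports a feature flag instead of a wrapper macro: pre-2.6 kernels that define VM_ACCOUNT (a Red Hat backport) carry a four-argument do_munmap(), and the decision about which variant to call has moved into rtR0MemObjLinuxDoMunmap() itself. A sketch of the resulting three-way dispatch, wrapped in a hypothetical function for readability (compatMunmap is not part of the changeset):

    #include <linux/version.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Condensed from the changeset: the three unmap variants the new
     * USE_RHEL4_MUNMAP flag helps select at compile time. */
    static void compatMunmap(struct task_struct *pTask, void *pv, size_t cb)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        vm_munmap((unsigned long)pv, cb);               /* takes mmap_sem itself */
    #else
        down_write(&pTask->mm->mmap_sem);
    # ifdef USE_RHEL4_MUNMAP
        do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* 4-argument RHEL variant */
    # else
        do_munmap(pTask->mm, (unsigned long)pv, cb);
    # endif
        up_write(&pTask->mm->mmap_sem);
    #endif
    }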