VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@ 40483

最後變更：在這個檔案自修訂版 40483 以來的最後變更為 39744，由 vboxsync 於 13 年前提交

rtr0memobj: Status code adjustments.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 29.8 KB
 
1/* $Id: memobj-r0drv-freebsd.c 39744 2012-01-10 18:15:04Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, FreeBSD.
4 */
5
6/*
7 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
8 * Copyright (c) 2011 Andriy Gapon <[email protected]>
9 *
10 * Permission is hereby granted, free of charge, to any person
11 * obtaining a copy of this software and associated documentation
12 * files (the "Software"), to deal in the Software without
13 * restriction, including without limitation the rights to use,
14 * copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following
17 * conditions:
18 *
19 * The above copyright notice and this permission notice shall be
20 * included in all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
24 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
26 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
27 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
29 * OTHER DEALINGS IN THE SOFTWARE.
30 */
31
32
33/*******************************************************************************
34* Header Files *
35*******************************************************************************/
36#include "the-freebsd-kernel.h"
37
38#include <iprt/memobj.h>
39#include <iprt/mem.h>
40#include <iprt/err.h>
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/param.h>
44#include <iprt/process.h>
45#include "internal/memobj.h"
46
47
48/*******************************************************************************
49* Structures and Typedefs *
50*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure (must be first; the code casts between the two). */
    RTR0MEMOBJINTERNAL  Core;
    /** The VM object associated with the allocation; NULL for types that
     *  have no backing object (e.g. locks and reservations). */
    vm_object_t         pObject;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;
61
62
/** Kernel malloc(9) type tag for IPRT ring-0 memory object allocations. */
MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
64
65
66/**
67 * Gets the virtual memory map the specified object is mapped into.
68 *
69 * @returns VM map handle on success, NULL if no map.
70 * @param pMem The memory object.
71 */
72static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
73{
74 switch (pMem->enmType)
75 {
76 case RTR0MEMOBJTYPE_PAGE:
77 case RTR0MEMOBJTYPE_LOW:
78 case RTR0MEMOBJTYPE_CONT:
79 return kernel_map;
80
81 case RTR0MEMOBJTYPE_PHYS:
82 case RTR0MEMOBJTYPE_PHYS_NC:
83 return NULL; /* pretend these have no mapping atm. */
84
85 case RTR0MEMOBJTYPE_LOCK:
86 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
87 ? kernel_map
88 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;
89
90 case RTR0MEMOBJTYPE_RES_VIRT:
91 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
92 ? kernel_map
93 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;
94
95 case RTR0MEMOBJTYPE_MAPPING:
96 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
97 ? kernel_map
98 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;
99
100 default:
101 return NULL;
102 }
103}
104
105
106DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
107{
108 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
109 int rc;
110
111 switch (pMemFreeBSD->Core.enmType)
112 {
113 case RTR0MEMOBJTYPE_PAGE:
114 case RTR0MEMOBJTYPE_LOW:
115 case RTR0MEMOBJTYPE_CONT:
116 rc = vm_map_remove(kernel_map,
117 (vm_offset_t)pMemFreeBSD->Core.pv,
118 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
119 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
120 break;
121
122 case RTR0MEMOBJTYPE_LOCK:
123 {
124 vm_map_t pMap = kernel_map;
125
126 if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
127 pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
128
129 rc = vm_map_unwire(pMap,
130 (vm_offset_t)pMemFreeBSD->Core.pv,
131 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
132 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
133 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
134 break;
135 }
136
137 case RTR0MEMOBJTYPE_RES_VIRT:
138 {
139 vm_map_t pMap = kernel_map;
140 if (pMemFreeBSD->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
141 pMap = &((struct proc *)pMemFreeBSD->Core.u.ResVirt.R0Process)->p_vmspace->vm_map;
142 rc = vm_map_remove(pMap,
143 (vm_offset_t)pMemFreeBSD->Core.pv,
144 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
145 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
146 break;
147 }
148
149 case RTR0MEMOBJTYPE_MAPPING:
150 {
151 vm_map_t pMap = kernel_map;
152
153 if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
154 pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
155 rc = vm_map_remove(pMap,
156 (vm_offset_t)pMemFreeBSD->Core.pv,
157 (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
158 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
159 break;
160 }
161
162 case RTR0MEMOBJTYPE_PHYS:
163 case RTR0MEMOBJTYPE_PHYS_NC:
164 {
165 VM_OBJECT_LOCK(pMemFreeBSD->pObject);
166 vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
167 vm_page_lock_queues();
168 for (vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
169 pPage != NULL;
170 pPage = vm_page_next(pPage))
171 {
172 vm_page_unwire(pPage, 0);
173 }
174 vm_page_unlock_queues();
175 VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
176 vm_object_deallocate(pMemFreeBSD->pObject);
177 break;
178 }
179
180 default:
181 AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
182 return VERR_INTERNAL_ERROR;
183 }
184
185 return VINF_SUCCESS;
186}
187
188
/**
 * Allocates @a cPages physically contiguous pages and inserts them into
 * @a pObject starting at page index @a iPIndex.
 *
 * @returns Pointer to the first page on success, NULL when no suitable
 *          contiguous run could be allocated.
 * @param   pObject         The VM object to insert the pages into.
 * @param   iPIndex         Object page index of the first page.
 * @param   cPages          Number of pages to allocate.
 * @param   VmPhysAddrHigh  Highest acceptable physical address.
 * @param   uAlignment      Required physical alignment in bytes.
 * @param   fWire           Whether to wire the pages down.
 */
static vm_page_t rtR0MemObjFreeBSDContigPhysAllocHelper(vm_object_t pObject, vm_pindex_t iPIndex,
                                                        u_long cPages, vm_paddr_t VmPhysAddrHigh,
                                                        u_long uAlignment, bool fWire)
{
    vm_page_t pPages;
#if __FreeBSD_version > 1000000
    /* Newer kernels: vm_page_alloc_contig allocates, inserts and (optionally)
       wires in a single call. */
    int fFlags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
    if (fWire)
        fFlags |= VM_ALLOC_WIRED;
    VM_OBJECT_LOCK(pObject);
    pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0, VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
    VM_OBJECT_UNLOCK(pObject);
    return pPages;
#else
    /* Older kernels: grab the pages from the physical allocator and insert
       them into the object by hand. */
    pPages = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
    if (!pPages)
        return pPages;
    VM_OBJECT_LOCK(pObject);
    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
    {
        vm_page_t pPage = pPages + iPage;
        vm_page_insert(pPage, pObject, iPIndex + iPage);
        pPage->valid = VM_PAGE_BITS_ALL;
        if (fWire)
        {
            /* vm_phys_alloc_contig does not wire; do it manually and keep the
               global wired-page counter in sync. */
            pPage->wire_count = 1;
            atomic_add_int(&cnt.v_wire_count, 1);
        }
    }
    VM_OBJECT_UNLOCK(pObject);
    return pPages;
#endif
}
222
/**
 * Backs @a pObject with @a cPages physical pages, either as a single
 * contiguous run or page by page.
 *
 * On failure any pages already inserted are freed again, so the object is
 * left empty.
 *
 * @returns VINF_SUCCESS or @a rcNoMem.
 * @param   pObject         The VM object to populate.
 * @param   cPages          Number of pages.
 * @param   VmPhysAddrHigh  Highest acceptable physical address.
 * @param   uAlignment      Physical alignment in bytes (applies to each
 *                          allocation unit).
 * @param   fContiguous     Whether all pages must be physically contiguous.
 * @param   fWire           Whether to wire the pages.
 * @param   rcNoMem         Status code to return when out of pages.
 */
static int rtR0MemObjFreeBSDPhysAllocHelper(vm_object_t pObject, u_long cPages,
                                            vm_paddr_t VmPhysAddrHigh, u_long uAlignment,
                                            bool fContiguous, bool fWire, int rcNoMem)
{
    if (fContiguous)
    {
        /* All-or-nothing: one contiguous allocation covering the range. */
        if (rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, 0, cPages, VmPhysAddrHigh,
                                                   uAlignment, fWire) != NULL)
            return VINF_SUCCESS;
        return rcNoMem;
    }

    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
    {
        /* One page at a time (cPages = 1 per call). */
        vm_page_t pPage = rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, iPage, 1, VmPhysAddrHigh,
                                                                 uAlignment, fWire);
        if (!pPage)
        {
            /* Free all allocated pages */
            VM_OBJECT_LOCK(pObject);
            while (iPage-- > 0)
            {
                pPage = vm_page_lookup(pObject, iPage);
                vm_page_lock_queues();
                if (fWire)
                    vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
                vm_page_unlock_queues();
            }
            VM_OBJECT_UNLOCK(pObject);
            return rcNoMem;
        }
    }
    return VINF_SUCCESS;
}
258
259static int rtR0MemObjFreeBSDAllocHelper(PRTR0MEMOBJFREEBSD pMemFreeBSD, bool fExecutable,
260 vm_paddr_t VmPhysAddrHigh, bool fContiguous, int rcNoMem)
261{
262 int rc;
263 size_t cPages = atop(pMemFreeBSD->Core.cb);
264
265 pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);
266 vm_offset_t MapAddress;
267
268 /* No additional object reference for auto-deallocation upon unmapping. */
269 rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
270 &MapAddress, pMemFreeBSD->Core.cb, VMFS_ANY_SPACE,
271 fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
272
273 if (rc == KERN_SUCCESS)
274 {
275 rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages,
276 VmPhysAddrHigh, PAGE_SIZE, fContiguous,
277 false, rcNoMem);
278 if (RT_SUCCESS(rc))
279 {
280 vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb,
281 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
282
283 /* Store start address */
284 pMemFreeBSD->Core.pv = (void *)MapAddress;
285 return VINF_SUCCESS;
286 }
287
288 vm_map_remove(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb);
289 }
290 rc = rcNoMem; /** @todo fix translation (borrow from darwin) */
291
292 vm_object_deallocate(pMemFreeBSD->pObject);
293 rtR0MemObjDelete(&pMemFreeBSD->Core);
294 return rc;
295}
296DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
297{
298 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
299 RTR0MEMOBJTYPE_PAGE, NULL, cb);
300 if (!pMemFreeBSD)
301 return VERR_NO_MEMORY;
302
303 int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, ~(vm_paddr_t)0, false, VERR_NO_MEMORY);
304 if (RT_FAILURE(rc))
305 {
306 rtR0MemObjDelete(&pMemFreeBSD->Core);
307 return rc;
308 }
309
310 *ppMem = &pMemFreeBSD->Core;
311 return rc;
312}
313
314
315DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
316{
317 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
318 RTR0MEMOBJTYPE_LOW, NULL, cb);
319 if (!pMemFreeBSD)
320 return VERR_NO_MEMORY;
321
322 int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G, false, VERR_NO_LOW_MEMORY);
323 if (RT_FAILURE(rc))
324 {
325 rtR0MemObjDelete(&pMemFreeBSD->Core);
326 return rc;
327 }
328
329 *ppMem = &pMemFreeBSD->Core;
330 return rc;
331}
332
333
334DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
335{
336 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
337 RTR0MEMOBJTYPE_CONT, NULL, cb);
338 if (!pMemFreeBSD)
339 return VERR_NO_MEMORY;
340
341 int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G, true, VERR_NO_CONT_MEMORY);
342 if (RT_FAILURE(rc))
343 {
344 rtR0MemObjDelete(&pMemFreeBSD->Core);
345 return rc;
346 }
347
348 pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
349 *ppMem = &pMemFreeBSD->Core;
350 return rc;
351}
352
353
354static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
355 size_t cb,
356 RTHCPHYS PhysHighest, size_t uAlignment,
357 bool fContiguous, int rcNoMem)
358{
359 uint32_t cPages = atop(cb);
360 vm_paddr_t VmPhysAddrHigh;
361
362 /* create the object. */
363 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
364 enmType, NULL, cb);
365 if (!pMemFreeBSD)
366 return VERR_NO_MEMORY;
367
368 pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));
369
370 if (PhysHighest != NIL_RTHCPHYS)
371 VmPhysAddrHigh = PhysHighest;
372 else
373 VmPhysAddrHigh = ~(vm_paddr_t)0;
374
375 int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
376 uAlignment, fContiguous, true, rcNoMem);
377 if (RT_SUCCESS(rc))
378 {
379 if (fContiguous)
380 {
381 Assert(enmType == RTR0MEMOBJTYPE_PHYS);
382 VM_OBJECT_LOCK(pMemFreeBSD->pObject);
383 pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
384 VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
385 pMemFreeBSD->Core.u.Phys.fAllocated = true;
386 }
387
388 *ppMem = &pMemFreeBSD->Core;
389 }
390 else
391 {
392 vm_object_deallocate(pMemFreeBSD->pObject);
393 rtR0MemObjDelete(&pMemFreeBSD->Core);
394 }
395
396 return rc;
397}
398
399
/** Allocates contiguous physical memory below PhysHighest (not mapped). */
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true, VERR_NO_MEMORY);
}
404
405
/** Allocates non-contiguous physical memory below PhysHighest (not mapped). */
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false, VERR_NO_PHYS_MEMORY);
}
410
411
412DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
413{
414 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
415
416 /* create the object. */
417 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
418 if (!pMemFreeBSD)
419 return VERR_NO_MEMORY;
420
421 /* there is no allocation here, it needs to be mapped somewhere first. */
422 pMemFreeBSD->Core.u.Phys.fAllocated = false;
423 pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
424 pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
425 *ppMem = &pMemFreeBSD->Core;
426 return VINF_SUCCESS;
427}
428
429
430/**
431 * Worker locking the memory in either kernel or user maps.
432 */
433static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
434 vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
435 RTR0PROCESS R0Process, int fFlags)
436{
437 int rc;
438 NOREF(fAccess);
439
440 /* create the object. */
441 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
442 if (!pMemFreeBSD)
443 return VERR_NO_MEMORY;
444
445 /*
446 * We could've used vslock here, but we don't wish to be subject to
447 * resource usage restrictions, so we'll call vm_map_wire directly.
448 */
449 rc = vm_map_wire(pVmMap, /* the map */
450 AddrStart, /* start */
451 AddrStart + cb, /* end */
452 fFlags); /* flags */
453 if (rc == KERN_SUCCESS)
454 {
455 pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
456 *ppMem = &pMemFreeBSD->Core;
457 return VINF_SUCCESS;
458 }
459 rtR0MemObjDelete(&pMemFreeBSD->Core);
460 return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
461}
462
463
/** Wires down a user-mode address range in the given process' map. */
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     &((struct proc *)R0Process)->p_vmspace->vm_map,
                                     (vm_offset_t)R3Ptr,
                                     cb,
                                     fAccess,
                                     R0Process,
                                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
}
474
475
/** Wires down a kernel address range in the kernel map. */
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     kernel_map,
                                     (vm_offset_t)pv,
                                     cb,
                                     fAccess,
                                     NIL_RTR0PROCESS,
                                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
486
487
/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the resulting reservation object.
 * @param   pvFixed     Fixed address or (void *)-1 for "anywhere".
 * @param   cb          Number of bytes to reserve.
 * @param   uAlignment  Requested alignment; only up to PAGE_SIZE is supported.
 * @param   R0Process   Owning process or NIL_RTR0PROCESS for the kernel map.
 * @param   pMap        The map to reserve the range in.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (    pvFixed != (void *)-1
        && (    (vm_offset_t)pvFixed < vm_map_min(pMap)
            ||  (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* For a fixed request, clear whatever currently occupies the range so the
       VMFS_NO_SPACE find below can succeed.
       NOTE(review): this unconditionally unmaps existing mappings at pvFixed -
       presumably intentional for reservation semantics; confirm with callers. */
    vm_offset_t MapAddress = pvFixed != (void *)-1
                           ? (vm_offset_t)pvFixed
                           : vm_map_min(pMap);
    if (pvFixed != (void *)-1)
        vm_map_remove(pMap,
                      MapAddress,
                      MapAddress + cb);

    /* Reserve the range with no backing object and no access rights. */
    rc = vm_map_find(pMap,                          /* map */
                     NULL,                          /* object */
                     0,                             /* offset */
                     &MapAddress,                   /* addr (IN/OUT) */
                     cb,                            /* length */
                     pvFixed == (void *)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                                    /* find_space */
                     VM_PROT_NONE,                  /* protection */
                     VM_PROT_ALL,                   /* max(_prot) ?? */
                     0);                            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        if (R0Process != NIL_RTR0PROCESS)
        {
            /* Keep the reservation across fork(). */
            rc = vm_map_inherit(pMap,
                                MapAddress,
                                MapAddress + cb,
                                VM_INHERIT_SHARE);
            AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
        }
        pMemFreeBSD->Core.pv = (void *)MapAddress;
        pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;

}
557
558
/** Reserves a range of kernel virtual address space. */
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}
563
564
/** Reserves a range of virtual address space in the given process. */
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}
570
571
/**
 * Maps (a portion of) an existing memory object into the kernel map.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new mapping object.
 * @param   pMemToMap   The object to map; must have a backing VM object.
 * @param   pvFixed     Only (void *)-1 ("anywhere") is supported.
 * @param   uAlignment  Only alignments up to PAGE_SIZE are supported.
 * @param   fProt       RTMEM_PROT_XXX protection flags.
 * @param   offSub      Byte offset into the object to start mapping at.
 * @param   cbSub       Number of bytes to map, 0 meaning "to the end".
 */
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
//    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;

    /* Translate RTMEM_PROT_XXX into VM_PROT_XXX. */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    vm_offset_t Addr = vm_map_min(kernel_map);
    if (cbSub == 0)
        cbSub = pMemToMap->cb - offSub;

    /* Take a reference for the map; vm_map_find consumes it on success and we
       release it ourselves on failure below. */
    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(kernel_map,            /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     offSub,                /* Start offset in the object */
                     &Addr,                 /* Start address IN/OUT */
                     cbSub,                 /* Size of the mapping */
                     VMFS_ANY_SPACE,        /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        /* Wire the mapping so the pages stay resident. */
        rc = vm_map_wire(kernel_map, Addr, Addr + cbSub, VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)Addr,
                                                                           cbSub);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == Addr);
            pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        /* Out of memory for the handle; undo the mapping (this also drops the
           object reference the map held). */
        rc = vm_map_remove(kernel_map, Addr, Addr + cbSub);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
637
638
/**
 * Maps an existing memory object into a user process' address space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new mapping object.
 * @param   pMemToMap   The object to map; must have a backing VM object.
 * @param   R3PtrFixed  Fixed user address or (RTR3PTR)-1 for "anywhere".
 * @param   uAlignment  Only alignments up to PAGE_SIZE are supported.
 * @param   fProt       RTMEM_PROT_XXX protection flags.
 * @param   R0Process   Target process; must be the current process.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc *pProc = (struct proc *)R0Process;
    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;

    /* Translate RTMEM_PROT_XXX into VM_PROT_XXX. */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* Calculate the mapping address: just above the data segment limit when
       no fixed address was requested (mirrors what mmap does). */
    vm_offset_t AddrR3;
    if (R3PtrFixed == (RTR3PTR)-1)
    {
        /** @todo: is this needed?. */
        PROC_LOCK(pProc);
        AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
        PROC_UNLOCK(pProc);
    }
    else
        AddrR3 = (vm_offset_t)R3PtrFixed;

    /* Insert the pObject in the map.  The reference is consumed by
       vm_map_find on success and released by us on failure. */
    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(pProcMap,              /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     0,                     /* Start offset in the object */
                     &AddrR3,               /* Start address IN/OUT */
                     pMemToMap->cb,         /* Size of the mapping */
                     R3PtrFixed == (RTR3PTR)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                            /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        /* Wire the pages so they stay resident for the process. */
        rc = vm_map_wire(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        /* Keep the mapping across fork(). */
        rc = vm_map_inherit(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_INHERIT_SHARE);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        /* Out of memory for the handle; undo the mapping. */
        rc = vm_map_remove(pProcMap, AddrR3, AddrR3 + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
721
722
723DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
724{
725 vm_prot_t ProtectionFlags = 0;
726 vm_offset_t AddrStart = (uintptr_t)pMem->pv + offSub;
727 vm_offset_t AddrEnd = AddrStart + cbSub;
728 vm_map_t pVmMap = rtR0MemObjFreeBSDGetMap(pMem);
729
730 if (!pVmMap)
731 return VERR_NOT_SUPPORTED;
732
733 if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
734 ProtectionFlags = VM_PROT_NONE;
735 if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
736 ProtectionFlags |= VM_PROT_READ;
737 if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
738 ProtectionFlags |= VM_PROT_WRITE;
739 if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
740 ProtectionFlags |= VM_PROT_EXECUTE;
741
742 int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE);
743 if (krc == KERN_SUCCESS)
744 return VINF_SUCCESS;
745
746 return VERR_NOT_SUPPORTED;
747}
748
749
750DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
751{
752 PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
753
754 switch (pMemFreeBSD->Core.enmType)
755 {
756 case RTR0MEMOBJTYPE_LOCK:
757 {
758 if ( pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
759 && pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
760 {
761 /* later */
762 return NIL_RTHCPHYS;
763 }
764
765 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
766
767 struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
768 struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
769 pmap_t pPhysicalMap = vm_map_pmap(pProcMap);
770
771 return pmap_extract(pPhysicalMap, pb);
772 }
773
774 case RTR0MEMOBJTYPE_MAPPING:
775 {
776 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
777
778 if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
779 {
780 struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
781 struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
782 pmap_t pPhysicalMap = vm_map_pmap(pProcMap);
783
784 return pmap_extract(pPhysicalMap, pb);
785 }
786 return vtophys(pb);
787 }
788
789 case RTR0MEMOBJTYPE_PAGE:
790 case RTR0MEMOBJTYPE_LOW:
791 case RTR0MEMOBJTYPE_PHYS_NC:
792 {
793 RTHCPHYS addr;
794 VM_OBJECT_LOCK(pMemFreeBSD->pObject);
795 addr = VM_PAGE_TO_PHYS(vm_page_lookup(pMemFreeBSD->pObject, iPage));
796 VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
797 return addr;
798 }
799
800 case RTR0MEMOBJTYPE_PHYS:
801 return pMemFreeBSD->Core.u.Cont.Phys + ptoa(iPage);
802
803 case RTR0MEMOBJTYPE_CONT:
804 return pMemFreeBSD->Core.u.Phys.PhysBase + ptoa(iPage);
805
806 case RTR0MEMOBJTYPE_RES_VIRT:
807 default:
808 return NIL_RTHCPHYS;
809 }
810}
811
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette