VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@37099

Last change on this file since 37099 was 36555, checked in by vboxsync, 14 years ago

Use DECLHIDDEN, especially in IPRT.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.1 KB
 
/* $Id: memobj-r0drv-freebsd.c 36555 2011-04-05 12:34:09Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, FreeBSD.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-freebsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Type dependent data */
    union
    {
        /** Non physical memory allocations */
        struct
        {
            /** The VM object associated with the allocation. */
            vm_object_t pObject;
        } NonPhys;
        /** Physical memory allocations */
        struct
        {
            /** Number of pages */
            uint32_t cPages;
            /** Array of pages - variable */
            vm_page_t apPages[1];
        } Phys;
    } u;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;


MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");

/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem    The memory object.
 */
static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;

        default:
            return NULL;
    }
}

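/**
 * Frees the native resources backing a memory object, type by type:
 * contiguous allocations are returned with contigfree(), page allocations
 * are unmapped and their wired pages freed, locked ranges are unwired,
 * and reservations and mappings are removed from their VM map.
 */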
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
            break;

        case RTR0MEMOBJTYPE_PAGE:
        {
            rc = vm_map_remove(kernel_map,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));

            vm_page_lock_queues();
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
            }
            vm_page_unlock_queues();
            break;
        }

        case RTR0MEMOBJTYPE_LOCK:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;

            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;

            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            vm_page_lock_queues();
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
            }
            vm_page_unlock_queues();
            break;
        }

#ifdef USE_KMEM_ALLOC_ATTR
        case RTR0MEMOBJTYPE_LOW:
        {
            kmem_free(kernel_map, (vm_offset_t)pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb);
            break;
        }
#else
        case RTR0MEMOBJTYPE_LOW: /* unused */
#endif
        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}

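/**
 * Allocates page-backed kernel memory (RTR0MEMOBJTYPE_PAGE): finds free
 * kernel address space, allocates and wires the pages one by one, then
 * enters them into the kernel pmap.
 */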
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;
    size_t cPages = cb >> PAGE_SHIFT;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.Phys.cPages = cPages;

    vm_offset_t MapAddress = vm_map_min(kernel_map);
    rc = vm_map_find(kernel_map,    /* map */
                     NULL,          /* object */
                     0,             /* offset */
                     &MapAddress,   /* addr (IN/OUT) */
                     cb,            /* length */
                     TRUE,          /* find_space */
                     fExecutable    /* protection */
                     ? VM_PROT_ALL
                     : VM_PROT_RW,
                     VM_PROT_ALL,   /* max(_prot) */
                     0);            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        rc = VINF_SUCCESS;

        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage;

            pPage = vm_page_alloc(NULL, iPage,
                                  VM_ALLOC_SYSTEM |
                                  VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);

            if (!pPage)
            {
                /*
                 * Out of pages
                 * Remove already allocated pages
                 */
                while (iPage-- > 0)
                {
                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                    vm_page_lock_queues();
                    vm_page_unwire(pPage, 0);
                    vm_page_free(pPage);
                    vm_page_unlock_queues();
                }
                rc = VERR_NO_MEMORY;
                break;
            }

            pPage->valid = VM_PAGE_BITS_ALL;
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }

        if (rc == VINF_SUCCESS)
        {
            vm_offset_t AddressDst = MapAddress;

            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];

                MY_PMAP_ENTER(kernel_map->pmap, AddressDst, pPage,
                              fExecutable
                              ? VM_PROT_ALL
                              : VM_PROT_RW,
                              TRUE);

                AddressDst += PAGE_SIZE;
            }

            /* Store start address */
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
    }
    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}

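/**
 * Allocates kernel memory physically backed below 4GB (RTR0MEMOBJTYPE_LOW),
 * either directly via kmem_alloc_attr() or by falling back on the page and
 * contiguous allocators.
 */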
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
#ifdef USE_KMEM_ALLOC_ATTR
    /*
     * Use kmem_alloc_attr; fExecutable is not needed because the
     * memory will be executable by default.
     */
    NOREF(fExecutable);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->Core.pv = (void *)kmem_alloc_attr(kernel_map,          /* Kernel */
                                                   cb,                  /* Amount */
                                                   M_ZERO,              /* Zero memory */
                                                   0,                   /* Low physical address */
                                                   _4G - PAGE_SIZE,     /* Highest physical address */
                                                   VM_MEMATTR_DEFAULT); /* Default memory attributes */
    if (!pMemFreeBSD->Core.pv)
        return VERR_NO_MEMORY;

    *ppMem = &pMemFreeBSD->Core;

    return VINF_SUCCESS;
#else
    /*
     * Try a page allocation first and see if we get lucky; if not, try
     * contigmalloc. Might wish to try to find our own pages or something
     * later if this turns into a problem spot on AMD64 boxes.
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
            {
                RTR0MemObjFree(*ppMem, false);
                *ppMem = NULL;
                rc = VERR_NO_MEMORY;
                break;
            }
    }
    if (RT_FAILURE(rc))
        rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    return rc;
#endif
}

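/**
 * Allocates physically contiguous memory below 4GB (RTR0MEMOBJTYPE_CONT)
 * using contigmalloc().
 */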
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                 /* size */
                                        M_IPRTMOBJ,         /* type */
                                        M_NOWAIT | M_ZERO,  /* flags */
                                        0,                  /* lowest physical address */
                                        _4G-1,              /* highest physical address */
                                        PAGE_SIZE,          /* alignment. */
                                        0);                 /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    NOREF(fExecutable);
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}

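/**
 * Initializes a page returned by vm_phys_alloc_contig() as a wired,
 * unmanaged page and accounts for it in the global wire count.
 */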
static void rtR0MemObjFreeBSDPhysPageInit(vm_page_t pPage, vm_pindex_t iPage)
{
    pPage->wire_count = 1;
    pPage->pindex = iPage;
    pPage->act_count = 0;
    pPage->oflags = 0;
    pPage->flags = PG_UNMANAGED;
    atomic_add_int(&cnt.v_wire_count, 1);
}

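/**
 * Worker for the physical page allocators: allocates cb bytes of pages at
 * or below PhysHighest (no limit when NIL_RTHCPHYS) via
 * vm_phys_alloc_contig(), either as one contiguous run or page by page,
 * and records them in the memory object.
 */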
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb,
                                           RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous)
{
    int rc = VINF_SUCCESS;
    uint32_t cPages = cb >> PAGE_SHIFT;
    vm_paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.Phys.cPages = cPages;

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    if (fContiguous)
    {
        vm_page_t pPage = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);

        if (pPage)
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                rtR0MemObjFreeBSDPhysPageInit(&pPage[iPage], iPage);
                pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
            }
        else
            rc = VERR_NO_MEMORY;
    }
    else
    {
        /* Allocate page by page */
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage = vm_phys_alloc_contig(1, 0, VmPhysAddrHigh, uAlignment, 0);

            if (!pPage)
            {
                /* Free all allocated pages */
                while (iPage-- > 0)
                {
                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                    vm_page_lock_queues();
                    vm_page_unwire(pPage, 0);
                    vm_page_free(pPage);
                    vm_page_unlock_queues();
                }
                rc = VERR_NO_MEMORY;
                break;
            }
            rtR0MemObjFreeBSDPhysPageInit(pPage, iPage);
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }
    }

    if (RT_FAILURE(rc))
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    else
    {
        if (enmType == RTR0MEMOBJTYPE_PHYS)
        {
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[0]);
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }

    return rc;
}

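/**
 * Allocates contiguous physical memory below PhysHighest
 * (RTR0MEMOBJTYPE_PHYS) via the physical page allocator worker above.
 */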
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#if 1
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
#else
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                 /* size */
                                        M_IPRTMOBJ,         /* type */
                                        M_NOWAIT | M_ZERO,  /* flags */
                                        0,                  /* lowest physical address */
                                        _4G-1,              /* highest physical address */
                                        uAlignment,         /* alignment. */
                                        0);                 /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
#endif
}

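/**
 * Allocates non-contiguous physical memory below PhysHighest
 * (RTR0MEMOBJTYPE_PHYS_NC), one page at a time.
 */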
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 1
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
#else
    return VERR_NOT_SUPPORTED;
#endif
}

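/**
 * Creates a memory object for a pre-existing physical address range
 * (RTR0MEMOBJTYPE_PHYS); nothing is allocated, the range merely gets a
 * handle so it can be mapped later.
 */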
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemFreeBSD->Core.u.Phys.fAllocated = false;
    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
    pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemFreeBSD->Core;
    return VINF_SUCCESS;
}

/**
 * Worker locking the memory in either kernel or user maps.
 */
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
    rc = vm_map_wire(pVmMap,            /* the map */
                     AddrStart,         /* start */
                     AddrStart + cb,    /* end */
                     fFlags);           /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}

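/**
 * Locks down a user-mode address range by wiring it into the process' VM map.
 */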
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     &((struct proc *)R0Process)->p_vmspace->vm_map,
                                     (vm_offset_t)R3Ptr,
                                     cb,
                                     fAccess,
                                     R0Process,
                                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
}

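/**
 * Locks down a kernel address range by wiring it into kernel_map.
 */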
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     kernel_map,
                                     (vm_offset_t)pv,
                                     cb,
                                     fAccess,
                                     NIL_RTR0PROCESS,
                                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (   pvFixed != (void *)-1
        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * Allocate an empty VM object and map it into the requested map.
     */
    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->u.NonPhys.pObject)
    {
        vm_offset_t MapAddress = pvFixed != (void *)-1
                               ? (vm_offset_t)pvFixed
                               : vm_map_min(pMap);
        if (pvFixed != (void *)-1)
            vm_map_remove(pMap,
                          MapAddress,
                          MapAddress + cb);

        rc = vm_map_find(pMap,                              /* map */
                         pMemFreeBSD->u.NonPhys.pObject,    /* object */
                         0,                                 /* offset */
                         &MapAddress,                       /* addr (IN/OUT) */
                         cb,                                /* length */
                         pvFixed == (void *)-1,             /* find_space */
                         VM_PROT_NONE,                      /* protection */
                         VM_PROT_ALL,                       /* max(_prot) ?? */
                         0);                                /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            if (R0Process != NIL_RTR0PROCESS)
            {
                rc = vm_map_inherit(pMap,
                                    MapAddress,
                                    MapAddress + cb,
                                    VM_INHERIT_SHARE);
                AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
            }
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        vm_object_deallocate(pMemFreeBSD->u.NonPhys.pObject);
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}

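/**
 * Reserves kernel virtual address space (RTR0MEMOBJTYPE_RES_VIRT).
 */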
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}

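/**
 * Reserves virtual address space in a user process (RTR0MEMOBJTYPE_RES_VIRT).
 */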
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}

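/**
 * Maps a memory object into kernel space - not implemented on FreeBSD yet,
 * always fails with VERR_NOT_SUPPORTED after the parameter checks.
 */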
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
/** @todo finish the implementation. */

    return VERR_NOT_SUPPORTED;
}

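/**
 * Maps a memory object into the calling process (RTR0MEMOBJTYPE_MAPPING):
 * finds free address space in the target map and then enters the backing
 * pages into the process pmap page by page.
 */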
/* see http://markmail.org/message/udhq33tefgtyfozs */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc *pProc = (struct proc *)R0Process;
    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    PROC_LOCK(pProc);
    vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
    PROC_UNLOCK(pProc);

    /* Insert the object in the map. */
    rc = vm_map_find(pProcMap,          /* Map to insert the object in */
                     NULL,              /* Object to map */
                     0,                 /* Start offset in the object */
                     &AddrR3,           /* Start address IN/OUT */
                     pMemToMap->cb,     /* Size of the mapping */
                     TRUE,              /* Whether a suitable address should be searched for first */
                     ProtectionFlags,   /* protection flags */
                     VM_PROT_ALL,       /* Maximum protection flags */
                     0);                /* Copy on write */

    /* Map the memory page by page into the destination map. */
    if (rc == KERN_SUCCESS)
    {
        size_t cPages = pMemToMap->cb >> PAGE_SHIFT;
        pmap_t pPhysicalMap = pProcMap->pmap;
        vm_offset_t AddrR3Dst = AddrR3;

        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PAGE)
        {
            /* Mapping physical allocations */
            Assert(cPages == pMemToMapFreeBSD->u.Phys.cPages);

            /* Insert the memory page by page into the mapping. */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage];

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
            }
        }
        else
        {
            /* Mapping cont or low memory types */
            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;

            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
                AddrToMap += PAGE_SIZE;
            }
        }
    }

    if (RT_SUCCESS(rc))
    {
        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        rc = vm_map_remove(pProcMap, ((vm_offset_t)AddrR3), ((vm_offset_t)AddrR3) + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }

    return VERR_NO_MEMORY;
}

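/**
 * Changes the protection of a sub-range of a memory object by calling
 * vm_map_protect() on the map the object lives in.
 */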
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t ProtectionFlags = 0;
    vm_offset_t AddrStart = (uintptr_t)pMem->pv + offSub;
    vm_offset_t AddrEnd = AddrStart + cbSub;
    vm_map_t pVmMap = rtR0MemObjFreeBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE);
    if (krc == KERN_SUCCESS)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}

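/**
 * Returns the physical address of the given page of a memory object,
 * using pmap_extract()/vtophys() for mapped and locked ranges and the
 * recorded page array or base address for physical allocations.
 */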
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOCK:
        {
            if (   pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                && pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
            {
                /* later */
                return NIL_RTHCPHYS;
            }

            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
            struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
            pmap_t pPhysicalMap = pProcMap->pmap;

            return pmap_extract(pPhysicalMap, pb);
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
            {
                struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
                struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
                pmap_t pPhysicalMap = pProcMap->pmap;

                return pmap_extract(pPhysicalMap, pb);
            }
            return vtophys(pb);
        }

        case RTR0MEMOBJTYPE_CONT:
            return pMemFreeBSD->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]);

#ifdef USE_KMEM_ALLOC_ATTR
        case RTR0MEMOBJTYPE_LOW:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);
            return vtophys(pb);
        }
#else
        case RTR0MEMOBJTYPE_LOW:
#endif
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}