VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c@63549

Last change on this file since 63549 was 63549, checked in by vboxsync, 8 years ago

scm cleanups

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.1 KB
 
/* $Id: memobj-r0drv-netbsd.c 63549 2016-08-16 12:55:14Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NetBSD.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-netbsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The NetBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJNETBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    size_t              size;
    struct pglist       pglist;
} RTR0MEMOBJNETBSD, *PRTR0MEMOBJNETBSD;


typedef struct vm_map* vm_map_t;

/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem    The memory object.
 */
static vm_map_t rtR0MemObjNetBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                ? kernel_map
                : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                ? kernel_map
                : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                ? kernel_map
                : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;

        default:
            return NULL;
    }
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;
    int rc;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        {
            kmem_free(pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            break;
        }
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        {
            /* Unmap */
            pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            /* Free the virtual space */
            uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNetBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                uvm_map_pageable(
                        &((struct proc *)pMemNetBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map,
                        (vaddr_t)pMemNetBSD->Core.pv,
                        ((vaddr_t)pMemNetBSD->Core.pv) + pMemNetBSD->Core.cb,
                        1, 0);
            }
            break;
        case RTR0MEMOBJTYPE_RES_VIRT:
            if (pMemNetBSD->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            {
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;
        case RTR0MEMOBJTYPE_MAPPING:
            if (pMemNetBSD->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            {
                pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNetBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}

static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
                                       paddr_t VmPhysAddrHigh, bool fContiguous)
{
    /* Virtual space first */
    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
        return VERR_NO_MEMORY;

    struct pglist *rlist = &pMemNetBSD->pglist;

    int nsegs = fContiguous ? 1 : INT_MAX;

    /* Physical pages */
    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh,
                        PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
    {
        uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
        return VERR_NO_MEMORY;
    }

    /* Map */
    struct vm_page *page;
    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
    if (fExecutable)
        prot |= VM_PROT_EXECUTE;
    vaddr_t virt2 = virt;
    TAILQ_FOREACH(page, rlist, pageq.queue)
    {
        pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
        virt2 += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    if (fContiguous)
    {
        page = TAILQ_FIRST(rlist);
        pMemNetBSD->Core.u.Cont.Phys = VM_PAGE_TO_PHYS(page);
    }
    return VINF_SUCCESS;
}

DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    void *pvMem = kmem_alloc(cb, KM_SLEEP);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_PAGE_MEMORY;
    }
    if (fExecutable)
    {
        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, ((vaddr_t)pvMem) + cb,
                     VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
    }

    pMemNetBSD->Core.pv = pvMem;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


static int rtR0MemObjNetBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                          size_t cb,
                                          RTHCPHYS PhysHighest, size_t uAlignment,
                                          bool fContiguous)
{
    paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    enmType, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(paddr_t)0;

    int nsegs = fContiguous ? 1 : INT_MAX;

    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
    if (error)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    if (fContiguous)
    {
        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
        pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
        pMemNetBSD->Core.u.Phys.fAllocated = true;
    }
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemNetBSD->Core.u.Phys.fAllocated = false;
    pMemNetBSD->Core.u.Phys.PhysBase = Phys;
    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    TAILQ_INIT(&pMemNetBSD->pglist);
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = uvm_map_pageable(
            &((struct proc *)R0Process)->p_vmspace->vm_map,
            R3Ptr,
            R3Ptr + cb,
            0, 0);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    /* Kernel memory (always?) wired; all memory allocated by vbox code is? */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = pv;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}

DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("reserve specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = (void *)virt;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    printf("NativeReserveUser\n");
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("map to specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD0 = (PRTR0MEMOBJNETBSD)pMemToMap;
    if ((pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS)
        && (pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC))
    {
        printf("memory to map is not physical\n");
        return VERR_NOT_SUPPORTED;
    }
    size_t sz = cbSub > 0 ? cbSub : pMemNetBSD0->Core.cb;

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz);

    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    vm_prot_t prot = 0;

    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        prot |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        prot |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        prot |= VM_PROT_EXECUTE;

    struct vm_page *page;
    vaddr_t virt2 = virt;
    size_t map_pos = 0;
    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
    {
        if (map_pos >= offSub)
        {
            if (cbSub > 0 && (map_pos >= offSub + cbSub))
                break;

            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
            virt2 += PAGE_SIZE;
        }
        map_pos += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    printf("NativeMapUser\n");
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t ProtectionFlags = 0;
    vaddr_t   AddrStart       = (vaddr_t)pMem->pv + offSub;
    vm_map_t  pVmMap          = rtR0MemObjNetBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= UVM_PROT_R;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= UVM_PROT_W;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= UVM_PROT_X;

    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub,
                                ProtectionFlags, 0);
    if (!error)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        {
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            pmap_extract(pmap_kernel(), va, &pa);
            return pa;
        }
        case RTR0MEMOBJTYPE_CONT:
            return pMemNetBSD->Core.u.Cont.Phys + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS:
            return pMemNetBSD->Core.u.Phys.PhysBase + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            struct vm_page *page;
            size_t i = 0;
            TAILQ_FOREACH(page, &pMemNetBSD->pglist, pageq.queue)
            {
                if (i == iPage)
                    break;
                i++;
            }
            return VM_PAGE_TO_PHYS(page);
        }
        case RTR0MEMOBJTYPE_LOCK:
        case RTR0MEMOBJTYPE_MAPPING:
        {
            pmap_t pmap;
            if (pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                pmap = pmap_kernel();
            else
                pmap = ((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map.pmap;
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            pmap_extract(pmap, va, &pa);
            return pa;
        }
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
        default:
            return NIL_RTHCPHYS;
    }
}