VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c @ 76553

Last change on this file since 76553 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.0 KB
 
/* $Id: vbox_ttm.c 76553 2019-01-01 01:45:53Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2019 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <[email protected]>
 *          Michael Thayer <[email protected]>
 */
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>

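/*
 * Linux 3.18 (and RHEL 7.2) turned each placement entry from a bare u32 of
 * flags into a struct ttm_place with fpfn/lpfn/flags members.  This macro
 * lets the placement-building code below assign flags with one syntax on
 * both sides of that change.
 */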
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(RHEL_72)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif

static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct vbox_private, ttm.bdev);
}

static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &vbox->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &vbox_ttm_mem_global_init;
	global_ref->release = &vbox_ttm_mem_global_release;
	ret = drm_global_item_ref(global_ref);
	if (ret) {
		DRM_ERROR("Failed setting up TTM memory subsystem.\n");
		return ret;
	}

	vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
	global_ref = &vbox->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (ret) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&vbox->ttm.mem_global_ref);
		return ret;
	}

	return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
	drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
	drm_global_item_unref(&vbox->ttm.mem_global_ref);
}

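/*
 * TTM buffer object destroy callback: release the embedded GEM object and
 * free the containing vbox_bo.  TTM invokes this once the last reference
 * to the buffer object is dropped.
 */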
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct vbox_bo *bo;

	bo = container_of(tbo, struct vbox_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &vbox_bo_ttm_destroy)
		return true;

	return false;
}

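/*
 * Describe the memory types this driver exposes to TTM: cached system
 * memory, and VRAM, a fixed, mappable PCI aperture that should be mapped
 * write-combined by default.
 */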
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}

	return 0;
}

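/*
 * Tell TTM where a buffer object may be moved when it has to be evicted:
 * vbox buffer objects always fall back to system memory.
 */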
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct vbox_bo *vboxbo = vbox_bo(bo);

	if (!vbox_ttm_bo_is_vbox_bo(bo))
		return;

	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
	*pl = vboxbo->placement;
}

static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	return 0;
}

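/*
 * Fill in the bus address information TTM needs to map a memory region:
 * system memory needs no mapping, while VRAM lives in PCI BAR 0 of the
 * virtual graphics device.
 */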
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vbox_private *vbox = vbox_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem)
{
}

static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
	.destroy = &vbox_ttm_backend_destroy,
};

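/*
 * Allocate the ttm_tt backing-store tracker for a buffer object.  Linux
 * 4.17 (and RHEL 7.6) dropped the bdev/size/dummy_read_page parameters in
 * favour of passing the buffer object itself, hence the two signatures.
 */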
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) && !defined(RHEL_76)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
					 unsigned long size,
					 u32 page_flags,
					 struct page *dummy_read_page)
#else
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
					 u32 page_flags)
#endif
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->func = &vbox_tt_backend_func;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) && !defined(RHEL_76)
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
#else
	if (ttm_tt_init(tt, bo, page_flags)) {
#endif
		kfree(tt);
		return NULL;
	}

	return tt;
}

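/*
 * Back the ttm_tt with pages from the TTM page pool.  From Linux 4.17 TTM
 * populates through the pool itself when these hooks are absent, so they
 * are only compiled in for older kernels.
 */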
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
# if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) && !defined(RHEL_76)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}
# else
static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	return ttm_pool_populate(ttm, ctx);
}
# endif

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
#endif

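/*
 * The driver callback table handed to TTM; the version guards track
 * callbacks that were added, removed or given defaults across the 4.x
 * kernel series.
 */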
static struct ttm_bo_driver vbox_bo_driver = {
	.ttm_tt_create = vbox_ttm_tt_create,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
	.ttm_tt_populate = vbox_ttm_tt_populate,
	.ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
#endif
	.init_mem_type = vbox_bo_init_mem_type,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || defined(RHEL_74)
	.eviction_valuable = ttm_bo_eviction_valuable,
#endif
	.evict_flags = vbox_bo_evict_flags,
	.verify_access = vbox_bo_verify_access,
	.io_mem_reserve = &vbox_ttm_io_mem_reserve,
	.io_mem_free = &vbox_ttm_io_mem_free,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(RHEL_75)
# if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) && !defined(RHEL_76)
	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
# endif
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) \
    || defined(RHEL_74)
# ifndef RHEL_75
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
# endif
#endif
};

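/*
 * Set up memory management for the device: take the global TTM references,
 * initialise the buffer-object device, create the VRAM range manager and
 * enable write-combining for the framebuffer aperture.
 */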
int vbox_mm_init(struct vbox_private *vbox)
{
	int ret;
	struct drm_device *dev = vbox->dev;
	struct ttm_bo_device *bdev = &vbox->ttm.bdev;

	ret = vbox_ttm_global_init(vbox);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&vbox->ttm.bdev,
				 vbox->ttm.bo_global_ref.ref.object,
				 &vbox_bo_driver,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) || defined(RHEL_71)
				 dev->anon_inode->i_mapping,
#endif
				 DRM_FILE_PAGE_OFFSET, true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver; %d\n", ret);
		goto err_ttm_global_release;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     vbox->available_vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		goto err_device_release;
	}

#ifdef DRM_MTRR_WC
	vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				     pci_resource_len(dev->pdev, 0),
				     DRM_MTRR_WC);
#else
	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					 pci_resource_len(dev->pdev, 0));
#endif
	return 0;

err_device_release:
	ttm_bo_device_release(&vbox->ttm.bdev);
err_ttm_global_release:
	vbox_ttm_global_release(vbox);
	return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
	drm_mtrr_del(vbox->fb_mtrr,
		     pci_resource_start(vbox->dev->pdev, 0),
		     pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
#else
	arch_phys_wc_del(vbox->fb_mtrr);
#endif
	ttm_bo_device_release(&vbox->ttm.bdev);
	vbox_ttm_global_release(vbox);
}

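/*
 * Build the placement list for a buffer object from the requested domain
 * mask.  If neither VRAM nor SYSTEM is requested, fall back to SYSTEM so
 * the list is never empty.
 */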
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
	u32 c = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(RHEL_72)
	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
#else
	unsigned int i;
#endif

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;

	if (domain & TTM_PL_FLAG_VRAM)
		PLACEMENT_FLAGS(bo->placements[c++]) =
		    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		PLACEMENT_FLAGS(bo->placements[c++]) =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		PLACEMENT_FLAGS(bo->placements[c++]) =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_72)
	for (i = 0; i < c; ++i) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;
	}
#endif
}

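/*
 * Allocate and initialise a vbox_bo: a GEM object wrapped around a TTM
 * buffer object that may live in VRAM or system memory.
 */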
int vbox_bo_create(struct drm_device *dev, int size, int align,
		   u32 flags, struct vbox_bo **pvboxbo)
{
	struct vbox_private *vbox = dev->dev_private;
	struct vbox_bo *vboxbo;
	size_t acc_size;
	int ret;

	vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
	if (!vboxbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &vboxbo->gem, size);
	if (ret)
		goto err_free_vboxbo;

	vboxbo->bo.bdev = &vbox->ttm.bdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(RHEL_71)
	vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
				       sizeof(struct vbox_bo));

	ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
			  ttm_bo_type_device, &vboxbo->placement,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) && !defined(RHEL_76)
			  align >> PAGE_SHIFT, false, NULL, acc_size,
#else
			  align >> PAGE_SHIFT, false, acc_size,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_72)
			  NULL, NULL, vbox_bo_ttm_destroy);
#else
			  NULL, vbox_bo_ttm_destroy);
#endif
	if (ret)
		goto err_free_vboxbo;

	*pvboxbo = vboxbo;

	return 0;

err_free_vboxbo:
	kfree(vboxbo);
	return ret;
}

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
	return bo->bo.offset;
}

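/*
 * Pin a buffer object into the given placement domain.  Pins nest: only
 * the first pin validates the object with TTM_PL_FLAG_NO_EVICT set; later
 * calls just bump the count.  On success the GPU offset is optionally
 * returned through gpu_addr.
 */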
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) || defined(RHEL_76)
	struct ttm_operation_ctx ctx = { false, false };
#endif
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = vbox_bo_gpu_offset(bo);

		return 0;
	}

	vbox_ttm_placement(bo, pl_flag);

	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) && !defined(RHEL_76)
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
	if (ret)
		return ret;

	bo->pin_count = 1;

	if (gpu_addr)
		*gpu_addr = vbox_bo_gpu_offset(bo);

	return 0;
}

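/*
 * Drop one pin reference; when the count reaches zero the object is
 * re-validated with TTM_PL_FLAG_NO_EVICT cleared so TTM may evict it
 * again.
 */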
int vbox_bo_unpin(struct vbox_bo *bo)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) || defined(RHEL_76)
	struct ttm_operation_ctx ctx = { false, false };
#endif
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) && !defined(RHEL_76)
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
	if (ret)
		return ret;

	return 0;
}

/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned.  The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) || defined(RHEL_76)
	struct ttm_operation_ctx ctx = { false, false };
#endif
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);

	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) && !defined(RHEL_76)
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}

	return 0;
}

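/*
 * mmap entry point for the DRM device node: reject offsets below the DRM
 * file page offset window, then hand the mapping off to TTM.
 */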
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vbox_private *vbox;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	vbox = file_priv->minor->dev->dev_private;

	return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}