VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c@66940

Last change on this file since 66940 was 66544, checked in by vboxsync, 8 years ago

bugref:8524: Additions/linux: play nicely with distribution-installed Additions
Change header of files which are expected to end up in the Linux kernel to the MIT licence to simplify life for people wanting to port vboxvideo to other kernels and to simplify synchronising changes back to VirtualBox. Update author information in files which have it, but do not add it to files which do not.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 13.6 KB
 
/* $Id: vbox_ttm.c 66544 2017-04-12 17:02:30Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <[email protected]>
 *          Michael Thayer <[email protected]>
 */
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>

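/*
 * Up to kernel 3.17 a TTM placement is a plain flags word; from 3.18 it
 * is a struct ttm_place whose flags live in the .flags member.  This
 * macro papers over the difference for the placement code below.
 */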
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
# define PLACEMENT_FLAGS(placement) (placement)
#else
# define PLACEMENT_FLAGS(placement) (placement).flags
#endif

static inline struct vbox_private *
vbox_bdev(struct ttm_bo_device *bd)
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

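/*
 * Thin wrappers with the signatures that the drm_global reference
 * machinery expects for its init/release hooks.
 */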
static int
vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int r;

        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
                return r;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
                return r;
        }

        return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void
vbox_ttm_global_release(struct vbox_private *vbox)
{
        if (vbox->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
        vbox->ttm.mem_global_ref.release = NULL;
}

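/*
 * Called by TTM when the last reference to a buffer object goes away:
 * release the GEM side of the object and free our wrapper structure.
 */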
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &vbox_bo_ttm_destroy;
}

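/*
 * Describe our two memory domains to TTM: plain cached system RAM, and
 * the fixed, mappable VRAM aperture, which is best used write-combined.
 */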
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }

        return 0;
}

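/*
 * When TTM needs to evict one of our buffer objects, ask it to move the
 * object to system memory; ignore objects we did not create ourselves.
 */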
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
        *pl = vboxbo->placement;
}

static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}

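/*
 * Tell TTM how a memory region is mapped for CPU access: system memory
 * needs no set-up, while VRAM lives in the device's first PCI BAR.
 */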
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}

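/*
 * Move a buffer object between domains using a CPU copy.  The signature
 * of ttm_bo_move_memcpy() changed in kernels 4.8 and 4.9, hence the
 * version guards.
 */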
static int vbox_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
{
        int r;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
        r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
        r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
#else
        r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
#endif
        return r;
}

static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};

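/*
 * Create the ttm_tt page table for a buffer object.  The virtual device
 * has no GART-style aperture to bind pages into, so a plain ttm_tt with
 * our destroy hook attached is sufficient.
 */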
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         uint32_t page_flags,
                                         struct page *dummy_read_page)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
        if (tt == NULL)
                return NULL;

        tt->func = &vbox_tt_backend_func;
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }

        return tt;
}

static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}

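/* The table of callbacks TTM will use to drive our buffer objects. */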
struct ttm_bo_driver vbox_bo_driver = {
        .ttm_tt_create = vbox_ttm_tt_create,
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
        .init_mem_type = vbox_bo_init_mem_type,
        .evict_flags = vbox_bo_evict_flags,
        .move = vbox_bo_move,
        .verify_access = vbox_bo_verify_access,
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && \
    LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
};

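/*
 * Initialise the memory manager: hook up the TTM globals, initialise the
 * TTM device, create a VRAM pool covering the video memory the host
 * offers us, and make the framebuffer BAR write-combining for the CPU.
 */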
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;

        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&vbox->ttm.bdev,
                                 vbox->ttm.bo_global_ref.ref.object,
                                 &vbox_bo_driver,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
                                 dev->anon_inode->i_mapping,
#endif
                                 DRM_FILE_PAGE_OFFSET, true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
                return ret;
        }

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                return ret;
        }

#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                                     pci_resource_len(dev->pdev, 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
#endif

        vbox->ttm.mm_initialised = true;

        return 0;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        struct drm_device *dev = vbox->dev;
#endif

        if (!vbox->ttm.mm_initialised)
                return;

        ttm_bo_device_release(&vbox->ttm.bdev);
        vbox_ttm_global_release(vbox);

#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(dev->pdev, 0),
                     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
}

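/*
 * Build the TTM placement list for a buffer object from a mask of
 * TTM_PL_FLAG_* domains, falling back to system memory if no domain is
 * requested.  Before kernel 3.18 the page-frame range is a property of
 * the whole placement; from 3.18 each ttm_place carries its own.
 */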
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
        u32 c = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned i;
#endif

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;

        if (domain & TTM_PL_FLAG_VRAM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
        if (domain & TTM_PL_FLAG_SYSTEM)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

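/*
 * Allocate a new buffer object which may live in VRAM or in system
 * memory.  Both 'size' and 'align' are in bytes; the alignment is
 * converted to a page count for TTM.  A call might look like this (a
 * sketch only; the real callers are elsewhere in the driver):
 *
 *      struct vbox_bo *bo;
 *      int ret = vbox_bo_create(dev, size, PAGE_SIZE, 0, &bo);
 */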
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   uint32_t flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
        size_t acc_size;
        int ret;

        vboxbo = kzalloc(sizeof(struct vbox_bo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret) {
                kfree(vboxbo);
                return ret;
        }

        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));

        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                          ttm_bo_type_device, &vboxbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
                          NULL,
#endif
                          NULL, vbox_bo_ttm_destroy);
        if (ret)
                return ret;

        *pvboxbo = vboxbo;

        return 0;
}

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
        return bo->bo.offset;
}

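/*
 * Pin a buffer object into the given domain so that it cannot be evicted
 * while in use (e.g. while scanned out), optionally returning its offset
 * within that domain.  Pins nest: each successful vbox_bo_pin() must be
 * balanced by a vbox_bo_unpin().
 */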
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
        int i, ret;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);
                return 0;
        }

        vbox_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        bo->pin_count = 1;
        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }

        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        return 0;
}

/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned.  The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }

        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to system memory failed\n");
                return ret;
        }

        return 0;
}

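/*
 * mmap entry point for the device node.  Offsets below
 * DRM_FILE_PAGE_OFFSET are reserved for other DRM mappings and can never
 * name a TTM object, so reject them; hand everything else to TTM to
 * match against our buffer objects.
 */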
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

        return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}