// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see the
 * likewise-named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap()
 * wraps drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
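
/*
 * Example: a driver that fully relies on these helpers can hook them up
 * with the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h>. A minimal sketch (the "mydrv" name is
 * hypothetical, remaining driver fields elided):
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */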

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
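
/*
 * Example (a sketch with error handling abbreviated): allocating a buffer
 * and dropping the allocation reference once it is no longer needed. The
 * object is reference counted through its base GEM object, so the matching
 * release is a plain drm_gem_object_put():
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, 2 * 1024 * 1024);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	... use shmem ...
 *
 *	drm_gem_object_put(&shmem->base);
 */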

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
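
/*
 * Example (sketch): the get/put calls are reference counted, so a driver
 * that needs direct access to the backing pages brackets it like this:
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *
 *	... access shmem->pages[i] ...
 *
 *	drm_gem_shmem_put_pages(shmem);
 */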

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
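
/*
 * Example (sketch): mapping a buffer into the kernel address space,
 * clearing it through the mapping, and unmapping it again.
 * iosys_map_memset() comes from <linux/iosys-map.h>:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memset(&map, 0, 0, shmem->base.size);
 *
 *	drm_gem_shmem_vunmap(shmem, &map);
 */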

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops
 * to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/*
 * Update the madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
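
/*
 * Example (sketch of a shrinker scan loop): a driver keeps objects that
 * userspace marked purgeable via drm_gem_shmem_madvise() on its own list,
 * chained through &drm_gem_shmem_object.madv_list. The "mydrv" name is
 * hypothetical and locking of the driver's list is omitted:
 *
 *	list_for_each_entry(shmem, &mydrv->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */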

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
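
/*
 * Worked example: for a 1024x768 buffer at 32 bpp with pitch and size left
 * at zero by userspace, min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes,
 * and size = PAGE_ALIGN(4096 * 768) = 3145728 bytes, i.e. 768 pages on a
 * system with 4 KiB pages.
 */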

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	mutex_lock(&shmem->pages_lock);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!WARN_ON_ONCE(!shmem->pages_use_count))
		shmem->pages_use_count++;

	mutex_unlock(&shmem->pages_lock);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides the differences between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
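
/*
 * Example (sketch): fetching the dma-mapped sg table before handing the
 * buffer to the hardware, then walking the DMA segments. The hardware
 * programming itself is driver specific and elided here:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i) {
 *		... program sg_dma_address(sg) / sg_dma_len(sg) ...
 *	}
 */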

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");