// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private)
		drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
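
/*
 * Example: a minimal, hedged sketch of allocating a shmem GEM object,
 * filling it through a kernel mapping and dropping the initial reference.
 * The function name and buffer size are hypothetical; this is illustrative
 * driver code, not part of the helper library itself.
 */
static int __maybe_unused example_shmem_create_and_fill(struct drm_device *dev)
{
	struct drm_gem_shmem_object *shmem;
	void *vaddr;

	/* The size is rounded up to page granularity internally. */
	shmem = drm_gem_shmem_create(dev, 2 * PAGE_SIZE);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	vaddr = drm_gem_shmem_vmap(&shmem->base);
	if (IS_ERR(vaddr)) {
		drm_gem_object_put(&shmem->base);
		return PTR_ERR(vaddr);
	}

	memset(vaddr, 0, shmem->base.size);
	drm_gem_shmem_vunmap(&shmem->base, vaddr);

	/* Drop the initial reference; the object is freed once it hits zero. */
	drm_gem_object_put(&shmem->base);

	return 0;
}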

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
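
/*
 * Example: a hedged sketch of pairing drm_gem_shmem_get_pages() with
 * drm_gem_shmem_put_pages() around direct access to the page array. The
 * function name is hypothetical; a real driver would do something useful
 * with the pages (e.g. build a GPU page table) between the two calls.
 */
static int __maybe_unused example_shmem_walk_pages(struct drm_gem_shmem_object *shmem)
{
	unsigned long i, num_pages = shmem->base.size >> PAGE_SHIFT;
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);	/* pins shmem->pages */
	if (ret)
		return ret;

	for (i = 0; i < num_pages; i++)
		DRM_DEBUG("page %lu at pfn %lu\n", i,
			  page_to_pfn(shmem->pages[i]));

	drm_gem_shmem_put_pages(shmem);		/* balance the get above */

	return 0;
}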

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	if (obj->import_attach) {
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (!shmem->map_cached)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
	}

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * The kernel virtual address of the mapping or an ERR_PTR()-encoded negative
 * error code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Kernel virtual address of the mapping
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
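
/*
 * Example: a hedged sketch showing that vmap/vunmap are reference counted;
 * nested calls return the same kernel address, and every drm_gem_shmem_vmap()
 * must be balanced by one drm_gem_shmem_vunmap(). The function name is
 * hypothetical.
 */
static int __maybe_unused example_shmem_nested_vmap(struct drm_gem_object *obj)
{
	void *a, *b;

	a = drm_gem_shmem_vmap(obj);
	if (IS_ERR(a))
		return PTR_ERR(a);

	/* Second call only bumps vmap_use_count and returns the same address. */
	b = drm_gem_shmem_vmap(obj);
	if (IS_ERR(b)) {
		drm_gem_shmem_vunmap(obj, a);
		return PTR_ERR(b);
	}
	WARN_ON(a != b);

	drm_gem_shmem_vunmap(obj, b);
	drm_gem_shmem_vunmap(obj, a);	/* mapping actually torn down here */

	return 0;
}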

/**
 * drm_gem_shmem_create_with_handle - Allocate an object with the given size
 *                                    and create a GEM handle for it
 * @file_priv: DRM file structure to register the handle with
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Returns the created GEM handle
 *
 * This function creates a shmem GEM object and a handle on it for @file_priv.
 * The initial reference is dropped, so the returned object is kept alive only
 * by the handle.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table for the object and return it in
	 * @handle; this id is what userspace uses to refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
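
/*
 * Example: a hedged sketch of a driver-private buffer-creation ioctl built
 * on drm_gem_shmem_create_with_handle(). The args structure and function
 * name are hypothetical; real drivers define these in their UAPI headers.
 */
struct example_bo_create {
	__u64 size;
	__u32 handle;
	__u32 pad;
};

static int __maybe_unused example_bo_create_ioctl(struct drm_device *dev,
						  void *data,
						  struct drm_file *file)
{
	struct example_bo_create *args = data;
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
						 &args->handle);

	/* The handle now owns the only reference to the object. */
	return PTR_ERR_OR_ZERO(shmem);
}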

/*
 * Update madvise status; returns true if the object has not been purged,
 * false if it has.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
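
/*
 * Example: a hedged sketch of how the madvise/purge helpers combine in a
 * driver memory shrinker. The list handling is hypothetical: it assumes the
 * driver tracks donated objects on shmem->madv_list under its own lock, as
 * a madvise ioctl calling drm_gem_shmem_madvise() typically would.
 */
static unsigned long __maybe_unused
example_shmem_shrinker_scan(struct list_head *shrinker_list)
{
	struct drm_gem_shmem_object *shmem, *tmp;
	unsigned long freed = 0;

	list_for_each_entry_safe(shmem, tmp, shrinker_list, madv_list) {
		/* Only objects userspace marked as don't-need can be purged. */
		if (!drm_gem_shmem_is_purgeable(shmem))
			continue;

		/* drm_gem_shmem_purge() trylocks; it may fail under contention. */
		if (drm_gem_shmem_purge(&shmem->base)) {
			freed += shmem->base.size >> PAGE_SHIFT;
			list_del_init(&shmem->madv_list);
		}
	}

	return freed;
}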

/**
 * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
 *                                      cached mappings
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * By default, shmem buffer objects use writecombine mappings. This
 * function implements struct drm_driver.gem_create_object for shmem
 * buffer objects with cached mappings.
 *
 * Returns:
 * A struct drm_gem_object * on success or NULL on failure.
 */
struct drm_gem_object *
drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;
	shmem->map_cached = true;

	return &shmem->base;
}
EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
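
/*
 * Example: a hedged sketch of wiring the helpers into a driver. Only the
 * hooks relevant here are shown; a real &struct drm_driver sets many more
 * fields (name, fops, ioctls, ...), and the variable name is hypothetical.
 */
static struct drm_driver __maybe_unused example_shmem_driver = {
	/* Allocate objects with cached instead of writecombine mappings. */
	.gem_create_object	= drm_gem_shmem_create_object_cached,
	/* Let dumb-buffer allocation go through the shmem helper. */
	.dumb_create		= drm_gem_shmem_dumb_create,
	/* Import PRIME buffers as shmem objects. */
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
};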

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
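
/*
 * Worked example for the pitch/size computation above (illustrative only):
 * a 640x480 dumb buffer at 32 bpp gives
 *
 *	min_pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes
 *	size      = 2560 * 480               = 1228800 bytes
 *
 * which drm_gem_shmem_create() then rounds up to whole pages; with 4 KiB
 * pages that is exactly 300 pages, as 1228800 is already page-aligned.
 */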

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_page(vma, vmf->address, page);
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	mutex_lock(&shmem->pages_lock);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!WARN_ON_ONCE(!shmem->pages_use_count))
		shmem->pages_use_count++;

	mutex_unlock(&shmem->pages_lock);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (!shmem->map_cached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly, instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
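
/*
 * Example: a hedged sketch of a driver consuming the table returned by
 * drm_gem_shmem_get_pages_sgt(), e.g. to program DMA descriptors. The
 * function name is hypothetical; the iterator is the standard sgtable API.
 */
static int __maybe_unused example_shmem_program_dma(struct drm_gem_object *obj)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	/* Pins the pages and maps them for DMA on first use; cached after. */
	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sgtable_dma_sg(sgt, sg, i)
		DRM_DEBUG("seg %u: dma_addr=%pad len=%u\n",
			  i, &sg_dma_address(sg), sg_dma_len(sg));

	/* No unmap here: the table stays cached in shmem->sgt until free. */
	return 0;
}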

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);