// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
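
/*
 * Usage sketch (hypothetical driver code, not part of this helper): most
 * drivers wire these helpers up through the DRM_GEM_SHMEM_DRIVER_OPS macro
 * from <drm/drm_gem_shmem_helper.h>; "my_driver" and "my_fops" are assumed
 * names here:
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops = &my_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */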

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	shmem = to_drm_gem_shmem_obj(obj);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
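
/*
 * Example (hypothetical caller, "dev" and "size" assumed to come from the
 * driver): the return value is ERR_PTR()-encoded and must be checked:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */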

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
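
/*
 * Sketch of typical use (hypothetical caller): page acquisitions are
 * use-counted, so every successful drm_gem_shmem_get_pages() call must be
 * balanced by drm_gem_shmem_put_pages() once the pages are no longer needed:
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *	// ... access shmem->pages ...
 *	drm_gem_shmem_put_pages(shmem);
 */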

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
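
/*
 * Usage sketch (hypothetical caller): the mapping is use-counted, so vmap and
 * vunmap calls must be balanced, and the same &struct dma_buf_map filled by
 * drm_gem_shmem_vmap() is handed back on unmap:
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *	memset(map.vaddr, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);
 */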

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID in the handle idr table, where the object is
	 * registered, and return the ID through @handle for userspace to use.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/*
 * Update the madvise status. Returns true if the buffer has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
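
/*
 * Sketch of shrinker-style use (hypothetical, "bo" is an assumed object
 * pointer): a driver marks a buffer purgeable from its madvise ioctl and
 * later reclaims it under memory pressure:
 *
 *	drm_gem_shmem_madvise(bo, 1);	// userspace no longer needs the buffer
 *	...
 *	if (drm_gem_shmem_is_purgeable(bo))
 *		drm_gem_shmem_purge(bo);	// drops the backing pages
 */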

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
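
/*
 * Worked example: for a 1024x768 dumb buffer with args->bpp = 32, min_pitch
 * is DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes, and with 4 KiB pages
 * args->size becomes PAGE_ALIGN(4096 * 768) = 3145728 bytes.
 */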

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_page(vma, vmf->address, page);
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	mutex_lock(&shmem->pages_lock);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!WARN_ON_ONCE(!shmem->pages_use_count))
		shmem->pages_use_count++;

	mutex_unlock(&shmem->pages_lock);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
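
/*
 * Usage sketch (hypothetical caller): a driver fetches the dma-mapped
 * scatter/gather table before programming buffer addresses into hardware:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	// ... walk sgt with for_each_sgtable_dma_sg() and program the h/w ...
 */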

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);