/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
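
/*
 * A minimal sketch of how a driver might use drm_gem_object_init() in its
 * buffer-object creation path. struct my_bo and my_gem_funcs are
 * hypothetical driver-side names, not part of this file:
 *
 *	struct my_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct my_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &my_gem_funcs;
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 */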

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle lookup
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
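
/*
 * Drivers typically just plug this helper into their &drm_driver, e.g.
 * (my_driver is a hypothetical name):
 *
 *	static struct drm_driver my_driver = {
 *		...
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *	};
 */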

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to drop
 * their own reference to the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point; drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
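
/*
 * A hedged sketch of the creation-ioctl pattern described above, reusing
 * the hypothetical my_bo_create() from earlier. The handle is created
 * last, and the local reference is dropped afterwards since the handle now
 * keeps the object alive:
 *
 *	static int my_create_ioctl(struct drm_device *dev, void *data,
 *				   struct drm_file *file_priv)
 *	{
 *		struct my_create_args *args = data;
 *		struct my_bo *bo;
 *		int ret;
 *
 *		bo = my_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base,
 *					    &args->handle);
 *		drm_gem_object_put(&bo->base);
 *
 *		return ret;
 *	}
 */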

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
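
/*
 * drm_gem_get_pages() and drm_gem_put_pages() are meant to be used as a
 * pair, e.g. from a driver's pin/unpin paths. A minimal sketch, assuming a
 * hypothetical struct my_bo that caches the page array:
 *
 *	static int my_bo_get_pages(struct my_bo *bo)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&bo->base);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void my_bo_put_pages(struct my_bo *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *		bo->pages = NULL;
 *	}
 */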

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file-private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file-private data
 * @handle: userspace handle
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
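
/*
 * The usual pattern in an ioctl: take a reference through the handle, use
 * the object, then drop the reference again. A short sketch with a
 * hypothetical my_do_something():
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	ret = my_do_something(obj);
 *
 *	drm_gem_object_put(obj);
 *	return ret;
 */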

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file-private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs)
		obj->funcs->free(obj);
	else if (dev->driver->gem_free_object_unlocked)
		dev->driver->gem_free_object_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_locked);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
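
/*
 * drm_gem_mmap() is normally wired up through the driver's
 * &file_operations. Most GEM drivers can use the DEFINE_DRM_GEM_FOPS()
 * helper from <drm/drm_gem.h>, which points .mmap at this function
 * (my_driver is a hypothetical name):
 *
 *	DEFINE_DRM_GEM_FOPS(my_driver_fops);
 *
 *	static struct drm_driver my_driver = {
 *		...
 *		.fops = &my_driver_fops,
 *	};
 */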

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
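
/*
 * A hedged sketch of the submission pattern described above, assuming a
 * hypothetical job structure holding an array of BOs:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	... reserve fence slots, collect dependencies, push the job ...
 *
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 */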

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
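
/*
 * Typically called per-BO while the reservations are locked. A sketch, in
 * which job, deps, bos, bo_count and the per-BO write flag are all
 * hypothetical driver-side names:
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps,
 *						       job->bos[i],
 *						       job->write[i]);
 *		if (ret)
 *			goto out_unlock;
 *	}
 */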