/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
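
/*
 * Usage sketch (editorial illustration, not part of the original file): a
 * hypothetical driver "foo" embeds &struct drm_gem_object and creates
 * shmem-backed objects like this. foo_bo and foo_gem_funcs are assumed
 * names; the size must be page-aligned before initialization:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &foo_gem_funcs;
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */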

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
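
/*
 * Wiring sketch (editorial illustration): drivers with GEM-managed dumb
 * buffers can plug this helper straight into &struct drm_driver;
 * foo_dumb_create is a hypothetical driver callback:
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *	};
 */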

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
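
/*
 * Creation-flow sketch (editorial illustration, reusing the hypothetical
 * foo_bo_create() from above): publish the handle as the very last step,
 * then drop the creation reference since the handle now holds its own:
 *
 *	static int foo_bo_create_ioctl(struct drm_device *dev, void *data,
 *				       struct drm_file *file_priv)
 *	{
 *		struct drm_foo_create *args = data;
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base,
 *					    &args->handle);
 *		drm_gem_object_put(&bo->base);
 *		return ret;
 *	}
 */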
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
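
/*
 * Pairing sketch (editorial illustration): set any special GFP mask right
 * after drm_gem_object_init(), then pin and release the pages symmetrically.
 * The mask below is an assumption for illustration, not a recommendation:
 *
 *	mapping_set_gfp_mask(obj->filp->f_mapping,
 *			     GFP_HIGHUSER | __GFP_RECLAIMABLE);
 *	...
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... map or build an sg table from the pinned pages ...
 *	drm_gem_put_pages(obj, pages, true, false);
 */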

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
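
/*
 * Example (editorial sketch): looking up a user-supplied handle array in a
 * hypothetical submit ioctl, where args->bo_handles is a __u64 user pointer
 * and args->bo_handle_count its length:
 *
 *	struct drm_gem_object **objs;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_handle_count, &objs);
 *	if (ret)
 *		return ret;
 *	... use the objects ...
 *	for (i = 0; i < args->bo_handle_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */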

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
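
/*
 * Example (editorial sketch, with an assumed drm_foo_wait_bo uapi struct): a
 * hypothetical wait-BO ioctl built on this helper, converting an absolute
 * nanosecond timeout to jiffies:
 *
 *	static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
 *				     struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait_bo *args = data;
 *		unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */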

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
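
/*
 * Example (editorial sketch): a driver's &drm_gem_object_funcs.free callback
 * is the natural place to call drm_gem_object_release() before freeing the
 * containing structure (the hypothetical foo_bo from above):
 *
 *	static void foo_bo_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */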

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
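
/*
 * Wiring sketch (editorial illustration): most drivers don't open-code this;
 * the DEFINE_DRM_GEM_FOPS() macro from <drm/drm_gem.h> already points
 * &file_operations.mmap at drm_gem_mmap():
 *
 *	DEFINE_DRM_GEM_FOPS(foo_driver_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_driver_fops,
 *	};
 */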

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	int ret;

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (dma_buf_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	if (dma_buf_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	dma_buf_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);
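
/*
 * Example (editorial sketch): bracketing a kernel CPU access with the
 * &struct dma_buf_map based interface:
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *	... access via map.vaddr, or I/O helpers if map.is_iomem is set ...
 *	drm_gem_vunmap(obj, &map);
 */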

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * This function consumes the reference for @fence both on success and error
 * cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_unlocked(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences(obj->resv, NULL,
				  &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	/*
	 * drm_gem_fence_array_add() consumes its fence reference even on
	 * failure, so only drop the references it never saw.
	 */
	for (i++; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
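
/*
 * End-to-end sketch (editorial, with hypothetical job/field names): a typical
 * job-submission path combining the reservation and fence helpers above:
 *
 *	ret = drm_gem_lock_reservations(objs, count, &acquire_ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps,
 *						       objs[i], write);
 *		if (ret)
 *			break;
 *	}
 *	if (!ret)
 *		... push the job, then publish its finished fence on each
 *		    reservation, e.g. with dma_resv_add_excl_fence() ...
 *	drm_gem_unlock_reservations(objs, count, &acquire_ctx);
 */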