// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */
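
/*
 * Illustrative usage sketch (editor's addition, not upstream code): the
 * typical write-side pattern, assuming a hypothetical driver-owned "obj"
 * and driver fence "fence". Slots must be reserved under the lock before
 * the fence is added:
 *
 *      int ret;
 *
 *      ret = dma_resv_lock(obj, NULL);
 *      if (ret)
 *              return ret;
 *      ret = dma_resv_reserve_fences(obj, 1);
 *      if (!ret)
 *              dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
 *      dma_resv_unlock(obj);
 *      return ret;
 */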

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK 0x3

struct dma_resv_list {
        struct rcu_head rcu;
        u32 num_fences, max_fences;
        struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
                                struct dma_resv *resv, struct dma_fence **fence,
                                enum dma_resv_usage *usage)
{
        long tmp;

        tmp = (long)rcu_dereference_check(list->table[index],
                                          resv ? dma_resv_held(resv) : true);
        *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
        if (usage)
                *usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
                              unsigned int index,
                              struct dma_fence *fence,
                              enum dma_resv_usage usage)
{
        long tmp = ((long)fence) | usage;

        RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
        struct dma_resv_list *list;

        list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
        if (!list)
                return NULL;

        list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
                sizeof(*list->table);

        return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
        unsigned int i;

        if (!list)
                return;

        for (i = 0; i < list->num_fences; ++i) {
                struct dma_fence *fence;

                dma_resv_list_entry(list, i, NULL, &fence, NULL);
                dma_fence_put(fence);
        }
        kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
        ww_mutex_init(&obj->lock, &reservation_ww_class);

        RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
        /*
         * This object should be dead and all references must have
         * been released to it, so no need to be protected with rcu.
         */
        dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
        ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
        return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence(). Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
        struct dma_resv_list *old, *new;
        unsigned int i, j, k, max;

        dma_resv_assert_held(obj);

        old = dma_resv_fences_list(obj);
        if (old && old->max_fences) {
                if ((old->num_fences + num_fences) <= old->max_fences)
                        return 0;
                max = max(old->num_fences + num_fences, old->max_fences * 2);
        } else {
                max = max(4ul, roundup_pow_of_two(num_fences));
        }

        new = dma_resv_list_alloc(max);
        if (!new)
                return -ENOMEM;

        /*
         * no need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
                enum dma_resv_usage usage;
                struct dma_fence *fence;

                dma_resv_list_entry(old, i, obj, &fence, &usage);
                if (dma_fence_is_signaled(fence))
                        RCU_INIT_POINTER(new->table[--k], fence);
                else
                        dma_resv_list_set(new, j++, fence, usage);
        }
        new->num_fences = j;

        /*
         * We are not changing the effective set of fences here so can
         * merely update the pointer to the new array; both existing
         * readers and new readers will see exactly the same set of
         * active (unsignaled) fences. Individual fences and the
         * old array are protected by RCU and so will not vanish under
         * the gaze of the rcu_read_lock() readers.
         */
        rcu_assign_pointer(obj->fences, new);

        if (!old)
                return 0;

        /* Drop the references to the signaled fences */
        for (i = k; i < max; ++i) {
                struct dma_fence *fence;

                fence = rcu_dereference_protected(new->table[i],
                                                  dma_resv_held(obj));
                dma_fence_put(fence);
        }
        kfree_rcu(old, rcu);

        return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);
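
/*
 * Illustrative sketch (editor's addition, not upstream code): reserved slots
 * are invalidated by unlocking, so the reservation must be redone after every
 * dma_resv_lock() cycle. "obj", "a" and "b" are hypothetical driver names:
 *
 *      dma_resv_lock(obj, NULL);
 *      if (!dma_resv_reserve_fences(obj, 2)) {
 *              dma_resv_add_fence(obj, a, DMA_RESV_USAGE_READ);
 *              dma_resv_add_fence(obj, b, DMA_RESV_USAGE_BOOKKEEP);
 *      }
 *      dma_resv_unlock(obj);
 *      ... after unlocking, any remaining slots must not be reused ...
 */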

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
        struct dma_resv_list *fences = dma_resv_fences_list(obj);

        dma_resv_assert_held(obj);

        /* Test fence slot reservation */
        if (fences)
                fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called.
 *
 * See also &dma_resv.fences for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
                        enum dma_resv_usage usage)
{
        struct dma_resv_list *fobj;
        struct dma_fence *old;
        unsigned int i, count;

        dma_fence_get(fence);

        dma_resv_assert_held(obj);

        /* Drivers should not add containers here, instead add each fence
         * individually.
         */
        WARN_ON(dma_fence_is_container(fence));

        fobj = dma_resv_fences_list(obj);
        count = fobj->num_fences;

        for (i = 0; i < count; ++i) {
                enum dma_resv_usage old_usage;

                dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
                if ((old->context == fence->context && old_usage >= usage &&
                     dma_fence_is_later_or_same(fence, old)) ||
                    dma_fence_is_signaled(old)) {
                        dma_resv_list_set(fobj, i, fence, usage);
                        dma_fence_put(old);
                        return;
                }
        }

        BUG_ON(fobj->num_fences >= fobj->max_fences);
        count++;

        dma_resv_list_set(fobj, i, fence, usage);
        /* pointer update must be visible before we extend the num_fences */
        smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example of using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                             struct dma_fence *replacement,
                             enum dma_resv_usage usage)
{
        struct dma_resv_list *list;
        unsigned int i;

        dma_resv_assert_held(obj);

        list = dma_resv_fences_list(obj);
        for (i = 0; list && i < list->num_fences; ++i) {
                struct dma_fence *old;

                dma_resv_list_entry(list, i, obj, &old, NULL);
                if (old->context != context)
                        continue;

                dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
                dma_fence_put(old);
        }
}
EXPORT_SYMBOL(dma_resv_replace_fences);
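
/*
 * Illustrative sketch (editor's addition, not upstream code): swapping out
 * every fence of one context, e.g. a preemption fence, for a page table
 * update fence. "obj", "preempt_ctx" and "pt_fence" are hypothetical driver
 * names, and the chosen usage is an assumption:
 *
 *      dma_resv_lock(obj, NULL);
 *      dma_resv_replace_fences(obj, preempt_ctx, pt_fence,
 *                              DMA_RESV_USAGE_BOOKKEEP);
 *      dma_resv_unlock(obj);
 */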

/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
        cursor->index = 0;
        cursor->num_fences = 0;
        cursor->fences = dma_resv_fences_list(cursor->obj);
        if (cursor->fences)
                cursor->num_fences = cursor->fences->num_fences;
        cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
        if (!cursor->fences)
                return;

        do {
                /* Drop the reference from the previous round */
                dma_fence_put(cursor->fence);

                if (cursor->index >= cursor->num_fences) {
                        cursor->fence = NULL;
                        break;
                }

                dma_resv_list_entry(cursor->fences, cursor->index++,
                                    cursor->obj, &cursor->fence,
                                    &cursor->fence_usage);
                cursor->fence = dma_fence_get_rcu(cursor->fence);
                if (!cursor->fence) {
                        dma_resv_iter_restart_unlocked(cursor);
                        continue;
                }

                if (!dma_fence_is_signaled(cursor->fence) &&
                    cursor->usage >= cursor->fence_usage)
                        break;
        } while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
        rcu_read_lock();
        do {
                dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
        } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
        bool restart;

        rcu_read_lock();
        cursor->is_restarted = false;
        restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
        do {
                if (restart)
                        dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
                restart = true;
        } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
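
/*
 * Illustrative sketch (editor's addition, not upstream code): accumulating
 * per-fence state over an unlocked iteration. Accumulated state must be
 * thrown away whenever dma_resv_iter_is_restarted() reports a restart.
 * "obj" is a hypothetical driver object:
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *      unsigned int count = 0;
 *
 *      dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
 *      dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *              if (dma_resv_iter_is_restarted(&cursor))
 *                      count = 0;
 *              ++count;
 *      }
 *      dma_resv_iter_end(&cursor);
 */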

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
        struct dma_fence *fence;

        dma_resv_assert_held(cursor->obj);

        cursor->index = 0;
        cursor->fences = dma_resv_fences_list(cursor->obj);

        fence = dma_resv_iter_next(cursor);
        cursor->is_restarted = true;
        return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
        struct dma_fence *fence;

        dma_resv_assert_held(cursor->obj);

        cursor->is_restarted = false;

        do {
                if (!cursor->fences ||
                    cursor->index >= cursor->fences->num_fences)
                        return NULL;

                dma_resv_list_entry(cursor->fences, cursor->index++,
                                    cursor->obj, &fence, &cursor->fence_usage);
        } while (cursor->fence_usage > cursor->usage);

        return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
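
/*
 * Illustrative sketch (editor's addition, not upstream code): locked
 * iteration needs no restart handling since the fence list cannot change
 * under the held lock. "obj" is a hypothetical driver object:
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *
 *      dma_resv_lock(obj, NULL);
 *      dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_WRITE, fence)
 *              dma_fence_enable_sw_signaling(fence);
 *      dma_resv_unlock(obj);
 */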

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from @src to @dst. @dst must be locked with dma_resv_lock().
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
        struct dma_resv_iter cursor;
        struct dma_resv_list *list;
        struct dma_fence *f;

        dma_resv_assert_held(dst);

        list = NULL;

        dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, f) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        dma_resv_list_free(list);

                        list = dma_resv_list_alloc(cursor.num_fences);
                        if (!list) {
                                dma_resv_iter_end(&cursor);
                                return -ENOMEM;
                        }
                        list->num_fences = 0;
                }

                dma_fence_get(f);
                dma_resv_list_set(list, list->num_fences++, f,
                                  dma_resv_iter_usage(&cursor));
        }
        dma_resv_iter_end(&cursor);

        list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
        dma_resv_list_free(list);
        return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's fences without the update side
 * lock held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
                        unsigned int *num_fences, struct dma_fence ***fences)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        *num_fences = 0;
        *fences = NULL;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        struct dma_fence **new_fences;
                        unsigned int count;

                        while (*num_fences)
                                dma_fence_put((*fences)[--(*num_fences)]);

                        count = cursor.num_fences + 1;

                        /* Re-allocate the array if needed */
                        new_fences = krealloc_array(*fences, count,
                                                    sizeof(void *),
                                                    GFP_KERNEL);
                        if (count && !new_fences) {
                                kfree(*fences);
                                *fences = NULL;
                                *num_fences = 0;
                                dma_resv_iter_end(&cursor);
                                return -ENOMEM;
                        }
                        *fences = new_fences;
                }

                (*fences)[(*num_fences)++] = dma_fence_get(fence);
        }
        dma_resv_iter_end(&cursor);

        return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
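
/*
 * Illustrative sketch (editor's addition, not upstream code): taking a
 * snapshot of the fences and releasing it again. The caller owns the array
 * and one reference per fence. "obj" is a hypothetical driver object:
 *
 *      struct dma_fence **fences;
 *      unsigned int i, count;
 *      int ret;
 *
 *      ret = dma_resv_get_fences(obj, DMA_RESV_USAGE_READ, &count, &fences);
 *      if (ret)
 *              return ret;
 *      for (i = 0; i < count; ++i)
 *              dma_fence_put(fences[i]);
 *      kfree(fences);
 */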

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 * Returns either 0 for success or -ENOMEM.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
                           struct dma_fence **fence)
{
        struct dma_fence_array *array;
        struct dma_fence **fences;
        unsigned count;
        int r;

        r = dma_resv_get_fences(obj, usage, &count, &fences);
        if (r)
                return r;

        if (count == 0) {
                *fence = NULL;
                return 0;
        }

        if (count == 1) {
                *fence = fences[0];
                kfree(fences);
                return 0;
        }

        array = dma_fence_array_create(count, fences,
                                       dma_fence_context_alloc(1),
                                       1, false);
        if (!array) {
                while (count--)
                        dma_fence_put(fences[count]);
                kfree(fences);
                return -ENOMEM;
        }

        *fence = &array->base;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
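
/*
 * Illustrative sketch (editor's addition, not upstream code): collapsing all
 * write fences into a single fence and dropping the reference once done.
 * "obj" is a hypothetical driver object:
 *
 *      struct dma_fence *fence;
 *      int ret;
 *
 *      ret = dma_resv_get_singleton(obj, DMA_RESV_USAGE_WRITE, &fence);
 *      if (ret)
 *              return ret;
 *      if (fence) {
 *              dma_fence_wait(fence, false);
 *              dma_fence_put(fence);
 *      }
 */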

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
                           bool intr, unsigned long timeout)
{
        long ret = timeout ? timeout : 1;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                ret = dma_fence_wait_timeout(fence, intr, ret);
                if (ret <= 0) {
                        dma_resv_iter_end(&cursor);
                        return ret;
                }
        }
        dma_resv_iter_end(&cursor);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
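
/*
 * Illustrative sketch (editor's addition, not upstream code): interruptible
 * wait for all readers and writers with a 100ms timeout. "obj" is a
 * hypothetical driver object:
 *
 *      long ret;
 *
 *      ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_READ, true,
 *                                  msecs_to_jiffies(100));
 *      if (ret == 0)
 *              return -ETIMEDOUT;
 *      if (ret < 0)
 *              return ret;
 */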

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                dma_resv_iter_end(&cursor);
                return false;
        }
        dma_resv_iter_end(&cursor);
        return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
        static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
                seq_printf(seq, "\t%s fence:",
                           usage[dma_resv_iter_usage(&cursor)]);
                dma_fence_describe(fence, seq);
        }
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
        struct mm_struct *mm = mm_alloc();
        struct ww_acquire_ctx ctx;
        struct dma_resv obj;
        struct address_space mapping;
        int ret;

        if (!mm)
                return -ENOMEM;

        dma_resv_init(&obj);
        address_space_init_once(&mapping);

        mmap_read_lock(mm);
        ww_acquire_init(&ctx, &reservation_ww_class);
        ret = dma_resv_lock(&obj, &ctx);
        if (ret == -EDEADLK)
                dma_resv_lock_slow(&obj, &ctx);
        fs_reclaim_acquire(GFP_KERNEL);
        /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
        i_mmap_lock_write(&mapping);
        i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        __dma_fence_might_wait();
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
        __dma_fence_might_wait();
#endif
        fs_reclaim_release(GFP_KERNEL);
        ww_mutex_unlock(&obj.lock);
        ww_acquire_fini(&ctx);
        mmap_read_unlock(mm);

        mmput(mm);

        return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif