// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */

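/*
 * A minimal sketch of the typical update side (illustrative only, not part
 * of this file): a driver takes the reservation lock, reserves a shared
 * slot and publishes the fence of a newly submitted read job.  The "bo"
 * object and "job" fence below are hypothetical driver structures.
 *
 *	ret = dma_resv_lock(&bo->resv, NULL);
 *	if (ret)
 *		return ret;
 *	ret = dma_resv_reserve_shared(&bo->resv, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(&bo->resv, job->fence);
 *	dma_resv_unlock(&bo->resv);
 *	return ret;
 *
 * Writers would instead install their fence with dma_resv_add_excl_fence(),
 * while readers of the fence state typically use dma_resv_wait_timeout() or
 * dma_resv_test_signaled() below.
 */
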
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
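
/*
 * Illustrative sketch only: drivers usually embed the reservation object in
 * their buffer structure and pair dma_resv_init()/dma_resv_fini() with the
 * buffer's creation and final release.  "my_bo" is a hypothetical type.
 *
 *	struct my_bo {
 *		struct dma_resv resv;
 *		...
 *	};
 *
 *	// at creation time
 *	dma_resv_init(&bo->resv);
 *
 *	// at final release, once no new fences can be added anymore
 *	dma_resv_fini(&bo->resv);
 */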

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. The obj->lock must be held and
 * dma_resv_reserve_shared() must have been called first.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
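
/*
 * Illustrative sketch only (names are hypothetical): when a submission
 * attaches several read fences at once, the slots are reserved in one go
 * and the fences are then added under the same hold of the lock.
 *
 *	ret = dma_resv_reserve_shared(&bo->resv, num_jobs);
 *	if (ret)
 *		goto err_unlock;
 *
 *	for (i = 0; i < num_jobs; ++i)
 *		dma_resv_add_shared_fence(&bo->resv, jobs[i]->fence);
 */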

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
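
/*
 * Illustrative sketch only: a writer publishes its fence in the exclusive
 * slot, which also drops any shared fences currently attached.  "bo" and
 * "job" are hypothetical driver objects.
 *
 *	dma_resv_lock(&bo->resv, NULL);
 *	dma_resv_add_excl_fence(&bo->resv, job->fence);
 *	dma_resv_unlock(&bo->resv);
 */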

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. The dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = dma_resv_shared_list(src);

retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = dma_resv_shared_list(src);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence __rcu **dst;
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = dma_resv_shared_list(src);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			dst = &dst_list->shared[dst_list->shared_count++];
			rcu_assign_pointer(*dst, fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_shared_list(dst);
	old = dma_resv_excl_fence(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
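
/*
 * Illustrative sketch only: with the destination locked, the fences of one
 * buffer can be mirrored onto another, e.g. when replacing a buffer object
 * with a temporary copy.  "dst_bo" and "src_bo" are hypothetical.
 *
 *	dma_resv_lock(&dst_bo->resv, NULL);
 *	ret = dma_resv_copy_fences(&dst_bo->resv, &src_bo->resv);
 *	dma_resv_unlock(&dst_bo->resv);
 */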

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned int *pshared_count,
			struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = dma_resv_excl_fence(obj);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = dma_resv_shared_list(obj);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
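
/*
 * Illustrative sketch only: the caller owns both the fence references and
 * the returned array and has to drop them again.  "bo" is hypothetical.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(&bo->resv, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	// ... use the snapshot of fences ...
 *
 *	dma_fence_put(excl);
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 */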

/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned int seq, shared_count;
	struct dma_fence *fence;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = dma_resv_excl_fence(obj);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence;

			lfence = rcu_dereference(fobj->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
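
/*
 * Illustrative sketch only: wait interruptibly for all fences of a buffer,
 * with a bounded timeout.  "bo" is a hypothetical driver object.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(&bo->resv, true, true,
 *				    msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;		// interrupted
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// fences still pending
 *	// ret > 0: all fences signaled within the timeout
 */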


static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_fence *fence;
	unsigned int seq;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
		unsigned int i, shared_count;

		shared_count = fobj ? fobj->shared_count : 0;
		for (i = 0; i < shared_count; ++i) {
			fence = rcu_dereference(fobj->shared[i]);
			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}
	}

	fence = dma_resv_excl_fence(obj);
	if (ret && fence) {
		ret = dma_resv_test_signaled_single(fence);
		if (ret < 0)
			goto retry;

	}

	if (read_seqcount_retry(&obj->seq, seq))
		goto retry;

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
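
/*
 * Illustrative sketch only: non-blocking busy check, e.g. for an ioctl that
 * reports whether a buffer is still in use.  "bo" is hypothetical.
 *
 *	bool idle = dma_resv_test_signaled(&bo->resv, true);
 *
 *	return idle ? 0 : -EBUSY;
 */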

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif