/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fence.h>

EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(fence_emit);

/*
 * fence context counter: each execution context should have its own
 * fence context; this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t fence_context_counter = ATOMIC64_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:	[in]	amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting fence->context to a
 * unique number.
 */
u64 fence_context_alloc(unsigned num)
{
	BUG_ON(!num);
	return atomic64_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
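
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * would typically allocate one fence context per independent engine at
 * init time and pair it with a monotonically increasing seqno. The names
 * struct foo_engine and foo_engine_init are hypothetical.
 *
 *	struct foo_engine {
 *		u64 fence_context;
 *		unsigned seqno;
 *		spinlock_t fence_lock;
 *	};
 *
 *	static void foo_engine_init(struct foo_engine *engine)
 *	{
 *		engine->fence_context = fence_context_alloc(1);
 *		engine->seqno = 0;
 *		spin_lock_init(&engine->fence_lock);
 *	}
 */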

/**
 * fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Unlike fence_signal, this function must be called with fence->lock held.
 */
int fence_signal_locked(struct fence *fence)
{
	struct fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	lockdep_assert_held(fence->lock);

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked fence_signal,
		 * still run through all callbacks
		 */
	} else
		trace_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(fence_signal_locked);

/**
 * fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
int fence_signal(struct fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	trace_fence_signaled(fence);

	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(fence_signal);
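
/*
 * Example (illustrative sketch): a driver typically calls fence_signal()
 * from its completion interrupt handler once the hardware has finished the
 * work the fence represents. foo_irq_handler and the current_fence member
 * of the hypothetical struct foo_engine are made up for illustration.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_engine *engine = data;
 *
 *		fence_signal(engine->current_fence);
 *		return IRQ_HANDLED;
 *	}
 */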

/**
 * fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:	[in]	the fence to wait on
 * @intr:	[in]	if true, do an interruptible wait
 * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	trace_fence_wait_start(fence);
	ret = fence->ops->wait(fence, intr, timeout);
	trace_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);
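
/*
 * Example (illustrative sketch): waiting interruptibly for up to one second.
 * A return of 0 means the wait timed out, a negative value is an error
 * (typically -ERESTARTSYS), and a positive value is the remaining timeout
 * in jiffies after the fence signaled.
 *
 *	signed long ret;
 *
 *	ret = fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */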

void fence_release(struct kref *kref)
{
	struct fence *fence =
			container_of(kref, struct fence, refcount);

	trace_fence_destroy(fence);

	BUG_ON(!list_empty(&fence->cb_list));

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

void fence_free(struct fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
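
/*
 * Note on usage (illustrative sketch): fence_release() is not called
 * directly; it is the kref_put() callback used by fence_get()/fence_put()
 * from <linux/fence.h>. Every holder takes a reference with fence_get()
 * and drops it with fence_put(); the final fence_put() ends up in
 * fence_release() and, absent a driver release hook, in fence_free().
 * foo_engine_emit_fence() is a hypothetical driver helper.
 *
 *	struct fence *fence = foo_engine_emit_fence(engine);
 *
 *	fence_get(fence);
 *	...
 *	fence_put(fence);
 */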

/**
 * fence_enable_sw_signaling - enable signaling on fence
 * @fence:	[in]	the fence to enable
 *
 * This will request that software signaling be enabled, to make the fence
 * complete as soon as possible.
 */
void fence_enable_sw_signaling(struct fence *fence)
{
	unsigned long flags;

	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		trace_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		if (!fence->ops->enable_signaling(fence))
			fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(fence_enable_sw_signaling);

/**
 * fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence:	[in]	the fence to wait on
 * @cb:		[in]	the callback to register
 * @func:	[in]	the function to call
 *
 * cb will be initialized by fence_add_callback; no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. The same restrictions on the
 * refcount apply as for fence_wait; however, the caller doesn't need to
 * keep a refcount to the fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with fence_signal. The callback itself can be called
 * from irq context.
 */
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
		       fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		trace_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(fence_add_callback);
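
/*
 * Example (illustrative sketch): embedding a fence_cb in a caller-side
 * structure and recovering it with container_of() in the callback, which
 * may run in irq context. struct foo_waiter and foo_fence_cb are
 * hypothetical. Since -ENOENT means the fence was already signaled, the
 * example treats that case as an immediate completion.
 *
 *	struct foo_waiter {
 *		struct fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void foo_fence_cb(struct fence *fence, struct fence_cb *cb)
 *	{
 *		struct foo_waiter *waiter =
 *			container_of(cb, struct foo_waiter, cb);
 *
 *		complete(&waiter->done);
 *	}
 *
 *	struct foo_waiter waiter;
 *	int ret;
 *
 *	init_completion(&waiter.done);
 *	ret = fence_add_callback(fence, &waiter.cb, foo_fence_cb);
 *	if (ret == -ENOENT)
 *		complete(&waiter.done);
 */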

/**
 * fence_get_status - returns the status upon completion
 * @fence: [in]	the fence to query
 *
 * This wraps fence_get_status_locked() to return the error status
 * condition on a signaled fence. See fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed with an error.
 */
int fence_get_status(struct fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(fence_get_status);

/**
 * fence_remove_callback - remove a callback from the signaling list
 * @fence:	[in]	the fence to wait on
 * @cb:		[in]	the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 */
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret) {
		list_del_init(&cb->node);
		if (list_empty(&fence->cb_list))
			if (fence->ops->disable_signaling)
				fence->ops->disable_signaling(fence);
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(fence_remove_callback);
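
/*
 * Example (illustrative sketch, continuing the hypothetical foo_waiter from
 * the fence_add_callback() example): a caller that gives up waiting, e.g.
 * on a timeout, must remove its callback again while still holding a
 * reference to the fence. A false return means the fence signaled after
 * all and the callback has already been dequeued to run.
 *
 *	if (!wait_for_completion_timeout(&waiter.done, HZ)) {
 *		if (!fence_remove_callback(fence, &waiter.cb))
 *			wait_for_completion(&waiter.done);
 *	}
 */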

struct default_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:	[in]	the fence to wait on
 * @intr:	[in]	if true, do an interruptible wait
 * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success.
 */
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout;
	bool was_set;

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set) {
		trace_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			goto out;
		}
	}

	cb.base.func = fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(fence_default_wait);

static bool
fence_test_signaled_any(struct fence **fences, uint32_t count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct fence *fence = fences[i];
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			return true;
	}
	return false;
}

/**
 * fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences:	[in]	array of fences to wait on
 * @count:	[in]	number of fences to wait on
 * @intr:	[in]	if true, do an interruptible wait
 * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_any_timeout(struct fence **fences, uint32_t count,
		       bool intr, signed long timeout)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (fence_is_signaled(fences[i]))
				return 1;

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct fence *fence = fences[i];

		if (fence->ops->wait != fence_default_wait) {
			ret = -EINVAL;
			goto fence_rm_cb;
		}

		cb[i].task = current;
		if (fence_add_callback(fence, &cb[i].base,
				       fence_default_wait_cb)) {
			/* This fence is already signaled */
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (fence_test_signaled_any(fences, count))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(fence_wait_any_timeout);
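
/*
 * Example (illustrative sketch): waiting for whichever of two fences signals
 * first, for at most one second. All fences in the array must use
 * fence_default_wait as their wait implementation, and the caller must hold
 * a reference to each of them. fence_a and fence_b are hypothetical.
 *
 *	struct fence *fences[2] = { fence_a, fence_b };
 *	signed long ret;
 *
 *	ret = fence_wait_any_timeout(fences, 2, true, msecs_to_jiffies(1000));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */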

/**
 * fence_init - Initialize a custom fence.
 * @fence:	[in]	the fence to initialize
 * @ops:	[in]	the fence_ops for operations on this fence
 * @lock:	[in]	the irqsafe spinlock to use for locking this fence
 * @context:	[in]	the execution context this fence is run on
 * @seqno:	[in]	a linear increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fence.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * one to check which fence is later by simply using fence_later.
 */
void
fence_init(struct fence *fence, const struct fence_ops *ops,
	   spinlock_t *lock, u64 context, unsigned seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
	       !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
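
/*
 * Example (illustrative sketch): a minimal driver-side fence built on this
 * API. All foo_* names are hypothetical; only the fence_ops members that
 * fence_init() checks (get_driver_name, get_timeline_name, enable_signaling,
 * wait) are filled in, with fence_default_wait as the wait implementation.
 * foo_enable_signaling() simply returns true on the assumption that the
 * hardware's completion interrupt is always enabled.
 *
 *	static const char *foo_get_driver_name(struct fence *fence)
 *	{
 *		return "foo";
 *	}
 *
 *	static const char *foo_get_timeline_name(struct fence *fence)
 *	{
 *		return "foo.engine0";
 *	}
 *
 *	static bool foo_enable_signaling(struct fence *fence)
 *	{
 *		return true;
 *	}
 *
 *	static const struct fence_ops foo_fence_ops = {
 *		.get_driver_name = foo_get_driver_name,
 *		.get_timeline_name = foo_get_timeline_name,
 *		.enable_signaling = foo_enable_signaling,
 *		.wait = fence_default_wait,
 *	};
 *
 *	static struct fence *foo_create_fence(struct foo_engine *engine)
 *	{
 *		struct fence *fence;
 *
 *		fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 *		if (!fence)
 *			return NULL;
 *
 *		fence_init(fence, &foo_fence_ops, &engine->fence_lock,
 *			   engine->fence_context, ++engine->seqno);
 *		return fence;
 *	}
 */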