/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

/**
 * struct dma_resv_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: number of fences in the table
 * @shared_max: allocated size of the table, for growing it
 * @shared: shared fence table
 */
struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
};

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 * @lock: update side lock
 * @seq: sequence count for managing RCU read-side synchronization
 * @fence_excl: the exclusive fence, if there is one currently
 * @fence: list of current shared fences
 */
struct dma_resv {
	struct ww_mutex lock;
	seqcount_ww_mutex_t seq;

	struct dma_fence __rcu *fence_excl;
	struct dma_resv_list __rcu *fence;
};

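/*
 * A reservation object is meant to be embedded in a driver's buffer
 * object. A minimal sketch of that pattern follows; struct my_buffer and
 * my_buffer_create() are hypothetical names, not part of this API:
 *
 *	struct my_buffer {
 *		struct dma_resv resv;
 *		void *vaddr;
 *	};
 *
 *	static struct my_buffer *my_buffer_create(void)
 *	{
 *		struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *
 *		if (!buf)
 *			return NULL;
 *		dma_resv_init(&buf->resv);
 *		return buf;
 *	}
 *
 * dma_resv_fini() must be called before the containing object is freed.
 */
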
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_shared_max(struct dma_resv *obj);
#else
static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqcount is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A single
 * reservation object may be locked on its own by passing NULL as @ctx.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}

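/*
 * When several reservation objects must be locked together, the usual
 * pattern is a deadlock-backoff loop. A minimal sketch for two objects
 * follows; @a and @b are hypothetical, and error handling beyond the
 * -EDEADLK backoff is elided:
 *
 *	struct ww_acquire_ctx ctx;
 *	struct dma_resv *first = a, *second = b;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(first, &ctx);
 *	while (!ret) {
 *		ret = dma_resv_lock(second, &ctx);
 *		if (ret != -EDEADLK)
 *			break;
 *		// back off: drop the held lock, sleep on the contended
 *		// one, then retry with the locking order swapped
 *		dma_resv_unlock(first);
 *		swap(first, second);
 *		dma_resv_lock_slow(first, &ctx);
 *		ret = 0;
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... add or replace fences on both objects ...
 *	dma_resv_unlock(first);
 *	dma_resv_unlock(second);
 *	ww_acquire_fini(&ctx);
 */
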
/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqcount is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A single
 * reservation object may be locked on its own by passing NULL as @ctx.
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a deadlock was detected, i.e. after
 * dma_resv_lock() returned -EDEADLK and all other locks held under @ctx were
 * released. This function will sleep until the lock becomes available. See
 * also dma_resv_lock().
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a deadlock was
 * detected, i.e. after dma_resv_lock_interruptible() returned -EDEADLK and
 * all other locks held under @ctx were released. This function will sleep
 * until the lock becomes available. See also dma_resv_lock_interruptible().
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqcount is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock);
}

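/*
 * A minimal sketch of opportunistic locking, for paths where blocking on
 * contention is undesirable; the defer_to_worker() fallback is a
 * hypothetical helper, not part of this API:
 *
 *	if (!dma_resv_trylock(obj)) {
 *		defer_to_worker(obj);	// retry later from a worker
 *		return;
 *	}
 *	// ... inspect or update fences ...
 *	dma_resv_unlock(obj);
 */
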
/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
	dma_resv_reset_shared_max(obj);
	ww_mutex_unlock(&obj->lock);
}

/**
 * dma_resv_excl_fence - return the object's exclusive fence
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any). The caller must either hold the
 * object's lock through dma_resv_lock() or the RCU read side lock through
 * rcu_read_lock(), or one of the variants of each.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct dma_fence *
dma_resv_excl_fence(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}

/**
 * dma_resv_get_excl_unlocked - get the reservation object's
 * exclusive fence, without lock held.
 * @obj: the reservation object
 *
 * If there is an exclusive fence, this atomically increments its
 * reference count and returns it.
 *
 * RETURNS
 * The exclusive fence or NULL if none
 */
static inline struct dma_fence *
dma_resv_get_excl_unlocked(struct dma_resv *obj)
{
	struct dma_fence *fence;

	if (!rcu_access_pointer(obj->fence_excl))
		return NULL;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
	rcu_read_unlock();

	return fence;
}

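/*
 * A minimal usage sketch: waiting for the exclusive fence without taking
 * the reservation lock. The returned reference must be dropped with
 * dma_fence_put():
 *
 *	struct dma_fence *fence;
 *
 *	fence = dma_resv_get_excl_unlocked(obj);
 *	if (fence) {
 *		dma_fence_wait(fence, false);	// non-interruptible wait
 *		dma_fence_put(fence);
 *	}
 */
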
/**
 * dma_resv_shared_list - get the reservation object's shared fence list
 * @obj: the reservation object
 *
 * Returns the shared fence list. The caller must either hold the object's
 * lock through dma_resv_lock() or the RCU read side lock through
 * rcu_read_lock(), or one of the variants of each.
 */
static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
}

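/*
 * A minimal sketch of walking the shared fences while the reservation
 * lock is held; under RCU only, the seqcount would have to be sampled and
 * rechecked instead:
 *
 *	struct dma_resv_list *list;
 *	unsigned int i;
 *
 *	dma_resv_lock(obj, NULL);
 *	list = dma_resv_shared_list(obj);
 *	for (i = 0; list && i < list->shared_count; i++) {
 *		struct dma_fence *fence;
 *
 *		fence = rcu_dereference_protected(list->shared[i],
 *						  dma_resv_held(obj));
 *		// ... inspect the fence ...
 *	}
 *	dma_resv_unlock(obj);
 */
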
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned *pshared_count, struct dma_fence ***pshared);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);

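/*
 * A minimal sketch of publishing a new shared fence: a slot must be
 * reserved under the lock before the fence is added. Here @fence is a
 * hypothetical, already initialized fence:
 *
 *	int ret;
 *
 *	ret = dma_resv_lock(obj, NULL);
 *	if (ret)
 *		return ret;
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(obj, fence);
 *	dma_resv_unlock(obj);
 *	return ret;
 */
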
#endif /* _LINUX_RESERVATION_H */