• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
3  *
4  * Based on bo.c which bears the following copyright notice,
5  * but is dual licensed:
6  *
7  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
8  * All Rights Reserved.
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a
11  * copy of this software and associated documentation files (the
12  * "Software"), to deal in the Software without restriction, including
13  * without limitation the rights to use, copy, modify, merge, publish,
14  * distribute, sub license, and/or sell copies of the Software, and to
15  * permit persons to whom the Software is furnished to do so, subject to
16  * the following conditions:
17  *
18  * The above copyright notice and this permission notice (including the
19  * next paragraph) shall be included in all copies or substantial portions
20  * of the Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
26  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
28  * USE OR OTHER DEALINGS IN THE SOFTWARE.
29  *
30  **************************************************************************/
31 /*
32  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
33  */
34 
35 #include <linux/reservation.h>
36 #include <linux/export.h>
37 
38 /**
39  * DOC: Reservation Object Overview
40  *
41  * The reservation object provides a mechanism to manage shared and
42  * exclusive fences associated with a buffer.  A reservation object
43  * can have attached one exclusive fence (normally associated with
44  * write operations) or N shared fences (read operations).  The RCU
45  * mechanism is used to protect read access to fences from locked
46  * write-side updates.
47  */
48 
/* Single ww_mutex class shared by all reservation objects for deadlock
 * avoidance across multi-object locking sequences. */
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* lockdep class key for the per-object fence seqcount. */
struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

/* lockdep name for the per-object fence seqcount. */
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
57 
58 /**
59  * reservation_object_reserve_shared - Reserve space to add a shared
60  * fence to a reservation_object.
61  * @obj: reservation object
62  *
63  * Should be called before reservation_object_add_shared_fence().  Must
64  * be called with obj->lock held.
65  *
66  * RETURNS
67  * Zero for success, or -errno
68  */
reservation_object_reserve_shared(struct reservation_object * obj)69 int reservation_object_reserve_shared(struct reservation_object *obj)
70 {
71 	struct reservation_object_list *fobj, *old;
72 	u32 max;
73 
74 	old = reservation_object_get_list(obj);
75 
76 	if (old && old->shared_max) {
77 		if (old->shared_count < old->shared_max) {
78 			/* perform an in-place update */
79 			kfree(obj->staged);
80 			obj->staged = NULL;
81 			return 0;
82 		} else
83 			max = old->shared_max * 2;
84 	} else
85 		max = 4;
86 
87 	/*
88 	 * resize obj->staged or allocate if it doesn't exist,
89 	 * noop if already correct size
90 	 */
91 	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
92 			GFP_KERNEL);
93 	if (!fobj)
94 		return -ENOMEM;
95 
96 	obj->staged = fobj;
97 	fobj->shared_max = max;
98 	return 0;
99 }
100 EXPORT_SYMBOL(reservation_object_reserve_shared);
101 
/*
 * Add @fence to @fobj without reallocating: replace an existing fence
 * from the same context, or append into the free slot guaranteed by a
 * prior reservation_object_reserve_shared().  Must be called with
 * obj->lock held; readers are excluded via the seqcount write section.
 */
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	/* Take the list's reference on the new fence up front. */
	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			/* Drop the displaced fence outside the write section. */
			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}
141 
/*
 * Publish the staged list @fobj (carrying @fence plus the entries copied
 * from @old) as obj's shared-fence list, then free @old via RCU.  Must be
 * called with obj->lock held.
 */
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	/* Reference held by the new list for @fence. */
	fence_get(fence);

	if (!old) {
		/* First shared fence on this object. */
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			/* Same context: @fence supersedes this entry. */
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		/* No same-context entry found: append @fence. */
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* Old list may still be walked by RCU readers; defer the free. */
	if (old)
		kfree_rcu(old, rcu);

	/* Drop the reference the old list held on the superseded fence. */
	if (old_fence)
		fence_put(old_fence);
}
201 
202 /**
203  * reservation_object_add_shared_fence - Add a fence to a shared slot
204  * @obj: the reservation object
205  * @fence: the shared fence to add
206  *
207  * Add a fence to a shared slot, obj->lock must be held, and
208  * reservation_object_reserve_shared() has been called.
209  */
reservation_object_add_shared_fence(struct reservation_object * obj,struct fence * fence)210 void reservation_object_add_shared_fence(struct reservation_object *obj,
211 					 struct fence *fence)
212 {
213 	struct reservation_object_list *old, *fobj = obj->staged;
214 
215 	old = reservation_object_get_list(obj);
216 	obj->staged = NULL;
217 
218 	if (!fobj) {
219 		BUG_ON(old->shared_count >= old->shared_max);
220 		reservation_object_add_shared_inplace(obj, old, fence);
221 	} else
222 		reservation_object_add_shared_replace(obj, old, fobj, fence);
223 }
224 EXPORT_SYMBOL(reservation_object_add_shared_fence);
225 
226 /**
227  * reservation_object_add_excl_fence - Add an exclusive fence.
228  * @obj: the reservation object
229  * @fence: the shared fence to add
230  *
231  * Add a fence to the exclusive slot.  The obj->lock must be held.
232  */
reservation_object_add_excl_fence(struct reservation_object * obj,struct fence * fence)233 void reservation_object_add_excl_fence(struct reservation_object *obj,
234 				       struct fence *fence)
235 {
236 	struct fence *old_fence = reservation_object_get_excl(obj);
237 	struct reservation_object_list *old;
238 	u32 i = 0;
239 
240 	old = reservation_object_get_list(obj);
241 	if (old)
242 		i = old->shared_count;
243 
244 	if (fence)
245 		fence_get(fence);
246 
247 	preempt_disable();
248 	write_seqcount_begin(&obj->seq);
249 	/* write_seqcount_begin provides the necessary memory barrier */
250 	RCU_INIT_POINTER(obj->fence_excl, fence);
251 	if (old)
252 		old->shared_count = 0;
253 	write_seqcount_end(&obj->seq);
254 	preempt_enable();
255 
256 	/* inplace update, no shared fences */
257 	while (i--)
258 		fence_put(rcu_dereference_protected(old->shared[i],
259 						reservation_object_held(obj)));
260 
261 	if (old_fence)
262 		fence_put(old_fence);
263 }
264 EXPORT_SYMBOL(reservation_object_add_excl_fence);
265 
266 /**
267  * reservation_object_get_fences_rcu - Get an object's shared and exclusive
268  * fences without update side lock held
269  * @obj: the reservation object
270  * @pfence_excl: the returned exclusive fence (or NULL)
271  * @pshared_count: the number of shared fences returned
272  * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
273  * the required size, and must be freed by caller)
274  *
275  * RETURNS
276  * Zero or -errno
277  */
reservation_object_get_fences_rcu(struct reservation_object * obj,struct fence ** pfence_excl,unsigned * pshared_count,struct fence *** pshared)278 int reservation_object_get_fences_rcu(struct reservation_object *obj,
279 				      struct fence **pfence_excl,
280 				      unsigned *pshared_count,
281 				      struct fence ***pshared)
282 {
283 	struct fence **shared = NULL;
284 	struct fence *fence_excl;
285 	unsigned int shared_count;
286 	int ret = 1;
287 
288 	do {
289 		struct reservation_object_list *fobj;
290 		unsigned seq;
291 		unsigned int i;
292 
293 		shared_count = i = 0;
294 
295 		rcu_read_lock();
296 		seq = read_seqcount_begin(&obj->seq);
297 
298 		fence_excl = rcu_dereference(obj->fence_excl);
299 		if (fence_excl && !fence_get_rcu(fence_excl))
300 			goto unlock;
301 
302 		fobj = rcu_dereference(obj->fence);
303 		if (fobj) {
304 			struct fence **nshared;
305 			size_t sz = sizeof(*shared) * fobj->shared_max;
306 
307 			nshared = krealloc(shared, sz,
308 					   GFP_NOWAIT | __GFP_NOWARN);
309 			if (!nshared) {
310 				rcu_read_unlock();
311 				nshared = krealloc(shared, sz, GFP_KERNEL);
312 				if (nshared) {
313 					shared = nshared;
314 					continue;
315 				}
316 
317 				ret = -ENOMEM;
318 				break;
319 			}
320 			shared = nshared;
321 			shared_count = fobj->shared_count;
322 
323 			for (i = 0; i < shared_count; ++i) {
324 				shared[i] = rcu_dereference(fobj->shared[i]);
325 				if (!fence_get_rcu(shared[i]))
326 					break;
327 			}
328 		}
329 
330 		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
331 			while (i--)
332 				fence_put(shared[i]);
333 			fence_put(fence_excl);
334 			goto unlock;
335 		}
336 
337 		ret = 0;
338 unlock:
339 		rcu_read_unlock();
340 	} while (ret);
341 
342 	if (!shared_count) {
343 		kfree(shared);
344 		shared = NULL;
345 	}
346 
347 	*pshared_count = shared_count;
348 	*pshared = shared;
349 	*pfence_excl = fence_excl;
350 
351 	return ret;
352 }
353 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
354 
355 /**
356  * reservation_object_wait_timeout_rcu - Wait on reservation's objects
357  * shared and/or exclusive fences.
358  * @obj: the reservation object
359  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
360  * @intr: if true, do interruptible wait
361  * @timeout: timeout value in jiffies or zero to return immediately
362  *
363  * RETURNS
364  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
365  * greater than zer on success.
366  */
reservation_object_wait_timeout_rcu(struct reservation_object * obj,bool wait_all,bool intr,unsigned long timeout)367 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
368 					 bool wait_all, bool intr,
369 					 unsigned long timeout)
370 {
371 	struct fence *fence;
372 	unsigned seq, shared_count, i = 0;
373 	long ret = timeout ? timeout : 1;
374 
375 retry:
376 	fence = NULL;
377 	shared_count = 0;
378 	seq = read_seqcount_begin(&obj->seq);
379 	rcu_read_lock();
380 
381 	if (wait_all) {
382 		struct reservation_object_list *fobj =
383 						rcu_dereference(obj->fence);
384 
385 		if (fobj)
386 			shared_count = fobj->shared_count;
387 
388 		for (i = 0; i < shared_count; ++i) {
389 			struct fence *lfence = rcu_dereference(fobj->shared[i]);
390 
391 			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
392 				continue;
393 
394 			if (!fence_get_rcu(lfence))
395 				goto unlock_retry;
396 
397 			if (fence_is_signaled(lfence)) {
398 				fence_put(lfence);
399 				continue;
400 			}
401 
402 			fence = lfence;
403 			break;
404 		}
405 	}
406 
407 	if (!shared_count) {
408 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
409 
410 		if (fence_excl &&
411 		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
412 			if (!fence_get_rcu(fence_excl))
413 				goto unlock_retry;
414 
415 			if (fence_is_signaled(fence_excl))
416 				fence_put(fence_excl);
417 			else
418 				fence = fence_excl;
419 		}
420 	}
421 
422 	rcu_read_unlock();
423 	if (fence) {
424 		if (read_seqcount_retry(&obj->seq, seq)) {
425 			fence_put(fence);
426 			goto retry;
427 		}
428 
429 		ret = fence_wait_timeout(fence, intr, ret);
430 		fence_put(fence);
431 		if (ret > 0 && wait_all && (i + 1 < shared_count))
432 			goto retry;
433 	}
434 	return ret;
435 
436 unlock_retry:
437 	rcu_read_unlock();
438 	goto retry;
439 }
440 EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
441 
442 
443 static inline int
reservation_object_test_signaled_single(struct fence * passed_fence)444 reservation_object_test_signaled_single(struct fence *passed_fence)
445 {
446 	struct fence *fence, *lfence = passed_fence;
447 	int ret = 1;
448 
449 	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
450 		fence = fence_get_rcu(lfence);
451 		if (!fence)
452 			return -1;
453 
454 		ret = !!fence_is_signaled(fence);
455 		fence_put(fence);
456 	}
457 	return ret;
458 }
459 
460 /**
461  * reservation_object_test_signaled_rcu - Test if a reservation object's
462  * fences have been signaled.
463  * @obj: the reservation object
464  * @test_all: if true, test all fences, otherwise only test the exclusive
465  * fence
466  *
467  * RETURNS
468  * true if all fences signaled, else false
469  */
reservation_object_test_signaled_rcu(struct reservation_object * obj,bool test_all)470 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
471 					  bool test_all)
472 {
473 	unsigned seq, shared_count;
474 	int ret;
475 
476 	rcu_read_lock();
477 retry:
478 	ret = true;
479 	shared_count = 0;
480 	seq = read_seqcount_begin(&obj->seq);
481 
482 	if (test_all) {
483 		unsigned i;
484 
485 		struct reservation_object_list *fobj =
486 						rcu_dereference(obj->fence);
487 
488 		if (fobj)
489 			shared_count = fobj->shared_count;
490 
491 		for (i = 0; i < shared_count; ++i) {
492 			struct fence *fence = rcu_dereference(fobj->shared[i]);
493 
494 			ret = reservation_object_test_signaled_single(fence);
495 			if (ret < 0)
496 				goto retry;
497 			else if (!ret)
498 				break;
499 		}
500 
501 		if (read_seqcount_retry(&obj->seq, seq))
502 			goto retry;
503 	}
504 
505 	if (!shared_count) {
506 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
507 
508 		if (fence_excl) {
509 			ret = reservation_object_test_signaled_single(
510 								fence_excl);
511 			if (ret < 0)
512 				goto retry;
513 
514 			if (read_seqcount_retry(&obj->seq, seq))
515 				goto retry;
516 		}
517 	}
518 
519 	rcu_read_unlock();
520 	return ret;
521 }
522 EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
523