/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but enforces
 * an alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *	in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */
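
/*
 * Illustrative usage (a sketch, not a normative example): a reader
 * takes the lock with a jiffies-based timeout and must check the
 * result, since a return of 0 means the wait timed out.
 *
 *	if (ldsem_down_read(&sem, msecs_to_jiffies(5000))) {
 *		... read-side critical section ...
 *		ldsem_up_read(&sem);
 *	} else {
 *		... the wait timed out; handle failure ...
 *	}
 */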

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>


#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
# define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
# define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
# define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
# define lockdep_release(l, n, i)		__rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)

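/*
 * A worked example of the count encoding on a 64-bit build (the values
 * are illustrative): the low 32 bits count active lock holders and the
 * high 32 bits count waiters as a negative bias.  A write holder keeps
 * its wait bias for as long as it holds the lock.
 *
 *	0x0000000000000000	unlocked, no waiters
 *	0x0000000000000003	three active readers, no waiters
 *	0xffffffff00000001	one active writer (WAIT_BIAS + ACTIVE_BIAS);
 *				the same value also encodes one active
 *				reader plus one queued waiter
 *
 * Hence the tests below: (count & LDSEM_ACTIVE_MASK) isolates the
 * active part, while count < 0 signals a waiter (or a write holder)
 * that may need waking.
 */
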
/* A waiter queued on an ldsem read_wait or write_wait list */
struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

/* Add @delta to the count and return the new count value */
static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have most recent sem->count
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
	if (tmp == *old) {
		*old = new;
		return 1;
	} else {
		*old = tmp;
		return 0;
	}
}

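/*
 * That update-or-learn contract is what drives the open-coded retry
 * loops below.  A minimal sketch of the idiom, where new_value() and
 * give_up() are hypothetical stand-ins for a caller's desired
 * transition and abort test:
 *
 *	long count = sem->count;
 *	while (!give_up(count)) {
 *		if (ldsem_cmpxchg(&count, new_value(count), sem))
 *			return 1;
 *	}
 *	return 0;
 *
 * On cmpxchg failure, @count has been refreshed, so every iteration
 * decides against the latest observed value.
 */
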
/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}

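/*
 * Note: callers normally do not invoke __init_ldsem() directly but go
 * through an init_ldsem() wrapper macro (declared alongside struct
 * ld_semaphore) that supplies a unique static lock_class_key per call
 * site, roughly:
 *
 *	#define init_ldsem(sem)					\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *		__init_ldsem((sem), #sem, &__key);		\
 *	} while (0)
 */
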
static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_store_release(&waiter->task, NULL);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}

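/*
 * Memory-ordering note: the smp_store_release() of waiter->task above
 * pairs with the smp_load_acquire() in down_read_failed(), so a woken
 * reader cannot observe its grant before the waker's prior writes.
 */
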
static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if there are no waiters
	 * (i.e. the read lock was granted after all), and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = tsk;
	get_task_struct(tsk);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);

		if (!smp_load_acquire(&waiter.task))
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_task_state(tsk, TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = tsk;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);

	/*
	 * In case of timeout, wake up every reader that gave the right of
	 * way to the writer.  This prevents the readers from being split
	 * into two groups: one that holds the semaphore and another that
	 * sleeps (when there is no longer contention with a writer).
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	__set_task_state(tsk, TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}


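/*
 * Design note: unlike a woken reader, a woken writer does not own the
 * lock on wakeup; it must still win writer_trylock() against other
 * lockers.  This is the write lock stealing mentioned in the header
 * comment: whichever writer can flip the active count 0 -> 1 takes the
 * lock, not necessarily the longest waiter.
 */
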
static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}

static inline int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

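/*
 * Illustrative trylock usage (a sketch; the -EBUSY choice is the
 * caller's, not part of this API):
 *
 *	if (!ldsem_down_write_trylock(&sem))
 *		return -EBUSY;
 *	... exclusive critical section ...
 *	ldsem_up_write(&sem);
 */
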
/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
	if (count < 0)
		ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif