/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but enforces
 * an alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *	in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */
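
/*
 * Example usage (an illustrative sketch only; the real callers live in
 * the tty line-discipline core, e.g. drivers/tty/tty_ldisc.c):
 *
 *	if (ldsem_down_read(&tty->ldisc_sem, 5 * HZ)) {
 *		... use the line discipline ...
 *		ldsem_up_read(&tty->ldisc_sem);
 *	} else {
 *		... timed out: the read lock was NOT acquired ...
 *	}
 */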

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>


#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
# define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
# define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
# define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
# define lockdep_release(l, n, i)		__rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)

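/*
 * Worked example of the count layout on a 64-bit build (illustrative):
 *
 *	0x0000000000000000	unlocked (LDSEM_UNLOCKED)
 *	0x0000000000000002	two readers active, no waiters
 *	0xffffffff00000001	writer active (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
 *	0xffffffff00000000	one waiter queued, no active holders
 *
 * The upper half accumulates one negative wait bias per waiter; the
 * lower half (LDSEM_ACTIVE_MASK) counts active holders.
 */
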
struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have most recent sem->count.
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
	if (tmp == *old) {
		*old = new;
		return 1;
	} else {
		*old = tmp;
		return 0;
	}
}
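
/*
 * Because a failed ldsem_cmpxchg() refreshes @*old, callers can retry
 * without re-reading sem->count. A sketch of the idiom used throughout
 * this file (early_out() is a stand-in for a caller-specific test):
 *
 *	long count = sem->count;
 *	do {
 *		if (ldsem_cmpxchg(&count, count + adjust, sem))
 *			break;
 *		if (early_out(count))
 *			return ...;
 *	} while (1);
 */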

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}
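
/*
 * Callers do not normally invoke __init_ldsem() directly; a wrapper
 * macro can supply the name and a static lock class key, e.g. this
 * sketch (patterned on the init_ldsem() helper in the tty headers):
 *
 *	#define init_ldsem(sem)					\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *								\
 *		__init_ldsem((sem), #sem, &__key);		\
 *	} while (0)
 */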

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
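	/* Worked example (illustrative): with two waiting readers on a
	 * 64-bit build, adjust = 2 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS)
	 * = 2 * 0x100000001 -- one atomic add clears both wait biases in
	 * the upper half and grants two active biases in the lower half.
	 */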
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}
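
/*
 * Worked example (illustrative, 64-bit): with one writer waiting and no
 * active holders the count is 0xffffffff00000000; the speculative add
 * above yields 0xffffffff00000001, whose active part equals
 * LDSEM_ACTIVE_BIAS, so the trylock succeeds. Any other active count
 * means the lock is held and the add must be undone.
 */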

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff, 32-bit layout) reached 0
 *     (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if there are no waiters,
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = tsk;
	get_task_struct(tsk);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);

		if (!waiter.task)
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_task_state(tsk, TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = tsk;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if ((locked = writer_trylock(sem)))
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	__set_task_state(tsk, TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}


static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}

static inline int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}
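
/*
 * Example trylock usage (illustrative sketch; compare tty_ldisc_ref()
 * in the ldisc core, which pairs a read trylock with ldsem_up_read()):
 *
 *	if (ldsem_down_read_trylock(&tty->ldisc_sem)) {
 *		... got the read lock without blocking ...
 *		ldsem_up_read(&tty->ldisc_sem);
 *	}
 */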

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
	if (count < 0)
		ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif