/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock. However, to
 * make it fit in the 4 bytes we assume spinlock_t to be, and to preserve the
 * existing spinlock API, we need to modify it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion within its own context, and there is a limit
 * to the contexts that can nest: task, softirq, hardirq, nmi. Since there are
 * at most 4 nesting levels, the nesting level can be encoded in 2 bits, and
 * the tail can be encoded by combining that 2-bit nesting level with the cpu
 * number. With one byte for the lock value and 3 bytes for the tail, only a
 * 32-bit word is needed. Even though we only need 1 bit for the lock,
 * we extend it to a full byte to achieve better performance for architectures
 * that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on smaller 8-bit and 16-bit data types.
 *
 */
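
/*
 * For orientation, a sketch of the resulting 32-bit lock word layout. This
 * assumes the common configuration (NR_CPUS < 16K, i.e. 8 pending bits); the
 * authoritative bit definitions live in asm-generic/qspinlock_types.h:
 *
 *  bits  0- 7: locked byte
 *  bit      8: pending
 *  bits  9-15: unused
 *  bits 16-17: tail index (nesting level)
 *  bits 18-31: tail cpu (+1)
 */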

#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lock word accesses made by
 * smp_cond_load_acquire() when waiting for the lock to transition out of the
 * "== _Q_PENDING_VAL" state. We don't spin indefinitely because there's no
 * guarantee that we'll make forward progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS	1
#endif
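
/*
 * Architectures may raise this limit; x86, for instance, overrides
 * _Q_PENDING_LOOPS in asm/qspinlock.h (to 1 << 9 at the time of writing) so
 * that a short pending->locked hand-over is waited out rather than queueing
 * immediately.
 */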

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
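
/*
 * The cacheline arithmetic above assumes the layout in mcs_spinlock.h: on a
 * 64-bit architecture struct mcs_spinlock (a next pointer plus two ints) is
 * 16 bytes, so 4 nodes fill exactly one 64-byte cacheline. With PV spinlocks
 * each node carries extra PV state in the second cacheline, which is why
 * MAX_NODES is doubled to 8 above.
 */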

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
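
/*
 * Worked example (illustrative only; the offsets assume the common layout
 * with _Q_TAIL_IDX_OFFSET == 16 and _Q_TAIL_CPU_OFFSET == 18):
 *
 *   encode_tail(cpu = 5, idx = 2) = ((5 + 1) << 18) | (2 << 16) = 0x001a0000
 *
 * decode_tail(0x001a0000) recovers cpu = 6 - 1 = 5 and idx = 2, i.e. the
 * third queue node of CPU 5. A tail of 0 therefore unambiguously means
 * "no queue".
 */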

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 0);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&lock->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
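
/*
 * Note on the xchg above: with _Q_PENDING_BITS == 8 the tail occupies the
 * upper 16 bits of the lock word, so (assuming the usual struct qspinlock
 * layout) lock->tail is a u16 overlay of those bits and _Q_TAIL_OFFSET is 16;
 * the shifts merely convert between the full 32-bit tail code word and that
 * halfword value.
 */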

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}
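
/*
 * Worked example for the atomic_add() above (illustrative; assumes the
 * single-pending-bit layout where _Q_PENDING_VAL == 1 << 8 and
 * _Q_LOCKED_VAL == 1): starting from the *,1,0 state the low 16 bits are
 * 0x0100, and adding (-0x0100 + 0x0001) yields 0x0001, i.e. pending cleared
 * and locked set in one atomic operation, with no carry into the tail bits
 * because the locked byte was zero.
 */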

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
 * @lock : Pointer to queued spinlock structure
 * Return: The previous lock value
 *
 * *,*,* -> *,1,*
 */
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}

/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
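
/*
 * Everything above is compiled only on the first pass over this file. When
 * CONFIG_PARAVIRT_SPINLOCKS is enabled, the bottom of this file defines
 * _GEN_PV_LOCK_SLOWPATH and #includes qspinlock.c again, so that the slowpath
 * below is emitted a second time as __pv_queued_spin_lock_slowpath() with the
 * pv_*() callbacks resolved to the real paravirt hooks instead of the NOPs
 * above.
 */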

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
 *   queue               :       | ^--'                          |  :
 *                       :       v                               |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * Wait for in-progress pending->locked hand-overs with a bounded
	 * number of spins so that we guarantee forward progress.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;
		val = smp_cond_load_acquire(&lock->val.counter,
					       (VAL != _Q_PENDING_VAL) || !cnt--);
	}

	/*
	 * If we observe any contention; queue.
	 */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	val = queued_fetch_set_pending_acquire(lock);

	/*
	 * If we observe any contention; undo and queue.
	 */
	if (unlikely(val & ~_Q_LOCKED_MASK)) {
		if (!(val & _Q_PENDING_MASK))
			clear_pending(lock);
		goto queue;
	}

	/*
	 * We're pending, wait for the owner to go away.
	 *
	 * 0,1,1 -> 0,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all
	 * clear_pending_set_locked() implementations imply full
	 * barriers.
	 */
	if (val & _Q_LOCKED_MASK)
		smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	clear_pending_set_locked(lock);
	return;

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;

	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/*
		 * We must ensure that the stores to @node are observed before
		 * the write to prev->next. The address dependency from
		 * xchg_tail is not sufficient to ensure this because the read
		 * component of xchg_tail is unordered with respect to the
		 * initialisation of @node.
		 */
		smp_store_release(&prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * smp_cond_load_acquire() call. As the next PV queue head hasn't been
	 * designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 *
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,*,0 -> *,*,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail)
	 * and nobody is pending, clear the tail code and grab the lock.
	 * Otherwise, we only need to grab the lock.
	 */

	/* In the PV case we might already have _Q_LOCKED_VAL set */
	if ((val & _Q_TAIL_MASK) == tail) {
		/*
		 * The smp_cond_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release; /* No contention */
	}

	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
	set_locked(lock);

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
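
/*
 * For context, the fastpath that invokes this slowpath lives in
 * include/asm-generic/qspinlock.h and looks roughly like the sketch below
 * (simplified, not the verbatim kernel code):
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val;
 *
 *		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 *		if (likely(val == 0))
 *			return;			(uncontended: 0,0,0 -> 0,0,1)
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 */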

/*
 * Generate the paravirt code for queued_spin_lock_slowpath() by re-including
 * this file with the pv_*() hooks enabled.
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif