#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress, by detecting a change in the sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
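
/*
 * Example (not part of the original header): a minimal sketch of defining a
 * statically initialized sequence counter. The names "foo_seq" and "foo" are
 * hypothetical, used only for illustration.
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 * A counter embedded in a dynamically allocated structure would instead be
 * set up at run time with seqcount_init(&foo->seq) before first use.
 */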

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
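
/*
 * Example (not part of the original header): a hedged sketch of how a caller
 * might pair __read_seqcount_begin()/__read_seqcount_retry() with its own
 * ordering. Here the barrier is supplied explicitly with smp_rmb(), which is
 * exactly what raw_read_seqcount_begin() and read_seqcount_retry() do
 * internally. "foo_seq", "foo_a" and "foo_b" are hypothetical names.
 *
 *	unsigned seq;
 *	int a, b;
 *
 *	do {
 *		seq = __read_seqcount_begin(&foo_seq);
 *		smp_rmb();	<- order the data loads after the counter load
 *		a = foo_a;
 *		b = foo_b;
 *		smp_rmb();	<- order the data loads before the retry check
 *	} while (__read_seqcount_retry(&foo_seq, seq));
 */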

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling the read_seqcount_retry() function.
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling the
 * read_seqcount_retry() function.
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling the
 * read_seqcount_retry() function.
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
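
/*
 * Example (not part of the original header): a minimal sketch of the reader
 * side for a bare seqcount_t, mirroring the read_seqbegin()/read_seqretry()
 * pattern shown at the top of this file. "foo_seq", "foo_x" and "foo_y" are
 * hypothetical names used only for illustration.
 *
 *	unsigned seq;
 *	int x, y;
 *
 *	do {
 *		seq = read_seqcount_begin(&foo_seq);
 *		x = foo_x;
 *		y = foo_y;
 *	} while (read_seqcount_retry(&foo_seq, seq));
 */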

static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
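
/*
 * Example (not part of the original header): a hedged sketch of a write-side
 * bracket built from the raw primitives, for callers that provide their own
 * serialization and do not want lockdep annotations. "foo_lock", "foo_seq",
 * "foo_x" and "new_value" are hypothetical.
 *
 *	spin_lock(&foo_lock);			<- writers serialized externally
 *	raw_write_seqcount_begin(&foo_seq);
 *	foo_x = new_value;
 *	raw_write_seqcount_end(&foo_seq);
 *	spin_unlock(&foo_lock);
 */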

/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed in a
 * seq-writer critical section that would ensure readers are aware of ongoing
 * writes.
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}

static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	return lockless_dereference(s->sequence);
}

/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state, the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like:
 *
 *	struct latch_struct {
 *		seqcount_t		seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following:
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	<- Ensure that the last data[1] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	<- Ensure that the data[0] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like:
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = lockless_dereference(latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			smp_rmb();
 *		} while (seq != latch->seq);
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE: The non-requirement for atomic modifications does _NOT_ include
 *       the publishing of new entries in the case where data is a dynamic
 *       data structure.
 *
 *       An iteration might start in data[0] and get suspended long enough
 *       to miss an entire modification sequence; once it resumes, it might
 *       observe the new entry.
 *
 * NOTE: When data is a dynamic data structure, one should use regular RCU
 *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
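
/*
 * Example (not part of the original header): a minimal sketch of the write
 * side for a bare seqcount_t, with the update serialized by the caller's own
 * lock as the comment above requires. Unlike the raw_ variants earlier, these
 * helpers also annotate lockdep. "foo_lock", "foo_seq", "foo_x" and
 * "new_value" are hypothetical names used only for illustration.
 *
 *	spin_lock(&foo_lock);		<- caller provides its own mutexing
 *	write_seqcount_begin(&foo_seq);
 *	foo_x = new_value;
 *	write_seqcount_end(&foo_seq);
 *	spin_unlock(&foo_lock);
 */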

/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
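
/*
 * Example (not part of the original header): a minimal sketch of creating a
 * seqlock either statically or at run time. "foo_lock" and "bar" are
 * hypothetical names used only for illustration.
 *
 *	static DEFINE_SEQLOCK(foo_lock);	<- file scope, statically initialized
 *
 *	struct bar {
 *		seqlock_t lock;
 *	};
 *
 *	seqlock_init(&bar->lock);		<- embedded lock, initialized at run time
 */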

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
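
/*
 * Example (not part of the original header): a hedged sketch of the write
 * side of a seqlock_t, matching the reader loop shown at the top of this
 * file. The _bh/_irq/_irqsave variants follow the same pattern when the data
 * is also touched from softirq or hardirq context. "foo", "foo_x" and
 * "new_value" are hypothetical.
 *
 *	write_seqlock(&foo);		<- serializes writers and bumps the count
 *	foo_x = new_value;
 *	write_sequnlock(&foo);
 */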

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
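
/*
 * Example (not part of the original header): a minimal sketch of a locking
 * reader, the second reader type described at the top of this file. It never
 * has to retry, at the cost of excluding writers and other locking readers
 * while it runs. "foo" and "foo_x" are hypothetical.
 *
 *	read_seqlock_excl(&foo);	<- blocks writers; no retry loop needed
 *	x = foo_x;
 *	read_sequnlock_excl(&foo);
 */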

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq: sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
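
/*
 * Example (not part of the original header): a hedged sketch of the
 * read_seqbegin_or_lock()/need_seqretry()/done_seqretry() pattern. The first
 * pass is a lockless sequence read; if it fails, seq is forced odd so the
 * second pass takes the lock and cannot fail. "foo" and "foo_x" are
 * hypothetical.
 *
 *	int seq = 0;			<- must start even: first pass is lockless
 *
 * retry:
 *	read_seqbegin_or_lock(&foo, &seq);
 *	x = foo_x;
 *	if (need_seqretry(&foo, seq)) {
 *		seq = 1;		<- odd: next pass takes the lock
 *		goto retry;
 *	}
 *	done_seqretry(&foo, seq);
 */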

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
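
/*
 * Example (not part of the original header): the irqsave flavour of the
 * pattern sketched after done_seqretry() above, for data that may also be
 * written from hardirq context. "foo" and "foo_x" are hypothetical.
 *
 *	unsigned long flags;
 *	int seq = 0;
 *
 * retry:
 *	flags = read_seqbegin_or_lock_irqsave(&foo, &seq);
 *	x = foo_x;
 *	if (need_seqretry(&foo, seq)) {
 *		seq = 1;		<- odd: next pass takes the lock, irqs saved
 *		goto retry;
 *	}
 *	done_seqretry_irqrestore(&foo, seq, flags);
 */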
#endif /* __LINUX_SEQLOCK_H */