1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_SEQLOCK_H
3 #define __LINUX_SEQLOCK_H
4
5 /*
6 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
7 * lockless readers (read-only retry loops), and no writer starvation.
8 *
9 * See Documentation/locking/seqlock.rst
10 *
11 * Copyrights:
12 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
13 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
14 */
15
16 #include <linux/compiler.h>
17 #include <linux/kcsan-checks.h>
18 #include <linux/lockdep.h>
19 #include <linux/mutex.h>
20 #include <linux/ww_mutex.h>
21 #include <linux/preempt.h>
22 #include <linux/spinlock.h>
23
24 #include <asm/processor.h>
25
26 /*
27 * The seqlock seqcount_t interface does not prescribe a precise sequence of
28 * read begin/retry/end. For readers, typically there is a call to
29 * read_seqcount_begin() and read_seqcount_retry(); however, there are more
30 * esoteric cases which do not follow this pattern.
31 *
32 * As a consequence, we take the following best-effort approach for raw usage
33 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
34 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
35 * atomics; if there is a matching read_seqcount_retry() call, no following
36 * memory operations are considered atomic. Usage of the seqlock_t interface
37 * is not affected.
38 */
39 #define KCSAN_SEQLOCK_REGION_MAX 1000
40
41 /*
42 * Sequence counters (seqcount_t)
43 *
44 * This is the raw counting mechanism, without any writer protection.
45 *
46 * Write side critical sections must be serialized and non-preemptible.
47 *
48 * If readers can be invoked from hardirq or softirq contexts,
49 * interrupts or bottom halves must also be respectively disabled before
50 * entering the write section.
51 *
52 * This mechanism can't be used if the protected data contains pointers,
53 * as the writer can invalidate a pointer that a reader is following.
54 *
55 * If the write serialization mechanism is one of the common kernel
56 * locking primitives, use a sequence counter with associated lock
57 * (seqcount_LOCKNAME_t) instead.
58 *
59 * If it's desired to automatically handle the sequence counter writer
60 * serialization and non-preemptibility requirements, use a sequential
61 * lock (seqlock_t) instead.
62 *
63 * See Documentation/locking/seqlock.rst
64 */
65 typedef struct seqcount {
66 unsigned sequence;
67 #ifdef CONFIG_DEBUG_LOCK_ALLOC
68 struct lockdep_map dep_map;
69 #endif
70 } seqcount_t;
71
72 static inline void __seqcount_init(seqcount_t *s, const char *name,
73 struct lock_class_key *key)
74 {
75 /*
76 * Make sure we are not reinitializing a held lock:
77 */
78 lockdep_init_map(&s->dep_map, name, key, 0);
79 s->sequence = 0;
80 }
81
82 #ifdef CONFIG_DEBUG_LOCK_ALLOC
83
84 # define SEQCOUNT_DEP_MAP_INIT(lockname) \
85 .dep_map = { .name = #lockname }
86
87 /**
88 * seqcount_init() - runtime initializer for seqcount_t
89 * @s: Pointer to the seqcount_t instance
90 */
91 # define seqcount_init(s) \
92 do { \
93 static struct lock_class_key __key; \
94 __seqcount_init((s), #s, &__key); \
95 } while (0)
96
97 static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
98 {
99 seqcount_t *l = (seqcount_t *)s;
100 unsigned long flags;
101
102 local_irq_save(flags);
103 seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
104 seqcount_release(&l->dep_map, _RET_IP_);
105 local_irq_restore(flags);
106 }
107
108 #else
109 # define SEQCOUNT_DEP_MAP_INIT(lockname)
110 # define seqcount_init(s) __seqcount_init(s, NULL, NULL)
111 # define seqcount_lockdep_reader_access(x)
112 #endif
113
114 /**
115 * SEQCNT_ZERO() - static initializer for seqcount_t
116 * @name: Name of the seqcount_t instance
117 */
118 #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
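
/*
 * A minimal usage sketch (illustrative only; "foo_seq", "foo_a", "foo_b"
 * and the functions below are hypothetical). The writer is assumed to be
 * serialized by its callers and runs non-preemptible, as required above::
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *	static int foo_a, foo_b;
 *
 *	void foo_write(int a, int b)
 *	{
 *		preempt_disable();
 *		write_seqcount_begin(&foo_seq);
 *		foo_a = a;
 *		foo_b = b;
 *		write_seqcount_end(&foo_seq);
 *		preempt_enable();
 *	}
 *
 *	void foo_read(int *a, int *b)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			*a = foo_a;
 *			*b = foo_b;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *	}
 */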
119
120 /*
121 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
122 *
123 * A sequence counter which associates the lock used for writer
124 * serialization at initialization time. This enables lockdep to validate
125 * that the write side critical section is properly serialized.
126 *
127 * For associated locks which do not implicitly disable preemption,
128 * preemption protection is enforced in the write side function.
129 *
130 * Lockdep is never used in any of the raw write variants.
131 *
132 * See Documentation/locking/seqlock.rst
133 */
134
135 /*
136 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
137 * disable preemption. Doing so can lead to higher latencies, and the write side
138 * sections will not be able to acquire locks which become sleeping locks
139 * (e.g. spinlock_t).
140 *
141 * To remain preemptible while avoiding a possible livelock caused by the
142 * reader preempting the writer, use a different technique: let the reader
143 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
144 * case, acquire then release the associated LOCKNAME writer serialization
145 * lock. This will allow any possibly-preempted writer to make progress
146 * until the end of its writer serialization lock critical section.
147 *
148 * This lock-unlock technique must be implemented for all of PREEMPT_RT
149 * sleeping locks. See Documentation/locking/locktypes.rst
150 */
151 #if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
152 #define __SEQ_LOCK(expr) expr
153 #else
154 #define __SEQ_LOCK(expr)
155 #endif
156
157 /*
158 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
159 * @seqcount: The real sequence counter
160 * @lock: Pointer to the associated lock
161 *
162 * A plain sequence counter with external writer synchronization by
163 * LOCKNAME @lock. The lock is associated to the sequence counter in the
164 * static initializer or init function. This enables lockdep to validate
165 * that the write side critical section is properly serialized.
166 *
167 * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
168 */
169
170 /*
171 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
172 * @s: Pointer to the seqcount_LOCKNAME_t instance
173 * @lock: Pointer to the associated lock
174 */
175
176 #define seqcount_LOCKNAME_init(s, _lock, lockname) \
177 do { \
178 seqcount_##lockname##_t *____s = (s); \
179 seqcount_init(&____s->seqcount); \
180 __SEQ_LOCK(____s->lock = (_lock)); \
181 } while (0)
182
183 #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
184 #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
185 #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
186 #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
187 #define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex)
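
/*
 * An illustrative sketch of associating a spinlock_t at init time; the
 * structure and function names below are hypothetical::
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *		u64			stamp;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		seqcount_spinlock_init(&f->seq, &f->lock);
 *	}
 *
 *	void foo_update(struct foo *f, u64 stamp)
 *	{
 *		spin_lock(&f->lock);
 *		write_seqcount_begin(&f->seq);	// lockdep checks f->lock is held
 *		f->stamp = stamp;
 *		write_seqcount_end(&f->seq);
 *		spin_unlock(&f->lock);
 *	}
 */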
188
189 /*
190 * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
191 * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
192 *
193 * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
194 * @locktype: LOCKNAME canonical C data type
195 * @preemptible: preemptibility of above locktype
196 * @lockmember: argument for lockdep_assert_held()
197 * @lockbase: associated lock release function (prefix only)
198 * @lock_acquire: associated lock acquisition function (full call)
199 */
200 #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
201 typedef struct seqcount_##lockname { \
202 seqcount_t seqcount; \
203 __SEQ_LOCK(locktype *lock); \
204 } seqcount_##lockname##_t; \
205 \
206 static __always_inline seqcount_t * \
207 __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
208 { \
209 return &s->seqcount; \
210 } \
211 \
212 static __always_inline unsigned \
213 __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
214 { \
215 unsigned seq = READ_ONCE(s->seqcount.sequence); \
216 \
217 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
218 return seq; \
219 \
220 if (preemptible && unlikely(seq & 1)) { \
221 __SEQ_LOCK(lock_acquire); \
222 __SEQ_LOCK(lockbase##_unlock(s->lock)); \
223 \
224 /* \
225 * Re-read the sequence counter since the (possibly \
226 * preempted) writer made progress. \
227 */ \
228 seq = READ_ONCE(s->seqcount.sequence); \
229 } \
230 \
231 return seq; \
232 } \
233 \
234 static __always_inline bool \
235 __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
236 { \
237 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
238 return preemptible; \
239 \
240 /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
241 return false; \
242 } \
243 \
244 static __always_inline void \
245 __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
246 { \
247 __SEQ_LOCK(lockdep_assert_held(lockmember)); \
248 }
249
250 /*
251 * __seqprop() for seqcount_t
252 */
253
254 static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
255 {
256 return s;
257 }
258
259 static inline unsigned __seqprop_sequence(const seqcount_t *s)
260 {
261 return READ_ONCE(s->sequence);
262 }
263
264 static inline bool __seqprop_preemptible(const seqcount_t *s)
265 {
266 return false;
267 }
268
269 static inline void __seqprop_assert(const seqcount_t *s)
270 {
271 lockdep_assert_preemption_disabled();
272 }
273
274 #define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
275
276 SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
277 SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
278 SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
279 SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
280 SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
281
282 /*
283 * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
284 * @name: Name of the seqcount_LOCKNAME_t instance
285 * @lock: Pointer to the associated LOCKNAME
286 */
287
288 #define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
289 .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
290 __SEQ_LOCK(.lock = (assoc_lock)) \
291 }
292
293 #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
294 #define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
295 #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
296 #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
297 #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
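
/*
 * An illustrative static initialization with an associated lock; the
 * "foo_lock" and "foo_seq" names are hypothetical::
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_spinlock_t foo_seq =
 *		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 */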
298
299 #define __seqprop_case(s, lockname, prop) \
300 seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
301
302 #define __seqprop(s, prop) _Generic(*(s), \
303 seqcount_t: __seqprop_##prop((void *)(s)), \
304 __seqprop_case((s), raw_spinlock, prop), \
305 __seqprop_case((s), spinlock, prop), \
306 __seqprop_case((s), rwlock, prop), \
307 __seqprop_case((s), mutex, prop), \
308 __seqprop_case((s), ww_mutex, prop))
309
310 #define __seqcount_ptr(s) __seqprop(s, ptr)
311 #define __seqcount_sequence(s) __seqprop(s, sequence)
312 #define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
313 #define __seqcount_assert_lock_held(s) __seqprop(s, assert)
314
315 /**
316 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
317 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
318 *
319 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
320 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
321 * provided before actually loading any of the variables that are to be
322 * protected in this critical section.
323 *
324 * Use carefully, only in critical code, and comment how the barrier is
325 * provided.
326 *
327 * Return: count to be passed to read_seqcount_retry()
328 */
329 #define __read_seqcount_begin(s) \
330 ({ \
331 unsigned __seq; \
332 \
333 while ((__seq = __seqcount_sequence(s)) & 1) \
334 cpu_relax(); \
335 \
336 kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
337 __seq; \
338 })
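
/*
 * An illustrative sketch of how the barrier-less read primitives compose;
 * "data_seq" and "shared_data" are hypothetical, and the caller supplies
 * the ordering explicitly::
 *
 *	do {
 *		seq = __read_seqcount_begin(&data_seq);
 *		smp_rmb();	// order the counter load before the data loads
 *		copy = shared_data;
 *		smp_rmb();	// order the data loads before the retry check
 *	} while (__read_seqcount_retry(&data_seq, seq));
 */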
339
340 /**
341 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
342 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
343 *
344 * Return: count to be passed to read_seqcount_retry()
345 */
346 #define raw_read_seqcount_begin(s) \
347 ({ \
348 unsigned _seq = __read_seqcount_begin(s); \
349 \
350 smp_rmb(); \
351 _seq; \
352 })
353
354 /**
355 * read_seqcount_begin() - begin a seqcount_t read critical section
356 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
357 *
358 * Return: count to be passed to read_seqcount_retry()
359 */
360 #define read_seqcount_begin(s) \
361 ({ \
362 seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
363 raw_read_seqcount_begin(s); \
364 })
365
366 /**
367 * raw_read_seqcount() - read the raw seqcount_t counter value
368 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
369 *
370 * raw_read_seqcount opens a read critical section of the given
371 * seqcount_t, without any lockdep checking, and without checking or
372 * masking the sequence counter LSB. Calling code is responsible for
373 * handling that.
374 *
375 * Return: count to be passed to read_seqcount_retry()
376 */
377 #define raw_read_seqcount(s) \
378 ({ \
379 unsigned __seq = __seqcount_sequence(s); \
380 \
381 smp_rmb(); \
382 kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
383 __seq; \
384 })
385
386 /**
387 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
388 * lockdep and w/o counter stabilization
389 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
390 *
391 * raw_seqcount_begin opens a read critical section of the given
392 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
393 * for the count to stabilize. If a writer is active when it begins, it
394 * will fail the read_seqcount_retry() at the end of the read critical
395 * section instead of stabilizing at the beginning of it.
396 *
397 * Use this only in special kernel hot paths where the read section is
398 * small and has a high probability of success through other external
399 * means. It will save a single branching instruction.
400 *
401 * Return: count to be passed to read_seqcount_retry()
402 */
403 #define raw_seqcount_begin(s) \
404 ({ \
405 /* \
406 * If the counter is odd, let read_seqcount_retry() fail \
407 * by decrementing the counter. \
408 */ \
409 raw_read_seqcount(s) & ~1; \
410 })
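
/*
 * An illustrative hot-path sketch: read optimistically without waiting for
 * the counter to stabilize, and fall back to a slower path on failure.
 * "foo_seq", "foo_cache" and foo_slow_path() are hypothetical::
 *
 *	seq = raw_seqcount_begin(&foo_seq);
 *	val = READ_ONCE(foo_cache);
 *	if (read_seqcount_retry(&foo_seq, seq))
 *		return foo_slow_path();	// e.g. take the writer lock and re-read
 *	return val;
 */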
411
412 /**
413 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
414 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
415 * @start: count, from read_seqcount_begin()
416 *
417 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
418 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
419 * provided between the loads of the variables protected by this critical
420 * section and the final re-read of the sequence counter.
421 *
422 * Use carefully, only in critical code, and comment how the barrier is
423 * provided.
424 *
425 * Return: true if a read section retry is required, else false
426 */
427 #define __read_seqcount_retry(s, start) \
428 __read_seqcount_t_retry(__seqcount_ptr(s), start)
429
430 static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
431 {
432 kcsan_atomic_next(0);
433 return unlikely(READ_ONCE(s->sequence) != start);
434 }
435
436 /**
437 * read_seqcount_retry() - end a seqcount_t read critical section
438 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
439 * @start: count, from read_seqcount_begin()
440 *
441 * read_seqcount_retry closes the read critical section of given
442 * seqcount_t. If the critical section was invalid, it must be ignored
443 * (and typically retried).
444 *
445 * Return: true if a read section retry is required, else false
446 */
447 #define read_seqcount_retry(s, start) \
448 read_seqcount_t_retry(__seqcount_ptr(s), start)
449
450 static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
451 {
452 smp_rmb();
453 return __read_seqcount_t_retry(s, start);
454 }
455
456 /**
457 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
458 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
459 */
460 #define raw_write_seqcount_begin(s) \
461 do { \
462 if (__seqcount_lock_preemptible(s)) \
463 preempt_disable(); \
464 \
465 raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
466 } while (0)
467
468 static inline void raw_write_seqcount_t_begin(seqcount_t *s)
469 {
470 kcsan_nestable_atomic_begin();
471 s->sequence++;
472 smp_wmb();
473 }
474
475 /**
476 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
477 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
478 */
479 #define raw_write_seqcount_end(s) \
480 do { \
481 raw_write_seqcount_t_end(__seqcount_ptr(s)); \
482 \
483 if (__seqcount_lock_preemptible(s)) \
484 preempt_enable(); \
485 } while (0)
486
487 static inline void raw_write_seqcount_t_end(seqcount_t *s)
488 {
489 smp_wmb();
490 s->sequence++;
491 kcsan_nestable_atomic_end();
492 }
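
/*
 * An illustrative raw writer sketch: the caller provides its own
 * serialization (a hypothetical raw_spinlock_t here) and skips lockdep by
 * using the raw variants::
 *
 *	raw_spin_lock_irqsave(&stats_lock, flags);
 *	raw_write_seqcount_begin(&stats_seq);
 *	stats.packets++;
 *	stats.bytes += len;
 *	raw_write_seqcount_end(&stats_seq);
 *	raw_spin_unlock_irqrestore(&stats_lock, flags);
 */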
493
494 /**
495 * write_seqcount_begin_nested() - start a seqcount_t write section with
496 * custom lockdep nesting level
497 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
498 * @subclass: lockdep nesting level
499 *
500 * See Documentation/locking/lockdep-design.rst
501 */
502 #define write_seqcount_begin_nested(s, subclass) \
503 do { \
504 __seqcount_assert_lock_held(s); \
505 \
506 if (__seqcount_lock_preemptible(s)) \
507 preempt_disable(); \
508 \
509 write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
510 } while (0)
511
512 static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
513 {
514 raw_write_seqcount_t_begin(s);
515 seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
516 }
517
518 /**
519 * write_seqcount_begin() - start a seqcount_t write side critical section
520 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
521 *
522 * write_seqcount_begin opens a write side critical section of the given
523 * seqcount_t.
524 *
525 * Context: seqcount_t write side critical sections must be serialized and
526 * non-preemptible. If readers can be invoked from hardirq or softirq
527 * context, interrupts or bottom halves must be respectively disabled.
528 */
529 #define write_seqcount_begin(s) \
530 do { \
531 __seqcount_assert_lock_held(s); \
532 \
533 if (__seqcount_lock_preemptible(s)) \
534 preempt_disable(); \
535 \
536 write_seqcount_t_begin(__seqcount_ptr(s)); \
537 } while (0)
538
539 static inline void write_seqcount_t_begin(seqcount_t *s)
540 {
541 write_seqcount_t_begin_nested(s, 0);
542 }
543
544 /**
545 * write_seqcount_end() - end a seqcount_t write side critical section
546 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
547 *
548 * The write section must've been opened with write_seqcount_begin().
549 */
550 #define write_seqcount_end(s) \
551 do { \
552 write_seqcount_t_end(__seqcount_ptr(s)); \
553 \
554 if (__seqcount_lock_preemptible(s)) \
555 preempt_enable(); \
556 } while (0)
557
558 static inline void write_seqcount_t_end(seqcount_t *s)
559 {
560 seqcount_release(&s->dep_map, _RET_IP_);
561 raw_write_seqcount_t_end(s);
562 }
563
564 /**
565 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
566 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
567 *
568 * This can be used to provide an ordering guarantee instead of the usual
569 * consistency guarantee. It is one wmb cheaper, because it can collapse
570 * the two back-to-back wmb()s.
571 *
572 * Note that writes surrounding the barrier should be declared atomic (e.g.
573 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
574 * atomically, avoiding compiler optimizations; b) to document which writes are
575 * meant to propagate to the reader critical section. This is necessary because
576 * neither the writes before nor after the barrier are enclosed in a seq-writer
577 * critical section that would ensure readers are aware of ongoing writes::
578 *
579 * seqcount_t seq;
580 * bool X = true, Y = false;
581 *
582 * void read(void)
583 * {
584 * bool x, y;
585 *
586 * do {
587 * int s = read_seqcount_begin(&seq);
588 *
589 * x = X; y = Y;
590 *
591 * } while (read_seqcount_retry(&seq, s));
592 *
593 * BUG_ON(!x && !y);
594 * }
595 *
596 * void write(void)
597 * {
598 * WRITE_ONCE(Y, true);
599 *
600 * raw_write_seqcount_barrier(seq);
601 *
602 * WRITE_ONCE(X, false);
603 * }
604 */
605 #define raw_write_seqcount_barrier(s) \
606 raw_write_seqcount_t_barrier(__seqcount_ptr(s))
607
608 static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
609 {
610 kcsan_nestable_atomic_begin();
611 s->sequence++;
612 smp_wmb();
613 s->sequence++;
614 kcsan_nestable_atomic_end();
615 }
616
617 /**
618 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
619 * side operations
620 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
621 *
622 * After write_seqcount_invalidate, no seqcount_t read side operations
623 * will complete successfully and see data older than this.
624 */
625 #define write_seqcount_invalidate(s) \
626 write_seqcount_t_invalidate(__seqcount_ptr(s))
627
628 static inline void write_seqcount_t_invalidate(seqcount_t *s)
629 {
630 smp_wmb();
631 kcsan_nestable_atomic_begin();
632 s->sequence += 2;
633 kcsan_nestable_atomic_end();
634 }
635
636 /*
637 * Latch sequence counters (seqcount_latch_t)
638 *
639 * A sequence counter variant where the counter even/odd value is used to
640 * switch between two copies of protected data. This allows the read path,
641 * typically NMIs, to safely interrupt the write side critical section.
642 *
643 * As the write sections are fully preemptible, no special handling for
644 * PREEMPT_RT is needed.
645 */
646 typedef struct {
647 seqcount_t seqcount;
648 } seqcount_latch_t;
649
650 /**
651 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
652 * @seq_name: Name of the seqcount_latch_t instance
653 */
654 #define SEQCNT_LATCH_ZERO(seq_name) { \
655 .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
656 }
657
658 /**
659 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
660 * @s: Pointer to the seqcount_latch_t instance
661 */
662 #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
663
664 /**
665 * raw_read_seqcount_latch() - pick even/odd latch data copy
666 * @s: Pointer to seqcount_latch_t
667 *
668 * See raw_write_seqcount_latch() for details and a full reader/writer
669 * usage example.
670 *
671 * Return: sequence counter raw value. Use the lowest bit as an index for
672 * picking which data copy to read. The full counter must then be checked
673 * with read_seqcount_latch_retry().
674 */
675 static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
676 {
677 /*
678 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
679 * Due to the dependent load, a full smp_rmb() is not needed.
680 */
681 return READ_ONCE(s->seqcount.sequence);
682 }
683
684 /**
685 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
686 * @s: Pointer to seqcount_latch_t
687 * @start: count, from raw_read_seqcount_latch()
688 *
689 * Return: true if a read section retry is required, else false
690 */
691 static inline int
692 read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
693 {
694 return read_seqcount_retry(&s->seqcount, start);
695 }
696
697 /**
698 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
699 * @s: Pointer to seqcount_latch_t
700 *
701 * The latch technique is a multiversion concurrency control method that allows
702 * queries during non-atomic modifications. If you can guarantee queries never
703 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
704 * -- you most likely do not need this.
705 *
706 * Where the traditional RCU/lockless data structures rely on atomic
707 * modifications to ensure queries observe either the old or the new state the
708 * latch allows the same for non-atomic updates. The trade-off is doubling the
709 * cost of storage; we have to maintain two copies of the entire data
710 * structure.
711 *
712 * Very simply put: we first modify one copy and then the other. This ensures
713 * there is always one copy in a stable state, ready to give us an answer.
714 *
715 * The basic form is a data structure like::
716 *
717 * struct latch_struct {
718 * seqcount_latch_t seq;
719 * struct data_struct data[2];
720 * };
721 *
722 * Where a modification, which is assumed to be externally serialized, does the
723 * following::
724 *
725 * void latch_modify(struct latch_struct *latch, ...)
726 * {
727 * smp_wmb(); // Ensure that the last data[1] update is visible
728 * latch->seq.sequence++;
729 * smp_wmb(); // Ensure that the seqcount update is visible
730 *
731 * modify(latch->data[0], ...);
732 *
733 * smp_wmb(); // Ensure that the data[0] update is visible
734 * latch->seq.sequence++;
735 * smp_wmb(); // Ensure that the seqcount update is visible
736 *
737 * modify(latch->data[1], ...);
738 * }
739 *
740 * The query will have a form like::
741 *
742 * struct entry *latch_query(struct latch_struct *latch, ...)
743 * {
744 * struct entry *entry;
745 * unsigned seq, idx;
746 *
747 * do {
748 * seq = raw_read_seqcount_latch(&latch->seq);
749 *
750 * idx = seq & 0x01;
751 * entry = data_query(latch->data[idx], ...);
752 *
753 * // This includes needed smp_rmb()
754 * } while (read_seqcount_latch_retry(&latch->seq, seq));
755 *
756 * return entry;
757 * }
758 *
759 * So during the modification, queries are first redirected to data[1]. Then we
760 * modify data[0]. When that is complete, we redirect queries back to data[0]
761 * and we can modify data[1].
762 *
763 * NOTE:
764 *
765 * The non-requirement for atomic modifications does _NOT_ include
766 * the publishing of new entries in the case where data is a dynamic
767 * data structure.
768 *
769 * An iteration might start in data[0] and get suspended long enough
770 * to miss an entire modification sequence; once it resumes it might
771 * observe the new entry.
772 *
773 * NOTE2:
774 *
775 * When data is a dynamic data structure, one should use regular RCU
776 * patterns to manage the lifetimes of the objects within.
777 */
778 static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
779 {
780 smp_wmb(); /* prior stores before incrementing "sequence" */
781 s->seqcount.sequence++;
782 smp_wmb(); /* increment "sequence" before following stores */
783 }
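
/*
 * For illustration, the latch_modify() example above can be written with
 * this helper instead of the open-coded barriers (same hypothetical
 * structure)::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[0], ...);
 *
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *	}
 */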
784
785 /*
786 * Sequential locks (seqlock_t)
787 *
788 * Sequence counters with an embedded spinlock for writer serialization
789 * and non-preemptibility.
790 *
791 * For more info, see:
792 * - Comments on top of seqcount_t
793 * - Documentation/locking/seqlock.rst
794 */
795 typedef struct {
796 /*
797 * Make sure that readers don't starve writers on PREEMPT_RT: use
798 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
799 */
800 seqcount_spinlock_t seqcount;
801 spinlock_t lock;
802 } seqlock_t;
803
804 #define __SEQLOCK_UNLOCKED(lockname) \
805 { \
806 .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
807 .lock = __SPIN_LOCK_UNLOCKED(lockname) \
808 }
809
810 /**
811 * seqlock_init() - dynamic initializer for seqlock_t
812 * @sl: Pointer to the seqlock_t instance
813 */
814 #define seqlock_init(sl) \
815 do { \
816 spin_lock_init(&(sl)->lock); \
817 seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
818 } while (0)
819
820 /**
821 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
822 * @sl: Name of the seqlock_t instance
823 */
824 #define DEFINE_SEQLOCK(sl) \
825 seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
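
/*
 * A minimal seqlock_t usage sketch (illustrative only; "foo_lock" and
 * "foo_time" are hypothetical)::
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *	static u64 foo_time;
 *
 *	void foo_set_time(u64 t)
 *	{
 *		write_seqlock(&foo_lock);
 *		foo_time = t;
 *		write_sequnlock(&foo_lock);
 *	}
 *
 *	u64 foo_get_time(void)
 *	{
 *		unsigned seq;
 *		u64 t;
 *
 *		do {
 *			seq = read_seqbegin(&foo_lock);
 *			t = foo_time;
 *		} while (read_seqretry(&foo_lock, seq));
 *
 *		return t;
 *	}
 */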
826
827 /**
828 * read_seqbegin() - start a seqlock_t read side critical section
829 * @sl: Pointer to seqlock_t
830 *
831 * Return: count, to be passed to read_seqretry()
832 */
833 static inline unsigned read_seqbegin(const seqlock_t *sl)
834 {
835 unsigned ret = read_seqcount_begin(&sl->seqcount);
836
837 kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
838 kcsan_flat_atomic_begin();
839 return ret;
840 }
841
842 /**
843 * read_seqretry() - end a seqlock_t read side section
844 * @sl: Pointer to seqlock_t
845 * @start: count, from read_seqbegin()
846 *
847 * read_seqretry closes the read side critical section of given seqlock_t.
848 * If the critical section was invalid, it must be ignored (and typically
849 * retried).
850 *
851 * Return: true if a read section retry is required, else false
852 */
853 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
854 {
855 /*
856 * Assume not nested: read_seqretry() may be called multiple times when
857 * completing a read critical section.
858 */
859 kcsan_flat_atomic_end();
860
861 return read_seqcount_retry(&sl->seqcount, start);
862 }
863
864 /*
865 * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
866 * instead of the generic write_seqcount_begin(). This way, no redundant
867 * lockdep_assert_held() checks are added.
868 */
869
870 /**
871 * write_seqlock() - start a seqlock_t write side critical section
872 * @sl: Pointer to seqlock_t
873 *
874 * write_seqlock opens a write side critical section for the given
875 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
876 * that sequential lock. All seqlock_t write side sections are thus
877 * automatically serialized and non-preemptible.
878 *
879 * Context: if the seqlock_t read section, or other write side critical
880 * sections, can be invoked from hardirq or softirq contexts, use the
881 * _irqsave or _bh variants of this function instead.
882 */
883 static inline void write_seqlock(seqlock_t *sl)
884 {
885 spin_lock(&sl->lock);
886 write_seqcount_t_begin(&sl->seqcount.seqcount);
887 }
888
889 /**
890 * write_sequnlock() - end a seqlock_t write side critical section
891 * @sl: Pointer to seqlock_t
892 *
893 * write_sequnlock closes the (serialized and non-preemptible) write side
894 * critical section of given seqlock_t.
895 */
896 static inline void write_sequnlock(seqlock_t *sl)
897 {
898 write_seqcount_t_end(&sl->seqcount.seqcount);
899 spin_unlock(&sl->lock);
900 }
901
902 /**
903 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
904 * @sl: Pointer to seqlock_t
905 *
906 * _bh variant of write_seqlock(). Use only if the read side section, or
907 * other write side sections, can be invoked from softirq contexts.
908 */
909 static inline void write_seqlock_bh(seqlock_t *sl)
910 {
911 spin_lock_bh(&sl->lock);
912 write_seqcount_t_begin(&sl->seqcount.seqcount);
913 }
914
915 /**
916 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
917 * @sl: Pointer to seqlock_t
918 *
919 * write_sequnlock_bh closes the serialized, non-preemptible, and
920 * softirqs-disabled, seqlock_t write side critical section opened with
921 * write_seqlock_bh().
922 */
923 static inline void write_sequnlock_bh(seqlock_t *sl)
924 {
925 write_seqcount_t_end(&sl->seqcount.seqcount);
926 spin_unlock_bh(&sl->lock);
927 }
928
929 /**
930 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
931 * @sl: Pointer to seqlock_t
932 *
933 * _irq variant of write_seqlock(). Use only if the read side section, or
934 * other write sections, can be invoked from hardirq contexts.
935 */
936 static inline void write_seqlock_irq(seqlock_t *sl)
937 {
938 spin_lock_irq(&sl->lock);
939 write_seqcount_t_begin(&sl->seqcount.seqcount);
940 }
941
942 /**
943 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
944 * @sl: Pointer to seqlock_t
945 *
946 * write_sequnlock_irq closes the serialized and non-interruptible
947 * seqlock_t write side section opened with write_seqlock_irq().
948 */
949 static inline void write_sequnlock_irq(seqlock_t *sl)
950 {
951 write_seqcount_t_end(&sl->seqcount.seqcount);
952 spin_unlock_irq(&sl->lock);
953 }
954
955 static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
956 {
957 unsigned long flags;
958
959 spin_lock_irqsave(&sl->lock, flags);
960 write_seqcount_t_begin(&sl->seqcount.seqcount);
961 return flags;
962 }
963
964 /**
965 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
966 * section
967 * @lock: Pointer to seqlock_t
968 * @flags: Stack-allocated storage for saving caller's local interrupt
969 * state, to be passed to write_sequnlock_irqrestore().
970 *
971 * _irqsave variant of write_seqlock(). Use it only if the read side
972 * section, or other write sections, can be invoked from hardirq context.
973 */
974 #define write_seqlock_irqsave(lock, flags) \
975 do { flags = __write_seqlock_irqsave(lock); } while (0)
976
977 /**
978 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
979 * section
980 * @sl: Pointer to seqlock_t
981 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
982 *
983 * write_sequnlock_irqrestore closes the serialized and non-interruptible
984 * seqlock_t write section previously opened with write_seqlock_irqsave().
985 */
986 static inline void
987 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
988 {
989 write_seqcount_t_end(&sl->seqcount.seqcount);
990 spin_unlock_irqrestore(&sl->lock, flags);
991 }
992
993 /**
994 * read_seqlock_excl() - begin a seqlock_t locking reader section
995 * @sl: Pointer to seqlock_t
996 *
997 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
998 * locking reader exclusively locks out *both* other writers *and* other
999 * locking readers, but it does not update the embedded sequence number.
1000 *
1001 * Locking readers act like a normal spin_lock()/spin_unlock().
1002 *
1003 * Context: if the seqlock_t write section, *or other read sections*, can
1004 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1005 * variant of this function instead.
1006 *
1007 * The opened read section must be closed with read_sequnlock_excl().
1008 */
1009 static inline void read_seqlock_excl(seqlock_t *sl)
1010 {
1011 spin_lock(&sl->lock);
1012 }
1013
1014 /**
1015 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
1016 * @sl: Pointer to seqlock_t
1017 */
1018 static inline void read_sequnlock_excl(seqlock_t *sl)
1019 {
1020 spin_unlock(&sl->lock);
1021 }
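
/*
 * An illustrative locking-reader sketch (no retry loop is needed because
 * writers are locked out); "foo_lock" and "foo_time" are hypothetical::
 *
 *	read_seqlock_excl(&foo_lock);
 *	t = foo_time;
 *	read_sequnlock_excl(&foo_lock);
 */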
1022
1023 /**
1024 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
1025 * softirqs disabled
1026 * @sl: Pointer to seqlock_t
1027 *
1028 * _bh variant of read_seqlock_excl(). Use this variant only if the
1029 * seqlock_t write side section, *or other read sections*, can be invoked
1030 * from softirq contexts.
1031 */
1032 static inline void read_seqlock_excl_bh(seqlock_t *sl)
1033 {
1034 spin_lock_bh(&sl->lock);
1035 }
1036
1037 /**
1038 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
1039 * reader section
1040 * @sl: Pointer to seqlock_t
1041 */
1042 static inline void read_sequnlock_excl_bh(seqlock_t *sl)
1043 {
1044 spin_unlock_bh(&sl->lock);
1045 }
1046
1047 /**
1048 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
1049 * reader section
1050 * @sl: Pointer to seqlock_t
1051 *
1052 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
1053 * write side section, *or other read sections*, can be invoked from a
1054 * hardirq context.
1055 */
1056 static inline void read_seqlock_excl_irq(seqlock_t *sl)
1057 {
1058 spin_lock_irq(&sl->lock);
1059 }
1060
1061 /**
1062 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
1063 * locking reader section
1064 * @sl: Pointer to seqlock_t
1065 */
1066 static inline void read_sequnlock_excl_irq(seqlock_t *sl)
1067 {
1068 spin_unlock_irq(&sl->lock);
1069 }
1070
1071 static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
1072 {
1073 unsigned long flags;
1074
1075 spin_lock_irqsave(&sl->lock, flags);
1076 return flags;
1077 }
1078
1079 /**
1080 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
1081 * locking reader section
1082 * @lock: Pointer to seqlock_t
1083 * @flags: Stack-allocated storage for saving caller's local interrupt
1084 * state, to be passed to read_sequnlock_excl_irqrestore().
1085 *
1086 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
1087 * write side section, *or other read sections*, can be invoked from a
1088 * hardirq context.
1089 */
1090 #define read_seqlock_excl_irqsave(lock, flags) \
1091 do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
1092
1093 /**
1094 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
1095 * locking reader section
1096 * @sl: Pointer to seqlock_t
1097 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
1098 */
1099 static inline void
1100 read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
1101 {
1102 spin_unlock_irqrestore(&sl->lock, flags);
1103 }
1104
1105 /**
1106 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
1107 * @lock: Pointer to seqlock_t
1108 * @seq : Marker and return parameter. If the passed value is even, the
1109 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
1110 * If the passed value is odd, the reader will become a *locking* reader
1111 * as in read_seqlock_excl(). In the first call to this function, the
1112 * caller *must* initialize and pass an even value to @seq; this way, a
1113 * lockless read can be optimistically tried first.
1114 *
1115 * read_seqbegin_or_lock is an API designed to optimistically try a normal
1116 * lockless seqlock_t read section first. If an odd counter is found, the
1117 * lockless read trial has failed, and the next read iteration transforms
1118 * itself into a full seqlock_t locking reader.
1119 *
1120 * This is typically used to avoid seqlock_t lockless reader starvation
1121 * (too many retry loops) in the case of a sharp spike in write side
1122 * activity; a usage sketch follows done_seqretry() below.
1123 *
1124 * Context: if the seqlock_t write section, *or other read sections*, can
1125 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1126 * variant of this function instead.
1127 *
1128 * Check Documentation/locking/seqlock.rst for template example code.
1129 *
1130 * Return: the encountered sequence counter value, through the @seq
1131 * parameter, which is overloaded as a return parameter. This returned
1132 * value must be checked with need_seqretry(). If the read section needs to
1133 * be retried, this returned value must also be passed as the @seq
1134 * parameter of the next read_seqbegin_or_lock() iteration.
1135 */
1136 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
1137 {
1138 if (!(*seq & 1)) /* Even */
1139 *seq = read_seqbegin(lock);
1140 else /* Odd */
1141 read_seqlock_excl(lock);
1142 }
1143
1144 /**
1145 * need_seqretry() - validate seqlock_t "locking or lockless" read section
1146 * @lock: Pointer to seqlock_t
1147 * @seq: sequence count, from read_seqbegin_or_lock()
1148 *
1149 * Return: true if a read section retry is required, false otherwise
1150 */
1151 static inline int need_seqretry(seqlock_t *lock, int seq)
1152 {
1153 return !(seq & 1) && read_seqretry(lock, seq);
1154 }
1155
1156 /**
1157 * done_seqretry() - end seqlock_t "locking or lockless" reader section
1158 * @lock: Pointer to seqlock_t
1159 * @seq: count, from read_seqbegin_or_lock()
1160 *
1161 * done_seqretry finishes the seqlock_t read side critical section started
1162 * with read_seqbegin_or_lock() and validated by need_seqretry().
1163 */
1164 static inline void done_seqretry(seqlock_t *lock, int seq)
1165 {
1166 if (seq & 1)
1167 read_sequnlock_excl(lock);
1168 }
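
/*
 * An illustrative sketch of the lockless-then-locking pattern built from
 * read_seqbegin_or_lock(), need_seqretry() and done_seqretry(); the names
 * are hypothetical::
 *
 *	int seq = 0;		// even: first pass is a lockless reader
 *
 *  retry:
 *	read_seqbegin_or_lock(&foo_lock, &seq);
 *
 *	snapshot = foo_state;
 *
 *	if (need_seqretry(&foo_lock, seq)) {
 *		seq = 1;	// odd: the second pass becomes a locking reader
 *		goto retry;
 *	}
 *	done_seqretry(&foo_lock, seq);
 */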
1169
1170 /**
1171 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
1172 * a non-interruptible locking reader
1173 * @lock: Pointer to seqlock_t
1174 * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
1175 *
1176 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
1177 * the seqlock_t write section, *or other read sections*, can be invoked
1178 * from hardirq context.
1179 *
1180 * Note: Interrupts will be disabled only for "locking reader" mode.
1181 *
1182 * Return:
1183 *
1184 * 1. The saved local interrupts state in case of a locking reader, to
1185 * be passed to done_seqretry_irqrestore().
1186 *
1187 * 2. The encountered sequence counter value, returned through @seq
1188 * overloaded as a return parameter. Check read_seqbegin_or_lock().
1189 */
1190 static inline unsigned long
1191 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
1192 {
1193 unsigned long flags = 0;
1194
1195 if (!(*seq & 1)) /* Even */
1196 *seq = read_seqbegin(lock);
1197 else /* Odd */
1198 read_seqlock_excl_irqsave(lock, flags);
1199
1200 return flags;
1201 }
1202
1203 /**
1204 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
1205 * non-interruptible locking reader section
1206 * @lock: Pointer to seqlock_t
1207 * @seq: Count, from read_seqbegin_or_lock_irqsave()
1208 * @flags: Caller's saved local interrupt state in case of a locking
1209 * reader, also from read_seqbegin_or_lock_irqsave()
1210 *
1211 * This is the _irqrestore variant of done_seqretry(). The read section
1212 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
1213 * by need_seqretry().
1214 */
1215 static inline void
1216 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
1217 {
1218 if (seq & 1)
1219 read_sequnlock_excl_irqrestore(lock, flags);
1220 }
1221 #endif /* __LINUX_SEQLOCK_H */
1222