/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct task_struct;

void _trace_android_vh_record_pcpu_rwsem_starttime(
                struct task_struct *tsk, unsigned long settime);

struct percpu_rw_semaphore {
        struct rcu_sync         rss;
        unsigned int __percpu   *read_count;
        struct rcuwait          writer;
        wait_queue_head_t       waiters;
        atomic_t                block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

void _trace_android_vh_record_pcpu_rwsem_time_early(
                unsigned long settime, struct percpu_rw_semaphore *sem);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)   .dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)                         \
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);         \
is_static struct percpu_rw_semaphore name = {                          \
        .rss = __RCU_SYNC_INITIALIZER(name.rss),                       \
        .read_count = &__percpu_rwsem_rc_##name,                       \
        .writer = __RCUWAIT_INITIALIZER(name.writer),                  \
        .waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),        \
        .block = ATOMIC_INIT(0),                                       \
        __PERCPU_RWSEM_DEP_MAP_INIT(name)                              \
}

#define DEFINE_PERCPU_RWSEM(name)               \
        __DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)        \
        __DEFINE_PERCPU_RWSEM(name, static)
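
/*
 * Illustrative sketch (not part of this header): defining a file-scope
 * per-CPU rw-semaphore.  The name "foo_rwsem" is hypothetical.
 *
 *      DEFINE_STATIC_PERCPU_RWSEM(foo_rwsem);
 *
 * The macro emits both the static per-CPU reader counter and a statically
 * initialized struct percpu_rw_semaphore, so a semaphore defined this way
 * needs no percpu_init_rwsem() / percpu_free_rwsem() calls.
 */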

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
        might_sleep();

        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

        preempt_disable();
        _trace_android_vh_record_pcpu_rwsem_time_early(jiffies, sem);

        /*
         * We are in an RCU-sched read-side critical section, so the writer
         * cannot both change the rcu_sync state away from idle and start
         * checking counters while we are here. So if we see the fast path
         * below (rcu_sync_is_idle()), we know that the writer won't be
         * checking until we're past the preempt_enable() and that once the
         * synchronize_rcu() is done, the writer will see anything we did
         * within this RCU-sched read-side critical section.
         */
        if (likely(rcu_sync_is_idle(&sem->rss)))
                this_cpu_inc(*sem->read_count);
        else
                __percpu_down_read(sem, false); /* Unconditional memory barrier */
        /*
         * The preempt_enable() prevents the compiler from
         * bleeding the critical section out.
         */
        preempt_enable();
        _trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
}
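
/*
 * Illustrative sketch (not part of this header): a reader-side critical
 * section pairs percpu_down_read() with percpu_up_read().  All names in the
 * snippet are hypothetical.
 *
 *      percpu_down_read(&foo_rwsem);
 *      list_for_each_entry(p, &foo_list, node)
 *              do_something(p);
 *      percpu_up_read(&foo_rwsem);
 *
 * In the common case (no writer around) this costs little more than a
 * per-CPU counter increment under preempt_disable(); the __percpu_down_read()
 * slow path only runs while a writer has moved the rcu_sync out of its idle
 * state.
 */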

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
        bool ret = true;

        preempt_disable();
        /*
         * Same as in percpu_down_read().
         */
        if (likely(rcu_sync_is_idle(&sem->rss)))
                this_cpu_inc(*sem->read_count);
        else
                ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
        preempt_enable();
        /*
         * The barrier() from preempt_enable() prevents the compiler from
         * bleeding the critical section out.
         */

        if (ret) {
                _trace_android_vh_record_pcpu_rwsem_time_early(jiffies, sem);
                _trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
                rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
        }

        return ret;
}
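
/*
 * Illustrative sketch (not part of this header): the trylock variant never
 * sleeps, so a caller that must not block on a pending writer can bail out
 * instead.  The name "foo_rwsem" is hypothetical.
 *
 *      if (!percpu_down_read_trylock(&foo_rwsem))
 *              return -EBUSY;
 *      ... read-side work ...
 *      percpu_up_read(&foo_rwsem);
 */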

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
        rwsem_release(&sem->dep_map, _RET_IP_);

        preempt_disable();
        /*
         * Same as in percpu_down_read().
         */
        if (likely(rcu_sync_is_idle(&sem->rss))) {
                this_cpu_dec(*sem->read_count);
        } else {
                /*
                 * slowpath; reader will only ever wake a single blocked
                 * writer.
                 */
                smp_mb(); /* B matches C */
                /*
                 * In other words, if they see our decrement (presumably to
                 * aggregate zero, as that is the only time it matters) they
                 * will also see our critical section.
                 */
                this_cpu_dec(*sem->read_count);
                rcuwait_wake_up(&sem->writer);
        }
        _trace_android_vh_record_pcpu_rwsem_time_early(0, sem);
        _trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
        preempt_enable();
}

extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
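
/*
 * Illustrative sketch (not part of this header): the writer side excludes
 * all readers for the duration of its critical section and may have to wait
 * for an RCU grace period plus the draining of in-flight readers, so it is
 * expected to be slow and infrequent.  The names below are hypothetical.
 *
 *      percpu_down_write(&foo_rwsem);
 *      update_foo_state();
 *      percpu_up_write(&foo_rwsem);
 */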

static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
{
        return atomic_read(&sem->block);
}
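
/*
 * Illustrative sketch (not part of this header): percpu_is_read_locked() and
 * percpu_is_write_locked() only report the state at the moment of the call,
 * so they fit debug assertions rather than locking decisions.  The name
 * "foo_rwsem" is hypothetical.
 *
 *      WARN_ON_ONCE(!percpu_is_write_locked(&foo_rwsem));
 */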

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
                                const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)                                  \
({                                                              \
        static struct lock_class_key rwsem_key;                 \
        __percpu_init_rwsem(sem, #sem, &rwsem_key);             \
})
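
/*
 * Illustrative sketch (not part of this header): a semaphore embedded in a
 * dynamically allocated object must be set up with percpu_init_rwsem(),
 * which allocates the per-CPU reader counter and returns 0 or -ENOMEM, and
 * torn down with percpu_free_rwsem().  The "struct foo" names are
 * hypothetical.
 *
 *      struct foo {
 *              struct percpu_rw_semaphore rwsem;
 *      };
 *
 *      int foo_setup(struct foo *f)
 *      {
 *              return percpu_init_rwsem(&f->rwsem);
 *      }
 *
 *      void foo_teardown(struct foo *f)
 *      {
 *              percpu_free_rwsem(&f->rwsem);
 *      }
 */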

#define percpu_rwsem_is_held(sem)       lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)   lockdep_assert_held(sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
{
        lock_release(&sem->dep_map, ip);
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
{
        lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
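
/*
 * Illustrative sketch (not part of this header): percpu_rwsem_release() and
 * percpu_rwsem_acquire() only adjust lockdep's bookkeeping; they do not
 * change the lock state itself.  A typical use is handing a held lock from
 * one context to another, e.g. dropping the annotation before returning to
 * user space and re-adding it in whichever task finally releases the lock.
 * The name "foo_rwsem" is hypothetical.
 *
 *      percpu_rwsem_release(&foo_rwsem, false, _THIS_IP_);
 *      ... ownership passes to another context ...
 *      percpu_rwsem_acquire(&foo_rwsem, false, _THIS_IP_);
 */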

#endif