// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
#include <trace/events/lock.h>

#include <trace/hooks/dtask.h>

/*
 * trace_android_vh_record_pcpu_rwsem_starttime is called from
 * include/linux/percpu-rwsem.h; calling the hook there directly would
 * require including trace/hooks/dtask.h from that header, which breaks
 * the build. So we provide the wrapper
 * _trace_android_vh_record_pcpu_rwsem_starttime() for percpu-rwsem.h to call.
 */
void _trace_android_vh_record_pcpu_rwsem_starttime(struct task_struct *tsk,
		unsigned long settime)
{
	trace_android_vh_record_pcpu_rwsem_starttime(tsk, settime);
}
EXPORT_SYMBOL_GPL(_trace_android_vh_record_pcpu_rwsem_starttime);

/*
 * trace_android_vh_record_pcpu_rwsem_time_early is called from
 * include/linux/percpu-rwsem.h; calling the hook there directly would
 * require including trace/hooks/dtask.h from that header, which breaks
 * the build. So we provide the wrapper
 * _trace_android_vh_record_pcpu_rwsem_time_early() for percpu-rwsem.h to call.
 */
void _trace_android_vh_record_pcpu_rwsem_time_early(
		unsigned long settime, struct percpu_rw_semaphore *sem)
{
	trace_android_vh_record_pcpu_rwsem_time_early(settime, sem);
}
EXPORT_SYMBOL_GPL(_trace_android_vh_record_pcpu_rwsem_time_early);
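
/*
 * Illustrative sketch of the intended split (the exact call sites in
 * include/linux/percpu-rwsem.h are assumptions, not quoted from that
 * header): the header only needs extern declarations of the wrappers,
 * so it never has to include trace/hooks/dtask.h itself.
 *
 *	// include/linux/percpu-rwsem.h (hypothetical excerpt)
 *	extern void _trace_android_vh_record_pcpu_rwsem_starttime(
 *			struct task_struct *tsk, unsigned long settime);
 *
 *	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 *	{
 *		...
 *		_trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
 *	}
 */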

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	rcu_sync_init(&sem->rss);
	rcuwait_init(&sem->writer);
	init_waitqueue_head(&sem->waiters);
	atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
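
/*
 * Illustrative init/free pairing (sketch; struct my_data and its
 * functions are hypothetical, not from this file):
 *
 *	struct my_data {
 *		struct percpu_rw_semaphore rwsem;
 *	};
 *
 *	static int my_data_init(struct my_data *d)
 *	{
 *		return percpu_init_rwsem(&d->rwsem);
 *	}
 *
 *	static void my_data_destroy(struct my_data *d)
 *	{
 *		percpu_free_rwsem(&d->rwsem);
 *	}
 *
 * percpu_init_rwsem() is the usual entry point; it supplies the static
 * lock_class_key that __percpu_init_rwsem() takes for lockdep.
 */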

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	this_cpu_inc(*sem->read_count);

	/*
	 * Due to having preemption disabled the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of sem->block, then the
	 * writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the sem->block value, which
	 * in turn means that they are guaranteed to immediately decrement
	 * their sem->read_count, so that it doesn't matter that the writer
	 * missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !sem->block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

	this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}
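
/*
 * Sketch of the A/D pairing described above (my reading of the comments;
 * the interleaving shown is illustrative, not exhaustive):
 *
 *	reader					writer
 *	------					------
 *	this_cpu_inc(*sem->read_count);		atomic_xchg(&sem->block, 1);
 *	smp_mb();		// A		// implies smp_mb() -- D
 *	atomic_read_acquire(&sem->block);	per_cpu_sum(*sem->read_count);
 *
 * With A and D both full barriers, either the reader observes block == 1
 * and backs its increment out, or the writer's sum observes the increment
 * and waits in readers_active_check(); the two sides cannot both miss
 * each other.
 */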

static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
	if (atomic_read(&sem->block))
		return false;

	return atomic_xchg(&sem->block, 1) == 0;
}

static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
	if (reader) {
		bool ret;

		preempt_disable();
		ret = __percpu_down_read_trylock(sem);
		preempt_enable();

		return ret;
	}
	return __percpu_down_write_trylock(sem);
}

/*
 * The return value of wait_queue_entry::func means:
 *
 *  <0 - error, wakeup is terminated and the error is returned
 *   0 - no wakeup, a next waiter is tried
 *  >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
 *
 * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 * and play games with the return value to allow waking multiple readers.
 *
 * Specifically, we wake readers until we've woken a single writer, or until a
 * trylock fails.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int wake_flags,
				      void *key)
{
	bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
	struct percpu_rw_semaphore *sem = key;
	struct task_struct *p;

	/* concurrent against percpu_down_write(), can get stolen */
	if (!__percpu_rwsem_trylock(sem, reader))
		return 1;

	p = get_task_struct(wq_entry->private);
	list_del_init(&wq_entry->entry);
	smp_store_release(&wq_entry->private, NULL);

	wake_up_process(p);
	put_task_struct(p);

	return !reader; /* wake (readers until) 1 writer */
}
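
/*
 * Worked example of the wake policy (illustrative; the queue contents are
 * hypothetical): with waiters queued FIFO as R1, R2, W1, R3, a single
 * __wake_up(..., nr_exclusive = 1, ...) pass returns 0 for R1 and R2
 * (woken, but not counted against @nr_exclusive), then 1 for W1 (woken
 * and counted), and stops. R3 stays queued behind the writer, preserving
 * FIFO fairness.
 */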

static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
	DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
	bool wait;

	spin_lock_irq(&sem->waiters.lock);
	/*
	 * Serialize against the wakeup in percpu_up_write(); if we fail
	 * the trylock, the wakeup must see us on the list.
	 */
	wait = !__percpu_rwsem_trylock(sem, reader);
	if (wait) {
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
		trace_android_vh_percpu_rwsem_wq_add(sem, reader);
	}
	spin_unlock_irq(&sem->waiters.lock);

	while (wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!smp_load_acquire(&wq_entry.private))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_READ);
	preempt_enable();
	percpu_rwsem_wait(sem, /* .reader = */ true);
	preempt_disable();
	trace_contention_end(sem, 0);

	return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
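
/*
 * Illustrative reader-side pairing (sketch; d is a hypothetical caller
 * structure). Callers normally go through the percpu_down_read()/
 * percpu_up_read() wrappers in percpu-rwsem.h, which wrap the rcu_sync
 * fast path and preemption handling around __percpu_down_read():
 *
 *	percpu_down_read(&d->rwsem);
 *	... read-side critical section ...
 *	percpu_up_read(&d->rwsem);
 */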

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})
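
/*
 * Worked example (hypothetical values): on a 4-CPU system where the
 * per-CPU read_count slots hold { 2, 0, -1, 0 } -- e.g. because one
 * reader locked on CPU2 but migrated and unlocked on CPU0 --
 * per_cpu_sum(*sem->read_count) returns 1, the number of readers
 * currently holding the lock. Individual slots may go negative; only
 * the sum is meaningful.
 */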

bool percpu_is_read_locked(struct percpu_rw_semaphore *sem)
{
	return per_cpu_sum(*sem->read_count) != 0 && !atomic_read(&sem->block);
}
EXPORT_SYMBOL_GPL(percpu_is_read_locked);

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 *
 * Assumes sem->block is set.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);

	trace_android_vh_record_pcpu_rwsem_time_early(jiffies, sem);

	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	/*
	 * Try to set sem->block; this provides writer-writer exclusion.
	 * Having sem->block set makes new readers block.
	 */
	if (!__percpu_down_write_trylock(sem))
		percpu_rwsem_wait(sem, /* .reader = */ false);

	/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */

	/*
	 * If the readers don't see our store of sem->block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
	trace_contention_end(sem, 0);
	trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
}
EXPORT_SYMBOL_GPL(percpu_down_write);
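
/*
 * Illustrative writer-side pairing (sketch; d is a hypothetical caller
 * structure embedding the semaphore):
 *
 *	percpu_down_write(&d->rwsem);	// excludes writers, drains readers
 *	... update data covered by the semaphore ...
 *	percpu_up_write(&d->rwsem);	// reader fast path returns after a
 *					// later RCU-sched grace period
 */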

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Signal that the writer is done, no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	atomic_set_release(&sem->block, 0);

	/*
	 * Prod any pending reader/writer to make progress.
	 */
	__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
	trace_android_vh_record_pcpu_rwsem_time_early(0, sem);
	trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
}
EXPORT_SYMBOL_GPL(percpu_up_write);