// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>

#include <trace/hooks/dtask.h>

/*
 * Calling trace_android_vh_record_pcpu_rwsem_starttime() directly from
 * include/linux/percpu-rwsem.h would require including trace/hooks/dtask.h
 * there, which results in a build error. Instead we provide the wrapper
 * _trace_android_vh_record_pcpu_rwsem_starttime() for percpu-rwsem.h to call.
 */
void _trace_android_vh_record_pcpu_rwsem_starttime(struct task_struct *tsk,
		unsigned long settime)
{
	trace_android_vh_record_pcpu_rwsem_starttime(tsk, settime);
}
EXPORT_SYMBOL_GPL(_trace_android_vh_record_pcpu_rwsem_starttime);
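
/*
 * Illustrative sketch (not part of this file) of how the header-side
 * inlines in include/linux/percpu-rwsem.h can use the wrapper above to
 * record per-task hold times without including trace/hooks/dtask.h; the
 * exact call sites in the header may differ:
 *
 *	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 *	{
 *		...
 *		_trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
 *	}
 */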

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	rcu_sync_init(&sem->rss);
	rcuwait_init(&sem->writer);
	init_waitqueue_head(&sem->waiters);
	atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
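
/*
 * Example (illustrative; 'my_sem' is hypothetical): dynamic users call the
 * percpu_init_rwsem() wrapper macro, which supplies the lockdep name and
 * key, and pair it with percpu_free_rwsem():
 *
 *	static struct percpu_rw_semaphore my_sem;
 *
 *	if (percpu_init_rwsem(&my_sem))
 *		return -ENOMEM;
 *	...
 *	percpu_free_rwsem(&my_sem);
 *
 * Statically defined semaphores use DEFINE_STATIC_PERCPU_RWSEM(my_sem)
 * instead and need no explicit init or free.
 */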

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	this_cpu_inc(*sem->read_count);

	/*
	 * Due to having preemption disabled, the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of sem->block, then the
	 * writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the sem->block value, which
	 * in turn means that they are guaranteed to immediately decrement
	 * their sem->read_count, so that it doesn't matter that the writer
	 * missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !sem->block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

	this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}
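
/*
 * Worked example of the ordering argument above (illustrative): the writer
 * sets sem->block (D) and then sums the per-CPU counters, while a reader
 * does this_cpu_inc(); smp_mb() (A); atomic_read_acquire(&sem->block).
 * Only two outcomes are possible:
 *
 *  - the reader sees block == 1: it decrements again and backs off, so the
 *    writer may safely miss the transient increment;
 *  - the reader sees block == 0: the A/D pairing guarantees the writer's
 *    sum observes the increment, so the writer waits for this reader.
 */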

static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
	if (atomic_read(&sem->block))
		return false;

	return atomic_xchg(&sem->block, 1) == 0;
}

static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
	if (reader) {
		bool ret;

		preempt_disable();
		ret = __percpu_down_read_trylock(sem);
		preempt_enable();

		return ret;
	}
	return __percpu_down_write_trylock(sem);
}

/*
 * The return value of wait_queue_entry::func means:
 *
 *  <0 - error, wakeup is terminated and the error is returned
 *   0 - no wakeup, a next waiter is tried
 *  >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
 *
 * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 * and play games with the return value to allow waking multiple readers.
 *
 * Specifically, we wake readers until we've woken a single writer, or until a
 * trylock fails.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int wake_flags,
				      void *key)
{
	bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
	struct percpu_rw_semaphore *sem = key;
	struct task_struct *p;

	/* concurrent against percpu_down_write(), can get stolen */
	if (!__percpu_rwsem_trylock(sem, reader))
		return 1;

	p = get_task_struct(wq_entry->private);
	list_del_init(&wq_entry->entry);
	smp_store_release(&wq_entry->private, NULL);

	wake_up_process(p);
	put_task_struct(p);

	return !reader; /* wake (readers until) 1 writer */
}
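
/*
 * Worked example (illustrative): with waiters queued FIFO as
 * [R1, R2, W, R3], the __wake_up(.., nr_exclusive = 1, ..) in
 * percpu_up_write() invokes this function on each entry in order: R1 and
 * R2 take the read lock and return 0 (woken, not counted), W takes the
 * write lock and returns 1 (counted, wakeup stops), and R3 remains queued
 * behind the new writer.
 */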

static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
	DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
	bool wait;

	spin_lock_irq(&sem->waiters.lock);
	/*
	 * Serialize against the wakeup in percpu_up_write(); if we fail
	 * the trylock, the wakeup must see us on the list.
	 */
	wait = !__percpu_rwsem_trylock(sem, reader);
	if (wait) {
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
		trace_android_vh_percpu_rwsem_wq_add(sem, reader);
	}
	spin_unlock_irq(&sem->waiters.lock);

	while (wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!smp_load_acquire(&wq_entry.private))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	preempt_enable();
	percpu_rwsem_wait(sem, /* .reader = */ true);
	preempt_disable();

	return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})
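
/*
 * Example (illustrative): a reader that takes the lock on CPU0 but is
 * migrated and releases it on CPU1 leaves per-CPU counts of {+1, -1, 0, ...};
 * per_cpu_sum() still yields 0, which is why only the modular sum of
 * sem->read_count, not any individual counter, is meaningful.
 */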

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 *
 * Assumes sem->block is set.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	/*
	 * Try to set sem->block; this provides writer-writer exclusion.
	 * Having sem->block set makes new readers block.
	 */
	if (!__percpu_down_write_trylock(sem))
		percpu_rwsem_wait(sem, /* .reader = */ false);

	/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */

	/*
	 * If the readers don't see our store of sem->block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
	trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Signal that the writer is done; there is no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	atomic_set_release(&sem->block, 0);

	/*
	 * Prod any pending reader/writer to make progress.
	 */
	__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
	trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
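
/*
 * Example usage (illustrative sketch; 'my_sem' is hypothetical):
 *
 *	reader				writer
 *	------				------
 *	percpu_down_read(&my_sem);	percpu_down_write(&my_sem);
 *	... data is stable ...		... modify data ...
 *	percpu_up_read(&my_sem);	percpu_up_write(&my_sem);
 *
 * In the common case readers pay only a per-CPU increment; the writer pays
 * an RCU grace period in rcu_sync_enter() plus a wait for all active
 * readers to drain.
 */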

static LIST_HEAD(destroy_list);
static DEFINE_SPINLOCK(destroy_list_lock);

static void destroy_list_workfn(struct work_struct *work)
{
	struct percpu_rw_semaphore *sem, *sem2;
	LIST_HEAD(to_destroy);

	spin_lock(&destroy_list_lock);
	list_splice_init(&destroy_list, &to_destroy);
	spin_unlock(&destroy_list_lock);

	if (list_empty(&to_destroy))
		return;

	list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
		percpu_free_rwsem(sem);
		kfree(sem);
	}
}

static DECLARE_WORK(destroy_list_work, destroy_list_workfn);

void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem)
{
	spin_lock(&destroy_list_lock);
	list_add_tail(&sem->destroy_list_entry, &destroy_list);
	spin_unlock(&destroy_list_lock);
	schedule_work(&destroy_list_work);
}
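
/*
 * Example (illustrative sketch): because the work function kfree()s the
 * semaphore, percpu_rwsem_async_destroy() is only suitable for semaphores
 * that were dynamically allocated, e.g.:
 *
 *	struct percpu_rw_semaphore *sem;
 *
 *	sem = kzalloc(sizeof(*sem), GFP_KERNEL);
 *	if (!sem)
 *		return -ENOMEM;
 *	if (percpu_init_rwsem(sem)) {
 *		kfree(sem);
 *		return -ENOMEM;
 *	}
 *	...
 *	percpu_rwsem_async_destroy(sem);	(sem is freed later from a workqueue)
 */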