// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>

struct once_work {
	struct work_struct work;
	struct static_key_true *key;
};

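/*
 * Disabling a static key patches kernel text and may sleep (it takes the
 * jump-label mutex), while DO_ONCE() callers may be in atomic context.
 * The disable is therefore deferred: once_disable_jump() queues a
 * once_work item and once_deferred() flips the key from process context.
 */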
static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_branch_disable(work->key);
	kfree(work);
}

static void once_disable_jump(struct static_key_true *key)
{
	struct once_work *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	schedule_work(&w->work);
}

static DEFINE_SPINLOCK(once_lock);

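/*
 * Fast-path helpers behind the DO_ONCE() macro in <linux/once.h>.  Each
 * DO_ONCE() call site carries its own static key and 'done' flag; while
 * the key is still enabled, __do_once_start() serializes racing callers
 * with once_lock (IRQ-safe, so any context works) and reports whether
 * this caller should run the once-only function.  Losers see *done
 * already set and bail out.  __do_once_done() then drops the lock and
 * kicks off the deferred static-key disable so later calls skip the
 * whole path via the patched branch.
 */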
bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);

void __do_once_done(bool *done, struct static_key_true *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);

static DEFINE_MUTEX(once_mutex);

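/*
 * Sleepable counterparts used by DO_ONCE_SLOW(): same scheme as above,
 * but serialized with a mutex instead of the IRQ-disabling spinlock, so
 * the once-only function itself may sleep.
 */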
bool __do_once_slow_start(bool *done)
	__acquires(once_mutex)
{
	mutex_lock(&once_mutex);
	if (*done) {
		mutex_unlock(&once_mutex);
		/* Keep sparse happy by restoring an even lock count on
		 * this mutex. In case we return here, we don't call into
		 * __do_once_slow_done but return early in the DO_ONCE_SLOW()
		 * macro.
		 */
		__acquire(once_mutex);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_slow_start);

void __do_once_slow_done(bool *done, struct static_key_true *once_key,
			 struct module *mod)
	__releases(once_mutex)
{
	*done = true;
	mutex_unlock(&once_mutex);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_slow_done);

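/*
 * Illustrative usage sketch (not part of lib/once.c): how a caller ends
 * up in the helpers above.  get_random_once() in <linux/once.h> expands
 * to DO_ONCE(get_random_bytes, ...), which tests the per-call-site
 * static key, runs __do_once_start()/__do_once_done() around the first
 * call, and is patched to a no-op afterwards.  The names demo_secret and
 * demo_hash below are hypothetical, not kernel APIs.
 */
#include <linux/jhash.h>
#include <linux/once.h>
#include <linux/random.h>
#include <linux/types.h>

static u32 demo_secret;

static u32 demo_hash(u32 val)
{
	/* First caller seeds demo_secret; later calls skip the branch. */
	get_random_once(&demo_secret, sizeof(demo_secret));
	return jhash_1word(val, demo_secret);
}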