/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STATIC_CALL_H
#define _LINUX_STATIC_CALL_H

/*
 * Static call support
 *
 * Static calls use code patching to hard-code function pointers into direct
 * branch instructions. They give the flexibility of function pointers, but
 * with improved performance. This is especially important for cases where
 * retpolines would otherwise be used, as retpolines can significantly impact
 * performance.
 *
 *
 * API overview:
 *
 *   DECLARE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL_NULL(name, typename);
 *   static_call(name)(args...);
 *   static_call_cond(name)(args...);
 *   static_call_update(name, func);
 *
 * Usage example:
 *
 *   # Start with the following functions (with identical prototypes):
 *   int func_a(int arg1, int arg2);
 *   int func_b(int arg1, int arg2);
 *
 *   # Define a 'my_name' reference, associated with func_a() by default
 *   DEFINE_STATIC_CALL(my_name, func_a);
 *
 *   # Call func_a()
 *   static_call(my_name)(arg1, arg2);
 *
 *   # Update 'my_name' to point to func_b()
 *   static_call_update(my_name, &func_b);
 *
 *   # Call func_b()
 *   static_call(my_name)(arg1, arg2);
 *
 *
 * Implementation details:
 *
 *   This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
 *   Otherwise basic indirect calls are used (with function pointers).
 *
 *   Each static_call() site calls into a trampoline associated with the name.
 *   The trampoline has a direct branch to the default function. Updates to a
 *   name will modify the trampoline's branch destination.
 *
 *   If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
 *   themselves will be patched at runtime to call the functions directly,
 *   rather than calling through the trampoline. This requires objtool or a
 *   compiler plugin to detect all the static_call() sites and annotate them
 *   in the .static_call_sites section.
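 *
 *   Conceptually, the out-of-line form behaves as if the call site were a
 *   plain direct call to the trampoline and the trampoline were a one-line
 *   forwarder (an illustrative sketch only; the real trampoline is
 *   arch-specific assembly emitted by ARCH_DEFINE_STATIC_CALL_TRAMP()):
 *
 *   # static_call(my_name)(arg1, arg2) is emitted as a direct call:
 *   STATIC_CALL_TRAMP(my_name)(arg1, arg2);
 *
 *   # and the trampoline tail-calls whatever the current target is:
 *   int STATIC_CALL_TRAMP(my_name)(int arg1, int arg2)
 *   { return func_a(arg1, arg2); }   # branch destination patched on update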
 *
 *
 * Notes on NULL function pointers:
 *
 * Static_call()s support NULL functions, with many of the caveats that
 * regular function pointers have.
 *
 * Clearly calling a NULL function pointer is 'BAD', so too for
 * static_call()s (although when HAVE_STATIC_CALL it might not be immediately
 * fatal). A NULL static_call can be the result of:
 *
 *   DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
 *
 * which is equivalent to declaring a NULL function pointer with just a
 * typename:
 *
 *   void (*my_func_ptr)(int arg1) = NULL;
 *
 * or using static_call_update() with a NULL function. In both cases the
 * HAVE_STATIC_CALL implementation will patch the trampoline with a RET
 * instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
 * architectures can patch the trampoline call to a NOP.
 *
 * In all cases, any argument evaluation is unconditional. Unlike a regular
 * conditional function pointer call:
 *
 *   if (my_func_ptr)
 *           my_func_ptr(arg1)
 *
 * where the argument evaluation also depends on the pointer value.
 *
 * When calling a static_call that can be NULL, use:
 *
 *   static_call_cond(name)(arg1);
 *
 * which will include the required value tests to avoid NULL-pointer
 * dereferences.
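 *
 * For example (a minimal sketch in the style of the usage example above;
 * 'my_hook' and my_handler() are purely illustrative names):
 *
 *   # Starts out NULL; calls through it do nothing, but the arguments are
 *   # still evaluated:
 *   DEFINE_STATIC_CALL_NULL(my_hook, void (*)(int));
 *   static_call_cond(my_hook)(arg1);
 *
 *   # Install a handler; the same call site now invokes it:
 *   static_call_update(my_hook, &my_handler);
 *   static_call_cond(my_hook)(arg1);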
 */

#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/static_call_types.h>

#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

/*
 * Either @site or @tramp can be NULL.
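 *
 * Roughly (the arch implementation is authoritative for the details):
 * @site:  an inline-patched call site (HAVE_STATIC_CALL_INLINE only)
 * @tramp: the trampoline belonging to the static call
 * @func:  the new branch target; NULL means patch in a RET/NOP instead
 * @tail:  the location being patched is a tail-call (JMP) rather than a CALL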
 */
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);

#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)

#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif

#define static_call_update(name, func)					\
({									\
	BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name)));	\
	__static_call_update(&STATIC_CALL_KEY(name),			\
			     STATIC_CALL_TRAMP_ADDR(name), func);	\
})
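
/*
 * For example, with the 'my_name' call from the comment above (func_c is
 * purely illustrative and assumed to have a different prototype):
 *
 *	static_call_update(my_name, &func_b);	# OK: prototype matches
 *	static_call_update(my_name, &func_c);	# rejected by BUILD_BUG_ON()
 */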

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE

extern int __init static_call_init(void);

struct static_call_mod {
	struct static_call_mod *next;
	struct module *mod; /* for vmlinux, mod == NULL */
	struct static_call_site *sites;
};

struct static_call_key {
	void *func;
	union {
		/* bit 0: 0 = mods, 1 = sites */
		unsigned long type;
		struct static_call_mod *mods;
		struct static_call_site *sites;
	};
};
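
/*
 * A sketch of how the tagged union above is meant to be decoded (the real
 * helpers live in the static_call implementation; this is illustrative only):
 *
 *	if (key->type & 1)	# bit 0 set: 'sites', with the tag masked off
 *		sites = (struct static_call_site *)(key->type & ~1UL);
 *	else			# bit 0 clear: 'mods' pointer used as-is
 *		mods = key->mods;
 */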

/* For finding the key associated with a trampoline */
struct static_call_tramp_key {
	s32 tramp;
	s32 key;
};
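
/*
 * The two members above are presumably place-relative references emitted by
 * ARCH_ADD_TRAMP_KEY(), so that a module which only exports the trampoline
 * (see EXPORT_STATIC_CALL_TRAMP() below) can still be matched back to its
 * key.
 */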

extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);

#define DEFINE_STATIC_CALL(name, _func)					\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define static_call_cond(name)	(void)__static_call(name)

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));				\
	ARCH_ADD_TRAMP_KEY(name)
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));			\
	ARCH_ADD_TRAMP_KEY(name)

#elif defined(CONFIG_HAVE_STATIC_CALL)

static inline int static_call_init(void) { return 0; }

struct static_call_key {
	void *func;
};

#define DEFINE_STATIC_CALL(name, _func)					\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define static_call_cond(name)	(void)__static_call(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
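	/*
	 * Likely taken so the arch code can patch the trampoline text
	 * without racing CPU hotplug.
	 */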
	cpus_read_lock();
	WRITE_ONCE(key->func, func);
	arch_static_call_transform(NULL, tramp, func, false);
	cpus_read_unlock();
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

#else /* Generic implementation */

static inline int static_call_init(void) { return 0; }

struct static_call_key {
	void *func;
};

#define DEFINE_STATIC_CALL(name, _func)					\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func,						\
	}

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	}

static inline void __static_call_nop(void) { }

/*
 * This horrific hack takes care of two things:
 *
 *  - it ensures the compiler will only load the function pointer ONCE,
 *    which avoids a reload race.
 *
 *  - it ensures the argument evaluation is unconditional, similar
 *    to the HAVE_STATIC_CALL variant.
 *
 * Sadly current GCC/Clang (10 for both) do not optimize this properly
 * and will emit an indirect call for the NULL case :-(
 */
#define __static_call_cond(name)					\
({									\
	void *func = READ_ONCE(STATIC_CALL_KEY(name).func);		\
	if (!func)							\
		func = &__static_call_nop;				\
	(typeof(STATIC_CALL_TRAMP(name))*)func;				\
})

#define static_call_cond(name)	(void)__static_call_cond(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	WRITE_ONCE(key->func, func);
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

#define EXPORT_STATIC_CALL(name)	EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))

#endif /* CONFIG_HAVE_STATIC_CALL */

#endif /* _LINUX_STATIC_CALL_H */