// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

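/*
 * Find the klp_ops that is tracing @old_func, i.e. the ftrace_ops whose
 * func_stack holds the klp_func(s) currently patching that function.
 * Returns NULL if @old_func is not patched by any klp_ops.
 */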
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

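/*
 * The ftrace handler hooked onto every patched function. It selects the
 * klp_func at the top of the func_stack (or, during a transition, the one
 * matching the current task's patch_state) and redirects execution by
 * rewriting the saved instruction pointer to the replacement function.
 */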
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

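	/*
	 * Protect against recursion: bail out if this handler is re-entered
	 * via another function traced in the same context.
	 */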
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(fregs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

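/*
 * Remove @func from the func_stack of its klp_ops. If it was the only
 * remaining function on the stack, also unregister the ftrace handler,
 * clear the ftrace filter and free the klp_ops.
 */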
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

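/*
 * Push @func onto the func_stack for its original function. The first patch
 * of a given function allocates a klp_ops, points the ftrace filter at the
 * function's entry and registers klp_ftrace_handler(); subsequent patches of
 * the same function are simply stacked on top of the existing klp_ops.
 */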
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

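/*
 * Unpatch every currently patched function in @obj. With @nops_only set,
 * only the dynamically allocated NOP functions (added by atomic replace
 * patches) are removed; in that case a statically defined object stays
 * marked as patched.
 */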
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

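/* Unpatch all functions of @obj, including any NOPs. */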
void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

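/*
 * Patch every function in @obj. On the first failure, any functions already
 * patched for this object are rolled back and the error is returned.
 */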
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

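/* Unpatch every patched object of @patch, optionally limited to NOPs. */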
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

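/* Unpatch all objects of @patch. */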
void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

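/*
 * Unpatch only the dynamically allocated NOP functions of @patch, leaving
 * the regular patched functions in place.
 */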
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}