// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

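/*
 * klp_find_ops() - find the klp_ops struct for a given function address
 *
 * Look up the klp_ops whose func_stack patches @old_func. All funcs on a
 * given stack patch the same old function, so checking the first entry is
 * sufficient. Returns NULL when the function is not currently patched.
 */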
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

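/*
 * klp_ftrace_handler() - the livepatch ftrace handler
 *
 * Called from the ftrace trampoline on entry to every patched function. It
 * picks the newest applicable klp_func from the RCU-protected func_stack
 * (honoring the per-task patch state while a transition is in progress) and
 * redirects execution by rewriting the saved instruction pointer in @regs.
 *
 * For example, if patches P1 and then P2 both patch function F, the
 * func_stack is [P2's func, P1's func]: fully patched tasks are redirected
 * to P2's replacement, while tasks still in the KLP_UNPATCHED state fall
 * back to P1's replacement or, failing that, to the original F.
 */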
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

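/*
 * klp_unpatch_func() - revert the patching of a single function
 *
 * Remove @func from its klp_ops func_stack. If it was the last patch on the
 * stack, also unregister the ftrace handler, drop the filter on the old
 * function's ftrace location and free the klp_ops structure.
 */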
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

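/*
 * klp_patch_func() - redirect a single function to its new implementation
 *
 * If @func->old_func is not patched yet, allocate a klp_ops, set the ftrace
 * filter on the old function's ftrace location and register the handler.
 * Otherwise just push @func on top of the existing func_stack so that the
 * handler starts using it.
 */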
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

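/*
 * __klp_unpatch_object() - unpatch the functions of a single object
 *
 * With @nops_only set, only the dynamically allocated nop functions are
 * reverted; in that case the object stays marked as patched unless it is
 * itself dynamic.
 */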
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

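/* Fully revert all patched functions of @obj and mark it unpatched. */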
void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

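/*
 * klp_patch_object() - patch every function of an object
 *
 * On the first failure, roll back the functions patched so far and return
 * the error.
 */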
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

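/*
 * __klp_unpatch_objects() - unpatch every patched object of a patch,
 * optionally restricted to the dynamic nop functions.
 */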
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

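/* Fully revert all patched objects of @patch. */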
void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

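/*
 * Revert only the dynamically allocated nop functions of @patch, once they
 * are no longer needed; see klp_discard_nops() in core.c.
 */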
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}