1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel Probes (KProbes)
4 *
5 * arch/loongarch/kernel/kprobes.c
6 *
7 * Copyright (C) 2020 Loongson Technology Corporation Limited
8 */
9
10 #include <linux/kprobes.h>
11 #include <linux/preempt.h>
12 #include <linux/uaccess.h>
13 #include <linux/kdebug.h>
14 #include <linux/slab.h>
15
16 #include <asm/ptrace.h>
17 #include <asm/branch.h>
18 #include <asm/break.h>
19
/*
 * BREAK instruction encoding patched over the probed instruction.
 * Raises BRK_KPROBE_BP, which reaches kprobe_handler() via DIE_BREAK.
 */
static const union loongarch_instruction breakpoint_insn = {
	.reg0i15_format = {
		.opcode = break_op,
		.simmediate = BRK_KPROBE_BP,
	}
};
26
/*
 * BREAK instruction stored after the out-of-line copy of the probed
 * instruction; raises BRK_KPROBE_SSTEPBP (DIE_SSTEPBP) once the
 * single-stepped instruction has executed.
 */
static const union loongarch_instruction breakpoint2_insn = {
	.reg0i15_format = {
		.opcode = break_op,
		.simmediate = BRK_KPROBE_SSTEPBP,
	}
};
33
/* Per-CPU kprobe state: the probe currently being handled on this CPU. */
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* Return codes of prepare_singlestep() */
#define SS_FAIL		-1	/* instruction simulation failed */
#define SS_NONE		0	/* not simulated: single-step out of line */
#define SS_BRANCH	1	/* branch instruction simulated in place */
#define SS_PC		2	/* pc-relative instruction simulated in place */
41
42 /*
43 * insn_has_ll_or_sc function checks whether instruction is ll or sc
44 * one; putting breakpoint on top of atomic ll/sc pair is bad idea;
45 * so we need to prevent it and refuse kprobes insertion for such
46 * instructions; cannot do much about breakpoint in the middle of
47 * ll/sc pair; it is upto user to avoid those places
48 */
insn_has_ll_or_sc(union loongarch_instruction insn)49 static int __kprobes insn_has_ll_or_sc(union loongarch_instruction insn)
50 {
51 int ret = 0;
52
53 switch (insn.reg2i14_format.opcode) {
54 case llw_op:
55 case lld_op:
56 case scw_op:
57 case scd_op:
58 ret = 1;
59 break;
60 }
61 return ret;
62 }
63
arch_prepare_kprobe(struct kprobe * p)64 int __kprobes arch_prepare_kprobe(struct kprobe *p)
65 {
66 union loongarch_instruction insn;
67 int ret = 0;
68
69 insn = p->addr[0];
70
71 if (insn_has_ll_or_sc(insn)) {
72 pr_notice("Kprobes for ll and sc instructions are not"
73 "supported\n");
74 ret = -EINVAL;
75 goto out;
76 }
77
78 if (insn.reg1i21_format.opcode == bceqz_op) {
79 pr_notice("Kprobes for bceqz and bcnez instructions are not"
80 "supported\n");
81 ret = -EINVAL;
82 goto out;
83 }
84
85 /* insn: must be on special executable page on loongarch. */
86 p->ainsn.insn = get_insn_slot();
87 if (!p->ainsn.insn) {
88 ret = -ENOMEM;
89 goto out;
90 }
91
92 /*
93 * In the kprobe->ainsn.insn[] array we store the original
94 * instruction at index zero and a break trap instruction at
95 * index one.
96 */
97 memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
98
99 p->ainsn.insn[1] = breakpoint2_insn;
100 p->opcode = *p->addr;
101
102 out:
103 return ret;
104 }
105
/* Arm the probe: patch the BREAK insn over the original instruction. */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	/* make the patched instruction visible to instruction fetch */
	flush_insn_slot(p);
}
111
/* Disarm the probe: restore the original instruction saved in p->opcode. */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	/* make the restored instruction visible to instruction fetch */
	flush_insn_slot(p);
}
117
arch_remove_kprobe(struct kprobe * p)118 void __kprobes arch_remove_kprobe(struct kprobe *p)
119 {
120 if (p->ainsn.insn) {
121 free_insn_slot(p->ainsn.insn, 0);
122 p->ainsn.insn = NULL;
123 }
124 }
125
/*
 * Stash the currently-active kprobe state in kcb->prev_kprobe so a
 * re-entrant probe hit can be handled; paired with
 * restore_previous_kprobe().
 */
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
}
134
/* Undo save_previous_kprobe(): reinstate the outer probe's saved state. */
static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
}
143
/*
 * Record @p as the probe being handled on this CPU and save the
 * interrupt-enable bit (PRMD.PIE) and ERA so they can be restored
 * after single-stepping.
 */
static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->csr_prmd & CSR_PRMD_PIE);
	kcb->kprobe_saved_era = regs->csr_era;
}
151
prepare_singlestep(struct kprobe * p,struct pt_regs * regs)152 static int prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
153 {
154 if (is_branch_insn(p->opcode)) {
155 if (!simu_branch(regs, p->opcode))
156 return SS_BRANCH;
157 } else if (is_pc_insn(p->opcode)) {
158 if (!simu_pc(regs, p->opcode))
159 return SS_PC;
160 } else {
161 regs->csr_era = (unsigned long)&p->ainsn.insn[0];
162 return SS_NONE;
163 }
164
165 pr_notice("Kprobes: Error in simulate insn\n");
166 regs->csr_era = (unsigned long)&p->ainsn.insn[0];
167 return SS_FAIL;
168 }
169
/*
 * Set up execution of the probed instruction after the pre-handler ran.
 * @reenter: non-zero when we hit this probe while already inside
 *           another probe's handler (state is stacked via
 *           save_previous_kprobe()).
 *
 * Interrupts (PRMD.PIE) are kept disabled across the step and restored
 * in post_kprobe_handler() / here for the simulated cases.
 */
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	int ss;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}
	/* single step inline if the instruction is an break */
	if (p->ainsn.insn->word == breakpoint_insn.word) {
		/* probed insn is itself a break: nothing to step, just
		 * restore the saved interrupt-enable state */
		regs->csr_prmd &= ~CSR_PRMD_PIE;
		regs->csr_prmd |= kcb->kprobe_saved_SR;
		preempt_enable_no_resched();
	} else {
		regs->csr_prmd &= ~CSR_PRMD_PIE;
		ss = prepare_singlestep(p, regs);
		if (ss == SS_NONE) {
			/* stepping out of line; finished in post_kprobe_handler() */
			kcb->kprobe_status = KPROBE_HIT_SS;
		} else if (ss == SS_BRANCH || ss == SS_PC) {
			/* simulated in software: the probe hit is complete */
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			if (p->post_handler)
				p->post_handler(p, regs, 0);
			reset_current_kprobe();
			preempt_enable_no_resched();
		} else {
			/* SS_FAIL: simulation failed */
			if (p->fault_handler)
				p->fault_handler(p, regs, 0);
			reset_current_kprobe();
			preempt_enable_no_resched();
		}
	}
}
206
reenter_kprobe(struct kprobe * p,struct pt_regs * regs,struct kprobe_ctlblk * kcb)207 static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
208 struct kprobe_ctlblk *kcb)
209 {
210 switch (kcb->kprobe_status) {
211 case KPROBE_HIT_SSDONE:
212 case KPROBE_HIT_ACTIVE:
213 kprobes_inc_nmissed_count(p);
214 setup_singlestep(p, regs, kcb, 1);
215 break;
216 case KPROBE_HIT_SS:
217 case KPROBE_REENTER:
218 pr_err("Unrecoverable kprobe detected. \n");
219 BUG();
220 break;
221 default:
222 /* impossible cases */
223 WARN_ON(1);
224 return 0;
225 }
226
227 return 1;
228 }
229
/*
 * Breakpoint (DIE_BREAK) entry point: dispatch a probe hit at csr_era.
 * Returns 1 when the break was consumed by kprobes, 0 otherwise.
 *
 * Preemption is disabled here and stays disabled across the single
 * step; it is re-enabled in setup_singlestep()/post_kprobe_handler()
 * or on the early-return paths below.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->csr_era;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	p = get_kprobe(addr);
	if (p) {
		if (kprobe_running()) {
			/* nested hit: preemption stays off, handled inside */
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				/* handler has already set things up, so skip ss setup */
				reset_current_kprobe();
				preempt_enable_no_resched();
				return 1;
			} else {
				/* preemption re-enabled inside setup_singlestep() */
				setup_singlestep(p, regs, kcb, 0);
				return 1;
			}
		}
	} else {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			preempt_enable_no_resched();
			return 1;
		}
	}

	/* our break insn, but no probe registered: let others handle it */
	preempt_enable_no_resched();

	return 0;
}
281
post_kprobe_handler(struct pt_regs * regs)282 static inline int post_kprobe_handler(struct pt_regs *regs)
283 {
284 struct kprobe *cur = kprobe_running();
285 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
286
287 if (!cur)
288 return 0;
289
290 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
291 kcb->kprobe_status = KPROBE_HIT_SSDONE;
292 cur->post_handler(cur, regs, 0);
293 }
294
295 regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
296 regs->csr_prmd |= kcb->kprobe_saved_SR;
297
298 /* Restore back the original saved kprobes variables and continue. */
299 if (kcb->kprobe_status == KPROBE_REENTER) {
300 restore_previous_kprobe(kcb);
301 goto out;
302 }
303 reset_current_kprobe();
304 out:
305 preempt_enable_no_resched();
306
307 return 1;
308 }
309
/*
 * Called when a page fault occurs while a kprobe is active (via the
 * DIE_PAGE_FAULT notifier path, which checks kprobe_running() first,
 * so @cur is non-NULL here).  Returns 1 when the probe's own
 * fault_handler absorbed the fault, 0 to let normal fault handling run.
 */
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	/*
	 * Fault during the out-of-line step: abandon the step, point ERA
	 * past the probe and restore interrupt state.
	 * NOTE(review): bitwise '&' relies on KPROBE_* status values being
	 * distinct bits — confirm against <linux/kprobes.h>.
	 */
	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
		regs->csr_prmd |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
327
328 /*
329 * Wrapper routine for handling exceptions.
330 */
kprobe_exceptions_notify(struct notifier_block * self,unsigned long val,void * data)331 int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
332 unsigned long val, void *data)
333 {
334
335 struct die_args *args = (struct die_args *)data;
336 int ret = NOTIFY_DONE;
337
338 switch (val) {
339 case DIE_BREAK:
340 if (kprobe_handler(args->regs))
341 ret = NOTIFY_STOP;
342 break;
343 case DIE_SSTEPBP:
344 if (post_kprobe_handler(args->regs))
345 ret = NOTIFY_STOP;
346 break;
347
348 case DIE_PAGE_FAULT:
349 /* kprobe_running() needs smp_processor_id() */
350 preempt_disable();
351
352 if (kprobe_running()
353 && kprobe_fault_handler(args->regs, args->trapnr))
354 ret = NOTIFY_STOP;
355 preempt_enable();
356 break;
357 default:
358 break;
359 }
360 return ret;
361 }
362
363 /*
364 * Function return probe trampoline:
365 * - init_kprobes() establishes a probepoint here
366 * - When the probed function returns, this probe causes the
367 * handlers to fire
368 */
/*
 * Container function whose inline asm defines the global label
 * kretprobe_trampoline; the function itself is never called, it only
 * exists so the trampoline code lives in the text section.
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		/* Keep the assembler from reordering and placing JR here. */
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		: : : "memory");
}
379
380 void kretprobe_trampoline(void);
381
/*
 * Hijack the probed function's return: save the real return address
 * (register $ra, regs[1]) in @ri and point it at the trampoline so
 * the return fires a probe there.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[1];
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->regs[1] = (unsigned long)kretprobe_trampoline;
}
391
392 /*
393 * Called when the probe at kretprobe trampoline is hit
394 */
/*
 * Called when the probe at kretprobe trampoline is hit: run the
 * registered return handlers and redirect execution to the original
 * return address recovered by the generic kretprobe core.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	instruction_pointer(regs) = __kretprobe_trampoline_handler(regs,
						kretprobe_trampoline, NULL);
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
407
arch_trampoline_kprobe(struct kprobe * p)408 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
409 {
410 if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
411 return 1;
412
413 return 0;
414 }
415
/* The probe registered on the trampoline label itself. */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
420
/* Arch init hook: install the kretprobe trampoline probe at boot. */
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
425