// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;		/* no instruction breakpoints available */
}

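/*
 * Returns true if any watchpoint hit by the current task is still waiting
 * for its single-step to complete (such breakpoints are recorded in
 * thread.last_hit_ubp[] by stepping_handler() and cleared again in
 * single_step_dabr_instruction()).
 */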
static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

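/*
 * Book-keeping for slot reservation (arch_reserve_bp_slot() and friends):
 * each reserved breakpoint is tracked either on the global task_bps list
 * (task-bound breakpoints) or in the per-CPU cpu_bps[] array (CPU-bound
 * breakpoints), so that ptrace and perf requests with overlapping address
 * ranges can be refused before they reach the hardware.
 */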
struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};

static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static LIST_HEAD(task_bps);

static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

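/*
 * Two breakpoints overlap if their doubleword-aligned [start, end) ranges
 * intersect. Both ranges are widened to HW_BREAKPOINT_SIZE alignment,
 * matching the granularity the hardware actually monitors.
 */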
static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}

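/*
 * "Alternate infrastructure" means the other breakpoint client: a ptrace
 * breakpoint relative to a perf one and vice versa. Two breakpoints can
 * co-exist unless they come from different infrastructures and their
 * address ranges overlap.
 */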
static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}

static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	list_add(&tmp->list, &task_bps);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
}

/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

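/*
 * Per-CPU counterpart of task_bps_add()/task_bps_remove(): record a
 * CPU-bound breakpoint in the first free cpu_bps[] slot of its target CPU.
 */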
static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i;

	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
			return true;
	}
	return false;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}

/*
 * We don't use any locks to serialize accesses to cpu_bps or task_bps
 * because we are already inside nr_bp_mutex.
 */
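/*
 * Reservation rules, as implemented below:
 *  - a ptrace breakpoint is refused if it overlaps any CPU-bound perf
 *    breakpoint or a perf breakpoint on the same task;
 *  - a perf breakpoint on a kernel address is always accepted;
 *  - a task-bound perf breakpoint is checked against that task's ptrace
 *    breakpoints, a CPU-bound one against every task's ptrace breakpoints,
 *    and one that is both task- and CPU-bound is accounted on both lists.
 */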
int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}

void arch_release_bp_slot(struct perf_event *bp)
{
	if (!is_kernel_addr(bp->attr.bp_addr)) {
		if (bp->hw.target)
			task_bps_remove(bp);
		if (bp->cpu != -1)
			cpu_bps_remove(bp);
	}
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

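/*
 * Translate the arch-specific HW_BRK_TYPE_READ/WRITE access bits into the
 * generic HW_BREAKPOINT_R/W flags; at least one of them must be set.
 */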
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length such that the next doubleword also gets
 * covered. e.g.,
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *        <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
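/*
 * Worked example with hypothetical numbers matching the figure above:
 * address = 0x1006, len = 6. start_addr = ALIGN_DOWN(0x1006, 8) = 0x1000,
 * end_addr = ALIGN(0x1006 + 6, 8) = 0x1010, so hw_len = 16 and both
 * doublewords are covered.
 */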
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross a 512-byte boundary on P10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can set up a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
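/*
 * Illustrative mapping (example values, not taken from this file): a
 * user-space write watchpoint requested with attr.bp_type = HW_BREAKPOINT_W,
 * attr.bp_len = 8 and attr.exclude_kernel = 1 comes out of this function as
 * HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_WRITE | HW_BRK_TYPE_USER |
 * HW_BRK_TYPE_HYP (exclude_hv is 0 by default), with hw->len = 8.
 */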
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	preempt_disable();

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	goto out;

reset:
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}

out:
	preempt_enable();
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

static bool is_octword_vsx_instr(int type, int size)
{
	return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     info->address);
	perf_event_disable_inatomic(bp);
}

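/*
 * Called when the breakpoint was hit by a larx/stcx. sequence: such a
 * sequence cannot be emulated, and single-stepping it would lose the
 * reservation, so report the problem and disable the breakpoint instead.
 */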
static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   info->address);
	perf_event_disable_inatomic(bp);
}

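/*
 * Decide how to get past the instruction that triggered the breakpoint(s).
 * User-space instructions are not emulated; we record the hit breakpoints
 * in thread.last_hit_ubp[] and set MSR_SE so single_step_dabr_instruction()
 * finishes the job. Kernel instructions are emulated in place. Returns true
 * when the per-breakpoint callbacks should be invoked right away.
 */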
static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     struct arch_hw_breakpoint **info, int *hit,
			     struct ppc_inst instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			info[i] = NULL;
		}
		regs_set_return_msr(regs, regs->msr | MSR_SE);
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i], info[i]);
			info[i] = NULL;
		}
		return false;
	}
	return true;
}

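/*
 * Power10 DD1 workaround: an octword (32-byte) VSX access close to the
 * watched range can raise a DAWR exception even though no breakpoint
 * actually matched. When the conditions below identify such a case, mark
 * every installed breakpoint as hit but extraneous, so the event is not
 * reported to the user yet the registers still get restored on exit.
 */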
static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info,
					     int *hit, unsigned long ea)
{
	int i;
	unsigned long hw_end_addr;

	/*
	 * Handle spurious exception only when any bp_per_reg is set.
	 * Otherwise this might be created by xmon and not actually a
	 * spurious exception.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;

		hw_end_addr = ALIGN(info[i]->address + info[i]->len, HW_BREAKPOINT_SIZE);

		/*
		 * Ending address of DAWR range is less than starting
		 * address of op.
		 */
		if ((hw_end_addr - 1) >= ea)
			continue;

		/*
		 * Those addresses need to be in the same or in two
		 * consecutive 512B blocks;
		 */
		if (((hw_end_addr - 1) >> 10) != (ea >> 10))
			continue;

		/*
		 * 'op address + 64B' generates an address that has a
		 * carry into bit 52 (crosses 2K boundary).
		 */
		if ((ea & 0x800) == ((ea + 64) & 0x800))
			continue;

		break;
	}

	if (i == nr_wp_slots())
		return;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (info[i]) {
			hit[i] = 1;
			info[i]->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		}
	}
}

/*
 * Handle a DABR or DAWR exception.
 *
 * Called in atomic context.
 */
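/*
 * Outline: with breakpoints temporarily disabled, decode the faulting
 * instruction (except on 8xx), work out which installed slots actually
 * matched, hand ptrace hits straight to perf_bp_event() without re-arming
 * them (ptrace is one-shot), step or emulate past the instruction for
 * everything else, then deliver the remaining events and re-arm the
 * debug registers.
 */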
int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	struct ppc_inst instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = 1;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		/* Workaround for Power10 DD1 */
		if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
		    is_octword_vsx_instr(type, size)) {
			handle_p10dd1_spurious_exception(info, hit, ea);
		} else {
			rc = NOTIFY_DONE;
			goto out;
		}
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 *
 * Called in atomic context.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 *
 * Called in atomic context.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}