1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
4   * using the CPU's debug registers. Derived from
5   * "arch/x86/kernel/hw_breakpoint.c"
6   *
7   * Copyright 2010 IBM Corporation
8   * Author: K.Prasad <prasad@linux.vnet.ibm.com>
9   */
10  
11  #include <linux/hw_breakpoint.h>
12  #include <linux/notifier.h>
13  #include <linux/kprobes.h>
14  #include <linux/percpu.h>
15  #include <linux/kernel.h>
16  #include <linux/sched.h>
17  #include <linux/smp.h>
18  #include <linux/debugfs.h>
19  #include <linux/init.h>
20  
21  #include <asm/hw_breakpoint.h>
22  #include <asm/processor.h>
23  #include <asm/sstep.h>
24  #include <asm/debug.h>
25  #include <asm/debugfs.h>
26  #include <asm/hvcall.h>
27  #include <linux/uaccess.h>
28  
29  /*
30   * Stores the breakpoints currently in use on each breakpoint address
31   * register for every cpu
32   */
33  static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
34  
35  /*
36   * Returns total number of data or instruction breakpoints available.
37   */
hw_breakpoint_slots(int type)38  int hw_breakpoint_slots(int type)
39  {
40  	if (type == TYPE_DATA)
41  		return HBP_NUM;
42  	return 0;		/* no instruction breakpoints available */
43  }
44  
45  /*
46   * Install a perf counter breakpoint.
47   *
48   * We seek a free debug address register and use it for this
49   * breakpoint.
50   *
51   * Atomic: we hold the counter->ctx->lock and we only handle variables
52   * and registers local to this cpu.
53   */
arch_install_hw_breakpoint(struct perf_event * bp)54  int arch_install_hw_breakpoint(struct perf_event *bp)
55  {
56  	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
57  	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
58  
59  	*slot = bp;
60  
61  	/*
62  	 * Do not install DABR values if the instruction must be single-stepped.
63  	 * If so, DABR will be populated in single_step_dabr_instruction().
64  	 */
65  	if (current->thread.last_hit_ubp != bp)
66  		__set_breakpoint(info);
67  
68  	return 0;
69  }
70  
71  /*
72   * Uninstall the breakpoint contained in the given counter.
73   *
74   * First we search the debug address register it uses and then we disable
75   * it.
76   *
77   * Atomic: we hold the counter->ctx->lock and we only handle variables
78   * and registers local to this cpu.
79   */
arch_uninstall_hw_breakpoint(struct perf_event * bp)80  void arch_uninstall_hw_breakpoint(struct perf_event *bp)
81  {
82  	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
83  
84  	if (*slot != bp) {
85  		WARN_ONCE(1, "Can't find the breakpoint");
86  		return;
87  	}
88  
89  	*slot = NULL;
90  	hw_breakpoint_disable();
91  }
92  
93  /*
94   * Perform cleanup of arch-specific counters during unregistration
95   * of the perf-event
96   */
arch_unregister_hw_breakpoint(struct perf_event * bp)97  void arch_unregister_hw_breakpoint(struct perf_event *bp)
98  {
99  	/*
100  	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
101  	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
102  	 * restoration variables to prevent dangling pointers.
103  	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
104  	 */
105  	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
106  		bp->ctx->task->thread.last_hit_ubp = NULL;
107  }
108  
109  /*
110   * Check for virtual address in kernel space.
111   */
arch_check_bp_in_kernelspace(struct arch_hw_breakpoint * hw)112  int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
113  {
114  	return is_kernel_addr(hw->address);
115  }
116  
arch_bp_generic_fields(int type,int * gen_bp_type)117  int arch_bp_generic_fields(int type, int *gen_bp_type)
118  {
119  	*gen_bp_type = 0;
120  	if (type & HW_BRK_TYPE_READ)
121  		*gen_bp_type |= HW_BREAKPOINT_R;
122  	if (type & HW_BRK_TYPE_WRITE)
123  		*gen_bp_type |= HW_BREAKPOINT_W;
124  	if (*gen_bp_type == 0)
125  		return -EINVAL;
126  	return 0;
127  }
128  
129  /*
130   * Validate the arch-specific HW Breakpoint register settings
131   */
hw_breakpoint_arch_parse(struct perf_event * bp,const struct perf_event_attr * attr,struct arch_hw_breakpoint * hw)132  int hw_breakpoint_arch_parse(struct perf_event *bp,
133  			     const struct perf_event_attr *attr,
134  			     struct arch_hw_breakpoint *hw)
135  {
136  	int ret = -EINVAL, length_max;
137  
138  	if (!bp)
139  		return ret;
140  
141  	hw->type = HW_BRK_TYPE_TRANSLATE;
142  	if (attr->bp_type & HW_BREAKPOINT_R)
143  		hw->type |= HW_BRK_TYPE_READ;
144  	if (attr->bp_type & HW_BREAKPOINT_W)
145  		hw->type |= HW_BRK_TYPE_WRITE;
146  	if (hw->type == HW_BRK_TYPE_TRANSLATE)
147  		/* must set alteast read or write */
148  		return ret;
149  	if (!attr->exclude_user)
150  		hw->type |= HW_BRK_TYPE_USER;
151  	if (!attr->exclude_kernel)
152  		hw->type |= HW_BRK_TYPE_KERNEL;
153  	if (!attr->exclude_hv)
154  		hw->type |= HW_BRK_TYPE_HYP;
155  	hw->address = attr->bp_addr;
156  	hw->len = attr->bp_len;
157  
158  	/*
159  	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
160  	 * and breakpoint addresses are aligned to nearest double-word
161  	 * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
162  	 * 'symbolsize' should satisfy the check below.
163  	 */
164  	if (!ppc_breakpoint_available())
165  		return -ENODEV;
166  	length_max = 8; /* DABR */
167  	if (dawr_enabled()) {
168  		length_max = 512 ; /* 64 doublewords */
169  		/* DAWR region can't cross 512 boundary */
170  		if ((attr->bp_addr >> 9) !=
171  		    ((attr->bp_addr + attr->bp_len - 1) >> 9))
172  			return -EINVAL;
173  	}
174  	if (hw->len >
175  	    (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
176  		return -EINVAL;
177  	return 0;
178  }
179  
180  /*
181   * Restores the breakpoint on the debug registers.
182   * Invoke this function if it is known that the execution context is
183   * about to change to cause loss of MSR_SE settings.
184   */
thread_change_pc(struct task_struct * tsk,struct pt_regs * regs)185  void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
186  {
187  	struct arch_hw_breakpoint *info;
188  
189  	if (likely(!tsk->thread.last_hit_ubp))
190  		return;
191  
192  	info = counter_arch_bp(tsk->thread.last_hit_ubp);
193  	regs->msr &= ~MSR_SE;
194  	__set_breakpoint(info);
195  	tsk->thread.last_hit_ubp = NULL;
196  }
197  
is_larx_stcx_instr(struct pt_regs * regs,unsigned int instr)198  static bool is_larx_stcx_instr(struct pt_regs *regs, unsigned int instr)
199  {
200  	int ret, type;
201  	struct instruction_op op;
202  
203  	ret = analyse_instr(&op, regs, instr);
204  	type = GETTYPE(op.type);
205  	return (!ret && (type == LARX || type == STCX));
206  }
207  
208  /*
209   * Handle debug exception notifications.
210   */
stepping_handler(struct pt_regs * regs,struct perf_event * bp,unsigned long addr)211  static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
212  			     unsigned long addr)
213  {
214  	unsigned int instr = 0;
215  
216  	if (__get_user_inatomic(instr, (unsigned int *)regs->nip))
217  		goto fail;
218  
219  	if (is_larx_stcx_instr(regs, instr)) {
220  		printk_ratelimited("Breakpoint hit on instruction that can't be emulated."
221  				   " Breakpoint at 0x%lx will be disabled.\n", addr);
222  		goto disable;
223  	}
224  
225  	/* Do not emulate user-space instructions, instead single-step them */
226  	if (user_mode(regs)) {
227  		current->thread.last_hit_ubp = bp;
228  		regs->msr |= MSR_SE;
229  		return false;
230  	}
231  
232  	if (!emulate_step(regs, instr))
233  		goto fail;
234  
235  	return true;
236  
237  fail:
238  	/*
239  	 * We've failed in reliably handling the hw-breakpoint. Unregister
240  	 * it and throw a warning message to let the user know about it.
241  	 */
242  	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
243  		"0x%lx will be disabled.", addr);
244  
245  disable:
246  	perf_event_disable_inatomic(bp);
247  	return false;
248  }
249  
/*
 * Handle a data breakpoint (DABR/DAWR match) exception notification.
 * Returns NOTIFY_STOP when the event was consumed here, NOTIFY_DONE
 * when the generic die-chain should continue (no bp, or ptrace one-shot).
 */
int hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info;
	unsigned long dar = regs->dar;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __this_cpu_read(bp_per_reg);
	if (!bp) {
		rc = NOTIFY_DONE;
		goto out;
	}
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Verify if dar lies within the address range occupied by the symbol
	 * being watched to filter extraneous exceptions.  If it doesn't,
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
	if (!((bp->attr.bp_addr <= dar) &&
	      (dar - bp->attr.bp_addr < bp->attr.bp_len)))
		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;

	/* 8xx skips the emulate/single-step dance entirely. */
	if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info->address))
		goto out;

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	/* Re-arm the breakpoint that was disabled on entry. */
	__set_breakpoint(info);
out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);
315  
316  /*
317   * Handle single-step exceptions following a DABR hit.
318   */
single_step_dabr_instruction(struct die_args * args)319  static int single_step_dabr_instruction(struct die_args *args)
320  {
321  	struct pt_regs *regs = args->regs;
322  	struct perf_event *bp = NULL;
323  	struct arch_hw_breakpoint *info;
324  
325  	bp = current->thread.last_hit_ubp;
326  	/*
327  	 * Check if we are single-stepping as a result of a
328  	 * previous HW Breakpoint exception
329  	 */
330  	if (!bp)
331  		return NOTIFY_DONE;
332  
333  	info = counter_arch_bp(bp);
334  
335  	/*
336  	 * We shall invoke the user-defined callback function in the single
337  	 * stepping handler to confirm to 'trigger-after-execute' semantics
338  	 */
339  	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
340  		perf_bp_event(bp, regs);
341  
342  	__set_breakpoint(info);
343  	current->thread.last_hit_ubp = NULL;
344  
345  	/*
346  	 * If the process was being single-stepped by ptrace, let the
347  	 * other single-step actions occur (e.g. generate SIGTRAP).
348  	 */
349  	if (test_thread_flag(TIF_SINGLESTEP))
350  		return NOTIFY_DONE;
351  
352  	return NOTIFY_STOP;
353  }
354  NOKPROBE_SYMBOL(single_step_dabr_instruction);
355  
356  /*
357   * Handle debug exception notifications.
358   */
hw_breakpoint_exceptions_notify(struct notifier_block * unused,unsigned long val,void * data)359  int hw_breakpoint_exceptions_notify(
360  		struct notifier_block *unused, unsigned long val, void *data)
361  {
362  	int ret = NOTIFY_DONE;
363  
364  	switch (val) {
365  	case DIE_DABR_MATCH:
366  		ret = hw_breakpoint_handler(data);
367  		break;
368  	case DIE_SSTEP:
369  		ret = single_step_dabr_instruction(data);
370  		break;
371  	}
372  
373  	return ret;
374  }
375  NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
376  
377  /*
378   * Release the user breakpoints used by ptrace
379   */
flush_ptrace_hw_breakpoint(struct task_struct * tsk)380  void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
381  {
382  	struct thread_struct *t = &tsk->thread;
383  
384  	unregister_hw_breakpoint(t->ptrace_bps[0]);
385  	t->ptrace_bps[0] = NULL;
386  }
387  
/* perf PMU read hook for breakpoint events; intentionally a stub here. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
392