/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) 2009 IBM Corporation
 * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
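
/*
 * A minimal sketch of how a kernel-side user typically reaches this code,
 * modeled on samples/hw_breakpoint/data_breakpoint.c (the symbol name and
 * handler below are placeholders, not part of this file):
 *
 *	struct perf_event * __percpu *bps;
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *	bps = register_wide_hw_breakpoint(&attr, my_handler, NULL);
 *
 * The perf core then calls back into the arch_* helpers below to validate
 * the request and to install/uninstall the debug register slots.
 */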

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>

/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
EXPORT_PER_CPU_SYMBOL(cpu_dr7);

/* Per cpu debug address register values */
static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);

/*
 * Stores the breakpoint currently in use in each breakpoint address
 * register, for each cpu (HBP_NUM is 4 on x86: DR0..DR3).
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);


static inline unsigned long
__encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	unsigned long bp_info;

	bp_info = (len | type) & 0xf;
	bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
	bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));

	return bp_info;
}

/*
 * Encode the length, type, Exact, and Enable bits for a particular breakpoint
 * as stored in debug register 7.
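 *
 * Worked example (illustrative; constants from asm/debugreg.h and
 * asm/hw_breakpoint.h): encode_dr7(0, X86_BREAKPOINT_LEN_4,
 * X86_BREAKPOINT_WRITE) returns 0x000d0202, i.e. G0 (global enable for
 * slot 0, bit 1), GE (the "exact" slowdown bit, bit 9), and
 * LEN0=0b11/RW0=0b01 in the slot 0 control nibble (bits 19:16).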
 */
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
}

/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7.  Return the "enabled" status.
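 *
 * The 0x40 and 0x80 ORed in below restore the tag bits that distinguish
 * the X86_BREAKPOINT_LEN_* and X86_BREAKPOINT_* encodings in
 * asm/hw_breakpoint.h (which __encode_dr7() masked off); the value
 * returned is the breakpoint's Ln/Gn enable bit pair from DR7.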
 */
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
{
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;

	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.  Then we enable it in the debug control register.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return -EBUSY;

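	/* Program the address into DRi and mirror it for hw_breakpoint_restore() */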
	set_debugreg(info->address, i);
	__this_cpu_write(cpu_debugreg[i], info->address);

	dr7 = this_cpu_ptr(&cpu_dr7);
	*dr7 |= encode_dr7(i, info->len, info->type);

	set_debugreg(*dr7, 7);
	if (info->mask)
		set_dr_addr_mask(info->mask, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses, and then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return;

	dr7 = this_cpu_ptr(&cpu_dr7);
	*dr7 &= ~__encode_dr7(i, info->len, info->type);

	set_debugreg(*dr7, 7);
	if (info->mask)
		set_dr_addr_mask(0, i);
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = bp->attr.bp_len;

	/*
	 * We don't need to worry about va + len - 1 overflowing:
	 * we already require that va is aligned to a multiple of len.
	 */
	return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
}

int arch_bp_generic_fields(int x86_len, int x86_type,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (x86_type) {
	case X86_BREAKPOINT_EXECUTE:
		if (x86_len != X86_BREAKPOINT_LEN_X)
			return -EINVAL;

		*gen_type = HW_BREAKPOINT_X;
		*gen_len = sizeof(long);
		return 0;
	case X86_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case X86_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (x86_len) {
	case X86_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case X86_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case X86_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -EINVAL;
	}

	return 0;
}


static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_W:
		info->type = X86_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = X86_BREAKPOINT_RW;
		break;
	case HW_BREAKPOINT_X:
		/*
		 * We don't allow kernel breakpoints in places that are not
		 * acceptable for kprobes.  On non-kprobes kernels, we don't
		 * allow kernel breakpoints at all.
		 */
		if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
#ifdef CONFIG_KPROBES
			if (within_kprobe_blacklist(bp->attr.bp_addr))
				return -EINVAL;
#else
			return -EINVAL;
#endif
		}

		info->type = X86_BREAKPOINT_EXECUTE;
		/*
		 * x86 instruction breakpoints use a single, otherwise-unused
		 * length encoding.  But we still need to check that userspace
		 * is not trying to set up an unsupported length, for example
		 * to get a range breakpoint.
		 */
		if (bp->attr.bp_len == sizeof(long)) {
			info->len = X86_BREAKPOINT_LEN_X;
			return 0;
		}
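		/* Fall through: any other length is invalid for execute. */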
	default:
		return -EINVAL;
	}

	/* Len */
	info->mask = 0;

	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = X86_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = X86_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case HW_BREAKPOINT_LEN_8:
		info->len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		/* AMD range breakpoint */
		if (!is_power_of_2(bp->attr.bp_len))
			return -EINVAL;
		if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
			return -EINVAL;

		if (!boot_cpu_has(X86_FEATURE_BPEXT))
			return -EOPNOTSUPP;

		/*
		 * It's impossible to use a range breakpoint to fake out
		 * user vs kernel detection because bp_len - 1 can't
		 * have the high bit set.  If we ever allow range instruction
		 * breakpoints, then we'll have to check for kprobe-blacklisted
		 * addresses anywhere in the range.
		 */
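		/*
		 * Illustrative example: bp_len == 32 on a 32-byte aligned
		 * address gives mask == 0x1f, i.e. the extension ignores the
		 * low 5 address bits when matching, covering the whole range.
		 */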
		info->mask = bp->attr.bp_len - 1;
		info->len = X86_BREAKPOINT_LEN_1;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	switch (info->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;
		if (info->mask)
			align = info->mask;
		break;
	case X86_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case X86_BREAKPOINT_LEN_4:
		align = 3;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		align = 7;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
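	 * E.g. a 4-byte breakpoint needs a 4-byte aligned address: align is
	 * 3, so the low two bits of the address must be clear.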
	 */
	if (info->address & align)
		return -EINVAL;

	return 0;
}

/*
 * Dump the debug register contents to the user.
 * We can't dump our per cpu values because they
 * may contain cpu wide breakpoints, which don't
 * belong to the current task.
 *
 * TODO: include non-ptrace user breakpoints (perf)
 */
void aout_dump_debugregs(struct user *dump)
{
	int i;
	int dr7 = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &current->thread;

	for (i = 0; i < HBP_NUM; i++) {
		bp = thread->ptrace_bps[i];

		if (bp && !bp->attr.disabled) {
			dump->u_debugreg[i] = bp->attr.bp_addr;
			info = counter_arch_bp(bp);
			dr7 |= encode_dr7(i, info->len, info->type);
		} else {
			dump->u_debugreg[i] = 0;
		}
	}

	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;

	dump->u_debugreg[7] = dr7;
}
EXPORT_SYMBOL_GPL(aout_dump_debugregs);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

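	/* unregister_hw_breakpoint() is a no-op for NULL slots, so no check */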
	for (i = 0; i < HBP_NUM; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}

	t->debugreg6 = 0;
	t->ptrace_dr7 = 0;
}

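/*
 * Reload the hardware debug registers from the per-cpu shadow copies kept
 * above (and from the task's virtualized DR6), for callers such as the
 * suspend/resume and KVM paths that may have clobbered them.
 */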
void hw_breakpoint_restore(void)
{
	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
	set_debugreg(current->thread.debugreg6, 6);
	set_debugreg(__this_cpu_read(cpu_dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);

/*
 * Handle debug exception notifications.
 *
 * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
 *
 * NOTIFY_DONE is returned if one of the following conditions is true:
 * i) When the causative address is from user-space and the exception
 * is a valid one, i.e. not triggered as a result of lazy debug register
 * switching
 * ii) When there are more bits than trap<n> set in DR6 register (such
 * as BD, BS or BT) indicating that more than one debug condition is
 * met and requires some more action in do_debug().
 *
 * NOTIFY_STOP is returned for all other cases.
 *
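 * (DR_TRAP_BITS covers the four per-breakpoint trap bits DR_TRAP0..3,
 * i.e. the low nibble of DR6.)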
 */
static int hw_breakpoint_handler(struct die_args *args)
{
	int i, cpu, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long dr7, dr6;
	unsigned long *dr6_p;

	/* args->err carries a pointer to the DR6 value */
	dr6_p = (unsigned long *)ERR_PTR(args->err);
	dr6 = *dr6_p;

	/* If it's a single step, TRAP bits are random */
	if (dr6 & DR_STEP)
		return NOTIFY_DONE;

	/* Do an early return if no trap bits are set in DR6 */
	if ((dr6 & DR_TRAP_BITS) == 0)
		return NOTIFY_DONE;

	get_debugreg(dr7, 7);
	/* Disable breakpoints during exception handling */
	set_debugreg(0UL, 7);
	/*
	 * Assert that local interrupts are disabled.
	 * Reset the DRn bits in the virtualized register value.
	 * The ptrace trigger routine will add in whatever is needed.
	 */
	current->thread.debugreg6 &= ~DR_TRAP_BITS;
	cpu = get_cpu();

	/* Handle all the breakpoints that were triggered */
	for (i = 0; i < HBP_NUM; ++i) {
		if (likely(!(dr6 & (DR_TRAP0 << i))))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path.  We can then safely fetch
		 * the breakpoint, use its callback and touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		/*
		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
		 * exception handling
		 */
		(*dr6_p) &= ~(DR_TRAP0 << i);
		/*
		 * bp can be NULL due to lazy debug register switching
		 * or due to concurrent perf counter removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		perf_bp_event(bp, args->regs);

		/*
		 * Set the resume flag to avoid breakpoint recursion when
		 * returning to the breakpointed instruction.
		 */
		if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
			args->regs->flags |= X86_EFLAGS_RF;

		rcu_read_unlock();
	}
	/*
	 * Further processing in do_debug() is needed for a) user-space
	 * breakpoints (to generate signals) and b) when the system has
	 * taken an exception due to multiple causes.
	 */
	if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
	    (dr6 & (~DR_TRAP_BITS)))
		rc = NOTIFY_DONE;

	set_debugreg(dr7, 7);
	put_cpu();

	return rc;
}

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	if (val != DIE_DEBUG)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}