/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>

#include <asm-generic/sizes.h>
#include <asm/perf_event.h>

#include "../perf_event.h"

struct bts_ctx {
	struct perf_output_handle	handle;
	struct debug_store		ds_back;
	int				state;
};

/* BTS context states: */
enum {
	/* no ongoing AUX transactions */
	BTS_STATE_STOPPED = 0,
	/* AUX transaction is on, BTS tracing is disabled */
	BTS_STATE_INACTIVE,
	/* AUX transaction is on, BTS tracing is running */
	BTS_STATE_ACTIVE,
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);

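/*
 * A BTS record is three 64-bit fields: branch-from, branch-to and flags,
 * i.e. 24 bytes. The safety margin keeps the interrupt threshold far enough
 * from the end of the buffer that records written while the PMI is being
 * delivered still fit.
 */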
#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080

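/*
 * One physically contiguous (possibly high-order) chunk of the AUX buffer:
 * @page:		first page of the chunk
 * @size:		usable bytes, trimmed to a multiple of BTS_RECORD_SIZE
 * @offset:		offset of this chunk within the AUX buffer
 * @displacement:	bytes skipped at the head to keep records aligned
 */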
struct bts_phys {
	struct page	*page;
	unsigned long	size;
	unsigned long	offset;
	unsigned long	displacement;
};

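/*
 * Per-event AUX buffer state; buf[] describes the physical chunks that back
 * the nr_pages of AUX space handed to us by the perf core.
 */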
struct bts_buffer {
	size_t		real_size;	/* multiple of BTS_RECORD_SIZE */
	unsigned int	nr_pages;
	unsigned int	nr_bufs;
	unsigned int	cur_buf;
	bool		snapshot;
	local_t		data_size;
	local_t		head;
	unsigned long	end;
	void		**data_pages;
	struct bts_phys	buf[0];
};

static struct pmu bts_pmu;

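/*
 * Number of pages in one physical chunk: a single page, unless the AUX
 * allocator recorded a higher order in the head page's page_private().
 */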
static int buf_nr_pages(struct page *page)
{
	if (!PagePrivate(page))
		return 1;

	return 1 << page_private(page);
}

static size_t buf_size(struct page *page)
{
	return buf_nr_pages(page) * PAGE_SIZE;
}

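/*
 * Carve the AUX pages the core allocated into an array of physically
 * contiguous chunks; BTS cannot scatter-gather, so the hardware gets
 * pointed at one chunk at a time.
 */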
static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		pg += buf_nr_pages(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical buffer
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		unsigned int __nr_pages;

		page = virt_to_page(pages[pg]);
		__nr_pages = buf_nr_pages(page);
		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;

		pg += __nr_pages;
		offset += __nr_pages << PAGE_SHIFT;
	}

	return buf;
}

static void bts_buffer_free_aux(void *data)
{
	kfree(data);
}

static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
	return buf->buf[idx].offset + buf->buf[idx].displacement;
}

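/*
 * Program the DS area for the current physical chunk: buffer base, write
 * pointer and interrupt threshold. In snapshot mode the threshold is pushed
 * past the absolute maximum so that no PMI is ever raised.
 */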
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
		? ds->bts_buffer_base + thresh
		: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}

static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
	unsigned long index = head - phys->offset;

	memset(page_address(phys->page) + index, 0, phys->size - index);
}

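/*
 * Fold the hardware write pointer back into the AUX head and account the
 * newly written bytes; flag a truncation if we ran into the absolute
 * maximum.
 */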
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			perf_aux_output_flag(&bts->handle,
					     PERF_AUX_FLAG_TRUNCATED);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}
}

static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);

/*
 * Ordering PMU callbacks wrt themselves and the PMI is done by means
 * of bts::state, which:
 *  - is set when bts::handle::event is valid, that is, between
 *    perf_aux_output_begin() and perf_aux_output_end();
 *  - is zero otherwise;
 *  - is ordered against bts::handle::event with a compiler barrier.
 */

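/* Program the buffer and enable BTS with the requested CPL filtering. */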
static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS and bts::state goes ACTIVE
	 */
	wmb();

	/* INACTIVE/STOPPED -> ACTIVE */
	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);

	intel_pmu_enable_bts(config);
}

static void bts_event_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		goto fail_stop;

	if (bts_buffer_reset(buf, &bts->handle))
		goto fail_end_stop;

	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	perf_event_itrace_started(event);
	event->hw.state = 0;

	__bts_event_start(event);

	return;

fail_end_stop:
	perf_aux_output_end(&bts->handle, 0);

fail_stop:
	event->hw.state = PERF_HES_STOPPED;
}

static void __bts_event_stop(struct perf_event *event, int state)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
	WRITE_ONCE(bts->state, state);

	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();
}

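/*
 * Stop tracing, fold any outstanding data into the AUX buffer if requested,
 * and restore the DS area fields that bts_event_start() saved.
 */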
static void bts_event_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = NULL;
	int state = READ_ONCE(bts->state);

	if (state == BTS_STATE_ACTIVE)
		__bts_event_stop(event, BTS_STATE_STOPPED);

	if (state != BTS_STATE_STOPPED)
		buf = perf_get_aux(&bts->handle);

	event->hw.state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_UPDATE) {
		bts_update(bts);

		if (buf) {
			if (buf->snapshot)
				bts->handle.head =
					local_xchg(&buf->data_size,
						   buf->nr_pages << PAGE_SHIFT);
			perf_aux_output_end(&bts->handle,
					    local_xchg(&buf->data_size, 0));
		}

		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
	}
}

void intel_bts_enable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	int state = READ_ONCE(bts->state);

	/*
	 * Here we transition from INACTIVE to ACTIVE;
	 * if we instead are STOPPED from the interrupt handler,
	 * stay that way. Can't be ACTIVE here though.
	 */
	if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
		return;

	if (state == BTS_STATE_STOPPED)
		return;

	if (bts->handle.event)
		__bts_event_start(bts->handle.event);
}

void intel_bts_disable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/*
	 * Here we transition from ACTIVE to INACTIVE;
	 * do nothing for STOPPED or INACTIVE.
	 */
	if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
		return;

	if (bts->handle.event)
		__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}

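/*
 * Pick the window that tracing may write into next: stay in the current
 * physical chunk while it has room, otherwise pad it out and skip ahead to
 * the next one. Returns -ENOSPC when no usable space is left.
 */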
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;
				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}

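/*
 * PMI handler: publish what the hardware has written so far and re-arm
 * tracing on a fresh output handle; on failure, mark the context STOPPED.
 */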
int intel_bts_interrupt(void)
{
	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err = -ENOSPC, handled = 0;

	/*
	 * The only surefire way of knowing if this NMI is ours is by checking
	 * the write ptr against the PMI threshold.
	 */
	if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
		handled = 1;

	/*
	 * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
	 * so we can only be INACTIVE or STOPPED
	 */
	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
		return handled;

	buf = perf_get_aux(&bts->handle);
	if (!buf)
		return handled;

	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return handled;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (buf)
		err = bts_buffer_reset(buf, &bts->handle);

	if (err) {
		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);

		if (buf) {
			/*
			 * BTS_STATE_STOPPED should be visible before
			 * handle::event is cleared
			 */
			barrier();
			perf_aux_output_end(&bts->handle, 0);
		}
	}

	return 1;
}

static void bts_event_del(struct perf_event *event, int mode)
{
	bts_event_stop(event, PERF_EF_UPDATE);
}

static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	event->hw.state = PERF_HES_STOPPED;

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	if (bts->handle.event)
		return -EBUSY;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			return -EINVAL;
	}

	return 0;
}

static void bts_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	x86_del_exclusive(x86_lbr_exclusive_bts);
}

static int bts_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != bts_pmu.type)
		return -ENOENT;

	/*
	 * BTS leaks kernel addresses even when CPL0 tracing is
	 * disabled, so disallow intel_bts driver for unprivileged
	 * users on paranoid systems since it provides trace data
	 * to the user in a zero-copy fashion.
	 *
	 * Note that the default paranoia setting permits unprivileged
	 * users to profile the kernel.
	 */
	if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;

	ret = x86_reserve_hardware();
	if (ret) {
		x86_del_exclusive(x86_lbr_exclusive_bts);
		return ret;
	}

	event->destroy = bts_event_destroy;

	return 0;
}

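/* BTS delivers data through the AUX buffer only; nothing to read here. */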
static void bts_event_read(struct perf_event *event)
{
}

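/*
 * BTS needs a 64-bit DS save area (DTES64) and cannot be used together with
 * PTI, which unmaps the kernel mapping of the AUX buffer while userspace
 * runs.
 */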
static __init int bts_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_PTI)) {
		/*
		 * BTS hardware writes through a virtual memory map; we must
		 * either use the kernel physical map, or the user mapping of
		 * the AUX buffer.
		 *
		 * However, since this driver supports per-CPU and per-task inherit
		 * we cannot use the user mapping since it will not be available
		 * if we're not running the owning process.
		 *
		 * With PTI we can't use the kernel map either, because it's not
		 * there when we run userspace.
		 *
		 * For now, disable this driver when using PTI.
		 */
		return -ENODEV;
	}

	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
				  PERF_PMU_CAP_EXCLUSIVE;
	bts_pmu.task_ctx_nr	= perf_sw_context;
	bts_pmu.event_init	= bts_event_init;
	bts_pmu.add		= bts_event_add;
	bts_pmu.del		= bts_event_del;
	bts_pmu.start		= bts_event_start;
	bts_pmu.stop		= bts_event_stop;
	bts_pmu.read		= bts_event_read;
	bts_pmu.setup_aux	= bts_buffer_setup_aux;
	bts_pmu.free_aux	= bts_buffer_free_aux;

	return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
arch_initcall(bts_init);