/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>
#include <linux/kaiser.h>

#include <asm-generic/sizes.h>
#include <asm/perf_event.h>

#include "../perf_event.h"

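/*
 * Per-CPU software state for BTS tracing: the perf AUX output handle, a
 * backup of the debug store (DS) fields that get overwritten while tracing,
 * and the BTS_STATE_* machine described below.
 */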
struct bts_ctx {
	struct perf_output_handle	handle;
	struct debug_store		ds_back;
	int				state;
};

/* BTS context states: */
enum {
	/* no ongoing AUX transactions */
	BTS_STATE_STOPPED = 0,
	/* AUX transaction is on, BTS tracing is disabled */
	BTS_STATE_INACTIVE,
	/* AUX transaction is on, BTS tracing is running */
	BTS_STATE_ACTIVE,
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);

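/*
 * Each BTS record is three 8-byte fields (branch-from, branch-to, flags),
 * hence 24 bytes. The safety margin keeps the PMI threshold far enough from
 * the end of the current chunk that the interrupt fires before records are
 * dropped; see bts_config_buffer() and bts_buffer_reset() below.
 */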
#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080

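/*
 * One physically contiguous chunk of the AUX buffer: the first page of a
 * high-order allocation, its size, its byte offset within the AUX buffer,
 * and the displacement needed to keep records BTS_RECORD_SIZE-aligned.
 */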
struct bts_phys {
	struct page	*page;
	unsigned long	size;
	unsigned long	offset;
	unsigned long	displacement;
};

struct bts_buffer {
	size_t		real_size;	/* multiple of BTS_RECORD_SIZE */
	unsigned int	nr_pages;
	unsigned int	nr_bufs;
	unsigned int	cur_buf;
	bool		snapshot;
	local_t		data_size;
	local_t		lost;
	local_t		head;
	unsigned long	end;
	void		**data_pages;
	struct bts_phys	buf[0];
};

struct pmu bts_pmu;

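/*
 * For high-order AUX allocations, the perf ring buffer stores the allocation
 * order in page_private() of the first page, so a chunk covers
 * PAGE_SIZE << page_private(page) bytes.
 */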
static size_t buf_size(struct page *page)
{
	return 1 << (PAGE_SHIFT + page_private(page));
}

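/*
 * With page table isolation enabled, the AUX pages were also mapped into the
 * user (shadow) page tables at setup time; tear those mappings down before
 * freeing the descriptor.
 */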
static void bts_buffer_free_aux(void *data)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct bts_buffer *buf = data;
	int nbuf;

	for (nbuf = 0; nbuf < buf->nr_bufs; nbuf++) {
		struct page *page = buf->buf[nbuf].page;
		void *kaddr = page_address(page);
		size_t page_size = buf_size(page);

		kaiser_remove_mapping((unsigned long)kaddr, page_size);
	}
#endif
	kfree(data);
}

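/*
 * Build the software view of the AUX buffer: walk the pages handed in by the
 * perf core, group them into physically contiguous chunks, and record each
 * chunk's offset, displacement and usable (record-aligned) size. With KAISER
 * enabled, every chunk is also mapped into the shadow page tables; those
 * mappings are undone in bts_buffer_free_aux().
 */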
static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
			return NULL;
		pg += 1 << page_private(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical buffer
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		void *kaddr = pages[pg];
		size_t page_size;

		page = virt_to_page(kaddr);
		page_size = buf_size(page);

		if (kaiser_add_mapping((unsigned long)kaddr,
				       page_size, __PAGE_KERNEL) < 0) {
			buf->nr_bufs = nbuf;
			bts_buffer_free_aux(buf);
			return NULL;
		}

		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = page_size - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;

		pg += page_size >> PAGE_SHIFT;
		offset += page_size;
	}

	return buf;
}

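/* Offset of the first record-aligned byte of chunk @idx within the AUX buffer. */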
static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
	return buf->buf[idx].offset + buf->buf[idx].displacement;
}

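/*
 * Program the DS area for the current chunk: buffer base, write index,
 * absolute maximum and PMI threshold. In snapshot mode the threshold is
 * placed past the absolute maximum so that no interrupt is ever raised.
 */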
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
				      ? ds->bts_buffer_base + thresh
				      : ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}

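/* Zero-fill the unused tail of the chunk from @head to its end. */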
static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
	unsigned long index = head - phys->offset;

	memset(page_address(phys->page) + index, 0, phys->size - index);
}

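/*
 * Fold the hardware write pointer (ds->bts_index) back into the software
 * head and data_size counters; in non-snapshot mode also note whether the
 * hardware hit the absolute maximum and therefore dropped records.
 */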
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			local_inc(&buf->lost);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}
}

static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);

/*
 * Ordering PMU callbacks wrt themselves and the PMI is done by means
 * of bts::state, which:
 *  - is set when bts::handle::event is valid, that is, between
 *    perf_aux_output_begin() and perf_aux_output_end();
 *  - is zero otherwise;
 *  - is ordered against bts::handle::event with a compiler barrier.
 */

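/*
 * Program the DS area for the current chunk and turn tracing on. The config
 * bits derived from the event attributes select user and/or kernel branch
 * logging and whether a PMI is requested at the interrupt threshold.
 */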
static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS and bts::state goes ACTIVE
	 */
	wmb();

	/* INACTIVE/STOPPED -> ACTIVE */
	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);

	intel_pmu_enable_bts(config);
}

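/*
 * Start an AUX transaction for @event: grab the AUX buffer, set up the first
 * chunk, and stash the original DS fields in ds_back so that bts_event_stop()
 * can restore them once tracing is done.
 */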
static void bts_event_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		goto fail_stop;

	if (bts_buffer_reset(buf, &bts->handle))
		goto fail_end_stop;

	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	event->hw.itrace_started = 1;
	event->hw.state = 0;

	__bts_event_start(event);

	return;

fail_end_stop:
	perf_aux_output_end(&bts->handle, 0, false);

fail_stop:
	event->hw.state = PERF_HES_STOPPED;
}

static void __bts_event_stop(struct perf_event *event, int state)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
	WRITE_ONCE(bts->state, state);

	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();
}

static void bts_event_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = NULL;
	int state = READ_ONCE(bts->state);

	if (state == BTS_STATE_ACTIVE)
		__bts_event_stop(event, BTS_STATE_STOPPED);

	if (state != BTS_STATE_STOPPED)
		buf = perf_get_aux(&bts->handle);

	event->hw.state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_UPDATE) {
		bts_update(bts);

		if (buf) {
			if (buf->snapshot)
				bts->handle.head =
					local_xchg(&buf->data_size,
						   buf->nr_pages << PAGE_SHIFT);

			perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
					    !!local_xchg(&buf->lost, 0));
		}

		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
	}
}

void intel_bts_enable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	int state = READ_ONCE(bts->state);

	/*
	 * Here we transition from INACTIVE to ACTIVE;
	 * if we instead are STOPPED from the interrupt handler,
	 * stay that way. Can't be ACTIVE here though.
	 */
	if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
		return;

	if (state == BTS_STATE_STOPPED)
		return;

	if (bts->handle.event)
		__bts_event_start(bts->handle.event);
}

void intel_bts_disable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/*
	 * Here we transition from ACTIVE to INACTIVE;
	 * do nothing for STOPPED or INACTIVE.
	 */
	if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
		return;

	if (bts->handle.event)
		__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}

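/*
 * Work out how much room is left in the AUX buffer starting at the current
 * head. If the current chunk is nearly full, pad it out and skip ahead to the
 * next one; cap the usable space at the handle size and just past the wakeup
 * watermark, and record the result in buf->end for bts_config_buffer() to
 * turn into the hardware threshold.
 */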
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;
				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}

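/*
 * Called from the PMU interrupt handler. Returns nonzero if the interrupt
 * looked like it was ours (the DS write pointer reached the threshold); when
 * there is new data, close the current AUX transaction and immediately open
 * a new one so tracing can continue.
 */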
int intel_bts_interrupt(void)
{
	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err = -ENOSPC, handled = 0;

	/*
	 * The only surefire way of knowing if this NMI is ours is by checking
	 * the write ptr against the PMI threshold.
	 */
	if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
		handled = 1;

	/*
	 * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
	 * so we can only be INACTIVE or STOPPED
	 */
	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
		return handled;

	buf = perf_get_aux(&bts->handle);
	if (!buf)
		return handled;

	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return handled;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
			    !!local_xchg(&buf->lost, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (buf)
		err = bts_buffer_reset(buf, &bts->handle);

	if (err) {
		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);

		if (buf) {
			/*
			 * BTS_STATE_STOPPED should be visible before
			 * cleared handle::event
			 */
			barrier();
			perf_aux_output_end(&bts->handle, 0, false);
		}
	}

	return 1;
}

static void bts_event_del(struct perf_event *event, int mode)
{
	bts_event_stop(event, PERF_EF_UPDATE);
}

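/*
 * There is only one BTS facility per CPU, so refuse to add an event if the
 * fixed BTS counter is already in use or another BTS event currently owns
 * the per-CPU handle.
 */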
static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	event->hw.state = PERF_HES_STOPPED;

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	if (bts->handle.event)
		return -EBUSY;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			return -EINVAL;
	}

	return 0;
}

static void bts_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	x86_del_exclusive(x86_lbr_exclusive_bts);
}

static int bts_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != bts_pmu.type)
		return -ENOENT;

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;

	/*
	 * BTS leaks kernel addresses even when CPL0 tracing is
	 * disabled, so disallow intel_bts driver for unprivileged
	 * users on paranoid systems since it provides trace data
	 * to the user in a zero-copy fashion.
	 *
	 * Note that the default paranoia setting permits unprivileged
	 * users to profile the kernel.
	 */
	if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	ret = x86_reserve_hardware();
	if (ret) {
		x86_del_exclusive(x86_lbr_exclusive_bts);
		return ret;
	}

	event->destroy = bts_event_destroy;

	return 0;
}

static void bts_event_read(struct perf_event *event)
{
}

static __init int bts_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
		return -ENODEV;

	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
				  PERF_PMU_CAP_EXCLUSIVE;
	bts_pmu.task_ctx_nr	= perf_sw_context;
	bts_pmu.event_init	= bts_event_init;
	bts_pmu.add		= bts_event_add;
	bts_pmu.del		= bts_event_del;
	bts_pmu.start		= bts_event_start;
	bts_pmu.stop		= bts_event_stop;
	bts_pmu.read		= bts_event_read;
	bts_pmu.setup_aux	= bts_buffer_setup_aux;
	bts_pmu.free_aux	= bts_buffer_free_aux;

	return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
arch_initcall(bts_init);