/*
 * intel-bts.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <endian.h>
#include <byteswap.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>

#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "session.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "tsc.h"
#include "auxtrace.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"

#define MAX_TIMESTAMP (~0ULL)

#define INTEL_BTS_ERR_NOINSN  5
#define INTEL_BTS_ERR_LOST    9

#if __BYTE_ORDER == __BIG_ENDIAN
#define le64_to_cpu bswap_64
#else
#define le64_to_cpu
#endif

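/*
 * Per-session decoder state: one instance is attached to the session as
 * session->auxtrace and holds the queues of AUX data, the timestamp-ordered
 * heap, the TSC conversion parameters, and the options and ids used when
 * synthesizing branch samples.
 */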
struct intel_bts {
	struct auxtrace			auxtrace;
	struct auxtrace_queues		queues;
	struct auxtrace_heap		heap;
	u32				auxtrace_type;
	struct perf_session		*session;
	struct machine			*machine;
	bool				sampling_mode;
	bool				snapshot_mode;
	bool				data_queued;
	u32				pmu_type;
	struct perf_tsc_conversion	tc;
	bool				cap_user_time_zero;
	struct itrace_synth_opts	synth_opts;
	bool				sample_branches;
	u32				branches_filter;
	u64				branches_sample_type;
	u64				branches_id;
	size_t				branches_event_size;
	bool				synth_needs_swap;
	unsigned long			num_events;
};

struct intel_bts_queue {
	struct intel_bts	*bts;
	unsigned int		queue_nr;
	struct auxtrace_buffer	*buffer;
	bool			on_heap;
	bool			done;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	u64			time;
	struct intel_pt_insn	intel_pt_insn;
	u32			sample_flags;
};

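/*
 * Layout of one BTS record: a 24-byte little-endian triple of branch-from
 * address, branch-to address and a flags word.  The dump code below treats
 * bit 4 (0x10) of the flags as "branch predicted".
 */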
struct branch {
	u64 from;
	u64 to;
	u64 misc;
};

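/*
 * Hex-dump raw BTS data for 'perf report -D', one 24-byte record per line,
 * decoding each complete record as "from -> to pred|miss".
 */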
static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
			   unsigned char *buf, size_t len)
{
	struct branch *branch;
	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel BTS data: size %zu bytes\n",
		      len);

	while (len) {
		if (len >= br_sz)
			sz = br_sz;
		else
			sz = len;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < sz; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < br_sz; i++)
			color_fprintf(stdout, color, "   ");
		if (len >= br_sz) {
			branch = (struct branch *)buf;
			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
				      le64_to_cpu(branch->from),
				      le64_to_cpu(branch->to),
				      le64_to_cpu(branch->misc) & 0x10 ?
							"pred" : "miss");
		} else {
			color_fprintf(stdout, color, " Bad record!\n");
		}
		pos += sz;
		buf += sz;
		len -= sz;
	}
}

static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
				 size_t len)
{
	printf(".\n");
	intel_bts_dump(bts, buf, len);
}

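/*
 * Report truncated AUX data to the session as a synthesized
 * PERF_RECORD_AUXTRACE_ERROR event.
 */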
static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
			     sample->tid, 0, "Lost trace data");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
						     unsigned int queue_nr)
{
	struct intel_bts_queue *btsq;

	btsq = zalloc(sizeof(struct intel_bts_queue));
	if (!btsq)
		return NULL;

	btsq->bts = bts;
	btsq->queue_nr = queue_nr;
	btsq->pid = -1;
	btsq->tid = -1;
	btsq->cpu = -1;

	return btsq;
}

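/*
 * Lazily allocate the per-queue state and, when not in sampling mode, put the
 * queue on the heap keyed by its first buffer's reference timestamp so that
 * queues can be processed in time order.
 */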
static int intel_bts_setup_queue(struct intel_bts *bts,
				 struct auxtrace_queue *queue,
				 unsigned int queue_nr)
{
	struct intel_bts_queue *btsq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!btsq) {
		btsq = intel_bts_alloc_queue(bts, queue_nr);
		if (!btsq)
			return -ENOMEM;
		queue->priv = btsq;

		if (queue->cpu != -1)
			btsq->cpu = queue->cpu;
		btsq->tid = queue->tid;
	}

	if (bts->sampling_mode)
		return 0;

	if (!btsq->on_heap && !btsq->buffer) {
		int ret;

		btsq->buffer = auxtrace_buffer__next(queue, NULL);
		if (!btsq->buffer)
			return 0;

		ret = auxtrace_heap__add(&bts->heap, queue_nr,
					 btsq->buffer->reference);
		if (ret)
			return ret;
		btsq->on_heap = true;
	}

	return 0;
}

static int intel_bts_setup_queues(struct intel_bts *bts)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bts->queues.nr_queues; i++) {
		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
					    i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline int intel_bts_update_queues(struct intel_bts *bts)
{
	if (bts->queues.new_data) {
		bts->queues.new_data = false;
		return intel_bts_setup_queues(bts);
	}
	return 0;
}

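/*
 * In snapshot mode consecutive snapshots can overlap.  Find the first byte of
 * buf_b that is not already covered by the tail of buf_a, by looking for a
 * record-aligned suffix of buf_a that matches the start of buf_b.
 */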
static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
					     unsigned char *buf_b, size_t len_b)
{
	size_t offs, len;

	if (len_a > len_b)
		offs = len_a - len_b;
	else
		offs = 0;

	for (; offs < len_a; offs += sizeof(struct branch)) {
		len = len_a - offs;
		if (!memcmp(buf_a + offs, buf_b, len))
			return buf_b + len;
	}

	return buf_b;
}

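/*
 * Trim the start of buffer 'b' so that data already seen in the previous
 * buffer is not processed twice; the result is recorded in b->use_data and
 * b->use_size.
 */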
static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
				    struct auxtrace_buffer *b)
{
	struct auxtrace_buffer *a;
	void *start;

	if (b->list.prev == &queue->head)
		return 0;
	a = list_entry(b->list.prev, struct auxtrace_buffer, list);
	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}

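/*
 * Turn one BTS record into a synthesized PERF_RECORD_SAMPLE carrying the
 * branch as ip/addr, honouring the --itrace initial_skip option and, when
 * injecting, writing the full sample into the event itself.
 */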
static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
					 struct branch *branch)
{
	int ret;
	struct intel_bts *bts = btsq->bts;
	union perf_event event;
	struct perf_sample sample = { .ip = 0, };

	if (bts->synth_opts.initial_skip &&
	    bts->num_events++ <= bts->synth_opts.initial_skip)
		return 0;

	event.sample.header.type = PERF_RECORD_SAMPLE;
	event.sample.header.misc = PERF_RECORD_MISC_USER;
	event.sample.header.size = sizeof(struct perf_event_header);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = le64_to_cpu(branch->from);
	sample.pid = btsq->pid;
	sample.tid = btsq->tid;
	sample.addr = le64_to_cpu(branch->to);
	sample.id = btsq->bts->branches_id;
	sample.stream_id = btsq->bts->branches_id;
	sample.period = 1;
	sample.cpu = btsq->cpu;
	sample.flags = btsq->sample_flags;
	sample.insn_len = btsq->intel_pt_insn.length;

	if (bts->synth_opts.inject) {
		event.sample.header.size = bts->branches_event_size;
		ret = perf_event__synthesize_sample(&event,
						    bts->branches_sample_type,
						    0, &sample,
						    bts->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
	if (ret)
		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}

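/*
 * Read the instruction bytes at 'ip' from the task's DSO and decode them with
 * the Intel PT instruction decoder, filling btsq->intel_pt_insn so the branch
 * can be classified and its instruction length recorded.
 */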
static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
	struct machine *machine = btsq->bts->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[1024];
	size_t bufsz;
	ssize_t len;
	int x86_64;
	uint8_t cpumode;
	int err = -1;

	bufsz = intel_pt_insn_max_size();

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = machine__find_thread(machine, -1, btsq->tid);
	if (!thread)
		return -1;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
	if (!al.map || !al.map->dso)
		goto out_put;

	len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, bufsz);
	if (len <= 0)
		goto out_put;

	/* Load maps to ensure dso->is_64_bit has been updated */
	map__load(al.map);

	x86_64 = al.map->dso->is_64_bit;

	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
		goto out_put;

	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
				 pid_t tid, u64 ip)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
			     "Failed to get instruction");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

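/*
 * Derive the sample flags for a record: a zero 'from' marks a trace begin, a
 * zero 'to' marks a trace end, and otherwise the branch instruction at 'from'
 * is decoded.  A user-to-kernel branch that is not a syscall is taken to be
 * an asynchronous kernel entry such as an interrupt.
 */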
static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
				     struct branch *branch)
{
	int err;

	if (!branch->from) {
		if (branch->to)
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_TRACE_BEGIN;
		else
			btsq->sample_flags = 0;
		btsq->intel_pt_insn.length = 0;
	} else if (!branch->to) {
		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		btsq->intel_pt_insn.length = 0;
	} else {
		err = intel_bts_get_next_insn(btsq, branch->from);
		if (err) {
			btsq->sample_flags = 0;
			btsq->intel_pt_insn.length = 0;
			if (!btsq->bts->synth_opts.errors)
				return 0;
			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
						    btsq->pid, btsq->tid,
						    branch->from);
			return err;
		}
		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
		/* Check for an async branch into the kernel */
		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_CALL |
					   PERF_IP_FLAG_SYSCALLRET))
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_CALL |
					     PERF_IP_FLAG_ASYNC |
					     PERF_IP_FLAG_INTERRUPT;
	}

	return 0;
}

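/*
 * Walk the records in one AUX buffer: classify each branch, feed it to the
 * thread stack if requested, apply the --itrace calls/returns filter and
 * synthesize a branch sample for each record that passes.
 */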
static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
				    struct auxtrace_buffer *buffer,
				    struct thread *thread)
{
	struct branch *branch;
	size_t sz, bsz = sizeof(struct branch);
	u32 filter = btsq->bts->branches_filter;
	int err = 0;

	if (buffer->use_data) {
		sz = buffer->use_size;
		branch = buffer->use_data;
	} else {
		sz = buffer->size;
		branch = buffer->data;
	}

	if (!btsq->bts->sample_branches)
		return 0;

	for (; sz > bsz; branch += 1, sz -= bsz) {
		if (!branch->from && !branch->to)
			continue;
		intel_bts_get_branch_type(btsq, branch);
		if (btsq->bts->synth_opts.thread_stack)
			thread_stack__event(thread, btsq->sample_flags,
					    le64_to_cpu(branch->from),
					    le64_to_cpu(branch->to),
					    btsq->intel_pt_insn.length,
					    buffer->buffer_nr + 1);
		if (filter && !(filter & btsq->sample_flags))
			continue;
		err = intel_bts_synth_branch_sample(btsq, branch);
		if (err)
			break;
	}
	return err;
}

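/*
 * Process the next buffer on a queue: resolve the thread, map in the data,
 * fix up snapshot overlap, process the records, then advance to the next
 * buffer, reporting its timestamp back for re-insertion into the heap.
 * Returns 1 when the queue has no more data.
 */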
static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
{
	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;
	struct thread *thread;
	int err;

	if (btsq->done)
		return 1;

	if (btsq->pid == -1) {
		thread = machine__find_thread(btsq->bts->machine, -1,
					      btsq->tid);
		if (thread)
			btsq->pid = thread->pid_;
	} else {
		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
						 btsq->tid);
	}

	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];

	if (!buffer)
		buffer = auxtrace_buffer__next(queue, NULL);

	if (!buffer) {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
		err = 1;
		goto out_put;
	}

	/* Currently there is no support for split buffers */
	if (buffer->consecutive) {
		err = -EINVAL;
		goto out_put;
	}

	if (!buffer->data) {
		int fd = perf_data_file__fd(btsq->bts->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data) {
			err = -ENOMEM;
			goto out_put;
		}
	}

	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
	    intel_bts_do_fix_overlap(queue, buffer)) {
		err = -ENOMEM;
		goto out_put;
	}

	if (!btsq->bts->synth_opts.callchain &&
	    !btsq->bts->synth_opts.thread_stack && thread &&
	    (!old_buffer || btsq->bts->sampling_mode ||
	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);

	err = intel_bts_process_buffer(btsq, buffer, thread);

	auxtrace_buffer__drop_data(buffer);

	btsq->buffer = auxtrace_buffer__next(queue, buffer);
	if (btsq->buffer) {
		if (timestamp)
			*timestamp = btsq->buffer->reference;
	} else {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
	}
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
{
	u64 ts = 0;
	int ret;

	while (1) {
		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0)
			return ret;
		if (ret)
			break;
	}
	return 0;
}

static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
{
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
		struct intel_bts_queue *btsq = queue->priv;

		if (btsq && btsq->tid == tid)
			return intel_bts_flush_queue(btsq);
	}
	return 0;
}

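/*
 * Drain all queues whose next data is no later than 'timestamp', always
 * taking the queue at the top of the heap so that samples are synthesized in
 * time order across CPUs.
 */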
static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
{
	while (1) {
		unsigned int queue_nr;
		struct auxtrace_queue *queue;
		struct intel_bts_queue *btsq;
		u64 ts = 0;
		int ret;

		if (!bts->heap.heap_cnt)
			return 0;

		if (bts->heap.heap_array[0].ordinal > timestamp)
			return 0;

		queue_nr = bts->heap.heap_array[0].queue_nr;
		queue = &bts->queues.queue_array[queue_nr];
		btsq = queue->priv;

		auxtrace_heap__pop(&bts->heap);

		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&bts->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			btsq->on_heap = false;
		}
	}

	return 0;
}

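/*
 * Main per-event hook.  Decoding requires ordered events; the sample time is
 * converted back to a TSC value so that queued AUX data up to that point can
 * be processed first.  Thread exit flushes that thread's queue, and a
 * truncated AUX record is reported as lost data.
 */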
static int intel_bts_process_event(struct perf_session *session,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	u64 timestamp;
	int err;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel BTS requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
	else
		timestamp = 0;

	err = intel_bts_update_queues(bts);
	if (err)
		return err;

	err = intel_bts_process_queues(bts, timestamp);
	if (err)
		return err;
	if (event->header.type == PERF_RECORD_EXIT) {
		err = intel_bts_process_tid_exit(bts, event->fork.tid);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    bts->synth_opts.errors)
		err = intel_bts_lost(bts, sample);

	return err;
}

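/*
 * Queue PERF_RECORD_AUXTRACE data as it is encountered.  For piped input the
 * data follows the event in the stream, so it is copied out here and, in
 * dump mode, printed immediately.
 */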
static int intel_bts_process_auxtrace_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	if (bts->sampling_mode)
		return 0;

	if (!bts->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&bts->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_bts_dump_event(bts, buffer->data,
						     buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

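/*
 * Flush at the end of the session: process everything that remains by
 * draining the queues up to MAX_TIMESTAMP.
 */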
static int intel_bts_flush(struct perf_session *session,
			   struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	int ret;

	if (dump_trace || bts->sampling_mode)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_bts_update_queues(bts);
	if (ret < 0)
		return ret;

	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
}

static void intel_bts_free_queue(void *priv)
{
	struct intel_bts_queue *btsq = priv;

	if (!btsq)
		return;
	free(btsq);
}

static void intel_bts_free_events(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_bts_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	auxtrace_queues__free(queues);
}

static void intel_bts_free(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	auxtrace_heap__free(&bts->heap);
	intel_bts_free_events(session);
	session->auxtrace = NULL;
	free(bts);
}

struct intel_bts_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_bts_event_synth(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct intel_bts_synth *intel_bts_synth =
			container_of(tool, struct intel_bts_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_bts_synth->session,
						 event, NULL);
}

static int intel_bts_synth_event(struct perf_session *session,
				 struct perf_event_attr *attr, u64 id)
{
	struct intel_bts_synth intel_bts_synth;

	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
	intel_bts_synth.session = session;

	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
					   &id, intel_bts_event_synth);
}

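/*
 * Create the synthetic 'branches' event that will carry the decoded samples:
 * copy the exclude_* and sample_id_all settings from the BTS evsel, force a
 * period of 1 and add PERF_SAMPLE_ADDR so each sample holds both branch ends.
 */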
static int intel_bts_synth_events(struct intel_bts *bts,
				  struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == bts->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel BTS data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (bts->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_bts_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		bts->sample_branches = true;
		bts->branches_sample_type = attr.sample_type;
		bts->branches_id = id;
		/*
		 * We only use sample types from PERF_SAMPLE_MASK so we can use
		 * __perf_evsel__sample_size() here.
		 */
		bts->branches_event_size = sizeof(struct sample_event) +
				__perf_evsel__sample_size(attr.sample_type);
	}

	bts->synth_needs_swap = evsel->needs_swap;

	return 0;
}

static const char * const intel_bts_info_fmts[] = {
	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
};

static void intel_bts_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}

u64 intel_bts_auxtrace_info_priv[INTEL_BTS_AUXTRACE_PRIV_SIZE];

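/*
 * Entry point, called when a PERF_RECORD_AUXTRACE_INFO event for Intel BTS is
 * found in the data file: validate the info event, populate struct intel_bts
 * from it, install the auxtrace callbacks on the session, apply the --itrace
 * options and queue up any indexed AUX data.
 */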
int intel_bts_process_auxtrace_info(union perf_event *event,
				    struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
	struct intel_bts *bts;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	bts = zalloc(sizeof(struct intel_bts));
	if (!bts)
		return -ENOMEM;

	err = auxtrace_queues__init(&bts->queues);
	if (err)
		goto err_free;

	bts->session = session;
	bts->machine = &session->machines.host; /* No kvm support */
	bts->auxtrace_type = auxtrace_info->type;
	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
	bts->cap_user_time_zero =
			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];

	bts->sampling_mode = false;

	bts->auxtrace.process_event = intel_bts_process_event;
	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
	bts->auxtrace.flush_events = intel_bts_flush;
	bts->auxtrace.free_events = intel_bts_free_events;
	bts->auxtrace.free = intel_bts_free;
	session->auxtrace = &bts->auxtrace;

	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
			     INTEL_BTS_SNAPSHOT_MODE);

	if (dump_trace)
		return 0;

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		bts->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&bts->synth_opts);
		if (session->itrace_synth_opts)
			bts->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (bts->synth_opts.calls)
		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_TRACE_END;
	if (bts->synth_opts.returns)
		bts->branches_filter |= PERF_IP_FLAG_RETURN |
					PERF_IP_FLAG_TRACE_BEGIN;

	err = intel_bts_synth_events(bts, session);
	if (err)
		goto err_free_queues;

	err = auxtrace_queues__process_index(&bts->queues, session);
	if (err)
		goto err_free_queues;

	if (bts->queues.populated)
		bts->data_queued = true;

	return 0;

err_free_queues:
	auxtrace_queues__free(&bts->queues);
	session->auxtrace = NULL;
err_free:
	free(bts);
	return err;
}