/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <linux/list.h>

#include "../perf.h"
#include "util.h"
#include "evlist.h"
#include "cpumap.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "session.h"
#include "debug.h"
#include "parse-options.h"

#include "intel-pt.h"
#include "intel-bts.h"

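/*
 * An AUX area mmap is set up by writing the desired offset and size to the
 * perf_event_mmap_page of an existing event mmap and then mmapping that
 * range of the same file descriptor. A zero-length request simply means
 * AUX area tracing is not in use for this mmap.
 */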
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

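/*
 * mp->mask is only usable when the mmap length is a power of 2, in which
 * case offsets can be wrapped with a cheap bitwise AND. Otherwise it is
 * left at zero and readers (see __auxtrace_mmap__read()) fall back to the
 * modulo operator.
 */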
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->cpus->map[idx];
		if (evlist->threads)
			mp->tid = thread_map__pid(evlist->threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = thread_map__pid(evlist->threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

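/*
 * Queue arrays start at AUXTRACE_INIT_NR_QUEUES entries and are grown by
 * doubling as higher queue indexes appear. The element count is capped so
 * that nr_queues * sizeof(struct auxtrace_queue) cannot exceed UINT_MAX.
 */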
static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data_file__fd(session->file);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

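/*
 * Buffers are appended to the queue selected by the AUXTRACE event's idx,
 * growing the queue array on demand. The first buffer added to a queue
 * determines the tid/cpu the queue is associated with.
 */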
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

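/*
 * On 32-bit platforms a large buffer is split into chunks of at most
 * BUFFER_LIMIT_FOR_32_BIT bytes so that each chunk can be mapped or read
 * separately. Every chunk after the first is flagged 'consecutive' to
 * record that it continues the preceding chunk without a gap in the
 * trace data.
 */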
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__add_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

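/*
 * There are three ways a buffer's data can later be accessed: directly,
 * when the whole perf.data file is already mapped (one_mmap); via a
 * malloc'd copy that must be freed, when reading from a pipe (the data
 * cannot be re-read); or by mmapping on demand, which on 32-bit may first
 * require splitting oversized buffers.
 */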
static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
					     struct perf_session *session,
					     unsigned int idx,
					     struct auxtrace_buffer *buffer)
{
	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data_file__is_pipe(session->file)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			return -ENOMEM;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		int err;

		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			return err;
	}

	return auxtrace_queues__add_buffer(queues, idx, buffer);
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer *buffer;
	unsigned int idx;
	int err;

	buffer = zalloc(sizeof(struct auxtrace_buffer));
	if (!buffer)
		return -ENOMEM;

	buffer->pid = -1;
	buffer->tid = event->auxtrace.tid;
	buffer->cpu = event->auxtrace.cpu;
	buffer->data_offset = data_offset;
	buffer->offset = event->auxtrace.offset;
	buffer->reference = event->auxtrace.reference;
	buffer->size = event->auxtrace.size;
	idx = event->auxtrace.idx;

	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
	if (err)
		goto out_err;

	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_err:
	auxtrace_buffer__free(buffer);
	return err;
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct auxtrace_event) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

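/*
 * The auxtrace heap is a binary min-heap of (queue_nr, ordinal) pairs,
 * ordered by ordinal (e.g. a timestamp), used to pick which queue to
 * process next. auxtrace_heapify() does a sift-up insertion: it walks the
 * new entry towards the root while parents have larger ordinals.
 */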
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

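/*
 * Popping the root walks a hole down the heap, promoting the smaller
 * child at each level, and finally re-inserts the last element at the
 * hole via auxtrace_heapify().
 */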
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
{
	if (itr)
		return itr->info_priv_size(itr);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

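/*
 * The on-disk index layout is a u64 count followed by that many
 * struct auxtrace_index_entry records, i.e. {file_offset, sz} pairs of
 * u64s. auxtrace_index__process() below reads the same layout back,
 * byte-swapping when the file's endianness differs.
 */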
int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
				      union perf_event *event,
				      struct perf_session *session)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}

s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRIx64"  offset: %#"PRIx64"  ref: %#"PRIx64"  idx: %u  tid: %d  cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
	synth_opts->instructions = true;
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->errors = true;
	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
}

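/*
 * Summary of the option string parsed below; flags may be separated by
 * spaces or commas:
 *   i[<period>[<unit>]]	synthesize instructions events; the unit is
 *				'i' (instructions), 't' (ticks) or
 *				'ms'/'us'/'ns' (time)
 *   b				synthesize branches events
 *   x				synthesize transactions events
 *   e				synthesize error events
 *   d				create a debug log
 *   c				synthesize branches events (calls only)
 *   r				synthesize branches events (returns only)
 *   g[<size>]			synthesize a call chain (default 16, max 1024)
 *   l[<size>]			synthesize last branch entries (default 64,
 *				max 1024)
 * Passing no string selects the defaults set above.
 */
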
/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here. That documentation is introduced in a
 * later changeset, together with 'perf script' support for these options.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

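/*
 * Read new data from the AUX ring buffer and hand it to fn() together
 * with a synthesized PERF_RECORD_AUXTRACE event describing it. The data
 * between the old and new head may wrap the end of the buffer, in which
 * case it is presented as two pieces (data1/data2). In snapshot mode the
 * amount captured can be clamped to snapshot_size and the head/old
 * positions may be overridden by the PMU-specific find_snapshot()
 * callback.
 */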
static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

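/*
 * Illustrative usage (not taken from a real caller): a user embeds
 * struct auxtrace_cache_entry at the start of their own entry type
 * (here a hypothetical struct my_entry with member 'entry'), then:
 *
 *	c = auxtrace_cache__new(10, sizeof(struct my_entry), 200);
 *	e = auxtrace_cache__alloc_entry(c);
 *	(fill in the rest of e)
 *	auxtrace_cache__add(c, key, &e->entry);
 *	e = auxtrace_cache__lookup(c, key);
 *
 * A limit_percent of 200 allows twice as many entries as hash lists
 * before the whole cache is dropped.
 */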
struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}
1392