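/*
 * builtin-kmem.c
 *
 * perf kmem: record and analyze kernel memory (kmem:*) tracepoint events,
 * reporting per-callsite and per-pointer allocation statistics, internal
 * fragmentation, and cross-CPU ("ping-pong") alloc/free behavior.
 *
 * Typical usage, inferred from the subcommands and options defined below:
 *
 *	perf kmem record [<command>]	# record kmem events system-wide
 *	perf kmem stat --caller		# per-callsite statistics
 *	perf kmem stat --alloc -l 20	# top 20 allocation pointers
 */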
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"

#include "util/debug.h"

#include <linux/rbtree.h>
#include <linux/string.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static int			alloc_flag;
static int			caller_flag;

static int			alloc_lines = -1;
static int			caller_lines = -1;

static bool			raw_ip;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

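/*
 * Track one statistic node per pointer in an rbtree keyed by the allocated
 * address.  Repeated allocations returning the same pointer accumulate into
 * the existing node; the call site and CPU are updated to those of the most
 * recent allocation.
 */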
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}

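/*
 * Same aggregation as insert_alloc_stat(), but keyed by call site, so each
 * node totals all allocations made from one kernel location.
 */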
static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}

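/*
 * Handler for kmem:kmalloc and kmem:kmem_cache_alloc: pull the tracepoint
 * fields out of the sample and feed both the per-pointer and per-callsite
 * trees, keeping running totals for the summary.
 */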
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}

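/*
 * Handler for the _node variants: additionally compare the node of the
 * allocating CPU with the node requested by the tracepoint to count
 * cross-node allocations.
 */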
static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	int ret = perf_evsel__process_alloc_event(evsel, sample);

	if (!ret) {
		int node1 = cpu__get_node(sample->cpu),
		    node2 = perf_evsel__intval(evsel, sample, "node");

		if (node1 != node2)
			nr_cross_allocs++;
	}

	return ret;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

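/*
 * Binary-search a stat tree with the same comparator that built it; the
 * stack-local key carries both possible sort fields, and only the one the
 * comparator reads matters.
 */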
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

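/*
 * Handler for kmem:kfree and kmem:kmem_cache_free: if the object is freed
 * on a different CPU than it was allocated on, count a "ping-pong" against
 * both the allocation and its call site.
 */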
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

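/*
 * Top-level sample callback: resolve the thread for bookkeeping, then
 * dispatch to the per-tracepoint handler installed on the evsel.
 */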
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(evsel, sample);
	}

	return 0;
}

static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.mmap		 = perf_event__process_mmap,
	.mmap2		 = perf_event__process_mmap2,
	.ordered_events	 = true,
};

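/* Internal fragmentation as a percentage: 100 * (alloc - req) / alloc. */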
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

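/*
 * Print up to n_lines entries of a sorted result tree.  In caller mode the
 * call site is resolved to a kernel symbol+offset unless --raw-ip was given.
 */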
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ...                                | ...             | ...             | ...    | ...      | ...   \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

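/*
 * Insert into a sorted result tree, comparing with each requested sort key
 * in turn until one differentiates the two entries; larger entries go left
 * (cmp > 0), so rb_first() traversal yields descending order.
 */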
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}

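/*
 * Bind each kmem tracepoint to its handler, replay the recorded events
 * through the session, then sort and print both report flavors.
 */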
static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree",			perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out;
	sort_result();
	print_result(session);
out:
	return err;
}

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	((int)ARRAY_SIZE(avail_sorts))

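/*
 * Look up one --sort key by name; each use gets its own copy of the
 * dimension so the same key can sit on both the caller and alloc lists.
 */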
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

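/*
 * Split a comma-separated --sort argument into a list of sort dimensions.
 * strsep() advances its cursor, so iterate with a separate 'pos' and free
 * the original strdup() buffer.
 */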
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

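/*
 * --sort, like --line, applies to whichever of --caller/--alloc appeared
 * most recently on the command line; the relative flag values encode that
 * ordering.
 */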
static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}

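/*
 * --caller and --alloc leapfrog each other's flag so that the option given
 * last ends up with the higher value and "wins" for later options.
 */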
static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

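/*
 * "perf kmem record" is a thin wrapper around "perf record": prepend the
 * fixed argument list (system-wide, all kmem tracepoints) to whatever the
 * user passed and hand it to cmd_record().
 */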
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
	"record", "-a", "-R", "-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const default_sort_order = "frag,hit,bytes";
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};
	int ret = -1;

	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (!strncmp(argv[0], "rec", 3)) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	session = perf_session__new(&file, false, &perf_kmem);
	if (session == NULL)
		return -1;

	symbol__init(&session->header.env);

	if (!strcmp(argv[0], "stat")) {
		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}