/**
 * @file daemon/opd_sfile.c
 * Management of sample files
 *
 * @remark Copyright 2002, 2005 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 */

#include "opd_sfile.h"

#include "opd_trans.h"
#include "opd_kernel.h"
#include "opd_mangling.h"
#include "opd_anon.h"
#include "opd_printf.h"
#include "opd_stats.h"
#include "opd_extended.h"
#include "oprofiled.h"

#include "op_libiberty.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HASH_SIZE 2048
#define HASH_BITS (HASH_SIZE - 1)

/** All sfiles are hashed into these lists */
static struct list_head hashes[HASH_SIZE];

/** All sfiles are on this list. */
static LIST_HEAD(lru_list);


/* FIXME: can undoubtedly improve this hashing */
/** Hash the transient parameters for lookup. */
static unsigned long
sfile_hash(struct transient const * trans, struct kernel_image * ki)
{
	unsigned long val = 0;

	if (separate_thread) {
		val ^= trans->tid << 2;
		val ^= trans->tgid << 2;
	}

	if (separate_kernel || ((trans->anon || separate_lib) && !ki))
		val ^= trans->app_cookie >> (DCOOKIE_SHIFT + 3);

	if (separate_cpu)
		val ^= trans->cpu;

	/* cookie meaningless for kernel, shouldn't hash */
	if (trans->in_kernel) {
		val ^= ki->start >> 14;
		val ^= ki->end >> 7;
		return val & HASH_BITS;
	}

	if (trans->cookie != NO_COOKIE) {
		val ^= trans->cookie >> DCOOKIE_SHIFT;
		return val & HASH_BITS;
	}

	if (!separate_thread)
		val ^= trans->tgid << 2;

	if (trans->anon) {
		val ^= trans->anon->start >> VMA_SHIFT;
		val ^= trans->anon->end >> (VMA_SHIFT + 1);
	}

	return val & HASH_BITS;
}


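/** Return non-zero if the sfile matches the identity described by the
 * cookie/kernel/anon/tgid/tid/cpu parameters, taking the configured
 * separate_* separation options into account.
 */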
static int
do_match(struct sfile const * sf, cookie_t cookie, cookie_t app_cookie,
	struct kernel_image const * ki, struct anon_mapping const * anon,
	pid_t tgid, pid_t tid, unsigned int cpu)
{
	/* this is a simplified check for "is a kernel image" AND
	 * "is the right kernel image". Also handles no-vmlinux
	 * correctly.
	 */
	if (sf->kernel != ki)
		return 0;

	if (separate_thread) {
		if (sf->tid != tid || sf->tgid != tgid)
			return 0;
	}

	if (separate_cpu) {
		if (sf->cpu != cpu)
			return 0;
	}

	if (separate_kernel || ((anon || separate_lib) && !ki)) {
		if (sf->app_cookie != app_cookie)
			return 0;
	}

	/* ignore the cached trans->cookie for kernel images,
	 * it's meaningless and we checked all others already
	 */
	if (ki)
		return 1;

	if (sf->anon != anon)
		return 0;

	return sf->cookie == cookie;
}


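/** Match an sfile against the current transient parameters. */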
static int
trans_match(struct transient const * trans, struct sfile const * sfile,
	struct kernel_image const * ki)
{
	return do_match(sfile, trans->cookie, trans->app_cookie, ki,
	                trans->anon, trans->tgid, trans->tid, trans->cpu);
}


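/** Return non-zero if the two sfiles describe the same sample file. */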
int
sfile_equal(struct sfile const * sf, struct sfile const * sf2)
{
	return do_match(sf, sf2->cookie, sf2->app_cookie, sf2->kernel,
	                sf2->anon, sf2->tgid, sf2->tid, sf2->cpu);
}


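/** Return non-zero if this sfile should be ignored according to the
 * configured image/cookie filters.
 */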
static int
is_sf_ignored(struct sfile const * sf)
{
	if (sf->kernel) {
		if (!is_image_ignored(sf->kernel->name))
			return 0;

		/* Let a dependent kernel image redeem the sf if we're
		 * executing on behalf of an application.
		 */
		return is_cookie_ignored(sf->app_cookie);
	}

	/* Anon regions are always dependent on the application.
	 * Otherwise, let a dependent image redeem the sf.
	 */
	if (sf->anon || is_cookie_ignored(sf->cookie))
		return is_cookie_ignored(sf->app_cookie);

	return 0;
}


/** create a new sfile matching the current transient parameters */
static struct sfile *
create_sfile(unsigned long hash, struct transient const * trans,
	struct kernel_image * ki)
{
	size_t i;
	struct sfile * sf;

	sf = xmalloc(sizeof(struct sfile));

	sf->hashval = hash;

	/* The logic here: if we're in the kernel, the cached cookie is
	 * meaningless (though not the app_cookie if separate_kernel)
	 */
	sf->cookie = trans->in_kernel ? INVALID_COOKIE : trans->cookie;
	sf->app_cookie = INVALID_COOKIE;
	sf->tid = (pid_t)-1;
	sf->tgid = (pid_t)-1;
	sf->cpu = 0;
	sf->kernel = ki;
	sf->anon = trans->anon;

	for (i = 0 ; i < op_nr_counters ; ++i)
		odb_init(&sf->files[i]);

	if (trans->ext)
		opd_ext_sfile_create(sf);
	else
		sf->ext_files = NULL;

	for (i = 0; i < CG_HASH_SIZE; ++i)
		list_init(&sf->cg_hash[i]);

	if (separate_thread)
		sf->tid = trans->tid;
	if (separate_thread || trans->cookie == NO_COOKIE)
		sf->tgid = trans->tgid;

	if (separate_cpu)
		sf->cpu = trans->cpu;

	if (separate_kernel || ((trans->anon || separate_lib) && !ki))
		sf->app_cookie = trans->app_cookie;

	sf->ignored = is_sf_ignored(sf);

	sf->embedded_offset = trans->embedded_offset;

	/* If embedded_offset is a valid value, it means we're
	 * processing a Cell BE SPU profile; in which case, we
	 * want sf->app_cookie to hold trans->app_cookie.
	 */
	if (trans->embedded_offset != UNUSED_EMBEDDED_OFFSET)
		sf->app_cookie = trans->app_cookie;
	return sf;
}


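/** Find the sfile matching the current transient parameters, creating
 * a new one if no existing sfile matches. Returns NULL if the sample
 * cannot be attributed (unknown context, lost kernel image or missing
 * anon mapping).
 */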
struct sfile * sfile_find(struct transient const * trans)
{
	struct sfile * sf;
	struct list_head * pos;
	struct kernel_image * ki = NULL;
	unsigned long hash;

	if (trans->tracing != TRACING_ON) {
		opd_stats[OPD_SAMPLES]++;
		opd_stats[trans->in_kernel == 1 ? OPD_KERNEL : OPD_PROCESS]++;
	}

	/* There is a small race where this *can* happen, see
	 * caller of cpu_buffer_reset() in the kernel
	 */
	if (trans->in_kernel == -1) {
		verbprintf(vsamples, "Losing sample at 0x%llx of unknown provenance.\n",
		           trans->pc);
		opd_stats[OPD_NO_CTX]++;
		return NULL;
	}

	/* we might need a kernel image start/end to hash on */
	if (trans->in_kernel) {
		ki = find_kernel_image(trans);
		if (!ki) {
			verbprintf(vsamples, "Lost kernel sample %llx\n", trans->pc);
			opd_stats[OPD_LOST_KERNEL]++;
			return NULL;
		}
	} else if (trans->cookie == NO_COOKIE && !trans->anon) {
		if (vsamples) {
			char const * app = verbose_cookie(trans->app_cookie);
			printf("No anon map for pc %llx, app %s.\n",
			       trans->pc, app);
		}
		opd_stats[OPD_LOST_NO_MAPPING]++;
		return NULL;
	}

	hash = sfile_hash(trans, ki);
	list_for_each(pos, &hashes[hash]) {
		sf = list_entry(pos, struct sfile, hash);
		if (trans_match(trans, sf, ki)) {
			sfile_get(sf);
			goto lru;
		}
	}

	sf = create_sfile(hash, trans, ki);
	list_add(&sf->hash, &hashes[hash]);

lru:
	sfile_put(sf);
	return sf;
}


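/** Duplicate an sfile's identity into 'to', giving the copy fresh
 * (closed) sample files, empty call-graph hash buckets and unlinked
 * list entries.
 */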
void sfile_dup(struct sfile * to, struct sfile * from)
{
	size_t i;

	memcpy(to, from, sizeof (struct sfile));

	for (i = 0 ; i < op_nr_counters ; ++i)
		odb_init(&to->files[i]);

	opd_ext_sfile_dup(to, from);

	for (i = 0; i < CG_HASH_SIZE; ++i)
		list_init(&to->cg_hash[i]);

	list_init(&to->hash);
	list_init(&to->lru);
}


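/** Return the sample file (odb) to update for the current event:
 * the sfile's own file, or, for a call-graph arc (is_cg), the file of
 * the matching cg_entry, created on demand. Opens the file if needed;
 * returns NULL if it cannot be opened.
 */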
static odb_t * get_file(struct transient const * trans, int is_cg)
{
	struct sfile * sf = trans->current;
	struct sfile * last = trans->last;
	struct cg_entry * cg;
	struct list_head * pos;
	unsigned long hash;
	odb_t * file;

	if (trans->ext != NULL)
		return opd_ext_sfile_get(trans, is_cg);

	if (trans->event >= op_nr_counters) {
		fprintf(stderr, "%s: Invalid counter %lu\n", __FUNCTION__,
			trans->event);
		abort();
	}

	file = &sf->files[trans->event];

	if (!is_cg)
		goto open;

	hash = last->hashval & (CG_HASH_SIZE - 1);

	/* Need to look for the right 'to'. Since we're looking for
	 * 'last', we use its hash.
	 */
	list_for_each(pos, &sf->cg_hash[hash]) {
		cg = list_entry(pos, struct cg_entry, hash);
		if (sfile_equal(last, &cg->to)) {
			file = &cg->to.files[trans->event];
			goto open;
		}
	}

	cg = xmalloc(sizeof(struct cg_entry));
	sfile_dup(&cg->to, last);
	list_add(&cg->hash, &sf->cg_hash[hash]);
	file = &cg->to.files[trans->event];

open:
	if (!odb_open_count(file))
		opd_open_sample_file(file, last, sf, trans->event, is_cg);

	/* Error is logged by opd_open_sample_file */
	if (!odb_open_count(file))
		return NULL;

	return file;
}


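/** Print a one-line description of a sample for verbose output. */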
static void verbose_print_sample(struct sfile * sf, vma_t pc, uint counter)
{
	char const * app = verbose_cookie(sf->app_cookie);
	printf("0x%llx(%u): ", pc, counter);
	if (sf->anon) {
		printf("anon (tgid %u, 0x%llx-0x%llx), ",
		       (unsigned int)sf->anon->tgid,
		       sf->anon->start, sf->anon->end);
	} else if (sf->kernel) {
		printf("kern (name %s, 0x%llx-0x%llx), ", sf->kernel->name,
		       sf->kernel->start, sf->kernel->end);
	} else {
		printf("%s(%llx), ", verbose_cookie(sf->cookie), sf->cookie);
	}
	printf("app %s(%llx)", app, sf->app_cookie);
}


static void verbose_sample(struct transient const * trans, vma_t pc)
{
	printf("Sample ");
	verbose_print_sample(trans->current, pc, trans->event);
	printf("\n");
}


static void
verbose_arc(struct transient const * trans, vma_t from, vma_t to)
{
	printf("Arc ");
	verbose_print_sample(trans->current, from, trans->event);
	printf(" -> 0x%llx", to);
	printf("\n");
}


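/** Record a call-graph arc between the current and previous sample
 * addresses, converting both to image-relative offsets first.
 */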
static void sfile_log_arc(struct transient const * trans)
{
	int err;
	vma_t from = trans->pc;
	vma_t to = trans->last_pc;
	uint64_t key;
	odb_t * file;

	file = get_file(trans, 1);

	/* absolute value -> offset */
	if (trans->current->kernel)
		from -= trans->current->kernel->start;

	if (trans->last->kernel)
		to -= trans->last->kernel->start;

	if (trans->current->anon)
		from -= trans->current->anon->start;

	if (trans->last->anon)
		to -= trans->last->anon->start;

	if (varcs)
		verbose_arc(trans, from, to);

	if (!file) {
		opd_stats[OPD_LOST_SAMPLEFILE]++;
		return;
	}

	/* Possible narrowings to 32-bit value only. */
	key = to & (0xffffffff);
	key |= ((uint64_t)from) << 32;

	err = odb_update_node(file, key);
	if (err) {
		fprintf(stderr, "%s: %s\n", __FUNCTION__, strerror(err));
		abort();
	}
}


void sfile_log_sample(struct transient const * trans)
{
	sfile_log_sample_count(trans, 1);
}


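/** Log 'count' samples at the current pc (converted to an
 * image-relative offset), or record a call-graph arc instead when
 * tracing is on.
 */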
void sfile_log_sample_count(struct transient const * trans,
	unsigned long int count)
{
	int err;
	vma_t pc = trans->pc;
	odb_t * file;

	if (trans->tracing == TRACING_ON) {
		/* can happen if kernel sample falls through the cracks,
		 * see opd_put_sample() */
		if (trans->last)
			sfile_log_arc(trans);
		return;
	}

	file = get_file(trans, 0);

	/* absolute value -> offset */
	if (trans->current->kernel)
		pc -= trans->current->kernel->start;

	if (trans->current->anon)
		pc -= trans->current->anon->start;

	if (vsamples)
		verbose_sample(trans, pc);

	if (!file) {
		opd_stats[OPD_LOST_SAMPLEFILE]++;
		return;
	}

	err = odb_update_node_with_offset(file, (odb_key_t)pc, count);
	if (err) {
		fprintf(stderr, "%s: %s\n", __FUNCTION__, strerror(err));
		abort();
	}
}


static int close_sfile(struct sfile * sf, void * data __attribute__((unused)))
{
	size_t i;

	/* it's OK to close a non-open odb file */
	for (i = 0; i < op_nr_counters; ++i)
		odb_close(&sf->files[i]);

	opd_ext_sfile_close(sf);

	return 0;
}


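/** Close an sfile's sample files and unlink it from the hash and LRU
 * lists; the sfile itself is freed by the caller.
 */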
static void kill_sfile(struct sfile * sf)
{
	close_sfile(sf, NULL);
	list_del(&sf->hash);
	list_del(&sf->lru);
}


static int sync_sfile(struct sfile * sf, void * data __attribute__((unused)))
{
	size_t i;

	for (i = 0; i < op_nr_counters; ++i)
		odb_sync(&sf->files[i]);

	opd_ext_sfile_sync(sf);

	return 0;
}


static int is_sfile_kernel(struct sfile * sf, void * data __attribute__((unused)))
{
	return !!sf->kernel;
}


static int is_sfile_anon(struct sfile * sf, void * data)
{
	return sf->anon == data;
}


typedef int (*sfile_func)(struct sfile *, void *);

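/** Apply 'func' to an sfile and all its call-graph entries, freeing
 * each one for which 'func' returns non-zero (call-graph entries are
 * also freed when the parent sfile is).
 */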
static void
for_one_sfile(struct sfile * sf, sfile_func func, void * data)
{
	size_t i;
	int free_sf = func(sf, data);

	for (i = 0; i < CG_HASH_SIZE; ++i) {
		struct list_head * pos;
		struct list_head * pos2;
		list_for_each_safe(pos, pos2, &sf->cg_hash[i]) {
			struct cg_entry * cg =
				list_entry(pos, struct cg_entry, hash);
			if (free_sf || func(&cg->to, data)) {
				kill_sfile(&cg->to);
				list_del(&cg->hash);
				free(cg);
			}
		}
	}

	if (free_sf) {
		kill_sfile(sf);
		free(sf);
	}
}


static void for_each_sfile(sfile_func func, void * data)
{
	struct list_head * pos;
	struct list_head * pos2;

	list_for_each_safe(pos, pos2, &lru_list) {
		struct sfile * sf = list_entry(pos, struct sfile, lru);
		for_one_sfile(sf, func, data);
	}
}


void sfile_clear_kernel(void)
{
	for_each_sfile(is_sfile_kernel, NULL);
}


void sfile_clear_anon(struct anon_mapping * anon)
{
	for_each_sfile(is_sfile_anon, anon);
}


void sfile_sync_files(void)
{
	for_each_sfile(sync_sfile, NULL);
}


void sfile_close_files(void)
{
	for_each_sfile(close_sfile, NULL);
}


static int always_true(void)
{
	return 1;
}


#define LRU_AMOUNT 256

/*
 * Clear out older sfiles. Note the current sfiles we're using
 * will not be present in this list, due to sfile_get/put() pairs
 * around the caller of this.
 */
int sfile_lru_clear(void)
{
	struct list_head * pos;
	struct list_head * pos2;
	int amount = LRU_AMOUNT;

	if (list_empty(&lru_list))
		return 1;

	list_for_each_safe(pos, pos2, &lru_list) {
		struct sfile * sf;
		if (!--amount)
			break;
		sf = list_entry(pos, struct sfile, lru);
		for_one_sfile(sf, (sfile_func)always_true, NULL);
	}

	return 0;
}


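/** Take an sfile off the LRU list while it is in use; sfile_put()
 * returns it to the tail of the list.
 */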
void sfile_get(struct sfile * sf)
{
	if (sf)
		list_del(&sf->lru);
}


void sfile_put(struct sfile * sf)
{
	if (sf)
		list_add_tail(&sf->lru, &lru_list);
}


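/** Initialise the sfile hash table. */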
void sfile_init(void)
{
	size_t i = 0;

	for (; i < HASH_SIZE; ++i)
		list_init(&hashes[i]);
}