1 /**
2 * @file daemon/opd_sfile.c
3 * Management of sample files
4 *
5 * @remark Copyright 2002, 2005 OProfile authors
6 * @remark Read the file COPYING
7 *
8 * @author John Levon
9 * @author Philippe Elie
10 */
11
12 #include "opd_sfile.h"
13
14 #include "opd_trans.h"
15 #include "opd_kernel.h"
16 #include "opd_mangling.h"
17 #include "opd_anon.h"
18 #include "opd_printf.h"
19 #include "opd_stats.h"
20 #include "oprofiled.h"
21
22 #include "op_libiberty.h"
23
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
27
28 #define HASH_SIZE 2048
29 #define HASH_BITS (HASH_SIZE - 1)
30
31 /** All sfiles are hashed into these lists */
32 static struct list_head hashes[HASH_SIZE];
33
34 /** All sfiles are on this list. */
35 static LIST_HEAD(lru_list);
36
37
/* FIXME: can undoubtedly improve this hashing */
/**
 * Hash the transient parameters for lookup.
 *
 * Folds in exactly the fields that do_match() compares under the
 * current separation options, so any two sample sources that must
 * share an sfile land in the same bucket of hashes[].
 */
static unsigned long
sfile_hash(struct transient const * trans, struct kernel_image * ki)
{
	unsigned long val = 0;

	if (separate_thread) {
		val ^= trans->tid << 2;
		val ^= trans->tgid << 2;
	}

	/* same condition under which create_sfile() records app_cookie:
	 * separate_kernel, or a dependent anon/lib image */
	if (separate_kernel || ((trans->anon || separate_lib) && !ki))
		val ^= trans->app_cookie >> (DCOOKIE_SHIFT + 3);

	if (separate_cpu)
		val ^= trans->cpu;

	/* cookie meaningless for kernel, shouldn't hash */
	if (trans->in_kernel) {
		val ^= ki->start >> 14;
		val ^= ki->end >> 7;
		return val & HASH_BITS;
	}

	if (trans->cookie != NO_COOKIE) {
		val ^= trans->cookie >> DCOOKIE_SHIFT;
		return val & HASH_BITS;
	}

	/* no cookie: fall back to the tgid (unless mixed in above) */
	if (!separate_thread)
		val ^= trans->tgid << 2;

	if (trans->anon) {
		val ^= trans->anon->start >> VMA_SHIFT;
		val ^= trans->anon->end >> (VMA_SHIFT + 1);
	}

	return val & HASH_BITS;
}
78
79
/**
 * Check whether the sample source identified by (cookie, app_cookie,
 * ki, anon, tgid, tid, cpu) matches sfile @sf, honouring the current
 * separation options.  Returns non-zero on a match.
 */
static int
do_match(struct sfile const * sf, cookie_t cookie, cookie_t app_cookie,
	struct kernel_image const * ki, struct anon_mapping const * anon,
	pid_t tgid, pid_t tid, unsigned int cpu)
{
	/* this is a simplified check for "is a kernel image" AND
	 * "is the right kernel image". Also handles no-vmlinux
	 * correctly.
	 */
	if (sf->kernel != ki)
		return 0;

	if (separate_thread) {
		if (sf->tid != tid || sf->tgid != tgid)
			return 0;
	}

	if (separate_cpu) {
		if (sf->cpu != cpu)
			return 0;
	}

	/* mirrors the condition under which create_sfile() records
	 * the app cookie */
	if (separate_kernel || ((anon || separate_lib) && !ki)) {
		if (sf->app_cookie != app_cookie)
			return 0;
	}

	/* ignore the cached trans->cookie for kernel images,
	 * it's meaningless and we checked all others already
	 */
	if (ki)
		return 1;

	if (sf->anon != anon)
		return 0;

	return sf->cookie == cookie;
}
118
119
120 static int
trans_match(struct transient const * trans,struct sfile const * sfile,struct kernel_image const * ki)121 trans_match(struct transient const * trans, struct sfile const * sfile,
122 struct kernel_image const * ki)
123 {
124 return do_match(sfile, trans->cookie, trans->app_cookie, ki,
125 trans->anon, trans->tgid, trans->tid, trans->cpu);
126 }
127
128
129 static int
sfile_equal(struct sfile const * sf,struct sfile const * sf2)130 sfile_equal(struct sfile const * sf, struct sfile const * sf2)
131 {
132 return do_match(sf, sf2->cookie, sf2->app_cookie, sf2->kernel,
133 sf2->anon, sf2->tgid, sf2->tid, sf2->cpu);
134 }
135
136
137 static int
is_sf_ignored(struct sfile const * sf)138 is_sf_ignored(struct sfile const * sf)
139 {
140 if (sf->kernel) {
141 if (!is_image_ignored(sf->kernel->name))
142 return 0;
143
144 /* Let a dependent kernel image redeem the sf if we're
145 * executing on behalf of an application.
146 */
147 return is_cookie_ignored(sf->app_cookie);
148 }
149
150 /* Anon regions are always dependent on the application.
151 * Otherwise, let a dependent image redeem the sf.
152 */
153 if (sf->anon || is_cookie_ignored(sf->cookie))
154 return is_cookie_ignored(sf->app_cookie);
155
156 return 0;
157 }
158
159
/**
 * create a new sfile matching the current transient parameters
 *
 * @param hash   bucket index computed by sfile_hash(); cached in the
 *               sfile so call-graph lookups can reuse it
 * @param trans  current transient sample parameters
 * @param ki     kernel image the sample hit, or NULL for user space
 *
 * The caller owns the returned sfile and links it into hashes[].
 */
static struct sfile *
create_sfile(unsigned long hash, struct transient const * trans,
	struct kernel_image * ki)
{
	size_t i;
	struct sfile * sf;

	sf = xmalloc(sizeof(struct sfile));

	sf->hashval = hash;

	/* The logic here: if we're in the kernel, the cached cookie is
	 * meaningless (though not the app_cookie if separate_kernel)
	 */
	sf->cookie = trans->in_kernel ? INVALID_COOKIE : trans->cookie;
	sf->app_cookie = INVALID_COOKIE;
	sf->tid = (pid_t)-1;
	sf->tgid = (pid_t)-1;
	sf->cpu = 0;
	sf->kernel = ki;
	sf->anon = trans->anon;

	/* all per-event sample files start out closed */
	for (i = 0 ; i < op_nr_counters ; ++i)
		odb_init(&sf->files[i]);

	/* empty call-graph hash */
	for (i = 0; i < CG_HASH_SIZE; ++i)
		list_init(&sf->cg_hash[i]);

	if (separate_thread)
		sf->tid = trans->tid;
	if (separate_thread || trans->cookie == NO_COOKIE)
		sf->tgid = trans->tgid;

	if (separate_cpu)
		sf->cpu = trans->cpu;

	/* must mirror the condition used in sfile_hash()/do_match() */
	if (separate_kernel || ((trans->anon || separate_lib) && !ki))
		sf->app_cookie = trans->app_cookie;

	sf->ignored = is_sf_ignored(sf);

	sf->embedded_offset = trans->embedded_offset;

	/* If embedded_offset is a valid value, it means we're
	 * processing a Cell BE SPU profile; in which case, we
	 * want sf->app_cookie to hold trans->app_cookie.
	 */
	if (trans->embedded_offset != UNUSED_EMBEDDED_OFFSET)
		sf->app_cookie = trans->app_cookie;
	return sf;
}
212
213
/**
 * Find (or create) the sfile matching the current transient
 * parameters.  The returned sfile is re-queued at the tail of the LRU
 * list.  Returns NULL, after bumping the matching opd_stats counter,
 * when the sample cannot be attributed: unknown provenance, missing
 * kernel image, or no cookie and no anon mapping.
 */
struct sfile * sfile_find(struct transient const * trans)
{
	struct sfile * sf;
	struct list_head * pos;
	struct kernel_image * ki = NULL;
	unsigned long hash;

	/* only count samples when not in tracing mode */
	if (trans->tracing != TRACING_ON) {
		opd_stats[OPD_SAMPLES]++;
		opd_stats[trans->in_kernel == 1 ? OPD_KERNEL : OPD_PROCESS]++;
	}

	/* There is a small race where this *can* happen, see
	 * caller of cpu_buffer_reset() in the kernel
	 */
	if (trans->in_kernel == -1) {
		verbprintf(vsamples, "Losing sample at 0x%llx of unknown provenance.\n",
			trans->pc);
		opd_stats[OPD_NO_CTX]++;
		return NULL;
	}

	/* we might need a kernel image start/end to hash on */
	if (trans->in_kernel) {
		ki = find_kernel_image(trans);
		if (!ki) {
			verbprintf(vsamples, "Lost kernel sample %llx\n", trans->pc);
			opd_stats[OPD_LOST_KERNEL]++;
			return NULL;
		}
	} else if (trans->cookie == NO_COOKIE && !trans->anon) {
		/* user-space sample with neither a dcookie nor an anon
		 * mapping: nothing to attribute it to */
		if (vsamples) {
			char const * app = verbose_cookie(trans->app_cookie);
			printf("No anon map for pc %llx, app %s.\n",
			       trans->pc, app);
		}
		opd_stats[OPD_LOST_NO_MAPPING]++;
		return NULL;
	}

	hash = sfile_hash(trans, ki);
	list_for_each(pos, &hashes[hash]) {
		sf = list_entry(pos, struct sfile, hash);
		if (trans_match(trans, sf, ki)) {
			/* unlink from LRU; sfile_put() below re-queues */
			sfile_get(sf);
			goto lru;
		}
	}

	sf = create_sfile(hash, trans, ki);
	list_add(&sf->hash, &hashes[hash]);

lru:
	sfile_put(sf);
	return sf;
}
270
271
sfile_dup(struct sfile * to,struct sfile * from)272 static void sfile_dup(struct sfile * to, struct sfile * from)
273 {
274 size_t i;
275
276 memcpy(to, from, sizeof (struct sfile));
277
278 for (i = 0 ; i < op_nr_counters ; ++i)
279 odb_init(&to->files[i]);
280
281 for (i = 0; i < CG_HASH_SIZE; ++i)
282 list_init(&to->cg_hash[i]);
283
284 list_init(&to->hash);
285 list_init(&to->lru);
286 }
287
288
/**
 * Return the odb file to log the sample in, opening it on demand.
 *
 * For a plain sample (!is_cg) this is trans->current's own file for
 * the current event.  For a call-graph sample it is the file of the
 * cg_entry in trans->current->cg_hash whose 'to' sfile equals
 * trans->last, created via sfile_dup() on first use.
 *
 * Returns NULL if the sample file could not be opened (the error is
 * already logged by opd_open_sample_file()).  Aborts on an invalid
 * event counter number.
 */
static odb_t * get_file(struct transient const * trans, int is_cg)
{
	struct sfile * sf = trans->current;
	struct sfile * last = trans->last;
	struct cg_entry * cg;
	struct list_head * pos;
	unsigned long hash;
	odb_t * file;

	if (trans->event >= op_nr_counters) {
		fprintf(stderr, "%s: Invalid counter %lu\n", __FUNCTION__,
			trans->event);
		abort();
	}

	file = &sf->files[trans->event];

	if (!is_cg)
		goto open;

	hash = last->hashval & (CG_HASH_SIZE - 1);

	/* Need to look for the right 'to'. Since we're looking for
	 * 'last', we use its hash.
	 */
	list_for_each(pos, &sf->cg_hash[hash]) {
		cg = list_entry(pos, struct cg_entry, hash);
		if (sfile_equal(last, &cg->to)) {
			file = &cg->to.files[trans->event];
			goto open;
		}
	}

	/* no entry yet: clone 'last' into a fresh cg entry */
	cg = xmalloc(sizeof(struct cg_entry));
	sfile_dup(&cg->to, last);
	list_add(&cg->hash, &sf->cg_hash[hash]);
	file = &cg->to.files[trans->event];

open:
	if (!odb_open_count(file))
		opd_open_sample_file(file, last, sf, trans->event, is_cg);

	/* Error is logged by opd_open_sample_file */
	if (!odb_open_count(file))
		return NULL;

	return file;
}
337
338
/** Print a one-line description of a sample's origin (no newline). */
static void verbose_print_sample(struct sfile * sf, vma_t pc, uint counter)
{
	char const * app_name = verbose_cookie(sf->app_cookie);

	printf("0x%llx(%u): ", pc, counter);

	if (sf->anon)
		printf("anon (tgid %u, 0x%llx-0x%llx), ",
		       (unsigned int)sf->anon->tgid,
		       sf->anon->start, sf->anon->end);
	else if (sf->kernel)
		printf("kern (name %s, 0x%llx-0x%llx), ", sf->kernel->name,
		       sf->kernel->start, sf->kernel->end);
	else
		printf("%s(%llx), ", verbose_cookie(sf->cookie), sf->cookie);

	printf("app %s(%llx)", app_name, sf->app_cookie);
}
355
356
/** Verbose-log a plain sample. */
static void verbose_sample(struct transient const * trans, vma_t pc)
{
	fputs("Sample ", stdout);
	verbose_print_sample(trans->current, pc, trans->event);
	putchar('\n');
}
363
364
365 static void
verbose_arc(struct transient const * trans,vma_t from,vma_t to)366 verbose_arc(struct transient const * trans, vma_t from, vma_t to)
367 {
368 printf("Arc ");
369 verbose_print_sample(trans->current, from, trans->event);
370 printf(" -> 0x%llx", to);
371 printf("\n");
372 }
373
374
/**
 * Log a call arc (trans->pc -> trans->last_pc) into the call-graph
 * sample file.  Both VMAs are rebased to image-relative offsets for
 * kernel and anon mappings, then packed into one 64-bit key with
 * 'from' in the high 32 bits and 'to' in the low 32 bits.  Aborts on
 * an odb error.
 */
static void sfile_log_arc(struct transient const * trans)
{
	int err;
	vma_t from = trans->pc;
	vma_t to = trans->last_pc;
	uint64_t key;
	odb_t * file;

	file = get_file(trans, 1);

	/* absolute value -> offset */
	if (trans->current->kernel)
		from -= trans->current->kernel->start;

	if (trans->last->kernel)
		to -= trans->last->kernel->start;

	if (trans->current->anon)
		from -= trans->current->anon->start;

	if (trans->last->anon)
		to -= trans->last->anon->start;

	if (varcs)
		verbose_arc(trans, from, to);

	if (!file) {
		opd_stats[OPD_LOST_SAMPLEFILE]++;
		return;
	}

	/* Possible narrowings to 32-bit value only. */
	key = to & (0xffffffff);
	key |= ((uint64_t)from) << 32;

	err = odb_update_node(file, key);
	if (err) {
		fprintf(stderr, "%s: %s\n", __FUNCTION__, strerror(err));
		abort();
	}
}
416
417
/**
 * Log the sample at trans->pc against the current sfile's sample file
 * for the current event.  In tracing mode the sample is instead logged
 * as an arc from trans->last (when present).  The VMA is rebased to an
 * image-relative offset for kernel and anon mappings.  Aborts on an
 * odb error.
 */
void sfile_log_sample(struct transient const * trans)
{
	int err;
	vma_t pc = trans->pc;
	odb_t * file;

	if (trans->tracing == TRACING_ON) {
		/* can happen if kernel sample falls through the cracks,
		 * see opd_put_sample() */
		if (trans->last)
			sfile_log_arc(trans);
		return;
	}

	file = get_file(trans, 0);

	/* absolute value -> offset */
	if (trans->current->kernel)
		pc -= trans->current->kernel->start;

	if (trans->current->anon)
		pc -= trans->current->anon->start;

	if (vsamples)
		verbose_sample(trans, pc);

	if (!file) {
		opd_stats[OPD_LOST_SAMPLEFILE]++;
		return;
	}

	err = odb_update_node(file, (uint64_t)pc);
	if (err) {
		fprintf(stderr, "%s: %s\n", __FUNCTION__, strerror(err));
		abort();
	}
}
455
456
close_sfile(struct sfile * sf,void * data)457 static int close_sfile(struct sfile * sf, void * data __attribute__((unused)))
458 {
459 size_t i;
460
461 /* it's OK to close a non-open odb file */
462 for (i = 0; i < op_nr_counters; ++i)
463 odb_close(&sf->files[i]);
464
465 return 0;
466 }
467
468
kill_sfile(struct sfile * sf)469 static void kill_sfile(struct sfile * sf)
470 {
471 close_sfile(sf, NULL);
472 list_del(&sf->hash);
473 list_del(&sf->lru);
474 }
475
476
sync_sfile(struct sfile * sf,void * data)477 static int sync_sfile(struct sfile * sf, void * data __attribute__((unused)))
478 {
479 size_t i;
480
481 for (i = 0; i < op_nr_counters; ++i)
482 odb_sync(&sf->files[i]);
483
484 return 0;
485 }
486
487
is_sfile_kernel(struct sfile * sf,void * data)488 static int is_sfile_kernel(struct sfile * sf, void * data __attribute__((unused)))
489 {
490 return !!sf->kernel;
491 }
492
493
is_sfile_anon(struct sfile * sf,void * data)494 static int is_sfile_anon(struct sfile * sf, void * data)
495 {
496 return sf->anon == data;
497 }
498
499
500 typedef int (*sfile_func)(struct sfile *, void *);
501
/**
 * Apply func to sf and to each of its call-graph entries.  A non-zero
 * return from func requests deletion: matching cg entries are killed
 * and freed, and if func votes to delete sf itself, every cg entry is
 * removed along with it before sf is killed and freed.
 */
static void
for_one_sfile(struct sfile * sf, sfile_func func, void * data)
{
	size_t i;
	int free_sf = func(sf, data);

	for (i = 0; i < CG_HASH_SIZE; ++i) {
		struct list_head * pos;
		struct list_head * pos2;
		/* safe variant: entries may be unlinked while walking */
		list_for_each_safe(pos, pos2, &sf->cg_hash[i]) {
			struct cg_entry * cg =
				list_entry(pos, struct cg_entry, hash);
			if (free_sf || func(&cg->to, data)) {
				kill_sfile(&cg->to);
				list_del(&cg->hash);
				free(cg);
			}
		}
	}

	if (free_sf) {
		kill_sfile(sf);
		free(sf);
	}
}
527
528
/** Apply func (via for_one_sfile()) to every sfile on the LRU list. */
static void for_each_sfile(sfile_func func, void * data)
{
	struct list_head * it;
	struct list_head * next;

	/* safe variant: func may delete the sfile under the cursor */
	list_for_each_safe(it, next, &lru_list)
		for_one_sfile(list_entry(it, struct sfile, lru), func, data);
}
539
540
/** Kill every sfile that belongs to a kernel image. */
void sfile_clear_kernel(void)
{
	for_each_sfile(is_sfile_kernel, NULL);
}
545
546
/** Kill every sfile that references the given anon mapping. */
void sfile_clear_anon(struct anon_mapping * anon)
{
	for_each_sfile(is_sfile_anon, anon);
}
551
552
/** Flush all open sample files to disk; no sfile is deleted. */
void sfile_sync_files(void)
{
	for_each_sfile(sync_sfile, NULL);
}
557
558
/** Close all open sample files; sfiles stay allocated and hashed. */
void sfile_close_files(void)
{
	for_each_sfile(close_sfile, NULL);
}
563
564
/**
 * sfile_func callback that marks every sfile for deletion (used by
 * sfile_lru_clear()).  It must carry the exact sfile_func signature:
 * the old "(void)" prototype was cast to sfile_func and then called
 * with two arguments, which is undefined behavior in C (calling a
 * function through an incompatible function pointer type,
 * C11 6.3.2.3p8 / 6.5.2.2p9).
 */
static int always_true(struct sfile * sf __attribute__((unused)),
	void * data __attribute__((unused)))
{
	return 1;
}
569
570
571 #define LRU_AMOUNT 256
572
573 /*
574 * Clear out older sfiles. Note the current sfiles we're using
575 * will not be present in this list, due to sfile_get/put() pairs
576 * around the caller of this.
577 */
sfile_lru_clear(void)578 int sfile_lru_clear(void)
579 {
580 struct list_head * pos;
581 struct list_head * pos2;
582 int amount = LRU_AMOUNT;
583
584 if (list_empty(&lru_list))
585 return 1;
586
587 list_for_each_safe(pos, pos2, &lru_list) {
588 struct sfile * sf;
589 if (!--amount)
590 break;
591 sf = list_entry(pos, struct sfile, lru);
592 for_one_sfile(sf, (sfile_func)always_true, NULL);
593 }
594
595 return 0;
596 }
597
598
sfile_get(struct sfile * sf)599 void sfile_get(struct sfile * sf)
600 {
601 if (sf)
602 list_del(&sf->lru);
603 }
604
605
sfile_put(struct sfile * sf)606 void sfile_put(struct sfile * sf)
607 {
608 if (sf)
609 list_add_tail(&sf->lru, &lru_list);
610 }
611
612
sfile_init(void)613 void sfile_init(void)
614 {
615 size_t i = 0;
616
617 for (; i < HASH_SIZE; ++i)
618 list_init(&hashes[i]);
619 }
620