// SPDX-License-Identifier: GPL-2.0
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <byteswap.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/stringify.h>

#include "build-id.h"
#include "event.h"
#include "debug.h"
#include "evlist.h"
#include "symbol.h"
#include <elf.h>

#include "tsc.h"
#include "session.h"
#include "jit.h"
#include "jitdump.h"
#include "genelf.h"
#include "thread.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>

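/*
 * Parsing state for a single jitdump file: the open file handle, the
 * record buffer, byte-swap and timestamp settings derived from the
 * header, and the debug/unwinding payloads buffered until the next
 * code load record.
 */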
struct jit_buf_desc {
	struct perf_data *output;
	struct perf_session *session;
	struct machine *machine;
	union jr_entry *entry;
	void *buf;
	uint64_t sample_type;
	size_t bufsize;
	FILE *in;
	bool needs_bswap; /* handles cross-endianness */
	bool use_arch_timestamp;
	void *debug_data;
	void *unwinding_data;
	uint64_t unwinding_size;
	uint64_t unwinding_mapped_size;
	uint64_t eh_frame_hdr_size;
	size_t nr_debug_entries;
	uint32_t code_load_count;
	u64 bytes_written;
	struct rb_root code_root;
	char dir[PATH_MAX];
};

struct debug_line_info {
	unsigned long vma;
	unsigned int lineno;
	/* The filename format is unspecified, absolute path, relative etc. */
	char const filename[];
};

struct jit_tool {
	struct perf_tool tool;
	struct perf_data output;
	struct perf_data input;
	u64 bytes_written;
};

#define hmax(a, b) ((a) > (b) ? (a) : (b))
#define get_jit_tool(t) (container_of(t, struct jit_tool, tool))

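/*
 * Write a standalone ELF image for one jitted function so that later
 * symbol resolution can map samples back to the jitted symbol. On any
 * failure the partially written file is removed.
 */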
static int
jit_emit_elf(char *filename,
	     const char *sym,
	     uint64_t code_addr,
	     const void *code,
	     int csize,
	     void *debug,
	     int nr_debug_entries,
	     void *unwinding,
	     uint32_t unwinding_header_size,
	     uint32_t unwinding_size)
{
	int ret, fd;

	if (verbose > 0)
		fprintf(stderr, "write ELF image %s\n", filename);

	fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
	if (fd == -1) {
		pr_warning("cannot create jit ELF %s: %s\n", filename, strerror(errno));
		return -1;
	}

	ret = jit_write_elf(fd, code_addr, sym, (const void *)code, csize, debug, nr_debug_entries,
			    unwinding, unwinding_header_size, unwinding_size);

	close(fd);

	if (ret)
		unlink(filename);

	return ret;
}

static void
jit_close(struct jit_buf_desc *jd)
{
	if (!(jd && jd->in))
		return;
	funlockfile(jd->in);
	fclose(jd->in);
	jd->in = NULL;
}

static int
jit_validate_events(struct perf_session *session)
{
	struct evsel *evsel;

	/*
	 * check that all events use CLOCK_MONOTONIC
	 */
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.use_clockid == 0 || evsel->core.attr.clockid != CLOCK_MONOTONIC)
			return -1;
	}
	return 0;
}

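/*
 * Open the jitdump file and validate its header (magic, version,
 * flags), remembering the byte-swap and timestamp settings for the
 * records that follow. The file stays locked until jit_close().
 */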
static int
jit_open(struct jit_buf_desc *jd, const char *name)
{
	struct jitheader header;
	struct jr_prefix *prefix;
	ssize_t bs, bsz = 0;
	void *n, *buf = NULL;
	int ret, retval = -1;

	jd->in = fopen(name, "r");
	if (!jd->in)
		return -1;

	bsz = hmax(sizeof(header), sizeof(*prefix));

	buf = malloc(bsz);
	if (!buf)
		goto error;

	/*
	 * protect from writer modifying the file while we are reading it
	 */
	flockfile(jd->in);

	ret = fread(buf, sizeof(header), 1, jd->in);
	if (ret != 1)
		goto error;

	memcpy(&header, buf, sizeof(header));

	if (header.magic != JITHEADER_MAGIC) {
		if (header.magic != JITHEADER_MAGIC_SW)
			goto error;
		jd->needs_bswap = true;
	}

	if (jd->needs_bswap) {
		header.version = bswap_32(header.version);
		header.total_size = bswap_32(header.total_size);
		header.pid = bswap_32(header.pid);
		header.elf_mach = bswap_32(header.elf_mach);
		header.timestamp = bswap_64(header.timestamp);
		header.flags = bswap_64(header.flags);
	}

	jd->use_arch_timestamp = header.flags & JITDUMP_FLAGS_ARCH_TIMESTAMP;

	if (verbose > 2)
		pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\nuse_arch_timestamp=%d\n",
			 header.version,
			 header.total_size,
			 (unsigned long long)header.timestamp,
			 header.pid,
			 header.elf_mach,
			 jd->use_arch_timestamp);

	if (header.version > JITHEADER_VERSION) {
		pr_err("wrong jitdump version %u, expected " __stringify(JITHEADER_VERSION) "\n",
		       header.version);
		goto error;
	}

	if (header.flags & JITDUMP_FLAGS_RESERVED) {
		pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
		       (unsigned long long)header.flags & JITDUMP_FLAGS_RESERVED);
		goto error;
	}

	if (jd->use_arch_timestamp && !jd->session->time_conv.time_mult) {
		pr_err("jitdump file uses arch timestamps but there is no timestamp conversion\n");
		goto error;
	}

	/*
	 * validate event is using the correct clockid
	 */
	if (!jd->use_arch_timestamp && jit_validate_events(jd->session)) {
		pr_err("error, jitted code must be sampled with perf record -k 1\n");
		goto error;
	}

	bs = header.total_size - sizeof(header);

	if (bs > bsz) {
		n = realloc(buf, bs);
		if (!n)
			goto error;
		bsz = bs;
		buf = n;
	}
	/* consume header bytes beyond the fields we know about */
	if (bs) {
		ret = fread(buf, bs, 1, jd->in);
		if (ret != 1)
			goto error;
	}
	/*
	 * keep dirname for generating files and mmap records
	 */
	strcpy(jd->dir, name);
	dirname(jd->dir);
	free(buf);

	return 0;
error:
	free(buf);
	funlockfile(jd->in);
	fclose(jd->in);
	return retval;
}

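/*
 * Read the next record from the (still locked) jitdump file into
 * jd->buf, growing the buffer as needed and byte-swapping the fields of
 * known record types when the dump was produced with the opposite
 * endianness.
 */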
static union jr_entry *
jit_get_next_entry(struct jit_buf_desc *jd)
{
	struct jr_prefix *prefix;
	union jr_entry *jr;
	void *addr;
	size_t bs, size;
	int id, ret;

	if (!(jd && jd->in))
		return NULL;

	if (jd->buf == NULL) {
		size_t sz = getpagesize();
		if (sz < sizeof(*prefix))
			sz = sizeof(*prefix);

		jd->buf = malloc(sz);
		if (jd->buf == NULL)
			return NULL;

		jd->bufsize = sz;
	}

	prefix = jd->buf;

	/*
	 * file is still locked at this point
	 */
	ret = fread(prefix, sizeof(*prefix), 1, jd->in);
	if (ret != 1)
		return NULL;

	if (jd->needs_bswap) {
		prefix->id = bswap_32(prefix->id);
		prefix->total_size = bswap_32(prefix->total_size);
		prefix->timestamp = bswap_64(prefix->timestamp);
	}
	id = prefix->id;
	size = prefix->total_size;

	bs = (size_t)size;
	if (bs < sizeof(*prefix))
		return NULL;

	if (id >= JIT_CODE_MAX) {
		pr_warning("next_entry: unknown record type %d, skipping\n", id);
	}
	if (bs > jd->bufsize) {
		void *n;
		n = realloc(jd->buf, bs);
		if (!n)
			return NULL;
		jd->buf = n;
		jd->bufsize = bs;
	}

	addr = ((void *)jd->buf) + sizeof(*prefix);

	ret = fread(addr, bs - sizeof(*prefix), 1, jd->in);
	if (ret != 1)
		return NULL;

	jr = (union jr_entry *)jd->buf;

	switch (id) {
	case JIT_CODE_DEBUG_INFO:
		if (jd->needs_bswap) {
			uint64_t n;
			jr->info.code_addr = bswap_64(jr->info.code_addr);
			jr->info.nr_entry = bswap_64(jr->info.nr_entry);
			for (n = 0; n < jr->info.nr_entry; n++) {
				jr->info.entries[n].addr = bswap_64(jr->info.entries[n].addr);
				jr->info.entries[n].lineno = bswap_32(jr->info.entries[n].lineno);
				jr->info.entries[n].discrim = bswap_32(jr->info.entries[n].discrim);
			}
		}
		break;
	case JIT_CODE_UNWINDING_INFO:
		if (jd->needs_bswap) {
			jr->unwinding.unwinding_size = bswap_64(jr->unwinding.unwinding_size);
			jr->unwinding.eh_frame_hdr_size = bswap_64(jr->unwinding.eh_frame_hdr_size);
			jr->unwinding.mapped_size = bswap_64(jr->unwinding.mapped_size);
		}
		break;
	case JIT_CODE_CLOSE:
		break;
	case JIT_CODE_LOAD:
		if (jd->needs_bswap) {
			jr->load.pid = bswap_32(jr->load.pid);
			jr->load.tid = bswap_32(jr->load.tid);
			jr->load.vma = bswap_64(jr->load.vma);
			jr->load.code_addr = bswap_64(jr->load.code_addr);
			jr->load.code_size = bswap_64(jr->load.code_size);
			jr->load.code_index = bswap_64(jr->load.code_index);
		}
		jd->code_load_count++;
		break;
	case JIT_CODE_MOVE:
		if (jd->needs_bswap) {
			jr->move.pid = bswap_32(jr->move.pid);
			jr->move.tid = bswap_32(jr->move.tid);
			jr->move.vma = bswap_64(jr->move.vma);
			jr->move.old_code_addr = bswap_64(jr->move.old_code_addr);
			jr->move.new_code_addr = bswap_64(jr->move.new_code_addr);
			jr->move.code_size = bswap_64(jr->move.code_size);
			jr->move.code_index = bswap_64(jr->move.code_index);
		}
		break;
	case JIT_CODE_MAX:
	default:
		/* skip unknown record (we have already read it) */
		break;
	}
	return jr;
}

static int
jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
{
	ssize_t size;

	size = perf_data__write(jd->output, event, event->header.size);
	if (size < 0)
		return -1;

	jd->bytes_written += size;
	return 0;
}

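/*
 * Convert an architecture timestamp (e.g. TSC) taken from a jitdump
 * record into perf time, using the TIME_CONV parameters recorded in the
 * session; plain CLOCK_MONOTONIC timestamps are returned unchanged.
 */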
static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
{
	struct perf_tsc_conversion tc = { .time_shift = 0, };
	struct perf_record_time_conv *time_conv = &jd->session->time_conv;

	if (!jd->use_arch_timestamp)
		return timestamp;

	tc.time_shift = time_conv->time_shift;
	tc.time_mult = time_conv->time_mult;
	tc.time_zero = time_conv->time_zero;

	/*
	 * The TIME_CONV event was extended with the fields starting at
	 * "time_cycles" when cap_user_time_short is supported. For backward
	 * compatibility, check the event size and only use these extended
	 * fields if the event actually contains them.
	 */
	if (event_contains(*time_conv, time_cycles)) {
		tc.time_cycles = time_conv->time_cycles;
		tc.time_mask = time_conv->time_mask;
		tc.cap_user_time_zero = time_conv->cap_user_time_zero;
		tc.cap_user_time_short = time_conv->cap_user_time_short;

		if (!tc.cap_user_time_zero)
			return 0;
	}

	return tsc_to_perf_time(timestamp, &tc);
}

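/*
 * Handle a JIT_CODE_LOAD record: write the jitted code out as a small
 * ELF image (jitted-<pid>-<idx>.so), synthesize a matching MMAP2 event
 * and inject it, plus a pseudo sample, into the output so the new dso
 * is resolved and gets a build-id in the header.
 */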
static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
{
	struct perf_sample sample;
	union perf_event *event;
	struct perf_tool *tool = jd->session->tool;
	uint64_t code, addr;
	uintptr_t uaddr;
	char *filename;
	struct stat st;
	size_t size;
	u16 idr_size;
	const char *sym;
	uint64_t count;
	int ret, csize, usize;
	pid_t pid, tid;
	struct {
		u32 pid, tid;
		u64 time;
	} *id;

	pid = jr->load.pid;
	tid = jr->load.tid;
	csize = jr->load.code_size;
	usize = jd->unwinding_mapped_size;
	addr = jr->load.code_addr;
	sym = (void *)((unsigned long)jr + sizeof(jr->load));
	code = (unsigned long)jr + jr->load.p.total_size - csize;
	count = jr->load.code_index;
	idr_size = jd->machine->id_hdr_size;

	event = calloc(1, sizeof(*event) + idr_size);
	if (!event)
		return -1;

	filename = event->mmap2.filename;
	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
			jd->dir,
			pid,
			count);

	size++; /* for \0 */

	size = PERF_ALIGN(size, sizeof(u64));
	uaddr = (uintptr_t)code;
	ret = jit_emit_elf(filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries,
			   jd->unwinding_data, jd->eh_frame_hdr_size, jd->unwinding_size);

	if (jd->debug_data && jd->nr_debug_entries) {
		zfree(&jd->debug_data);
		jd->nr_debug_entries = 0;
	}

	if (jd->unwinding_data && jd->eh_frame_hdr_size) {
		zfree(&jd->unwinding_data);
		jd->eh_frame_hdr_size = 0;
		jd->unwinding_mapped_size = 0;
		jd->unwinding_size = 0;
	}

	if (ret) {
		free(event);
		return -1;
	}
	if (stat(filename, &st))
		memset(&st, 0, sizeof(st));

	event->mmap2.header.type = PERF_RECORD_MMAP2;
	event->mmap2.header.misc = PERF_RECORD_MISC_USER;
	event->mmap2.header.size = (sizeof(event->mmap2) -
			(sizeof(event->mmap2.filename) - size) + idr_size);

	event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
	event->mmap2.start = addr;
	event->mmap2.len = usize ? ALIGN_8(csize) + usize : csize;
	event->mmap2.pid = pid;
	event->mmap2.tid = tid;
	event->mmap2.ino = st.st_ino;
	event->mmap2.maj = major(st.st_dev);
	event->mmap2.min = minor(st.st_dev);
	event->mmap2.prot = st.st_mode;
	event->mmap2.flags = MAP_SHARED;
	event->mmap2.ino_generation = 1;

	id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
	if (jd->sample_type & PERF_SAMPLE_TID) {
		id->pid = pid;
		id->tid = tid;
	}
	if (jd->sample_type & PERF_SAMPLE_TIME)
		id->time = convert_timestamp(jd, jr->load.p.timestamp);

	/*
	 * create pseudo sample to induce dso hit increment
	 * use first address as sample address
	 */
	memset(&sample, 0, sizeof(sample));
	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.pid = pid;
	sample.tid = tid;
	sample.time = id->time;
	sample.ip = addr;

	ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
	if (ret)
		return ret;

	ret = jit_inject_event(jd, event);
	/*
	 * mark the dso as used so a build-id is generated in the header
	 */
	if (!ret)
		build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);

	return ret;
}

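/*
 * Handle a JIT_CODE_MOVE record: the jitted code of an existing
 * jitted-<pid>-<idx>.so was relocated, so synthesize and inject a new
 * MMAP2 event covering the new address range.
 */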
static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
{
	struct perf_sample sample;
	union perf_event *event;
	struct perf_tool *tool = jd->session->tool;
	char *filename;
	size_t size;
	struct stat st;
	int usize;
	u16 idr_size;
	int ret;
	pid_t pid, tid;
	struct {
		u32 pid, tid;
		u64 time;
	} *id;

	pid = jr->move.pid;
	tid = jr->move.tid;
	usize = jd->unwinding_mapped_size;
	idr_size = jd->machine->id_hdr_size;

	/*
	 * +16 to account for sample_id_all (hack)
	 */
	event = calloc(1, sizeof(*event) + 16);
	if (!event)
		return -1;

	filename = event->mmap2.filename;
	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
			jd->dir,
			pid,
			jr->move.code_index);

	size++; /* for \0 */

	if (stat(filename, &st))
		memset(&st, 0, sizeof(st));

	size = PERF_ALIGN(size, sizeof(u64));

	event->mmap2.header.type = PERF_RECORD_MMAP2;
	event->mmap2.header.misc = PERF_RECORD_MISC_USER;
	event->mmap2.header.size = (sizeof(event->mmap2) -
			(sizeof(event->mmap2.filename) - size) + idr_size);
	event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
	event->mmap2.start = jr->move.new_code_addr;
	event->mmap2.len = usize ? ALIGN_8(jr->move.code_size) + usize
				 : jr->move.code_size;
	event->mmap2.pid = pid;
	event->mmap2.tid = tid;
	event->mmap2.ino = st.st_ino;
	event->mmap2.maj = major(st.st_dev);
	event->mmap2.min = minor(st.st_dev);
	event->mmap2.prot = st.st_mode;
	event->mmap2.flags = MAP_SHARED;
	event->mmap2.ino_generation = 1;

	id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
	if (jd->sample_type & PERF_SAMPLE_TID) {
		id->pid = pid;
		id->tid = tid;
	}
	if (jd->sample_type & PERF_SAMPLE_TIME)
		id->time = convert_timestamp(jd, jr->load.p.timestamp);

	/*
	 * create pseudo sample to induce dso hit increment
	 * use first address as sample address
	 */
	memset(&sample, 0, sizeof(sample));
	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.pid = pid;
	sample.tid = tid;
	sample.time = id->time;
	sample.ip = jr->move.new_code_addr;

	ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
	if (ret)
		return ret;

	ret = jit_inject_event(jd, event);
	if (!ret)
		build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);

	return ret;
}

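/*
 * Handle a JIT_CODE_DEBUG_INFO record: buffer the source line table so
 * it can be embedded into the ELF image of the next JIT_CODE_LOAD.
 */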
static int jit_repipe_debug_info(struct jit_buf_desc *jd, union jr_entry *jr)
{
	void *data;
	size_t sz;

	if (!(jd && jr))
		return -1;

	sz = jr->prefix.total_size - sizeof(jr->info);
	data = malloc(sz);
	if (!data)
		return -1;

	memcpy(data, &jr->info.entries, sz);

	jd->debug_data = data;

	/*
	 * we must use nr_entry instead of size here because
	 * we cannot distinguish actual entries from padding otherwise
	 */
	jd->nr_debug_entries = jr->info.nr_entry;

	return 0;
}

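/*
 * Handle a JIT_CODE_UNWINDING_INFO record: buffer the eh_frame data so
 * it can be attached to the ELF image of the next JIT_CODE_LOAD.
 */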
static int
jit_repipe_unwinding_info(struct jit_buf_desc *jd, union jr_entry *jr)
{
	void *unwinding_data;
	uint32_t unwinding_data_size;

	if (!(jd && jr))
		return -1;

	unwinding_data_size = jr->prefix.total_size - sizeof(jr->unwinding);
	unwinding_data = malloc(unwinding_data_size);
	if (!unwinding_data)
		return -1;

	memcpy(unwinding_data, &jr->unwinding.unwinding_data,
	       unwinding_data_size);

	jd->eh_frame_hdr_size = jr->unwinding.eh_frame_hdr_size;
	jd->unwinding_size = jr->unwinding.unwinding_size;
	jd->unwinding_mapped_size = jr->unwinding.mapped_size;
	jd->unwinding_data = unwinding_data;

	return 0;
}

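/*
 * Main record loop: read every record from the jitdump file and
 * dispatch it to the matching repipe handler; unknown record types are
 * skipped.
 */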
static int
jit_process_dump(struct jit_buf_desc *jd)
{
	union jr_entry *jr;
	int ret = 0;

	while ((jr = jit_get_next_entry(jd))) {
		switch (jr->prefix.id) {
		case JIT_CODE_LOAD:
			ret = jit_repipe_code_load(jd, jr);
			break;
		case JIT_CODE_MOVE:
			ret = jit_repipe_code_move(jd, jr);
			break;
		case JIT_CODE_DEBUG_INFO:
			ret = jit_repipe_debug_info(jd, jr);
			break;
		case JIT_CODE_UNWINDING_INFO:
			ret = jit_repipe_unwinding_info(jd, jr);
			break;
		default:
			ret = 0;
			continue;
		}
	}
	return ret;
}

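/*
 * Inject all records from one jitdump file into the output perf data.
 */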
static int
jit_inject(struct jit_buf_desc *jd, char *path)
{
	int ret;

	if (verbose > 0)
		fprintf(stderr, "injecting: %s\n", path);

	ret = jit_open(jd, path);
	if (ret)
		return -1;

	ret = jit_process_dump(jd);

	jit_close(jd);

	if (verbose > 0)
		fprintf(stderr, "injected: %s (%d)\n", path, ret);

	return 0;
}

/*
 * The file name must match the pattern .../jit-XXXX.dump,
 * where XXXX is the PID of the process which did the mmap()
 * as captured in the RECORD_MMAP record.
 */
static int
jit_detect(char *mmap_name, pid_t pid)
{
	char *p;
	char *end = NULL;
	pid_t pid2;

	if (verbose > 2)
		fprintf(stderr, "jit marker trying : %s\n", mmap_name);
	/*
	 * get file name
	 */
	p = strrchr(mmap_name, '/');
	if (!p)
		return -1;

	/*
	 * match prefix
	 */
	if (strncmp(p, "/jit-", 5))
		return -1;

	/*
	 * skip prefix
	 */
	p += 5;

	/*
	 * must be followed by a pid
	 */
	if (!isdigit(*p))
		return -1;

	pid2 = (int)strtol(p, &end, 10);
	if (!end)
		return -1;

	/*
	 * pid does not match mmap pid
	 * pid==0 in system-wide mode (synthesized)
	 */
	if (pid && pid2 != pid)
		return -1;
	/*
	 * validate suffix
	 */
	if (strcmp(end, ".dump"))
		return -1;

	if (verbose > 0)
		fprintf(stderr, "jit marker found: %s\n", mmap_name);

	return 0;
}

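/*
 * Remember (via thread->priv) which pids had a jitdump file injected,
 * so that their later //anon mmaps can be dropped in jit_process().
 */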
static void jit_add_pid(struct machine *machine, pid_t pid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, pid);

	if (!thread) {
		pr_err("%s: thread %d not found or created\n", __func__, pid);
		return;
	}

	thread->priv = (void *)1;
}

static bool jit_has_pid(struct machine *machine, pid_t pid)
{
	struct thread *thread = machine__find_thread(machine, pid, pid);

	if (!thread)
		return false;

	return (bool)thread->priv;
}

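/*
 * Entry point from perf inject: if filename is a jitdump marker mmap
 * for pid, parse the file and inject the synthesized events into
 * output. Returns 1 if events were injected (or if a //anon mmap of an
 * already processed pid should be dropped), 0 if the mmap is not
 * jit-related, and a negative value on error.
 */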
int
jit_process(struct perf_session *session,
	    struct perf_data *output,
	    struct machine *machine,
	    char *filename,
	    pid_t pid,
	    u64 *nbytes)
{
	struct evsel *first;
	struct jit_buf_desc jd;
	int ret;

	/*
	 * first, detect marker mmap (i.e., the jitdump mmap)
	 */
	if (jit_detect(filename, pid)) {
		// Strip //anon* mmaps if we processed a jitdump for this pid
		if (jit_has_pid(machine, pid) && (strncmp(filename, "//anon", 6) == 0))
			return 1;

		return 0;
	}

	memset(&jd, 0, sizeof(jd));

	jd.session = session;
	jd.output = output;
	jd.machine = machine;

	/*
	 * track sample_type to compute id_all layout
	 * perf sets the same sample type to all events as of now
	 */
	first = evlist__first(session->evlist);
	jd.sample_type = first->core.attr.sample_type;

	*nbytes = 0;

	ret = jit_inject(&jd, filename);
	if (!ret) {
		jit_add_pid(machine, pid);
		*nbytes = jd.bytes_written;
		ret = 1;
	}

	return ret;
}