Lines matching references to ff (struct feat_fd) in tools/perf/util/header.c
101 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size) in __do_write_fd() argument
103 ssize_t ret = writen(ff->fd, buf, size); in __do_write_fd()
110 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size) in __do_write_buf() argument
114 size_t new_size = ff->size; in __do_write_buf()
117 if (size + ff->offset > max_size) in __do_write_buf()
120 while (size > (new_size - ff->offset)) in __do_write_buf()
124 if (ff->size < new_size) { in __do_write_buf()
125 addr = realloc(ff->buf, new_size); in __do_write_buf()
128 ff->buf = addr; in __do_write_buf()
129 ff->size = new_size; in __do_write_buf()
132 memcpy(ff->buf + ff->offset, buf, size); in __do_write_buf()
133 ff->offset += size; in __do_write_buf()
139 int do_write(struct feat_fd *ff, const void *buf, size_t size) in do_write() argument
141 if (!ff->buf) in do_write()
142 return __do_write_fd(ff, buf, size); in do_write()
143 return __do_write_buf(ff, buf, size); in do_write()
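
The matches above for __do_write_fd(), __do_write_buf() and do_write() show the two output modes behind struct feat_fd: with ff->buf unset the payload goes straight to ff->fd, otherwise it is appended at ff->offset to a growing in-memory buffer (pipe mode). Below is a minimal standalone sketch of that dispatch. The struct layout, the doubling growth policy and the plain write() loop standing in for perf's writen() are assumptions made so the example compiles outside the perf tree; header.c differs in detail (for instance it also enforces the max_size cap visible above).

/*
 * Minimal standalone sketch of the dual-mode feat_fd write path.
 * Assumptions: struct layout, doubling growth policy, write() loop in
 * place of perf's writen().  Illustration only, not the header.c code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct feat_fd_sketch {
    int     fd;      /* target file when writing directly */
    void   *buf;     /* non-NULL selects in-memory (pipe) mode */
    size_t  size;    /* current allocation of buf */
    size_t  offset;  /* bytes already placed in buf */
};

/* fd mode: keep calling write() until everything has been written */
static int sketch_write_fd(struct feat_fd_sketch *ff, const void *buf, size_t size)
{
    const char *p = buf;

    while (size) {
        ssize_t ret = write(ff->fd, p, size);

        if (ret < 0) {
            if (errno == EINTR)
                continue;
            return -1;
        }
        p += ret;
        size -= (size_t)ret;
    }
    return 0;
}

/* buffer mode: grow ff->buf as needed, then append at ff->offset */
static int sketch_write_buf(struct feat_fd_sketch *ff, const void *buf, size_t size)
{
    size_t new_size = ff->size;
    void *addr;

    while (size > new_size - ff->offset)
        new_size = new_size ? new_size * 2 : 4096;

    if (ff->size < new_size) {
        addr = realloc(ff->buf, new_size);
        if (!addr)
            return -1;
        ff->buf = addr;
        ff->size = new_size;
    }

    memcpy((char *)ff->buf + ff->offset, buf, size);
    ff->offset += size;
    return 0;
}

/* dispatch exactly as the do_write() matches show: a set buf means buffer mode */
static int sketch_do_write(struct feat_fd_sketch *ff, const void *buf, size_t size)
{
    if (!ff->buf)
        return sketch_write_fd(ff, buf, size);
    return sketch_write_buf(ff, buf, size);
}

int main(void)
{
    struct feat_fd_sketch ff = { .fd = -1 };
    const char msg[] = "feature payload";

    /* seed a tiny allocation to force the buffer path */
    ff.buf = malloc(1);
    if (!ff.buf)
        return 1;
    ff.size = 1;

    if (sketch_do_write(&ff, msg, sizeof(msg)))
        return 1;
    printf("buffered %zu bytes\n", ff.offset);
    free(ff.buf);
    return 0;
}

Seeding ff.buf in main() only forces the buffer path; in perf the caller picks the mode by how it initializes the feat_fd (compare the .fd-only and .buf initializations in the matches further down).
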
147 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size) in do_write_bitmap() argument
152 ret = do_write(ff, &size, sizeof(size)); in do_write_bitmap()
157 ret = do_write(ff, p + i, sizeof(*p)); in do_write_bitmap()
166 int write_padded(struct feat_fd *ff, const void *bf, in write_padded() argument
170 int err = do_write(ff, bf, count); in write_padded()
173 err = do_write(ff, zero_buf, count_aligned - count); in write_padded()
182 static int do_write_string(struct feat_fd *ff, const char *str) in do_write_string() argument
191 ret = do_write(ff, &len, sizeof(len)); in do_write_string()
195 return write_padded(ff, str, olen, len); in do_write_string()
198 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size) in __do_read_fd() argument
200 ssize_t ret = readn(ff->fd, addr, size); in __do_read_fd()
207 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size) in __do_read_buf() argument
209 if (size > (ssize_t)ff->size - ff->offset) in __do_read_buf()
212 memcpy(addr, ff->buf + ff->offset, size); in __do_read_buf()
213 ff->offset += size; in __do_read_buf()
219 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size) in __do_read() argument
221 if (!ff->buf) in __do_read()
222 return __do_read_fd(ff, addr, size); in __do_read()
223 return __do_read_buf(ff, addr, size); in __do_read()
226 static int do_read_u32(struct feat_fd *ff, u32 *addr) in do_read_u32() argument
230 ret = __do_read(ff, addr, sizeof(*addr)); in do_read_u32()
234 if (ff->ph->needs_swap) in do_read_u32()
239 static int do_read_u64(struct feat_fd *ff, u64 *addr) in do_read_u64() argument
243 ret = __do_read(ff, addr, sizeof(*addr)); in do_read_u64()
247 if (ff->ph->needs_swap) in do_read_u64()
252 static char *do_read_string(struct feat_fd *ff) in do_read_string() argument
257 if (do_read_u32(ff, &len)) in do_read_string()
264 if (!__do_read(ff, buf, len)) { in do_read_string()
278 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize) in do_read_bitmap() argument
284 ret = do_read_u64(ff, &size); in do_read_bitmap()
295 ret = do_read_u64(ff, p + i); in do_read_bitmap()
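
The read-side matches mirror this: __do_read() picks the fd or buffer path, and do_read_u32()/do_read_u64() byte-swap the value afterwards when ff->ph->needs_swap is set, i.e. when the perf.data file was produced on a machine of the opposite endianness. The following is a compilable sketch of the buffer path only; the plain bool stands in for ff->ph->needs_swap and the bswap_32()/bswap_64() macros from <byteswap.h> stand in for perf's own swap helpers, so treat it as an illustration rather than the header.c implementation.

/*
 * Sketch of the buffer-mode read path plus needs_swap handling.
 * Assumptions: a bool replaces ff->ph->needs_swap, <byteswap.h> macros
 * replace perf's swap helpers.
 */
#include <byteswap.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct feat_rd_sketch {
    const unsigned char *buf;  /* feature section payload */
    size_t size;               /* payload size */
    size_t offset;             /* current read position */
    bool needs_swap;           /* file endianness differs from the host */
};

/* never read past the end of the section */
static int sketch_do_read(struct feat_rd_sketch *ff, void *addr, size_t size)
{
    if (size > ff->size - ff->offset)
        return -1;
    memcpy(addr, ff->buf + ff->offset, size);
    ff->offset += size;
    return 0;
}

static int sketch_read_u32(struct feat_rd_sketch *ff, uint32_t *v)
{
    if (sketch_do_read(ff, v, sizeof(*v)))
        return -1;
    if (ff->needs_swap)
        *v = bswap_32(*v);
    return 0;
}

static int sketch_read_u64(struct feat_rd_sketch *ff, uint64_t *v)
{
    if (sketch_do_read(ff, v, sizeof(*v)))
        return -1;
    if (ff->needs_swap)
        *v = bswap_64(*v);
    return 0;
}

int main(void)
{
    /* a u32 count followed by a u64 value, both stored big-endian */
    const unsigned char payload[] = {
        0x00, 0x00, 0x00, 0x02,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
    };
    struct feat_rd_sketch ff = {
        .buf = payload, .size = sizeof(payload), .needs_swap = true,
    };
    uint32_t count;
    uint64_t value;

    if (sketch_read_u32(&ff, &count) || sketch_read_u64(&ff, &value))
        return 1;
    /* prints 2 and 256 on a little-endian host */
    printf("count=%u value=%" PRIu64 "\n", count, value);
    return 0;
}
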
308 static int write_tracing_data(struct feat_fd *ff, in write_tracing_data() argument
311 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) in write_tracing_data()
314 return read_tracing_data(ff->fd, &evlist->core.entries); in write_tracing_data()
318 static int write_build_id(struct feat_fd *ff, in write_build_id() argument
324 session = container_of(ff->ph, struct perf_session, header); in write_build_id()
329 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) in write_build_id()
332 err = perf_session__write_buildid_table(session, ff); in write_build_id()
342 static int write_hostname(struct feat_fd *ff, in write_hostname() argument
352 return do_write_string(ff, uts.nodename); in write_hostname()
355 static int write_osrelease(struct feat_fd *ff, in write_osrelease() argument
365 return do_write_string(ff, uts.release); in write_osrelease()
368 static int write_arch(struct feat_fd *ff, in write_arch() argument
378 return do_write_string(ff, uts.machine); in write_arch()
381 static int write_version(struct feat_fd *ff, in write_version() argument
384 return do_write_string(ff, perf_version_string); in write_version()
387 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc) in __write_cpudesc() argument
435 ret = do_write_string(ff, s); in __write_cpudesc()
442 static int write_cpudesc(struct feat_fd *ff, in write_cpudesc() argument
470 ret = __write_cpudesc(ff, cpuinfo_procs[i]); in write_cpudesc()
478 static int write_nrcpus(struct feat_fd *ff, in write_nrcpus() argument
493 ret = do_write(ff, &nrc, sizeof(nrc)); in write_nrcpus()
497 return do_write(ff, &nra, sizeof(nra)); in write_nrcpus()
500 static int write_event_desc(struct feat_fd *ff, in write_event_desc() argument
512 ret = do_write(ff, &nre, sizeof(nre)); in write_event_desc()
520 ret = do_write(ff, &sz, sizeof(sz)); in write_event_desc()
525 ret = do_write(ff, &evsel->core.attr, sz); in write_event_desc()
536 ret = do_write(ff, &nri, sizeof(nri)); in write_event_desc()
543 ret = do_write_string(ff, evsel__name(evsel)); in write_event_desc()
549 ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64)); in write_event_desc()
556 static int write_cmdline(struct feat_fd *ff, in write_cmdline() argument
568 ret = do_write(ff, &n, sizeof(n)); in write_cmdline()
572 ret = do_write_string(ff, buf); in write_cmdline()
577 ret = do_write_string(ff, perf_env.cmdline_argv[i]); in write_cmdline()
585 static int write_cpu_topology(struct feat_fd *ff, in write_cpu_topology() argument
596 ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists)); in write_cpu_topology()
601 ret = do_write_string(ff, tp->package_cpus_list[i]); in write_cpu_topology()
605 ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists)); in write_cpu_topology()
610 ret = do_write_string(ff, tp->core_cpus_list[i]); in write_cpu_topology()
620 ret = do_write(ff, &perf_env.cpu[j].core_id, in write_cpu_topology()
624 ret = do_write(ff, &perf_env.cpu[j].socket_id, in write_cpu_topology()
633 ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists)); in write_cpu_topology()
638 ret = do_write_string(ff, tp->die_cpus_list[i]); in write_cpu_topology()
644 ret = do_write(ff, &perf_env.cpu[j].die_id, in write_cpu_topology()
657 static int write_total_mem(struct feat_fd *ff, in write_total_mem() argument
678 ret = do_write(ff, &mem, sizeof(mem)); in write_total_mem()
686 static int write_numa_topology(struct feat_fd *ff, in write_numa_topology() argument
697 ret = do_write(ff, &tp->nr, sizeof(u32)); in write_numa_topology()
704 ret = do_write(ff, &n->node, sizeof(u32)); in write_numa_topology()
708 ret = do_write(ff, &n->mem_total, sizeof(u64)); in write_numa_topology()
712 ret = do_write(ff, &n->mem_free, sizeof(u64)); in write_numa_topology()
716 ret = do_write_string(ff, n->cpus); in write_numa_topology()
740 static int write_pmu_mappings(struct feat_fd *ff, in write_pmu_mappings() argument
754 ret = do_write(ff, &pmu_num, sizeof(pmu_num)); in write_pmu_mappings()
759 ret = do_write(ff, &pmu->type, sizeof(pmu->type)); in write_pmu_mappings()
763 ret = do_write_string(ff, pmu->name); in write_pmu_mappings()
783 static int write_group_desc(struct feat_fd *ff, in write_group_desc() argument
790 ret = do_write(ff, &nr_groups, sizeof(nr_groups)); in write_group_desc()
800 ret = do_write_string(ff, name); in write_group_desc()
804 ret = do_write(ff, &leader_idx, sizeof(leader_idx)); in write_group_desc()
808 ret = do_write(ff, &nr_members, sizeof(nr_members)); in write_group_desc()
864 static int write_cpuid(struct feat_fd *ff, in write_cpuid() argument
874 return do_write_string(ff, buffer); in write_cpuid()
877 static int write_branch_stack(struct feat_fd *ff __maybe_unused, in write_branch_stack()
883 static int write_auxtrace(struct feat_fd *ff, in write_auxtrace() argument
889 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) in write_auxtrace()
892 session = container_of(ff->ph, struct perf_session, header); in write_auxtrace()
894 err = auxtrace_index__write(ff->fd, &session->auxtrace_index); in write_auxtrace()
900 static int write_clockid(struct feat_fd *ff, in write_clockid() argument
903 return do_write(ff, &ff->ph->env.clock.clockid_res_ns, in write_clockid()
904 sizeof(ff->ph->env.clock.clockid_res_ns)); in write_clockid()
907 static int write_clock_data(struct feat_fd *ff, in write_clock_data() argument
917 ret = do_write(ff, &data32, sizeof(data32)); in write_clock_data()
922 data32 = ff->ph->env.clock.clockid; in write_clock_data()
924 ret = do_write(ff, &data32, sizeof(data32)); in write_clock_data()
929 data64 = &ff->ph->env.clock.tod_ns; in write_clock_data()
931 ret = do_write(ff, data64, sizeof(*data64)); in write_clock_data()
936 data64 = &ff->ph->env.clock.clockid_ns; in write_clock_data()
938 return do_write(ff, data64, sizeof(*data64)); in write_clock_data()
941 static int write_hybrid_topology(struct feat_fd *ff, in write_hybrid_topology() argument
952 ret = do_write(ff, &tp->nr, sizeof(u32)); in write_hybrid_topology()
959 ret = do_write_string(ff, n->pmu_name); in write_hybrid_topology()
963 ret = do_write_string(ff, n->cpus); in write_hybrid_topology()
975 static int write_dir_format(struct feat_fd *ff, in write_dir_format() argument
981 session = container_of(ff->ph, struct perf_session, header); in write_dir_format()
987 return do_write(ff, &data->dir.version, sizeof(data->dir.version)); in write_dir_format()
1042 static int write_bpf_prog_info(struct feat_fd *ff, in write_bpf_prog_info() argument
1045 struct perf_env *env = &ff->ph->env; in write_bpf_prog_info()
1052 ret = do_write(ff, &env->bpf_progs.infos_cnt, in write_bpf_prog_info()
1070 ret = do_write(ff, node->info_linear, len); in write_bpf_prog_info()
1084 static int write_bpf_btf(struct feat_fd *ff, in write_bpf_btf() argument
1087 struct perf_env *env = &ff->ph->env; in write_bpf_btf()
1094 ret = do_write(ff, &env->bpf_progs.btfs_cnt, in write_bpf_btf()
1107 ret = do_write(ff, &node->id, in write_bpf_btf()
1266 static int write_cache(struct feat_fd *ff, in write_cache() argument
1280 ret = do_write(ff, &version, sizeof(u32)); in write_cache()
1284 ret = do_write(ff, &cnt, sizeof(u32)); in write_cache()
1292 ret = do_write(ff, &c->v, sizeof(u32)); \ in write_cache()
1303 ret = do_write_string(ff, (const char *) c->v); \ in write_cache()
1319 static int write_stat(struct feat_fd *ff __maybe_unused, in write_stat()
1325 static int write_sample_time(struct feat_fd *ff, in write_sample_time() argument
1330 ret = do_write(ff, &evlist->first_sample_time, in write_sample_time()
1335 return do_write(ff, &evlist->last_sample_time, in write_sample_time()
1478 static int write_mem_topology(struct feat_fd *ff __maybe_unused, in write_mem_topology()
1494 ret = do_write(ff, &version, sizeof(version)); in write_mem_topology()
1498 ret = do_write(ff, &bsize, sizeof(bsize)); in write_mem_topology()
1502 ret = do_write(ff, &nr, sizeof(nr)); in write_mem_topology()
1510 ret = do_write(ff, &n->v, sizeof(n->v)); \ in write_mem_topology()
1519 ret = do_write_bitmap(ff, n->set, n->size); in write_mem_topology()
1529 static int write_compressed(struct feat_fd *ff __maybe_unused, in write_compressed()
1534 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver)); in write_compressed()
1538 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type)); in write_compressed()
1542 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level)); in write_compressed()
1546 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio)); in write_compressed()
1550 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len)); in write_compressed()
1553 static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu, in __write_pmu_caps() argument
1559 ret = do_write(ff, &pmu->nr_caps, sizeof(pmu->nr_caps)); in __write_pmu_caps()
1564 ret = do_write_string(ff, caps->name); in __write_pmu_caps()
1568 ret = do_write_string(ff, caps->value); in __write_pmu_caps()
1574 ret = do_write_string(ff, pmu->name); in __write_pmu_caps()
1582 static int write_cpu_pmu_caps(struct feat_fd *ff, in write_cpu_pmu_caps() argument
1595 return __write_pmu_caps(ff, cpu_pmu, false); in write_cpu_pmu_caps()
1598 static int write_pmu_caps(struct feat_fd *ff, in write_pmu_caps() argument
1619 ret = do_write(ff, &nr_pmu, sizeof(nr_pmu)); in write_pmu_caps()
1638 ret = __write_pmu_caps(ff, pmu, true); in write_pmu_caps()
1645 static void print_hostname(struct feat_fd *ff, FILE *fp) in print_hostname() argument
1647 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname); in print_hostname()
1650 static void print_osrelease(struct feat_fd *ff, FILE *fp) in print_osrelease() argument
1652 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release); in print_osrelease()
1655 static void print_arch(struct feat_fd *ff, FILE *fp) in print_arch() argument
1657 fprintf(fp, "# arch : %s\n", ff->ph->env.arch); in print_arch()
1660 static void print_cpudesc(struct feat_fd *ff, FILE *fp) in print_cpudesc() argument
1662 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc); in print_cpudesc()
1665 static void print_nrcpus(struct feat_fd *ff, FILE *fp) in print_nrcpus() argument
1667 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online); in print_nrcpus()
1668 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail); in print_nrcpus()
1671 static void print_version(struct feat_fd *ff, FILE *fp) in print_version() argument
1673 fprintf(fp, "# perf version : %s\n", ff->ph->env.version); in print_version()
1676 static void print_cmdline(struct feat_fd *ff, FILE *fp) in print_cmdline() argument
1680 nr = ff->ph->env.nr_cmdline; in print_cmdline()
1685 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]); in print_cmdline()
1687 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]); in print_cmdline()
1705 static void print_cpu_topology(struct feat_fd *ff, FILE *fp) in print_cpu_topology() argument
1707 struct perf_header *ph = ff->ph; in print_cpu_topology()
1762 static void print_clockid(struct feat_fd *ff, FILE *fp) in print_clockid() argument
1765 ff->ph->env.clock.clockid_res_ns * 1000); in print_clockid()
1768 static void print_clock_data(struct feat_fd *ff, FILE *fp) in print_clock_data() argument
1777 if (!ff->ph->env.clock.enabled) { in print_clock_data()
1783 ref = ff->ph->env.clock.tod_ns; in print_clock_data()
1789 ref = ff->ph->env.clock.clockid_ns; in print_clock_data()
1794 clockid = ff->ph->env.clock.clockid; in print_clock_data()
1811 static void print_hybrid_topology(struct feat_fd *ff, FILE *fp) in print_hybrid_topology() argument
1817 for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) { in print_hybrid_topology()
1818 n = &ff->ph->env.hybrid_nodes[i]; in print_hybrid_topology()
1823 static void print_dir_format(struct feat_fd *ff, FILE *fp) in print_dir_format() argument
1828 session = container_of(ff->ph, struct perf_session, header); in print_dir_format()
1835 static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp) in print_bpf_prog_info() argument
1837 struct perf_env *env = &ff->ph->env; in print_bpf_prog_info()
1859 static void print_bpf_btf(struct feat_fd *ff, FILE *fp) in print_bpf_btf() argument
1861 struct perf_env *env = &ff->ph->env; in print_bpf_btf()
1931 static struct evsel *read_event_desc(struct feat_fd *ff) in read_event_desc() argument
1940 if (do_read_u32(ff, &nre)) in read_event_desc()
1943 if (do_read_u32(ff, &sz)) in read_event_desc()
1967 if (__do_read(ff, buf, sz)) in read_event_desc()
1970 if (ff->ph->needs_swap) in read_event_desc()
1978 if (do_read_u32(ff, &nr)) in read_event_desc()
1981 if (ff->ph->needs_swap) in read_event_desc()
1984 evsel->name = do_read_string(ff); in read_event_desc()
1998 if (do_read_u64(ff, id)) in read_event_desc()
2018 static void print_event_desc(struct feat_fd *ff, FILE *fp) in print_event_desc() argument
2024 if (ff->events) in print_event_desc()
2025 events = ff->events; in print_event_desc()
2027 events = read_event_desc(ff); in print_event_desc()
2053 ff->events = NULL; in print_event_desc()
2056 static void print_total_mem(struct feat_fd *ff, FILE *fp) in print_total_mem() argument
2058 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem); in print_total_mem()
2061 static void print_numa_topology(struct feat_fd *ff, FILE *fp) in print_numa_topology() argument
2066 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) { in print_numa_topology()
2067 n = &ff->ph->env.numa_nodes[i]; in print_numa_topology()
2078 static void print_cpuid(struct feat_fd *ff, FILE *fp) in print_cpuid() argument
2080 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid); in print_cpuid()
2083 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp) in print_branch_stack()
2088 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp) in print_auxtrace()
2093 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp) in print_stat()
2098 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused) in print_cache() argument
2103 for (i = 0; i < ff->ph->env.caches_cnt; i++) { in print_cache()
2105 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]); in print_cache()
2109 static void print_compressed(struct feat_fd *ff, FILE *fp) in print_compressed() argument
2112 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown", in print_compressed()
2113 ff->ph->env.comp_level, ff->ph->env.comp_ratio); in print_compressed()
2135 static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp) in print_cpu_pmu_caps() argument
2137 __print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps, in print_cpu_pmu_caps()
2138 ff->ph->env.cpu_pmu_caps, (char *)"cpu"); in print_cpu_pmu_caps()
2141 static void print_pmu_caps(struct feat_fd *ff, FILE *fp) in print_pmu_caps() argument
2145 for (int i = 0; i < ff->ph->env.nr_pmus_with_caps; i++) { in print_pmu_caps()
2146 pmu_caps = &ff->ph->env.pmu_caps[i]; in print_pmu_caps()
2152 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) in print_pmu_mappings() argument
2159 pmu_num = ff->ph->env.nr_pmu_mappings; in print_pmu_mappings()
2165 str = ff->ph->env.pmu_mappings; in print_pmu_mappings()
2188 static void print_group_desc(struct feat_fd *ff, FILE *fp) in print_group_desc() argument
2194 session = container_of(ff->ph, struct perf_session, header); in print_group_desc()
2210 static void print_sample_time(struct feat_fd *ff, FILE *fp) in print_sample_time() argument
2216 session = container_of(ff->ph, struct perf_session, header); in print_sample_time()
2245 static void print_mem_topology(struct feat_fd *ff, FILE *fp) in print_mem_topology() argument
2250 nodes = ff->ph->env.memory_nodes; in print_mem_topology()
2251 nr = ff->ph->env.nr_memory_nodes; in print_mem_topology()
2254 nr, ff->ph->env.memory_bsize); in print_mem_topology()
2257 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp); in print_mem_topology()
2423 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2425 free(ff->ph->env.__feat_env); \
2426 ff->ph->env.__feat_env = do_read_string(ff); \
2427 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2438 static int process_tracing_data(struct feat_fd *ff, void *data) in process_tracing_data() argument
2440 ssize_t ret = trace_report(ff->fd, data, false); in process_tracing_data()
2446 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused) in process_build_id() argument
2448 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size)) in process_build_id()
2453 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused) in process_nrcpus() argument
2458 ret = do_read_u32(ff, &nr_cpus_avail); in process_nrcpus()
2462 ret = do_read_u32(ff, &nr_cpus_online); in process_nrcpus()
2465 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail; in process_nrcpus()
2466 ff->ph->env.nr_cpus_online = (int)nr_cpus_online; in process_nrcpus()
2470 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused) in process_total_mem() argument
2475 ret = do_read_u64(ff, &total_mem); in process_total_mem()
2478 ff->ph->env.total_mem = (unsigned long long)total_mem; in process_total_mem()
2512 process_event_desc(struct feat_fd *ff, void *data __maybe_unused) in process_event_desc() argument
2515 struct evsel *evsel, *events = read_event_desc(ff); in process_event_desc()
2520 session = container_of(ff->ph, struct perf_session, header); in process_event_desc()
2525 ff->events = events; in process_event_desc()
2537 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused) in process_cmdline() argument
2542 if (do_read_u32(ff, &nr)) in process_cmdline()
2545 ff->ph->env.nr_cmdline = nr; in process_cmdline()
2547 cmdline = zalloc(ff->size + nr + 1); in process_cmdline()
2556 str = do_read_string(ff); in process_cmdline()
2565 ff->ph->env.cmdline = cmdline; in process_cmdline()
2566 ff->ph->env.cmdline_argv = (const char **) argv; in process_cmdline()
2575 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) in process_cpu_topology() argument
2580 int cpu_nr = ff->ph->env.nr_cpus_avail; in process_cpu_topology()
2582 struct perf_header *ph = ff->ph; in process_cpu_topology()
2589 if (do_read_u32(ff, &nr)) in process_cpu_topology()
2598 str = do_read_string(ff); in process_cpu_topology()
2610 if (do_read_u32(ff, &nr)) in process_cpu_topology()
2617 str = do_read_string(ff); in process_cpu_topology()
2633 if (ff->size <= size) { in process_cpu_topology()
2648 if (do_read_u32(ff, &nr)) in process_cpu_topology()
2654 if (do_read_u32(ff, &nr)) in process_cpu_topology()
2671 if (ff->size <= size) in process_cpu_topology()
2674 if (do_read_u32(ff, &nr)) in process_cpu_topology()
2681 str = do_read_string(ff); in process_cpu_topology()
2694 if (do_read_u32(ff, &nr)) in process_cpu_topology()
2709 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) in process_numa_topology() argument
2716 if (do_read_u32(ff, &nr)) in process_numa_topology()
2727 if (do_read_u32(ff, &n->node)) in process_numa_topology()
2730 if (do_read_u64(ff, &n->mem_total)) in process_numa_topology()
2733 if (do_read_u64(ff, &n->mem_free)) in process_numa_topology()
2736 str = do_read_string(ff); in process_numa_topology()
2746 ff->ph->env.nr_numa_nodes = nr; in process_numa_topology()
2747 ff->ph->env.numa_nodes = nodes; in process_numa_topology()
2755 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) in process_pmu_mappings() argument
2762 if (do_read_u32(ff, &pmu_num)) in process_pmu_mappings()
2770 ff->ph->env.nr_pmu_mappings = pmu_num; in process_pmu_mappings()
2775 if (do_read_u32(ff, &type)) in process_pmu_mappings()
2778 name = do_read_string(ff); in process_pmu_mappings()
2789 ff->ph->env.msr_pmu_type = type; in process_pmu_mappings()
2794 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL); in process_pmu_mappings()
2802 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) in process_group_desc() argument
2814 if (do_read_u32(ff, &nr_groups)) in process_group_desc()
2817 ff->ph->env.nr_groups = nr_groups; in process_group_desc()
2828 desc[i].name = do_read_string(ff); in process_group_desc()
2832 if (do_read_u32(ff, &desc[i].leader_idx)) in process_group_desc()
2835 if (do_read_u32(ff, &desc[i].nr_members)) in process_group_desc()
2842 session = container_of(ff->ph, struct perf_session, header); in process_group_desc()
2885 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused) in process_auxtrace() argument
2890 session = container_of(ff->ph, struct perf_session, header); in process_auxtrace()
2892 err = auxtrace_index__process(ff->fd, ff->size, session, in process_auxtrace()
2893 ff->ph->needs_swap); in process_auxtrace()
2899 static int process_cache(struct feat_fd *ff, void *data __maybe_unused) in process_cache() argument
2904 if (do_read_u32(ff, &version)) in process_cache()
2910 if (do_read_u32(ff, &cnt)) in process_cache()
2921 if (do_read_u32(ff, &c.v))\ in process_cache()
2931 c.v = do_read_string(ff); \ in process_cache()
2943 ff->ph->env.caches = caches; in process_cache()
2944 ff->ph->env.caches_cnt = cnt; in process_cache()
2951 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused) in process_sample_time() argument
2957 session = container_of(ff->ph, struct perf_session, header); in process_sample_time()
2959 ret = do_read_u64(ff, &first_sample_time); in process_sample_time()
2963 ret = do_read_u64(ff, &last_sample_time); in process_sample_time()
2972 static int process_mem_topology(struct feat_fd *ff, in process_mem_topology() argument
2979 if (do_read_u64(ff, &version)) in process_mem_topology()
2985 if (do_read_u64(ff, &bsize)) in process_mem_topology()
2988 if (do_read_u64(ff, &nr)) in process_mem_topology()
2999 if (do_read_u64(ff, &n.v)) \ in process_mem_topology()
3007 if (do_read_bitmap(ff, &n.set, &n.size)) in process_mem_topology()
3013 ff->ph->env.memory_bsize = bsize; in process_mem_topology()
3014 ff->ph->env.memory_nodes = nodes; in process_mem_topology()
3015 ff->ph->env.nr_memory_nodes = nr; in process_mem_topology()
3024 static int process_clockid(struct feat_fd *ff, in process_clockid() argument
3027 if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns)) in process_clockid()
3033 static int process_clock_data(struct feat_fd *ff, in process_clock_data() argument
3040 if (do_read_u32(ff, &data32)) in process_clock_data()
3047 if (do_read_u32(ff, &data32)) in process_clock_data()
3050 ff->ph->env.clock.clockid = data32; in process_clock_data()
3053 if (do_read_u64(ff, &data64)) in process_clock_data()
3056 ff->ph->env.clock.tod_ns = data64; in process_clock_data()
3059 if (do_read_u64(ff, &data64)) in process_clock_data()
3062 ff->ph->env.clock.clockid_ns = data64; in process_clock_data()
3063 ff->ph->env.clock.enabled = true; in process_clock_data()
3067 static int process_hybrid_topology(struct feat_fd *ff, in process_hybrid_topology() argument
3074 if (do_read_u32(ff, &nr)) in process_hybrid_topology()
3084 n->pmu_name = do_read_string(ff); in process_hybrid_topology()
3088 n->cpus = do_read_string(ff); in process_hybrid_topology()
3093 ff->ph->env.nr_hybrid_nodes = nr; in process_hybrid_topology()
3094 ff->ph->env.hybrid_nodes = nodes; in process_hybrid_topology()
3107 static int process_dir_format(struct feat_fd *ff, in process_dir_format() argument
3113 session = container_of(ff->ph, struct perf_session, header); in process_dir_format()
3119 return do_read_u64(ff, &data->dir.version); in process_dir_format()
3123 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) in process_bpf_prog_info() argument
3126 struct perf_env *env = &ff->ph->env; in process_bpf_prog_info()
3131 if (ff->ph->needs_swap) { in process_bpf_prog_info()
3136 if (do_read_u32(ff, &count)) in process_bpf_prog_info()
3146 if (do_read_u32(ff, &info_len)) in process_bpf_prog_info()
3148 if (do_read_u32(ff, &data_len)) in process_bpf_prog_info()
3162 if (do_read_u64(ff, (u64 *)(&info_linear->arrays))) in process_bpf_prog_info()
3164 if (__do_read(ff, &info_linear->info, info_len)) in process_bpf_prog_info()
3170 if (__do_read(ff, info_linear->data, data_len)) in process_bpf_prog_info()
3192 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) in process_bpf_btf() argument
3194 struct perf_env *env = &ff->ph->env; in process_bpf_btf()
3199 if (ff->ph->needs_swap) { in process_bpf_btf()
3204 if (do_read_u32(ff, &count)) in process_bpf_btf()
3212 if (do_read_u32(ff, &id)) in process_bpf_btf()
3214 if (do_read_u32(ff, &data_size)) in process_bpf_btf()
3224 if (__do_read(ff, node->data, data_size)) in process_bpf_btf()
3239 static int process_compressed(struct feat_fd *ff, in process_compressed() argument
3242 if (do_read_u32(ff, &(ff->ph->env.comp_ver))) in process_compressed()
3245 if (do_read_u32(ff, &(ff->ph->env.comp_type))) in process_compressed()
3248 if (do_read_u32(ff, &(ff->ph->env.comp_level))) in process_compressed()
3251 if (do_read_u32(ff, &(ff->ph->env.comp_ratio))) in process_compressed()
3254 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len))) in process_compressed()
3260 static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps, in __process_pmu_caps() argument
3269 if (do_read_u32(ff, &nr_pmu_caps)) in __process_pmu_caps()
3280 name = do_read_string(ff); in __process_pmu_caps()
3284 value = do_read_string(ff); in __process_pmu_caps()
3315 static int process_cpu_pmu_caps(struct feat_fd *ff, in process_cpu_pmu_caps() argument
3318 int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps, in process_cpu_pmu_caps()
3319 &ff->ph->env.cpu_pmu_caps, in process_cpu_pmu_caps()
3320 &ff->ph->env.max_branches); in process_cpu_pmu_caps()
3322 if (!ret && !ff->ph->env.cpu_pmu_caps) in process_cpu_pmu_caps()
3327 static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused) in process_pmu_caps() argument
3334 if (do_read_u32(ff, &nr_pmu)) in process_pmu_caps()
3347 ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps, in process_pmu_caps()
3353 pmu_caps[i].pmu_name = do_read_string(ff); in process_pmu_caps()
3364 ff->ph->env.nr_pmus_with_caps = nr_pmu; in process_pmu_caps()
3365 ff->ph->env.pmu_caps = pmu_caps; in process_pmu_caps()
3457 struct feat_fd ff; in perf_file_section__fprintf_info() local
3471 ff = (struct feat_fd) { in perf_file_section__fprintf_info()
3477 feat_ops[feat].print(&ff, hd->fp); in perf_file_section__fprintf_info()
3527 struct feat_fd *ff; member
3534 return do_write(h->ff, buf, sz); in feat_writer_cb()
3537 static int do_write_feat(struct feat_fd *ff, int type, in do_write_feat() argument
3545 if (perf_header__has_feat(ff->ph, type)) { in do_write_feat()
3549 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) in do_write_feat()
3552 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR); in do_write_feat()
3561 .ff = ff, in do_write_feat()
3570 err = feat_ops[type].write(ff, evlist); in do_write_feat()
3575 lseek(ff->fd, (*p)->offset, SEEK_SET); in do_write_feat()
3579 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset; in do_write_feat()
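
The do_write_feat() matches show how each optional feature gets its table entry when a perf.data file is written: record the current file offset, run the feature's write handler from feat_ops[], rewind on failure, and derive the section size from how far the offset advanced (perf_header__adds_write() then emits the collected feat_sec table, as the matches below suggest). A hedged sketch of that bookkeeping follows; the section struct and the callback type are illustrative stand-ins, not perf's perf_file_section and feat_ops layout.

/*
 * Sketch of the offset/size bookkeeping around a feature write handler.
 * Assumptions: feat_section_sketch and feat_write_fn are invented stand-ins
 * for perf's perf_file_section and feat_ops[].write.
 */
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

struct feat_section_sketch {
    off_t offset;  /* where the payload starts in the output file */
    off_t size;    /* how many bytes the handler wrote */
};

typedef int (*feat_write_fn)(int fd, void *priv);

static int sketch_write_feat(int fd, feat_write_fn write_cb, void *priv,
                             struct feat_section_sketch *sec)
{
    sec->offset = lseek(fd, 0, SEEK_CUR);
    if (sec->offset < 0)
        return -1;

    if (write_cb(fd, priv)) {
        /* rewind so the next feature starts where the failed one began */
        lseek(fd, sec->offset, SEEK_SET);
        return -1;
    }

    sec->size = lseek(fd, 0, SEEK_CUR) - sec->offset;
    return 0;
}

/* illustrative handler: writes a fixed 8-byte payload */
static int demo_feature(int fd, void *priv)
{
    (void)priv;
    return write(fd, "8 bytes\n", 8) == 8 ? 0 : -1;
}

int main(void)
{
    FILE *f = tmpfile();
    struct feat_section_sketch sec;

    if (!f)
        return 1;
    if (sketch_write_feat(fileno(f), demo_feature, NULL, &sec)) {
        fclose(f);
        return 1;
    }
    printf("feature payload at offset %lld, size %lld\n",
           (long long)sec.offset, (long long)sec.size);
    fclose(f);
    return 0;
}
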
3590 struct feat_fd ff; in perf_header__adds_write() local
3597 ff = (struct feat_fd){ in perf_header__adds_write()
3616 if (do_write_feat(&ff, feat, &p, evlist, fc)) in perf_header__adds_write()
3625 err = do_write(&ff, feat_sec, sec_size); in perf_header__adds_write()
3635 struct feat_fd ff; in perf_header__write_pipe() local
3638 ff = (struct feat_fd){ .fd = fd }; in perf_header__write_pipe()
3645 err = do_write(&ff, &f_header, sizeof(f_header)); in perf_header__write_pipe()
3663 struct feat_fd ff; in perf_session__do_write_header() local
3667 ff = (struct feat_fd){ .fd = fd}; in perf_session__do_write_header()
3672 err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64)); in perf_session__do_write_header()
3679 attr_offset = lseek(ff.fd, 0, SEEK_CUR); in perf_session__do_write_header()
3697 err = do_write(&ff, &f_attr, sizeof(f_attr)); in perf_session__do_write_header()
3732 err = do_write(&ff, &f_header, sizeof(f_header)); in perf_session__do_write_header()
4044 struct feat_fd ff = { in perf_file_header__read_pipe() local
4062 if (repipe && do_write(&ff, header, sizeof(*header)) < 0) in perf_file_header__read_pipe()
4302 struct feat_fd ff = { .fd = 0 }; in perf_event__process_feature() local
4320 ff.buf = (void *)fe->data; in perf_event__process_feature()
4321 ff.size = event->header.size - sizeof(*fe); in perf_event__process_feature()
4322 ff.ph = &session->header; in perf_event__process_feature()
4324 if (feat_ops[feat].process(&ff, NULL)) { in perf_event__process_feature()
4334 feat_ops[feat].print(&ff, stdout); in perf_event__process_feature()
4340 free_event_desc(ff.events); in perf_event__process_feature()