// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>

#include <sched.h>
#include <perf/mmap.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"

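/*
 * Find the first CPU in @pid's affinity mask and reduce the mask to just
 * that one CPU, so that every sample the test reads back should carry
 * that cpu number. Note that a fixed-size cpu_set_t only describes
 * CPU_SETSIZE (1024) CPUs; if sched_getaffinity() fails with EINVAL
 * because the kernel's mask is wider, the retry loop below just retries
 * a bounded number of times before giving up (handling that case for
 * real would need a dynamically sized mask, e.g. CPU_ALLOC() +
 * CPU_ISSET_S()).
 */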
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}

static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages   = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct evlist *evlist = evlist__new_dummy();
	struct evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_coreutils_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run: this forks the child, which
	 * then waits for evlist__start_workload() before exec'ing it. Doing
	 * it this way gives us time to open the evlist (calling
	 * sys_perf_event_open on all the fds) and then mmap them.
	 */
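	/*
	 * The synchronization is a pipe handshake, roughly (a sketch of the
	 * idea, not the exact evlist internals):
	 *
	 *	child:  read(go_pipe[0], &bf, 1);	// waits for the parent
	 *	        execvp(argv[0], (char **)argv);
	 *	parent: write(go_pipe[1], &bf, 1);	// evlist__start_workload()
	 */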
	err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
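	/*
	 * Each evsel__set_sample_bit() below amounts, roughly, to setting
	 * attr.sample_type |= PERF_SAMPLE_{CPU,TID,TIME}, so that every
	 * sample carries the fields validated in the read loop further down.
	 */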
	evsel = evlist__first(evlist);
	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TID);
	evsel__set_sample_bit(evsel, TIME);
	evlist__config(evlist, &opts, NULL);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
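	/*
	 * The redirection boils down to (a sketch; libperf does this
	 * internally when setting up the mmaps):
	 *
	 *	ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, first_fd_on_same_cpu);
	 */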
	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events; they will
	 * count just on workload.pid, which will start...
	 */
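	/*
	 * (Roughly an ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) on each opened
	 * event fd; the exact path through libperf may differ.)
	 */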
	evlist__enable(evlist);

	/*
	 * Now!
	 */
	evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			union perf_event *event;
			struct mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(&md->core) < 0)
				continue;

			while ((event = perf_mmap__read_event(&md->core)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, NULL, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64 " %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, NULL, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_coreutils_mmap)
							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(&md->core);
			}
			perf_mmap__read_done(&md->core);
		}

		/*
		 * We don't use poll here because at least as of v3.1 the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap && !found_coreutils_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	evlist__delete(evlist);
out:
	if (err == -EACCES)
		return TEST_SKIP;
	if (err < 0 || errs != 0)
		return TEST_FAIL;
	return TEST_OK;
}

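/*
 * "permissions" is the skip reason reported when the test returns
 * TEST_SKIP above, i.e. when opening the events fails with -EACCES
 * (typically a perf_event_paranoid restriction).
 */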
static struct test_case tests__PERF_RECORD[] = {
	TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
			 PERF_RECORD,
			 "permissions"),
	{ .name = NULL, }
};

struct test_suite suite__PERF_RECORD = {
	.desc = "PERF_RECORD_* events & perf_sample fields",
	.test_cases = tests__PERF_RECORD,
};