// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"

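/*
 * Error-handling helpers: log the failing expression and bail out to the
 * cleanup path. The while-loop form behaves like an if here, because the
 * body always exits via goto out_err.
 */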
#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events. If the test passes
 * %0 is returned, otherwise %-1 is returned. If TSC conversion is not
 * supported then the test passes but " (not supported)" is printed.
 */
int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.target		     = {
			.uses_mmap   = true,
		},
		.sample_time	     = true,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;
	struct mmap *md;

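	/*
	 * Map only the current thread (pid = -1, tid = getpid());
	 * perf_cpu_map__new(NULL) below gives all online CPUs.
	 */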
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = perf_cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(&evlist->core, cpus, threads);

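	/*
	 * Count user-space CPU cycles. evlist__config() below applies the
	 * record options, including sample_time so samples carry timestamps.
	 */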
	CHECK__(parse_events(evlist, "cycles:u", NULL));

	evlist__config(evlist, &opts, NULL);

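	/*
	 * Record PERF_RECORD_COMM events and start the events disabled so
	 * they can be switched on/off around the prctl() calls below.
	 */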
	/* On a hybrid platform, "cycles:u" creates two events (one per core PMU) */
	evlist__for_each_entry(evlist, evsel) {
		evsel->core.attr.comm = 1;
		evsel->core.attr.disabled = 1;
		evsel->core.attr.enable_on_exec = 0;
	}

	CHECK__(evlist__open(evlist));

	CHECK__(evlist__mmap(evlist, UINT_MAX));

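	/*
	 * The TSC conversion parameters (time_shift, time_mult, time_zero)
	 * are read from the first mmap'd perf_event_mmap_page; if the kernel
	 * does not expose them, perf_read_tsc_conversion() returns -EOPNOTSUPP.
	 */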
	pc = evlist->mmap[0].core.base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			fprintf(stderr, " (not supported)");
			err = 0;	/* pass, but clean up first */
		}
		goto out_err;
	}

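	/*
	 * Generate two COMM events with an rdtsc() in between. The perf
	 * timestamps of the two events must bracket the raw TSC reading.
	 */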
	evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	evlist__disable(evlist);

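	/* Scan every mmap'd ring buffer for the two COMM events' sample times. */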
	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_mmap__consume(&md->core);
		}
		perf_mmap__read_done(&md->core);
	}

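	/* Both COMM events must have been captured with a timestamp. */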
	if (!comm1_time || !comm2_time)
		goto out_err;

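	/*
	 * Convert in both directions. Per the perf mmap ABI, perf time is
	 * derived from TSC as roughly time_zero + ((tsc * time_mult) >> time_shift).
	 */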
	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc          time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

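	/*
	 * For the conversion to be consistent, the converted rdtsc() value
	 * must fall strictly between the two COMM events in both time bases.
	 */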
	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = 0;

out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}

bool test__tsc_is_supported(void)
{
	/*
	 * perf supports TSC conversion only on x86_64/i386 and Arm64, so
	 * enable the test only for those architectures.
	 */
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
	return true;
#else
	return false;
#endif
}