// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2020 Wenbo Zhang
//
// Based on runqlen(8) from BCC by Brendan Gregg.
// 11-Sep-2020   Wenbo Zhang   Created this.
#include <argp.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "runqlen.h"
#include "runqlen.skel.h"
#include "trace_helpers.h"

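/*
 * Type-safe max(): the (void) comparison of the two temporaries' addresses
 * makes the compiler warn when x and y have different types.
 */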
#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })

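/*
 * Command-line options and their defaults: sample at 99 Hz; interval and
 * count are effectively unlimited unless overridden on the command line.
 */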
struct env {
	bool per_cpu;
	bool runqocc;
	bool timestamp;
	time_t interval;
	int freq;
	int times;
	bool verbose;
} env = {
	.interval = 99999999,
	.times = 99999999,
	.freq = 99,
};

static volatile bool exiting;

const char *argp_program_version = "runqlen 0.1";
const char *argp_program_bug_address =
	"https://github.com/iovisor/bcc/tree/master/libbpf-tools";
const char argp_program_doc[] =
"Summarize scheduler run queue length as a histogram.\n"
"\n"
"USAGE: runqlen [--help] [-C] [-O] [-T] [-f FREQUENCY] [interval] [count]\n"
"\n"
"EXAMPLES:\n"
"    runqlen         # summarize run queue length as a histogram\n"
"    runqlen 1 10    # print 1 second summaries, 10 times\n"
"    runqlen -T 1    # 1s summaries and timestamps\n"
"    runqlen -O      # report run queue occupancy\n"
"    runqlen -C      # show each CPU separately\n"
"    runqlen -f 199  # sample at 199HZ\n";

static const struct argp_option opts[] = {
	{ "cpus", 'C', NULL, 0, "Print output for each CPU separately" },
	{ "frequency", 'f', "FREQUENCY", 0, "Sample with a certain frequency" },
	{ "runqocc", 'O', NULL, 0, "Report run queue occupancy" },
	{ "timestamp", 'T', NULL, 0, "Include timestamp on output" },
	{ "verbose", 'v', NULL, 0, "Verbose debug output" },
	{ NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" },
	{},
};

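/*
 * argp callback: handle options and the optional [interval] [count]
 * positional arguments.
 */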
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	static int pos_args;

	switch (key) {
	case 'h':
		argp_state_help(state, stderr, ARGP_HELP_STD_HELP);
		break;
	case 'v':
		env.verbose = true;
		break;
	case 'C':
		env.per_cpu = true;
		break;
	case 'O':
		env.runqocc = true;
		break;
	case 'T':
		env.timestamp = true;
		break;
	case 'f':
		errno = 0;
		env.freq = strtol(arg, NULL, 10);
		if (errno || env.freq <= 0) {
			fprintf(stderr, "Invalid freq (in hz): %s\n", arg);
			argp_usage(state);
		}
		break;
	case ARGP_KEY_ARG:
		errno = 0;
		if (pos_args == 0) {
			env.interval = strtol(arg, NULL, 10);
			if (errno) {
				fprintf(stderr, "invalid interval\n");
				argp_usage(state);
			}
		} else if (pos_args == 1) {
			env.times = strtol(arg, NULL, 10);
			if (errno) {
				fprintf(stderr, "invalid times\n");
				argp_usage(state);
			}
		} else {
			fprintf(stderr,
				"unrecognized positional argument: %s\n", arg);
			argp_usage(state);
		}
		pos_args++;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}

static int nr_cpus;

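/*
 * Open a CPU-clock software perf event on every possible CPU, sampling at
 * the requested frequency, and attach the BPF sampling program to each one.
 * Offline CPUs (perf_event_open() failing with ENODEV) are silently skipped.
 */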
static int open_and_attach_perf_event(int freq, struct bpf_program *prog,
				      struct bpf_link *links[])
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.freq = 1,
		.sample_period = freq,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	int i, fd;

	for (i = 0; i < nr_cpus; i++) {
		fd = syscall(__NR_perf_event_open, &attr, -1, i, -1, 0);
		if (fd < 0) {
			/* Ignore CPU that is offline */
			if (errno == ENODEV)
				continue;
			fprintf(stderr, "failed to init perf sampling: %s\n",
				strerror(errno));
			return -1;
		}
		links[i] = bpf_program__attach_perf_event(prog, fd);
		if (!links[i]) {
			fprintf(stderr, "failed to attach perf event on cpu: %d\n", i);
			close(fd);
			return -1;
		}
	}

	return 0;
}

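/* Route libbpf's log output to stderr; suppress debug messages unless -v
 * was given.
 */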
static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
{
	if (level == LIBBPF_DEBUG && !env.verbose)
		return 0;
	return vfprintf(stderr, format, args);
}

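/* SIGINT handler: request a clean exit from the main loop. */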
static void sig_handler(int sig)
{
	exiting = true;
}

static struct hist zero;

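/*
 * Print run queue occupancy: slot 0 counts samples where nothing was queued,
 * higher slots count samples with at least one queued task, so occupancy is
 * queued / total samples. Each histogram is reset to `zero` after it is read.
 */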
static void print_runq_occupancy(struct runqlen_bpf__bss *bss)
{
	struct hist hist;
	int slot, i = 0;
	float runqocc;

	do {
		__u64 samples, idle = 0, queued = 0;

		hist = bss->hists[i];
		bss->hists[i] = zero;
		for (slot = 0; slot < MAX_SLOTS; slot++) {
			__u64 val = hist.slots[slot];

			if (slot == 0)
				idle += val;
			else
				queued += val;
		}
		samples = idle + queued;
		runqocc = queued * 1.0 / max(1ULL, samples);
		if (env.per_cpu)
			printf("runqocc, CPU %-3d %6.2f%%\n", i,
			       100 * runqocc);
		else
			printf("runqocc: %0.2f%%\n", 100 * runqocc);
	} while (env.per_cpu && ++i < nr_cpus);
}

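/* Print a linear histogram of run queue length, per CPU with -C, and reset
 * the counters for the next interval.
 */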
static void print_linear_hists(struct runqlen_bpf__bss *bss)
{
	struct hist hist;
	int i = 0;

	do {
		hist = bss->hists[i];
		bss->hists[i] = zero;
		if (env.per_cpu)
			printf("cpu = %d\n", i);
		print_linear_hist(hist.slots, MAX_SLOTS, 0, 1, "runqlen");
	} while (env.per_cpu && ++i < nr_cpus);
}

int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct bpf_link *links[MAX_CPU_NR] = {};
	struct runqlen_bpf *obj;
	struct tm *tm;
	char ts[32];
	int err, i;
	time_t t;

	err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
	if (err)
		return err;

	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	nr_cpus = libbpf_num_possible_cpus();
	if (nr_cpus < 0) {
		fprintf(stderr, "failed to get # of possible cpus: '%s'!\n",
			strerror(-nr_cpus));
		return 1;
	}
	if (nr_cpus > MAX_CPU_NR) {
		fprintf(stderr, "the number of cpu cores is too big, please "
			"increase MAX_CPU_NR's value and recompile\n");
		return 1;
	}

	obj = runqlen_bpf__open();
	if (!obj) {
		fprintf(stderr, "failed to open BPF object\n");
		return 1;
	}

	/* initialize global data (filtering options) */
	obj->rodata->targ_per_cpu = env.per_cpu;

	err = runqlen_bpf__load(obj);
	if (err) {
		fprintf(stderr, "failed to load BPF object: %d\n", err);
		goto cleanup;
	}

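	/* Histograms are read directly from the skeleton's memory-mapped BSS;
	 * that requires mmapable global data, available since Linux 5.7.
	 */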
	if (!obj->bss) {
		fprintf(stderr, "Memory-mapping BPF maps is supported starting from Linux 5.7, please upgrade.\n");
		err = 1;
		goto cleanup;
	}

	err = open_and_attach_perf_event(env.freq, obj->progs.do_sample, links);
	if (err)
		goto cleanup;

	printf("Sampling run queue length... Hit Ctrl-C to end.\n");

	signal(SIGINT, sig_handler);

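	/* Print a summary every interval seconds, up to `times` summaries,
	 * or until Ctrl-C is received.
	 */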
	while (1) {
		sleep(env.interval);
		printf("\n");

		if (env.timestamp) {
			time(&t);
			tm = localtime(&t);
			strftime(ts, sizeof(ts), "%H:%M:%S", tm);
			printf("%-8s\n", ts);
		}

		if (env.runqocc)
			print_runq_occupancy(obj->bss);
		else
			print_linear_hists(obj->bss);

		if (exiting || --env.times == 0)
			break;
	}

cleanup:
	for (i = 0; i < nr_cpus; i++)
		bpf_link__destroy(links[i]);
	runqlen_bpf__destroy(obj);

	return err != 0;
}