// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
static const char *__doc__ =
	" XDP redirect with a CPU-map type \"BPF_MAP_TYPE_CPUMAP\"";

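/* Example invocation (illustrative only; the binary name depends on how the
 * sample is built, and the program name shown is the default --progname used
 * in main() below):
 *
 *   ./xdp_redirect_cpu --dev eth0 --cpu 2 --cpu 3 \
 *       --progname xdp_cpu_map5_lb_hash_ip_pairs
 */
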
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <locale.h>
#include <sys/resource.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>

#include <arpa/inet.h>
#include <linux/if_link.h>

#define MAX_CPUS 64 /* WARNING - sync with _kern.c */

/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 6

#include <bpf/bpf.h>
#include "libbpf.h"

#include "bpf_util.h"

static int ifindex = -1;
static char ifname_buf[IF_NAMESIZE];
static char *ifname;
static __u32 prog_id;

static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
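/* File descriptors for the maps shared with _kern.c; these are resolved by
 * name in init_map_fds() after the BPF object has been loaded.
 */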
static int cpu_map_fd;
static int rx_cnt_map_fd;
static int redirect_err_cnt_map_fd;
static int cpumap_enqueue_cnt_map_fd;
static int cpumap_kthread_cnt_map_fd;
static int cpus_available_map_fd;
static int cpus_count_map_fd;
static int cpus_iterator_map_fd;
static int exception_cnt_map_fd;

/* Exit return codes */
#define EXIT_OK			0
#define EXIT_FAIL		1
#define EXIT_FAIL_OPTION	2
#define EXIT_FAIL_XDP		3
#define EXIT_FAIL_BPF		4
#define EXIT_FAIL_MEM		5

static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"dev",		required_argument,	NULL, 'd' },
	{"skb-mode",	no_argument,		NULL, 'S' },
	{"sec",		required_argument,	NULL, 's' },
	{"progname",	required_argument,	NULL, 'p' },
	{"qsize",	required_argument,	NULL, 'q' },
	{"cpu",		required_argument,	NULL, 'c' },
	{"stress-mode", no_argument,		NULL, 'x' },
	{"no-separators", no_argument,		NULL, 'z' },
	{"force",	no_argument,		NULL, 'F' },
	{0, 0, NULL, 0 }
};

static void int_exit(int sig)
{
	__u32 curr_prog_id = 0;

	if (ifindex > -1) {
		if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
			printf("bpf_get_link_xdp_id failed\n");
			exit(EXIT_FAIL);
		}
		if (prog_id == curr_prog_id) {
			fprintf(stderr,
				"Interrupted: Removing XDP program on ifindex:%d device:%s\n",
				ifindex, ifname);
			bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
		} else if (!curr_prog_id) {
			printf("couldn't find a prog id on a given iface\n");
		} else {
			printf("program on interface changed, not removing\n");
		}
	}
	exit(EXIT_OK);
}

static void print_avail_progs(struct bpf_object *obj)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (bpf_program__is_xdp(pos))
			printf(" %s\n", bpf_program__title(pos, false));
	}
}

static void usage(char *argv[], struct bpf_object *obj)
{
	int i;

	printf("\nDOCUMENTATION:\n%s\n", __doc__);
	printf("\n");
	printf(" Usage: %s (options-see-below)\n", argv[0]);
	printf(" Listing options:\n");
	for (i = 0; long_options[i].name != 0; i++) {
		printf(" --%-12s", long_options[i].name);
		if (long_options[i].flag != NULL)
			printf(" flag (internal value:%d)",
				*long_options[i].flag);
		else
			printf(" short-option: -%c",
				long_options[i].val);
		printf("\n");
	}
	printf("\n Programs to be used for --progname:\n");
	print_avail_progs(obj);
	printf("\n");
}

/* gettime returns the current time of day in nanoseconds.
 * Cost: clock_gettime (ns) => 26ns (CLOCK_MONOTONIC)
 *       clock_gettime (ns) =>  9ns (CLOCK_MONOTONIC_COARSE)
 */
#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
static __u64 gettime(void)
{
	struct timespec t;
	int res;

	res = clock_gettime(CLOCK_MONOTONIC, &t);
	if (res < 0) {
		fprintf(stderr, "Error with clock_gettime! (%i)\n", res);
		exit(EXIT_FAIL);
	}
	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
}

/* Common stats data record shared with _kern.c */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 issue;
};
struct record {
	__u64 timestamp;
	struct datarec total;
	struct datarec *cpu;
};
struct stats_record {
	struct record rx_cnt;
	struct record redir_err;
	struct record kthread;
	struct record exception;
	struct record enq[MAX_CPUS];
};
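
/* Each struct record keeps one struct datarec per possible CPU (the cpu
 * array, allocated by alloc_record_per_cpu()) plus a sum across CPUs in
 * .total, filled in by map_collect_percpu().
 */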

static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 sum_processed = 0;
	__u64 sum_dropped = 0;
	__u64 sum_issue = 0;
	int i;

	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_processed        += values[i].processed;
		rec->cpu[i].dropped   = values[i].dropped;
		sum_dropped          += values[i].dropped;
		rec->cpu[i].issue     = values[i].issue;
		sum_issue            += values[i].issue;
	}
	rec->total.processed = sum_processed;
	rec->total.dropped   = sum_dropped;
	rec->total.issue     = sum_issue;
	return true;
}

static struct datarec *alloc_record_per_cpu(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec *array;

	/* calloc zeroes the memory and lets us check for NULL before use */
	array = calloc(nr_cpus, sizeof(struct datarec));
	if (!array) {
		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
		exit(EXIT_FAIL_MEM);
	}
	return array;
}

static struct stats_record *alloc_stats_record(void)
{
	struct stats_record *rec;
	int i;

	/* Zero-allocate and check for failure before touching the memory */
	rec = calloc(1, sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Mem alloc error\n");
		exit(EXIT_FAIL_MEM);
	}
	rec->rx_cnt.cpu    = alloc_record_per_cpu();
	rec->redir_err.cpu = alloc_record_per_cpu();
	rec->kthread.cpu   = alloc_record_per_cpu();
	rec->exception.cpu = alloc_record_per_cpu();
	for (i = 0; i < MAX_CPUS; i++)
		rec->enq[i].cpu = alloc_record_per_cpu();

	return rec;
}

static void free_stats_record(struct stats_record *r)
{
	int i;

	for (i = 0; i < MAX_CPUS; i++)
		free(r->enq[i].cpu);
	free(r->exception.cpu);
	free(r->kthread.cpu);
	free(r->redir_err.cpu);
	free(r->rx_cnt.cpu);
	free(r);
}

static double calc_period(struct record *r, struct record *p)
{
	double period_ = 0;
	__u64 period = 0;

	period = r->timestamp - p->timestamp;
	if (period > 0)
		period_ = ((double) period / NANOSEC_PER_SEC);

	return period_;
}

static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->processed - p->processed;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->dropped - p->dropped;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_errs_pps(struct datarec *r,
			   struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->issue - p->issue;
		pps = packets / period_;
	}
	return pps;
}

static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			char *prog_name)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	double pps = 0, drop = 0, err = 0;
	struct record *rec, *prev;
	int to_cpu;
	double t;
	int i;

	/* Header */
	printf("Running XDP/eBPF prog_name:%s\n", prog_name);
	printf("%-15s %-7s %-14s %-11s %-9s\n",
	       "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");

	/* XDP rx_cnt */
	{
		char *fmt_rx = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %-7s %'-14.0f %'-11.0f\n";
		char *errstr = "";

		rec  = &stats_rec->rx_cnt;
		prev = &stats_prev->rx_cnt;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0)
				errstr = "cpu-dest/err";
			if (pps > 0)
				printf(fmt_rx, "XDP-RX",
					i, pps, drop, err, errstr);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		printf(fm2_rx, "XDP-RX", "total", pps, drop);
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
		char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *errstr = "";

		rec  = &stats_rec->enq[to_cpu];
		prev = &stats_prev->enq[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt, "cpumap-enqueue",
				       i, to_cpu, pps, drop, err, errstr);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop_pps(&rec->total, &prev->total, t);
			err  = calc_errs_pps(&rec->total, &prev->total, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			printf(fm2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, err, errstr);
		}
	}

	/* cpumap kthread stats */
	{
		char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *e_str = "";

		rec  = &stats_rec->kthread;
		prev = &stats_prev->kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0)
				e_str = "sched";
			if (pps > 0)
				printf(fmt_k, "cpumap_kthread",
				       i, pps, drop, err, e_str);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		if (err > 0)
			e_str = "sched-sum";
		printf(fm2_k, "cpumap_kthread", "total", pps, drop, err, e_str);
	}

	/* XDP redirect err tracepoints (very unlikely) */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->redir_err;
		prev = &stats_prev->redir_err;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "redirect_err", i, pps, drop);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "redirect_err", "total", pps, drop);
	}

	/* XDP general exception tracepoints */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->exception;
		prev = &stats_prev->exception;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "xdp_exception", i, pps, drop);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "xdp_exception", "total", pps, drop);
	}

	printf("\n");
	fflush(stdout);
}

static void stats_collect(struct stats_record *rec)
{
	int fd, i;

	fd = rx_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->rx_cnt);

	fd = redirect_err_cnt_map_fd;
	map_collect_percpu(fd, 1, &rec->redir_err);

	fd = cpumap_enqueue_cnt_map_fd;
	for (i = 0; i < MAX_CPUS; i++)
		map_collect_percpu(fd, i, &rec->enq[i]);

	fd = cpumap_kthread_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->kthread);

	fd = exception_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->exception);
}


/* Pointer swap trick */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}

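/* create_cpu_entry() does two things: it inserts the CPU into the kernel
 * cpumap, which allocates the kernel-side entry (including the kthread that
 * processes frames redirected to that CPU), and it publishes the CPU via the
 * cpus_available/cpus_count control maps that the _kern.c programs use to
 * pick a destination CPU.
 */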
static int create_cpu_entry(__u32 cpu, __u32 queue_size,
			    __u32 avail_idx, bool new)
{
	__u32 curr_cpus_count = 0;
	__u32 key = 0;
	int ret;

	/* Add a CPU entry to cpumap, as this allocates a cpu entry in
	 * the kernel for the cpu.
	 */
	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
	if (ret) {
		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
		exit(EXIT_FAIL_BPF);
	}

	/* Inform bpf_progs that a new CPU is available to select
	 * from via some control maps.
	 */
	ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
	if (ret) {
		fprintf(stderr, "Add to avail CPUs failed\n");
		exit(EXIT_FAIL_BPF);
	}

	/* When not replacing/updating existing entry, bump the count */
	ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
	if (ret) {
		fprintf(stderr, "Failed reading curr cpus_count\n");
		exit(EXIT_FAIL_BPF);
	}
	if (new) {
		curr_cpus_count++;
		ret = bpf_map_update_elem(cpus_count_map_fd, &key,
					  &curr_cpus_count, 0);
		if (ret) {
			fprintf(stderr, "Failed write curr cpus_count\n");
			exit(EXIT_FAIL_BPF);
		}
	}
	/* map_fd[7] = cpus_iterator */
	printf("%s CPU:%u as idx:%u queue_size:%d (total cpus_count:%u)\n",
	       new ? "Add-new" : "Replace", cpu, avail_idx,
	       queue_size, curr_cpus_count);

	return 0;
}

/* CPUs are zero-indexed. Thus, add a special sentinel default value
 * in map cpus_available to mark CPU indexes not configured
 */
static void mark_cpus_unavailable(void)
{
	__u32 invalid_cpu = MAX_CPUS;
	int ret, i;

	for (i = 0; i < MAX_CPUS; i++) {
		ret = bpf_map_update_elem(cpus_available_map_fd, &i,
					  &invalid_cpu, 0);
		if (ret) {
			fprintf(stderr, "Failed marking CPU unavailable\n");
			exit(EXIT_FAIL_BPF);
		}
	}
}

/* Stress cpumap management code by concurrently changing underlying cpumap */
static void stress_cpumap(void)
{
	/* Changing qsize will cause kernel to free and alloc a new
	 * bpf_cpu_map_entry, with an associated/complicated tear-down
	 * procedure.
	 */
	create_cpu_entry(1, 1024, 0, false);
	create_cpu_entry(1, 8, 0, false);
	create_cpu_entry(1, 16000, 0, false);
}

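/* stats_poll() keeps two snapshots (record and prev) and swaps them each
 * interval, so rates are computed from the difference between two
 * consecutive map readings.
 */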
static void stats_poll(int interval, bool use_separators, char *prog_name,
		       bool stress_mode)
{
	struct stats_record *record, *prev;

	record = alloc_stats_record();
	prev   = alloc_stats_record();
	stats_collect(record);

	/* Trick to pretty printf with thousands separators use %' */
	if (use_separators)
		setlocale(LC_NUMERIC, "en_US");

	while (1) {
		swap(&prev, &record);
		stats_collect(record);
		stats_print(record, prev, prog_name);
		sleep(interval);
		if (stress_mode)
			stress_cpumap();
	}

	free_stats_record(record);
	free_stats_record(prev);
}

static int init_map_fds(struct bpf_object *obj)
{
	cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
	rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
	redirect_err_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
	cpumap_enqueue_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
	cpumap_kthread_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
	cpus_available_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpus_available");
	cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
	cpus_iterator_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
	exception_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "exception_cnt");

	if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
	    redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
	    cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
	    cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
	    exception_cnt_map_fd < 0)
		return -ENOENT;

	return 0;
}

int main(int argc, char **argv)
{
	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_UNSPEC,
	};
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	bool use_separators = true;
	bool stress_mode = false;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char filename[256];
	int added_cpus = 0;
	int longindex = 0;
	int interval = 2;
	int add_cpu = -1;
	int opt, err;
	int prog_fd;
	__u32 qsize;

	/* Notice: choosing the queue size is very important with the
	 * ixgbe driver, because its driver page recycling trick is
	 * dependent on pages being returned quickly. The number of
	 * outstanding packets in the system must be less than 2x
	 * RX-ring size.
	 */
	qsize = 128+64;

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = filename;

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return 1;
	}

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		return EXIT_FAIL;

	if (prog_fd < 0) {
		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
			strerror(errno));
		return EXIT_FAIL;
	}
	if (init_map_fds(obj) < 0) {
		fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
		return EXIT_FAIL;
	}
	mark_cpus_unavailable();

	/* Parse command line args */
	while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'd':
			if (strlen(optarg) >= IF_NAMESIZE) {
				fprintf(stderr, "ERR: --dev name too long\n");
				goto error;
			}
			ifname = (char *)&ifname_buf;
			strncpy(ifname, optarg, IF_NAMESIZE);
			ifindex = if_nametoindex(ifname);
			if (ifindex == 0) {
				fprintf(stderr,
					"ERR: --dev name unknown err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'x':
			stress_mode = true;
			break;
		case 'z':
			use_separators = false;
			break;
		case 'p':
			/* Selecting eBPF prog to load */
			prog_name = optarg;
			break;
		case 'c':
			/* Add multiple CPUs */
			add_cpu = strtoul(optarg, NULL, 0);
			if (add_cpu >= MAX_CPUS) {
				fprintf(stderr,
					"--cpu nr too large for cpumap err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			create_cpu_entry(add_cpu, qsize, added_cpus, true);
			added_cpus++;
			break;
		case 'q':
			qsize = atoi(optarg);
			break;
		case 'F':
			xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'h':
		error:
		default:
			usage(argv, obj);
			return EXIT_FAIL_OPTION;
		}
	}
	/* Required option */
	if (ifindex == -1) {
		fprintf(stderr, "ERR: required option --dev missing\n");
		usage(argv, obj);
		return EXIT_FAIL_OPTION;
	}
	/* Required option */
	if (add_cpu == -1) {
		fprintf(stderr, "ERR: required option --cpu missing\n");
		fprintf(stderr, " Specify multiple --cpu options to add more\n");
		usage(argv, obj);
		return EXIT_FAIL_OPTION;
	}

	/* Remove XDP program when program is interrupted or killed */
	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (!prog) {
		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
		return EXIT_FAIL;
	}

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		fprintf(stderr, "bpf_program__fd failed\n");
		return EXIT_FAIL;
	}

	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
		fprintf(stderr, "link set xdp fd failed\n");
		return EXIT_FAIL_XDP;
	}

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (err) {
		printf("can't get prog info - %s\n", strerror(errno));
		return err;
	}
	prog_id = info.id;

	stats_poll(interval, use_separators, prog_name, stress_mode);
	return EXIT_OK;
}