
Lines matching refs: i

244 		for (int i = 2; i < XDP_REDIRECT_ERR_MAX; i++)  in sample_print_help()  local
245 printf("\t\t\t %s: %s\n", xdp_redirect_err_names[i], in sample_print_help()
246 xdp_redirect_err_help[i - 1]); in sample_print_help()
298 int i; in sample_usage() local
304 for (i = 0; long_options[i].name != 0; i++) { in sample_usage()
305 printf(" --%-15s", long_options[i].name); in sample_usage()
306 if (long_options[i].flag != NULL) in sample_usage()
308 *long_options[i].flag); in sample_usage()
310 printf("\t short-option: -%c", long_options[i].val); in sample_usage()
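The sample_usage() lines above walk a getopt_long() option table until its all-zero terminator, printing each long name and, when no flag pointer is set, the matching short option. The following is a minimal, self-contained sketch of that pattern; the option entries and output wording are illustrative, not taken from the sample.

#include <getopt.h>
#include <stdio.h>

/* Illustrative option table; the real sample defines its own entries. */
static const struct option long_options[] = {
	{ "help",    no_argument, NULL, 'h' },
	{ "verbose", no_argument, NULL, 'v' },
	{ NULL, 0, NULL, 0 }	/* all-zero terminator ends the walk below */
};

static void print_long_options(void)
{
	for (int i = 0; long_options[i].name != 0; i++) {
		printf(" --%-15s", long_options[i].name);
		if (long_options[i].flag != NULL)
			printf(" flag (internal value: %d)", *long_options[i].flag);
		else
			printf("\t short-option: -%c", long_options[i].val);
		printf("\n");
	}
}

int main(void)
{
	print_long_options();
	return 0;
}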
351 int i; in map_collect_percpu() local
357 for (i = 0; i < nr_cpus; i++) { in map_collect_percpu()
358 rec->cpu[i].processed = READ_ONCE(values[i].processed); in map_collect_percpu()
359 rec->cpu[i].dropped = READ_ONCE(values[i].dropped); in map_collect_percpu()
360 rec->cpu[i].issue = READ_ONCE(values[i].issue); in map_collect_percpu()
361 rec->cpu[i].xdp_pass = READ_ONCE(values[i].xdp_pass); in map_collect_percpu()
362 rec->cpu[i].xdp_drop = READ_ONCE(values[i].xdp_drop); in map_collect_percpu()
363 rec->cpu[i].xdp_redirect = READ_ONCE(values[i].xdp_redirect); in map_collect_percpu()
365 sum_processed += rec->cpu[i].processed; in map_collect_percpu()
366 sum_dropped += rec->cpu[i].dropped; in map_collect_percpu()
367 sum_issue += rec->cpu[i].issue; in map_collect_percpu()
368 sum_xdp_pass += rec->cpu[i].xdp_pass; in map_collect_percpu()
369 sum_xdp_drop += rec->cpu[i].xdp_drop; in map_collect_percpu()
370 sum_xdp_redirect += rec->cpu[i].xdp_redirect; in map_collect_percpu()
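map_collect_percpu() above snapshots each CPU's slot of a per-CPU value array with READ_ONCE() and keeps both the per-CPU copy and a running total. Below is a stripped-down sketch of that aggregation, using a userspace READ_ONCE stand-in and a hypothetical two-field record in place of the sample's full struct datarec.

#include <stdio.h>

#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))	/* userspace stand-in */

struct rec_sketch {	/* hypothetical, reduced record */
	unsigned long long processed;
	unsigned long long dropped;
};

static void collect_percpu(struct rec_sketch *values, int nr_cpus,
			   struct rec_sketch *per_cpu, struct rec_sketch *sum)
{
	sum->processed = 0;
	sum->dropped = 0;
	for (int i = 0; i < nr_cpus; i++) {
		/* Read each counter once, keep the per-CPU copy, add to totals. */
		per_cpu[i].processed = READ_ONCE(values[i].processed);
		per_cpu[i].dropped = READ_ONCE(values[i].dropped);
		sum->processed += per_cpu[i].processed;
		sum->dropped += per_cpu[i].dropped;
	}
}

int main(void)
{
	struct rec_sketch values[2] = { { 3, 1 }, { 5, 0 } };
	struct rec_sketch per_cpu[2], sum;

	collect_percpu(values, 2, per_cpu, &sum);
	printf("processed=%llu dropped=%llu\n", sum.processed, sum.dropped);
	return 0;
}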
388 int i, ret; in map_collect_percpu_devmap() local
410 for (i = 0; i < count; i++) { in map_collect_percpu_devmap()
412 __u64 pair = keys[i]; in map_collect_percpu_devmap()
415 arr = &values[i * nr_cpus]; in map_collect_percpu_devmap()
452 int i; in alloc_stats_record() local
469 for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) { in alloc_stats_record()
470 rec->redir_err[i].cpu = alloc_record_per_cpu(); in alloc_stats_record()
471 if (!rec->redir_err[i].cpu) { in alloc_stats_record()
475 xdp_redirect_err_names[i]); in alloc_stats_record()
476 while (i--) in alloc_stats_record()
477 free(rec->redir_err[i].cpu); in alloc_stats_record()
491 for (i = 0; i < XDP_ACTION_MAX; i++) { in alloc_stats_record()
492 rec->exception[i].cpu = alloc_record_per_cpu(); in alloc_stats_record()
493 if (!rec->exception[i].cpu) { in alloc_stats_record()
497 action2str(i)); in alloc_stats_record()
498 while (i--) in alloc_stats_record()
499 free(rec->exception[i].cpu); in alloc_stats_record()
515 for (i = 0; i < sample_n_cpus; i++) { in alloc_stats_record()
516 rec->enq[i].cpu = alloc_record_per_cpu(); in alloc_stats_record()
517 if (!rec->enq[i].cpu) { in alloc_stats_record()
521 i); in alloc_stats_record()
522 while (i--) in alloc_stats_record()
523 free(rec->enq[i].cpu); in alloc_stats_record()
534 for (i = 0; i < XDP_ACTION_MAX; i++) in alloc_stats_record()
535 free(rec->exception[i].cpu); in alloc_stats_record()
539 for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) in alloc_stats_record()
540 free(rec->redir_err[i].cpu); in alloc_stats_record()
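The alloc_stats_record() lines show the usual partial-failure unwind: if allocating the i-th per-CPU record fails, while (i--) frees exactly the records that were allocated before it and nothing else. A minimal sketch of that pattern, with a hypothetical record type and plain calloc():

#include <stdlib.h>

struct record_sketch {	/* hypothetical container for a per-CPU buffer */
	void *cpu;
};

/* Allocate n per-CPU buffers; on failure, unwind only what was allocated. */
static int alloc_all(struct record_sketch *rec, int n, size_t per_cpu_size)
{
	int i;

	for (i = 0; i < n; i++) {
		rec[i].cpu = calloc(1, per_cpu_size);
		if (!rec[i].cpu) {
			while (i--)	/* frees entries i-1 down to 0 */
				free(rec[i].cpu);
			return -1;
		}
	}
	return 0;
}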
552 int i; in free_stats_record() local
554 for (i = 0; i < sample_n_cpus; i++) in free_stats_record()
555 free(r->enq[i].cpu); in free_stats_record()
556 hash_for_each_safe(r->xmit_map, i, tmp, e, node) { in free_stats_record()
562 for (i = 0; i < XDP_ACTION_MAX; i++) in free_stats_record()
563 free(r->exception[i].cpu); in free_stats_record()
565 for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) in free_stats_record()
566 free(r->redir_err[i].cpu); in free_stats_record()
655 int i; in stats_get_rx_cnt() local
661 for (i = 0; i < nr_cpus; i++) { in stats_get_rx_cnt()
662 struct datarec *r = &rec->cpu[i]; in stats_get_rx_cnt()
663 struct datarec *p = &prev->cpu[i]; in stats_get_rx_cnt()
672 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_rx_cnt()
698 int i, to_cpu; in stats_get_cpumap_enqueue() local
724 for (i = 0; i < nr_cpus; i++) { in stats_get_cpumap_enqueue()
725 struct datarec *r = &rec->cpu[i]; in stats_get_cpumap_enqueue()
726 struct datarec *p = &prev->cpu[i]; in stats_get_cpumap_enqueue()
735 snprintf(str, sizeof(str), "cpu:%d->%d", i, to_cpu); in stats_get_cpumap_enqueue()
753 int i; in stats_get_cpumap_remote() local
768 for (i = 0; i < nr_cpus; i++) { in stats_get_cpumap_remote()
769 struct datarec *r = &rec->cpu[i]; in stats_get_cpumap_remote()
770 struct datarec *p = &prev->cpu[i]; in stats_get_cpumap_remote()
777 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_cpumap_remote()
791 int i; in stats_get_cpumap_kthread() local
805 for (i = 0; i < nr_cpus; i++) { in stats_get_cpumap_kthread()
806 struct datarec *r = &rec->cpu[i]; in stats_get_cpumap_kthread()
807 struct datarec *p = &prev->cpu[i]; in stats_get_cpumap_kthread()
816 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_cpumap_kthread()
830 int i; in stats_get_redirect_cnt() local
835 for (i = 0; i < nr_cpus; i++) { in stats_get_redirect_cnt()
836 struct datarec *r = &rec->cpu[i]; in stats_get_redirect_cnt()
837 struct datarec *p = &prev->cpu[i]; in stats_get_redirect_cnt()
844 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_redirect_cnt()
862 int rec_i, i; in stats_get_redirect_err_cnt() local
881 for (i = 0; i < nr_cpus; i++) { in stats_get_redirect_err_cnt()
882 struct datarec *r = &rec->cpu[i]; in stats_get_redirect_err_cnt()
883 struct datarec *p = &prev->cpu[i]; in stats_get_redirect_err_cnt()
890 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_redirect_err_cnt()
911 int rec_i, i; in stats_get_exception_cnt() local
926 for (i = 0; i < nr_cpus; i++) { in stats_get_exception_cnt()
927 struct datarec *r = &rec->cpu[i]; in stats_get_exception_cnt()
928 struct datarec *p = &prev->cpu[i]; in stats_get_exception_cnt()
936 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_exception_cnt()
957 int i; in stats_get_devmap_xmit() local
962 for (i = 0; i < nr_cpus; i++) { in stats_get_devmap_xmit()
963 struct datarec *r = &rec->cpu[i]; in stats_get_devmap_xmit()
964 struct datarec *p = &prev->cpu[i]; in stats_get_devmap_xmit()
974 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_devmap_xmit()
1023 int i; in stats_get_devmap_xmit_multi() local
1077 for (i = 0; i < nr_cpus; i++) { in stats_get_devmap_xmit_multi()
1078 struct datarec *rc = &r->cpu[i]; in stats_get_devmap_xmit_multi()
1082 pc = p == &beg ? &p_beg : &p->cpu[i]; in stats_get_devmap_xmit_multi()
1091 snprintf(str, sizeof(str), "cpu:%d", i); in stats_get_devmap_xmit_multi()
1200 for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) { in sample_setup_maps() local
1201 sample_map[i] = maps[i]; in sample_setup_maps()
1203 switch (i) { in sample_setup_maps()
1207 sample_map_count[i] = sample_n_cpus; in sample_setup_maps()
1210 sample_map_count[i] = in sample_setup_maps()
1214 sample_map_count[i] = XDP_ACTION_MAX * sample_n_cpus; in sample_setup_maps()
1216 sample_map_count[i] = sample_n_cpus * sample_n_cpus; in sample_setup_maps()
1221 if (bpf_map__resize(sample_map[i], sample_map_count[i]) < 0) in sample_setup_maps()
1230 for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) { in sample_setup_maps_mappings() local
1231 size_t size = sample_map_count[i] * sizeof(struct datarec); in sample_setup_maps_mappings()
1233 sample_mmap[i] = mmap(NULL, size, PROT_READ | PROT_WRITE, in sample_setup_maps_mappings()
1234 MAP_SHARED, bpf_map__fd(sample_map[i]), 0); in sample_setup_maps_mappings()
1235 if (sample_mmap[i] == MAP_FAILED) in sample_setup_maps_mappings()
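sample_setup_maps_mappings() above maps each BPF array's value region directly into userspace with mmap() on the map fd, which works for array maps created with the BPF_F_MMAPABLE flag. A hedged sketch of that one step follows; the element count and value size are parameters here, not values taken from the sample.

#include <stddef.h>
#include <sys/mman.h>
#include <bpf/libbpf.h>

/* Map 'count' values of 'value_size' bytes from a BPF_F_MMAPABLE array map
 * into this process; returns NULL on failure. */
static void *mmap_map_values(struct bpf_map *map, size_t count, size_t value_size)
{
	size_t size = count * value_size;
	void *mem;

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   bpf_map__fd(map), 0);
	if (mem == MAP_FAILED)
		return NULL;
	return mem;
}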
1369 for (int i = 0; i < NUM_MAP; i++) { in sample_exit() local
1370 size = sample_map_count[i] * sizeof(**sample_mmap); in sample_exit()
1371 munmap(sample_mmap[i], size); in sample_exit()
1374 int i = sample_xdp_cnt, ifindex, xdp_flags; in sample_exit() local
1377 prog_id = sample_xdp_progs[i].prog_id; in sample_exit()
1378 ifindex = sample_xdp_progs[i].ifindex; in sample_exit()
1379 xdp_flags = sample_xdp_progs[i].flags; in sample_exit()
1390 int i; in sample_stats_collect() local
1399 for (i = 1; i < XDP_REDIRECT_ERR_MAX; i++) in sample_stats_collect()
1400 map_collect_percpu(&sample_mmap[MAP_REDIRECT_ERR][i * sample_n_cpus], in sample_stats_collect()
1401 &rec->redir_err[i]); in sample_stats_collect()
1405 for (i = 0; i < sample_n_cpus; i++) in sample_stats_collect()
1406 map_collect_percpu(&sample_mmap[MAP_CPUMAP_ENQUEUE][i * sample_n_cpus], in sample_stats_collect()
1407 &rec->enq[i]); in sample_stats_collect()
1414 for (i = 0; i < XDP_ACTION_MAX; i++) in sample_stats_collect()
1415 map_collect_percpu(&sample_mmap[MAP_EXCEPTION][i * sample_n_cpus], in sample_stats_collect()
1416 &rec->exception[i]); in sample_stats_collect()
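sample_stats_collect() addresses the mmapped regions row-major: logical row i (a redirect error code, an enqueue destination CPU, or an XDP action) begins at offset i * sample_n_cpus, and each row holds one record per possible CPU. A small sketch of that addressing, reusing the hypothetical struct rec_sketch from the collection sketch above:

/* Row i of a flat [n_rows][n_cpus] region starts at index i * n_cpus; the
 * returned slice (n_cpus records) is what the per-CPU collector consumes. */
static struct rec_sketch *row_start(struct rec_sketch *base, int row, int n_cpus)
{
	return &base[row * n_cpus];
}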