Search for refs:opts — results 1 – 25 of 125, sorted by relevance

/tools/testing/selftests/bpf/prog_tests/
xdp_context_test_run.c (a usage sketch follows this group)
6 void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts, in test_xdp_context_error() argument
21 opts.ctx_in = &ctx; in test_xdp_context_error()
22 opts.ctx_size_in = sizeof(ctx); in test_xdp_context_error()
23 err = bpf_prog_test_run_opts(prog_fd, &opts); in test_xdp_context_error()
34 DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, in test_xdp_context_test_run()
50 opts.ctx_in = bad_ctx; in test_xdp_context_test_run()
51 opts.ctx_size_in = sizeof(bad_ctx); in test_xdp_context_test_run()
52 err = bpf_prog_test_run_opts(prog_fd, &opts); in test_xdp_context_test_run()
58 opts.ctx_in = &ctx_in; in test_xdp_context_test_run()
59 opts.ctx_size_in = sizeof(ctx_in); in test_xdp_context_test_run()
[all …]
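The xdp_context_test_run.c hits show the usual BPF_PROG_TEST_RUN pattern: declare a bpf_test_run_opts with DECLARE_LIBBPF_OPTS(), point ctx_in/ctx_size_in at an xdp_md context, call bpf_prog_test_run_opts(), and check opts.retval. A minimal sketch of that pattern, assuming prog_fd refers to an already-loaded XDP program (the packet buffer and sizes are illustrative):

#include <bpf/bpf.h>   /* bpf_prog_test_run_opts(), DECLARE_LIBBPF_OPTS() */

/* Sketch: run a loaded XDP program once with a caller-supplied xdp_md
 * context.  The dummy packet keeps the kernel's ctx/data consistency
 * checks satisfied; error handling is trimmed. */
static int run_xdp_prog_once(int prog_fd)
{
    char pkt[64] = {};                               /* dummy packet data */
    struct xdp_md ctx_in = { .data_end = sizeof(pkt) };
    DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
        .data_in = pkt,
        .data_size_in = sizeof(pkt),
        .ctx_in = &ctx_in,
        .ctx_size_in = sizeof(ctx_in),
        .repeat = 1,
    );
    int err;

    err = bpf_prog_test_run_opts(prog_fd, &opts);
    if (err)
        return err;

    return opts.retval;    /* the XDP verdict, e.g. XDP_PASS */
}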
bpf_cookie.c (a usage sketch follows this group)
13 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); in kprobe_subtest()
18 opts.bpf_cookie = 0x1; in kprobe_subtest()
19 opts.retprobe = false; in kprobe_subtest()
21 SYS_NANOSLEEP_KPROBE_NAME, &opts); in kprobe_subtest()
25 opts.bpf_cookie = 0x2; in kprobe_subtest()
26 opts.retprobe = false; in kprobe_subtest()
28 SYS_NANOSLEEP_KPROBE_NAME, &opts); in kprobe_subtest()
33 opts.bpf_cookie = 0x10; in kprobe_subtest()
34 opts.retprobe = true; in kprobe_subtest()
36 SYS_NANOSLEEP_KPROBE_NAME, &opts); in kprobe_subtest()
[all …]
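bpf_cookie.c attaches the same kprobe program several times, each time with a different opts.bpf_cookie, so the BPF side can tell the attach points apart (e.g. via the bpf_get_attach_cookie() helper). A hedged sketch of the user-space half, assuming libbpf 1.0 error conventions; the arch-specific nanosleep symbol name is illustrative:

#include <errno.h>
#include <bpf/libbpf.h>

/* Sketch: attach one kprobe program twice -- entry probe and retprobe --
 * tagging each attachment with its own cookie.  prog is assumed to come
 * from an already-loaded object. */
static int attach_with_cookies(struct bpf_program *prog)
{
    DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
    struct bpf_link *entry, *ret;

    opts.bpf_cookie = 0x1;
    opts.retprobe = false;
    entry = bpf_program__attach_kprobe_opts(prog, "__x64_sys_nanosleep", &opts);
    if (!entry)
        return -errno;

    opts.bpf_cookie = 0x10;
    opts.retprobe = true;
    ret = bpf_program__attach_kprobe_opts(prog, "__x64_sys_nanosleep", &opts);
    if (!ret) {
        bpf_link__destroy(entry);
        return -errno;
    }

    /* ... trigger nanosleep(), verify the cookies the program observed ... */
    bpf_link__destroy(ret);
    bpf_link__destroy(entry);
    return 0;
}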
pe_preserve_elems.c
13 DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts); in test_one_map()
30 err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts); in test_one_map()
33 if (CHECK(opts.retval != 0, "bpf_perf_event_read_value", in test_one_map()
34 "failed with %d\n", opts.retval)) in test_one_map()
40 err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts); in test_one_map()
45 CHECK(opts.retval != 0, "bpf_perf_event_read_value", in test_one_map()
46 "failed with %d\n", opts.retval); in test_one_map()
48 CHECK(opts.retval != -ENOENT, "bpf_perf_event_read_value", in test_one_map()
50 opts.retval); in test_one_map()
raw_tp_test_run.c
19 DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, in test_raw_tp_test_run()
66 opts.cpu = i; in test_raw_tp_test_run()
67 opts.retval = 0; in test_raw_tp_test_run()
68 err = bpf_prog_test_run_opts(prog_fd, &opts); in test_raw_tp_test_run()
72 CHECK(opts.retval != expected_retval, in test_raw_tp_test_run()
74 expected_retval, opts.retval); in test_raw_tp_test_run()
78 opts.cpu = 0xffffffff; in test_raw_tp_test_run()
79 err = bpf_prog_test_run_opts(prog_fd, &opts); in test_raw_tp_test_run()
85 opts.cpu = 1; in test_raw_tp_test_run()
86 opts.flags = 0; in test_raw_tp_test_run()
[all …]
tc_bpf.c (a usage sketch follows this group)
27 DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd); in test_tc_bpf_basic()
36 ret = bpf_tc_attach(hook, &opts); in test_tc_bpf_basic()
40 if (!ASSERT_EQ(opts.handle, 1, "handle set") || in test_tc_bpf_basic()
41 !ASSERT_EQ(opts.priority, 1, "priority set") || in test_tc_bpf_basic()
42 !ASSERT_EQ(opts.prog_id, info.id, "prog_id set")) in test_tc_bpf_basic()
45 opts.prog_id = 0; in test_tc_bpf_basic()
46 opts.flags = BPF_TC_F_REPLACE; in test_tc_bpf_basic()
47 ret = bpf_tc_attach(hook, &opts); in test_tc_bpf_basic()
51 opts.flags = opts.prog_fd = opts.prog_id = 0; in test_tc_bpf_basic()
52 ret = bpf_tc_query(hook, &opts); in test_tc_bpf_basic()
[all …]
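tc_bpf.c exercises libbpf's TC attach API: a bpf_tc_hook names the interface and attach point, a bpf_tc_opts carries handle/priority/prog_fd in and gets prog_id filled back, and BPF_TC_F_REPLACE allows re-attaching over an existing filter. A minimal sketch, assuming prog_fd is a loaded BPF_PROG_TYPE_SCHED_CLS program; the interface name and error handling are illustrative:

#include <errno.h>
#include <net/if.h>      /* if_nametoindex() */
#include <bpf/libbpf.h>

/* Sketch: ensure a clsact hook exists on lo and attach a cls_bpf filter
 * on ingress.  libbpf writes the attached program's id back into
 * opts.prog_id. */
static int attach_tc_ingress(int prog_fd)
{
    DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
        .ifindex = if_nametoindex("lo"),
        .attach_point = BPF_TC_INGRESS);
    DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts,
        .handle = 1, .priority = 1, .prog_fd = prog_fd);
    int err;

    err = bpf_tc_hook_create(&hook);
    if (err && err != -EEXIST)        /* an existing clsact qdisc is fine */
        return err;

    err = bpf_tc_attach(&hook, &opts);
    if (err)
        return err;

    return opts.prog_id ? 0 : -EINVAL;
}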
xdp_attach.c (a usage sketch follows this group)
14 DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, in test_xdp_attach()
42 &opts); in test_xdp_attach()
52 &opts); in test_xdp_attach()
56 opts.old_fd = fd1; in test_xdp_attach()
57 err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts); in test_xdp_attach()
65 err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts); in test_xdp_attach()
69 err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts); in test_xdp_attach()
73 opts.old_fd = fd2; in test_xdp_attach()
74 err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts); in test_xdp_attach()
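xdp_attach.c drives the older bpf_set_link_xdp_fd_opts() interface: when opts.old_fd is set, libbpf adds XDP_FLAGS_REPLACE and the kernel swaps programs only if the currently attached one matches (passing -1 as the new fd detaches). A hedged sketch of that replace-or-fail call; on current libbpf the equivalent is bpf_xdp_attach() with bpf_xdp_attach_opts.old_prog_fd:

#include <bpf/libbpf.h>

/* Sketch: atomically replace the XDP program on ifindex, failing
 * (typically with -EEXIST) if a program other than old_fd is currently
 * attached.  Mirrors the deprecated *_opts call the selftest uses;
 * libbpf sets XDP_FLAGS_REPLACE internally when old_fd is present. */
static int xdp_replace(int ifindex, int old_fd, int new_fd)
{
    DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
        .old_fd = old_fd);

    return bpf_set_link_xdp_fd_opts(ifindex, new_fd, 0, &opts);
}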
/tools/perf/util/
record.c
95 void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callch… in evlist__config() argument
100 bool sample_id = opts->sample_id; in evlist__config()
106 if (opts->group) in evlist__config()
110 opts->no_inherit = true; in evlist__config()
115 evsel__config(evsel, opts, callchain); in evlist__config()
124 if (opts->full_auxtrace) { in evlist__config()
157 static int record_opts__config_freq(struct record_opts *opts) in record_opts__config_freq() argument
159 bool user_freq = opts->user_freq != UINT_MAX; in record_opts__config_freq()
160 bool user_interval = opts->user_interval != ULLONG_MAX; in record_opts__config_freq()
169 opts->default_interval = opts->user_interval; in record_opts__config_freq()
[all …]
clockid.c
70 struct record_opts *opts = (struct record_opts *)opt->value; in parse_clockid() local
75 opts->use_clockid = 0; in parse_clockid()
84 if (opts->use_clockid) in parse_clockid()
87 opts->use_clockid = true; in parse_clockid()
90 if (sscanf(str, "%d", &opts->clockid) == 1) in parse_clockid()
91 return get_clockid_res(opts->clockid, &opts->clockid_res_ns); in parse_clockid()
99 opts->clockid = cm->clockid; in parse_clockid()
100 return get_clockid_res(opts->clockid, in parse_clockid()
101 &opts->clockid_res_ns); in parse_clockid()
105 opts->use_clockid = false; in parse_clockid()
/tools/perf/arch/x86/util/
intel-bts.c
107 struct record_opts *opts) in intel_bts_recording_options() argument
116 if (opts->auxtrace_sample_mode) { in intel_bts_recording_options()
122 btsr->snapshot_mode = opts->auxtrace_snapshot_mode; in intel_bts_recording_options()
133 opts->full_auxtrace = true; in intel_bts_recording_options()
137 if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) { in intel_bts_recording_options()
142 if (!opts->full_auxtrace) in intel_bts_recording_options()
145 if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) { in intel_bts_recording_options()
151 if (opts->auxtrace_snapshot_mode) { in intel_bts_recording_options()
152 if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) { in intel_bts_recording_options()
154 opts->auxtrace_mmap_pages = MiB(4) / page_size; in intel_bts_recording_options()
[all …]
intel-pt.c
242 struct record_opts *opts, in intel_pt_parse_snapshot_options() argument
256 opts->auxtrace_snapshot_mode = true; in intel_pt_parse_snapshot_options()
257 opts->auxtrace_snapshot_size = snapshot_size; in intel_pt_parse_snapshot_options()
621 struct record_opts *opts) in intel_pt_recording_options() argument
634 ptr->snapshot_mode = opts->auxtrace_snapshot_mode; in intel_pt_recording_options()
646 opts->full_auxtrace = true; in intel_pt_recording_options()
650 if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) { in intel_pt_recording_options()
655 if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) { in intel_pt_recording_options()
660 if (opts->use_clockid) { in intel_pt_recording_options()
668 if (!opts->full_auxtrace) in intel_pt_recording_options()
[all …]
/tools/perf/
builtin-record.c
92 struct record_opts opts; member
410 struct record_opts *opts = (struct record_opts *)opt->value; in record__aio_parse() local
413 opts->nr_cblocks = 0; in record__aio_parse()
416 opts->nr_cblocks = strtol(str, NULL, 0); in record__aio_parse()
417 if (!opts->nr_cblocks) in record__aio_parse()
418 opts->nr_cblocks = nr_cblocks_default; in record__aio_parse()
448 return rec->opts.nr_cblocks > 0; in record__aio_enabled()
457 struct record_opts *opts = (struct record_opts *)opt->value; in record__mmap_flush_parse() local
470 opts->mmap_flush = parse_tag_value(str, tags); in record__mmap_flush_parse()
471 if (opts->mmap_flush == (int)-1) in record__mmap_flush_parse()
[all …]
builtin-data.c
33 struct perf_data_convert_opts opts = { variable
44 OPT_BOOLEAN(0, "tod", &opts.tod, "Convert time to wall clock time"),
46 OPT_BOOLEAN('f', "force", &opts.force, "don't complain, do it"),
47 OPT_BOOLEAN(0, "all", &opts.all, "Convert all events"),
71 return bt_convert__perf2json(input_name, to_json, &opts); in cmd_data_convert()
75 return bt_convert__perf2ctf(input_name, to_ctf, &opts); in cmd_data_convert()
/tools/lib/bpf/
skel_internal.h
63 static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) in bpf_load_and_run() argument
69 opts->data_sz, 1, 0); in bpf_load_and_run()
71 opts->errstr = "failed to create loader map"; in bpf_load_and_run()
76 err = bpf_map_update_elem(map_fd, &key, opts->data, 0); in bpf_load_and_run()
78 opts->errstr = "failed to update loader map"; in bpf_load_and_run()
85 attr.insns = (long) opts->insns; in bpf_load_and_run()
86 attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); in bpf_load_and_run()
90 attr.log_level = opts->ctx->log_level; in bpf_load_and_run()
91 attr.log_size = opts->ctx->log_size; in bpf_load_and_run()
92 attr.log_buf = opts->ctx->log_buf; in bpf_load_and_run()
[all …]
libbpf_internal.h (a sketch of the opts convention follows this group)
210 static inline bool libbpf_validate_opts(const char *opts, in libbpf_validate_opts() argument
218 if (!libbpf_is_mem_zeroed(opts + opts_sz, (ssize_t)user_sz - opts_sz)) { in libbpf_validate_opts()
225 #define OPTS_VALID(opts, type) \ argument
226 (!(opts) || libbpf_validate_opts((const char *)opts, \
229 (opts)->sz, #type))
230 #define OPTS_HAS(opts, field) \ argument
231 ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
232 #define OPTS_GET(opts, field, fallback_value) \ argument
233 (OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
234 #define OPTS_SET(opts, field, value) \ argument
[all …]
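libbpf_internal.h is where the opts convention itself is enforced: every *_opts struct begins with a size_t sz, callers initialize it via LIBBPF_OPTS()/DECLARE_LIBBPF_OPTS(), and the library reads fields through OPTS_VALID/OPTS_HAS/OPTS_GET so binaries built against an older, shorter struct keep working (OPTS_VALID additionally insists that any bytes beyond the caller's sz are zero). A self-contained, simplified illustration of the idea — not libbpf's actual macros — using a hypothetical demo_opts struct:

#include <stddef.h>
#include <stdio.h>

/* Simplified illustration of the sz-based opts convention (not libbpf's
 * actual code).  The struct carries its own size so the callee knows
 * which fields the caller's copy of the struct really has. */
struct demo_opts {
    size_t sz;           /* sizeof(struct demo_opts) as the caller knows it */
    int flags;
    int timeout_ms;      /* imagine this field was added in a later release */
};

/* Hypothetical stand-in for OPTS_GET(): use the field only if the
 * caller's struct is large enough to contain it, otherwise fall back. */
#define DEMO_OPTS_GET(o, field, fallback)                                   \
    (((o) && (o)->sz >= offsetof(struct demo_opts, field) +                 \
                        sizeof((o)->field)) ? (o)->field : (fallback))

static void demo_api(const struct demo_opts *o)
{
    printf("flags=%d timeout=%d\n",
           DEMO_OPTS_GET(o, flags, 0),
           DEMO_OPTS_GET(o, timeout_ms, 1000));
}

int main(void)
{
    struct demo_opts opts = { .sz = sizeof(opts), .flags = 1 };
    /* A caller built against the older header that stopped at .flags: */
    struct demo_opts old = { .sz = offsetof(struct demo_opts, timeout_ms), .flags = 2 };

    demo_api(&opts);   /* flags=1 timeout=0    (field present, left zero)  */
    demo_api(&old);    /* flags=2 timeout=1000 (field absent -> fallback)  */
    demo_api(NULL);    /* flags=0 timeout=1000 (NULL opts means defaults)  */
    return 0;
}

Putting sz first is what makes the scheme ABI-stable: new fields can only ever be appended, and their absence is detected purely from the size the caller reported.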
netlink.c
270 const struct bpf_xdp_set_link_opts *opts) in bpf_set_link_xdp_fd_opts() argument
274 if (!OPTS_VALID(opts, bpf_xdp_set_link_opts)) in bpf_set_link_xdp_fd_opts()
277 if (OPTS_HAS(opts, old_fd)) { in bpf_set_link_xdp_fd_opts()
278 old_fd = OPTS_GET(opts, old_fd, -1); in bpf_set_link_xdp_fd_opts()
517 const struct bpf_tc_opts *opts,
540 struct bpf_tc_opts *opts; member
550 if (!info || !info->opts) in __get_tc_info()
561 OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID])); in __get_tc_info()
562 OPTS_SET(info->opts, handle, tc->tcm_handle); in __get_tc_info()
563 OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16); in __get_tc_info()
[all …]
bpf.c (a usage sketch follows this group)
537 const struct bpf_map_batch_opts *opts) in bpf_map_batch_common() argument
542 if (!OPTS_VALID(opts, bpf_map_batch_opts)) in bpf_map_batch_common()
552 attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); in bpf_map_batch_common()
553 attr.batch.flags = OPTS_GET(opts, flags, 0); in bpf_map_batch_common()
562 const struct bpf_map_batch_opts *opts) in bpf_map_delete_batch() argument
565 NULL, keys, NULL, count, opts); in bpf_map_delete_batch()
570 const struct bpf_map_batch_opts *opts) in bpf_map_lookup_batch() argument
573 out_batch, keys, values, count, opts); in bpf_map_lookup_batch()
578 const struct bpf_map_batch_opts *opts) in bpf_map_lookup_and_delete_batch() argument
582 count, opts); in bpf_map_lookup_and_delete_batch()
[all …]
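bpf.c funnels every batch helper into bpf_map_batch_common(), which copies elem_flags/flags out of the bpf_map_batch_opts via OPTS_GET(). From the caller's side the batch calls are iterated with an opaque cursor until the kernel reports -ENOENT. A hedged sketch of draining a BPF hash map with bpf_map_lookup_batch(), assuming libbpf 1.0 error conventions and 4-byte keys and values:

#include <errno.h>
#include <bpf/bpf.h>

/* Sketch: read up to `max` entries from a hash map in batches.  keys and
 * vals are caller-provided arrays of at least `max` elements; returns the
 * number of entries read or a negative errno. */
static int dump_map_batched(int map_fd, __u32 *keys, __u32 *vals, __u32 max)
{
    DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);   /* default flags */
    __u32 out_batch, total = 0;
    void *in_batch = NULL;         /* NULL cursor: start from the beginning */
    int err = 0;

    while (total < max) {
        __u32 count = max - total;

        err = bpf_map_lookup_batch(map_fd, in_batch, &out_batch,
                                   keys + total, vals + total,
                                   &count, &opts);
        total += count;            /* entries copied even on the final call */
        if (err) {
            if (err == -ENOENT)    /* reached the end of the map */
                err = 0;
            break;
        }
        in_batch = &out_batch;     /* resume where the last call stopped */
    }
    return err ? err : (int)total;
}

-ENOENT here is not an error in the usual sense; it is how the kernel signals that the walk is complete.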
/tools/testing/selftests/netfilter/
nf-queue.c (a usage sketch follows this group)
29 static struct options opts; variable
90 if (opts.verbose > 0) in queue_cb()
99 if (opts.verbose > 0) { in queue_cb()
113 if (opts.count_packets) in queue_cb()
227 queue_num = opts.queue_num; in open_queue()
237 flags = opts.gso_enabled ? NFQA_CFG_F_GSO : 0; in open_queue()
248 tv.tv_sec = opts.timeout; in open_queue()
249 if (opts.timeout && setsockopt(mnl_socket_get_fd(nl), in open_queue()
313 if (opts.delay_ms) in mainloop()
314 sleep_ms(opts.delay_ms); in mainloop()
[all …]
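nf-queue.c keeps its configuration in a single file-scope struct options opts that the netlink callbacks read directly (verbosity, queue number, timeout, GSO, per-packet delay). A sketch of how such a struct is typically filled from getopt() before the main loop; the field names follow the ones visible in the hits, while the option letters are illustrative:

#include <stdlib.h>
#include <unistd.h>

/* Sketch of the pattern: one static options struct, populated once in
 * parse_opts() and then read by the packet callbacks. */
struct options {
    int verbose;
    int count_packets;
    int gso_enabled;
    unsigned int queue_num;
    unsigned int timeout;
    unsigned int delay_ms;
};

static struct options opts;

static void parse_opts(int argc, char **argv)
{
    int c;

    while ((c = getopt(argc, argv, "cvGq:t:d:")) != -1) {
        switch (c) {
        case 'c': opts.count_packets = 1; break;
        case 'v': opts.verbose++; break;
        case 'G': opts.gso_enabled = 1; break;
        case 'q': opts.queue_num = atoi(optarg); break;
        case 't': opts.timeout = atoi(optarg); break;
        case 'd': opts.delay_ms = atoi(optarg); break;
        default: exit(EXIT_FAILURE);
        }
    }
}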
/tools/testing/vsock/
vsock_test.c (a usage sketch follows this group)
24 static void test_stream_connection_reset(const struct test_opts *opts) in test_stream_connection_reset() argument
33 .svm_cid = opts->peer_cid, in test_stream_connection_reset()
60 static void test_stream_bind_only_client(const struct test_opts *opts) in test_stream_bind_only_client() argument
69 .svm_cid = opts->peer_cid, in test_stream_bind_only_client()
102 static void test_stream_bind_only_server(const struct test_opts *opts) in test_stream_bind_only_server() argument
132 static void test_stream_client_close_client(const struct test_opts *opts) in test_stream_client_close_client() argument
136 fd = vsock_stream_connect(opts->peer_cid, 1234); in test_stream_client_close_client()
146 static void test_stream_client_close_server(const struct test_opts *opts) in test_stream_client_close_server() argument
167 static void test_stream_server_close_client(const struct test_opts *opts) in test_stream_server_close_client() argument
171 fd = vsock_stream_connect(opts->peer_cid, 1234); in test_stream_server_close_client()
[all …]
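Each vsock test receives a struct test_opts carrying the peer CID, which it plugs into a struct sockaddr_vm before connecting. A minimal sketch of the connect side using only the standard AF_VSOCK socket API; the port is illustrative, and the tests use a helper of the same shape:

#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <unistd.h>

/* Sketch: open a vsock stream connection to peer_cid:port.  Returns the
 * connected fd or -1. */
static int vsock_connect(unsigned int peer_cid, unsigned int port)
{
    struct sockaddr_vm addr = {
        .svm_family = AF_VSOCK,
        .svm_port = port,
        .svm_cid = peer_cid,
    };
    int fd;

    fd = socket(AF_VSOCK, SOCK_STREAM, 0);
    if (fd < 0)
        return -1;

    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}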
/tools/testing/selftests/net/
reuseaddr_ports_exhausted.c (a usage sketch follows this group)
83 struct reuse_opts *opts; in TEST() local
87 opts = &unreusable_opts[i]; in TEST()
90 fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]); in TEST()
103 struct reuse_opts *opts; in TEST() local
107 opts = &reusable_opts[i]; in TEST()
110 fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]); in TEST()
114 if (opts->reuseport[0] && opts->reuseport[1]) { in TEST()
128 struct reuse_opts *opts; in TEST() local
133 opts = &reusable_opts[i]; in TEST()
139 fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]); in TEST()
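reuseaddr_ports_exhausted.c walks tables of reuse_opts pairs and binds two sockets per combination to check which pairings may share a port. Its bind_port() helper boils down to setsockopt() before bind(); a hedged sketch (the port number is illustrative):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch of a bind_port()-style helper: create a TCP socket, apply the
 * requested SO_REUSEADDR / SO_REUSEPORT settings, then bind it to a
 * fixed local port.  Returns the bound fd or -1. */
static int bind_port(int reuseaddr, int reuseport)
{
    struct sockaddr_in addr = {
        .sin_family = AF_INET,
        .sin_port = htons(50000),
        .sin_addr.s_addr = htonl(INADDR_ANY),
    };
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0)
        return -1;

    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(reuseaddr)) ||
        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuseport, sizeof(reuseport)) ||
        bind(fd, (struct sockaddr *)&addr, sizeof(addr))) {
        close(fd);
        return -1;
    }
    return fd;
}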
/tools/lib/subcmd/
parse-options.c (a usage sketch follows this group)
699 static void print_option_help(const struct option *opts, int full) in print_option_help() argument
704 if (opts->type == OPTION_GROUP) { in print_option_help()
706 if (*opts->help) in print_option_help()
707 fprintf(stderr, "%s\n", opts->help); in print_option_help()
710 if (!full && (opts->flags & PARSE_OPT_HIDDEN)) in print_option_help()
712 if (opts->flags & PARSE_OPT_DISABLED) in print_option_help()
716 if (opts->short_name) in print_option_help()
717 pos += fprintf(stderr, "-%c", opts->short_name); in print_option_help()
721 if (opts->long_name && opts->short_name) in print_option_help()
723 if (opts->long_name) in print_option_help()
[all …]
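parse-options.c is the tools/lib/subcmd engine behind all of the OPT_* tables in this listing: each struct option row carries the short/long name, a destination pointer, and the help text that print_option_help() renders. A small sketch of defining a table and handing it to parse_options(), assuming the tools/lib/subcmd headers are on the include path (names and strings are illustrative):

#include <stdbool.h>
#include <subcmd/parse-options.h>

/* Sketch: a minimal option table in the tools/lib/subcmd style, the same
 * machinery perf's builtins and benches use. */
static const char * const demo_usage[] = {
    "demo [<options>]",
    NULL
};

int main(int argc, const char **argv)
{
    bool verbose = false;
    const char *output = NULL;
    struct option options[] = {
        OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
        OPT_STRING('o', "output", &output, "file", "output file name"),
        OPT_END()
    };

    argc = parse_options(argc, argv, options, demo_usage, 0);
    /* argc now counts the remaining non-option arguments. */
    return 0;
}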
/tools/perf/scripts/python/
stackcollapse.py
58 (opts, args) = parser.parse_args()
62 if opts.include_tid and not opts.include_comm:
64 if opts.include_pid and not opts.include_comm:
77 if opts.tidy_java:
92 if opts.annotate_kernel and dso == '[kernel.kallsyms]':
111 if opts.include_comm:
114 if opts.include_pid:
117 if opts.include_tid:
/tools/perf/bench/
evlist-open-close.c
28 static struct record_opts opts = { variable
49 OPT_BOOLEAN('a', "all-cpus", &opts.target.system_wide, "system-wide collection from all CPUs"),
50 OPT_STRING('C', "cpu", &opts.target.cpu_list, "cpu", "list of cpus where to open events"),
51 OPT_STRING('p', "pid", &opts.target.pid, "pid", "record events on existing process id"),
52 OPT_STRING('t', "tid", &opts.target.tid, "tid", "record events on existing thread id"),
53 OPT_STRING('u', "uid", &opts.target.uid_str, "user", "user to profile"),
54 OPT_BOOLEAN(0, "per-thread", &opts.target.per_thread, "use per-thread mmaps"),
93 ret = evlist__create_maps(evlist, &opts.target); in bench__create_evlist()
99 evlist__config(evlist, &opts, NULL); in bench__create_evlist()
118 err = evlist__mmap(evlist, opts.mmap_pages); in bench__do_evlist_open_close()
[all …]
/tools/perf/arch/arm/util/
cs-etm.c
233 struct record_opts *opts, in cs_etm_parse_snapshot_options() argument
247 opts->auxtrace_snapshot_mode = true; in cs_etm_parse_snapshot_options()
248 opts->auxtrace_snapshot_size = snapshot_size; in cs_etm_parse_snapshot_options()
293 struct record_opts *opts) in cs_etm_recording_options() argument
305 ptr->snapshot_mode = opts->auxtrace_snapshot_mode; in cs_etm_recording_options()
307 if (!record_opts__no_switch_events(opts) && in cs_etm_recording_options()
309 opts->record_switch_events = true; in cs_etm_recording_options()
321 opts->full_auxtrace = true; in cs_etm_recording_options()
333 if (opts->use_clockid) { in cs_etm_recording_options()
340 if (opts->auxtrace_snapshot_mode) { in cs_etm_recording_options()
[all …]
/tools/perf/arch/s390/util/
auxtrace.c
49 struct record_opts *opts) in cpumsf_recording_options() argument
54 opts->full_auxtrace = true; in cpumsf_recording_options()
64 if (!opts->auxtrace_mmap_pages) { in cpumsf_recording_options()
65 if (opts->user_freq != UINT_MAX) in cpumsf_recording_options()
66 factor = (opts->user_freq + DEFAULT_FREQ in cpumsf_recording_options()
69 opts->auxtrace_mmap_pages = roundup_pow_of_two(pages); in cpumsf_recording_options()
77 struct record_opts *opts __maybe_unused, in cpumsf_parse_snapshot_options()
/tools/perf/arch/arm64/util/
arm-spe.c
89 struct record_opts *opts) in arm_spe_recording_options() argument
111 opts->full_auxtrace = true; in arm_spe_recording_options()
115 if (!opts->full_auxtrace) in arm_spe_recording_options()
119 if (!opts->auxtrace_mmap_pages) { in arm_spe_recording_options()
121 opts->auxtrace_mmap_pages = MiB(4) / page_size; in arm_spe_recording_options()
123 opts->auxtrace_mmap_pages = KiB(128) / page_size; in arm_spe_recording_options()
124 if (opts->mmap_pages == UINT_MAX) in arm_spe_recording_options()
125 opts->mmap_pages = KiB(256) / page_size; in arm_spe_recording_options()
130 if (opts->auxtrace_mmap_pages) { in arm_spe_recording_options()
131 size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size; in arm_spe_recording_options()
