
Searched refs:data (Results 1 – 25 of 869) sorted by relevance


/tools/testing/selftests/arm64/fp/
vec-syscfg.c
69 static bool vec_type_supported(struct vec_data *data) in vec_type_supported() argument
71 return getauxval(data->hwcap_type) & data->hwcap; in vec_type_supported()
89 static int get_child_rdvl(struct vec_data *data) in get_child_rdvl() argument
127 ret = execl(data->rdvl_binary, data->rdvl_binary, NULL); in get_child_rdvl()
129 data->rdvl_binary, errno, strerror(errno)); in get_child_rdvl()
218 static void proc_read_default(struct vec_data *data) in proc_read_default() argument
222 ret = file_read_integer(data->default_vl_file, &default_vl); in proc_read_default()
227 child_vl = get_child_rdvl(data); in proc_read_default()
230 data->default_vl_file, in proc_read_default()
235 ksft_test_result_pass("%s default vector length %d\n", data->name, in proc_read_default()
[all …]
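The vec-syscfg.c matches above probe for a vector extension by testing a hwcap bit taken from the auxiliary vector. Below is a minimal standalone sketch of that probe; HWCAP_SVE is hard-coded here purely for illustration (on a real arm64 build it comes from <asm/hwcap.h>), and the struct/function names mirror the test only loosely.

#include <stdbool.h>
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SVE
#define HWCAP_SVE (1 << 22)	/* arm64 value, hard-coded for illustration */
#endif

struct vec_probe {
	const char *name;
	unsigned long hwcap_type;	/* AT_HWCAP or AT_HWCAP2 */
	unsigned long hwcap;		/* feature bit within that word */
};

static bool vec_type_supported(const struct vec_probe *p)
{
	/* getauxval() returns the requested auxiliary-vector word. */
	return getauxval(p->hwcap_type) & p->hwcap;
}

int main(void)
{
	struct vec_probe sve = { "SVE", AT_HWCAP, HWCAP_SVE };

	printf("%s %ssupported\n", sve.name,
	       vec_type_supported(&sve) ? "" : "not ");
	return 0;
}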
/tools/perf/util/
data.c
32 void perf_data__close_dir(struct perf_data *data) in perf_data__close_dir() argument
34 close_dir(data->dir.files, data->dir.nr); in perf_data__close_dir()
37 int perf_data__create_dir(struct perf_data *data, int nr) in perf_data__create_dir() argument
43 if (WARN_ON(!data->is_dir)) in perf_data__create_dir()
53 ret = asprintf(&file->path, "%s/data.%d", data->path, i); in perf_data__create_dir()
78 data->dir.version = PERF_DIR_VERSION; in perf_data__create_dir()
79 data->dir.files = files; in perf_data__create_dir()
80 data->dir.nr = nr; in perf_data__create_dir()
88 int perf_data__open_dir(struct perf_data *data) in perf_data__open_dir() argument
100 if (perf_data__is_single_file(data)) in perf_data__open_dir()
[all …]
data.h
46 static inline bool perf_data__is_read(struct perf_data *data) in perf_data__is_read() argument
48 return data->mode == PERF_DATA_MODE_READ; in perf_data__is_read()
51 static inline bool perf_data__is_write(struct perf_data *data) in perf_data__is_write() argument
53 return data->mode == PERF_DATA_MODE_WRITE; in perf_data__is_write()
56 static inline int perf_data__is_pipe(struct perf_data *data) in perf_data__is_pipe() argument
58 return data->is_pipe; in perf_data__is_pipe()
61 static inline bool perf_data__is_dir(struct perf_data *data) in perf_data__is_dir() argument
63 return data->is_dir; in perf_data__is_dir()
66 static inline bool perf_data__is_single_file(struct perf_data *data) in perf_data__is_single_file() argument
68 return data->dir.version == PERF_DIR_SINGLE_FILE; in perf_data__is_single_file()
[all …]
zstd.c
8 int zstd_init(struct zstd_data *data, int level) in zstd_init() argument
10 data->comp_level = level; in zstd_init()
11 data->dstream = NULL; in zstd_init()
12 data->cstream = NULL; in zstd_init()
16 int zstd_fini(struct zstd_data *data) in zstd_fini() argument
18 if (data->dstream) { in zstd_fini()
19 ZSTD_freeDStream(data->dstream); in zstd_fini()
20 data->dstream = NULL; in zstd_fini()
23 if (data->cstream) { in zstd_fini()
24 ZSTD_freeCStream(data->cstream); in zstd_fini()
[all …]
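The zstd.c hits show a lazy-init/explicit-fini lifecycle: both streams start out NULL and are freed and reset to NULL on fini, so the teardown helper is safe to call repeatedly. A self-contained sketch of that pattern against libzstd follows; the struct mirrors perf's zstd_data but is redefined here for the example (link with -lzstd).

#include <stddef.h>
#include <zstd.h>

struct zstd_data {
	int comp_level;
	ZSTD_DStream *dstream;
	ZSTD_CStream *cstream;
};

static int zstd_init(struct zstd_data *data, int level)
{
	/* Streams are created lazily elsewhere; only record the level here. */
	data->comp_level = level;
	data->dstream = NULL;
	data->cstream = NULL;
	return 0;
}

static int zstd_fini(struct zstd_data *data)
{
	if (data->dstream) {
		ZSTD_freeDStream(data->dstream);
		data->dstream = NULL;
	}
	if (data->cstream) {
		ZSTD_freeCStream(data->cstream);
		data->cstream = NULL;
	}
	return 0;
}

int main(void)
{
	struct zstd_data zd;

	zstd_init(&zd, 3);
	/* ... create and use the streams on demand ... */
	return zstd_fini(&zd);
}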
/tools/testing/selftests/kvm/x86_64/
xapic_ipi_test.c
76 struct test_data_page *data; member
89 static void halter_guest_code(struct test_data_page *data) in halter_guest_code() argument
94 data->halter_apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)); in halter_guest_code()
95 data->halter_lvr = xapic_read_reg(APIC_LVR); in halter_guest_code()
106 data->halter_tpr = xapic_read_reg(APIC_TASKPRI); in halter_guest_code()
107 data->halter_ppr = xapic_read_reg(APIC_PROCPRI); in halter_guest_code()
108 data->hlt_count++; in halter_guest_code()
110 data->wake_count++; in halter_guest_code()
125 static void sender_guest_code(struct test_data_page *data) in sender_guest_code() argument
149 icr2_val = SET_APIC_DEST_FIELD(data->halter_apic_id); in sender_guest_code()
[all …]
hyperv_tlb_flush.c
73 struct test_data *data = (struct test_data *)test_data; in worker_guest_code() local
75 void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES; in worker_guest_code()
94 val = READ_ONCE(*(u64 *)data->test_pages); in worker_guest_code()
159 static inline void prepare_to_test(struct test_data *data) in prepare_to_test() argument
162 memset((void *)data->hcall_gva, 0, PAGE_SIZE); in prepare_to_test()
165 set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_1); in prepare_to_test()
166 set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_2); in prepare_to_test()
175 swap_two_test_pages(data->test_pages_pte[0], data->test_pages_pte[1]); in prepare_to_test()
182 static inline void post_test(struct test_data *data, u64 exp1, u64 exp2) in post_test() argument
188 set_expected_val((void *)data->test_pages, exp1, WORKER_VCPU_ID_1); in post_test()
[all …]
/tools/testing/selftests/bpf/prog_tests/
ksyms_btf.c
21 struct test_ksyms_btf__data *data; in test_basic() local
47 data = skel->data; in test_basic()
48 CHECK(data->out__runqueues_addr != runqueues_addr, "runqueues_addr", in test_basic()
50 (unsigned long long)data->out__runqueues_addr, in test_basic()
52 CHECK(data->out__bpf_prog_active_addr != bpf_prog_active_addr, "bpf_prog_active_addr", in test_basic()
54 (unsigned long long)data->out__bpf_prog_active_addr, in test_basic()
57 CHECK(data->out__rq_cpu == -1, "rq_cpu", in test_basic()
58 "got %u, exp != -1\n", data->out__rq_cpu); in test_basic()
59 CHECK(data->out__bpf_prog_active < 0, "bpf_prog_active", in test_basic()
60 "got %d, exp >= 0\n", data->out__bpf_prog_active); in test_basic()
[all …]
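ksyms_btf.c compares the addresses its BPF program reads for runqueues and bpf_prog_active against addresses obtained in userspace. A rough sketch of how such an address can be fetched by scanning /proc/kallsyms; the helper name kallsyms_find is illustrative, and non-zero addresses require root or kptr_restrict=0.

#include <stdio.h>
#include <string.h>

static unsigned long long kallsyms_find(const char *name)
{
	char line[512], sym[256];
	unsigned long long addr;
	char type;
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		/* Lines look like: "ffffffff81234560 D runqueues [module]". */
		if (sscanf(line, "%llx %c %255s", &addr, &type, sym) == 3 &&
		    !strcmp(sym, name)) {
			fclose(f);
			return addr;
		}
	}
	fclose(f);
	return 0;
}

int main(void)
{
	printf("runqueues at 0x%llx\n", kallsyms_find("runqueues"));
	printf("bpf_prog_active at 0x%llx\n", kallsyms_find("bpf_prog_active"));
	return 0;
}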
varlen.c
17 struct test_varlen__data *data; in test_varlen() local
28 data = skel->data; in test_varlen()
49 CHECK_VAL(data->payload2_len1, size1); in test_varlen()
50 CHECK_VAL(data->payload2_len2, size2); in test_varlen()
51 CHECK_VAL(data->total2, size1 + size2); in test_varlen()
52 CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check", in test_varlen()
55 CHECK_VAL(data->payload3_len1, size1); in test_varlen()
56 CHECK_VAL(data->payload3_len2, size2); in test_varlen()
57 CHECK_VAL(data->total3, size1 + size2); in test_varlen()
58 CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check", in test_varlen()
[all …]
xdp_context_test_run.c
7 __u32 data_meta, __u32 data, __u32 data_end, in test_xdp_context_error() argument
12 .data = data, in test_xdp_context_error()
31 char data[sizeof(pkt_v4) + sizeof(__u32)]; in test_xdp_context_test_run() local
35 .data_in = &data, in test_xdp_context_test_run()
36 .data_size_in = sizeof(data), in test_xdp_context_test_run()
56 *(__u32 *)data = XDP_PASS; in test_xdp_context_test_run()
57 *(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4; in test_xdp_context_test_run()
62 ctx_in.data = sizeof(__u32); in test_xdp_context_test_run()
63 ctx_in.data_end = ctx_in.data + sizeof(pkt_v4); in test_xdp_context_test_run()
70 ASSERT_EQ(ctx_out.data, 0, "valid-data"); in test_xdp_context_test_run()
[all …]
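xdp_context_test_run.c drives BPF_PROG_TEST_RUN with both a packet buffer and an xdp_md context; the ctx offsets are relative to the start of data_in, so data = 4 reserves the first four bytes as metadata. A hedged userspace sketch of that setup follows, assuming an already-loaded XDP program whose fd is passed in (prog_fd, run_with_meta, and the 1500-byte buffer size are all illustrative).

#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* prog_fd must refer to an already-loaded XDP program (loading not shown). */
int run_with_meta(int prog_fd, const void *pkt, __u32 pkt_len)
{
	char buf[4 + 1500];
	struct xdp_md ctx_in;

	if (pkt_len > 1500)
		return -1;

	memset(buf, 0, sizeof(buf));
	memset(&ctx_in, 0, sizeof(ctx_in));
	ctx_in.data = 4;			/* bytes 0..3 become metadata */
	ctx_in.data_end = ctx_in.data + pkt_len;

	memcpy(buf + 4, pkt, pkt_len);

	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = buf,
		.data_size_in = 4 + pkt_len,
		.ctx_in = &ctx_in,
		.ctx_size_in = sizeof(ctx_in),
		.repeat = 1,
	);

	/* Returns 0 on success; opts.retval then holds the XDP verdict. */
	return bpf_prog_test_run_opts(prog_fd, &opts);
}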
core_extern.c
27 struct test_core_extern__data data; member
29 { .name = "default search path", .data = { .bpf_syscall = true } },
41 .data = {
55 .data = { .tristate_val = TRI_YES } },
57 .data = { .tristate_val = TRI_NO } },
59 .data = { .tristate_val = TRI_MODULE } },
64 .data = { .bool_val = true } },
66 .data = { .bool_val = false } },
71 .data = { .char_val = 'm' } },
77 .data = { .str_val = "\0\0\0\0\0\0\0" } },
[all …]
skeleton.c
19 struct test_skeleton__data *data; in test_skeleton() local
37 data = skel->data; in test_skeleton()
46 CHECK(data->in1 != -1, "in1", "got %d != exp %d\n", data->in1, -1); in test_skeleton()
47 CHECK(data->out1 != -1, "out1", "got %d != exp %d\n", data->out1, -1); in test_skeleton()
48 CHECK(data->in2 != -1, "in2", "got %lld != exp %lld\n", data->in2, -1LL); in test_skeleton()
49 CHECK(data->out2 != -1, "out2", "got %lld != exp %lld\n", data->out2, -1LL); in test_skeleton()
66 data->in1 = 10; in test_skeleton()
67 data->in2 = 11; in test_skeleton()
81 CHECK(data->in1 != 10, "in1", "got %d != exp %d\n", data->in1, 10); in test_skeleton()
82 CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL); in test_skeleton()
[all …]
/tools/testing/selftests/bpf/
cap_helpers.c
7 int capget(cap_user_header_t header, cap_user_data_t data);
8 int capset(cap_user_header_t header, const cap_user_data_t data);
12 struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3]; in cap_enable_effective() local
20 err = capget(&hdr, data); in cap_enable_effective()
25 *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective; in cap_enable_effective()
27 if ((data[0].effective & cap0) == cap0 && in cap_enable_effective()
28 (data[1].effective & cap1) == cap1) in cap_enable_effective()
31 data[0].effective |= cap0; in cap_enable_effective()
32 data[1].effective |= cap1; in cap_enable_effective()
33 err = capset(&hdr, data); in cap_enable_effective()
[all …]
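cap_helpers.c reads the effective capability sets with capget() and folds the two 32-bit words of _LINUX_CAPABILITY_VERSION_3 into a single 64-bit mask. A standalone sketch of the same read, going through syscall() instead of the hand-declared prototypes the selftest uses:

#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/capability.h>

int main(void)
{
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
		.pid = 0,			/* 0 means the current task */
	};
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
	unsigned long long effective;

	memset(data, 0, sizeof(data));
	if (syscall(SYS_capget, &hdr, data))
		return 1;

	/* Low word in data[0], high word in data[1], as in the selftest. */
	effective = (unsigned long long)data[1].effective << 32 | data[0].effective;
	printf("effective caps: 0x%llx\n", effective);
	return 0;
}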
test_lpm_map.c
215 uint8_t *data, *value; in test_lpm_map() local
228 data = alloca(keysize); in test_lpm_map()
229 memset(data, 0, keysize); in test_lpm_map()
252 memcpy(key->data, value, keysize); in test_lpm_map()
259 data[j] = rand() & 0xff; in test_lpm_map()
261 t = tlpm_match(list, data, 8 * keysize); in test_lpm_map()
264 memcpy(key->data, data, keysize); in test_lpm_map()
288 memcpy(key->data, list->key, keysize); in test_lpm_map()
296 data[j] = rand() & 0xff; in test_lpm_map()
298 t = tlpm_match(list, data, 8 * keysize); in test_lpm_map()
[all …]
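test_lpm_map.c cross-checks kernel LPM-trie lookups against its own list-based matcher. Below is a small userspace sketch, assuming libbpf and CAP_BPF/root, of the key layout those lookups rely on: a prefix length followed by the address bytes, with lookups returning the longest matching prefix. struct lpm_key, the map name, and the /24 example values are illustrative.

#include <arpa/inet.h>
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

struct lpm_key {
	__u32 prefixlen;
	__u8 data[4];			/* IPv4 address bytes */
};

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	struct lpm_key key = { .prefixlen = 24 };
	__u32 value = 1, out;
	int fd;

	/* LPM tries must be created with BPF_F_NO_PREALLOC. */
	fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_demo",
			    sizeof(key), sizeof(value), 128, &opts);
	if (fd < 0)
		return 1;

	inet_pton(AF_INET, "192.168.0.0", key.data);
	if (bpf_map_update_elem(fd, &key, &value, BPF_ANY))
		return 1;

	/* Look up a host inside the /24: the /24 entry should match. */
	key.prefixlen = 32;
	inet_pton(AF_INET, "192.168.0.42", key.data);
	if (!bpf_map_lookup_elem(fd, &key, &out))
		printf("matched prefix, value %u\n", out);
	return 0;
}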
/tools/testing/selftests/kvm/
memslot_perf_test.c
127 static void check_mmio_access(struct vm_data *data, struct kvm_run *run) in check_mmio_access() argument
129 TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit"); in check_mmio_access()
133 TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min && in check_mmio_access()
134 run->mmio.phys_addr <= data->mmio_gpa_max, in check_mmio_access()
141 struct vm_data *data = __data; in vcpu_worker() local
142 struct kvm_vcpu *vcpu = data->vcpu; in vcpu_worker()
158 check_mmio_access(data, run); in vcpu_worker()
188 static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) in vm_gpa2hva() argument
193 uint32_t guest_page_size = data->vm->page_size; in vm_gpa2hva()
196 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, in vm_gpa2hva()
[all …]
/tools/testing/selftests/hid/progs/
hid.c
100 __u8 data[10]; member
115 if (size > sizeof(args->data)) in hid_user_raw_request()
123 args->data, in hid_user_raw_request()
141 if (size > sizeof(args->data)) in hid_user_output_report()
149 args->data, in hid_user_output_report()
165 if (size > sizeof(args->data)) in hid_user_input_report()
172 ret = hid_bpf_input_report(ctx, HID_INPUT_REPORT, args->data, size); in hid_user_input_report()
222 __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4096 /* size */); in BPF_PROG() local
224 if (!data) in BPF_PROG()
227 callback2_check = data[4]; in BPF_PROG()
[all …]
/tools/testing/selftests/bpf/progs/
for_each_hash_map_elem.c
30 struct callback_ctx *data) in check_hash_elem() argument
32 struct __sk_buff *skb = data->ctx; in check_hash_elem()
40 data->output = 3; /* impossible path */ in check_hash_elem()
42 data->output = 4; in check_hash_elem()
44 data->output = data->input; in check_hash_elem()
61 struct callback_ctx data; in check_percpu_elem() local
68 data.ctx = 0; in check_percpu_elem()
69 data.input = 100; in check_percpu_elem()
70 data.output = 0; in check_percpu_elem()
71 bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0); in check_percpu_elem()
[all …]
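for_each_hash_map_elem.c exercises bpf_for_each_map_elem(), which invokes a callback once per element and threads a caller-supplied context through it. A minimal BPF-side sketch of that pattern, assuming libbpf headers; the map, callback, and section names are illustrative rather than taken from the selftest.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} hashmap SEC(".maps");

struct callback_ctx {
	__u64 sum;
};

struct bpf_map;

/* Called once per element; return 0 to continue, 1 to stop early. */
static long sum_elem(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	struct callback_ctx *cb = ctx;

	cb->sum += *val;
	return 0;
}

SEC("tc")
int sum_hash_map(struct __sk_buff *skb)
{
	struct callback_ctx cb = { .sum = 0 };

	bpf_for_each_map_elem(&hashmap, sum_elem, &cb, 0);
	return cb.sum ? 1 : 0;
}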
test_kfunc_param_nullable.c
12 struct bpf_dynptr data; in kfunc_dynptr_nullable_test1() local
14 bpf_dynptr_from_skb(skb, 0, &data); in kfunc_dynptr_nullable_test1()
15 bpf_kfunc_dynptr_test(&data, NULL); in kfunc_dynptr_nullable_test1()
23 struct bpf_dynptr data; in kfunc_dynptr_nullable_test2() local
25 bpf_dynptr_from_skb(skb, 0, &data); in kfunc_dynptr_nullable_test2()
26 bpf_kfunc_dynptr_test(&data, &data); in kfunc_dynptr_nullable_test2()
35 struct bpf_dynptr data; in kfunc_dynptr_nullable_test3() local
37 bpf_dynptr_from_skb(skb, 0, &data); in kfunc_dynptr_nullable_test3()
38 bpf_kfunc_dynptr_test(NULL, &data); in kfunc_dynptr_nullable_test3()
test_xdp_meta.c
14 __u8 *data, *data_meta, *data_end; in ing_cls() local
19 data = ctx_ptr(ctx, data); in ing_cls()
21 if (data + ETH_ALEN > data_end || in ing_cls()
22 data_meta + round_up(ETH_ALEN, 4) > data) in ing_cls()
25 diff |= ((__u32 *)data_meta)[0] ^ ((__u32 *)data)[0]; in ing_cls()
26 diff |= ((__u16 *)data_meta)[2] ^ ((__u16 *)data)[2]; in ing_cls()
34 __u8 *data, *data_meta, *data_end; in ing_xdp() local
43 data = ctx_ptr(ctx, data); in ing_xdp()
45 if (data + ETH_ALEN > data_end || in ing_xdp()
46 data_meta + round_up(ETH_ALEN, 4) > data) in ing_xdp()
[all …]
test_xdp_do_redirect.c
33 void *data = (void *)(long)xdp->data; in xdp_redirect() local
35 __u8 *payload = data + HDR_SZ; in xdp_redirect()
44 if (metadata + 1 > data) in xdp_redirect()
67 static bool check_pkt(void *data, void *data_end, const __u32 mark) in check_pkt() argument
69 struct ipv6hdr *iph = data + sizeof(struct ethhdr); in check_pkt()
70 __u8 *payload = data + HDR_SZ; in check_pkt()
88 void *data = (void *)(long)xdp->data; in xdp_count_pkts() local
91 if (check_pkt(data, data_end, MARK_XMIT)) in xdp_count_pkts()
104 void *data = (void *)(long)skb->data; in tc_count_pkts() local
107 if (check_pkt(data, data_end, MARK_SKB)) in tc_count_pkts()
verifier_bits_iter.c
26 u64 data = 1; in BPF_PROG() local
28 bpf_iter_bits_new(&it, &data, 1); in BPF_PROG()
79 u64 data = 0xf7310UL; /* 4 + 3 + 2 + 1 + 0*/ in bits_copy() local
83 bpf_for_each(bits, bit, &data, 1) in bits_copy()
93 u64 data[2]; in bits_memalloc() local
97 __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */ in bits_memalloc()
98 bpf_for_each(bits, bit, &data[0], ARRAY_SIZE(data)) in bits_memalloc()
108 u64 data = 0x100; in bit_index() local
112 bpf_for_each(bits, bit, &data, 1) { in bit_index()
125 u64 data[4]; in bits_too_big() local
[all …]
sockmap_parse_prog.c
9 void *data = (void *)(long) skb->data; in bpf_prog1() local
10 __u8 *d = data; in bpf_prog1()
13 if (data + 10 > data_end) { in bpf_prog1()
19 data = (void *)(long)skb->data; in bpf_prog1()
20 if (data + 10 > data_end) in bpf_prog1()
28 d = data; in bpf_prog1()
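sockmap_parse_prog.c shows the bounds-check / pull / re-check pattern for direct packet access: if the bytes are not in the linear area, bpf_skb_pull_data() is called and the data pointers must be reloaded and re-validated before any further reads. A hedged sketch of that pattern, with illustrative program and section names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("sk_skb/stream_parser")
int parse_prog(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	__u8 *d;

	if (data + 10 > data_end) {
		/* Bytes may live outside the linear area: pull them in. */
		if (bpf_skb_pull_data(skb, 10))
			return skb->len;
		/* The pull can move the buffer: reload and re-check. */
		data_end = (void *)(long)skb->data_end;
		data = (void *)(long)skb->data;
		if (data + 10 > data_end)
			return skb->len;
	}

	d = data;
	/* The verifier now knows d[0..9] are in bounds. */
	if (d[0] == 0xff)
		bpf_printk("magic first byte");
	return skb->len;
}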
/tools/perf/bench/
inject-buildid.c
137 static ssize_t synthesize_attr(struct bench_data *data) in synthesize_attr() argument
152 return writen(data->input_pipe[1], &event, event.header.size); in synthesize_attr()
155 static ssize_t synthesize_fork(struct bench_data *data) in synthesize_fork() argument
167 event.fork.pid = data->pid; in synthesize_fork()
168 event.fork.tid = data->pid; in synthesize_fork()
170 return writen(data->input_pipe[1], &event, event.header.size); in synthesize_fork()
173 static ssize_t synthesize_mmap(struct bench_data *data, struct bench_dso *dso, u64 timestamp) in synthesize_mmap() argument
188 event.mmap2.pid = data->pid; in synthesize_mmap()
189 event.mmap2.tid = data->pid; in synthesize_mmap()
201 if (writen(data->input_pipe[1], &event, len - bench_id_hdr_size) < 0) in synthesize_mmap()
[all …]
/tools/perf/util/bpf_skel/
sample_filter.bpf.c
77 struct perf_sample_data___new *data = (void *)kctx->data; in perf_get_sample() local
79 if (!bpf_core_field_exists(data->sample_flags)) in perf_get_sample()
104 (data->sample_flags & (1 << (entry->term - PBF_TERM_SAMPLE_START))) == 0) in perf_get_sample()
109 return kctx->data->ip; in perf_get_sample()
111 return kctx->data->id; in perf_get_sample()
114 return kctx->data->tid_entry.pid; in perf_get_sample()
116 return kctx->data->tid_entry.tid; in perf_get_sample()
118 return kctx->data->cpu_entry.cpu; in perf_get_sample()
120 return kctx->data->time; in perf_get_sample()
122 return kctx->data->addr; in perf_get_sample()
[all …]
/tools/tracing/rtla/src/
osnoise_hist.c
71 osnoise_free_histogram(struct osnoise_hist_data *data) in osnoise_free_histogram() argument
76 for (cpu = 0; cpu < data->nr_cpus; cpu++) { in osnoise_free_histogram()
77 if (data->hist[cpu].samples) in osnoise_free_histogram()
78 free(data->hist[cpu].samples); in osnoise_free_histogram()
82 if (data->hist) in osnoise_free_histogram()
83 free(data->hist); in osnoise_free_histogram()
85 free(data); in osnoise_free_histogram()
94 struct osnoise_hist_data *data; in osnoise_alloc_histogram() local
97 data = calloc(1, sizeof(*data)); in osnoise_alloc_histogram()
98 if (!data) in osnoise_alloc_histogram()
[all …]
timerlat_hist.c
93 timerlat_free_histogram(struct timerlat_hist_data *data) in timerlat_free_histogram() argument
98 for (cpu = 0; cpu < data->nr_cpus; cpu++) { in timerlat_free_histogram()
99 if (data->hist[cpu].irq) in timerlat_free_histogram()
100 free(data->hist[cpu].irq); in timerlat_free_histogram()
102 if (data->hist[cpu].thread) in timerlat_free_histogram()
103 free(data->hist[cpu].thread); in timerlat_free_histogram()
105 if (data->hist[cpu].user) in timerlat_free_histogram()
106 free(data->hist[cpu].user); in timerlat_free_histogram()
111 if (data->hist) in timerlat_free_histogram()
112 free(data->hist); in timerlat_free_histogram()
[all …]
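Both rtla histogram files follow the same allocate/free shape: one bucket array per CPU, with a free routine defensive enough to also unwind a partially failed allocation. A generic sketch of that pattern; hist_alloc/hist_free and the field names are illustrative, not rtla's API.

#include <stdlib.h>

struct hist_data {
	int nr_cpus;
	int entries;
	struct {
		int *samples;
	} *hist;
};

static void hist_free(struct hist_data *data)
{
	int cpu;

	if (!data)
		return;
	if (data->hist) {
		for (cpu = 0; cpu < data->nr_cpus; cpu++)
			free(data->hist[cpu].samples);	/* free(NULL) is a no-op */
		free(data->hist);
	}
	free(data);
}

static struct hist_data *hist_alloc(int nr_cpus, int entries)
{
	struct hist_data *data;
	int cpu;

	data = calloc(1, sizeof(*data));
	if (!data)
		return NULL;
	data->nr_cpus = nr_cpus;
	data->entries = entries;

	data->hist = calloc(nr_cpus, sizeof(*data->hist));
	if (!data->hist)
		goto cleanup;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		data->hist[cpu].samples = calloc(entries, sizeof(int));
		if (!data->hist[cpu].samples)
			goto cleanup;
	}
	return data;

cleanup:
	/* Safe on partial allocations: unfilled slots are still NULL. */
	hist_free(data);
	return NULL;
}

int main(void)
{
	struct hist_data *data = hist_alloc(8, 1024);

	hist_free(data);
	return 0;
}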
