/tools/testing/selftests/bpf/progs/
D | test_tunnel_kern.c |
    132  struct erspan_metadata md;  in _erspan_set_tunnel() local
    148  __builtin_memset(&md, 0, sizeof(md));  in _erspan_set_tunnel()
    150  md.version = 1;  in _erspan_set_tunnel()
    151  md.u.index = bpf_htonl(123);  in _erspan_set_tunnel()
    156  md.version = 2;  in _erspan_set_tunnel()
    157  md.u.md2.dir = direction;  in _erspan_set_tunnel()
    158  md.u.md2.hwid = hwid & 0xf;  in _erspan_set_tunnel()
    159  md.u.md2.hwid_upper = (hwid >> 4) & 0x3;  in _erspan_set_tunnel()
    162  ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));  in _erspan_set_tunnel()
    176  struct erspan_metadata md;  in _erspan_get_tunnel() local
    [all …]
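The references above are the BPF tunnel selftest filling struct erspan_metadata for ERSPAN v1 (u.index) and v2 (u.md2) before attaching it as a tunnel option. A minimal sketch of the v2 path, assuming the UAPI <linux/erspan.h> layout; 'direction' and 'hwid' are hypothetical inputs, and a real program would also set the tunnel key with bpf_skb_set_tunnel_key() first:

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <linux/erspan.h>
    #include <bpf/bpf_helpers.h>

    /* Sketch only: attach ERSPAN v2 metadata to an egress skb. */
    SEC("tc")
    int erspan_v2_set_opt(struct __sk_buff *skb)
    {
        struct erspan_metadata md;
        __u8 direction = 1, hwid = 7;   /* placeholder values */

        __builtin_memset(&md, 0, sizeof(md));
        md.version = 2;
        md.u.md2.dir = direction;
        md.u.md2.hwid = hwid & 0xf;              /* low 4 bits of hwid */
        md.u.md2.hwid_upper = (hwid >> 4) & 0x3; /* high 2 bits */

        if (bpf_skb_set_tunnel_opt(skb, &md, sizeof(md)) < 0)
            return TC_ACT_SHOT;
        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";

The 6-bit hardware ID is split across hwid and hwid_upper exactly as in the selftest lines 158-159 above.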
|
/tools/lib/perf/ |
D | mmap.c |
     74  static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)  in perf_mmap__write_tail() argument
     76  ring_buffer_write_tail(md->base, tail);  in perf_mmap__write_tail()
    138  static int __perf_mmap__read_init(struct perf_mmap *md)  in __perf_mmap__read_init() argument
    140  u64 head = perf_mmap__read_head(md);  in __perf_mmap__read_init()
    141  u64 old = md->prev;  in __perf_mmap__read_init()
    142  unsigned char *data = md->base + page_size;  in __perf_mmap__read_init()
    145  md->start = md->overwrite ? head : old;  in __perf_mmap__read_init()
    146  md->end = md->overwrite ? old : head;  in __perf_mmap__read_init()
    148  if ((md->end - md->start) < md->flush)  in __perf_mmap__read_init()
    151  size = md->end - md->start;  in __perf_mmap__read_init()
    [all …]
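__perf_mmap__read_init() computes the window a consumer may read: from the saved position (md->prev) up to the kernel-updated head in normal mode, with the direction flipped in overwrite mode. Nearly every tools/perf test and builtin listed below drives the same four-call cycle built on top of it. A minimal sketch against libperf's public API, assuming 'evlist' is an already opened and mmapped struct perf_evlist:

    #include <perf/evlist.h>
    #include <perf/mmap.h>
    #include <perf/event.h>

    /* Sketch: drain each ring of a mmapped evlist using the
     * read_init -> read_event -> consume -> read_done cycle.
     */
    static void drain_evlist(struct perf_evlist *evlist)
    {
        struct perf_mmap *map;

        perf_evlist__for_each_mmap(evlist, map, false) {
            union perf_event *event;

            if (perf_mmap__read_init(map) < 0)
                continue;   /* nothing new in this ring */

            while ((event = perf_mmap__read_event(map)) != NULL) {
                /* ... dispatch on event->header.type ... */
                perf_mmap__consume(map); /* let the kernel reuse the slot */
            }
            perf_mmap__read_done(map);   /* save the position for next round */
        }
    }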
|
/tools/perf/util/ |
D | mmap.c |
    313  int perf_mmap__push(struct mmap *md, void *to,  in perf_mmap__push() argument
    316  u64 head = perf_mmap__read_head(&md->core);  in perf_mmap__push()
    317  unsigned char *data = md->core.base + page_size;  in perf_mmap__push()
    322  rc = perf_mmap__read_init(&md->core);  in perf_mmap__push()
    326  size = md->core.end - md->core.start;  in perf_mmap__push()
    328  if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {  in perf_mmap__push()
    329  buf = &data[md->core.start & md->core.mask];  in perf_mmap__push()
    330  size = md->core.mask + 1 - (md->core.start & md->core.mask);  in perf_mmap__push()
    331  md->core.start += size;  in perf_mmap__push()
    333  if (push(md, to, buf, size) < 0) {  in perf_mmap__push()
    [all …]
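perf_mmap__push() hands the read window to a caller-supplied push() callback, splitting it into two calls when the window wraps past the ring mask (the test at source line 328 above). The callback only has to sink bytes; a hedged sketch of one that appends to a file descriptor held in a hypothetical wrapper struct, matching the callback signature declared in mmap.h:

    #include <unistd.h>
    #include <errno.h>
    #include "util/mmap.h"  /* struct mmap, inside the tools/perf tree */

    /* Hypothetical sink state; tools/perf passes its own type as 'to'. */
    struct sink {
        int out_fd;
    };

    /* Sketch of a perf_mmap__push() callback: write 'size' bytes from
     * 'buf'. perf_mmap__push() calls it once per contiguous chunk, so
     * no ring-wrap handling is needed here.
     */
    static int sink_pushfn(struct mmap *map, void *to, void *buf, size_t size)
    {
        struct sink *s = to;
        char *p = buf;

        (void)map;  /* unused in this sketch */
        while (size) {
            ssize_t n = write(s->out_fd, p, size);

            if (n < 0) {
                if (errno == EINTR)
                    continue;
                return -1;
            }
            p += n;
            size -= (size_t)n;
        }
        return 0;
    }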
|
D | mmap.h |
     59  int perf_mmap__push(struct mmap *md, void *to,
|
D | python.c |
    1019  struct mmap *md = &evlist->mmap[i];  in get_md() local
    1021  if (md->core.cpu == cpu)  in get_md()
    1022  return md;  in get_md()
    1035  struct mmap *md;  in pyrf_evlist__read_on_cpu() local
    1042  md = get_md(evlist, cpu);  in pyrf_evlist__read_on_cpu()
    1043  if (!md)  in pyrf_evlist__read_on_cpu()
    1046  if (perf_mmap__read_init(&md->core) < 0)  in pyrf_evlist__read_on_cpu()
    1049  event = perf_mmap__read_event(&md->core);  in pyrf_evlist__read_on_cpu()
    1069  perf_mmap__consume(&md->core);  in pyrf_evlist__read_on_cpu()
|
D | symbol.c |
    1201  struct kcore_mapfn_data *md = data;  in kcore_mapfn() local
    1204  map = map__new2(start, md->dso);  in kcore_mapfn()
    1211  list_add(&map->node, &md->maps);  in kcore_mapfn()
    1299  struct kcore_mapfn_data md;  in dso__load_kcore() local
    1324  md.dso = dso;  in dso__load_kcore()
    1325  INIT_LIST_HEAD(&md.maps);  in dso__load_kcore()
    1335  err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,  in dso__load_kcore()
    1341  if (list_empty(&md.maps)) {  in dso__load_kcore()
    1360  list_for_each_entry(new_map, &md.maps, node) {  in dso__load_kcore()
    1369  replacement_map = list_entry(md.maps.next, struct map, node);  in dso__load_kcore()
    [all …]
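Here md is a struct kcore_mapfn_data: dso__load_kcore() seeds it with the target dso and an empty list head, then file__read_maps() invokes kcore_mapfn() once per /proc/kcore segment, and the callback accumulates new maps onto md.maps. Reconstructed roughly from the references above (a sketch, not the verbatim source):

    #include <errno.h>
    #include <linux/list.h> /* tools/include copy */

    struct kcore_mapfn_data {
        struct dso *dso;
        struct list_head maps;
    };

    /* Callback for file__read_maps(): wrap each kcore segment in a
     * struct map and collect it on the caller's list.
     */
    static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
    {
        struct kcore_mapfn_data *md = data;
        struct map *map = map__new2(start, md->dso);

        if (map == NULL)
            return -ENOMEM;

        map->end = map->start + len;
        map->pgoff = pgoff;
        list_add(&map->node, &md->maps);
        return 0;
    }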
|
/tools/build/feature/ |
D | test-libcrypto.c |
      9  unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];  in main() local
     19  EVP_DigestFinal_ex(mdctx, &md[0], &digest_len);  in main()
     22  SHA1(&dat[0], sizeof(dat), &md[0]);  in main()
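This feature test probes libcrypto by exercising both the EVP digest interface and the legacy one-shot SHA1(). A standalone sketch of the EVP half, assuming OpenSSL 1.1+ headers are installed:

    #include <stdio.h>
    #include <openssl/evp.h>

    /* Sketch: MD5-hash a buffer through the EVP interface, as the
     * feature test does to confirm libcrypto is usable.
     */
    int main(void)
    {
        unsigned char dat[] = "feature test";
        unsigned char md[EVP_MAX_MD_SIZE];
        unsigned int digest_len = 0;
        EVP_MD_CTX *mdctx = EVP_MD_CTX_new();

        if (!mdctx)
            return 1;

        EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
        EVP_DigestUpdate(mdctx, dat, sizeof(dat));
        EVP_DigestFinal_ex(mdctx, md, &digest_len);
        EVP_MD_CTX_free(mdctx);

        for (unsigned int i = 0; i < digest_len; i++)
            printf("%02x", md[i]);
        putchar('\n');
        return 0;
    }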
|
/tools/perf/tests/ |
D | task-exit.c |
     56  struct mmap *md;  in test__task_exit() local
    122  md = &evlist->mmap[0];  in test__task_exit()
    123  if (perf_mmap__read_init(&md->core) < 0)  in test__task_exit()
    126  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__task_exit()
    130  perf_mmap__consume(&md->core);  in test__task_exit()
    132  perf_mmap__read_done(&md->core);  in test__task_exit()
|
D | openat-syscall-tp-fields.c |
     93  struct mmap *md;  in test__syscall_openat_tp_fields() local
     95  md = &evlist->mmap[i];  in test__syscall_openat_tp_fields()
     96  if (perf_mmap__read_init(&md->core) < 0)  in test__syscall_openat_tp_fields()
     99  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__syscall_openat_tp_fields()
    107  perf_mmap__consume(&md->core);  in test__syscall_openat_tp_fields()
    127  perf_mmap__read_done(&md->core);  in test__syscall_openat_tp_fields()
|
D | sw-clock.c |
     47  struct mmap *md;  in __test__sw_clock_freq() local
    102  md = &evlist->mmap[0];  in __test__sw_clock_freq()
    103  if (perf_mmap__read_init(&md->core) < 0)  in __test__sw_clock_freq()
    106  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in __test__sw_clock_freq()
    121  perf_mmap__consume(&md->core);  in __test__sw_clock_freq()
    123  perf_mmap__read_done(&md->core);  in __test__sw_clock_freq()
|
D | keep-tracking.c |
     36  struct mmap *md;  in find_comm() local
     41  md = &evlist->mmap[i];  in find_comm()
     42  if (perf_mmap__read_init(&md->core) < 0)  in find_comm()
     44  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in find_comm()
     50  perf_mmap__consume(&md->core);  in find_comm()
     52  perf_mmap__read_done(&md->core);  in find_comm()
|
D | mmap-basic.c |
     47  struct mmap *md;  in test__basic_mmap() local
    116  md = &evlist->mmap[0];  in test__basic_mmap()
    117  if (perf_mmap__read_init(&md->core) < 0)  in test__basic_mmap()
    120  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__basic_mmap()
    143  perf_mmap__consume(&md->core);  in test__basic_mmap()
    145  perf_mmap__read_done(&md->core);  in test__basic_mmap()
|
D | perf-record.c |
    171  struct mmap *md;  in test__PERF_RECORD() local
    173  md = &evlist->mmap[i];  in test__PERF_RECORD()
    174  if (perf_mmap__read_init(&md->core) < 0)  in test__PERF_RECORD()
    177  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__PERF_RECORD()
    280  perf_mmap__consume(&md->core);  in test__PERF_RECORD()
    282  perf_mmap__read_done(&md->core);  in test__PERF_RECORD()
|
D | bpf.c |
    186  struct mmap *md;  in do_test() local
    188  md = &evlist->mmap[i];  in do_test()
    189  if (perf_mmap__read_init(&md->core) < 0)  in do_test()
    192  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in do_test()
    198  perf_mmap__read_done(&md->core);  in do_test()
|
D | switch-tracking.c |
    268  struct mmap *md;  in process_events() local
    272  md = &evlist->mmap[i];  in process_events()
    273  if (perf_mmap__read_init(&md->core) < 0)  in process_events()
    276  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in process_events()
    279  perf_mmap__consume(&md->core);  in process_events()
    283  perf_mmap__read_done(&md->core);  in process_events()
|
D | code-reading.c |
    424  struct mmap *md;  in process_events() local
    428  md = &evlist->mmap[i];  in process_events()
    429  if (perf_mmap__read_init(&md->core) < 0)  in process_events()
    432  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in process_events()
    434  perf_mmap__consume(&md->core);  in process_events()
    438  perf_mmap__read_done(&md->core);  in process_events()
|
/tools/perf/arch/x86/tests/ |
D | perf-time-to-tsc.c |
     70  struct mmap *md;  in test__perf_time_to_tsc() local
    120  md = &evlist->mmap[i];  in test__perf_time_to_tsc()
    121  if (perf_mmap__read_init(&md->core) < 0)  in test__perf_time_to_tsc()
    124  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__perf_time_to_tsc()
    141  perf_mmap__consume(&md->core);  in test__perf_time_to_tsc()
    143  perf_mmap__read_done(&md->core);  in test__perf_time_to_tsc()
|
/tools/perf/ |
D | builtin-kvm.c |
    755  struct mmap *md;  in perf_kvm__mmap_read_idx() local
    761  md = &evlist->mmap[idx];  in perf_kvm__mmap_read_idx()
    762  err = perf_mmap__read_init(&md->core);  in perf_kvm__mmap_read_idx()
    766  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in perf_kvm__mmap_read_idx()
    769  perf_mmap__consume(&md->core);  in perf_kvm__mmap_read_idx()
    779  perf_mmap__consume(&md->core);  in perf_kvm__mmap_read_idx()
    796  perf_mmap__read_done(&md->core);  in perf_kvm__mmap_read_idx()
|
D | builtin-top.c |
    883  struct mmap *md;  in perf_top__mmap_read_idx() local
    886  md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];  in perf_top__mmap_read_idx()
    887  if (perf_mmap__read_init(&md->core) < 0)  in perf_top__mmap_read_idx()
    890  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in perf_top__mmap_read_idx()
    901  perf_mmap__consume(&md->core);  in perf_top__mmap_read_idx()
    911  perf_mmap__read_done(&md->core);  in perf_top__mmap_read_idx()
|
D | builtin-record.c |
    204  static int record__aio_complete(struct mmap *md, struct aiocb *cblock)  in record__aio_complete() argument
    232  perf_mmap__put(&md->core);  in record__aio_complete()
    250  static int record__aio_sync(struct mmap *md, bool sync_all)  in record__aio_sync() argument
    252  struct aiocb **aiocb = md->aio.aiocb;  in record__aio_sync()
    253  struct aiocb *cblocks = md->aio.cblocks;  in record__aio_sync()
    259  for (i = 0; i < md->aio.nr_cblocks; ++i) {  in record__aio_sync()
    260  if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {  in record__aio_sync()
    278  while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {  in record__aio_sync()
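record__aio_sync() scans the mmap's aiocb slots for one that has completed (aio_fildes == -1 marks a free slot) and blocks in aio_suspend() when all writes are still in flight. The completion test itself is plain POSIX AIO; a minimal sketch, with the short-write retry that the real record__aio_complete() performs reduced to a comment:

    #include <aio.h>
    #include <errno.h>
    #include <stdbool.h>

    /* Sketch: has this asynchronous write finished? aio_error()
     * returns EINPROGRESS while pending and 0 on success;
     * aio_return() must then be called exactly once to reap it.
     */
    static bool aio_block_done(struct aiocb *cblock)
    {
        int err = aio_error(cblock);
        ssize_t written;

        if (err == EINPROGRESS)
            return false;   /* still in flight */

        written = aio_return(cblock);
        if (err || written != (ssize_t)cblock->aio_nbytes) {
            /* a real caller would reissue the remainder here,
             * as record__aio_complete() does */
        }
        cblock->aio_fildes = -1;    /* mark the slot reusable */
        return true;
    }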
|
D | builtin-trace.c |
    4079  struct mmap *md;  local
    4081  md = &evlist->mmap[i];
    4082  if (perf_mmap__read_init(&md->core) < 0)
    4085  while ((event = perf_mmap__read_event(&md->core)) != NULL) {
    4092  perf_mmap__consume(&md->core);
    4102  perf_mmap__read_done(&md->core);
|
/tools/testing/selftests/seccomp/ |
D | seccomp_bpf.c |
    3205  struct seccomp_metadata md;  in TEST() local
    3248  md.filter_off = 0;  in TEST()
    3250  ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);  in TEST()
    3251  EXPECT_EQ(sizeof(md), ret) {  in TEST()
    3256  EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);  in TEST()
    3257  EXPECT_EQ(md.filter_off, 0);  in TEST()
    3259  md.filter_off = 1;  in TEST()
    3260  ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);  in TEST()
    3261  EXPECT_EQ(sizeof(md), ret);  in TEST()
    3262  EXPECT_EQ(md.flags, 0);  in TEST()
    [all …]
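Here the test queries filter metadata through ptrace: filter_off selects which filter in the task's stack to inspect, and on success the call returns sizeof(md) with flags filled in. A hedged sketch of the same query from a tracer already attached to a stopped task; the fallback definitions mirror what the selftest provides when libc headers lack them:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <linux/types.h>

    #ifndef PTRACE_SECCOMP_GET_METADATA
    #define PTRACE_SECCOMP_GET_METADATA 0x420d
    struct seccomp_metadata {
        __u64 filter_off;   /* index of the filter to query */
        __u64 flags;        /* e.g. SECCOMP_FILTER_FLAG_LOG */
    };
    #endif

    /* Sketch: read the flags of seccomp filter 'idx' of traced 'pid'. */
    static int get_seccomp_md(pid_t pid, unsigned int idx)
    {
        struct seccomp_metadata md = { .filter_off = idx };
        long ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid,
                          sizeof(md), &md);

        if (ret != (long)sizeof(md)) {
            perror("PTRACE_SECCOMP_GET_METADATA");
            return -1;
        }
        printf("filter %u flags: %#llx\n", idx,
               (unsigned long long)md.flags);
        return 0;
    }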
|
/tools/memory-model/ |
D | README |
     28  See "herdtools7/INSTALL.md" for installation instructions.
|
/tools/power/pm-graph/ |
D | sleepgraph.py |
    2336  md = self.sv.max_graph_depth
    2339  if (md and self.depth >= md - 1) or (line.name in self.sv.cgblacklist):
    2347  (md and last and last.depth >= md) or \
|
/tools/testing/selftests/filesystems/incfs/ |
D | incfs_test.c |
    3800  const EVP_MD *md = EVP_sha256();  in sign() local
    3810  TESTNE(PKCS7_sign_add_signer(p7, cert, key, md, pkcs7_flags), 0);  in sign()
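The incfs test signs file data with a PKCS#7 signature over SHA-256. A sketch of that flow with OpenSSL, assuming 'cert' and 'key' were loaded elsewhere (e.g. with PEM_read_X509() and PEM_read_PrivateKey()); the flag set here is illustrative, not the test's exact pkcs7_flags:

    #include <openssl/bio.h>
    #include <openssl/evp.h>
    #include <openssl/pkcs7.h>
    #include <openssl/x509.h>

    /* Sketch: detached PKCS#7 signature over 'data' using SHA-256,
     * built via the PKCS7_PARTIAL + PKCS7_sign_add_signer() path.
     */
    static PKCS7 *sign_detached(X509 *cert, EVP_PKEY *key,
                                const void *data, int len)
    {
        int flags = PKCS7_BINARY | PKCS7_DETACHED | PKCS7_PARTIAL;
        BIO *bio = BIO_new_mem_buf(data, len);
        PKCS7 *p7;

        if (!bio)
            return NULL;

        p7 = PKCS7_sign(NULL, NULL, NULL, bio, flags);
        if (p7 && (!PKCS7_sign_add_signer(p7, cert, key, EVP_sha256(), flags) ||
                   !PKCS7_final(p7, bio, flags))) {
            PKCS7_free(p7);
            p7 = NULL;
        }
        BIO_free(bio);
        return p7;
    }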
|