| /tools/lib/bpf/ |
| D | btf_iter.c |
  16  int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,  in btf_field_iter_init() argument
  19  it->p = NULL;  in btf_field_iter_init()
  20  it->m_idx = -1;  in btf_field_iter_init()
  21  it->off_idx = 0;  in btf_field_iter_init()
  22  it->vlen = 0;  in btf_field_iter_init()
  32  it->desc = (struct btf_field_desc) {};  in btf_field_iter_init()
  44  it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };  in btf_field_iter_init()
  47  it->desc = (struct btf_field_desc) {  in btf_field_iter_init()
  54  it->desc = (struct btf_field_desc) {  in btf_field_iter_init()
  61  it->desc = (struct btf_field_desc) {  in btf_field_iter_init()
  [all …]
|
| D | btf_relocate.c |
  69  struct btf_field_iter it;  in btf_relocate_rewrite_type_id() local
  73  err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);  in btf_relocate_rewrite_type_id()
  77  while ((id = btf_field_iter_next(&it)))  in btf_relocate_rewrite_type_id()
  135 struct btf_field_iter it;  in btf_mark_embedded_composite_type_ids() local
  142 err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);  in btf_mark_embedded_composite_type_ids()
  146 while ((id = btf_field_iter_next(&it))) {  in btf_mark_embedded_composite_type_ids()
  415 struct btf_field_iter it;  in btf_relocate_rewrite_strs() local
  419 err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);  in btf_relocate_rewrite_strs()
  423 while ((str_off = btf_field_iter_next(&it))) {  in btf_relocate_rewrite_strs()
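The two entries above show libbpf's internal BTF field iterator: btf_field_iter_init() selects which kind of fields to walk (type IDs or string offsets) and btf_field_iter_next() hands back a pointer to each such field until it returns NULL. Below is a minimal sketch of that pattern, modeled on the btf_relocate.c call sites; the remap_type_ids() wrapper and id_map array are hypothetical, and the iterator lives in libbpf's internal headers, not the public API.

```c
/* Sketch only: rewrite every type-ID reference inside one BTF type.
 * btf_field_iter_init()/btf_field_iter_next() come from libbpf's
 * libbpf_internal.h; remap_type_ids() and id_map are illustrative.
 */
#include "libbpf_internal.h"

static int remap_type_ids(struct btf_type *t, const __u32 *id_map)
{
	struct btf_field_iter it;
	__u32 *id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	/* yields a pointer to each ID field of *t, NULL when exhausted */
	while ((id = btf_field_iter_next(&it)))
		*id = id_map[*id];

	return 0;
}
```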
|
| /tools/testing/selftests/bpf/progs/ |
| D | iters.c |
  33  struct bpf_iter_num it;  in iter_err_unsafe_c_loop() local
  38  bpf_iter_num_new(&it, 0, 1000);  in iter_err_unsafe_c_loop()
  39  while ((v = bpf_iter_num_next(&it))) {  in iter_err_unsafe_c_loop()
  42  bpf_iter_num_destroy(&it);  in iter_err_unsafe_c_loop()
  53  struct bpf_iter_num it;  in iter_err_unsafe_asm_loop() local
  79  : [it]"r"(&it),  in iter_err_unsafe_asm_loop()
  95  struct bpf_iter_num it;  in iter_while_loop() local
  100 bpf_iter_num_new(&it, 0, 3);  in iter_while_loop()
  101 while ((v = bpf_iter_num_next(&it))) {  in iter_while_loop()
  104 bpf_iter_num_destroy(&it);  in iter_while_loop()
  [all …]
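iters.c exercises the generic numeric iterator kfuncs. The valid shape (new, next in a loop, destroy) looks like the sketch below; the program section, the sum global and the [0, 10) range are illustrative, and a recent libbpf declares the bpf_iter_num_* kfuncs in bpf_helpers.h.

```c
/* Sketch of the open-coded numeric iterator pattern tested in iters.c.
 * Assumes vmlinux.h plus a recent bpf_helpers.h; the section name and
 * the "sum" global are illustrative only.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

int sum;

SEC("raw_tp/sys_enter")
int sum_first_ten(const void *ctx)
{
	struct bpf_iter_num it;
	int *v;

	bpf_iter_num_new(&it, 0, 10);		/* iterate v over [0, 10) */
	while ((v = bpf_iter_num_next(&it)))
		sum += *v;
	bpf_iter_num_destroy(&it);		/* every _new needs a _destroy */

	return 0;
}
```

The bpf_for(i, 0, 10) convenience macro from bpf_helpers.h expands to roughly this new/next/destroy sequence, which is why the verifier treats both spellings the same way.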
|
| D | iters_testmod_seq.c |
  13  extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym;
  14  extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym;
  15  extern s64 bpf_iter_testmod_seq_value(int blah, struct bpf_iter_testmod_seq *it) __ksym;
  16  extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym;
  85  struct bpf_iter_testmod_seq it;  in testmod_seq_getter_before_bad() local
  87  return bpf_iter_testmod_seq_value(0, &it);  in testmod_seq_getter_before_bad()
  95  struct bpf_iter_testmod_seq it;  in testmod_seq_getter_after_bad() local
  98  bpf_iter_testmod_seq_new(&it, 100, 100);  in testmod_seq_getter_after_bad()
  100 while ((v = bpf_iter_testmod_seq_next(&it))) {  in testmod_seq_getter_after_bad()
  104 bpf_iter_testmod_seq_destroy(&it);  in testmod_seq_getter_after_bad()
  [all …]
|
| D | iters_num.c |
  142 struct bpf_iter_num it;  in num_invalid_range() local
  144 res_invalid_range = bpf_iter_num_new(&it, 1, 0);  in num_invalid_range()
  145 bpf_iter_num_destroy(&it);  in num_invalid_range()
  156 struct bpf_iter_num it;  in num_max_range() local
  158 res_max_range = 10 + bpf_iter_num_new(&it, 0, BPF_MAX_LOOPS);  in num_max_range()
  159 bpf_iter_num_destroy(&it);  in num_max_range()
  170 struct bpf_iter_num it;  in num_e2big_range() local
  172 res_e2big_range = bpf_iter_num_new(&it, -1, BPF_MAX_LOOPS);  in num_e2big_range()
  173 bpf_iter_num_destroy(&it);  in num_e2big_range()
  184 struct bpf_iter_num it;  in num_succ_elem_cnt() local
  [all …]
|
| D | verifier_bits_iter.c |
  13  int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
  15  int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
  16  void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;
  25  struct bpf_iter_bits it;  in BPF_PROG() local
  28  bpf_iter_bits_new(&it, &data, 1);  in BPF_PROG()
  29  bpf_iter_bits_next(&it);  in BPF_PROG()
  38  struct bpf_iter_bits it = {};  in BPF_PROG() local
  40  bpf_iter_bits_next(&it);  in BPF_PROG()
  49  struct bpf_iter_bits it = {};  in BPF_PROG() local
  51  bpf_iter_bits_destroy(&it);  in BPF_PROG()
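verifier_bits_iter.c declares the bits iterator kfuncs as weak ksyms and then probes both valid and invalid usage. For orientation, a sketch of the intended (valid) pattern that could sit next to those declarations; the count_set_bits() helper is made up, and it must be called from a program type for which these kfuncs are registered.

```c
/* Sketch: count the set bits in one u64 word using the kfuncs declared
 * above. The second argument to bpf_iter_bits_new() is the number of u64
 * words, and each successful bpf_iter_bits_next() returns a pointer to the
 * index of the next set bit (NULL when exhausted).
 */
static __always_inline int count_set_bits(const u64 *word)
{
	struct bpf_iter_bits it;
	int *bit, nr = 0;

	bpf_iter_bits_new(&it, word, 1);
	while ((bit = bpf_iter_bits_next(&it)))
		nr++;
	bpf_iter_bits_destroy(&it);

	return nr;
}
```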
|
| D | verifier_iterating_callbacks.c |
  562 struct bpf_iter_num it;  in __flag() local
  566 bpf_iter_num_new(&it, 0, ARR2_SZ);  in __flag()
  567 while ((v = bpf_iter_num_next(&it))) {  in __flag()
  571 bpf_iter_num_destroy(&it);  in __flag()
  579 struct bpf_iter_num it;  in __flag() local
  583 bpf_iter_num_new(&it, 0, ARR2_SZ);  in __flag()
  584 while ((v = bpf_iter_num_next(&it))) {  in __flag()
  588 bpf_iter_num_destroy(&it);  in __flag()
  598 struct bpf_iter_num it;  in __flag() local
  602 bpf_iter_num_new(&it, 0, ARR2_SZ);  in __flag()
  [all …]
|
| /tools/sched_ext/include/scx/ |
| D | compat.bpf.h |
  24  #define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it, slice) \  argument
  26  scx_bpf_dispatch_from_dsq_set_slice((it), (slice)) : (void)0)
  27  #define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it, vtime) \  argument
  29  scx_bpf_dispatch_from_dsq_set_vtime((it), (vtime)) : (void)0)
  30  #define __COMPAT_scx_bpf_dispatch_from_dsq(it, p, dsq_id, enq_flags) \  argument
  32  scx_bpf_dispatch_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
  33  #define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it, p, dsq_id, enq_flags) \  argument
  35  scx_bpf_dispatch_vtime_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
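As the macro bodies show, these __COMPAT_* wrappers guard calls to the newer dispatch-from-DSQ kfuncs and fall back to a no-op (or false) when they are unavailable, so a sched_ext scheduler keeps loading on older kernels. A heavily hedged usage sketch follows; it assumes the bpf_for_each(scx_dsq, ...) iterator and the BPF_FOR_EACH_ITER helper from the neighbouring scx headers, and MY_DSQ plus the 5ms slice value are made up for illustration.

```c
/* Sketch only: try to move one task from a user DSQ to the local DSQ inside
 * a .dispatch callback, degrading gracefully on kernels that lack
 * scx_bpf_dispatch_from_dsq(). Assumes <scx/common.bpf.h> conventions;
 * MY_DSQ and the slice are illustrative.
 */
#include <scx/common.bpf.h>

void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
{
	struct task_struct *p;

	bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
		/* trim the slice before moving, if the kernel supports it */
		__COMPAT_scx_bpf_dispatch_from_dsq_set_slice(BPF_FOR_EACH_ITER,
							     5000000);
		if (__COMPAT_scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
						       SCX_DSQ_LOCAL, 0))
			break;
	}
}
```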
|
| /tools/testing/selftests/sync/ |
| D | sync_stress_consumer.c |
  108 int fence, merged, tmp, valid, it, i;  in mpcs_consumer_thread() local
  114 for (it = 1; it <= iterations; it++) {  in mpcs_consumer_thread()
  115 fence = sw_sync_fence_create(producer_timelines[0], "name", it);  in mpcs_consumer_thread()
  118 "name", it);  in mpcs_consumer_thread()
  140 ASSERT(test_data_mpsc.counter == n * it,  in mpcs_consumer_thread()
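The stress test above builds on the sw_sync selftest helpers. A small sketch of the underlying flow those helpers provide: create a timeline, take a fence at a sync point, signal the timeline, then wait on the fence. The sw_sync_smoke() wrapper is hypothetical and error handling is elided.

```c
/* Sketch of the basic sw_sync flow used by the sync selftests; relies on the
 * helpers declared in the selftests' sync.h and sw_sync.h.
 */
#include "sync.h"
#include "sw_sync.h"

static int sw_sync_smoke(void)
{
	int timeline, fence, ret;

	timeline = sw_sync_timeline_create();
	fence = sw_sync_fence_create(timeline, "smoke", 1);

	ret = sync_wait(fence, 0);		/* not signaled yet */
	sw_sync_timeline_inc(timeline, 1);	/* signal sync point 1 */
	ret = sync_wait(fence, 100);		/* now signaled, returns > 0 */

	sw_sync_fence_destroy(fence);
	sw_sync_timeline_destroy(timeline);

	return ret > 0 ? 0 : -1;
}
```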
|
| /tools/perf/Documentation/ |
| D | guest-files.txt |
  4  Guest OS /proc/kallsyms file copy. perf reads it to get guest
  5  kernel symbols. Users copy it out from guest OS.
  8  Guest OS /proc/modules file copy. perf reads it to get guest
  9  kernel module information. Users copy it out from guest OS.
|
| D | perf-arm-spe.txt |
  17  events down to individual instructions. Rather than being interrupt-driven, it picks an
  18  instruction to sample and then captures data for it during execution. Data includes execution time
  19  in cycles. For loads and stores it also includes data address, cache miss events, and data origin.
  34  architecture provides a mechanism for the SPE driver to infer the minimum interval at which it shou…
  47  Based on programmable criteria, choose whether to keep the record or discard it. If the record is
  95  number for samples dropped that would have made it through the filter, but can be a rough
  104 For example, if a given instruction A is always converted into two micro-operations, A0 and A1, it
  120 unmap_kernel_at_el0() in the kernel sources. Common cases where it's not required
  125 /sys/bus/event_source/devices/, then it's possible that the SPE interrupt isn't described by
  136 it's recommended to set this to a higher value. The value is written to PMSIRR.INTERVAL.
  [all …]
|
| /tools/testing/selftests/bpf/ |
| D | bpf_experimental.h |
  164 extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
  167 extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
  168 extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
  561 extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
  563 extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
  564 extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;
  567 extern int bpf_iter_task_new(struct bpf_iter_task *it,
  569 extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
  570 extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;
  573 extern int bpf_iter_css_new(struct bpf_iter_css *it,
  [all …]
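bpf_experimental.h declares several open-coded iterator kfuncs (task VMAs, css tasks, tasks, cgroups) that follow the same new/next/destroy contract as the numeric iterator. A sketch of the task-VMA one; the section, the program name and the way the count is reported are illustrative, and the kfuncs require a recent enough kernel.

```c
/* Sketch: count the current task's VMAs with the task_vma iterator declared
 * in bpf_experimental.h. Section and counter handling are illustrative.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char _license[] SEC("license") = "GPL";

SEC("raw_tp/sys_enter")
int count_current_vmas(const void *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct bpf_iter_task_vma vma_it;
	struct vm_area_struct *vma;
	int nr = 0;

	bpf_iter_task_vma_new(&vma_it, task, 0);	/* start at address 0 */
	while ((vma = bpf_iter_task_vma_next(&vma_it)))
		nr++;
	bpf_iter_task_vma_destroy(&vma_it);

	return nr;
}
```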
|
| D | bpf_arena_list.h |
  28  static inline void *bpf_iter_num_new(struct bpf_iter_num *it, int i, int j) { return NULL; }  in bpf_iter_num_new() argument
  29  static inline void bpf_iter_num_destroy(struct bpf_iter_num *it) {}  in bpf_iter_num_destroy() argument
  30  static inline bool bpf_iter_num_next(struct bpf_iter_num *it) { return true; }  in bpf_iter_num_next() argument
|
| /tools/usb/usbip/ |
| D | COPYING |
  7   of this license document, but changing it is not allowed.
  12  freedom to share and change it. By contrast, the GNU General Public
  17  using it. (Some other Free Software Foundation software is covered by
  18  the GNU Library General Public License instead.) You can apply it to
  24  this service if you wish), that you receive source code or can get it
  25  if you want it, that you can change the software or use pieces of it
  31  distribute copies of the software, or if you modify it.
  53  program proprietary. To prevent this, we have made it clear that any
  63  a notice placed by the copyright holder saying it may be distributed
  67  that is to say, a work containing the Program or a portion of it,
  [all …]
|
| D | INSTALL |
  8   unlimited permission to copy, distribute and modify it.
  22  definitions. Finally, it creates a shell script `config.status' that
  38  may remove or edit it.
  42  you want to change it or regenerate `configure' using a newer version
  50  Running `configure' might take a while. While running, it prints
  51  some messages telling which features it is checking for.
  66  for the package's developers. If you use it, you may have to get
  98  With a non-GNU `make', it is safer to compile the package for one
  122 If the package supports it, you can cause programs to be installed
  137 find the X include and library files automatically, but if it doesn't,
  [all …]
|
| /tools/tracing/rtla/ |
| D | Makefile.config |
  12  …ror: $(PKG_CONFIG) needed by libtraceevent/libtracefs is missing on this system, please install it)
  28  $(info libtraceevent version is too low, it must be at least $(LIBTRACEEVENT_MIN_VERSION))
  44  $(info libtracefs version is too low, it must be at least $(LIBTRACEFS_MIN_VERSION))
|
| /tools/bpf/bpftool/Documentation/ |
| D | bpftool-struct_ops.rst |
  41  *STRUCT_OPS_MAP* is specified, it shows information only for the given
  42  struct_ops. Otherwise, it lists all struct_ops currently existing in the
  50  *STRUCT_OPS_MAP* is specified, it dumps information only for the given
  51  struct_ops. Otherwise, it dumps all struct_ops currently existing in the
|
| /tools/testing/selftests/net/forwarding/ |
| D | README |
  64  influence its behavior and tools it invokes, and how it invokes them, in
  79  The variable NETIFS is special. Since it is an array variable, there is no
  80  way to pass it through the environment. Its value can instead be given as
  87  present, so it can contain any shell code. Typically it will contain
|
| /tools/memory-model/litmus-tests/ |
| D | WRC+pooncerelease+fencermbonceonce+Once.litmus |
  7   * the first write is moved to a separate process. Because it features
  8   * a release and a read memory barrier, it should be forbidden. More
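For context, the shape this litmus test encodes is the classic WRC (write-to-read causality) pattern: P1 observes P0's write and publishes with a release store, and P2 reads that store and separates its two reads with smp_rmb(). The sketch below paraphrases it in kernel C; the .litmus file is the authoritative source, and the outcome it asks about (P1 saw x == 1, P2 saw y == 1 but x == 0) should be forbidden.

```c
/* Paraphrase of the WRC+pooncerelease+fencermbonceonce+Once shape; see the
 * .litmus file for the exact test and its "exists" clause.
 */
int x, y;

void P0(void)
{
	WRITE_ONCE(x, 1);
}

void P1(int *r1)
{
	*r1 = READ_ONCE(x);
	smp_store_release(&y, 1);
}

void P2(int *r2, int *r3)
{
	*r2 = READ_ONCE(y);
	smp_rmb();
	*r3 = READ_ONCE(x);
}
```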
|
| /tools/testing/selftests/arm64/signal/ |
| D | README |
  16  by receiving some kind of fatal signal from the Kernel, so it's safer
  38  kind of tests it is extremely easy in fact to end-up injecting other
  39  unrelated SEGV bugs in the testcases, it becomes extremely tricky to
  47  and verify if it is indeed GOOD or BAD (depending on what we were
  52  default it takes care to verify that the test-execution had at least
|
| /tools/objtool/Documentation/ |
| D | objtool.txt |
  9   kernel depends on it for a variety of security and performance features
  71  - If noinstr validation is enabled, it also runs on vmlinux.o, with all
  74  - If IBT or LTO is enabled, it doesn't run on TUs at all. Instead it
  102 For each function, it recursively follows all possible code paths and
  108 instructions). Similarly, it knows how to follow switch statements, for
  154 what it looks like instead:
  168 The benefit of objtool here is that because it ensures that *all*
  181 band. So it doesn't affect runtime performance and it can be
  201 outside of a function, it flags an error since that usually indicates
  229 the same value it had on function entry.
  [all …]
|
| /tools/testing/ktest/examples/ |
| D | README |
  2   The configs still need to be customized for your environment, but it
  3   is broken up by task which makes it easier to understand how to set up
  12  it easy to compile test different archs. You can download the arch
|
| /tools/verification/rv/ |
| D | Makefile.config |
  20  $(info libtraceevent version is too low, it must be at least $(LIBTRACEEVENT_MIN_VERSION))
  36  $(info libtracefs version is too low, it must be at least $(LIBTRACEFS_MIN_VERSION))
|
| /tools/memory-model/Documentation/ |
| D | explanation.txt |
  50  rather, it explains in English what the code expresses symbolically.
  79  for the loads, the model will predict whether it is possible for the
  103 device, stores it in a buffer, and sets a flag to indicate the buffer
  107 ready, and if it is, copies the data back to userspace. The buffer
  136 reads flag into the private variable r1, and if it is set, reads the
  174 If this were to occur it would mean the driver contains a bug, because
  175 incorrect data would get sent to the user: 0 instead of 1. As it
  198 it, as loads can obtain values only from earlier stores.
  203 P1 must load 0 from buf before P0 stores 1 to it; otherwise r2
  207 P0 stores 1 to buf before storing 1 to flag, since it executes
  [all …]
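Most of these matches refer to explanation.txt's running example: a driver (P0) fills a buffer and then sets a flag, while a reader (P1) checks the flag and, if set, reads the buffer. Reconstructed as a sketch below; the document itself walks through whether r1 == 1 && r2 == 0 is possible and which ordering rules the bad outcome out.

```c
/* The message-passing example discussed in explanation.txt: P0 is the driver,
 * P1 the reader. The question analysed there is whether P1 can see
 * flag == 1 yet still read the stale buf == 0.
 */
int buf = 0, flag = 0;

void P0(void)
{
	WRITE_ONCE(buf, 1);
	WRITE_ONCE(flag, 1);
}

void P1(void)
{
	int r1;
	int r2 = 0;

	r1 = READ_ONCE(flag);
	if (r1)
		r2 = READ_ONCE(buf);
}
```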
|
| /tools/testing/memblock/ |
| D | README |
  13  Because it is used so early in the booting process, testing and debugging it is
  26  before it. Most of them don't match the kernel implementation, so one should
  32  To run the tests, build the main target and run it:
  58  allocation functions. Tests for each group are defined in dedicated files, as it
  82  Some allocation functions clear the memory in the process, so it is required for
|