/tools/lib/api/fd/ |
D | array.c |
      15  fda->entries = NULL;   in fdarray__init()
      27  struct pollfd *entries = realloc(fda->entries, size);   in fdarray__grow() local
      29  if (entries == NULL)   in fdarray__grow()
      34  free(entries);   in fdarray__grow()
      38  memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);   in fdarray__grow()
      42  fda->entries = entries;   in fdarray__grow()
      65  free(fda->entries);   in fdarray__exit()
      84  fda->entries[fda->nr].fd = fd;   in fdarray__add()
      85  fda->entries[fda->nr].events = revents;   in fdarray__add()
     101  if (!fda->entries[fd].events)   in fdarray__filter()
      [all …]
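The fdarray__grow() lines show the canonical safe-realloc idiom: grow into a temporary pointer so the old array survives an allocation failure, then zero only the newly added tail, which realloc() leaves uninitialized. A minimal standalone sketch of that idiom (grow_pollfds() is an illustrative name, not the perf API):

/*
 * Sketch of the grow-by-realloc idiom seen in fdarray__grow().
 */
#include <poll.h>
#include <stdlib.h>
#include <string.h>

static int grow_pollfds(struct pollfd **fds, int *nr_alloc, int nr_extra)
{
	size_t size = (*nr_alloc + nr_extra) * sizeof(struct pollfd);
	struct pollfd *entries = realloc(*fds, size);

	if (entries == NULL)
		return -1;			/* *fds is still valid */

	/* realloc() does not zero the extension; do it by hand */
	memset(&entries[*nr_alloc], 0, sizeof(struct pollfd) * nr_extra);

	*fds = entries;
	*nr_alloc += nr_extra;
	return 0;
}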
|
/tools/perf/util/ |
D | mem2node.c |
      50  struct phys_entry *entries, *tmp_entries;   in mem2node__init() local
      62  entries = zalloc(sizeof(*entries) * max);   in mem2node__init()
      63  if (!entries)   in mem2node__init()
      84  struct phys_entry *prev = &entries[j - 1];   in mem2node__init()
      93  phys_entry__init(&entries[j++], start, bsize, n->node);   in mem2node__init()
      98  tmp_entries = realloc(entries, sizeof(*entries) * j);   in mem2node__init()
     100  entries = tmp_entries;   in mem2node__init()
     104  entries[i].node, entries[i].start, entries[i].end);   in mem2node__init()
     106  phys_entry__insert(&entries[i], &map->root);   in mem2node__init()
     109  map->entries = entries;   in mem2node__init()
      [all …]
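mem2node__init() allocates a worst-case array, coalesces adjacent physical ranges in place, then trims the allocation with realloc(), keeping the old block if the shrink fails. A sketch of just that trim step, with a simplified phys_entry layout (shrink_entries() is an illustrative name):

#include <stdlib.h>

struct phys_entry { unsigned long start, end; unsigned int node; };

static struct phys_entry *shrink_entries(struct phys_entry *entries,
					 size_t used)
{
	struct phys_entry *tmp;

	if (used == 0)
		return entries;		/* nothing worth trimming to */

	tmp = realloc(entries, sizeof(*entries) * used);
	/* on failure the old, larger block remains valid */
	return tmp ? tmp : entries;
}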
|
D | pstack.c |
      18  void *entries[];   member
      45  if (pstack->entries[i] == key) {   in pstack__remove()
      47  memmove(pstack->entries + i,   in pstack__remove()
      48  pstack->entries + i + 1,   in pstack__remove()
      63  pstack->entries[pstack->top++] = key;   in pstack__push()
      75  ret = pstack->entries[--pstack->top];   in pstack__pop()
      76  pstack->entries[pstack->top] = NULL;   in pstack__pop()
      84  return pstack->entries[pstack->top - 1];   in pstack__peek()
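pstack.c implements a small pointer stack in a flexible array member; removing an arbitrary key closes the gap with memmove() so the remaining entries keep their order. A toy version under those assumptions (all names illustrative):

#include <stdlib.h>
#include <string.h>

struct pstack {
	unsigned short top, max;
	void *entries[];		/* flexible array member */
};

static struct pstack *pstack_new(unsigned short max)
{
	struct pstack *s = calloc(1, sizeof(*s) + max * sizeof(void *));

	if (s)
		s->max = max;
	return s;
}

static void pstack_push(struct pstack *s, void *key)
{
	if (s->top < s->max)
		s->entries[s->top++] = key;
}

static void pstack_remove(struct pstack *s, void *key)
{
	unsigned short i;

	for (i = 0; i < s->top; i++) {
		if (s->entries[i] != key)
			continue;
		/* close the gap, preserving stack order */
		memmove(s->entries + i, s->entries + i + 1,
			(s->top - i - 1) * sizeof(void *));
		s->top--;
		return;
	}
}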
|
D | rb_resort.h |
      72  struct rb_root entries; \
      79  struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \
      88  rb_insert_color(sorted_nd, &sorted->entries); \
      92  struct rb_root *entries) \
      96  for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
     103  static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \
     109  sorted->entries = RB_ROOT; \
     110  __name##_sorted__sort(sorted, entries); \
     128  for (__nd = rb_first(&__name->entries); \
     143  DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
      [all …]
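The macro body expands to the classic rbtree insertion walk: descend from the root while remembering the link pointer to patch, then rb_link_node() plus rb_insert_color(). Spelled out for a plain struct, assuming the tools/include rbtree helpers (item and item_insert are illustrative):

#include <linux/rbtree.h>

struct item {
	struct rb_node rb_node;
	long key;
};

static void item_insert(struct rb_root *entries, struct item *it)
{
	struct rb_node **p = &entries->rb_node, *parent = NULL;

	/* descend, remembering the link pointer we will patch */
	while (*p != NULL) {
		struct item *cur;

		parent = *p;
		cur = rb_entry(parent, struct item, rb_node);
		p = it->key < cur->key ? &(*p)->rb_left : &(*p)->rb_right;
	}
	rb_link_node(&it->rb_node, parent, p);
	rb_insert_color(&it->rb_node, entries);
}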
|
D | syscalltbl.c |
      62  struct syscall *entries;   in syscalltbl__init_native() local
      68  entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);   in syscalltbl__init_native()
      69  if (tbl->syscalls.entries == NULL)   in syscalltbl__init_native()
      74  entries[j].name = syscalltbl_native[i];   in syscalltbl__init_native()
      75  entries[j].id = i;   in syscalltbl__init_native()
      80  qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);   in syscalltbl__init_native()
     100  zfree(&tbl->syscalls.entries);   in syscalltbl__delete()
     111  struct syscall *sc = bsearch(name, tbl->syscalls.entries,   in syscalltbl__id()
     121  struct syscall *syscalls = tbl->syscalls.entries;   in syscalltbl__strglobmatch_next()
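syscalltbl.c builds a name/id table, qsort()s it by name once, and then answers name-to-id queries with bsearch(). A self-contained sketch of that pattern with a made-up three-entry table (note that bsearch() passes the search key as the comparator's first argument):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct syscall { const char *name; int id; };

static int syscallcmp(const void *a, const void *b)
{
	return strcmp(((const struct syscall *)a)->name,
		      ((const struct syscall *)b)->name);
}

/* bsearch() hands the key, not an element, as the first argument */
static int syscallkeycmp(const void *name, const void *entry)
{
	return strcmp(name, ((const struct syscall *)entry)->name);
}

int main(void)
{
	struct syscall entries[] = {
		{ "write", 1 }, { "read", 0 }, { "close", 3 },
	};
	size_t nr_entries = sizeof(entries) / sizeof(entries[0]);
	struct syscall *sc;

	qsort(entries, nr_entries, sizeof(struct syscall), syscallcmp);
	sc = bsearch("read", entries, nr_entries, sizeof(struct syscall),
		     syscallkeycmp);
	printf("read -> id %d\n", sc ? sc->id : -1);
	return 0;
}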
|
D | rblist.c |
      15  struct rb_node **p = &rblist->entries.rb_root.rb_node;   in rblist__add_node()
      40  rb_insert_color_cached(new_node, &rblist->entries, leftmost);   in rblist__add_node()
      48  rb_erase_cached(rb_node, &rblist->entries);   in rblist__remove_node()
      57  struct rb_node **p = &rblist->entries.rb_root.rb_node;   in __rblist__findnew()
      82  &rblist->entries, leftmost);   in __rblist__findnew()
     103  rblist->entries = RB_ROOT_CACHED;   in rblist__init()
     112  struct rb_node *pos, *next = rb_first_cached(&rblist->entries);   in rblist__exit()
     133  for (node = rb_first_cached(&rblist->entries); node;   in rblist__entry()
|
D | xyarray.c |
      15  xy->entries = xlen * ylen;   in xyarray__new()
      25  size_t n = xy->entries * xy->entry_size;   in xyarray__reset()
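A sketch of what xyarray stores: a 2-D table kept as one flat, zeroed block, with entries = xlen * ylen and cells addressed as x * ylen + y. Field names follow the excerpt; the helpers are illustrative, not the libperf API:

#include <stdlib.h>
#include <string.h>

struct xyarray {
	size_t entry_size;
	size_t entries;			/* xlen * ylen */
	size_t ylen;
	char contents[];
};

static struct xyarray *xyarray_new(int xlen, int ylen, size_t entry_size)
{
	struct xyarray *xy = calloc(1, sizeof(*xy) +
				       (size_t)xlen * ylen * entry_size);

	if (xy) {
		xy->entry_size = entry_size;
		xy->ylen = ylen;
		xy->entries = (size_t)xlen * ylen;
	}
	return xy;
}

static void *xyarray_entry(struct xyarray *xy, int x, int y)
{
	return &xy->contents[(x * xy->ylen + y) * xy->entry_size];
}

static void xyarray_reset(struct xyarray *xy)
{
	memset(xy->contents, 0, xy->entries * xy->entry_size);
}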
|
D | probe-file.h |
      21  struct list_head entries;   member
      36  list_for_each_entry(entry, &pcache->entries, node)
|
D | mem2node.h |
      12  struct phys_entry *entries;   member
|
/tools/perf/trace/beauty/ |
D | ioctl.c |
      41  if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL)   in ioctl__scnprintf_tty_cmd()
      42  return scnprintf(bf, size, "%s", strarray__ioctl_tty_cmd.entries[nr]);   in ioctl__scnprintf_tty_cmd()
      52  if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL)   in ioctl__scnprintf_drm_cmd()
      53  return scnprintf(bf, size, "DRM_%s", strarray__drm_ioctl_cmds.entries[nr]);   in ioctl__scnprintf_drm_cmd()
      63  …if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] !…   in ioctl__scnprintf_sndrv_pcm_cmd()
      64  return scnprintf(bf, size, "SNDRV_PCM_%s", strarray__sndrv_pcm_ioctl_cmds.entries[nr]);   in ioctl__scnprintf_sndrv_pcm_cmd()
      74  …if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] !…   in ioctl__scnprintf_sndrv_ctl_cmd()
      75  return scnprintf(bf, size, "SNDRV_CTL_%s", strarray__sndrv_ctl_ioctl_cmds.entries[nr]);   in ioctl__scnprintf_sndrv_ctl_cmd()
      85  if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL)   in ioctl__scnprintf_kvm_cmd()
      86  return scnprintf(bf, size, "KVM_%s", strarray__kvm_ioctl_cmds.entries[nr]);   in ioctl__scnprintf_kvm_cmd()
      [all …]
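Every beautifier above repeats the same guard: index the string table only when the command number is in range and the slot is non-NULL. A userspace sketch of that guard, with snprintf() standing in for the kernel-style scnprintf() and an assumed raw-number fallback (the excerpts truncate before whatever fallback the real code uses):

#include <stdio.h>

struct strarray {
	int nr_entries;
	const char **entries;
};

static int scnprintf_strarray(char *bf, size_t size,
			      const struct strarray *sa, int nr)
{
	if (nr >= 0 && nr < sa->nr_entries && sa->entries[nr] != NULL)
		return snprintf(bf, size, "%s", sa->entries[nr]);

	return snprintf(bf, size, "%d", nr);	/* unknown value */
}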
|
D | pkey_alloc.c |
      17  const char *s = sa->entries[0];   in strarray__scnprintf_flags()
      32  if (sa->entries[i] != NULL)   in strarray__scnprintf_flags()
      33  … += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? sa->prefix : "", sa->entries[i]);   in strarray__scnprintf_flags()
|
D | mmap.c |
      70  …if (behavior < strarray__madvise_advices.nr_entries && strarray__madvise_advices.entries[behavior]…   in madvise__scnprintf_behavior()
      71  return scnprintf(bf, size, "MADV_%s", strarray__madvise_advices.entries[behavior]);   in madvise__scnprintf_behavior()
|
D | beauty.h |
      14  const char **entries;   member
      19  .entries = array, \
      26  .entries = array, \
      49  struct strarray **entries;   member
      54  .entries = array, \
|
/tools/perf/tests/ |
D | fdarray.c |
      14  fda->entries[fd].fd = fda->nr - fd;   in fdarray__init_revents()
      15  fda->entries[fd].events = revents;   in fdarray__init_revents()
      16  fda->entries[fd].revents = revents;   in fdarray__init_revents()
      58  fda->entries[2].revents = POLLIN;   in test__fdarray__filter()
      70  fda->entries[0].revents = POLLIN;   in test__fdarray__filter()
      71  fda->entries[3].revents = POLLIN;   in test__fdarray__filter()
     103  if (fda->entries[_idx].fd != _fd) { \   in test__fdarray__add()
     105  __LINE__, _idx, fda->entries[1].fd, _fd); \   in test__fdarray__add()
     108  if (fda->entries[_idx].events != (_revents)) { \   in test__fdarray__add()
     110  __LINE__, _idx, fda->entries[_idx].fd, _revents); \   in test__fdarray__add()
      [all …]
|
/tools/build/feature/ |
D | test-backtrace.c |
       8  size_t entries;   in main() local
      10  entries = backtrace(backtrace_fns, 10);   in main()
      11  backtrace_symbols_fd(backtrace_fns, entries, 1);   in main()
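This feature probe compiles only when glibc-style backtrace support is present. Filled out as a standalone program (assuming <execinfo.h>): capture up to 10 return addresses and write their symbolized form to stdout (fd 1):

#include <execinfo.h>
#include <stddef.h>

int main(void)
{
	void *backtrace_fns[10];
	size_t entries;

	entries = backtrace(backtrace_fns, 10);
	backtrace_symbols_fd(backtrace_fns, entries, 1);
	return 0;
}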
|
/tools/lib/perf/include/internal/ |
D | evlist.h |
      17  struct list_head entries;   member
      71  __perf_evlist__for_each_entry(&(evlist)->entries, evsel)
      87  __perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel)
     105  __perf_evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
     109  return list_entry(evlist->entries.next, struct perf_evsel, node);   in perf_evlist__first()
     114  return list_entry(evlist->entries.prev, struct perf_evsel, node);   in perf_evlist__last()
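A sketch of the idiom behind those macros, assuming the tools/include/linux/list.h helpers: evsels hang off evlist->entries through their embedded ->node, so "first" and "last" are just the head's ->next and ->prev converted back to the containing struct (both structs are trimmed to what the idiom needs):

#include <linux/list.h>

struct perf_evsel { struct list_head node; };
struct perf_evlist { struct list_head entries; };

static struct perf_evsel *evlist_first(struct perf_evlist *evlist)
{
	/* head->next is the first evsel's embedded node */
	return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static struct perf_evsel *evlist_last(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.prev, struct perf_evsel, node);
}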
|
/tools/include/linux/ |
D | stacktrace.h |
       9  unsigned long *entries;   member
      15  backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);   in print_stack_trace()
      20  backtrace((void **)(trace)->entries, (trace)->max_entries))
|
/tools/testing/selftests/bpf/progs/ |
D | test_perf_branches.c |
      24  __u64 entries[4 * 3] = {0};   in perf_branches() local
      28  written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);   in perf_branches()
|
D | bpf_iter_task_stack.c |
      10  unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};   variable
      23  retlen = bpf_get_task_stack(task, entries,   in dump_task_stack()
      32  BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);   in dump_task_stack()
|
/tools/lib/bpf/ |
D | xsk.h |
     106  __u32 entries = r->cached_prod - r->cached_cons;   in xsk_cons_nb_avail() local
     108  if (entries == 0) {   in xsk_cons_nb_avail()
     110  entries = r->cached_prod - r->cached_cons;   in xsk_cons_nb_avail()
     113  return (entries > nb) ? nb : entries;   in xsk_cons_nb_avail()
     141  size_t entries = xsk_cons_nb_avail(cons, nb);   in xsk_ring_cons__peek() local
     143  if (entries > 0) {   in xsk_ring_cons__peek()
     150  cons->cached_cons += entries;   in xsk_ring_cons__peek()
     153  return entries;   in xsk_ring_cons__peek()
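A sketch of the consumer-side accounting in xsk.h: the cursors are free-running 32-bit counters, so "prod - cons" yields the number of filled slots even after wraparound, thanks to unsigned arithmetic. The shared-ring refresh that the real xsk_cons_nb_avail() performs when entries == 0 is omitted here:

#include <stdint.h>

struct ring {
	uint32_t cached_prod;
	uint32_t cached_cons;
};

/* number of entries available to consume, capped at nb */
static uint32_t cons_nb_avail(const struct ring *r, uint32_t nb)
{
	uint32_t entries = r->cached_prod - r->cached_cons;

	return entries > nb ? nb : entries;
}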
|
/tools/testing/kunit/ |
D | kunit_config.py |
      36  def entries(self):   member in Kconfig
      43  for a in self.entries():
      45  for b in other.entries():
      57  for entry in self.entries():
|
/tools/io_uring/ |
D | syscall.c |
      42  int io_uring_setup(unsigned int entries, struct io_uring_params *p)   in io_uring_setup() argument
      44  return syscall(__NR_io_uring_setup, entries, p);   in io_uring_setup()
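A sketch of calling the wrapper above (its prototype is repeated so the snippet stands alone): request a ring of at least 8 entries (the kernel may round this up) and read back the sizes it chose. Error handling is kept minimal:

#include <stdio.h>
#include <string.h>
#include <linux/io_uring.h>

int io_uring_setup(unsigned int entries, struct io_uring_params *p);

int main(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));	/* the params struct must be zeroed */
	fd = io_uring_setup(8, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	printf("sq entries %u, cq entries %u\n", p.sq_entries, p.cq_entries);
	return 0;
}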
|
/tools/lib/perf/ |
D | xyarray.c |
      15  xy->entries = xlen * ylen;   in xyarray__new()
      25  size_t n = xy->entries * xy->entry_size;   in xyarray__reset()
|
/tools/perf/ui/ |
D | browser.c |
      85  } while (pos != browser->entries);   in ui_browser__list_head_filter_entries()
      98  } while (pos != browser->entries);   in ui_browser__list_head_filter_prev_entries()
     105  struct list_head *head = browser->entries;   in ui_browser__list_head_seek()
     140  struct rb_root *root = browser->entries;   in ui_browser__rb_tree_seek()
     174  browser->top = rb_first(browser->entries);   in ui_browser__rb_tree_refresh()
     501  struct list_head *head = browser->entries;   in ui_browser__list_head_refresh()
     504  if (browser->top == NULL || browser->top == browser->entries)   in ui_browser__list_head_refresh()
     612  browser->top = browser->entries;   in ui_browser__argv_seek()
     618  browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;   in ui_browser__argv_seek()
     623  assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);   in ui_browser__argv_seek()
      [all …]
|
/tools/testing/kunit/test_data/ |
D | test_is_test_passed-no_tests_run.log |
      11  PID hash table entries: 256 (order: -1, 2048 bytes)
      12  Dentry cache hash table entries: 8192 (order: 4, 65536 bytes)
      13  Inode-cache hash table entries: 4096 (order: 3, 32768 bytes)
      20  Mount-cache hash table entries: 512 (order: 0, 4096 bytes)
      21  Mountpoint-cache hash table entries: 512 (order: 0, 4096 bytes)
      26  futex hash table entries: 256 (order: 0, 6144 bytes)
|