
Searched refs:entries (Results 1 – 7 of 7) sorted by relevance

/lib/
list-test.c

  366  struct list_head entries[3], *cur;               in list_test_list_cut_position() (local)
  371  list_add_tail(&entries[0], &list1);              in list_test_list_cut_position()
  372  list_add_tail(&entries[1], &list1);              in list_test_list_cut_position()
  373  list_add_tail(&entries[2], &list1);              in list_test_list_cut_position()
  376  list_cut_position(&list2, &list1, &entries[1]);  in list_test_list_cut_position()
  380  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);     in list_test_list_cut_position()
  387  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);     in list_test_list_cut_position()
  394  struct list_head entries[3], *cur;               in list_test_list_cut_before() (local)
  399  list_add_tail(&entries[0], &list1);              in list_test_list_cut_before()
  400  list_add_tail(&entries[1], &list1);              in list_test_list_cut_before()
  [all …]
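
The hits above come from the KUnit list tests: list_cut_position() moves the initial run of a list, up to and including a given entry, onto a second list. A minimal sketch of the same pattern, assuming kernel context with <linux/list.h> (the function name here is illustrative, not from the test file):

#include <linux/list.h>

static void cut_example(void)
{
	LIST_HEAD(list1);
	LIST_HEAD(list2);
	struct list_head entries[3];

	list_add_tail(&entries[0], &list1);
	list_add_tail(&entries[1], &list1);
	list_add_tail(&entries[2], &list1);

	/* Move entries[0]..entries[1] (inclusive) from list1 onto list2;
	 * entries[2] remains the sole element of list1. */
	list_cut_position(&list2, &list1, &entries[1]);
}

list_cut_before() (the hit at line 394 above) is the sibling helper that cuts everything up to, but excluding, the given entry.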
stackdepot.c

   65  unsigned long entries[];  /* Variable-sized array of entries. */  (member)
  105  depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)  in depot_alloc_stack() (argument)
  108  size_t required_size = struct_size(stack, entries, size);  in depot_alloc_stack()
  138  memcpy(stack->entries, entries, flex_array_size(stack, entries, size));  in depot_alloc_stack()
  178  static inline u32 hash_stack(unsigned long *entries, unsigned int size)  in hash_stack() (argument)
  180  return jhash2((u32 *)entries,  in hash_stack()
  181  array_size(size, sizeof(*entries)) / sizeof(u32),  in hash_stack()
  202  unsigned long *entries, int size,  in find_stack() (argument)
  210  !stackdepot_memcmp(entries, found->entries, size))  in find_stack()
  225  unsigned long *entries;  in stack_depot_print() (local)
  [all …]
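
depot_alloc_stack() above shows the kernel's flexible-array-member pattern: struct_size() computes the header-plus-N-elements allocation size with overflow checking, and flex_array_size() sizes just the trailing array for the memcpy(). A minimal sketch of that pattern, assuming <linux/overflow.h> and <linux/slab.h>; the struct and function names here are illustrative:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct trace_record {
	u32 hash;
	u32 size;
	unsigned long entries[];	/* variable-sized array of entries */
};

static struct trace_record *record_alloc(unsigned long *entries, u32 size,
					 u32 hash)
{
	struct trace_record *rec;

	/* Header plus 'size' trailing elements, overflow-checked. */
	rec = kmalloc(struct_size(rec, entries, size), GFP_KERNEL);
	if (!rec)
		return NULL;

	rec->hash = hash;
	rec->size = size;
	/* flex_array_size() == size * sizeof(*rec->entries), overflow-checked. */
	memcpy(rec->entries, entries, flex_array_size(rec, entries, size));
	return rec;
}

The hash_stack() hit shows the matching lookup side: jhash2() hashes the entries array as a sequence of u32 words, and find_stack() compares candidates with the same hash byte-for-byte.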
test_rhashtable.c

   73  unsigned int entries;  (member)
  138  unsigned int entries)  in test_rht_lookup() (argument)
  142  for (i = 0; i < entries; i++) {  in test_rht_lookup()
  175  static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)  in test_bucket_stats() (argument)
  202  total, atomic_read(&ht->nelems), entries, chain_len);  in test_bucket_stats()
  204  if (total != atomic_read(&ht->nelems) || total != entries)  in test_bucket_stats()
  209  unsigned int entries)  in test_rhashtable() (argument)
  220  pr_info(" Adding %d keys\n", entries);  in test_rhashtable()
  222  for (i = 0; i < entries; i++) {  in test_rhashtable()
  237  test_bucket_stats(ht, entries);  in test_rhashtable()
  [all …]
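
test_rhashtable() drives the resizable hash table with a configurable number of entries and then cross-checks ht->nelems against the walked bucket totals. A minimal sketch of the insertion loop, assuming <linux/rhashtable.h>; the object layout and params below are illustrative, not the test's actual ones:

#include <linux/rhashtable.h>
#include <linux/stddef.h>

struct demo_obj {
	int value;			/* hash key */
	struct rhash_head node;		/* table linkage */
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof(int),
	.key_offset	= offsetof(struct demo_obj, value),
	.head_offset	= offsetof(struct demo_obj, node),
};

static int insert_entries(struct rhashtable *ht, struct demo_obj *objs,
			  unsigned int entries)
{
	unsigned int i;
	int err;

	for (i = 0; i < entries; i++) {
		objs[i].value = i;
		err = rhashtable_insert_fast(ht, &objs[i].node, demo_params);
		if (err)
			return err;
	}
	return 0;
}

The table itself would be set up beforehand with rhashtable_init(ht, &demo_params) and torn down with rhashtable_destroy(ht).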
fault-inject.c

   70  unsigned long entries[MAX_STACK_TRACE_DEPTH];  in fail_stacktrace() (local)
   77  nr_entries = stack_trace_save(entries, depth, 1);  in fail_stacktrace()
   79  if (attr->reject_start <= entries[n] &&  in fail_stacktrace()
   80  entries[n] < attr->reject_end)  in fail_stacktrace()
   82  if (attr->require_start <= entries[n] &&  in fail_stacktrace()
   83  entries[n] < attr->require_end)  in fail_stacktrace()
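
fail_stacktrace() captures the current call chain with stack_trace_save() and tests each return address against the configured require/reject ranges, so faults are injected only from (or never from) particular code. A minimal sketch of that range filter, assuming <linux/stacktrace.h>; MAX_DEPTH and the function name are illustrative:

#include <linux/stacktrace.h>
#include <linux/types.h>

#define MAX_DEPTH 32

/* True if any caller address on the current stack falls in [start, end). */
static bool trace_hits_range(unsigned long start, unsigned long end)
{
	unsigned long entries[MAX_DEPTH];
	unsigned int nr_entries, n;

	/* skipnr = 1 drops this function's own frame, as in the hit above. */
	nr_entries = stack_trace_save(entries, MAX_DEPTH, 1);
	for (n = 0; n < nr_entries; n++) {
		if (start <= entries[n] && entries[n] < end)
			return true;
	}
	return false;
}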
dynamic_debug.c

  1075  int n = 0, entries = 0, modct = 0;  in dynamic_debug_init() (local)
  1090  entries++;  in dynamic_debug_init()
  1108  entries, modct, (int)((modct * sizeof(struct ddebug_table)) >> 10),  in dynamic_debug_init()
  1109  (int)((entries * sizeof(struct _ddebug)) >> 10));  in dynamic_debug_init()
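
The printout at lines 1108–1109 reports the tables' memory footprint in KiB: shifting a byte count right by 10 divides it by 1024. A minimal sketch of that arithmetic (illustrative helper, not from the file):

#include <linux/types.h>

static size_t table_kib(size_t entries, size_t entry_size)
{
	/* bytes >> 10 == bytes / 1024, i.e. KiB */
	return (entries * entry_size) >> 10;
}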
Kconfig

  471  Support entries which occupy multiple consecutive indices in the

Kconfig.debug

  1951  bool "Debugfs entries for fault-injection capabilities"