Searched refs:entries under /lib/ (results 1 – 8 of 8), sorted by relevance
D | stackdepot.c
     63  unsigned long entries[];  /* Variable-sized array of frames */      member
    126  unsigned long entries = 0;                                          in stack_depot_early_init()  local
    149  entries = 1UL << stack_bucket_number_order;                         in stack_depot_early_init()
    153  entries,                                                            in stack_depot_early_init()
    173  unsigned long entries;                                              in stack_depot_init()  local
    186  entries = 1UL << stack_bucket_number_order;                         in stack_depot_init()
    190  entries = nr_free_buffer_pages();                                   in stack_depot_init()
    191  entries = roundup_pow_of_two(entries);                              in stack_depot_init()
    194  entries >>= (scale - PAGE_SHIFT);                                   in stack_depot_init()
    196  entries <<= (PAGE_SHIFT - scale);                                   in stack_depot_init()
    [all …]
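The stack_depot_init() hits compute how many hash buckets the depot gets when no explicit bucket order was requested: take the number of free buffer pages, round up to a power of two, then scale by a bytes-per-bucket factor. A minimal sketch of that arithmetic follows; the scale value is passed as a parameter here for illustration, and the cap and explicit-order override the real function applies are omitted.

#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/swap.h>

/*
 * Sketch of the bucket-count calculation seen around lines 190-196:
 * roughly one bucket per 2^scale bytes of low memory, forced to a
 * power of two so the count can serve as a hash mask.
 */
static unsigned long depot_bucket_count(unsigned int scale)
{
	unsigned long entries;

	entries = nr_free_buffer_pages();
	entries = roundup_pow_of_two(entries);

	if (scale > PAGE_SHIFT)
		entries >>= (scale - PAGE_SHIFT);	/* fewer buckets than pages */
	else
		entries <<= (PAGE_SHIFT - scale);	/* more buckets than pages */

	return entries;
}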
D | hashtable_test.c
    125  struct hashtable_test_entry entries[3];                 in hashtable_test_hash_for_each()  local
    132  entries[i].key = i;                                     in hashtable_test_hash_for_each()
    133  entries[i].data = i + 10;                               in hashtable_test_hash_for_each()
    134  entries[i].visited = 0;                                 in hashtable_test_hash_for_each()
    135  hash_add(hash, &entries[i].node, entries[i].key);       in hashtable_test_hash_for_each()
    149  KUNIT_EXPECT_EQ(test, entries[j].visited, 1);           in hashtable_test_hash_for_each()
    154  struct hashtable_test_entry entries[3];                 in hashtable_test_hash_for_each_safe()  local
    162  entries[i].key = i;                                     in hashtable_test_hash_for_each_safe()
    163  entries[i].data = i + 10;                               in hashtable_test_hash_for_each_safe()
    164  entries[i].visited = 0;                                 in hashtable_test_hash_for_each_safe()
    [all …]
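These KUnit tests exercise the <linux/hashtable.h> pattern: a fixed-bucket table, hash_add() keyed inserts, iteration with hash_for_each(), and hash_for_each_safe() when entries are removed during the walk. A minimal sketch of that pattern, with an illustrative demo_entry struct standing in for the test's hashtable_test_entry:

#include <linux/hashtable.h>
#include <linux/printk.h>

struct demo_entry {
	int key;
	int data;
	struct hlist_node node;
};

static void demo_hashtable(void)
{
	DEFINE_HASHTABLE(hash, 3);		/* 2^3 = 8 buckets */
	struct demo_entry entries[3], *obj;
	struct hlist_node *tmp;
	int i, bkt;

	for (i = 0; i < 3; i++) {
		entries[i].key = i;
		entries[i].data = i + 10;
		hash_add(hash, &entries[i].node, entries[i].key);
	}

	/* Plain walk: visits every entry in every bucket. */
	hash_for_each(hash, bkt, obj, node)
		pr_info("key=%d data=%d\n", obj->key, obj->data);

	/* Safe walk: entries may be deleted while iterating. */
	hash_for_each_safe(hash, bkt, tmp, obj, node)
		hash_del(&obj->node);
}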
D | list-test.c
    387  struct list_head entries[3], *cur;                      in list_test_list_cut_position()  local
    392  list_add_tail(&entries[0], &list1);                     in list_test_list_cut_position()
    393  list_add_tail(&entries[1], &list1);                     in list_test_list_cut_position()
    394  list_add_tail(&entries[2], &list1);                     in list_test_list_cut_position()
    397  list_cut_position(&list2, &list1, &entries[1]);         in list_test_list_cut_position()
    401  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);            in list_test_list_cut_position()
    408  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);            in list_test_list_cut_position()
    415  struct list_head entries[3], *cur;                      in list_test_list_cut_before()  local
    420  list_add_tail(&entries[0], &list1);                     in list_test_list_cut_before()
    421  list_add_tail(&entries[1], &list1);                     in list_test_list_cut_before()
    [all …]
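The two tests hit here check the list-splitting helpers: list_cut_position() moves everything from the head of list1 up to and including the given entry onto list2, while list_cut_before() stops just before it. A minimal sketch of the first case, using the same three-entry setup:

#include <linux/list.h>

static void demo_list_cut(void)
{
	LIST_HEAD(list1);
	LIST_HEAD(list2);
	struct list_head entries[3];

	list_add_tail(&entries[0], &list1);
	list_add_tail(&entries[1], &list1);
	list_add_tail(&entries[2], &list1);

	/* list2 now holds entries[0..1]; list1 keeps only entries[2]. */
	list_cut_position(&list2, &list1, &entries[1]);
}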
D | test_rhashtable.c
     73  unsigned int entries;                                              member
    138  unsigned int entries)                                              in test_rht_lookup()  argument
    142  for (i = 0; i < entries; i++) {                                    in test_rht_lookup()
    175  static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)   in test_bucket_stats()  argument
    202  total, atomic_read(&ht->nelems), entries, chain_len);              in test_bucket_stats()
    204  if (total != atomic_read(&ht->nelems) || total != entries)         in test_bucket_stats()
    209  unsigned int entries)                                              in test_rhashtable()  argument
    220  pr_info(" Adding %d keys\n", entries);                             in test_rhashtable()
    222  for (i = 0; i < entries; i++) {                                    in test_rhashtable()
    237  test_bucket_stats(ht, entries);                                    in test_rhashtable()
    [all …]
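test_rhashtable() drives the resizable-hashtable API with a caller-chosen number of entries: rhashtable_init() with an rhashtable_params describing the key and head offsets, rhashtable_insert_fast() per object, lookups, then teardown. A condensed sketch of that flow; the demo_obj layout and parameter values are illustrative, not the ones the test module uses.

#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/stddef.h>

/* Illustrative object; the real test uses struct test_obj. */
struct demo_obj {
	int value;			/* lookup key */
	struct rhash_head node;
};

static const struct rhashtable_params demo_params = {
	.key_len     = sizeof(int),
	.key_offset  = offsetof(struct demo_obj, value),
	.head_offset = offsetof(struct demo_obj, node),
};

static int demo_rht(struct demo_obj *objs, unsigned int entries)
{
	struct rhashtable ht;
	unsigned int i;
	int err;

	err = rhashtable_init(&ht, &demo_params);
	if (err)
		return err;

	/* Insert 'entries' objects keyed by their index. */
	for (i = 0; i < entries && !err; i++) {
		objs[i].value = i;
		err = rhashtable_insert_fast(&ht, &objs[i].node, demo_params);
	}

	/* Every key inserted above should be found again. */
	for (i = 0; i < entries && !err; i++) {
		int key = i;

		if (!rhashtable_lookup_fast(&ht, &key, demo_params))
			err = -ENOENT;
	}

	rhashtable_destroy(&ht);
	return err;
}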
D | ref_tracker.c
    189  unsigned long entries[REF_TRACKER_STACK_ENTRIES];                  in ref_tracker_alloc()  local
    209  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);    in ref_tracker_alloc()
    210  tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);   in ref_tracker_alloc()
    222  unsigned long entries[REF_TRACKER_STACK_ENTRIES];                  in ref_tracker_free()  local
    239  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);    in ref_tracker_free()
    240  stack_handle = stack_depot_save(entries, nr_entries,               in ref_tracker_free()
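Both ref_tracker_alloc() and ref_tracker_free() follow the same two-step pattern visible in the hits: stack_trace_save() snapshots the current call chain into a local entries[] buffer, and stack_depot_save() deduplicates it into a compact handle that is stored on the tracker. A minimal sketch of that pairing; the 16-frame depth is a placeholder for REF_TRACKER_STACK_ENTRIES.

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t snapshot_caller(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	/* skipnr = 1 drops this helper's own frame from the trace. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* Identical traces collapse to the same depot handle. */
	return stack_depot_save(entries, nr_entries, gfp);
}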
D | fault-inject.c
     70  unsigned long entries[MAX_STACK_TRACE_DEPTH];                      in fail_stacktrace()  local
     77  nr_entries = stack_trace_save(entries, depth, 1);                  in fail_stacktrace()
     79  if (attr->reject_start <= entries[n] &&                            in fail_stacktrace()
     80  entries[n] < attr->reject_end)                                     in fail_stacktrace()
     82  if (attr->require_start <= entries[n] &&                           in fail_stacktrace()
     83  entries[n] < attr->require_end)                                    in fail_stacktrace()
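fail_stacktrace() uses the saved entries[] to decide whether the current call site may fail: any frame inside the reject range vetoes the injection, and when a require range is configured at least one frame must fall inside it. A reduced sketch of that filter, with the ranges passed as plain parameters rather than read from struct fault_attr, and the "no require range configured" default treated as zeroed bounds:

#include <linux/stacktrace.h>
#include <linux/types.h>

static bool stack_allows_failure(unsigned long reject_start,
				 unsigned long reject_end,
				 unsigned long require_start,
				 unsigned long require_end)
{
	unsigned long entries[32];
	unsigned int nr_entries, n;
	bool found = (require_start == 0 && require_end == 0);

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	for (n = 0; n < nr_entries; n++) {
		if (reject_start <= entries[n] && entries[n] < reject_end)
			return false;		/* rejected frame on the stack */
		if (require_start <= entries[n] && entries[n] < require_end)
			found = true;		/* required frame seen */
	}
	return found;
}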
D | Kconfig
    492  Support entries which occupy multiple consecutive indices in the
D | Kconfig.debug
   2021  bool "Debugfs entries for fault-injection capabilities"