/mm/

D | page_pinner.c |
      79  unsigned long entries[4];   in register_failure_stack() (local)
      82  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);   in register_failure_stack()
      83  failure_handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);   in register_failure_stack()
     118  unsigned long entries[PAGE_PINNER_STACK_DEPTH];   in save_stack() (local)
     122  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);   in save_stack()
     123  handle = stack_depot_save(entries, nr_entries, flags);   in save_stack()
     194  unsigned long *entries;   in print_page_pinner() (local)
     232  nr_entries = stack_depot_fetch(record->handle, &entries);   in print_page_pinner()
     233  ret += stack_trace_snprint(kbuf + ret, count - ret, entries,   in print_page_pinner()

D | page_owner.c |
      56  unsigned long entries[4];   in create_dummy_stack() (local)
      59  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);   in create_dummy_stack()
      60  return stack_depot_save(entries, nr_entries, GFP_KERNEL);   in create_dummy_stack()
     122  unsigned long entries[PAGE_OWNER_STACK_DEPTH];   in save_stack() (local)
     138  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);   in save_stack()
     139  handle = stack_depot_save(entries, nr_entries, flags);   in save_stack()
     370  unsigned long *entries;   in print_page_owner() (local)
     402  nr_entries = stack_depot_fetch(handle, &entries);   in print_page_owner()
     403  ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);   in print_page_owner()
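
The page_pinner.c and page_owner.c hits above (and the kasan/common.c ones further down) share one stackdepot idiom: capture the current call chain into a fixed on-stack array with stack_trace_save(), deduplicate it into a compact handle with stack_depot_save(), and later expand the handle back into printable frames with stack_depot_fetch() plus stack_trace_snprint(). A minimal sketch of that round trip; DEMO_STACK_DEPTH and the demo_* names are illustrative, not from either file:

    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    #define DEMO_STACK_DEPTH 16    /* the files use PAGE_OWNER/PAGE_PINNER_STACK_DEPTH */

    /* Capture the current call chain and deduplicate it in the depot. */
    static depot_stack_handle_t demo_save_stack(gfp_t flags)
    {
        unsigned long entries[DEMO_STACK_DEPTH];
        unsigned int nr_entries;

        /* skip the two innermost frames: this helper and its caller */
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        return stack_depot_save(entries, nr_entries, flags);
    }

    /* Expand a handle back into human-readable frames. */
    static int demo_print_stack(depot_stack_handle_t handle, char *buf, size_t size)
    {
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(handle, &entries);
        return stack_trace_snprint(buf, size, entries, nr_entries, 0);
    }

The skip argument of 2 matches both files' save_stack(): it drops the helper and its caller so the stored trace starts at the interesting call site. kasan/common.c uses the __stack_depot_save() variant, whose extra can_alloc flag tells the depot whether it is allowed to allocate a new pool page.
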
D | page_ext.c |
      95  int entries = ARRAY_SIZE(page_ext_ops);   in invoke_need_callbacks() (local)
      98  for (i = 0; i < entries; i++) {   in invoke_need_callbacks()
     112  int entries = ARRAY_SIZE(page_ext_ops);   in invoke_init_callbacks() (local)
     114  for (i = 0; i < entries; i++) {   in invoke_init_callbacks()

D | memcontrol.c |
    4179  for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)   in __mem_cgroup_threshold()
    4180  eventfd_signal(t->entries[i].eventfd, 1);   in __mem_cgroup_threshold()
    4191  for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)   in __mem_cgroup_threshold()
    4192  eventfd_signal(t->entries[i].eventfd, 1);   in __mem_cgroup_threshold()
    4277  new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);   in __mem_cgroup_usage_register_event()
    4286  memcpy(new->entries, thresholds->primary->entries,   in __mem_cgroup_usage_register_event()
    4287  flex_array_size(new, entries, size - 1));   in __mem_cgroup_usage_register_event()
    4290  new->entries[size - 1].eventfd = eventfd;   in __mem_cgroup_usage_register_event()
    4291  new->entries[size - 1].threshold = threshold;   in __mem_cgroup_usage_register_event()
    4294  sort(new->entries, size, sizeof(*new->entries),   in __mem_cgroup_usage_register_event()
    [all …]
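
The __mem_cgroup_usage_register_event() hits show the overflow-safe helpers for a struct ending in a flexible array member: struct_size() sizes the allocation for 'size' entries, flex_array_size() sizes the memcpy() of the 'size - 1' pre-existing entries, then the new entry is appended and the array re-sorted by threshold. A reduced sketch, with hypothetical demo_* types standing in for memcontrol.c's threshold structures:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/sort.h>
    #include <linux/string.h>

    struct demo_threshold {
        u64 threshold;
    };

    struct demo_thresholds {
        unsigned int size;
        struct demo_threshold entries[];    /* flexible array member */
    };

    static int demo_cmp(const void *a, const void *b)
    {
        const struct demo_threshold *ta = a, *tb = b;

        return ta->threshold < tb->threshold ? -1 : ta->threshold > tb->threshold;
    }

    /* Grow @old by one entry, keeping the array sorted by threshold. */
    static struct demo_thresholds *demo_add(struct demo_thresholds *old, u64 thr)
    {
        unsigned int size = (old ? old->size : 0) + 1;
        struct demo_thresholds *new;

        new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
        if (!new)
            return NULL;

        if (old)    /* copy the existing size - 1 entries */
            memcpy(new->entries, old->entries,
                   flex_array_size(new, entries, size - 1));

        new->entries[size - 1].threshold = thr;
        new->size = size;
        sort(new->entries, size, sizeof(*new->entries), demo_cmp, NULL);
        return new;
    }

The NULL swap callback lets sort() fall back to its generic swap, which is fine for plain-data records like these; struct_size() and flex_array_size() also saturate on overflow rather than silently wrapping, which is the point of using them over open-coded multiplication.
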
D | slab.c |
     533  static struct array_cache *alloc_arraycache(int node, int entries,   in alloc_arraycache() (argument)
     536  size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);   in alloc_arraycache()
     548  init_arraycache(ac, entries, batchcount);   in alloc_arraycache()
     644  static struct alien_cache *__alloc_alien_cache(int node, int entries,   in __alloc_alien_cache() (argument)
     647  size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);   in __alloc_alien_cache()
     653  init_arraycache(&alc->ac, entries, batch);   in __alloc_alien_cache()
    1734  struct kmem_cache *cachep, int entries, int batchcount)   in alloc_kmem_cache_cpus() (argument)
    1740  size = sizeof(void *) * entries + sizeof(struct array_cache);   in alloc_kmem_cache_cpus()
    1748  entries, batchcount);   in alloc_kmem_cache_cpus()
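
All three slab.c sizing lines open-code the same layout: a header struct followed by 'entries' object pointers, i.e. the arithmetic that struct_size() would otherwise wrap. A sketch of the shape being allocated, with a hypothetical demo_array_cache standing in for the file's array_cache/alien_cache:

    #include <linux/slab.h>

    struct demo_array_cache {
        unsigned int avail;    /* objects currently cached */
        unsigned int limit;    /* capacity, i.e. 'entries' */
        void *entry[];         /* trailing array of object pointers */
    };

    static struct demo_array_cache *demo_alloc_arraycache(int node, int entries,
                                                          gfp_t gfp)
    {
        /* mirrors the file's sizeof(void *) * entries + sizeof(struct ...) */
        size_t memsize = sizeof(void *) * entries +
                         sizeof(struct demo_array_cache);
        struct demo_array_cache *ac = kmalloc_node(memsize, gfp, node);

        if (ac) {
            ac->avail = 0;
            ac->limit = entries;
        }
        return ac;
    }
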
D | swapfile.c |
    1421  void swapcache_free_entries(swp_entry_t *entries, int n)   in swapcache_free_entries() (argument)
    1438  sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);   in swapcache_free_entries()
    1440  p = swap_info_get_cont(entries[i], prev);   in swapcache_free_entries()
    1442  swap_entry_free(p, entries[i]);   in swapcache_free_entries()
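
swapcache_free_entries() sorts the batch before freeing it: ordering the entries groups those on the same swap device, so the loop's swap_info_get_cont() can keep reusing the swap_info_struct it already holds instead of dropping and retaking locks per entry. The comparator it passes has the standard sort() shape; a sketch, on the assumption that ordering by swap type (device index) is all that is needed:

    #include <linux/sort.h>
    #include <linux/swap.h>
    #include <linux/swapops.h>

    /* Order swap entries by device so consecutive frees hit the same si. */
    static int demo_swp_entry_cmp(const void *ent1, const void *ent2)
    {
        const swp_entry_t *e1 = ent1, *e2 = ent2;

        return (int)swp_type(*e1) - (int)swp_type(*e2);
    }

    /* usage, as in the line 1438 hit:
     *     sort(entries, n, sizeof(entries[0]), demo_swp_entry_cmp, NULL);
     */
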
D | shmem.c |
    1175  struct page **entries, pgoff_t *indices,   in shmem_find_swap_entries() (argument)
    1202  entries[ret] = page;   in shmem_find_swap_entries()

/mm/kasan/

D | tags.c |
      87  stack_ring.entries = memblock_alloc(   in kasan_init_tags()
      88  sizeof(stack_ring.entries[0]) * stack_ring.size,   in kasan_init_tags()
      90  if (WARN_ON(!stack_ring.entries))   in kasan_init_tags()
     114  entry = &stack_ring.entries[pos % stack_ring.size];   in save_stack_info()

D | common.c |
      35  unsigned long entries[KASAN_STACK_DEPTH];   in kasan_save_stack() (local)
      38  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);   in kasan_save_stack()
      39  return __stack_depot_save(entries, nr_entries, flags, can_alloc);   in kasan_save_stack()

D | report_tags.c |
      62  entry = &stack_ring.entries[i % stack_ring.size];   in kasan_complete_mode_report_info()

D | kasan.h |
     292  struct kasan_stack_ring_entry *entries;   (member)
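
Taken together, the tags.c, report_tags.c, and kasan.h hits describe one structure: a boot-allocated ring of stack records, where writers claim slots via an ever-increasing position taken modulo the ring size, and the reporter later walks entries with the same modulo indexing. A minimal sketch of the pattern; demo_ring is hypothetical, and the real kasan_stack_ring additionally carries a rwlock and richer per-entry state:

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/cache.h>
    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/memblock.h>

    struct demo_ring_entry {
        u64 stamp;    /* per-record payload, illustrative */
    };

    struct demo_ring {
        size_t size;
        atomic64_t pos;    /* ever-increasing write position */
        struct demo_ring_entry *entries;
    };

    static struct demo_ring ring = { .size = 32 * 1024 };

    /* Boot-time allocation, as in the kasan_init_tags() hit. */
    static int __init demo_ring_init(void)
    {
        ring.entries = memblock_alloc(sizeof(ring.entries[0]) * ring.size,
                                      SMP_CACHE_BYTES);
        if (WARN_ON(!ring.entries))
            return -ENOMEM;
        return 0;
    }

    /* Writers claim the next slot; old records are overwritten once
     * the position wraps past ring.size. */
    static struct demo_ring_entry *demo_ring_claim(void)
    {
        u64 pos = atomic64_fetch_add(1, &ring.pos);

        return &ring.entries[pos % ring.size];
    }

Because the position only grows, the ring silently overwrites its oldest records, which suits a fixed boot-time memory budget: the reporter simply scans whatever window of entries has survived.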