Searched refs:entries (Results 1 – 10 of 10) sorted by relevance
/mm/
page_ext.c
     73  int entries = ARRAY_SIZE(page_ext_ops);   [in invoke_need_callbacks(); local]
     75  for (i = 0; i < entries; i++) {           [in invoke_need_callbacks()]
     86  int entries = ARRAY_SIZE(page_ext_ops);   [in invoke_init_callbacks(); local]
     88  for (i = 0; i < entries; i++) {           [in invoke_init_callbacks()]
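Both hits are the same kernel idiom: a fixed, compile-time table of operations structs sized with ARRAY_SIZE() and walked to invoke per-feature callbacks. A minimal userspace sketch of the pattern, with illustrative struct and callback names (the real table is page_ext_ops; its element type is not shown in these results):

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* hypothetical stand-in for the kernel's page_ext operations struct */
struct ext_operations {
	bool (*need)(void);	/* does this feature want extra space? */
	void (*init)(void);	/* one-time setup if it does */
};

static bool owner_need(void) { return true; }
static void owner_init(void) { puts("owner init"); }

static struct ext_operations ext_ops[] = {
	{ .need = owner_need, .init = owner_init },
};

/* same shape as invoke_need_callbacks(): does any feature need space? */
static bool invoke_need_callbacks(void)
{
	int i, entries = ARRAY_SIZE(ext_ops);

	for (i = 0; i < entries; i++)
		if (ext_ops[i].need && ext_ops[i].need())
			return true;
	return false;
}

/* same shape as invoke_init_callbacks(): run every init hook */
static void invoke_init_callbacks(void)
{
	int i, entries = ARRAY_SIZE(ext_ops);

	for (i = 0; i < entries; i++)
		if (ext_ops[i].init)
			ext_ops[i].init();
}

int main(void)
{
	if (invoke_need_callbacks())
		invoke_init_callbacks();
	return 0;
}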
slab_common.c
    186  memcpy(new->entries, old->entries,   [in update_memcg_params()]
    526  if (arr->entries[idx])               [in memcg_create_kmem_cache()]
    557  arr->entries[idx] = s;               [in memcg_create_kmem_cache()]
    584  c = arr->entries[idx];               [in memcg_deactivate_kmem_caches()]
    589  arr->entries[idx] = NULL;            [in memcg_deactivate_kmem_caches()]
    653  c = arr->entries[i];                 [in shutdown_memcg_caches()]
    670  arr->entries[i] = NULL;              [in shutdown_memcg_caches()]
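Here arr->entries is the per-memcg array of kmem_cache pointers: slots are installed at create time, cleared at deactivate/shutdown, and the whole array is grown by copy in update_memcg_params(). A hedged userspace sketch of that grow-by-copy step (names simplified; the kernel publishes the new array to concurrent readers and retires the old one via RCU rather than free()):

#include <stdlib.h>
#include <string.h>

struct cache_array {
	size_t size;
	void *entries[];	/* one slot per memcg-style index */
};

/* Grow to new_size slots, preserving existing entries, in the spirit
 * of update_memcg_params(). */
static struct cache_array *grow_cache_array(struct cache_array *old,
					    size_t new_size)
{
	struct cache_array *new;

	new = calloc(1, sizeof(*new) + new_size * sizeof(void *));
	if (!new)
		return NULL;
	new->size = new_size;
	if (old) {
		memcpy(new->entries, old->entries,
		       old->size * sizeof(void *));
		free(old);	/* the kernel defers this with RCU instead */
	}
	return new;
}

int main(void)
{
	struct cache_array *arr = grow_cache_array(NULL, 4);

	arr->entries[0] = &arr;		/* install a slot, as at line 557 */
	arr = grow_cache_array(arr, 8);	/* resize keeps it, as at line 186 */
	free(arr);
	return 0;
}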
memcontrol.c
   3316  for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)         [in __mem_cgroup_threshold()]
   3317  eventfd_signal(t->entries[i].eventfd, 1);                                [in __mem_cgroup_threshold()]
   3328  for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)   [in __mem_cgroup_threshold()]
   3329  eventfd_signal(t->entries[i].eventfd, 1);                                [in __mem_cgroup_threshold()]
   3424  memcpy(new->entries, thresholds->primary->entries, (size - 1) *          [in __mem_cgroup_usage_register_event()]
   3429  new->entries[size - 1].eventfd = eventfd;                                [in __mem_cgroup_usage_register_event()]
   3430  new->entries[size - 1].threshold = threshold;                            [in __mem_cgroup_usage_register_event()]
   3433  sort(new->entries, size, sizeof(struct mem_cgroup_threshold),            [in __mem_cgroup_usage_register_event()]
   3439  if (new->entries[i].threshold <= usage) {                                [in __mem_cgroup_usage_register_event()]
   3483  int i, j, size, entries;                                                 [in __mem_cgroup_usage_unregister_event(); local]
  [all …]
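These are the memcg usage-threshold notifiers: registration appends the new (threshold, eventfd) pair and re-sorts the array, and __mem_cgroup_threshold() later walks the sorted entries from a cached index, firing eventfds for every threshold the usage counter crossed in either direction. A simplified sketch of that walk, with eventfd_signal() stubbed out as a printf (struct names mirror the snippet but are trimmed down):

#include <stdio.h>
#include <stdlib.h>

struct threshold {
	unsigned long threshold;
	int eventfd;	/* stand-in for the kernel's eventfd context */
};

struct threshold_ary {
	int current_threshold;	/* index of last threshold <= usage */
	int size;
	struct threshold entries[];
};

static void signal_event(int eventfd)
{
	printf("signal eventfd %d\n", eventfd);
}

/* Mirrors __mem_cgroup_threshold()'s two walks over the sorted array. */
static void check_thresholds(struct threshold_ary *t, unsigned long usage)
{
	int i = t->current_threshold;

	/* usage fell: fire events for thresholds we dropped below */
	for (; i >= 0 && t->entries[i].threshold > usage; i--)
		signal_event(t->entries[i].eventfd);
	i++;
	/* usage rose: fire events for thresholds we crossed upward */
	for (; i < t->size && t->entries[i].threshold <= usage; i++)
		signal_event(t->entries[i].eventfd);
	/* remember where we stopped for the next update */
	t->current_threshold = i - 1;
}

int main(void)
{
	struct threshold_ary *t =
		malloc(sizeof(*t) + 2 * sizeof(struct threshold));

	t->size = 2;
	t->current_threshold = -1;	/* usage starts below every threshold */
	t->entries[0] = (struct threshold){ .threshold = 100, .eventfd = 3 };
	t->entries[1] = (struct threshold){ .threshold = 200, .eventfd = 4 };
	check_thresholds(t, 150);	/* crosses the first threshold only */
	free(t);
	return 0;
}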
page_owner.c
     69  .entries = &page_ext->trace_entries[0],   [in __set_page_owner()]
    107  .entries = &page_ext->trace_entries[0],   [in print_page_owner()]
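Both hits build a struct stack_trace whose .entries points at a fixed buffer owned by the per-page page_ext record, so the saved allocation trace lives with the page itself. A rough userspace analogue of that caller-owned-buffer pattern, using glibc's backtrace(3) where the kernel would call save_stack_trace() (names are illustrative):

#include <execinfo.h>

#define TRACE_DEPTH 8

/* per-object record owning its own trace buffer, like page_ext */
struct obj_ext {
	int nr_entries;
	void *trace_entries[TRACE_DEPTH];
};

static void set_obj_owner(struct obj_ext *ext)
{
	/* backtrace() fills the caller-owned buffer, much as
	 * save_stack_trace() fills trace.entries in __set_page_owner() */
	ext->nr_entries = backtrace(&ext->trace_entries[0], TRACE_DEPTH);
}

static void print_obj_owner(struct obj_ext *ext)
{
	/* read back the same buffer, as print_page_owner() does */
	backtrace_symbols_fd(&ext->trace_entries[0], ext->nr_entries, 1);
}

int main(void)
{
	struct obj_ext ext;

	set_obj_owner(&ext);
	print_obj_owner(&ext);
	return 0;
}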
slab.c
    234  void *entries[BOOT_CPUCACHE_ENTRIES];                                       [member]
    655  static struct array_cache *alloc_arraycache(int node, int entries,          [in alloc_arraycache(); argument]
    658  size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);     [in alloc_arraycache()]
    670  init_arraycache(ac, entries, batchcount);                                   [in alloc_arraycache()]
    855  static struct alien_cache *__alloc_alien_cache(int node, int entries,       [in __alloc_alien_cache(); argument]
    858  size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);     [in __alloc_alien_cache()]
    864  init_arraycache(&alc->ac, entries, batch);                                  [in __alloc_alien_cache()]
   1993  struct kmem_cache *cachep, int entries, int batchcount)                     [in alloc_kmem_cache_cpus(); argument]
   1999  size = sizeof(void *) * entries + sizeof(struct array_cache);               [in alloc_kmem_cache_cpus()]
   2007  entries, batchcount);                                                       [in alloc_kmem_cache_cpus()]
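Every allocation site here computes the same size, sizeof(void *) * entries + sizeof(struct array_cache): the per-CPU array cache is a small header immediately followed by an inline array of object pointers, obtained in a single allocation. A minimal sketch of that header-plus-trailing-array layout, assuming a trimmed-down field set:

#include <stdlib.h>

struct array_cache {
	unsigned int avail;		/* objects currently cached */
	unsigned int limit;		/* capacity of entry[] */
	unsigned int batchcount;	/* refill/drain granularity */
	void *entry[];			/* trailing array of object pointers */
};

static struct array_cache *alloc_arraycache(int entries, int batchcount)
{
	/* one block: header plus `entries` pointers, exactly the memsize
	 * computation visible at lines 658, 858 and 1999 */
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = calloc(1, memsize);

	if (ac) {
		ac->avail = 0;
		ac->limit = entries;
		ac->batchcount = batchcount;
	}
	return ac;
}

int main(void)
{
	struct array_cache *ac = alloc_arraycache(120, 60);

	free(ac);
	return 0;
}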
kmemleak.c
    383  trace.entries = object->trace;   [in dump_object_info()]
    527  stack_trace.entries = trace;     [in __save_stack_trace()]
   1851  trace.entries = log->trace;      [in print_log_trace()]
slab.h
    226  cachep = lockless_dereference(arr->entries[idx]);   [in cache_from_memcg_idx()]
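This single hit is the read side of the slab_common.c array above: lockless_dereference() loads the slot with a data-dependency barrier, so a reader that follows the pointer sees the cache fully initialized by the writer's paired release store. In portable C11 the closest equivalent is an acquire (or consume) load; a hedged sketch:

#include <stdatomic.h>
#include <stddef.h>

struct kmem_cache { int object_size; };

struct memcg_cache_array {
	/* slots are published with rcu_assign_pointer()-style release stores */
	_Atomic(struct kmem_cache *) entries[64];
};

static struct kmem_cache *cache_from_idx(struct memcg_cache_array *arr,
					 size_t idx)
{
	/* acquire load stands in for lockless_dereference(): the cache's
	 * fields are visible before we dereference the pointer */
	return atomic_load_explicit(&arr->entries[idx],
				    memory_order_acquire);
}

int main(void)
{
	static struct memcg_cache_array arr;
	static struct kmem_cache c = { .object_size = 64 };

	/* writer side: release store plays the rcu_assign_pointer() role */
	atomic_store_explicit(&arr.entries[0], &c, memory_order_release);
	return cache_from_idx(&arr, 0)->object_size == 64 ? 0 : 1;
}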
filemap.c
   1234  struct page **entries, pgoff_t *indices)   [in find_get_entries(); argument]
   1271  entries[ret] = page;                       [in find_get_entries()]
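find_get_entries() fills two caller-supplied parallel arrays, entries[i] with the page (or shadow entry) found and indices[i] with its page-cache offset, and returns the number of slots filled. A hypothetical stand-alone sketch of that batched parallel-array lookup shape (the real function walks the page-cache tree under RCU; a plain sparse array stands in here):

#include <stddef.h>

struct page;	/* opaque for the sketch */

/* Scan a sparse table from `start`, filling parallel arrays of items
 * and the offsets they were found at; return the count filled. */
static unsigned int scan_entries(struct page **table, size_t table_size,
				 size_t start, struct page **entries,
				 size_t *indices, unsigned int nr)
{
	unsigned int ret = 0;

	for (size_t i = start; i < table_size && ret < nr; i++) {
		if (!table[i])
			continue;
		indices[ret] = i;		/* offset, like indices[ret] */
		entries[ret] = table[i];	/* item, like entries[ret] = page */
		ret++;
	}
	return ret;
}

int main(void)
{
	struct page *table[8] = { 0 };
	struct page *entries[4];
	size_t indices[4];

	table[2] = (struct page *)0x1;	/* pretend slots 2 and 5 are populated */
	table[5] = (struct page *)0x1;
	return scan_entries(table, 8, 0, entries, indices, 4) == 2 ? 0 : 1;
}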
slub.c
    554  trace.entries = p->addrs;                           [in set_track()]
    562  trace.entries[trace.nr_entries - 1] == ULONG_MAX)   [in set_track()]
/mm/kasan/
kasan.c
    495  if (in_irqentry_text(trace->entries[i])) {          [in filter_irq_stacks()]
    504  unsigned long entries[KASAN_STACK_DEPTH];           [in save_stack(); local]
    507  .entries = entries,                                 [in save_stack()]
    515  trace.entries[trace.nr_entries-1] == ULONG_MAX)     [in save_stack()]
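The slub.c and kasan.c hits end with the same fix-up: the old save_stack_trace() API appends a ULONG_MAX end marker to traces that do not fill the buffer, and callers strip it before storing the trace. kasan additionally cuts the trace at the first irq-entry frame, since frames below it belong to the interrupted task rather than the buggy context. A sketch of both post-processing steps, with the irq-text predicate stubbed out:

#include <limits.h>

/* stand-in for in_irqentry_text(); the real check compares the address
 * against the kernel's irq entry text sections */
static int in_irqentry_text(unsigned long addr)
{
	(void)addr;
	return 0;
}

struct stack_trace {
	unsigned int nr_entries;
	unsigned long *entries;
};

/* like kasan's filter_irq_stacks(): keep frames up to and including the
 * first irq-entry frame, drop everything below it */
static void filter_irq_stacks(struct stack_trace *trace)
{
	for (unsigned int i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			trace->nr_entries = i + 1;
			break;
		}
}

/* both set_track() and save_stack() drop the ULONG_MAX end marker */
static void trim_end_marker(struct stack_trace *trace)
{
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries - 1] == ULONG_MAX)
		trace->nr_entries--;
}

int main(void)
{
	unsigned long buf[] = { 0x1000, 0x2000, ULONG_MAX };
	struct stack_trace trace = { .nr_entries = 3, .entries = buf };

	filter_irq_stacks(&trace);
	trim_end_marker(&trace);
	return trace.nr_entries == 2 ? 0 : 1;
}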