/mm/ |
D | swap_slots.c |
    114   struct swap_slots_cache *cache;  in alloc_swap_slot_cache() local
    135   cache = &per_cpu(swp_slots, cpu);  in alloc_swap_slot_cache()
    136   if (cache->slots || cache->slots_ret) {  in alloc_swap_slot_cache()
    146   if (!cache->lock_initialized) {  in alloc_swap_slot_cache()
    147   mutex_init(&cache->alloc_lock);  in alloc_swap_slot_cache()
    148   spin_lock_init(&cache->free_lock);  in alloc_swap_slot_cache()
    149   cache->lock_initialized = true;  in alloc_swap_slot_cache()
    151   cache->nr = 0;  in alloc_swap_slot_cache()
    152   cache->cur = 0;  in alloc_swap_slot_cache()
    153   cache->n_ret = 0;  in alloc_swap_slot_cache()
    [all …]
|
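The alloc_swap_slot_cache() fragments above follow a common per-CPU cache pattern: bail out if the slot arrays already exist, initialize the locks exactly once, then reset the counters. Below is a hedged reconstruction of just that control flow; it assumes the kernel context of mm/swap_slots.c (the swp_slots per-CPU variable and struct swap_slots_cache), and the slot-array allocation, error handling, and locking done by the elided lines are deliberately omitted.

/* Sketch only: mirrors the excerpted lines, not the full function. */
static int alloc_swap_slot_cache_sketch(unsigned int cpu)
{
	struct swap_slots_cache *cache;

	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret)
		return 0;			/* already populated for this CPU */

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);		/* serializes slot allocation */
		spin_lock_init(&cache->free_lock);	/* protects the slot-return path */
		cache->lock_initialized = true;
	}
	cache->nr = 0;		/* no cached slots yet */
	cache->cur = 0;		/* index of the next slot to hand out */
	cache->n_ret = 0;	/* no slots pending return */
	return 0;
}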
D | slab.c |
    210    static int drain_freelist(struct kmem_cache *cache,
    375    static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,  in index_to_obj() argument
    378    return page->s_mem + cache->size * idx;  in index_to_obj()
    2192   static int drain_freelist(struct kmem_cache *cache,  in drain_freelist() argument
    2217   n->free_objects -= cache->num;  in drain_freelist()
    2219   slab_destroy(cache, page);  in drain_freelist()
    2554   static void slab_map_pages(struct kmem_cache *cache, struct page *page,  in slab_map_pages() argument
    2557   page->slab_cache = cache;  in slab_map_pages()
    2685   static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)  in verify_redzone_free() argument
    2689   redzone1 = *dbg_redzone1(cache, obj);  in verify_redzone_free()
    [all …]
|
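The index_to_obj() line above shows the core SLAB layout rule: a page records the start of its object area in page->s_mem, and objects sit at fixed cache->size strides from it. The user-space toy below demonstrates that arithmetic and its inverse; the struct and helper names are illustrative stand-ins, not the kernel's real types.

#include <stdio.h>
#include <stddef.h>

struct toy_cache { size_t size; };	/* object stride, like kmem_cache->size */
struct toy_page  { void *s_mem; };	/* start of object area, like page->s_mem */

/* Same arithmetic as the excerpted index_to_obj(). */
static void *index_to_obj(struct toy_cache *cache, struct toy_page *page, unsigned int idx)
{
	return (char *)page->s_mem + cache->size * idx;
}

/* Inverse mapping: recover the index from an object address. */
static unsigned int obj_to_index(struct toy_cache *cache, struct toy_page *page, void *obj)
{
	return (unsigned int)(((char *)obj - (char *)page->s_mem) / cache->size);
}

int main(void)
{
	static char area[4096];
	struct toy_cache cache = { .size = 256 };
	struct toy_page page = { .s_mem = area };

	void *obj = index_to_obj(&cache, &page, 3);
	printf("object 3 at offset %td, index back = %u\n",
	       (char *)obj - area, obj_to_index(&cache, &page, obj));
	return 0;
}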
D | Kconfig |
    440   bool "Enable cleancache driver to cache clean pages if tmem is present"
    442   Cleancache can be thought of as a page-granularity victim cache
    462   bool "Enable frontswap to cache swap pages if tmem is present"
    539   bool "Compressed cache for swap pages (EXPERIMENTAL)"
    543   A lightweight compressed cache for swap pages. It takes
    557   prompt "Compressed cache for swap pages default compressor"
    561   Selects the default compression algorithm for the compressed cache
    623   prompt "Compressed cache for swap pages default allocator"
    627   Selects the default allocator for the compressed cache for
    664   bool "Enable the compressed cache for swap pages by default"
    [all …]
|
D | slab.h | 622 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
|
D | slub.c |
    3520   void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)  in ___cache_free() argument
    3522   do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);  in ___cache_free()
|
/mm/kasan/ |
D | common.c |
    118   void __kasan_cache_create_kmalloc(struct kmem_cache *cache)  in __kasan_cache_create_kmalloc() argument
    120   cache->kasan_info.is_kmalloc = true;  in __kasan_cache_create_kmalloc()
    133   void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)  in __kasan_unpoison_object_data() argument
    135   kasan_unpoison(object, cache->object_size, false);  in __kasan_unpoison_object_data()
    138   void __kasan_poison_object_data(struct kmem_cache *cache, void *object)  in __kasan_poison_object_data() argument
    140   kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),  in __kasan_poison_object_data()
    158   static inline u8 assign_tag(struct kmem_cache *cache,  in assign_tag() argument
    168   if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))  in assign_tag()
    174   return (u8)obj_to_index(cache, virt_to_head_page(object), (void *)object);  in assign_tag()
    184   void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,  in __kasan_init_slab_obj() argument
    [all …]
|
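Two of the cache-level KASAN hooks in the common.c excerpt are visible in full; they are reproduced below with orientation comments added. The poison counterpart rounds object_size up to KASAN_GRANULE_SIZE before poisoning, but its poison value is truncated in the excerpt and is not guessed at here.

/* Mark a cache as a kmalloc cache so later KASAN hooks can treat it accordingly. */
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

/* Make the object's payload accessible again, e.g. when the slab allocator
 * is about to hand the object out to its user. */
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}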
D | generic.c |
    199   void kasan_cache_shrink(struct kmem_cache *cache)  in kasan_cache_shrink() argument
    201   kasan_quarantine_remove_cache(cache);  in kasan_cache_shrink()
    204   void kasan_cache_shutdown(struct kmem_cache *cache)  in kasan_cache_shutdown() argument
    206   if (!__kmem_cache_empty(cache))  in kasan_cache_shutdown()
    207   kasan_quarantine_remove_cache(cache);  in kasan_cache_shutdown()
    355   void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,  in kasan_cache_create() argument
    378   cache->kasan_info.alloc_meta_offset = *size;  in kasan_cache_create()
    388   cache->kasan_info.alloc_meta_offset = 0;  in kasan_cache_create()
    403   if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||  in kasan_cache_create()
    404   cache->object_size < sizeof(struct kasan_free_meta)) {  in kasan_cache_create()
    [all …]
|
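The two quarantine-related callbacks in the generic.c excerpt are short enough to reproduce whole; the comments below spell out why both paths drain the quarantine. The reasoning in the comments is inferred from context, not stated in the excerpt.

/* Shrinking a cache is supposed to give memory back, so drop any of its
 * objects that are still parked in the KASAN quarantine. */
void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

/* Before a cache is destroyed, make sure the quarantine holds no objects
 * that still belong to it; otherwise they would later be freed into a
 * cache that no longer exists. */
void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}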
D | quarantine.c |
    134   static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)  in qlink_to_object() argument
    140   return ((void *)free_info) - cache->kasan_info.free_meta_offset;  in qlink_to_object()
    143   static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)  in qlink_free() argument
    145   void *object = qlink_to_object(qlink, cache);  in qlink_free()
    146   struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);  in qlink_free()
    158   if (slab_want_init_on_free(cache) &&  in qlink_free()
    159   cache->kasan_info.free_meta_offset == 0)  in qlink_free()
    168   ___cache_free(cache, object, _THIS_IP_);  in qlink_free()
    174   static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)  in qlist_free_all() argument
    184   cache ? cache : qlink_to_cache(qlink);  in qlist_free_all()
    [all …]
|
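The quarantine.c fragments show how a quarantined object is released: the qlist_node is embedded in the object's kasan_free_meta, so the object address is recovered by subtracting the cache's free_meta_offset, and the object is then handed back to the slab allocator with ___cache_free(). Below is a hedged reconstruction of that path; the container_of() step and the quarantine_link member name are filled in from context (they are not visible in the excerpt), and the metadata bookkeeping and init_on_free wipe performed by the elided lines are omitted.

/* Sketch only: follows the excerpted lines of qlink_to_object()/qlink_free(). */
static void *qlink_to_object_sketch(struct qlist_node *qlink, struct kmem_cache *cache)
{
	/* Assumed: the qlist_node lives inside struct kasan_free_meta. */
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta, quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

static void qlink_free_sketch(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object_sketch(qlink, cache);

	/* Actually return the object to its slab cache. */
	___cache_free(cache, object, _THIS_IP_);
}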
D | kasan_test.c |
    677   struct kmem_cache *cache;  in kmem_cache_oob() local
    679   cache = kmem_cache_create("test_cache", size, 0, 0, NULL);  in kmem_cache_oob()
    680   KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);  in kmem_cache_oob()
    682   p = kmem_cache_alloc(cache, GFP_KERNEL);  in kmem_cache_oob()
    685   kmem_cache_destroy(cache);  in kmem_cache_oob()
    691   kmem_cache_free(cache, p);  in kmem_cache_oob()
    692   kmem_cache_destroy(cache);  in kmem_cache_oob()
    700   struct kmem_cache *cache;  in kmem_cache_accounted() local
    702   cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);  in kmem_cache_accounted()
    703   KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);  in kmem_cache_accounted()
    [all …]
|
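The kmem_cache_oob() lines above outline the usual shape of a KASAN KUnit cache test: create a dedicated cache, allocate one object, perform the bad access under the test harness, then free the object and destroy the cache. The sketch below fills in that shape; the object size, the exact out-of-bounds expression, and the KUNIT_EXPECT_KASAN_FAIL() helper (which is local to kasan_test.c) are assumptions, not the excerpt's exact code.

static void kmem_cache_oob_sketch(struct kunit *test)
{
	char *p;
	size_t size = 200;	/* arbitrary object size for the test cache */
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Read one byte past the object; KASAN is expected to report this. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)p)[size]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}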
D | kasan.h |
    214   struct kmem_cache *cache;  member
    348   void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object);
    350   static inline void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object) { }  in kasan_print_aux_stacks() argument
    360   void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
    361   void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
    362   struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
    364   struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
    367   static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }  in kasan_init_cache_meta() argument
    368   static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }  in kasan_init_object_meta() argument
    373   void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
    [all …]
|
D | report.c |
    240   static void describe_object_addr(const void *addr, struct kmem_cache *cache,  in describe_object_addr() argument
    250   object, cache->name, cache->object_size);  in describe_object_addr()
    255   } else if (access_addr >= object_addr + cache->object_size) {  in describe_object_addr()
    257   rel_bytes = access_addr - (object_addr + cache->object_size);  in describe_object_addr()
    265   rel_bytes, rel_type, cache->object_size, (void *)object_addr,  in describe_object_addr()
    266   (void *)(object_addr + cache->object_size));  in describe_object_addr()
    281   kasan_print_aux_stacks(info->cache, info->object);  in describe_object_stacks()
    288   describe_object_addr(addr, info->cache, info->object);  in describe_object()
    315   if (info->cache && info->object) {  in print_address_description()
    443   info->cache = page->slab_cache;  in complete_report_info()
    [all …]
|
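The describe_object_addr() fragments show the report code classifying where a bad access falls relative to the slab object: inside it, some bytes to the left of it, or some bytes to the right of its end. The user-space toy below demonstrates that classification; the function name and message wording are illustrative, not the kernel's exact report text.

#include <stdio.h>
#include <stdint.h>

/* Report the position of 'access' relative to an object of 'obj_size'
 * bytes starting at 'obj_start', in the style of a KASAN report line. */
static void describe(uintptr_t access, uintptr_t obj_start, size_t obj_size)
{
	if (access < obj_start)
		printf("%zu bytes to the left of %zu-byte object\n",
		       (size_t)(obj_start - access), obj_size);
	else if (access >= obj_start + obj_size)
		printf("%zu bytes to the right of %zu-byte object\n",
		       (size_t)(access - (obj_start + obj_size)), obj_size);
	else
		printf("%zu bytes inside of %zu-byte object\n",
		       (size_t)(access - obj_start), obj_size);
}

int main(void)
{
	describe(0x1000 - 4, 0x1000, 64);	/* left of the object  */
	describe(0x1000 + 70, 0x1000, 64);	/* right of the object */
	describe(0x1000 + 8, 0x1000, 64);	/* inside the object   */
	return 0;
}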
D | tags.c |
    95    static void save_stack_info(struct kmem_cache *cache, void *object,  in save_stack_info() argument
    123   WRITE_ONCE(entry->size, cache->object_size);  in save_stack_info()
    136   void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)  in kasan_save_alloc_info() argument
    138   save_stack_info(cache, object, flags, false);  in kasan_save_alloc_info()
    141   void kasan_save_free_info(struct kmem_cache *cache, void *object)  in kasan_save_free_info() argument
    143   save_stack_info(cache, object, GFP_NOWAIT, true);  in kasan_save_free_info()
|
D | report_generic.c |
    138   if (!info->cache || !info->object)  in kasan_complete_mode_report_info()
    141   alloc_meta = kasan_get_alloc_meta(info->cache, info->object);  in kasan_complete_mode_report_info()
    148   free_meta = kasan_get_free_meta(info->cache, info->object);  in kasan_complete_mode_report_info()
    159   void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object)  in kasan_print_aux_stacks() argument
    163   alloc_meta = kasan_get_alloc_meta(cache, object);  in kasan_print_aux_stacks()
|
D | report_tags.c | 40 if ((!info->cache || !info->object) && !info->bug_type) { in kasan_complete_mode_report_info()
|
/mm/kfence/ |
D | core.c |
    331   static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,  in kfence_guarded_alloc() argument
    386   meta->addr = ALIGN_DOWN(meta->addr, cache->align);  in kfence_guarded_alloc()
    394   WRITE_ONCE(meta->cache, cache);  in kfence_guarded_alloc()
    402   page->slab_cache = cache;  in kfence_guarded_alloc()
    419   if (unlikely(slab_want_init_on_alloc(gfp, cache)))  in kfence_guarded_alloc()
    421   if (cache->ctor)  in kfence_guarded_alloc()
    422   cache->ctor(addr);  in kfence_guarded_alloc()
    472   if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))  in kfence_guarded_free()
    806   if (READ_ONCE(meta->cache) != s ||  in kfence_shutdown_cache()
    811   in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;  in kfence_shutdown_cache()
    [all …]
|
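The kfence_guarded_alloc() fragments show how a guarded object gets tied to its kmem_cache: the metadata slot records the owning cache, the backing page is stamped as belonging to it, and the object is initialized the way the slab allocator would (zero-on-alloc and constructor). The sketch below reconstructs only that cache-association step; guard-page setup, freelist handling, locking, and the exact init size are omitted or simplified, and the helper name is invented for illustration.

/* Sketch only: condenses the excerpted lines of kfence_guarded_alloc(). */
static void kfence_bind_to_cache_sketch(struct kfence_metadata *meta,
					struct kmem_cache *cache,
					struct page *page, gfp_t gfp, void *addr)
{
	/* Honor the cache's alignment for the object's start address. */
	meta->addr = ALIGN_DOWN(meta->addr, cache->align);

	/* Publish the owning cache; readers such as kfence_shutdown_cache()
	 * pair this with READ_ONCE() (see the excerpt). */
	WRITE_ONCE(meta->cache, cache);

	/* Make page-to-cache lookups resolve to the right kmem_cache. */
	page->slab_cache = cache;

	/* Mimic the slab allocator's init-on-alloc and constructor behavior. */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memset(addr, 0, cache->object_size);
	if (cache->ctor)
		cache->ctor(addr);
}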
D | report.c |
    128   const struct kmem_cache *const cache = meta->cache;  in kfence_print_object() local
    139   size, (cache && cache->name) ? cache->name : "<destroyed>");  in kfence_print_object()
    309   kpp->kp_slab_cache = meta->cache;  in __kfence_obj_info()
|
D | kfence.h | 79 struct kmem_cache *cache; member
|