/mm/

D | swap_slots.c |
  117  struct swap_slots_cache *cache;    in alloc_swap_slot_cache() local
  138  cache = &per_cpu(swp_slots, cpu);    in alloc_swap_slot_cache()
  139  if (cache->slots || cache->slots_ret)    in alloc_swap_slot_cache()
  142  if (!cache->lock_initialized) {    in alloc_swap_slot_cache()
  143  mutex_init(&cache->alloc_lock);    in alloc_swap_slot_cache()
  144  spin_lock_init(&cache->free_lock);    in alloc_swap_slot_cache()
  145  cache->lock_initialized = true;    in alloc_swap_slot_cache()
  147  cache->nr = 0;    in alloc_swap_slot_cache()
  148  cache->cur = 0;    in alloc_swap_slot_cache()
  149  cache->n_ret = 0;    in alloc_swap_slot_cache()
  [all …]

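The hits above show alloc_swap_slot_cache() lazily initializing a per-CPU slot cache: the locks are set up exactly once, while the counters are reset on every (re)allocation. A minimal userspace sketch of that init-once pattern, using two pthread mutexes in place of the kernel's mutex and spinlock (struct and function names here are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct slot_cache {
            pthread_mutex_t alloc_lock;     /* a mutex in the kernel, too */
            pthread_mutex_t free_lock;      /* a spinlock in the kernel */
            bool lock_initialized;
            size_t nr, cur, n_ret;
    };

    static void cache_init(struct slot_cache *cache)
    {
            if (!cache->lock_initialized) { /* locks survive cache reuse */
                    pthread_mutex_init(&cache->alloc_lock, NULL);
                    pthread_mutex_init(&cache->free_lock, NULL);
                    cache->lock_initialized = true;
            }
            /* counters are reset every time the cache is (re)allocated */
            cache->nr = 0;
            cache->cur = 0;
            cache->n_ret = 0;
    }
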
D | slab.c |
  209   static int drain_freelist(struct kmem_cache *cache,
  374   static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,    in index_to_obj() argument
  377   return page->s_mem + cache->size * idx;    in index_to_obj()
  2181  static int drain_freelist(struct kmem_cache *cache,    in drain_freelist() argument
  2206  n->free_objects -= cache->num;    in drain_freelist()
  2208  slab_destroy(cache, page);    in drain_freelist()
  2556  static void slab_map_pages(struct kmem_cache *cache, struct page *page,    in slab_map_pages() argument
  2559  page->slab_cache = cache;    in slab_map_pages()
  2691  static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)    in verify_redzone_free() argument
  2695  redzone1 = *dbg_redzone1(cache, obj);    in verify_redzone_free()
  [all …]

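index_to_obj() at line 377 is plain placement arithmetic: objects sit back to back from the slab page's s_mem base, so object idx lives at s_mem + size * idx. A standalone sketch of the same computation (buffer and object size are made up):

    #include <stdio.h>
    #include <stddef.h>

    /* object idx starts size * idx bytes past the slab's base address */
    static inline void *index_to_obj(char *s_mem, size_t size, unsigned int idx)
    {
            return s_mem + size * idx;
    }

    int main(void)
    {
            static char slab[4096];         /* stand-in for page->s_mem */
            printf("object 3 at offset %td\n",
                   (char *)index_to_obj(slab, 256, 3) - slab);  /* 768 */
            return 0;
    }
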
D | Kconfig |
  435  bool "Enable cleancache driver to cache clean pages if tmem is present"
  437  Cleancache can be thought of as a page-granularity victim cache
  457  bool "Enable frontswap to cache swap pages if tmem is present"
  526  bool "Compressed cache for swap pages (EXPERIMENTAL)"
  531  A lightweight compressed cache for swap pages. It takes

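These three entries are the prompts for the CONFIG_CLEANCACHE, CONFIG_FRONTSWAP and CONFIG_ZSWAP symbols. Assuming a tree of this vintage where all three options still exist, a .config fragment enabling them would read:

    CONFIG_CLEANCACHE=y
    CONFIG_FRONTSWAP=y
    CONFIG_ZSWAP=y
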
D | slab.h |
  659  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

D | slub.c |
  3009  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)    in ___cache_free() argument
  3011  do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);    in ___cache_free()

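___cache_free() first recovers the slab page that owns the object via virt_to_head_page(x) and then hands everything to do_slab_free(). The address-to-page step starts with simple masking; a much-simplified sketch with a fixed 4 KiB page (the kernel also follows compound-page heads, which is omitted here):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* mask off the in-page bits to find the page's base address */
    static uintptr_t virt_to_page_base(const void *obj)
    {
            return (uintptr_t)obj & ~(PAGE_SIZE - 1);
    }

    int main(void)
    {
            int x;
            printf("&x = %p, page base = %#lx\n",
                   (void *)&x, (unsigned long)virt_to_page_base(&x));
            return 0;
    }
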
/mm/kasan/

D | common.c |
  255  void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,    in kasan_cache_create() argument
  263  cache->kasan_info.alloc_meta_offset = *size;    in kasan_cache_create()
  268  (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||    in kasan_cache_create()
  269  cache->object_size < sizeof(struct kasan_free_meta))) {    in kasan_cache_create()
  270  cache->kasan_info.free_meta_offset = *size;    in kasan_cache_create()
  274  redzone_size = optimal_redzone(cache->object_size);    in kasan_cache_create()
  275  redzone_adjust = redzone_size - (*size - cache->object_size);    in kasan_cache_create()
  280  max(*size, cache->object_size + redzone_size));    in kasan_cache_create()
  285  if (*size <= cache->kasan_info.alloc_meta_offset ||    in kasan_cache_create()
  286  *size <= cache->kasan_info.free_meta_offset) {    in kasan_cache_create()
  [all …]

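kasan_cache_create() grows the slab size in place: allocation metadata is appended after the object, free metadata follows for caches that cannot keep it inside the object (SLAB_TYPESAFE_BY_RCU, a constructor, or an object smaller than struct kasan_free_meta), and the size is finally rounded up so the optimal redzone fits. A worked example of that bookkeeping with made-up sizes (real metadata sizes and optimal_redzone() values differ):

    #include <stdio.h>

    int main(void)
    {
            unsigned int object_size = 96;      /* cache->object_size */
            unsigned int size = object_size;    /* *size, grown in place */
            unsigned int alloc_meta = 16;       /* hypothetical meta sizes */
            unsigned int free_meta = 16;
            unsigned int redzone = 64;          /* assumed optimal_redzone(96) */

            unsigned int alloc_off = size;      /* alloc meta after object */
            size += alloc_meta;
            unsigned int free_off = size;       /* free meta for RCU/ctor caches */
            size += free_meta;

            /* guarantee at least `redzone` bytes past the object */
            if (size < object_size + redzone)
                    size = object_size + redzone;

            /* prints: alloc meta @96, free meta @112, final size 160 */
            printf("alloc meta @%u, free meta @%u, final size %u\n",
                   alloc_off, free_off, size);
            return 0;
    }
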
D | quarantine.c |
  131  static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)    in qlink_to_object() argument
  137  return ((void *)free_info) - cache->kasan_info.free_meta_offset;    in qlink_to_object()
  140  static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)    in qlink_free() argument
  142  void *object = qlink_to_object(qlink, cache);    in qlink_free()
  148  ___cache_free(cache, object, _THIS_IP_);    in qlink_free()
  154  static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)    in qlist_free_all() argument
  164  cache ? cache : qlink_to_cache(qlink);    in qlist_free_all()
  173  void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)    in quarantine_put() argument
  190  qlist_put(q, &info->quarantine_link, cache->size);    in quarantine_put()
  266  struct kmem_cache *cache)    in qlist_move_cache() argument
  [all …]

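qlink_to_object() inverts the layout that kasan_cache_create() set up: the quarantine list node is embedded in the per-object kasan_free_meta, which itself sits free_meta_offset bytes into the object, so two subtractions recover the object base. A self-contained sketch (struct layout and the 96-byte offset are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct qlist_node { struct qlist_node *next; };
    struct kasan_free_meta { struct qlist_node quarantine_link; };

    static void *qlink_to_object(struct qlist_node *qlink, size_t free_meta_offset)
    {
            /* step from the list node to its enclosing free meta... */
            struct kasan_free_meta *info = (struct kasan_free_meta *)
                    ((char *)qlink - offsetof(struct kasan_free_meta,
                                              quarantine_link));
            /* ...then back to the start of the slab object */
            return (char *)info - free_meta_offset;
    }

    int main(void)
    {
            static char object[128];
            size_t off = 96;    /* hypothetical free_meta_offset */
            struct kasan_free_meta *meta =
                    (struct kasan_free_meta *)(object + off);
            printf("recovered == object? %d\n",
                   qlink_to_object(&meta->quarantine_link, off) == (void *)object);
            return 0;
    }
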
D | report.c |
  122  static void describe_object_addr(struct kmem_cache *cache, void *object,    in describe_object_addr() argument
  132  object, cache->name, cache->object_size);    in describe_object_addr()
  140  } else if (access_addr >= object_addr + cache->object_size) {    in describe_object_addr()
  142  rel_bytes = access_addr - (object_addr + cache->object_size);    in describe_object_addr()
  150  rel_bytes, rel_type, cache->object_size, (void *)object_addr,    in describe_object_addr()
  151  (void *)(object_addr + cache->object_size));    in describe_object_addr()
  154  static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,    in kasan_get_free_track() argument
  160  alloc_meta = get_alloc_info(cache, object);    in kasan_get_free_track()
  174  static void describe_object(struct kmem_cache *cache, void *object,    in describe_object() argument
  177  struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);    in describe_object()
  [all …]

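describe_object_addr() classifies the faulting address against the object's [start, start + object_size) range and reports it as so many bytes to the left of, inside of, or to the right of the object. A userspace rendering of the same classification:

    #include <stdint.h>
    #include <stdio.h>

    static void describe(uintptr_t access, uintptr_t obj, size_t obj_size)
    {
            if (access < obj)
                    printf("%zu bytes to the left of %zu-byte object\n",
                           (size_t)(obj - access), obj_size);
            else if (access >= obj + obj_size)
                    printf("%zu bytes to the right of %zu-byte object\n",
                           (size_t)(access - (obj + obj_size)), obj_size);
            else
                    printf("%zu bytes inside of %zu-byte object\n",
                           (size_t)(access - obj), obj_size);
    }

    int main(void)
    {
            describe(0x1000 - 4, 0x1000, 96);   /* 4 bytes to the left */
            describe(0x1000 + 96, 0x1000, 96);  /* 0 bytes to the right */
            describe(0x1000 + 10, 0x1000, 96);  /* 10 bytes inside */
            return 0;
    }
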
D | kasan.h |
  123  struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
  125  struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
  163  void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
  165  void quarantine_remove_cache(struct kmem_cache *cache);
  168  struct kmem_cache *cache) { }    in quarantine_put() argument
  170  static inline void quarantine_remove_cache(struct kmem_cache *cache) { }    in quarantine_remove_cache() argument

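Note the pairing at lines 163-170: real prototypes for the quarantine API next to empty static inline stubs, so callers compile unchanged when the quarantine is configured out. The idiom in miniature, with made-up names:

    #ifdef CONFIG_MY_FEATURE
    void my_feature_hook(void *obj);
    #else
    static inline void my_feature_hook(void *obj) { }
    #endif
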
D | tags_report.c |
  41  struct kmem_cache *cache;    in get_bug_type() local
  52  cache = page->slab_cache;    in get_bug_type()
  53  object = nearest_obj(cache, page, (void *)addr);    in get_bug_type()
  54  alloc_meta = get_alloc_info(cache, object);    in get_bug_type()

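get_bug_type() walks from a faulting address to its page, owning cache, object and allocation metadata. The nearest_obj() step rounds the address down to the containing object's base; a sketch assuming fixed-size objects laid out from the page's s_mem (as in SLAB):

    #include <stddef.h>
    #include <stdio.h>

    /* round an address inside the slab down to its object's base */
    static void *nearest_obj(char *s_mem, size_t size, void *addr)
    {
            size_t offset = (size_t)((char *)addr - s_mem);
            return s_mem + (offset / size) * size;
    }

    int main(void)
    {
            static char slab[4096];
            void *addr = slab + 3 * 256 + 17;   /* 17 bytes into object 3 */
            printf("object base at offset %td\n",
                   (char *)nearest_obj(slab, 256, addr) - slab); /* 768 */
            return 0;
    }
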
D | generic.c |
  195  void kasan_cache_shrink(struct kmem_cache *cache)    in kasan_cache_shrink() argument
  197  quarantine_remove_cache(cache);    in kasan_cache_shrink()
  200  void kasan_cache_shutdown(struct kmem_cache *cache)    in kasan_cache_shutdown() argument
  202  if (!__kmem_cache_empty(cache))    in kasan_cache_shutdown()
  203  quarantine_remove_cache(cache);    in kasan_cache_shutdown()