Searched refs:object_size (Results 1 – 10 of 10) sorted by relevance
/mm/kasan/
D | generic.c
    343  static inline unsigned int optimal_redzone(unsigned int object_size)    in optimal_redzone() argument
    346      object_size <= 64 - 16 ? 16 :    in optimal_redzone()
    347      object_size <= 128 - 32 ? 32 :    in optimal_redzone()
    348      object_size <= 512 - 64 ? 64 :    in optimal_redzone()
    349      object_size <= 4096 - 128 ? 128 :    in optimal_redzone()
    350      object_size <= (1 << 14) - 256 ? 256 :    in optimal_redzone()
    351      object_size <= (1 << 15) - 512 ? 512 :    in optimal_redzone()
    352      object_size <= (1 << 16) - 1024 ? 1024 : 2048;    in optimal_redzone()
    404      cache->object_size < sizeof(struct kasan_free_meta)) {    in kasan_cache_create()
    418      optimal_size = cache->object_size + optimal_redzone(cache->object_size);    in kasan_cache_create()
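The generic.c hits are the redzone-sizing heuristic: the redzone grows in steps with object_size so that object plus redzone still fits a convenient size class, and kasan_cache_create() then adds that redzone to the object. Below is a minimal userspace sketch of the same selection chain; the constants come straight from the excerpt, but the kmem_cache plumbing around it is omitted.

#include <stdio.h>

/* Pick a redzone size so that object_size + redzone stays within the
 * next convenient size class (constants as in the generic.c excerpt). */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	unsigned int sizes[] = { 32, 100, 512, 4000, 70000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object_size=%u -> redzone=%u, padded size=%u\n",
		       sizes[i], optimal_redzone(sizes[i]),
		       sizes[i] + optimal_redzone(sizes[i]));
	return 0;
}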
D | common.c
    135      kasan_unpoison(object, cache->object_size, false);    in __kasan_unpoison_object_data()
    140      kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),    in __kasan_poison_object_data()
    226      kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),    in ____kasan_slab_free()
    316      kasan_unpoison(tagged_object, cache->object_size, init);    in __kasan_slab_alloc()
    356      redzone_end = round_up((unsigned long)(object + cache->object_size),    in ____kasan_kmalloc()
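In common.c the unpoison side uses the exact object_size, while poisoning on free rounds the size up to KASAN_GRANULE_SIZE. A small sketch of that rounding follows; the granule value of 8 bytes is an assumption (typical for generic KASAN), not something shown in the excerpt, and round_up_pow2() stands in for the kernel's round_up() helper.

#include <stdio.h>

#define KASAN_GRANULE_SIZE 8	/* assumed granule size */

/* Power-of-two round up, standing in for the kernel's round_up(). */
static unsigned long round_up_pow2(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned int object_size = 52;	/* hypothetical cache->object_size */

	/* Poisoning on free covers the whole rounded-up region; unpoisoning
	 * on allocation only uncovers the first object_size bytes. */
	printf("poison %lu bytes, unpoison %u bytes\n",
	       round_up_pow2(object_size, KASAN_GRANULE_SIZE), object_size);
	return 0;
}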
D | report.c
    250      object, cache->name, cache->object_size);    in describe_object_addr()
    255      } else if (access_addr >= object_addr + cache->object_size) {    in describe_object_addr()
    257      rel_bytes = access_addr - (object_addr + cache->object_size);    in describe_object_addr()
    265      rel_bytes, rel_type, cache->object_size, (void *)object_addr,    in describe_object_addr()
    266      (void *)(object_addr + cache->object_size));    in describe_object_addr()
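report.c uses object_size to say whether a bad access landed to the left of, to the right of, or inside the object when a report is printed. The sketch below mirrors that classification with hypothetical names; the real describe_object_addr() also prints the cache name and more context.

#include <stdio.h>

/* Classify an access address against an object of object_size bytes
 * starting at object_addr, echoing the left-of/right-of/inside logic
 * suggested by the report.c excerpt. */
static void describe_access(unsigned long access_addr,
			    unsigned long object_addr,
			    unsigned int object_size)
{
	unsigned long object_end = object_addr + object_size;

	if (access_addr < object_addr)
		printf("%lu bytes to the left of %u-byte region [%#lx, %#lx)\n",
		       object_addr - access_addr, object_size,
		       object_addr, object_end);
	else if (access_addr >= object_end)
		printf("%lu bytes to the right of %u-byte region [%#lx, %#lx)\n",
		       access_addr - object_end, object_size,
		       object_addr, object_end);
	else
		printf("%lu bytes inside of %u-byte region [%#lx, %#lx)\n",
		       access_addr - object_addr, object_size,
		       object_addr, object_end);
}

int main(void)
{
	describe_access(0x1046, 0x1000, 64);	/* 6 bytes past the object end */
	return 0;
}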
D | tags.c
    123      WRITE_ONCE(entry->size, cache->object_size);    in save_stack_info()
/mm/
D | slab.h
     21      unsigned int object_size;    /* The original size of the object */    member
    130  slab_flags_t kmem_cache_flags(unsigned int object_size,
    138  static inline slab_flags_t kmem_cache_flags(unsigned int object_size,    in kmem_cache_flags() argument
    490      return s->object_size;    in slab_ksize()
    499      return s->object_size;    in slab_ksize()
    502      return s->object_size;    in slab_ksize()
    552      memset(p[i], 0, s->object_size);    in slab_post_alloc_hook()
    553      kmemleak_alloc_recursive(p[i], s->object_size, 1,    in slab_post_alloc_hook()
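In slab.h, object_size is the size the caller originally asked for, as opposed to s->size, the full per-object footprint once alignment and debug metadata are added. The excerpt shows slab_ksize() returning object_size on several paths (the surrounding conditions are not visible in the snippet) and slab_post_alloc_hook() zeroing exactly object_size bytes. A cut-down sketch of that distinction, with a simplified stand-in struct and condition:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for struct kmem_cache with only the two size fields the
 * slab.h excerpt contrasts. */
struct kmem_cache_sketch {
	unsigned int object_size;	/* the original size of the object */
	unsigned int size;		/* object_size plus alignment/metadata */
};

/* Simplified slab_ksize(): when debug-style accounting is active,
 * report only object_size so callers do not touch the metadata. */
static unsigned int slab_ksize_sketch(const struct kmem_cache_sketch *s,
				      bool debug_active)
{
	return debug_active ? s->object_size : s->size;
}

int main(void)
{
	struct kmem_cache_sketch s = { .object_size = 100, .size = 128 };

	printf("usable size with debug: %u, without: %u\n",
	       slab_ksize_sketch(&s, true), slab_ksize_sketch(&s, false));
	return 0;
}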
D | slub.c
     856      min_t(unsigned int, s->object_size, PAGE_SIZE));    in print_trailer()
     858      print_section(KERN_ERR, "Redzone ", p + s->object_size,    in print_trailer()
     859          s->inuse - s->object_size);    in print_trailer()
     913      memset(p, POISON_FREE, s->object_size - 1);    in init_object()
     914      p[s->object_size - 1] = POISON_END;    in init_object()
     918      memset(p + s->object_size, val, s->inuse - s->object_size);    in init_object()
    1057      u8 *endobject = object + s->object_size;    in check_object()
    1065          endobject, val, s->inuse - s->object_size))    in check_object()
    1068      if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {    in check_object()
    1071          s->inuse - s->object_size);    in check_object()
    [all …]
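The slub.c hits outline SLUB's debug layout: the payload occupies the first object_size bytes and the gap up to s->inuse is the right redzone, which init_object() fills and check_object() later verifies. A hedged userspace sketch of the poisoning step is below; the POISON_* byte values and the 0xbb redzone fill are the usual kernel constants but are assumptions here, not shown in the excerpt.

#include <stdio.h>
#include <string.h>

#define POISON_FREE 0x6b	/* assumed, see include/linux/poison.h */
#define POISON_END  0xa5	/* assumed */

/* Poison the payload and fill the right redzone, mirroring the
 * init_object() lines from the slub.c excerpt. */
static void init_object_sketch(unsigned char *p, unsigned int object_size,
			       unsigned int inuse, unsigned char val)
{
	memset(p, POISON_FREE, object_size - 1);
	p[object_size - 1] = POISON_END;
	memset(p + object_size, val, inuse - object_size);
}

int main(void)
{
	unsigned char obj[96];
	unsigned int object_size = 64, inuse = sizeof(obj);

	init_object_sketch(obj, object_size, inuse, 0xbb /* assumed inactive redzone byte */);
	printf("last payload byte %#x, first redzone byte %#x\n",
	       obj[object_size - 1], obj[object_size]);
	return 0;
}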
D | slab.c
    1450      int size = cachep->object_size;    in poison_obj()
    1503      size = cachep->object_size;    in print_objinfo()
    1523      size = cachep->object_size;    in check_poison_obj()
    1793  slab_flags_t kmem_cache_flags(unsigned int object_size,    in kmem_cache_flags() argument
    1813      cachep->object_size = max_t(int, cachep->object_size, size);    in __kmem_cache_alias()
    1841      if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)    in set_objfreelist_slab_cache()
    2022      size >= 256 && cachep->object_size > cache_line_size()) {    in __kmem_cache_create()
    3444      memset(objp, 0, cachep->object_size);    in __cache_free()
    3451      __kcsan_check_access(objp, cachep->object_size,    in __cache_free()
    3508      void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);    in kmem_cache_alloc()
    [all …]
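One detail worth pulling out of the slab.c hits: when a cache creation request is satisfied by aliasing onto an existing compatible cache, __kmem_cache_alias() raises object_size to the larger of the existing value and the new request, so later poisoning and zeroing cover the biggest object anyone expects. A small sketch of just that merge, with hypothetical names and the mergeability checks left out:

#include <stdio.h>

struct cache_sketch {
	unsigned int object_size;
};

/* Mirror the max_t() merge from the __kmem_cache_alias() line: the
 * aliased cache must advertise the larger requested object size. */
static void alias_merge(struct cache_sketch *existing, unsigned int new_size)
{
	if (new_size > existing->object_size)
		existing->object_size = new_size;
}

int main(void)
{
	struct cache_sketch c = { .object_size = 96 };

	alias_merge(&c, 128);
	printf("merged object_size = %u\n", c.object_size);
	return 0;
}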
D | slab_common.c
     94      return s->object_size;    in kmem_cache_size()
    243          unsigned int object_size, unsigned int align,    in create_cache() argument
    251      if (WARN_ON(useroffset + usersize > object_size))    in create_cache()
    260      s->size = s->object_size = object_size;    in create_cache()
    659      s->size = s->object_size = size;    in create_boot_cache()
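slab_common.c is where object_size is first recorded: create_cache() starts with size equal to object_size (alignment and debug padding are added later by the allocator-specific setup) and warns when the requested usercopy window does not fit inside the object. A sketch of that validation follows, assuming the same meaning of useroffset and usersize as in the excerpt; how the kernel reacts beyond the WARN_ON is not shown in the snippet.

#include <stdio.h>

/* Flag a usercopy window that spills past the object, as the WARN_ON()
 * in the create_cache() excerpt does. */
static int usercopy_window_ok(unsigned int object_size,
			      unsigned int useroffset, unsigned int usersize)
{
	return useroffset + usersize <= object_size;
}

int main(void)
{
	printf("window 32+64 in 128-byte object: %s\n",
	       usercopy_window_ok(128, 32, 64) ? "ok" : "bad");
	printf("window 96+64 in 128-byte object: %s\n",
	       usercopy_window_ok(128, 96, 64) ? "ok" : "bad");
	return 0;
}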
D | failslab.c
     33      return should_fail(&failslab.attr, s->object_size);    in __should_failslab()
D | slob.c
    611      trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,    in slob_alloc_node()
    616      trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,    in slob_alloc_node()