Lines Matching +full:cache +full:- +full:size
7 * Some code borrowed from https://github.com/xairy/kasan-prototype by
45 current->kasan_depth++; in kasan_enable_current()
50 current->kasan_depth--; in kasan_disable_current()
54 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
57 static void kasan_poison_shadow(const void *address, size_t size, u8 value) in kasan_poison_shadow() argument
62 shadow_end = kasan_mem_to_shadow(address + size); in kasan_poison_shadow()
64 memset(shadow_start, value, shadow_end - shadow_start); in kasan_poison_shadow()
67 void kasan_unpoison_shadow(const void *address, size_t size) in kasan_unpoison_shadow() argument
69 kasan_poison_shadow(address, size, 0); in kasan_unpoison_shadow()
71 if (size & KASAN_SHADOW_MASK) { in kasan_unpoison_shadow()
72 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); in kasan_unpoison_shadow()
73 *shadow = size & KASAN_SHADOW_MASK; in kasan_unpoison_shadow()
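The branch above handles sizes that are not a multiple of the shadow scale: the shadow byte covering the last, partial granule records how many leading bytes of that granule are valid. A minimal userspace sketch of this encoding, assuming the generic KASAN scale of 8 bytes per shadow byte (KASAN_SHADOW_MASK == 7); shadow_value_for_tail is a hypothetical helper, not a kernel symbol:

#include <stdio.h>
#include <stdint.h>

#define SHADOW_SCALE_SIZE 8			/* bytes covered per shadow byte */
#define SHADOW_MASK	  (SHADOW_SCALE_SIZE - 1)

/* Value written to the shadow byte covering the last, partially
 * accessible granule: 0 means fully accessible, 1..7 means only the
 * first N bytes are valid. */
static uint8_t shadow_value_for_tail(size_t size)
{
	return size & SHADOW_MASK;
}

int main(void)
{
	/* A 13-byte object: first granule fully accessible (shadow 0),
	 * second granule has only 5 valid bytes (shadow 5). */
	printf("tail shadow for size 13: %u\n", (unsigned)shadow_value_for_tail(13));
	printf("tail shadow for size 16: %u\n", (unsigned)shadow_value_for_tail(16));
	return 0;
}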
80 size_t size = sp - base; in __kasan_unpoison_stack() local
82 kasan_unpoison_shadow(base, size); in __kasan_unpoison_stack()
99 void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1)); in kasan_unpoison_task_stack_below()
101 kasan_unpoison_shadow(base, watermark - base); in kasan_unpoison_task_stack_below()
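The base computation above relies on kernel stacks being THREAD_SIZE-aligned, so masking off the low bits of any address inside the stack yields the stack's lowest address. A small userspace sketch of that arithmetic, with an assumed THREAD_SIZE of 16 KiB (the real value is architecture-dependent) and a made-up watermark address:

#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)	/* assumed; arch-dependent in reality */

int main(void)
{
	unsigned long watermark = 0xffffc90000123a40UL;	/* example address inside a stack */
	unsigned long base = watermark & ~(THREAD_SIZE - 1);

	/* Everything from the stack base up to the watermark gets unpoisoned. */
	printf("base = %#lx, length = %lu bytes\n", base, watermark - base);
	return 0;
}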
106 * watermark value, as is sometimes required prior to hand-crafted asm function
112 size_t size = watermark - sp; in kasan_unpoison_stack_above_sp_to() local
116 kasan_unpoison_shadow(sp, size); in kasan_unpoison_stack_above_sp_to()
122 * depending on memory access size X.
138 unsigned long size) in memory_is_poisoned_2_4_8() argument
143 * Access crosses an 8-byte (shadow size) boundary. Such an access maps in memory_is_poisoned_2_4_8()
146 if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1)) in memory_is_poisoned_2_4_8()
147 return *shadow_addr || memory_is_poisoned_1(addr + size - 1); in memory_is_poisoned_2_4_8()
149 return memory_is_poisoned_1(addr + size - 1); in memory_is_poisoned_2_4_8()
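The condition ((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1 used above detects a 2-, 4- or 8-byte access that straddles two 8-byte shadow granules and therefore needs two shadow bytes checked. A worked userspace sketch of the same test, assuming the generic 8-byte granule; access_crosses_granule is an illustrative name only:

#include <stdio.h>
#include <stdbool.h>

#define SHADOW_MASK 7UL	/* 8-byte shadow granule, as in generic KASAN */

/* True when an access of 'size' bytes starting at 'addr' touches two
 * consecutive granules and therefore two shadow bytes. */
static bool access_crosses_granule(unsigned long addr, unsigned long size)
{
	return ((addr + size - 1) & SHADOW_MASK) < size - 1;
}

int main(void)
{
	/* 4-byte access at offset 6 ends at offset 9: crosses the boundary. */
	printf("addr=6 size=4 crosses: %d\n", access_crosses_granule(6, 4));
	/* 4-byte access at offset 4 ends at offset 7: stays in one granule. */
	printf("addr=4 size=4 crosses: %d\n", access_crosses_granule(4, 4));
	return 0;
}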
156 /* Unaligned 16-byte access maps into 3 shadow bytes. */ in memory_is_poisoned_16()
164 size_t size) in bytes_is_nonzero() argument
166 while (size) { in bytes_is_nonzero()
170 size--; in bytes_is_nonzero()
183 if (end - start <= 16) in memory_is_nonzero()
184 return bytes_is_nonzero(start, end - start); in memory_is_nonzero()
187 prefix = 8 - prefix; in memory_is_nonzero()
194 words = (end - start) / 8; in memory_is_nonzero()
199 words--; in memory_is_nonzero()
202 return bytes_is_nonzero(start, (end - start) % 8); in memory_is_nonzero()
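memory_is_nonzero() above scans a shadow range byte-by-byte up to an 8-byte alignment boundary, then word-at-a-time, then the tail. A standalone sketch of the same scan; unlike the kernel version it reads words through memcpy() to stay within portable C, and the function name is illustrative:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Return the address of the first non-zero byte in [start, end),
 * or NULL if the range is all zeroes.  Scans leading bytes up to an
 * 8-byte boundary, then whole 64-bit words, then the tail. */
static const uint8_t *first_nonzero(const uint8_t *start, const uint8_t *end)
{
	while (start < end && ((uintptr_t)start & 7)) {
		if (*start)
			return start;
		start++;
	}

	while (end - start >= 8) {
		uint64_t word;

		memcpy(&word, start, sizeof(word));	/* avoid unaligned reads */
		if (word) {
			/* Narrow down to the exact byte. */
			while (!*start)
				start++;
			return start;
		}
		start += 8;
	}

	while (start < end) {
		if (*start)
			return start;
		start++;
	}
	return NULL;
}

int main(void)
{
	uint8_t buf[32] = { 0 };

	buf[19] = 0x05;	/* pretend one shadow byte is poisoned */
	printf("first nonzero at offset %td\n", first_nonzero(buf, buf + 32) - buf);
	return 0;
}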
206 size_t size) in memory_is_poisoned_n() argument
211 kasan_mem_to_shadow((void *)addr + size - 1) + 1); in memory_is_poisoned_n()
214 unsigned long last_byte = addr + size - 1; in memory_is_poisoned_n()
224 static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) in memory_is_poisoned() argument
226 if (__builtin_constant_p(size)) { in memory_is_poisoned()
227 switch (size) { in memory_is_poisoned()
233 return memory_is_poisoned_2_4_8(addr, size); in memory_is_poisoned()
241 return memory_is_poisoned_n(addr, size); in memory_is_poisoned()
245 size_t size, bool write, in check_memory_region_inline() argument
248 if (unlikely(size == 0)) in check_memory_region_inline()
253 kasan_report(addr, size, write, ret_ip); in check_memory_region_inline()
257 if (likely(!memory_is_poisoned(addr, size))) in check_memory_region_inline()
260 kasan_report(addr, size, write, ret_ip); in check_memory_region_inline()
264 size_t size, bool write, in check_memory_region() argument
267 check_memory_region_inline(addr, size, write, ret_ip); in check_memory_region()
270 void kasan_check_read(const volatile void *p, unsigned int size) in kasan_check_read() argument
272 check_memory_region((unsigned long)p, size, false, _RET_IP_); in kasan_check_read()
276 void kasan_check_write(const volatile void *p, unsigned int size) in kasan_check_write() argument
278 check_memory_region((unsigned long)p, size, true, _RET_IP_); in kasan_check_write()
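kasan_check_read() and kasan_check_write() above give code that the compiler does not instrument a way to declare the ranges it is about to touch. A userspace sketch of that pattern with printf stand-ins for the two hooks; checked_memcpy and both stub bodies are purely illustrative, not kernel code:

#include <stdio.h>
#include <string.h>

/* Stand-ins for the two hooks above, only so the sketch compiles and
 * runs in userspace: they merely print the range a real check would
 * validate against shadow memory. */
static void kasan_check_read(const volatile void *p, unsigned int size)
{
	printf("check read  %p + %u\n", (void *)p, size);
}

static void kasan_check_write(const volatile void *p, unsigned int size)
{
	printf("check write %p + %u\n", (void *)p, size);
}

/* The pattern: declare the whole range about to be touched, then touch it. */
static void *checked_memcpy(void *dst, const void *src, size_t len)
{
	kasan_check_read(src, len);
	kasan_check_write(dst, len);
	return memcpy(dst, src, len);
}

int main(void)
{
	char a[8] = "abcdefg", b[8];

	checked_memcpy(b, a, sizeof(a));
	return 0;
}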
329 object_size <= 64 - 16 ? 16 : in optimal_redzone()
330 object_size <= 128 - 32 ? 32 : in optimal_redzone()
331 object_size <= 512 - 64 ? 64 : in optimal_redzone()
332 object_size <= 4096 - 128 ? 128 : in optimal_redzone()
333 object_size <= (1 << 14) - 256 ? 256 : in optimal_redzone()
334 object_size <= (1 << 15) - 512 ? 512 : in optimal_redzone()
335 object_size <= (1 << 16) - 1024 ? 1024 : 2048; in optimal_redzone()
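The ternary chain above picks a redzone that grows with the object size, roughly keeping object plus redzone within the next power-of-two size class. A standalone copy of the selection logic that prints the chosen redzone for a few example sizes (the constants are taken verbatim from the code above; the sample sizes are arbitrary):

#include <stdio.h>

static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16   :
		object_size <= 128       - 32   ? 32   :
		object_size <= 512       - 64   ? 64   :
		object_size <= 4096      - 128  ? 128  :
		object_size <= (1 << 14) - 256  ? 256  :
		object_size <= (1 << 15) - 512  ? 512  :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	unsigned int sizes[] = { 16, 96, 500, 4000, 70000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object_size %6u -> redzone %4u\n",
		       sizes[i], optimal_redzone(sizes[i]));
	return 0;
}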
338 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, in kasan_cache_create() argument
341 unsigned int orig_size = *size; in kasan_cache_create()
345 cache->kasan_info.alloc_meta_offset = *size; in kasan_cache_create()
346 *size += sizeof(struct kasan_alloc_meta); in kasan_cache_create()
349 if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || in kasan_cache_create()
350 cache->object_size < sizeof(struct kasan_free_meta)) { in kasan_cache_create()
351 cache->kasan_info.free_meta_offset = *size; in kasan_cache_create()
352 *size += sizeof(struct kasan_free_meta); in kasan_cache_create()
354 redzone_adjust = optimal_redzone(cache->object_size) - in kasan_cache_create()
355 (*size - cache->object_size); in kasan_cache_create()
358 *size += redzone_adjust; in kasan_cache_create()
360 *size = min_t(unsigned int, KMALLOC_MAX_SIZE, in kasan_cache_create()
361 max(*size, cache->object_size + in kasan_cache_create()
362 optimal_redzone(cache->object_size))); in kasan_cache_create()
367 if (*size <= cache->kasan_info.alloc_meta_offset || in kasan_cache_create()
368 *size <= cache->kasan_info.free_meta_offset) { in kasan_cache_create()
369 cache->kasan_info.alloc_meta_offset = 0; in kasan_cache_create()
370 cache->kasan_info.free_meta_offset = 0; in kasan_cache_create()
371 *size = orig_size; in kasan_cache_create()
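The bookkeeping above appends the allocation metadata (and, for caches with constructors, RCU type-safety, or very small objects, the free metadata) after the object, then grows the total so the slack behind the object is at least optimal_redzone(object_size), clamped to KMALLOC_MAX_SIZE, and finally falls back to the original size if the metadata no longer fits. A worked userspace sketch of the offset arithmetic for one hypothetical cache; the metadata sizes are assumptions picked for illustration, not the real sizeofs, and the clamp is omitted:

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only. */
	unsigned int object_size     = 500;	/* cache->object_size */
	unsigned int alloc_meta_size = 48;	/* assumed sizeof(struct kasan_alloc_meta) */
	unsigned int free_meta_size  = 16;	/* assumed sizeof(struct kasan_free_meta) */
	unsigned int redzone         = 128;	/* what optimal_redzone(500) above returns */
	unsigned int size            = object_size;
	unsigned int alloc_meta_offset, free_meta_offset;
	int redzone_adjust;

	alloc_meta_offset = size;		/* alloc metadata right after the object */
	size += alloc_meta_size;

	/* Pretend this cache has a ctor, so free metadata cannot live
	 * inside the (dead) object and needs its own slot too. */
	free_meta_offset = size;
	size += free_meta_size;

	/* Grow the redzone if metadata alone left less slack than desired. */
	redzone_adjust = redzone - (size - object_size);
	if (redzone_adjust > 0)
		size += redzone_adjust;

	printf("alloc_meta_offset=%u free_meta_offset=%u total size=%u\n",
	       alloc_meta_offset, free_meta_offset, size);
	return 0;
}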
378 void kasan_cache_shrink(struct kmem_cache *cache) in kasan_cache_shrink() argument
380 quarantine_remove_cache(cache); in kasan_cache_shrink()
383 void kasan_cache_shutdown(struct kmem_cache *cache) in kasan_cache_shutdown() argument
385 if (!__kmem_cache_empty(cache)) in kasan_cache_shutdown()
386 quarantine_remove_cache(cache); in kasan_cache_shutdown()
389 size_t kasan_metadata_size(struct kmem_cache *cache) in kasan_metadata_size() argument
391 return (cache->kasan_info.alloc_meta_offset ? in kasan_metadata_size()
393 (cache->kasan_info.free_meta_offset ? in kasan_metadata_size()
404 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) in kasan_unpoison_object_data() argument
406 kasan_unpoison_shadow(object, cache->object_size); in kasan_unpoison_object_data()
409 void kasan_poison_object_data(struct kmem_cache *cache, void *object) in kasan_poison_object_data() argument
412 round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), in kasan_poison_object_data()
428 if (!trace->nr_entries) in filter_irq_stacks()
430 for (i = 0; i < trace->nr_entries; i++) in filter_irq_stacks()
431 if (in_irqentry_text(trace->entries[i])) { in filter_irq_stacks()
433 trace->nr_entries = i + 1; in filter_irq_stacks()
451 trace.entries[trace.nr_entries-1] == ULONG_MAX) in save_stack()
452 trace.nr_entries--; in save_stack()
459 track->pid = current->pid; in set_track()
460 track->stack = save_stack(flags); in set_track()
463 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, in get_alloc_info() argument
467 return (void *)object + cache->kasan_info.alloc_meta_offset; in get_alloc_info()
470 struct kasan_free_meta *get_free_info(struct kmem_cache *cache, in get_free_info() argument
474 return (void *)object + cache->kasan_info.free_meta_offset; in get_free_info()
477 void kasan_init_slab_obj(struct kmem_cache *cache, const void *object) in kasan_init_slab_obj() argument
481 if (!(cache->flags & SLAB_KASAN)) in kasan_init_slab_obj()
484 alloc_info = get_alloc_info(cache, object); in kasan_init_slab_obj()
488 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) in kasan_slab_alloc() argument
490 kasan_kmalloc(cache, object, cache->object_size, flags); in kasan_slab_alloc()
493 static bool __kasan_slab_free(struct kmem_cache *cache, void *object, in __kasan_slab_free() argument
499 if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != in __kasan_slab_free()
506 if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) in __kasan_slab_free()
515 rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); in __kasan_slab_free()
518 if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN))) in __kasan_slab_free()
521 set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); in __kasan_slab_free()
522 quarantine_put(get_free_info(cache, object), cache); in __kasan_slab_free()
526 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) in kasan_slab_free() argument
528 return __kasan_slab_free(cache, object, ip, true); in kasan_slab_free()
531 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, in kasan_kmalloc() argument
543 redzone_start = round_up((unsigned long)(object + size), in kasan_kmalloc()
545 redzone_end = round_up((unsigned long)object + cache->object_size, in kasan_kmalloc()
548 kasan_unpoison_shadow(object, size); in kasan_kmalloc()
549 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, in kasan_kmalloc()
552 if (cache->flags & SLAB_KASAN) in kasan_kmalloc()
553 set_track(&get_alloc_info(cache, object)->alloc_track, flags); in kasan_kmalloc()
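kasan_kmalloc() above unpoisons exactly the requested size, then poisons from the next shadow-granule boundary up to the end of the slab object as a right redzone. A small userspace sketch of the two round_up() computations, assuming the generic 8-byte shadow scale; the addresses and sizes are made up for the example:

#include <stdio.h>

#define SCALE 8UL	/* KASAN_SHADOW_SCALE_SIZE in generic KASAN */
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long object      = 0xffff888012345600UL;	/* example object address */
	unsigned long size        = 13;				/* bytes the caller asked for */
	unsigned long object_size = 32;				/* slab object size */

	unsigned long redzone_start = ROUND_UP(object + size, SCALE);
	unsigned long redzone_end   = ROUND_UP(object + object_size, SCALE);

	/* [object, object+size) is unpoisoned (the tail granule only partially),
	 * [redzone_start, redzone_end) is poisoned as kmalloc redzone. */
	printf("accessible: %#lx..%#lx\n", object, object + size);
	printf("redzone:    %#lx..%#lx\n", redzone_start, redzone_end);
	return 0;
}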
557 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) in kasan_kmalloc_large() argument
570 redzone_start = round_up((unsigned long)(ptr + size), in kasan_kmalloc_large()
574 kasan_unpoison_shadow(ptr, size); in kasan_kmalloc_large()
575 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, in kasan_kmalloc_large()
579 void kasan_krealloc(const void *object, size_t size, gfp_t flags) in kasan_krealloc() argument
589 kasan_kmalloc_large(object, size, flags); in kasan_krealloc()
591 kasan_kmalloc(page->slab_cache, object, size, flags); in kasan_krealloc()
608 __kasan_slab_free(page->slab_cache, ptr, ip, false); in kasan_poison_kfree()
619 int kasan_module_alloc(void *addr, size_t size) in kasan_module_alloc() argument
627 scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; in kasan_module_alloc()
631 return -EINVAL; in kasan_module_alloc()
640 find_vm_area(addr)->flags |= VM_KASAN; in kasan_module_alloc()
645 return -ENOMEM; in kasan_module_alloc()
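kasan_module_alloc() above sizes the shadow for a module mapping by dividing the mapping size by the shadow scale (rounding up) and then rounding to whole pages. A userspace sketch of that arithmetic, assuming a 4 KiB page size and the 8-byte shadow scale; the module size is an arbitrary example:

#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3			/* 8 bytes of memory per shadow byte */
#define SHADOW_MASK  ((1UL << SHADOW_SCALE_SHIFT) - 1)
#define PAGE_SIZE    4096UL			/* assumed */
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long size = 20000;		/* example module mapping size */
	unsigned long scaled_size = (size + SHADOW_MASK) >> SHADOW_SCALE_SHIFT;
	unsigned long shadow_size = ROUND_UP(scaled_size, PAGE_SIZE);

	printf("module %lu bytes -> %lu shadow bytes -> %lu bytes mapped for shadow\n",
	       size, scaled_size, shadow_size);
	return 0;
}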
650 if (vm->flags & VM_KASAN) in kasan_free_shadow()
651 vfree(kasan_mem_to_shadow(vm->addr)); in kasan_free_shadow()
656 size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); in register_global()
658 kasan_unpoison_shadow(global->beg, global->size); in register_global()
660 kasan_poison_shadow(global->beg + aligned_size, in register_global()
661 global->size_with_redzone - aligned_size, in register_global()
665 void __asan_register_globals(struct kasan_global *globals, size_t size) in __asan_register_globals() argument
669 for (i = 0; i < size; i++) in __asan_register_globals()
674 void __asan_unregister_globals(struct kasan_global *globals, size_t size) in __asan_unregister_globals() argument
679 #define DEFINE_ASAN_LOAD_STORE(size) \ argument
680 void __asan_load##size(unsigned long addr) \
682 check_memory_region_inline(addr, size, false, _RET_IP_);\
684 EXPORT_SYMBOL(__asan_load##size); \
685 __alias(__asan_load##size) \
686 void __asan_load##size##_noabort(unsigned long); \
687 EXPORT_SYMBOL(__asan_load##size##_noabort); \
688 void __asan_store##size(unsigned long addr) \
690 check_memory_region_inline(addr, size, true, _RET_IP_); \
692 EXPORT_SYMBOL(__asan_store##size); \
693 __alias(__asan_store##size) \
694 void __asan_store##size##_noabort(unsigned long); \
695 EXPORT_SYMBOL(__asan_store##size##_noabort)
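The macro above stamps out one fixed-size load hook and one store hook per access size, which the compiler then calls for every instrumented 1/2/4/8/16-byte access. A compile-and-run userspace sketch of the same token-pasting pattern; the stub check_memory_region_inline() only logs, and the kernel's EXPORT_SYMBOL and *_noabort alias plumbing is omitted:

#include <stdio.h>
#include <stdbool.h>

/* Userspace stand-in for check_memory_region_inline(): just logs. */
static void check_memory_region_inline(unsigned long addr, size_t size,
				       bool write, unsigned long ret_ip)
{
	printf("%s of %zu bytes at %#lx (caller %#lx)\n",
	       write ? "store" : "load", size, addr, ret_ip);
}

/* Simplified version of the macro: one load and one store hook per size. */
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, 0);	\
	}								\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, 0);	\
	}

DEFINE_ASAN_LOAD_STORE(1)
DEFINE_ASAN_LOAD_STORE(4)

int main(void)
{
	int x = 0;

	/* The compiler normally emits these calls; done by hand here. */
	__asan_load4((unsigned long)&x);
	__asan_store1((unsigned long)&x);
	return 0;
}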
703 void __asan_loadN(unsigned long addr, size_t size) in __asan_loadN() argument
705 check_memory_region(addr, size, false, _RET_IP_); in __asan_loadN()
713 void __asan_storeN(unsigned long addr, size_t size) in __asan_storeN() argument
715 check_memory_region(addr, size, true, _RET_IP_); in __asan_storeN()
728 void __asan_poison_stack_memory(const void *addr, size_t size) in __asan_poison_stack_memory() argument
731 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded in __asan_poison_stack_memory()
732 * by redzones, so we simply round up size to simplify logic. in __asan_poison_stack_memory()
734 kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE), in __asan_poison_stack_memory()
740 void __asan_unpoison_stack_memory(const void *addr, size_t size) in __asan_unpoison_stack_memory() argument
742 kasan_unpoison_shadow(addr, size); in __asan_unpoison_stack_memory()
747 void __asan_alloca_poison(unsigned long addr, size_t size) in __asan_alloca_poison() argument
749 size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); in __asan_alloca_poison()
750 size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - in __asan_alloca_poison()
752 size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE); in __asan_alloca_poison()
754 const void *left_redzone = (const void *)(addr - in __asan_alloca_poison()
761 size - rounded_down_size); in __asan_alloca_poison()
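The alloca poisoning above brackets each dynamic stack allocation with a fixed-size left redzone below it and a right redzone (plus alignment padding) above it. A userspace sketch of the size arithmetic, assuming the 8-byte shadow scale and a 32-byte KASAN_ALLOCA_REDZONE_SIZE (an assumption here); the example alloca size is arbitrary:

#include <stdio.h>

#define SHADOW_SCALE   8UL	/* bytes per shadow byte */
#define ALLOCA_REDZONE 32UL	/* assumed KASAN_ALLOCA_REDZONE_SIZE */
#define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ROUND_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	unsigned long size = 45;	/* example alloca request */
	unsigned long rounded_up   = ROUND_UP(size, SHADOW_SCALE);
	unsigned long rounded_down = ROUND_DOWN(size, SHADOW_SCALE);
	unsigned long padding      = ROUND_UP(size, ALLOCA_REDZONE) - rounded_up;

	printf("left redzone : %lu bytes below the object\n", ALLOCA_REDZONE);
	printf("valid        : %lu bytes (last granule holds %lu)\n",
	       size, size - rounded_down);
	printf("right redzone: %lu bytes (incl. %lu padding)\n",
	       padding + ALLOCA_REDZONE, padding);
	return 0;
}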
776 kasan_unpoison_shadow(stack_top, stack_bottom - stack_top); in __asan_allocas_unpoison()
782 void __asan_set_shadow_##byte(const void *addr, size_t size) \
784 __memset((void *)addr, 0x##byte, size); \
815 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse in shadow_mapped()
837 nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT; in kasan_mem_notifier()
838 start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn); in kasan_mem_notifier()
843 if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) || in kasan_mem_notifier()
862 pfn_to_nid(mem_data->start_pfn), in kasan_mem_notifier()
878 * A non-NULL result from find_vm_area() will tell us if in kasan_mem_notifier()