Cross-reference hits for the identifier "size" under mm/. Each entry below gives the source line number, the matching line, the enclosing function, and how "size" is used there (argument, member, or local); "[all …]" marks entries truncated by the indexing tool.

/mm/kasan/
generic.c
    63  unsigned long size)   in memory_is_poisoned_2_4_8()  (argument)
    71  if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))   in memory_is_poisoned_2_4_8()
    72  return *shadow_addr || memory_is_poisoned_1(addr + size - 1);   in memory_is_poisoned_2_4_8()
    74  return memory_is_poisoned_1(addr + size - 1);   in memory_is_poisoned_2_4_8()
    89  size_t size)   in bytes_is_nonzero()  (argument)
    91  while (size) {   in bytes_is_nonzero()
    95  size--;   in bytes_is_nonzero()
    131  size_t size)   in memory_is_poisoned_n()  (argument)
    136  kasan_mem_to_shadow((void *)addr + size - 1) + 1);   in memory_is_poisoned_n()
    139  unsigned long last_byte = addr + size - 1;   in memory_is_poisoned_n()
    [all …]
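The generic.c hits all revolve around the generic KASAN shadow encoding: one shadow byte covers an 8-byte granule and holds 0 (all 8 bytes accessible), 1..7 (only that many leading bytes accessible), or a negative poison value; the check on line 71 decides whether an access spills into the next granule. A minimal userspace model of the single-byte check, with the shadow array and names invented for illustration (this is not the kernel code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define GRANULE      8              /* KASAN_SHADOW_SCALE_SIZE in the kernel */
    #define GRANULE_MASK (GRANULE - 1)  /* KASAN_SHADOW_MASK                     */

    static int8_t shadow[16];           /* one shadow byte per 8-byte granule    */

    /* Model of memory_is_poisoned_1(): a 1-byte access at addr is bad when the
     * shadow byte is non-zero and the offset inside the granule reaches or
     * passes the number of accessible leading bytes. */
    static bool poisoned_1(uintptr_t addr)
    {
        int8_t s = shadow[addr / GRANULE];

        if (s != 0)
            return (int8_t)(addr & GRANULE_MASK) >= s;
        return false;
    }

    int main(void)
    {
        shadow[0] = 5;              /* only bytes 0..4 of granule 0 are valid */
        assert(!poisoned_1(4));     /* last valid byte                        */
        assert(poisoned_1(5));      /* first byte past the object             */
        return 0;
    }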
common.c
    90  bool __kasan_check_read(const volatile void *p, unsigned int size)   in __kasan_check_read()  (argument)
    92  return check_memory_region((unsigned long)p, size, false, _RET_IP_);   in __kasan_check_read()
    96  bool __kasan_check_write(const volatile void *p, unsigned int size)   in __kasan_check_write()  (argument)
    98  return check_memory_region((unsigned long)p, size, true, _RET_IP_);   in __kasan_check_write()
    132  void kasan_poison_shadow(const void *address, size_t size, u8 value)   in kasan_poison_shadow()  (argument)
    144  shadow_end = kasan_mem_to_shadow(address + size);   in kasan_poison_shadow()
    149  void kasan_unpoison_shadow(const void *address, size_t size)   in kasan_unpoison_shadow()  (argument)
    160  kasan_poison_shadow(address, size, tag);   in kasan_unpoison_shadow()
    162  if (size & KASAN_SHADOW_MASK) {   in kasan_unpoison_shadow()
    163  u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);   in kasan_unpoison_shadow()
    [all …]
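The kasan_unpoison_shadow() lines show the writer side of the same encoding: whole granules have their shadow byte cleared, and when size is not granule-aligned the last shadow byte records how many bytes of the final granule are valid (generic-mode behaviour; the tag-based mode stores a tag instead). A hedged userspace sketch with an invented shadow array:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define GRANULE      8
    #define GRANULE_MASK (GRANULE - 1)

    static uint8_t shadow[16];

    /* Model of kasan_unpoison_shadow(): mark [0, size) as accessible. */
    static void unpoison(size_t size)
    {
        memset(shadow, 0, size / GRANULE);      /* full granules: all 8 bytes ok */
        if (size & GRANULE_MASK)                /* partial tail granule          */
            shadow[size / GRANULE] = size & GRANULE_MASK;
    }

    int main(void)
    {
        memset(shadow, 0xFF, sizeof(shadow));   /* start fully poisoned */
        unpoison(13);                           /* 8 + 5 bytes          */
        printf("shadow[0]=%u shadow[1]=%u\n",
               (unsigned)shadow[0], (unsigned)shadow[1]);   /* prints 0 and 5 */
        return 0;
    }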
tags.c
    79  bool check_memory_region(unsigned long addr, size_t size, bool write,   in check_memory_region()  (argument)
    86  if (unlikely(size == 0))   in check_memory_region()
    114  kasan_report(addr, size, write, ret_ip);   in check_memory_region()
    118  shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);   in check_memory_region()
    121  kasan_report(addr, size, write, ret_ip);   in check_memory_region()
    129  #define DEFINE_HWASAN_LOAD_STORE(size) \   (argument)
    130  void __hwasan_load##size##_noabort(unsigned long addr) \
    132  check_memory_region(addr, size, false, _RET_IP_); \
    134  EXPORT_SYMBOL(__hwasan_load##size##_noabort); \
    135  void __hwasan_store##size##_noabort(unsigned long addr) \
    [all …]
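tags.c is the software tag-based KASAN flavour (SW_TAGS): the tag lives in the top byte of the pointer and check_memory_region() compares it against the tag recorded in shadow memory for every granule the access touches, reporting on mismatch. A simplified model; the tag position and the 16-byte granule are assumptions for illustration:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define TAG_SHIFT   56          /* tag in the top byte of a 64-bit pointer  */
    #define TAG_GRANULE 16          /* bytes covered by one shadow tag (assumed) */

    static uint8_t shadow_tag[64];  /* one tag per granule */

    static uint8_t get_tag(uint64_t tagged) { return tagged >> TAG_SHIFT; }
    static uint64_t untag(uint64_t tagged) { return tagged & ((1ULL << TAG_SHIFT) - 1); }

    /* Model of check_memory_region(): the access is valid only if every granule
     * in [addr, addr + size) carries the same tag as the pointer itself. */
    static bool access_ok_tagged(uint64_t tagged_addr, size_t size)
    {
        uint8_t tag = get_tag(tagged_addr);
        uint64_t addr = untag(tagged_addr);

        if (size == 0)
            return true;
        for (uint64_t g = addr / TAG_GRANULE; g <= (addr + size - 1) / TAG_GRANULE; g++)
            if (shadow_tag[g] != tag)
                return false;       /* the kernel would call kasan_report() here */
        return true;
    }

    int main(void)
    {
        for (int i = 0; i < 64; i++)
            shadow_tag[i] = 0xAB;
        return access_ok_tagged(((uint64_t)0xAB << TAG_SHIFT) | 0x100, 32) ? 0 : 1;
    }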
generic_report.c
    37  void *find_first_bad_addr(void *addr, size_t size)   in find_first_bad_addr()  (argument)
    41  while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))   in find_first_bad_addr()
    115  #define DEFINE_ASAN_REPORT_LOAD(size) \   (argument)
    116  void __asan_report_load##size##_noabort(unsigned long addr) \
    118  kasan_report(addr, size, false, _RET_IP_); \
    120  EXPORT_SYMBOL(__asan_report_load##size##_noabort)
    122  #define DEFINE_ASAN_REPORT_STORE(size) \   (argument)
    123  void __asan_report_store##size##_noabort(unsigned long addr) \
    125  kasan_report(addr, size, true, _RET_IP_); \
    127  EXPORT_SYMBOL(__asan_report_store##size##_noabort)
    [all …]
kasan.h
    74  size_t size; /* Size of the global variable. */   (member)
    139  void kasan_poison_shadow(const void *address, size_t size, u8 value);
    149  bool check_memory_region(unsigned long addr, size_t size, bool write,
    152  void *find_first_bad_addr(void *addr, size_t size);
    155  void kasan_report(unsigned long addr, size_t size,
    212  void __asan_register_globals(struct kasan_global *globals, size_t size);
    213  void __asan_unregister_globals(struct kasan_global *globals, size_t size);
    214  void __asan_loadN(unsigned long addr, size_t size);
    215  void __asan_storeN(unsigned long addr, size_t size);
    217  void __asan_alloca_poison(unsigned long addr, size_t size);
    [all …]
init.c
    84  static __init void *early_alloc(size_t size, int node)   in early_alloc()  (argument)
    86  void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),   in early_alloc()
    91  __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));   in early_alloc()
    452  void kasan_remove_zero_shadow(void *start, unsigned long size)   in kasan_remove_zero_shadow()  (argument)
    458  end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);   in kasan_remove_zero_shadow()
    462  WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))   in kasan_remove_zero_shadow()
    487  int kasan_add_zero_shadow(void *start, unsigned long size)   in kasan_add_zero_shadow()  (argument)
    493  shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);   in kasan_add_zero_shadow()
    497  WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))   in kasan_add_zero_shadow()
    503  size >> KASAN_SHADOW_SCALE_SHIFT);   in kasan_add_zero_shadow()
/mm/
memblock.c
    148  static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)   in memblock_cap_size()  (argument)
    150  return *size = min(*size, PHYS_ADDR_MAX - base);   in memblock_cap_size()
    163  phys_addr_t base, phys_addr_t size)   in memblock_overlaps_region()  (argument)
    168  if (memblock_addrs_overlap(base, size, type->regions[i].base,   in memblock_overlaps_region()
    169  type->regions[i].size))   in memblock_overlaps_region()
    191  phys_addr_t size, phys_addr_t align, int nid,   in __memblock_find_range_bottom_up()  (argument)
    202  if (cand < this_end && this_end - cand >= size)   in __memblock_find_range_bottom_up()
    226  phys_addr_t size, phys_addr_t align, int nid,   in __memblock_find_range_top_down()  (argument)
    237  if (this_end < size)   in __memblock_find_range_top_down()
    240  cand = round_down(this_end - size, align);   in __memblock_find_range_top_down()
    [all …]
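memblock_cap_size() and memblock_addrs_overlap() in the hits above are two small helpers that are easy to get wrong: the first clamps a size so base + size cannot wrap past PHYS_ADDR_MAX, the second tests two [base, base + size) ranges for overlap. A standalone sketch of both, with phys_addr_t simplified to uint64_t:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PHYS_ADDR_MAX UINT64_MAX

    /* Clamp *size so that base + *size cannot overflow (memblock_cap_size()). */
    static uint64_t cap_size(uint64_t base, uint64_t *size)
    {
        if (*size > PHYS_ADDR_MAX - base)
            *size = PHYS_ADDR_MAX - base;
        return *size;
    }

    /* Half-open interval overlap test (memblock_addrs_overlap()). */
    static bool addrs_overlap(uint64_t b1, uint64_t s1, uint64_t b2, uint64_t s2)
    {
        return b1 < b2 + s2 && b2 < b1 + s1;
    }

    int main(void)
    {
        uint64_t size = 0x2000;

        cap_size(PHYS_ADDR_MAX - 0x1000, &size);
        assert(size == 0x1000);
        assert(addrs_overlap(0x1000, 0x1000, 0x1800, 0x100));
        assert(!addrs_overlap(0x1000, 0x1000, 0x2000, 0x100));
        return 0;
    }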
slob.c
    126  #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)   (argument)
    135  int size;   (member)
    146  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)   in set_slob()  (argument)
    151  if (size > 1) {   in set_slob()
    152  s[0].units = size;   in set_slob()
    237  static void *slob_page_alloc(struct page *sp, size_t size, int align,   in slob_page_alloc()  (argument)
    241  int delta = 0, units = SLOB_UNITS(size);   in slob_page_alloc()
    301  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,   in slob_alloc()  (argument)
    310  if (size < SLOB_BREAK1)   in slob_alloc()
    312  else if (size < SLOB_BREAK2)   in slob_alloc()
    [all …]
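The slob.c hits centre on SLOB_UNITS(), which rounds a byte count up to whole allocation units, and set_slob(), which stores a free block's length in its first unit once the block spans more than one unit. A minimal sketch of the rounding; the SLOB_UNIT value here is illustrative, not the kernel's:

    #include <assert.h>
    #include <stddef.h>

    #define SLOB_UNIT sizeof(long)                    /* unit size; illustrative */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

    int main(void)
    {
        /* With an 8-byte unit: 1..8 bytes -> 1 unit, 9..16 -> 2 units, ... */
        assert(SLOB_UNITS(1) == 1);
        assert(SLOB_UNITS(sizeof(long)) == 1);
        assert(SLOB_UNITS(sizeof(long) + 1) == 2);
        return 0;
    }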
early_ioremap.c
    35  unsigned long size,   in early_memremap_pgprot_adjust()  (argument)
    106  __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)   in __early_ioremap()  (argument)
    125  __func__, (u64)phys_addr, size))   in __early_ioremap()
    129  last_addr = phys_addr + size - 1;   in __early_ioremap()
    130  if (WARN_ON(!size || last_addr < phys_addr))   in __early_ioremap()
    133  prev_size[slot] = size;   in __early_ioremap()
    139  size = PAGE_ALIGN(last_addr + 1) - phys_addr;   in __early_ioremap()
    144  nrpages = size >> PAGE_SHIFT;   in __early_ioremap()
    162  __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);   in __early_ioremap()
    168  void __init early_iounmap(void __iomem *addr, unsigned long size)   in early_iounmap()  (argument)
    [all …]
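__early_ioremap() validates and page-rounds the requested physical range: last_addr = phys_addr + size - 1 doubles as an overflow check, and the mapping size becomes PAGE_ALIGN(last_addr + 1) minus the page-aligned base (lines 129, 130, 139 and 144 above). A small sketch of that arithmetic, assuming 4 KiB pages:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    /* Round a physical range out to whole pages and count them; returns 0 for
     * a zero-sized or wrapping range (the WARN_ON case in __early_ioremap()). */
    static unsigned long range_to_pages(uint64_t phys_addr, uint64_t size)
    {
        uint64_t last_addr = phys_addr + size - 1;

        if (!size || last_addr < phys_addr)     /* empty or overflowed range */
            return 0;

        phys_addr &= PAGE_MASK;                 /* page-align the base       */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;
        return size >> PAGE_SHIFT;
    }

    int main(void)
    {
        assert(range_to_pages(0x1FFF, 2) == 2); /* straddles a page boundary */
        assert(range_to_pages(0x1000, 0) == 0);
        return 0;
    }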
vmalloc.c
    560  unsigned long size;   in augment_tree_propagate_check()  (local)
    567  size = va->subtree_max_size;   in augment_tree_propagate_check()
    573  if (get_subtree_max_size(node->rb_left) == size) {   in augment_tree_propagate_check()
    576  if (va_size(va) == size) {   in augment_tree_propagate_check()
    764  is_within_this_va(struct vmap_area *va, unsigned long size,   in is_within_this_va()  (argument)
    775  if (nva_start_addr + size < nva_start_addr ||   in is_within_this_va()
    779  return (nva_start_addr + size <= va->va_end);   in is_within_this_va()
    788  find_vmap_lowest_match(unsigned long size,   in find_vmap_lowest_match()  (argument)
    799  length = size + align - 1;   in find_vmap_lowest_match()
    808  if (is_within_this_va(va, size, align, vstart))   in find_vmap_lowest_match()
    [all …]
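The vmalloc.c hits from find_vmap_lowest_match() and is_within_this_va() show how an aligned block is searched for: the lookup length is padded to size + align - 1 so any hole of that length can hold an aligned allocation, and the per-area test checks that the aligned start plus size still fits, guarding against overflow. A standalone sketch of the fit test, modelled on those lines:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Does the free area [va_start, va_end) hold `size` bytes aligned to `align`
     * (a power of two), starting no lower than vstart? */
    static bool fits_aligned(uint64_t va_start, uint64_t va_end,
                             uint64_t size, uint64_t align, uint64_t vstart)
    {
        uint64_t start = va_start > vstart ? va_start : vstart;
        uint64_t aligned = (start + align - 1) & ~(align - 1);

        if (aligned + size < aligned)       /* arithmetic overflow guard */
            return false;
        return aligned + size <= va_end;
    }

    int main(void)
    {
        /* A hole holds 0x100 bytes at 0x100 alignment only if the aligned
         * start leaves room; the worst case needs size + align - 1 bytes. */
        assert( fits_aligned(0x1000, 0x1180, 0x100, 0x100, 0));
        assert(!fits_aligned(0x1010, 0x1180, 0x100, 0x100, 0));
        return 0;
    }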
slab_common.c
    86  static int kmem_cache_sanity_check(const char *name, unsigned int size)   in kmem_cache_sanity_check()  (argument)
    88  if (!name || in_interrupt() || size < sizeof(void *) ||   in kmem_cache_sanity_check()
    89  size > KMALLOC_MAX_SIZE) {   in kmem_cache_sanity_check()
    98  static inline int kmem_cache_sanity_check(const char *name, unsigned int size)   in kmem_cache_sanity_check()  (argument)
    282  unsigned int align, unsigned int size)   in calculate_alignment()  (argument)
    295  while (size <= ralign / 2)   in calculate_alignment()
    332  struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,   in find_mergeable()  (argument)
    343  size = ALIGN(size, sizeof(void *));   in find_mergeable()
    344  align = calculate_alignment(flags, align, size);   in find_mergeable()
    345  size = ALIGN(size, align);   in find_mergeable()
    [all …]
dmapool.c
    45  size_t size;   (member)
    68  unsigned size;   in show_pools()  (local)
    74  size = PAGE_SIZE;   in show_pools()
    76  temp = scnprintf(next, size, "poolinfo - 0.1\n");   in show_pools()
    77  size -= temp;   in show_pools()
    93  temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",   in show_pools()
    95  pages * (pool->allocation / pool->size),   in show_pools()
    96  pool->size, pages);   in show_pools()
    97  size -= temp;   in show_pools()
    102  return PAGE_SIZE - size;   in show_pools()
    [all …]
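show_pools() uses the usual bounded-buffer reporting pattern visible in lines 74 to 102: start with a budget of PAGE_SIZE, let every scnprintf() consume part of it, and return PAGE_SIZE - size as the number of bytes produced. A generic userspace sketch of the same accounting (snprintf() stands in for scnprintf(), so the truncation case is handled explicitly):

    #include <stdio.h>

    #define BUF_SIZE 4096   /* stands in for PAGE_SIZE */

    /* Fill buf with one line per item, never overflowing, and return the
     * number of bytes written (the show_pools() accounting pattern). */
    static size_t fill_report(char *buf, const char **names, int n)
    {
        size_t size = BUF_SIZE;
        char *next = buf;

        for (int i = 0; i < n; i++) {
            int temp = snprintf(next, size, "%-16s %4d\n", names[i], i);

            if (temp < 0 || (size_t)temp >= size)
                break;                  /* budget exhausted            */
            size -= temp;               /* shrink the remaining budget */
            next += temp;               /* advance the write cursor    */
        }
        return BUF_SIZE - size;
    }

    int main(void)
    {
        char buf[BUF_SIZE];
        const char *names[] = { "pool-a", "pool-b" };

        printf("%zu bytes:\n%s", fill_report(buf, names, 2), buf);
        return 0;
    }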
maccess.c
    10  probe_read_common(void *dst, const void __user *src, size_t size)   in probe_read_common()  (argument)
    15  ret = __copy_from_user_inatomic(dst, src, size);   in probe_read_common()
    22  probe_write_common(void __user *dst, const void *src, size_t size)   in probe_write_common()  (argument)
    27  ret = __copy_to_user_inatomic(dst, src, size);   in probe_write_common()
    48  long __weak probe_kernel_read(void *dst, const void *src, size_t size)
    51  long __probe_kernel_read(void *dst, const void *src, size_t size)   in __probe_kernel_read()  (argument)
    57  ret = probe_read_common(dst, (__force const void __user *)src, size);   in __probe_kernel_read()
    74  long __weak probe_user_read(void *dst, const void __user *src, size_t size)
    77  long __probe_user_read(void *dst, const void __user *src, size_t size)   in __probe_user_read()  (argument)
    83  if (access_ok(src, size))   in __probe_user_read()
    [all …]
readahead.c
    257  static unsigned long get_init_ra_size(unsigned long size, unsigned long max)   in get_init_ra_size()  (argument)
    259  unsigned long newsize = roundup_pow_of_two(size);   in get_init_ra_size()
    278  unsigned long cur = ra->size;   in get_next_ra_size()
    353  pgoff_t size;   in try_context_readahead()  (local)
    355  size = count_history_pages(mapping, offset, max);   in try_context_readahead()
    361  if (size <= req_size)   in try_context_readahead()
    368  if (size >= offset)   in try_context_readahead()
    369  size *= 2;   in try_context_readahead()
    372  ra->size = min(size + req_size, max);   in try_context_readahead()
    409  if ((offset == (ra->start + ra->size - ra->async_size) ||   in ondemand_readahead()
    [all …]
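get_init_ra_size() picks the initial readahead window: round the request up to a power of two, boost it when it is far below the maximum, and clamp it when it is close. The sketch below follows that shape; the boost factors and thresholds are illustrative rather than quoted from the file:

    #include <assert.h>

    /* Round up to the next power of two (roundup_pow_of_two() for size > 0). */
    static unsigned long roundup_pow_of_two(unsigned long size)
    {
        unsigned long n = 1;

        while (n < size)
            n <<= 1;
        return n;
    }

    /* Initial readahead window: power-of-two request, boosted when far below
     * max and clamped when close to it (factors here are illustrative). */
    static unsigned long init_ra_size(unsigned long size, unsigned long max)
    {
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
            newsize *= 4;
        else if (newsize <= max / 4)
            newsize *= 2;
        else
            newsize = max;
        return newsize;
    }

    int main(void)
    {
        assert(init_ra_size(3, 256) == 16);     /* 4 rounded up, then boosted */
        assert(init_ra_size(200, 256) == 256);  /* clamped to max             */
        return 0;
    }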
cma.c
    176  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,   in cma_init_reserved_mem()  (argument)
    190  if (!size || !memblock_is_region_reserved(base, size))   in cma_init_reserved_mem()
    201  if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)   in cma_init_reserved_mem()
    217  cma->count = size >> PAGE_SHIFT;   in cma_init_reserved_mem()
    221  totalcma_pages += (size / PAGE_SIZE);   in cma_init_reserved_mem()
    246  phys_addr_t size, phys_addr_t limit,   in cma_declare_contiguous()  (argument)
    262  __func__, &size, &base, &limit, &alignment);   in cma_declare_contiguous()
    269  if (!size)   in cma_declare_contiguous()
    290  size = ALIGN(size, alignment);   in cma_declare_contiguous()
    297  if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))   in cma_declare_contiguous()
    [all …]
kmemleak.c
    123  size_t size;   (member)
    147  size_t size;   (member)
    287  len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);   in hex_dump_object()
    341  object->pointer, object->size);   in print_unreferenced()
    362  object->pointer, object->size);   in dump_object_info()
    388  else if (object->pointer + object->size <= ptr)   in lookup_object()
    570  static struct kmemleak_object *create_object(unsigned long ptr, size_t size,   in create_object()  (argument)
    592  object->size = size;   in create_object()
    624  max_addr = max(max_addr, untagged_ptr + size);   in create_object()
    630  if (ptr + size <= parent->pointer)   in create_object()
    [all …]
slab.c
    343  return (unsigned long long *)(objp + cachep->size -   in dbg_redzone2()
    346  return (unsigned long long *) (objp + cachep->size -   in dbg_redzone2()
    353  return (void **)(objp + cachep->size - BYTES_PER_WORD);   in dbg_userword()
    377  return page->s_mem + cache->size * idx;   in index_to_obj()
    386  .size = sizeof(struct kmem_cache),
    1332  cachep->name, cachep->size, cachep->gfporder);   in slab_out_of_memory()
    1419  (cachep->size % PAGE_SIZE) == 0)   in is_debug_pagealloc_cache()
    1431  kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);   in slab_kernel_map()
    1442  int size = cachep->object_size;   in poison_obj()  (local)
    1445  memset(addr, val, size);   in poison_obj()
    [all …]
sparse-vmemmap.c
    41  unsigned long size,   in __earlyonly_bootmem_alloc()  (argument)
    45  return memblock_alloc_try_nid_raw(size, align, goal,   in __earlyonly_bootmem_alloc()
    49  void * __meminit vmemmap_alloc_block(unsigned long size, int node)   in vmemmap_alloc_block()  (argument)
    54  int order = get_order(size);   in vmemmap_alloc_block()
    69  return __earlyonly_bootmem_alloc(node, size, size,   in vmemmap_alloc_block()
    74  void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)   in vmemmap_alloc_block_buf()  (argument)
    76  void *ptr = sparse_buffer_alloc(size);   in vmemmap_alloc_block_buf()
    79  ptr = vmemmap_alloc_block(size, node);   in vmemmap_alloc_block_buf()
    105  void * __meminit altmap_alloc_block_buf(unsigned long size,   in altmap_alloc_block_buf()  (argument)
    110  if (size & ~PAGE_MASK) {   in altmap_alloc_block_buf()
    [all …]
zsmalloc.c
    207  int size;   (member)
    404  static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,   in zs_zpool_malloc()  (argument)
    407  *handle = zs_malloc(pool, size, gfp);   in zs_zpool_malloc()
    539  static int get_size_class_index(int size)   in get_size_class_index()  (argument)
    543  if (likely(size > ZS_MIN_ALLOC_SIZE))   in get_size_class_index()
    544  idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,   in get_size_class_index()
    627  i, class->size, class_almost_full, class_almost_empty,   in zs_stats_size_show()
    999  while ((off += class->size) < PAGE_SIZE) {   in init_zspage()
    1001  link += class->size / sizeof(*link);   in init_zspage()
    1139  struct page *pages[2], int off, int size)   in __zs_map_object()  (argument)
    [all …]
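get_size_class_index() bins an allocation size into one of zsmalloc's fixed size classes: anything at or below ZS_MIN_ALLOC_SIZE lands in class 0, and larger sizes are spaced by a fixed step via DIV_ROUND_UP. The listing truncates before the step constant, so both constants below are assumptions for illustration:

    #include <assert.h>

    #define ZS_MIN_ALLOC_SIZE   32    /* illustrative minimum class size    */
    #define ZS_SIZE_CLASS_DELTA 16    /* illustrative step between classes  */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int get_size_class_index(int size)
    {
        int idx = 0;

        if (size > ZS_MIN_ALLOC_SIZE)
            idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA);
        return idx;
    }

    int main(void)
    {
        assert(get_size_class_index(16) == 0);  /* below the minimum      */
        assert(get_size_class_index(33) == 1);  /* first step above it    */
        assert(get_size_class_index(64) == 2);  /* (64 - 32) / 16 exactly */
        return 0;
    }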
memory_hotplug.c
    104  static struct resource *register_memory_resource(u64 start, u64 size)   in register_memory_resource()  (argument)
    110  if (start + size > max_mem_size)   in register_memory_resource()
    118  res = __request_region(&iomem_resource, start, size,   in register_memory_resource()
    123  start, start + size);   in register_memory_resource()
    1002  static int check_hotplug_memory_range(u64 start, u64 size)   in check_hotplug_memory_range()  (argument)
    1005  if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||   in check_hotplug_memory_range()
    1006  !IS_ALIGNED(size, memory_block_size_bytes())) {   in check_hotplug_memory_range()
    1008  memory_block_size_bytes(), start, size);   in check_hotplug_memory_range()
    1029  u64 start, size;   in add_memory_resource()  (local)
    1034  size = resource_size(res);   in add_memory_resource()
    [all …]
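check_hotplug_memory_range() rejects any hot-add request whose start or size is not a whole multiple of the memory block size, which reduces to the two IS_ALIGNED() tests on lines 1005 and 1006. A sketch with an assumed block size:

    #include <assert.h>
    #include <stdint.h>

    #define MEMORY_BLOCK_SIZE (128ULL << 20)   /* 128 MiB; illustrative value */
    #define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)

    /* Model of check_hotplug_memory_range(): 0 on success, -1 on a bad range. */
    static int check_hotplug_range(uint64_t start, uint64_t size)
    {
        if (!size || !IS_ALIGNED(start, MEMORY_BLOCK_SIZE) ||
            !IS_ALIGNED(size, MEMORY_BLOCK_SIZE))
            return -1;
        return 0;
    }

    int main(void)
    {
        assert(check_hotplug_range(0, 128ULL << 20) == 0);
        assert(check_hotplug_range(64ULL << 20, 128ULL << 20) == -1);
        return 0;
    }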
sparse.c
    264  unsigned long size, align;   in memory_present()  (local)
    266  size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;   in memory_present()
    268  mem_section = memblock_alloc(size, align);   in memory_present()
    271  __func__, size, align);   in memory_present()
    356  unsigned long size)   in sparse_early_usemaps_alloc_pgdat_section()  (argument)
    375  usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);   in sparse_early_usemaps_alloc_pgdat_section()
    428  unsigned long size)   in sparse_early_usemaps_alloc_pgdat_section()  (argument)
    430  return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);   in sparse_early_usemaps_alloc_pgdat_section()
    454  unsigned long size = section_map_size();   in __populate_section_memmap()  (local)
    455  struct page *map = sparse_buffer_alloc(size);   in __populate_section_memmap()
    [all …]
slub.c
    315  __p < (__addr) + (__objects) * (__s)->size; \
    316  __p += (__s)->size)
    321  return (kasan_reset_tag(p) - addr) / s->size;   in slab_index()
    324  static inline unsigned int order_objects(unsigned int order, unsigned int size)   in order_objects()  (argument)
    326  return ((unsigned int)PAGE_SIZE << order) / size;   in order_objects()
    330  unsigned int size)   in oo_make()  (argument)
    333  (order << OO_SHIFT) + order_objects(order, size)   in oo_make()
    462  return s->size - s->red_left_pad;   in size_from_object()
    464  return s->size;   in size_from_object()
    519  if (object < base || object >= base + page->objects * s->size ||   in check_valid_pointer()
    [all …]
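order_objects() and oo_make() show how SLUB packs a slab's page order and object capacity into one word: objects per slab is (PAGE_SIZE << order) / size, and the pair is stored as (order << OO_SHIFT) + objects. A sketch of the pack/unpack round trip; the shift width is an assumption:

    #include <assert.h>

    #define PAGE_SIZE 4096U
    #define OO_SHIFT  16                       /* assumed packing shift */
    #define OO_MASK   ((1U << OO_SHIFT) - 1)

    static unsigned int order_objects(unsigned int order, unsigned int size)
    {
        return (PAGE_SIZE << order) / size;    /* objects that fit in the slab */
    }

    static unsigned int oo_make(unsigned int order, unsigned int size)
    {
        return (order << OO_SHIFT) + order_objects(order, size);
    }

    int main(void)
    {
        unsigned int oo = oo_make(1, 256);     /* order-1 slab of 256-byte objects */

        assert((oo >> OO_SHIFT) == 1);         /* unpacked order           */
        assert((oo & OO_MASK) == 32);          /* 8192 / 256 objects       */
        return 0;
    }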
mempool.c
    25  static void poison_error(mempool_t *pool, void *element, size_t size,   in poison_error()  (argument)
    30  const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);   in poison_error()
    34  pr_err("Mempool %p size %zu\n", pool, size);   in poison_error()
    38  pr_cont("%s\n", end < size ? "..." : "");   in poison_error()
    42  static void __check_element(mempool_t *pool, void *element, size_t size)   in __check_element()  (argument)
    47  for (i = 0; i < size; i++) {   in __check_element()
    48  u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;   in __check_element()
    51  poison_error(pool, element, size, i);   in __check_element()
    55  memset(obj, POISON_INUSE, size);   in __check_element()
    74  static void __poison_element(void *element, size_t size)   in __poison_element()  (argument)
    [all …]
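__poison_element() and __check_element() implement mempool's poisoning of free elements: every byte is filled with POISON_FREE except the last, which gets POISON_END, and the check walks each byte against that expectation (line 48 above). A standalone sketch using the poison values from include/linux/poison.h:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define POISON_FREE 0x6b
    #define POISON_END  0xa5

    static void poison_element(uint8_t *obj, size_t size)
    {
        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
    }

    /* Returns the index of the first corrupted byte, or -1 if intact
     * (the role poison_error() plays in __check_element()). */
    static long check_element(const uint8_t *obj, size_t size)
    {
        for (size_t i = 0; i < size; i++) {
            uint8_t exp = (i < size - 1) ? POISON_FREE : POISON_END;

            if (obj[i] != exp)
                return (long)i;
        }
        return -1;
    }

    int main(void)
    {
        uint8_t buf[32];

        poison_element(buf, sizeof(buf));
        assert(check_element(buf, sizeof(buf)) == -1);
        buf[5] = 0;                         /* simulate a write-after-free */
        assert(check_element(buf, sizeof(buf)) == 5);
        return 0;
    }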
gup_benchmark.c
    16  __u64 size;   (member)
    31  if (gup->size > ULONG_MAX)   in __gup_benchmark_ioctl()
    34  nr_pages = gup->size / PAGE_SIZE;   in __gup_benchmark_ioctl()
    42  for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {   in __gup_benchmark_ioctl()
    47  if (next > gup->addr + gup->size) {   in __gup_benchmark_ioctl()
    48  next = gup->addr + gup->size;   in __gup_benchmark_ioctl()
    79  gup->size = addr - gup->addr;   in __gup_benchmark_ioctl()
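__gup_benchmark_ioctl() walks the user-supplied range in chunks, clamping the final step to gup->addr + gup->size and, on early exit, rewriting gup->size to report how much was actually covered (lines 42 to 79). A sketch of that clamped walk; the chunk size is an arbitrary stand-in:

    #include <assert.h>
    #include <stdint.h>

    #define CHUNK (512 * 4096UL)    /* bytes per iteration; illustrative */

    /* Walk [start, start + size) in CHUNK pieces; returns bytes visited. */
    static uint64_t walk_range(uint64_t start, uint64_t size)
    {
        uint64_t addr, next;

        for (addr = start; addr < start + size; addr = next) {
            next = addr + CHUNK;
            if (next > start + size)        /* clamp the final partial chunk */
                next = start + size;
            /* ... pin/process the pages in [addr, next) here ... */
        }
        return addr - start;                /* what gup->size is rewritten to */
    }

    int main(void)
    {
        assert(walk_range(0, 3 * CHUNK + 123) == 3 * CHUNK + 123);
        return 0;
    }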
percpu.c
    220  static int __pcpu_size_to_slot(int size)   in __pcpu_size_to_slot()  (argument)
    222  int highbit = fls(size); /* size is in bytes */   in __pcpu_size_to_slot()
    226  static int pcpu_size_to_slot(int size)   in pcpu_size_to_slot()  (argument)
    228  if (size == pcpu_unit_size)   in pcpu_size_to_slot()
    230  return __pcpu_size_to_slot(size);   in pcpu_size_to_slot()
    503  static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)   in pcpu_mem_zalloc()  (argument)
    508  if (size <= PAGE_SIZE)   in pcpu_mem_zalloc()
    509  return kzalloc(size, gfp);   in pcpu_mem_zalloc()
    511  return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);   in pcpu_mem_zalloc()
    1132  unsigned long size,   in pcpu_find_zero_area()  (argument)
    [all …]
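The percpu.c hits show two size-driven decisions: free space in a chunk is bucketed into slots by fls(size), so the slot index grows with the logarithm of the free size, and pcpu_mem_zalloc() serves requests up to one page from kzalloc() and anything larger from vmalloc(). A userspace model of the slot bucketing; the slot base shift is an assumption:

    #include <assert.h>

    #define PCPU_SLOT_BASE_SHIFT 5   /* assumed: sizes below 32 bytes share slot 1 */

    /* Highest set bit, 1-based, as the kernel's fls() reports it. */
    static int fls_(unsigned int x)
    {
        int bit = 0;

        while (x) {
            bit++;
            x >>= 1;
        }
        return bit;
    }

    /* Model of __pcpu_size_to_slot(): map a free size in bytes to a slot index. */
    static int size_to_slot(int size)
    {
        int slot = fls_(size) - PCPU_SLOT_BASE_SHIFT + 2;

        return slot > 1 ? slot : 1;
    }

    int main(void)
    {
        assert(size_to_slot(16) < size_to_slot(64));    /* bigger size, higher slot */
        assert(size_to_slot(64) == size_to_slot(127));  /* same power-of-two bucket */
        return 0;
    }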