Searched refs:size (Results 1 – 25 of 67) sorted by relevance

/mm/kasan/
generic.c
56 unsigned long size) in memory_is_poisoned_2_4_8() argument
64 if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1)) in memory_is_poisoned_2_4_8()
65 return *shadow_addr || memory_is_poisoned_1(addr + size - 1); in memory_is_poisoned_2_4_8()
67 return memory_is_poisoned_1(addr + size - 1); in memory_is_poisoned_2_4_8()
82 size_t size) in bytes_is_nonzero() argument
84 while (size) { in bytes_is_nonzero()
88 size--; in bytes_is_nonzero()
124 size_t size) in memory_is_poisoned_n() argument
129 kasan_mem_to_shadow((void *)addr + size - 1) + 1); in memory_is_poisoned_n()
132 unsigned long last_byte = addr + size - 1; in memory_is_poisoned_n()
[all …]
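
The generic.c hits above revolve around one piece of arithmetic: an aligned 2-, 4- or 8-byte access only needs a second shadow byte when it crosses a granule boundary. A minimal user-space sketch of that test, assuming the generic KASAN granule size of 8 bytes (the constant is not spelled out in the hits above):

#include <stdbool.h>
#include <stdio.h>

#define GRANULE_SIZE 8UL                 /* assumed KASAN_GRANULE_SIZE */
#define GRANULE_MASK (GRANULE_SIZE - 1)  /* assumed KASAN_GRANULE_MASK */

/* True when the size-byte access ending at addr + size - 1 spans two
 * granules, i.e. two shadow bytes have to be consulted. */
static bool access_crosses_granule(unsigned long addr, unsigned long size)
{
    return ((addr + size - 1) & GRANULE_MASK) < size - 1;
}

int main(void)
{
    printf("%d\n", access_crosses_granule(0x1000, 8)); /* 0: fits one granule */
    printf("%d\n", access_crosses_granule(0x1004, 8)); /* 1: spans two granules */
    return 0;
}
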
kasan.h
124 size_t size; /* Size of the global variable. */ member
221 bool kasan_check_range(unsigned long addr, size_t size, bool write,
239 void *kasan_find_first_bad_addr(void *addr, size_t size);
249 bool kasan_report(unsigned long addr, size_t size,
306 #define arch_set_mem_tag_range(addr, size, tag, init) ((void *)(addr)) argument
315 #define hw_set_mem_tag_range(addr, size, tag, init) \ argument
316 arch_set_mem_tag_range((addr), (size), (tag), (init))
350 static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init) in kasan_poison() argument
360 if (WARN_ON(size & KASAN_GRANULE_MASK)) in kasan_poison()
363 hw_set_mem_tag_range((void *)addr, size, value, init); in kasan_poison()
[all …]
sw_tags.c
70 bool kasan_check_range(unsigned long addr, size_t size, bool write, in kasan_check_range() argument
77 if (unlikely(size == 0)) in kasan_check_range()
80 if (unlikely(addr + size < addr)) in kasan_check_range()
81 return !kasan_report(addr, size, write, ret_ip); in kasan_check_range()
108 return !kasan_report(addr, size, write, ret_ip); in kasan_check_range()
111 shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1); in kasan_check_range()
114 return !kasan_report(addr, size, write, ret_ip); in kasan_check_range()
134 #define DEFINE_HWASAN_LOAD_STORE(size) \ argument
135 void __hwasan_load##size##_noabort(unsigned long addr) \
137 kasan_check_range(addr, size, false, _RET_IP_); \
[all …]
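
kasan_check_range() above starts with two cheap rejections before touching shadow memory: a zero-sized range is trivially accepted, and a range whose end wraps around the address space is reported immediately. A sketch of just those two guards in plain C, with the reporting side stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for kasan_report(): just says what would be reported. */
static bool report_bad_range(uintptr_t addr, size_t size)
{
    printf("invalid range: addr=%#lx size=%zu\n", (unsigned long)addr, size);
    return true;
}

static bool check_range(uintptr_t addr, size_t size)
{
    if (size == 0)
        return true;                           /* nothing to check */
    if (addr + size < addr)
        return !report_bad_range(addr, size);  /* end wrapped past the top */
    /* ... shadow lookup would follow here ... */
    return true;
}

int main(void)
{
    printf("%d\n", check_range(UINTPTR_MAX - 4, 16)); /* 0: wrapping range */
    return 0;
}
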
shadow.c
29 bool __kasan_check_read(const volatile void *p, unsigned int size) in __kasan_check_read() argument
31 return kasan_check_range((unsigned long)p, size, false, _RET_IP_); in __kasan_check_read()
35 bool __kasan_check_write(const volatile void *p, unsigned int size) in __kasan_check_write() argument
37 return kasan_check_range((unsigned long)p, size, true, _RET_IP_); in __kasan_check_write()
72 void kasan_poison(const void *addr, size_t size, u8 value, bool init) in kasan_poison() argument
89 if (WARN_ON(size & KASAN_GRANULE_MASK)) in kasan_poison()
93 shadow_end = kasan_mem_to_shadow(addr + size); in kasan_poison()
100 void kasan_poison_last_granule(const void *addr, size_t size) in kasan_poison_last_granule() argument
102 if (size & KASAN_GRANULE_MASK) { in kasan_poison_last_granule()
103 u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size); in kasan_poison_last_granule()
[all …]
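
kasan_poison() above insists on a granule-aligned size (the WARN_ON on size & KASAN_GRANULE_MASK), and kasan_poison_last_granule() deals with whatever is left over. A sketch of that split, again assuming an 8-byte granule:

#include <stdio.h>

#define GRANULE_SIZE 8UL                 /* assumed KASAN_GRANULE_SIZE */
#define GRANULE_MASK (GRANULE_SIZE - 1)

int main(void)
{
    unsigned long size = 27;

    unsigned long full = size & ~GRANULE_MASK; /* 24 bytes: whole granules,
                                                  covered by whole shadow bytes */
    unsigned long tail = size & GRANULE_MASK;  /* 3 bytes: recorded in the last
                                                  shadow byte as the valid prefix */
    printf("full=%lu tail=%lu\n", full, tail);
    return 0;
}
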
common.c
61 void __kasan_unpoison_range(const void *address, size_t size) in __kasan_unpoison_range() argument
63 kasan_unpoison(address, size, false); in __kasan_unpoison_range()
137 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, in __kasan_cache_create() argument
155 ok_size = *size; in __kasan_cache_create()
158 cache->kasan_info.alloc_meta_offset = *size; in __kasan_cache_create()
159 *size += sizeof(struct kasan_alloc_meta); in __kasan_cache_create()
167 if (*size > KMALLOC_MAX_SIZE) { in __kasan_cache_create()
169 *size = ok_size; in __kasan_cache_create()
191 ok_size = *size; in __kasan_cache_create()
193 cache->kasan_info.free_meta_offset = *size; in __kasan_cache_create()
[all …]
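
__kasan_cache_create() above grows the object size to make room for allocation metadata, remembers where that metadata starts, and backs out if the padded object would exceed KMALLOC_MAX_SIZE. A sketch of that grow-then-back-off pattern; the metadata struct and the size limit here are illustrative stand-ins, not the kernel's:

#include <stdio.h>

struct alloc_meta { unsigned long stack[4]; };   /* assumed metadata layout */
#define OBJ_MAX_SIZE (1UL << 22)                 /* assumed upper bound */

static void cache_create(unsigned int *size, unsigned int *meta_offset)
{
    unsigned int ok_size = *size;

    *meta_offset = *size;                 /* metadata sits right after the object */
    *size += sizeof(struct alloc_meta);

    if (*size > OBJ_MAX_SIZE) {           /* padded object too big: give up on metadata */
        *meta_offset = 0;
        *size = ok_size;
    }
}

int main(void)
{
    unsigned int size = 256, off = 0;

    cache_create(&size, &off);
    printf("object size %u, metadata at offset %u\n", size, off);
    return 0;
}
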
report_generic.c
33 void *kasan_find_first_bad_addr(void *addr, size_t size) in kasan_find_first_bad_addr() argument
37 while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p))) in kasan_find_first_bad_addr()
188 unsigned long size; in print_decoded_frame_descr() local
196 &size)) in print_decoded_frame_descr()
210 pr_err(" [%lu, %lu) '%s'", offset, offset + size, token); in print_decoded_frame_descr()
292 #define DEFINE_ASAN_REPORT_LOAD(size) \ argument
293 void __asan_report_load##size##_noabort(unsigned long addr) \
295 kasan_report(addr, size, false, _RET_IP_); \
297 EXPORT_SYMBOL(__asan_report_load##size##_noabort)
299 #define DEFINE_ASAN_REPORT_STORE(size) \ argument
[all …]
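
DEFINE_ASAN_REPORT_LOAD() and DEFINE_ASAN_REPORT_STORE() above use token pasting to stamp out one reporting entry point per access size. The same pattern in a standalone sketch, with a hypothetical report_access() in place of kasan_report():

#include <stdio.h>

static void report_access(unsigned long addr, unsigned long size)
{
    printf("bad %lu-byte load at %#lx\n", size, addr);
}

#define DEFINE_REPORT_LOAD(size)                    \
    void report_load##size(unsigned long addr)      \
    {                                               \
        report_access(addr, size);                  \
    }

DEFINE_REPORT_LOAD(1)   /* defines report_load1() */
DEFINE_REPORT_LOAD(8)   /* defines report_load8() */

int main(void)
{
    report_load8(0xdead0000);
    return 0;
}
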
/mm/
memblock.c
167 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) in memblock_cap_size() argument
169 return *size = min(*size, PHYS_ADDR_MAX - base); in memblock_cap_size()
182 phys_addr_t base, phys_addr_t size) in memblock_overlaps_region() argument
186 memblock_cap_size(base, &size); in memblock_overlaps_region()
189 if (memblock_addrs_overlap(base, size, type->regions[i].base, in memblock_overlaps_region()
190 type->regions[i].size)) in memblock_overlaps_region()
212 phys_addr_t size, phys_addr_t align, int nid, in __memblock_find_range_bottom_up() argument
223 if (cand < this_end && this_end - cand >= size) in __memblock_find_range_bottom_up()
247 phys_addr_t size, phys_addr_t align, int nid, in __memblock_find_range_top_down() argument
258 if (this_end < size) in __memblock_find_range_top_down()
[all …]
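
memblock_cap_size() above clamps a region size so that base + size can never overflow the physical address space before any overlap math runs. The same clamp in user-space terms, with uint64_t standing in for phys_addr_t:

#include <stdint.h>
#include <stdio.h>

#define ADDR_MAX UINT64_MAX              /* stands in for PHYS_ADDR_MAX */

static uint64_t cap_size(uint64_t base, uint64_t *size)
{
    uint64_t room = ADDR_MAX - base;     /* largest size that cannot wrap */

    if (*size > room)
        *size = room;
    return *size;
}

int main(void)
{
    uint64_t size = UINT64_MAX;

    printf("%#llx\n", (unsigned long long)cap_size(0x1000, &size));
    return 0;
}
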
slob.c
126 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT) argument
135 int size; member
146 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
151 if (size > 1) { in set_slob()
152 s[0].units = size; in set_slob()
237 static void *slob_page_alloc(struct page *sp, size_t size, int align, in slob_page_alloc() argument
241 int delta = 0, units = SLOB_UNITS(size); in slob_page_alloc()
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc() argument
310 if (size < SLOB_BREAK1) in slob_alloc()
312 else if (size < SLOB_BREAK2) in slob_alloc()
[all …]
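
SLOB_UNITS() above is a plain round-up division: how many allocation units a byte count occupies. A sketch of that macro; SLOB_UNIT here is an arbitrary illustrative value, not the kernel's sizeof(slob_t):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SLOB_UNIT 8UL                     /* assumed unit size, for illustration */
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

int main(void)
{
    printf("%lu\n", SLOB_UNITS(1));   /* 1 unit */
    printf("%lu\n", SLOB_UNITS(9));   /* 2 units */
    return 0;
}
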
early_ioremap.c
35 unsigned long size, in early_memremap_pgprot_adjust() argument
106 __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) in __early_ioremap() argument
125 __func__, &phys_addr, size)) in __early_ioremap()
129 last_addr = phys_addr + size - 1; in __early_ioremap()
130 if (WARN_ON(!size || last_addr < phys_addr)) in __early_ioremap()
133 prev_size[slot] = size; in __early_ioremap()
139 size = PAGE_ALIGN(last_addr + 1) - phys_addr; in __early_ioremap()
144 nrpages = size >> PAGE_SHIFT; in __early_ioremap()
162 __func__, &phys_addr, size, slot, offset, slot_virt[slot]); in __early_ioremap()
168 void __init early_iounmap(void __iomem *addr, unsigned long size) in early_iounmap() argument
[all …]
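
__early_ioremap() above turns a byte range at an arbitrary physical address into whole pages: it rejects empty or wrapping requests, records the offset within the first page, rounds the start down and the end up, and derives the page count from the grown size. A sketch of that arithmetic with a 4 KiB page assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t phys_addr = 0x1234, size = 100;
    uint64_t last_addr = phys_addr + size - 1;

    if (!size || last_addr < phys_addr)
        return 1;                          /* empty or wrapping request */

    uint64_t offset = phys_addr & (PAGE_SIZE - 1); /* where the data starts in page 0 */
    phys_addr &= ~(PAGE_SIZE - 1);                 /* map from the page boundary */
    size = PAGE_ALIGN(last_addr + 1) - phys_addr;  /* grow to whole pages */

    printf("offset=%llu nrpages=%llu\n",
           (unsigned long long)offset,
           (unsigned long long)(size >> PAGE_SHIFT));
    return 0;
}
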
slab_common.c
91 static int kmem_cache_sanity_check(const char *name, unsigned int size) in kmem_cache_sanity_check() argument
93 if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) { in kmem_cache_sanity_check()
102 static inline int kmem_cache_sanity_check(const char *name, unsigned int size) in kmem_cache_sanity_check() argument
140 unsigned int align, unsigned int size) in calculate_alignment() argument
153 while (size <= ralign / 2) in calculate_alignment()
186 struct kmem_cache *find_mergeable(unsigned int size, unsigned int align, in find_mergeable() argument
197 size = ALIGN(size, sizeof(void *)); in find_mergeable()
198 align = calculate_alignment(flags, align, size); in find_mergeable()
199 size = ALIGN(size, align); in find_mergeable()
200 flags = kmem_cache_flags(size, flags, name); in find_mergeable()
[all …]
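
find_mergeable() above normalizes a requested size and alignment before comparing against existing caches: round the size up to pointer size, settle on an alignment, then round the size up to that alignment. A sketch of the rounding steps only; the alignment value is picked for the example, whereas calculate_alignment() applies its own policy:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
    unsigned long size = 30, align = 16;

    size = ALIGN(size, sizeof(void *));   /* 32 on a 64-bit host */
    size = ALIGN(size, align);            /* then up to the chosen alignment */
    printf("%lu\n", size);
    return 0;
}
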
maccess.c
10 size_t size) in copy_from_kernel_nofault_allowed() argument
25 long copy_from_kernel_nofault(void *dst, const void *src, size_t size) in copy_from_kernel_nofault() argument
27 if (!copy_from_kernel_nofault_allowed(src, size)) in copy_from_kernel_nofault()
31 copy_from_kernel_nofault_loop(dst, src, size, u64, Efault); in copy_from_kernel_nofault()
32 copy_from_kernel_nofault_loop(dst, src, size, u32, Efault); in copy_from_kernel_nofault()
33 copy_from_kernel_nofault_loop(dst, src, size, u16, Efault); in copy_from_kernel_nofault()
34 copy_from_kernel_nofault_loop(dst, src, size, u8, Efault); in copy_from_kernel_nofault()
51 long copy_to_kernel_nofault(void *dst, const void *src, size_t size) in copy_to_kernel_nofault() argument
54 copy_to_kernel_nofault_loop(dst, src, size, u64, Efault); in copy_to_kernel_nofault()
55 copy_to_kernel_nofault_loop(dst, src, size, u32, Efault); in copy_to_kernel_nofault()
[all …]
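
copy_from_kernel_nofault() above copies in descending word sizes: as many 8-byte chunks as fit, then 4, 2, and finally single bytes, with each access wrapped in fault handling. A sketch of the size-descending loop only, with memcpy standing in for the faulting loads and no exception handling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COPY_LOOP(dst, src, len, type)                            \
    while ((len) >= sizeof(type)) {                               \
        memcpy((dst), (src), sizeof(type));                       \
        (dst) += sizeof(type);                                    \
        (src) += sizeof(type);                                    \
        (len) -= sizeof(type);                                    \
    }

static void copy_by_words(void *dst, const void *src, size_t len)
{
    unsigned char *d = dst;
    const unsigned char *s = src;

    COPY_LOOP(d, s, len, uint64_t);  /* as many 8-byte chunks as possible */
    COPY_LOOP(d, s, len, uint32_t);
    COPY_LOOP(d, s, len, uint16_t);
    COPY_LOOP(d, s, len, uint8_t);   /* trailing bytes */
}

int main(void)
{
    char src[13] = "hello, world";
    char dst[13] = {0};

    copy_by_words(dst, src, sizeof(src));
    printf("%s\n", dst);
    return 0;
}
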
vmalloc.c
170 void unmap_kernel_range_noflush(unsigned long start, unsigned long size) in unmap_kernel_range_noflush() argument
172 unsigned long end = start + size; in unmap_kernel_range_noflush()
293 int map_kernel_range_noflush(unsigned long addr, unsigned long size, in map_kernel_range_noflush() argument
297 unsigned long end = addr + size; in map_kernel_range_noflush()
321 int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, in map_kernel_range() argument
326 ret = map_kernel_range_noflush(start, size, prot, pages); in map_kernel_range()
327 flush_cache_vmap(start, start + size); in map_kernel_range()
834 is_within_this_va(struct vmap_area *va, unsigned long size, in is_within_this_va() argument
845 if (nva_start_addr + size < nva_start_addr || in is_within_this_va()
849 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
[all …]
dmapool.c
45 size_t size; member
68 unsigned size; in show_pools() local
74 size = PAGE_SIZE; in show_pools()
76 temp = scnprintf(next, size, "poolinfo - 0.1\n"); in show_pools()
77 size -= temp; in show_pools()
93 temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n", in show_pools()
95 pages * (pool->allocation / pool->size), in show_pools()
96 pool->size, pages); in show_pools()
97 size -= temp; in show_pools()
102 return PAGE_SIZE - size; in show_pools()
[all …]
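
show_pools() above fills a single page-sized buffer by repeatedly printing and shrinking the remaining space, then reports PAGE_SIZE - size as the number of bytes produced. The same accounting in a sketch; snprintf stands in for the kernel's scnprintf, which differs in that it returns the bytes actually written rather than the bytes that would have been written:

#include <stdio.h>

int main(void)
{
    char buf[128];
    char *next = buf;
    int size = sizeof(buf);                /* bytes still available */
    int temp;

    temp = snprintf(next, size, "poolinfo - 0.1\n");
    size -= temp;
    next += temp;

    temp = snprintf(next, size, "%-16s %4u blocks\n", "pool0", 42);
    size -= temp;
    next += temp;

    printf("wrote %d bytes:\n%s", (int)sizeof(buf) - size, buf);
    return 0;
}
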
readahead.c
321 static unsigned long get_init_ra_size(unsigned long size, unsigned long max) in get_init_ra_size() argument
323 unsigned long newsize = roundup_pow_of_two(size); in get_init_ra_size()
342 unsigned long cur = ra->size; in get_next_ra_size()
417 pgoff_t size; in try_context_readahead() local
419 size = count_history_pages(mapping, index, max); in try_context_readahead()
425 if (size <= req_size) in try_context_readahead()
432 if (size >= index) in try_context_readahead()
433 size *= 2; in try_context_readahead()
436 ra->size = min(size + req_size, max); in try_context_readahead()
474 if ((index == (ra->start + ra->size - ra->async_size) || in ondemand_readahead()
[all …]
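
get_init_ra_size() above seeds the readahead window by rounding the first request up to a power of two and capping it at the allowed maximum (the real function then scales that value further). A sketch of just the round-and-cap step, with a loop in place of the kernel's roundup_pow_of_two():

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
    unsigned long p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned long size = 33, max = 128;
    unsigned long newsize = roundup_pow_of_two(size);  /* 64 */

    if (newsize > max)
        newsize = max;            /* never exceed the readahead limit */
    printf("%lu\n", newsize);
    return 0;
}
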
memory_hotplug.c
101 static struct resource *register_memory_resource(u64 start, u64 size, in register_memory_resource() argument
116 if (start + size > max_mem_size && system_state < SYSTEM_RUNNING) in register_memory_resource()
124 res = __request_region(&iomem_resource, start, size, in register_memory_resource()
129 start, start + size); in register_memory_resource()
995 static int check_hotplug_memory_range(u64 start, u64 size) in check_hotplug_memory_range() argument
998 if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) || in check_hotplug_memory_range()
999 !IS_ALIGNED(size, memory_block_size_bytes())) { in check_hotplug_memory_range()
1001 memory_block_size_bytes(), start, size); in check_hotplug_memory_range()
1023 u64 start, size; in add_memory_resource() local
1028 size = resource_size(res); in add_memory_resource()
[all …]
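
check_hotplug_memory_range() above rejects a hotplug request unless both its start and its size are non-zero multiples of the memory block size. A sketch of that test, with an arbitrary 128 MiB standing in for memory_block_size_bytes():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
    uint64_t block = 128ULL << 20;                  /* assumed block size */
    uint64_t start = 256ULL << 20, size = 192ULL << 20;

    bool ok = size && IS_ALIGNED(start, block) && IS_ALIGNED(size, block);
    printf("%s\n", ok ? "acceptable" : "rejected: not block aligned");
    return 0;
}
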
cma.c
176 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, in cma_init_reserved_mem() argument
190 if (!size || !memblock_is_region_reserved(base, size)) in cma_init_reserved_mem()
201 if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size) in cma_init_reserved_mem()
216 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
220 totalcma_pages += (size / PAGE_SIZE); in cma_init_reserved_mem()
246 phys_addr_t size, phys_addr_t limit, in cma_declare_contiguous_nid() argument
263 __func__, &size, &base, &limit, &alignment); in cma_declare_contiguous_nid()
270 if (!size) in cma_declare_contiguous_nid()
291 size = ALIGN(size, alignment); in cma_declare_contiguous_nid()
298 if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) in cma_declare_contiguous_nid()
[all …]
sparse-vmemmap.c
40 unsigned long size, in __earlyonly_bootmem_alloc() argument
44 return memblock_alloc_try_nid_raw(size, align, goal, in __earlyonly_bootmem_alloc()
48 void * __meminit vmemmap_alloc_block(unsigned long size, int node) in vmemmap_alloc_block() argument
53 int order = get_order(size); in vmemmap_alloc_block()
68 return __earlyonly_bootmem_alloc(node, size, size, in vmemmap_alloc_block()
72 static void * __meminit altmap_alloc_block_buf(unsigned long size,
76 void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node, in vmemmap_alloc_block_buf() argument
82 return altmap_alloc_block_buf(size, altmap); in vmemmap_alloc_block_buf()
84 ptr = sparse_buffer_alloc(size); in vmemmap_alloc_block_buf()
86 ptr = vmemmap_alloc_block(size, node); in vmemmap_alloc_block_buf()
[all …]
kmemleak.c
124 size_t size; member
148 size_t size; member
288 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
342 object->pointer, object->size); in print_unreferenced()
363 object->pointer, object->size); in dump_object_info()
389 else if (object->pointer + object->size <= ptr) in lookup_object()
571 static struct kmemleak_object *create_object(unsigned long ptr, size_t size, in create_object() argument
593 object->size = kfence_ksize((void *)ptr) ?: size; in create_object()
625 max_addr = max(max_addr, untagged_ptr + size); in create_object()
631 if (ptr + size <= parent->pointer) in create_object()
[all …]
slab.c
344 return (unsigned long long *)(objp + cachep->size - in dbg_redzone2()
347 return (unsigned long long *) (objp + cachep->size - in dbg_redzone2()
354 return (void **)(objp + cachep->size - BYTES_PER_WORD); in dbg_userword()
378 return page->s_mem + cache->size * idx; in index_to_obj()
387 .size = sizeof(struct kmem_cache),
1261 kmalloc_info[INDEX_NODE].size, in kmem_cache_init()
1263 kmalloc_info[INDEX_NODE].size); in kmem_cache_init()
1343 cachep->name, cachep->size, cachep->gfporder); in slab_out_of_memory()
1426 (cachep->size % PAGE_SIZE) == 0) in is_debug_pagealloc_cache()
1438 __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); in slab_kernel_map()
[all …]
sparse.c
258 unsigned long size, align; in memory_present() local
260 size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; in memory_present()
262 mem_section = memblock_alloc(size, align); in memory_present()
265 __func__, size, align); in memory_present()
350 unsigned long size) in sparse_early_usemaps_alloc_pgdat_section() argument
369 usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid); in sparse_early_usemaps_alloc_pgdat_section()
422 unsigned long size) in sparse_early_usemaps_alloc_pgdat_section() argument
424 return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id); in sparse_early_usemaps_alloc_pgdat_section()
448 unsigned long size = section_map_size(); in __populate_section_memmap() local
449 struct page *map = sparse_buffer_alloc(size); in __populate_section_memmap()
[all …]
slub.c
310 __p < (__addr) + (__objects) * (__s)->size; \
311 __p += (__s)->size)
313 static inline unsigned int order_objects(unsigned int order, unsigned int size) in order_objects() argument
315 return ((unsigned int)PAGE_SIZE << order) / size; in order_objects()
319 unsigned int size) in oo_make() argument
322 (order << OO_SHIFT) + order_objects(order, size) in oo_make()
475 return s->size - s->red_left_pad; in size_from_object()
477 return s->size; in size_from_object()
532 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
533 (object - base) % s->size) { in check_valid_pointer()
[all …]
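
order_objects() and oo_make() above compute how many objects fit in a slab of a given page order and pack the order and that count into a single word. A sketch with a 4 KiB page assumed; OO_SHIFT is treated as 16 here purely for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096U
#define OO_SHIFT  16                       /* assumed packing shift */

static unsigned int order_objects(unsigned int order, unsigned int size)
{
    return (PAGE_SIZE << order) / size;    /* objects per 2^order pages */
}

int main(void)
{
    unsigned int order = 1, size = 192;
    unsigned int objects = order_objects(order, size);  /* 42 per 8 KiB slab */
    unsigned int oo = (order << OO_SHIFT) + objects;     /* packed order+count */

    printf("objects=%u packed=%#x\n", objects, oo);
    return 0;
}
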
zsmalloc.c
207 int size; member
400 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zs_zpool_malloc() argument
403 *handle = zs_malloc(pool, size, gfp); in zs_zpool_malloc()
535 static int get_size_class_index(int size) in get_size_class_index() argument
539 if (likely(size > ZS_MIN_ALLOC_SIZE)) in get_size_class_index()
540 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, in get_size_class_index()
623 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
995 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
997 link += class->size / sizeof(*link); in init_zspage()
1133 struct page *pages[2], int off, int size) in __zs_map_object() argument
[all …]
percpu.c
227 static int __pcpu_size_to_slot(int size) in __pcpu_size_to_slot() argument
229 int highbit = fls(size); /* size is in bytes */ in __pcpu_size_to_slot()
233 static int pcpu_size_to_slot(int size) in pcpu_size_to_slot() argument
235 if (size == pcpu_unit_size) in pcpu_size_to_slot()
237 return __pcpu_size_to_slot(size); in pcpu_size_to_slot()
483 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp) in pcpu_mem_zalloc() argument
488 if (size <= PAGE_SIZE) in pcpu_mem_zalloc()
489 return kzalloc(size, gfp); in pcpu_mem_zalloc()
491 return __vmalloc(size, gfp | __GFP_ZERO); in pcpu_mem_zalloc()
1113 unsigned long size, in pcpu_find_zero_area() argument
[all …]
slab.h
22 unsigned int size; /* The aligned/padded/added on size */ member
81 unsigned int size; member
119 struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
123 unsigned int size, slab_flags_t flags,
127 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
131 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
138 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, in __kmem_cache_alias() argument
315 return s->size + sizeof(struct obj_cgroup *); in obj_full_size()
362 gfp_t flags, size_t size, in memcg_slab_post_alloc_hook() argument
372 for (i = 0; i < size; i++) { in memcg_slab_post_alloc_hook()
[all …]
/mm/kfence/
kfence_test.c
169 static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags, in setup_test_cache() argument
173 return size; in setup_test_cache()
175 kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor); in setup_test_cache()
183 test_cache = kmem_cache_create("test", size, 1, flags, ctor); in setup_test_cache()
186 return size; in setup_test_cache()
198 static inline size_t kmalloc_cache_alignment(size_t size) in kmalloc_cache_alignment() argument
200 return kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)]->align; in kmalloc_cache_alignment()
227 static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) in test_alloc() argument
248 kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp, in test_alloc()
266 alloc = kmalloc(size, gfp); in test_alloc()
[all …]
