References to size_t under mm/, grouped by file (line number, matching source line, enclosing function):

/mm/kasan/
D  kasan.c
  56  static void kasan_poison_shadow(const void *address, size_t size, u8 value)  in kasan_poison_shadow()
  66  void kasan_unpoison_shadow(const void *address, size_t size)  in kasan_unpoison_shadow()
  79  size_t size = sp - base;  in __kasan_unpoison_stack()
  111  size_t size = watermark - sp;  in kasan_unpoison_stack_above_sp_to()
  227  size_t size)  in bytes_is_zero()
  269  size_t size)  in memory_is_poisoned_n()
  287  static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)  in memory_is_poisoned()
  310  size_t size, bool write,  in check_memory_region_inline()
  329  size_t size, bool write,  in check_memory_region()
  348  void *memset(void *addr, int c, size_t len)  in memset()
  [all …]

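The kasan.c entries above all manipulate KASAN's shadow memory, where one shadow byte describes the state of eight bytes of kernel address space. The sketch below shows only that address arithmetic; the constants, the mem_to_shadow()/poison_shadow() names and the user-space types are illustrative stand-ins for the kernel's KASAN_SHADOW_* machinery, and partial (sub-8-byte) granules are ignored.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative values; the real scale/offset are architecture-specific. */
    #define SHADOW_SCALE_SHIFT 3          /* 8 bytes of memory per shadow byte */
    #define SHADOW_OFFSET      0xdffffc0000000000UL

    /* Map an address to its shadow byte (cf. kasan_mem_to_shadow()). */
    static inline uint8_t *mem_to_shadow(const void *addr)
    {
        return (uint8_t *)(((uintptr_t)addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET);
    }

    /* Poisoning a range = tagging its shadow bytes; 0 means "fully accessible". */
    static void poison_shadow(const void *addr, size_t size, uint8_t value)
    {
        uint8_t *shadow = mem_to_shadow(addr);
        size_t i;

        for (i = 0; i < (size >> SHADOW_SCALE_SHIFT); i++)
            shadow[i] = value;
    }
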
D  kasan.h
  34  size_t access_size;
  49  size_t size; /* Size of the global variable. */
  50  size_t size_with_redzone; /* Size of the variable + size of the red zone. 32 bytes aligned */
  99  void kasan_report(unsigned long addr, size_t size,

D  quarantine.c
  44  size_t bytes;
  61  size_t size)  in qlist_put()
  214  size_t total_size, new_quarantine_size, percpu_quarantines;  in quarantine_reduce()
  246  WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,  in quarantine_reduce()

D  report.c
  42  static const void *find_first_bad_addr(const void *addr, size_t size)  in find_first_bad_addr()
  393  void kasan_report(unsigned long addr, size_t size,  in kasan_report()
  437  void __asan_report_load_n_noabort(unsigned long addr, size_t size)  in __asan_report_load_n_noabort()
  443  void __asan_report_store_n_noabort(unsigned long addr, size_t size)  in __asan_report_store_n_noabort()

/mm/
D  maccess.c
  9  probe_read_common(void *dst, const void __user *src, size_t size)  in probe_read_common()
  21  probe_write_common(void __user *dst, const void *src, size_t size)  in probe_write_common()
  47  long __weak probe_kernel_read(void *dst, const void *src, size_t size)
  50  long __probe_kernel_read(void *dst, const void *src, size_t size)  in __probe_kernel_read()
  73  long __weak probe_user_read(void *dst, const void __user *src, size_t size)
  76  long __probe_user_read(void *dst, const void __user *src, size_t size)  in __probe_user_read()
  100  long __weak probe_kernel_write(void *dst, const void *src, size_t size)
  103  long __probe_kernel_write(void *dst, const void *src, size_t size)  in __probe_kernel_write()
  126  long __weak probe_user_write(void __user *dst, const void *src, size_t size)
  129  long __probe_user_write(void __user *dst, const void *src, size_t size)  in __probe_user_write()

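The probe_kernel_read()/probe_user_write() family listed above copies data to or from addresses that may fault, returning -EFAULT instead of oopsing. A minimal sketch of a caller, assuming some debugging code wants to peek at a possibly invalid kernel pointer (peek_kernel_word() is a hypothetical helper, not part of the kernel):

    #include <linux/uaccess.h>      /* probe_kernel_read() */

    /* Read one word from a kernel address that might not be mapped. */
    static unsigned long peek_kernel_word(const void *addr)
    {
        unsigned long val;

        /* Returns 0 on success, -EFAULT if the copy faulted. */
        if (probe_kernel_read(&val, addr, sizeof(val)))
            return 0;               /* caller treats 0 as "unreadable" */
        return val;
    }
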
D  slab.h
  78  struct kmem_cache *kmalloc_slab(size_t, gfp_t);
  85  extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
  88  size_t size, unsigned long flags);
  91  struct kmem_cache *find_mergeable(size_t size, size_t align,
  95  __kmem_cache_alias(const char *name, size_t size, size_t align,
  103  __kmem_cache_alias(const char *name, size_t size, size_t align,  in __kmem_cache_alias()
  164  size_t count, loff_t *ppos);
  172  void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  173  int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

D  slab_common.c
  70  static int kmem_cache_sanity_check(const char *name, size_t size)  in kmem_cache_sanity_check()
  101  static inline int kmem_cache_sanity_check(const char *name, size_t size)  in kmem_cache_sanity_check()
  107  void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)  in __kmem_cache_free_bulk()
  109  size_t i;  in __kmem_cache_free_bulk()
  115  int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,  in __kmem_cache_alloc_bulk()
  118  size_t i;  in __kmem_cache_alloc_bulk()
  248  struct kmem_cache *find_mergeable(size_t size, size_t align,  in find_mergeable()
  323  size_t object_size, size_t size, size_t align,  in create_cache()
  387  kmem_cache_create(const char *name, size_t size, size_t align,  in kmem_cache_create()
  771  void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,  in create_boot_cache()
  [all …]

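kmem_cache_create() (line 387 above) is the public entry point for setting up a named slab cache. A short sketch of typical usage; struct foo, foo_cache and the helper names are hypothetical:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct foo {
        int  id;
        char name[32];
    };

    static struct kmem_cache *foo_cache;

    static int foo_cache_init(void)
    {
        /* name, object size, alignment, flags, constructor */
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                      SLAB_HWCACHE_ALIGN, NULL);
        return foo_cache ? 0 : -ENOMEM;
    }

    static void foo_cache_use(void)
    {
        struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

        if (f)
            kmem_cache_free(foo_cache, f);
    }

    static void foo_cache_exit(void)
    {
        kmem_cache_destroy(foo_cache);
    }
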
D  percpu.c
  128  static size_t pcpu_chunk_struct_size __read_mostly;
  144  static const size_t *pcpu_group_sizes __read_mostly;
  298  static void *pcpu_mem_zalloc(size_t size)  in pcpu_mem_zalloc()
  316  static void pcpu_mem_free(void *ptr, size_t size)  in pcpu_mem_free()
  447  size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);  in pcpu_extend_area_map()
  871  static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,  in pcpu_alloc()
  1066  void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)  in __alloc_percpu_gfp()
  1079  void __percpu *__alloc_percpu(size_t size, size_t align)  in __alloc_percpu()
  1101  void __percpu *__alloc_reserved_percpu(size_t size, size_t align)  in __alloc_reserved_percpu()
  1303  const size_t static_size = __per_cpu_end - __per_cpu_start;  in is_kernel_percpu_address()
  [all …]

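__alloc_percpu()/__alloc_percpu_gfp() above are normally reached through the typed alloc_percpu() wrapper. A sketch of a hypothetical per-CPU hit counter (the names are illustrative):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static unsigned long __percpu *hits;

    static int hits_init(void)
    {
        hits = alloc_percpu(unsigned long);   /* one slot per possible CPU */
        return hits ? 0 : -ENOMEM;
    }

    static void hits_record(void)
    {
        this_cpu_inc(*hits);                  /* update this CPU's slot, no locking */
    }

    static unsigned long hits_total(void)
    {
        unsigned long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            sum += *per_cpu_ptr(hits, cpu);
        return sum;
    }

    static void hits_exit(void)
    {
        free_percpu(hits);
    }
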
D  dmapool.c
  48  size_t size;
  50  size_t allocation;
  51  size_t boundary;
  132  size_t size, size_t align, size_t boundary)  in dma_pool_create()
  135  size_t allocation;  in dma_pool_create()
  151  allocation = max_t(size_t, size, PAGE_SIZE);  in dma_pool_create()
  326  size_t offset;  in dma_pool_alloc()
  505  size_t size, size_t align, size_t allocation)  in dmam_pool_create()

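dma_pool_create()/dma_pool_alloc() above manage small fixed-size blocks of coherent DMA memory carved out of whole pages. A sketch of a hypothetical driver keeping 64-byte hardware descriptors in such a pool (struct my_dev and the helpers are made up for illustration):

    #include <linux/dmapool.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    struct my_dev {
        struct dma_pool *desc_pool;
    };

    static int my_dev_setup(struct device *dev, struct my_dev *md)
    {
        /* name, device, block size, alignment, boundary (0 = none) */
        md->desc_pool = dma_pool_create("my-descs", dev, 64, 64, 0);
        return md->desc_pool ? 0 : -ENOMEM;
    }

    static void *my_dev_get_desc(struct my_dev *md, dma_addr_t *dma)
    {
        /* returns the CPU address and fills *dma with the bus address */
        return dma_pool_alloc(md->desc_pool, GFP_KERNEL, dma);
    }

    static void my_dev_put_desc(struct my_dev *md, void *desc, dma_addr_t dma)
    {
        dma_pool_free(md->desc_pool, desc, dma);
    }
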
D  slob.c
  217  static void *slob_page_alloc(struct page *sp, size_t size, int align)  in slob_page_alloc()
  268  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)  in slob_alloc()
  427  __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)  in __do_kmalloc_node()
  430  int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);  in __do_kmalloc_node()
  465  void *__kmalloc(size_t size, gfp_t gfp)  in __kmalloc()
  471  void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)  in __kmalloc_track_caller()
  478  void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,  in __kmalloc_node_track_caller()
  498  int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);  in kfree()
  507  size_t ksize(const void *block)  in ksize()
  521  align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);  in ksize()
  [all …]

D  util.c
  46  size_t len;  in kstrdup()
  86  char *kstrndup(const char *s, size_t max, gfp_t gfp)  in kstrndup()
  88  size_t len;  in kstrndup()
  111  void *kmemdup(const void *src, size_t len, gfp_t gfp)  in kmemdup()
  128  char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)  in kmemdup_nul()
  152  void *memdup_user(const void __user *src, size_t len)  in memdup_user()
  378  void __user *buffer, size_t *lenp,  in overcommit_ratio_handler()
  390  void __user *buffer, size_t *lenp,  in overcommit_kbytes_handler()

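kstrdup(), kmemdup() and memdup_user() above are small kmalloc-backed duplication helpers, released with kfree(). A sketch of how they pair up; copy_example() and its parameters are hypothetical:

    #include <linux/string.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    static int copy_example(const char *label, const void __user *ubuf, size_t ulen)
    {
        char *name;
        void *data;

        name = kstrdup(label, GFP_KERNEL);
        if (!name)
            return -ENOMEM;

        /* memdup_user() returns an ERR_PTR on fault or allocation failure. */
        data = memdup_user(ubuf, ulen);
        if (IS_ERR(data)) {
            kfree(name);
            return PTR_ERR(data);
        }

        /* ... use name/data ... */
        kfree(data);
        kfree(name);
        return 0;
    }
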
D  slab.c
  285  #define OFF_SLAB_MIN_SIZE (max_t(size_t, PAGE_SIZE >> 5, KMALLOC_MIN_SIZE + 1))
  468  static size_t calculate_freelist_size(int nr_objs, size_t align)  in calculate_freelist_size()
  470  size_t freelist_size;  in calculate_freelist_size()
  479  static int calculate_nr_objs(size_t slab_size, size_t buffer_size,  in calculate_nr_objs()
  480  size_t idx_size, size_t align)  in calculate_nr_objs()
  483  size_t remained_size;  in calculate_nr_objs()
  484  size_t freelist_size;  in calculate_nr_objs()
  511  static void cache_estimate(unsigned long gfporder, size_t buffer_size,  in cache_estimate()
  512  size_t align, int flags, size_t *left_over,  in cache_estimate()
  516  size_t mgmt_size;  in cache_estimate()
  [all …]

D  process_vm_access.c
  35  size_t len,  in process_vm_rw_pages()
  42  size_t copy = PAGE_SIZE - offset;  in process_vm_rw_pages()
  43  size_t copied;  in process_vm_rw_pages()
  103  size_t bytes;  in process_vm_rw_single_vec()
  159  size_t total_len = iov_iter_count(iter);  in process_vm_rw_core()
  182  process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,  in process_vm_rw_core()

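process_vm_rw_core() above backs the process_vm_readv()/process_vm_writev() syscalls, which copy memory directly between two processes. A user-space sketch of the read side (read_remote() is a hypothetical wrapper):

    #define _GNU_SOURCE
    #include <sys/types.h>
    #include <sys/uio.h>        /* process_vm_readv() */

    static int read_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
    {
        struct iovec local  = { .iov_base = buf,         .iov_len = len };
        struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

        /* returns the number of bytes copied, or -1 on error */
        return process_vm_readv(pid, &local, 1, &remote, 1, 0) == (ssize_t)len ? 0 : -1;
    }
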
D  mempool.c
  24  static void poison_error(mempool_t *pool, void *element, size_t size,  in poison_error()
  25  size_t byte)  in poison_error()
  41  static void __check_element(mempool_t *pool, void *element, size_t size)  in __check_element()
  44  size_t i;  in __check_element()
  73  static void __poison_element(void *element, size_t size)  in __poison_element()
  466  size_t size = (size_t)pool_data;  in mempool_kmalloc()

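mempool_kmalloc() above is one of the stock backends for mempools, which keep a minimum number of elements pre-allocated so that allocation can always make forward progress. A sketch using the kmalloc-backed pool helper; io_pool and the sizes are chosen only for illustration:

    #include <linux/mempool.h>
    #include <linux/errno.h>

    static mempool_t *io_pool;

    static int io_pool_init(void)
    {
        /* at least 4 pre-allocated 256-byte buffers, backed by mempool_kmalloc() */
        io_pool = mempool_create_kmalloc_pool(4, 256);
        return io_pool ? 0 : -ENOMEM;
    }

    static void io_pool_use(void)
    {
        void *buf = mempool_alloc(io_pool, GFP_NOIO);

        if (buf)
            mempool_free(buf, io_pool);
    }

    static void io_pool_exit(void)
    {
        mempool_destroy(io_pool);
    }
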
D  kmemleak.c
  134  size_t size;
  158  size_t size;
  263  size_t size; /* memory block size */
  305  size_t len;  in hex_dump_object()
  308  len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);  in hex_dump_object()
  538  static struct kmemleak_object *create_object(unsigned long ptr, size_t size,  in create_object()
  665  static void delete_object_part(unsigned long ptr, size_t size)  in delete_object_part()
  752  static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)  in add_scan_area()
  818  static void __init log_early(int op_type, const void *ptr, size_t size,  in log_early()
  908  void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,  in kmemleak_alloc()
  [all …]

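kmemleak_alloc() above is the hook that registers an object with the leak scanner; the slab allocators call it automatically, but code that hands out memory by other means has to pair kmemleak_alloc()/kmemleak_free() itself. A sketch, assuming a hypothetical page-based allocator:

    #include <linux/kmemleak.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *my_page_alloc(size_t size)
    {
        void *ptr = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

        if (ptr)
            kmemleak_alloc(ptr, size, 1, GFP_KERNEL);  /* min_count = 1 reference */
        return ptr;
    }

    static void my_page_free(void *ptr, size_t size)
    {
        kmemleak_free(ptr);
        free_pages((unsigned long)ptr, get_order(size));
    }
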
D  mlock.c
  562  static int apply_vma_lock_flags(unsigned long start, size_t len,  in apply_vma_lock_flags()
  611  static int do_mlock(unsigned long start, size_t len, vm_flags_t flags)  in do_mlock()
  647  SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)  in SYSCALL_DEFINE2() argument
  652  SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)  in SYSCALL_DEFINE3() argument
  665  SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)  in SYSCALL_DEFINE2() argument
  771  int user_shm_lock(size_t size, struct user_struct *user)  in user_shm_lock()
  793  void user_shm_unlock(size_t size, struct user_struct *user)  in user_shm_unlock()

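The mlock/mlock2/munlock syscall definitions above are driven from user space through the usual libc wrappers, roughly like this (error handling trimmed):

    #include <stdlib.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        void *buf = malloc(len);

        if (!buf)
            return 1;

        if (mlock(buf, len) != 0)      /* pin the pages; limited by RLIMIT_MEMLOCK */
            return 1;

        /* ... use memory that must not be paged out (e.g. key material) ... */

        munlock(buf, len);
        free(buf);
        return 0;
    }
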
D  slub.c
  279  static inline size_t slab_ksize(const struct kmem_cache *s)  in slab_ksize()
  1312  static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)  in kmalloc_large_node_hook()
  1338  size_t size, void **p)  in slab_post_alloc_hook()
  1340  size_t i;  in slab_post_alloc_hook()
  2646  void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)  in kmem_cache_alloc_trace()
  2671  int node, size_t size)  in kmem_cache_alloc_node_trace()
  2917  int build_detached_freelist(struct kmem_cache *s, size_t size,  in build_detached_freelist()
  2920  size_t first_skipped_index = 0;  in build_detached_freelist()
  2973  void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)  in kmem_cache_free_bulk()
  2991  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,  in kmem_cache_alloc_bulk()
  [all …]

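kmem_cache_alloc_bulk()/kmem_cache_free_bulk() above allocate and free whole batches of objects in one call, amortizing the per-object fast-path overhead. A sketch of a caller; bulk_demo() is hypothetical, and on failure the bulk allocator returns 0 with nothing left allocated:

    #include <linux/slab.h>
    #include <linux/kernel.h>       /* ARRAY_SIZE() */
    #include <linux/errno.h>

    static int bulk_demo(struct kmem_cache *cache)
    {
        void *objs[16];

        if (kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs) == 0)
            return -ENOMEM;

        /* ... use objs[0..15] ... */

        kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
        return 0;
    }
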
D  kmemcheck.c
  60  size_t size)  in kmemcheck_slab_alloc()
  92  void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)  in kmemcheck_slab_free()

D  zbud.c
  159  static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,  in zbud_zpool_malloc()
  229  static int size_to_chunks(size_t size)  in size_to_chunks()
  353  int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,  in zbud_alloc()

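zbud_alloc() above packs up to two compressed buffers into a single page and returns an opaque handle. A rough sketch of the API as a zswap/zpool-style caller might use it; the exact prototypes (notably the ops argument of zbud_create_pool()) vary between kernel versions, so treat the details here as assumptions:

    #include <linux/zbud.h>
    #include <linux/errno.h>

    static int zbud_demo(void)
    {
        struct zbud_pool *pool;
        unsigned long handle;
        void *mem;

        pool = zbud_create_pool(GFP_KERNEL, NULL);  /* NULL ops: no reclaim callback */
        if (!pool)
            return -ENOMEM;

        if (zbud_alloc(pool, 512, GFP_KERNEL, &handle)) {
            zbud_destroy_pool(pool);
            return -ENOMEM;
        }

        mem = zbud_map(pool, handle);   /* get a usable address for this buddy */
        /* ... copy compressed data into mem ... */
        zbud_unmap(pool, handle);

        zbud_free(pool, handle);
        zbud_destroy_pool(pool);
        return 0;
    }
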
D  page_idle.c
  117  loff_t pos, size_t count)  in page_idle_bitmap_read()
  162  loff_t pos, size_t count)  in page_idle_bitmap_write()

D  huge_memory.c
  254  const char *buf, size_t count,  in double_flag_store()
  285  const char *buf, size_t count)  in enabled_store()
  319  const char *buf, size_t count,  in single_flag_store()
  353  const char *buf, size_t count)  in defrag_store()
  369  struct kobj_attribute *attr, const char *buf, size_t count)  in use_zero_page_store()
  385  const char *buf, size_t count)  in debug_cow_store()
  417  const char *buf, size_t count)  in scan_sleep_millisecs_store()
  444  const char *buf, size_t count)  in alloc_sleep_millisecs_store()
  470  const char *buf, size_t count)  in pages_to_scan_store()
  513  const char *buf, size_t count)  in khugepaged_defrag_store()
  [all …]

D  page_owner.c
  99  print_page_owner(char __user *buf, size_t count, unsigned long pfn,  in print_page_owner()
  166  read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)  in read_page_owner()

D  page_ext.c
  192  static void *__meminit alloc_page_ext(size_t size, int nid)  in alloc_page_ext()
  253  size_t table_size;  in free_page_ext()

D  nommu.c
  498  struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)  in alloc_vm_area()
  1611  int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)  in do_munmap()
  1671  int vm_munmap(unsigned long addr, size_t len)  in vm_munmap()
  1683  SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)  in SYSCALL_DEFINE2() argument
  2017  int nommu_shrink_inode_mappings(struct inode *inode, size_t size,  in nommu_shrink_inode_mappings()
  2018  size_t newsize)  in nommu_shrink_inode_mappings()
  2023  size_t r_size, r_top;  in nommu_shrink_inode_mappings()

D  backing-dev.c
  146  const char *buf, size_t count)  in read_ahead_kb_store()
  176  struct device_attribute *attr, const char *buf, size_t count)  in min_ratio_store()
  195  struct device_attribute *attr, const char *buf, size_t count)  in max_ratio_store()
  1051  void __user *buffer, size_t *lenp, loff_t *ppos)  in pdflush_proc_obsolete()