/mm/
util.c
     49  size_t len;  in kstrdup() local
     55  len = strlen(s) + 1;  in kstrdup()
     56  buf = kmalloc_track_caller(len, gfp);  in kstrdup()
     58  memcpy(buf, s, len);  in kstrdup()
     91  size_t len;  in kstrndup() local
     97  len = strnlen(s, max);  in kstrndup()
     98  buf = kmalloc_track_caller(len+1, gfp);  in kstrndup()
    100  memcpy(buf, s, len);  in kstrndup()
    101  buf[len] = '\0';  in kstrndup()
    114  void *kmemdup(const void *src, size_t len, gfp_t gfp)  in kmemdup() argument
    [all …]
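The kstrdup()/kstrndup() hits above all follow the same duplicate-and-terminate pattern: measure with strlen()/strnlen(), allocate one extra byte, copy, and write the trailing '\0'. A minimal userspace sketch of that pattern (dup_bounded() is a made-up helper name, and plain malloc() stands in for kmalloc_track_caller(), which only exists in the kernel):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Bounded duplicate in the style of kstrndup(): copy at most max bytes
     * of s and always NUL-terminate the result. */
    char *dup_bounded(const char *s, size_t max)
    {
        size_t len;
        char *buf;

        if (!s)
            return NULL;

        len = strnlen(s, max);     /* never reads past max bytes   */
        buf = malloc(len + 1);     /* +1 for the terminating '\0'  */
        if (buf) {
            memcpy(buf, s, len);
            buf[len] = '\0';
        }
        return buf;
    }

    int main(void)
    {
        char *copy = dup_bounded("a rather long string", 8);

        printf("%s\n", copy);      /* prints "a rather" */
        free(copy);
        return 0;
    }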
nommu.c
    863  unsigned long len)  in find_vma_exact() argument
    866  unsigned long end = addr + len;  in find_vma_exact()
    895  unsigned long len,  in validate_mmap_request() argument
    912  if (!len)  in validate_mmap_request()
    916  rlen = PAGE_ALIGN(len);  in validate_mmap_request()
   1115  unsigned long len,  in do_mmap_private() argument
   1147  order = get_order(len);  in do_mmap_private()
   1149  point = len >> PAGE_SHIFT;  in do_mmap_private()
   1163  region->vm_end = region->vm_start + len;  in do_mmap_private()
   1167  vma->vm_end = region->vm_start + len;  in do_mmap_private()
    [all …]
mmap.c
     58  #define arch_mmap_check(addr, len, flags) (0)  argument
   1306  unsigned long len)  in mlock_future_check() argument
   1312  locked = len >> PAGE_SHIFT;  in mlock_future_check()
   1339  unsigned long pgoff, unsigned long len)  in file_mmap_ok() argument
   1343  if (maxsize && len > maxsize)  in file_mmap_ok()
   1345  maxsize -= len;  in file_mmap_ok()
   1355  unsigned long len, unsigned long prot,  in do_mmap() argument
   1365  if (!len)  in do_mmap()
   1382  len = PAGE_ALIGN(len);  in do_mmap()
   1383  if (!len)  in do_mmap()
    [all …]
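The do_mmap() lines show a common guard: round the length up with PAGE_ALIGN() and treat a zero result as overflow, since only a length within a page of ULONG_MAX can wrap to zero when rounded. A small sketch of that check, assuming 4 KiB pages and the usual PAGE_ALIGN() definition (redefined locally; these macros are not exported to userspace):

    #include <stdio.h>
    #include <limits.h>

    #define PAGE_SIZE     4096UL                     /* assumed page size */
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long ok  = 5000;                    /* normal request        */
        unsigned long bad = ULONG_MAX - 100;         /* wraps when rounded up */

        printf("PAGE_ALIGN(%lu) = %lu\n", ok, PAGE_ALIGN(ok));   /* 8192 */

        /* Mirrors the do_mmap() check: a zero result after rounding means
         * the addition overflowed, so the request must be rejected. */
        if (PAGE_ALIGN(bad) == 0)
            printf("len near ULONG_MAX rounds to 0 -> reject\n");
        return 0;
    }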
mlock.c
    576  static int apply_vma_lock_flags(unsigned long start, size_t len,  in apply_vma_lock_flags() argument
    584  VM_BUG_ON(len != PAGE_ALIGN(len));  in apply_vma_lock_flags()
    585  end = start + len;  in apply_vma_lock_flags()
    633  unsigned long start, size_t len)  in count_mm_mlocked_page_nr() argument
    648  if (start + len <= vma->vm_start)  in count_mm_mlocked_page_nr()
    653  if (start + len < vma->vm_end) {  in count_mm_mlocked_page_nr()
    654  count += start + len - vma->vm_start;  in count_mm_mlocked_page_nr()
    664  static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)  in do_mlock() argument
    675  len = PAGE_ALIGN(len + (offset_in_page(start)));  in do_mlock()
    680  locked = len >> PAGE_SHIFT;  in do_mlock()
    [all …]
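The do_mlock() lines fold the sub-page offset of start into len before rounding, so the page-aligned range still covers every byte the caller asked for, and then count pages with len >> PAGE_SHIFT. A userspace sketch of that arithmetic, assuming 4 KiB pages (macros redefined locally for the example):

    #include <stdio.h>

    #define PAGE_SHIFT        12                     /* assumed 4 KiB pages */
    #define PAGE_SIZE         (1UL << PAGE_SHIFT)
    #define PAGE_MASK         (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x)     (((x) + PAGE_SIZE - 1) & PAGE_MASK)
    #define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long start = 0x1010f0;   /* not page aligned              */
        unsigned long len   = 5000;       /* does not end on a page either */
        unsigned long pages;

        /* Fold the sub-page offset of 'start' into the length before
         * rounding, so the aligned range still covers every byte of
         * [start, start + len). Then count pages as do_mlock() does. */
        len   = PAGE_ALIGN(len + offset_in_page(start));
        start &= PAGE_MASK;
        pages = len >> PAGE_SHIFT;

        printf("locking %lu page(s) starting at %#lx\n", pages, start);
        return 0;
    }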
userfaultfd.c
    180  unsigned long len,  in __mcopy_atomic_hugetlb() argument
    217  if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))  in __mcopy_atomic_hugetlb()
    239  dst_start + len > dst_vma->vm_end)  in __mcopy_atomic_hugetlb()
    250  (len - copied) & (vma_hpagesize - 1)))  in __mcopy_atomic_hugetlb()
    264  while (src_addr < src_start + len) {  in __mcopy_atomic_hugetlb()
    267  BUG_ON(dst_addr >= dst_start + len);  in __mcopy_atomic_hugetlb()
    392  unsigned long len,
    439  unsigned long len,  in __mcopy_atomic() argument
    453  BUG_ON(len & ~PAGE_MASK);  in __mcopy_atomic()
    456  BUG_ON(src_start + len <= src_start);  in __mcopy_atomic()
    [all …]
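__mcopy_atomic_hugetlb() insists that dst_start and len are multiples of the huge page size, testing alignment with x & (vma_hpagesize - 1), which works because the size is a power of two. A small sketch of that mask-based alignment test, assuming a 2 MiB huge page (is_aligned() is an invented helper name):

    #include <stdio.h>

    /* For a power-of-two size, (x & (size - 1)) is the remainder of x / size,
     * so a non-zero result means x is not size-aligned. This mirrors the
     * "dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1)" test. */
    static int is_aligned(unsigned long x, unsigned long pow2_size)
    {
        return (x & (pow2_size - 1)) == 0;
    }

    int main(void)
    {
        unsigned long hpage = 2UL << 20;   /* assume a 2 MiB huge page */

        printf("%d\n", is_aligned(4UL << 20, hpage));           /* 1: aligned   */
        printf("%d\n", is_aligned((4UL << 20) + 4096, hpage));  /* 0: off by 4K */
        return 0;
    }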
process_vm_access.c
     36  size_t len,  in process_vm_rw_pages() argument
     41  while (len && iov_iter_count(iter)) {  in process_vm_rw_pages()
     46  if (copy > len)  in process_vm_rw_pages()
     47  copy = len;  in process_vm_rw_pages()
     55  len -= copied;  in process_vm_rw_pages()
     79  unsigned long len,  in process_vm_rw_single_vec() argument
     95  if (len == 0)  in process_vm_rw_single_vec()
     97  nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;  in process_vm_rw_single_vec()
    121  if (bytes > len)  in process_vm_rw_single_vec()
    122  bytes = len;  in process_vm_rw_single_vec()
    [all …]
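process_vm_rw_single_vec() counts the pages touched by an arbitrary byte range as last-page index minus first-page index plus one, which handles ranges that start or end mid-page. A worked example of the same formula, assuming a 4 KiB page size:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL   /* assumed page size */

    /* Number of pages touched by the byte range [addr, addr + len), len > 0:
     * last page index minus first page index, plus one. */
    static unsigned long pages_spanned(unsigned long addr, unsigned long len)
    {
        return (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
    }

    int main(void)
    {
        /* 100 bytes straddling a page boundary touch two pages ...        */
        printf("%lu\n", pages_spanned(4000, 100));    /* prints 2 */
        /* ... while a full page starting on a boundary touches just one.  */
        printf("%lu\n", pages_spanned(8192, 4096));   /* prints 1 */
        return 0;
    }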
fadvise.c
     29  SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)  in SYSCALL_DEFINE4() argument
     51  if (!mapping || len < 0) {  in SYSCALL_DEFINE4()
     79  endbyte = (u64)offset + (u64)len;  in SYSCALL_DEFINE4()
     80  if (!len || endbyte < len)  in SYSCALL_DEFINE4()
    194  SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)  in SYSCALL_DEFINE4() argument
    196  return sys_fadvise64_64(fd, offset, len, advice);  in SYSCALL_DEFINE4()
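fadvise64_64() computes endbyte = offset + len and rejects the request when len is zero or the sum wrapped around (endbyte < len). A sketch of that unsigned-overflow test (end_offset_valid() is a hypothetical helper, not a kernel function):

    #include <stdio.h>
    #include <stdint.h>

    /* Unsigned addition overflow check in the style of the fadvise64_64()
     * fragment above: if offset + len wrapped around, the sum is smaller
     * than either operand. */
    static int end_offset_valid(uint64_t offset, uint64_t len, uint64_t *end)
    {
        *end = offset + len;
        return len != 0 && *end >= len;
    }

    int main(void)
    {
        uint64_t end;

        printf("%d\n", end_offset_valid(100, 4096, &end));           /* 1: fine    */
        printf("%d\n", end_offset_valid(UINT64_MAX - 10, 64, &end)); /* 0: wrapped */
        return 0;
    }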
usercopy.c
     36  static noinline int check_stack_object(const void *obj, unsigned long len)  in check_stack_object() argument
     43  if (obj + len <= stack || stackend <= obj)  in check_stack_object()
     51  if (obj < stack || stackend < obj + len)  in check_stack_object()
     55  ret = arch_within_stack_frames(stack, stackend, obj, len);  in check_stack_object()
     62  static void report_usercopy(unsigned long len, bool to_user, const char *type)  in report_usercopy() argument
     66  to_user ? "from" : "to", type ? : "unknown", len);  in report_usercopy()
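check_stack_object() distinguishes three cases for a candidate [obj, obj + len) range: entirely off the stack, fully inside it, or straddling a boundary, which is the case hardened usercopy rejects. A sketch of the same classification over plain integer addresses (the bounds and the classify() helper are invented for illustration):

    #include <stdio.h>

    enum range_check { OUTSIDE, INSIDE, OVERLAPS_EDGE };

    /* Classify [obj, obj + len) against the region [lo, hi), following the
     * shape of check_stack_object(): completely outside is harmless, fully
     * inside is fine, straddling an edge is the dangerous case. */
    static enum range_check classify(unsigned long obj, unsigned long len,
                                     unsigned long lo, unsigned long hi)
    {
        if (obj + len <= lo || hi <= obj)
            return OUTSIDE;
        if (obj < lo || hi < obj + len)
            return OVERLAPS_EDGE;
        return INSIDE;
    }

    int main(void)
    {
        unsigned long lo = 0x1000, hi = 0x2000;      /* pretend stack bounds */

        printf("%d\n", classify(0x1800, 0x100, lo, hi));  /* INSIDE (1)        */
        printf("%d\n", classify(0x1f80, 0x100, lo, hi));  /* OVERLAPS_EDGE (2) */
        printf("%d\n", classify(0x3000, 0x100, lo, hi));  /* OUTSIDE (0)       */
        return 0;
    }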
shmem.c
   2020  unsigned long uaddr, unsigned long len,  in shmem_get_unmapped_area() argument
   2031  if (len > TASK_SIZE)  in shmem_get_unmapped_area()
   2035  addr = get_area(file, uaddr, len, pgoff, flags);  in shmem_get_unmapped_area()
   2043  if (addr > TASK_SIZE - len)  in shmem_get_unmapped_area()
   2048  if (len < HPAGE_PMD_SIZE)  in shmem_get_unmapped_area()
   2081  if (offset && offset + len < 2 * HPAGE_PMD_SIZE)  in shmem_get_unmapped_area()
   2086  inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;  in shmem_get_unmapped_area()
   2089  if (inflated_len < len)  in shmem_get_unmapped_area()
   2103  if (inflated_addr > TASK_SIZE - len)  in shmem_get_unmapped_area()
   2417  loff_t pos, unsigned len, unsigned flags,  in shmem_write_begin() argument
    [all …]
mprotect.c
    476  static int do_mprotect_pkey(unsigned long start, size_t len,  in do_mprotect_pkey() argument
    492  if (!len)  in do_mprotect_pkey()
    494  len = PAGE_ALIGN(len);  in do_mprotect_pkey()
    495  end = start + len;  in do_mprotect_pkey()
    597  SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,  in SYSCALL_DEFINE3() argument
    600  return do_mprotect_pkey(start, len, prot, -1);  in SYSCALL_DEFINE3()
    605  SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,  in SYSCALL_DEFINE4() argument
    608  return do_mprotect_pkey(start, len, prot, pkey);  in SYSCALL_DEFINE4()
msync.c
     32  SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)  in SYSCALL_DEFINE3() argument
     47  len = (len + ~PAGE_MASK) & PAGE_MASK;  in SYSCALL_DEFINE3()
     48  end = start + len;  in SYSCALL_DEFINE3()
slub.c
   4588  int len = 0;  in list_locations() local
   4622  if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)  in list_locations()
   4624  len += sprintf(buf + len, "%7ld ", l->count);  in list_locations()
   4627  len += sprintf(buf + len, "%pS", (void *)l->addr);  in list_locations()
   4629  len += sprintf(buf + len, "<not-available>");  in list_locations()
   4632  len += sprintf(buf + len, " age=%ld/%ld/%ld",  in list_locations()
   4637  len += sprintf(buf + len, " age=%ld",  in list_locations()
   4641  len += sprintf(buf + len, " pid=%ld-%ld",  in list_locations()
   4644  len += sprintf(buf + len, " pid=%ld",  in list_locations()
   4649  len < PAGE_SIZE - 60)  in list_locations()
    [all …]
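list_locations() builds its report by repeatedly appending with len += sprintf(buf + len, ...), checking the remaining room in the page-sized buffer before each group of fields. A userspace sketch of that accumulation pattern, using snprintf() and a small local buffer instead of a kernel page (sizes and field values are invented):

    #include <stdio.h>

    #define BUF_SIZE 256   /* stands in for PAGE_SIZE in the kernel fragment */

    int main(void)
    {
        char buf[BUF_SIZE];
        int len = 0;

        /* Append piece by piece, advancing the offset by each piece's
         * length, as list_locations() does with sprintf(buf + len, ...).
         * snprintf() with an explicit bound keeps the sketch safe. */
        len += snprintf(buf + len, sizeof(buf) - len, "%7d ", 42);
        len += snprintf(buf + len, sizeof(buf) - len, "%s", "some_symbol+0x10");
        if (len < BUF_SIZE - 60)   /* mirror the "room left?" style check */
            len += snprintf(buf + len, sizeof(buf) - len, " age=%ld", 123L);

        printf("%s (len=%d)\n", buf, len);
        return 0;
    }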
cleancache.c
    149  int len = 0, maxlen = CLEANCACHE_KEY_MAX;  in cleancache_get_key() local
    156  len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);  in cleancache_get_key()
    157  if (len <= FILEID_ROOT || len == FILEID_INVALID)  in cleancache_get_key()
mincore.c
    245  SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,  in SYSCALL_DEFINE3() argument
    257  if (!access_ok(VERIFY_READ, (void __user *) start, len))  in SYSCALL_DEFINE3()
    261  pages = len >> PAGE_SHIFT;  in SYSCALL_DEFINE3()
    262  pages += (offset_in_page(len)) != 0;  in SYSCALL_DEFINE3()
gup.c
   1242  int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)  in __mm_populate() argument
   1250  end = start + len;  in __mm_populate()
   1788  unsigned long len, end;  in gup_fast_permitted() local
   1790  len = (unsigned long) nr_pages << PAGE_SHIFT;  in gup_fast_permitted()
   1791  end = start + len;  in gup_fast_permitted()
   1803  unsigned long addr, len, end;  in __get_user_pages_fast() local
   1809  len = (unsigned long) nr_pages << PAGE_SHIFT;  in __get_user_pages_fast()
   1810  end = start + len;  in __get_user_pages_fast()
   1813  (void __user *)start, len)))  in __get_user_pages_fast()
   1856  unsigned long addr, len, end;  in get_user_pages_fast() local
    [all …]
mremap.c
    124  unsigned long len = old_end - old_addr;  in move_ptes() local
    185  flush_tlb_range(vma, old_end - len, old_end);  in move_ptes()
    198  unsigned long new_addr, unsigned long len,  in move_page_tables() argument
    206  old_end = old_addr + len;  in move_page_tables()
    256  return len + old_addr - old_end; /* how much done */  in move_page_tables()
madvise.c
    798  size_t len;  in SYSCALL_DEFINE3() local
    806  len = (len_in + ~PAGE_MASK) & PAGE_MASK;  in SYSCALL_DEFINE3()
    809  if (len_in && !len)  in SYSCALL_DEFINE3()
    812  end = start + len;  in SYSCALL_DEFINE3()
memory.c
   2146  int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)  in vm_iomap_memory() argument
   2151  if (start + len < start)  in vm_iomap_memory()
   2158  len += start & ~PAGE_MASK;  in vm_iomap_memory()
   2160  pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;  in vm_iomap_memory()
   4404  void *buf, int len, int write)  in generic_access_phys() argument
   4414  maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);  in generic_access_phys()
   4419  memcpy_toio(maddr + offset, buf, len);  in generic_access_phys()
   4421  memcpy_fromio(buf, maddr + offset, len);  in generic_access_phys()
   4424  return len;  in generic_access_phys()
   4434  unsigned long addr, void *buf, int len, unsigned int gup_flags)  in __access_remote_vm() argument
    [all …]
mempolicy.c
   1187  static long do_mbind(unsigned long start, unsigned long len,  in do_mbind() argument
   1208  len = (len + PAGE_SIZE - 1) & PAGE_MASK;  in do_mbind()
   1209  end = start + len;  in do_mbind()
   1231  start, start + len, mode, mode_flags,  in do_mbind()
   1367  SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,  in SYSCALL_DEFINE6() argument
   1385  return do_mbind(start, len, mode, mode_flags, &nodes, flags);  in SYSCALL_DEFINE6()
   1586  COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,  in COMPAT_SYSCALL_DEFINE6() argument
   1605  return sys_mbind(start, len, mode, nm, nr_bits+1, flags);  in COMPAT_SYSCALL_DEFINE6()
readahead.c
    596  unsigned long len = end - start + 1;  in SYSCALL_DEFINE3() local
    597  ret = do_readahead(mapping, f.file, start, len);  in SYSCALL_DEFINE3()
kmemleak.c
    313  size_t len;  in hex_dump_object() local
    316  len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);  in hex_dump_object()
    318  seq_printf(seq, " hex dump (first %zu bytes):\n", len);  in hex_dump_object()
    321  HEX_GROUP_SIZE, ptr, len, HEX_ASCII);  in hex_dump_object()
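hex_dump_object() clamps the dump length to min(object size, HEX_MAX_LINES * HEX_ROW_SIZE) so a huge object cannot flood the log, then prints only that many bytes. A rough userspace equivalent (the constants and the hex_dump() helper are invented for illustration; the kernel uses its own hex dump helpers):

    #include <stdio.h>
    #include <stddef.h>

    #define HEX_ROW_SIZE  16    /* bytes per output line             */
    #define HEX_MAX_LINES 2     /* cap the dump at two lines         */

    /* Dump at most HEX_MAX_LINES * HEX_ROW_SIZE bytes of an object,
     * clamping the length first as hex_dump_object() does with min_t(). */
    static void hex_dump(const unsigned char *obj, size_t size)
    {
        size_t len = size < HEX_MAX_LINES * HEX_ROW_SIZE
                        ? size : HEX_MAX_LINES * HEX_ROW_SIZE;

        printf("  hex dump (first %zu bytes):\n", len);
        for (size_t i = 0; i < len; i++)
            printf("%02x%c", (unsigned)obj[i],
                   (i % HEX_ROW_SIZE == HEX_ROW_SIZE - 1) ? '\n' : ' ');
        if (len % HEX_ROW_SIZE)
            putchar('\n');
    }

    int main(void)
    {
        unsigned char blob[100];

        for (size_t i = 0; i < sizeof(blob); i++)
            blob[i] = (unsigned char)i;
        hex_dump(blob, sizeof(blob));   /* prints only the first 32 bytes */
        return 0;
    }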
zswap.c
    967  unsigned int dlen = PAGE_SIZE, len;  in zswap_frontswap_store() local
   1031  len = dlen + sizeof(struct zswap_header);  in zswap_frontswap_store()
   1032  ret = zpool_malloc(entry->pool->zpool, len,  in zswap_frontswap_store()
huge_memory.c
    498  unsigned long addr, unsigned long len,  in __thp_get_unmapped_area() argument
    501  loff_t off_end = off + len;  in __thp_get_unmapped_area()
    508  len_pad = len + size;  in __thp_get_unmapped_area()
    509  if (len_pad < len || (off + len_pad) < off)  in __thp_get_unmapped_area()
    534  unsigned long len, unsigned long pgoff, unsigned long flags)  in thp_get_unmapped_area() argument
    542  ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);  in thp_get_unmapped_area()
    546  return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);  in thp_get_unmapped_area()
hugetlb.c
   2417  unsigned long count, size_t len)  in __nr_hugepages_store_common() argument
   2451  return len;  in __nr_hugepages_store_common()
   2459  size_t len)  in nr_hugepages_store_common() argument
   2471  return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);  in nr_hugepages_store_common()
   2481  struct kobj_attribute *attr, const char *buf, size_t len)  in nr_hugepages_store() argument
   2483  return nr_hugepages_store_common(false, kobj, buf, len);  in nr_hugepages_store()
   2500  struct kobj_attribute *attr, const char *buf, size_t len)  in nr_hugepages_mempolicy_store() argument
   2502  return nr_hugepages_store_common(true, kobj, buf, len);  in nr_hugepages_mempolicy_store()
/mm/kasan/
kasan.c
    283  void *memset(void *addr, int c, size_t len)  in memset() argument
    285  check_memory_region((unsigned long)addr, len, true, _RET_IP_);  in memset()
    287  return __memset(addr, c, len);  in memset()
    291  void *memmove(void *dest, const void *src, size_t len)  in memmove() argument
    293  check_memory_region((unsigned long)src, len, false, _RET_IP_);  in memmove()
    294  check_memory_region((unsigned long)dest, len, true, _RET_IP_);  in memmove()
    296  return __memmove(dest, src, len);  in memmove()
    300  void *memcpy(void *dest, const void *src, size_t len)  in memcpy() argument
    302  check_memory_region((unsigned long)src, len, false, _RET_IP_);  in memcpy()
    303  check_memory_region((unsigned long)dest, len, true, _RET_IP_);  in memcpy()
    [all …]
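These KASAN wrappers validate each byte range with check_memory_region() — the source as a read, the destination as a write — before handing off to the real __memset()/__memmove()/__memcpy(). A userspace sketch of that interposition shape (check_region() and checked_memcpy() are stand-ins that only log; real KASAN consults shadow memory):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for KASAN's check_memory_region(): here it only
     * logs the access; the real thing validates against shadow memory. */
    static void check_region(const void *addr, size_t len, int write)
    {
        (void)addr;   /* a real checker would inspect this address range */
        printf("checked %s of %zu bytes\n", write ? "write" : "read", len);
    }

    /* Wrapper in the shape of the kasan.c memcpy() above: validate the source
     * as a read and the destination as a write, then do the actual copy. */
    static void *checked_memcpy(void *dest, const void *src, size_t len)
    {
        check_region(src, len, 0);
        check_region(dest, len, 1);
        return memcpy(dest, src, len);
    }

    int main(void)
    {
        char a[8] = "abcdefg", b[8];

        checked_memcpy(b, a, sizeof(a));
        printf("copied: %s\n", b);
        return 0;
    }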