
Searched refs:len (Results 1 – 25 of 27) sorted by relevance


/mm/
util.c
53 size_t len; in kstrdup() local
59 len = strlen(s) + 1; in kstrdup()
60 buf = kmalloc_track_caller(len, gfp); in kstrdup()
62 memcpy(buf, s, len); in kstrdup()
98 size_t len; in kstrndup() local
104 len = strnlen(s, max); in kstrndup()
105 buf = kmalloc_track_caller(len+1, gfp); in kstrndup()
107 memcpy(buf, s, len); in kstrndup()
108 buf[len] = '\0'; in kstrndup()
123 void *kmemdup(const void *src, size_t len, gfp_t gfp) in kmemdup() argument
[all …]
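
The util.c hits show the kstrdup()/kstrndup() shape: measure the string (bounded by max in the kstrndup() case), allocate length + 1, copy, and NUL-terminate. A minimal userspace sketch of that bounded-duplication pattern, using malloc() and strnlen() in place of the kernel helpers (my_strndup is a made-up name):

#include <stdlib.h>
#include <string.h>

/* Bounded string duplication in the shape of the kstrndup() lines above:
 * measure at most max bytes, allocate len + 1, copy, NUL-terminate. */
static char *my_strndup(const char *s, size_t max)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);          /* never reads past max bytes */
        buf = malloc(len + 1);          /* +1 for the terminating '\0' */
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
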
nommu.c
750 unsigned long len) in find_vma_exact() argument
753 unsigned long end = addr + len; in find_vma_exact()
782 unsigned long len, in validate_mmap_request() argument
799 if (!len) in validate_mmap_request()
803 rlen = PAGE_ALIGN(len); in validate_mmap_request()
1002 unsigned long len, in do_mmap_private() argument
1034 order = get_order(len); in do_mmap_private()
1036 point = len >> PAGE_SHIFT; in do_mmap_private()
1050 region->vm_end = region->vm_start + len; in do_mmap_private()
1054 vma->vm_end = region->vm_start + len; in do_mmap_private()
[all …]
fadvise.c
30 int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) in generic_fadvise() argument
45 if (!mapping || len < 0) in generic_fadvise()
71 endbyte = (u64)offset + (u64)len; in generic_fadvise()
72 if (!len || endbyte < len) in generic_fadvise()
182 int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice) in vfs_fadvise() argument
185 return file->f_op->fadvise(file, offset, len, advice); in vfs_fadvise()
187 return generic_fadvise(file, offset, len, advice); in vfs_fadvise()
193 int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) in ksys_fadvise64_64() argument
201 ret = vfs_fadvise(f.file, offset, len, advice); in ksys_fadvise64_64()
207 SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) in SYSCALL_DEFINE4() argument
[all …]
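
Lines 71–72 of the fadvise.c hit reject a zero-length or wrapping byte range: after endbyte = offset + len, a wrapped sum is necessarily smaller than len. A small sketch of that check, assuming 64-bit unsigned arithmetic:

#include <stdbool.h>
#include <stdint.h>

/* Reject an empty or wrapping (offset, len) byte range, as the
 * generic_fadvise() excerpt does with "if (!len || endbyte < len)". */
static bool byte_range_ok(uint64_t offset, uint64_t len)
{
        uint64_t endbyte = offset + len;   /* may wrap modulo 2^64 */

        return len && endbyte >= len;      /* a wrapped sum is smaller than len */
}
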
mmap.c
59 #define arch_mmap_check(addr, len, flags) (0) argument
1349 unsigned long len) in mlock_future_check() argument
1355 locked = len >> PAGE_SHIFT; in mlock_future_check()
1385 unsigned long pgoff, unsigned long len) in file_mmap_ok() argument
1389 if (maxsize && len > maxsize) in file_mmap_ok()
1391 maxsize -= len; in file_mmap_ok()
1401 unsigned long len, unsigned long prot, in do_mmap() argument
1411 if (!len) in do_mmap()
1432 len = PAGE_ALIGN(len); in do_mmap()
1433 if (!len) in do_mmap()
[all …]
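
Lines 1432–1433 of the mmap.c hit round len up to a page boundary and then re-check for zero: a length within one page of the address-space limit wraps to 0 under PAGE_ALIGN(). A sketch of that rounding plus wrap check, assuming 4 KiB pages (a zero input length is rejected earlier in the excerpt, at line 1411):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE_SK ((size_t)4096)        /* assumed page size */

/* Mirror "len = PAGE_ALIGN(len); if (!len)" from the do_mmap() excerpt. */
static bool page_align_len(size_t *len)
{
        *len = (*len + PAGE_SIZE_SK - 1) & ~(PAGE_SIZE_SK - 1);
        return *len != 0;                  /* false: the request wrapped to zero */
}
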
mlock.c
583 static int apply_vma_lock_flags(unsigned long start, size_t len, in apply_vma_lock_flags() argument
591 VM_BUG_ON(len != PAGE_ALIGN(len)); in apply_vma_lock_flags()
592 end = start + len; in apply_vma_lock_flags()
640 unsigned long start, size_t len) in count_mm_mlocked_page_nr() argument
655 if (start + len <= vma->vm_start) in count_mm_mlocked_page_nr()
660 if (start + len < vma->vm_end) { in count_mm_mlocked_page_nr()
661 count += start + len - vma->vm_start; in count_mm_mlocked_page_nr()
671 static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) in do_mlock() argument
682 len = PAGE_ALIGN(len + (offset_in_page(start))); in do_mlock()
687 locked = len >> PAGE_SHIFT; in do_mlock()
[all …]
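
Line 682 of the mlock.c hit folds start's sub-page offset into len before aligning, so a request that straddles page boundaries still locks every page it touches; line 687 then converts the aligned length into a page count. A sketch assuming 4 KiB pages; rounding start itself down is an assumption here, not part of the excerpt:

#include <stddef.h>

#define PAGE_SZ  ((size_t)4096)            /* assumed page size */
#define PAGE_MSK (~(PAGE_SZ - 1))

/* Fold start's sub-page offset into len, round up, and count pages,
 * following the do_mlock() lines above. */
static void mlock_page_range(size_t start, size_t len,
                             size_t *page_start, size_t *nr_pages)
{
        size_t offset = start & (PAGE_SZ - 1);           /* offset_in_page(start) */

        len = (len + offset + PAGE_SZ - 1) & PAGE_MSK;   /* PAGE_ALIGN(len + offset) */
        *page_start = start & PAGE_MSK;                  /* assumed round-down of start */
        *nr_pages = len / PAGE_SZ;                       /* locked = len >> PAGE_SHIFT */
}
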
userfaultfd.c
177 unsigned long len, in __mcopy_atomic_hugetlb() argument
214 if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1)) in __mcopy_atomic_hugetlb()
236 dst_start + len > dst_vma->vm_end) in __mcopy_atomic_hugetlb()
247 (len - copied) & (vma_hpagesize - 1))) in __mcopy_atomic_hugetlb()
261 while (src_addr < src_start + len) { in __mcopy_atomic_hugetlb()
264 BUG_ON(dst_addr >= dst_start + len); in __mcopy_atomic_hugetlb()
389 unsigned long len,
436 unsigned long len, in __mcopy_atomic() argument
451 BUG_ON(len & ~PAGE_MASK); in __mcopy_atomic()
454 BUG_ON(src_start + len <= src_start); in __mcopy_atomic()
[all …]
process_vm_access.c
32 size_t len, in process_vm_rw_pages() argument
37 while (len && iov_iter_count(iter)) { in process_vm_rw_pages()
42 if (copy > len) in process_vm_rw_pages()
43 copy = len; in process_vm_rw_pages()
51 len -= copied; in process_vm_rw_pages()
75 unsigned long len, in process_vm_rw_single_vec() argument
91 if (len == 0) in process_vm_rw_single_vec()
93 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; in process_vm_rw_single_vec()
117 if (bytes > len) in process_vm_rw_single_vec()
118 bytes = len; in process_vm_rw_single_vec()
[all …]
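
Line 93 of the process_vm_access.c hit counts the pages touched by a byte range that may start and end mid-page: last byte's page index minus first byte's page index, plus one. A sketch assuming 4 KiB pages; for example, a 200-byte range starting at offset 4000 spans two pages:

#include <stddef.h>

#define PG_SIZE ((size_t)4096)             /* assumed page size */

/* Pages spanned by [addr, addr + len), as in process_vm_rw_single_vec(). */
static size_t pages_spanned(size_t addr, size_t len)
{
        if (len == 0)
                return 0;                  /* the excerpt also bails out on len == 0 */
        return (addr + len - 1) / PG_SIZE - addr / PG_SIZE + 1;
}
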
usercopy.c
34 static noinline int check_stack_object(const void *obj, unsigned long len) in check_stack_object() argument
41 if (obj + len <= stack || stackend <= obj) in check_stack_object()
49 if (obj < stack || stackend < obj + len) in check_stack_object()
53 ret = arch_within_stack_frames(stack, stackend, obj, len); in check_stack_object()
73 unsigned long offset, unsigned long len) in usercopy_warn() argument
80 offset, len); in usercopy_warn()
85 unsigned long len) in usercopy_abort() argument
92 offset, len); in usercopy_abort()
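
The usercopy.c hit classifies an object against the current stack: lines 41 and 49 distinguish "entirely outside", "fully inside", and the rejected "straddles the boundary" case. A sketch of the same containment test (the pointer comparisons copy the excerpt; arch_within_stack_frames() is left out):

#include <stdbool.h>

enum obj_where { OBJ_OUTSIDE, OBJ_INSIDE, OBJ_STRADDLES };

/* Follow the check_stack_object() comparisons above. */
static enum obj_where classify_stack_object(const char *obj, unsigned long len,
                                            const char *stack, const char *stackend)
{
        if (obj + len <= stack || stackend <= obj)
                return OBJ_OUTSIDE;        /* no overlap with [stack, stackend) */
        if (obj < stack || stackend < obj + len)
                return OBJ_STRADDLES;      /* partial overlap: rejected */
        return OBJ_INSIDE;                 /* fully contained */
}
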
memfd.c
258 long len; in SYSCALL_DEFINE2() local
271 len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1); in SYSCALL_DEFINE2()
272 if (len <= 0) in SYSCALL_DEFINE2()
274 if (len > MFD_NAME_MAX_LEN + 1) in SYSCALL_DEFINE2()
277 name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL); in SYSCALL_DEFINE2()
282 if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) { in SYSCALL_DEFINE2()
288 if (name[len + MFD_NAME_PREFIX_LEN - 1]) { in SYSCALL_DEFINE2()
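
The memfd.c hit bounds a user-supplied name with strnlen_user() (which counts the terminating NUL), allocates room for a fixed prefix plus the name, copies, and re-checks that the last byte is still NUL. A rough userspace sketch of that shape; the limit, prefix, and strnlen() stand-in are assumptions, and the final NUL re-check matters in the kernel because user memory can change between the two reads:

#include <stdlib.h>
#include <string.h>

#define NAME_MAX_SK 249            /* assumed limit, standing in for MFD_NAME_MAX_LEN */
#define PREFIX_SK   "memfd:"       /* assumed prefix */
#define PREFIX_LEN_SK (sizeof(PREFIX_SK) - 1)

/* Validate a name's length (counting the NUL, like strnlen_user) and build
 * "prefix + name"; returns NULL on a too-long or unterminated name. */
static char *build_prefixed_name(const char *uname)
{
        size_t len = strnlen(uname, NAME_MAX_SK + 1) + 1;   /* includes '\0' */
        char *name;

        if (len > NAME_MAX_SK + 1)
                return NULL;
        name = malloc(len + PREFIX_LEN_SK);
        if (!name)
                return NULL;
        memcpy(name, PREFIX_SK, PREFIX_LEN_SK);
        memcpy(name + PREFIX_LEN_SK, uname, len);
        if (name[len + PREFIX_LEN_SK - 1] != '\0') {        /* belt-and-braces here */
                free(name);
                return NULL;
        }
        return name;
}
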
mprotect.c
452 static int do_mprotect_pkey(unsigned long start, size_t len, in do_mprotect_pkey() argument
470 if (!len) in do_mprotect_pkey()
472 len = PAGE_ALIGN(len); in do_mprotect_pkey()
473 end = start + len; in do_mprotect_pkey()
575 SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, in SYSCALL_DEFINE3() argument
578 return do_mprotect_pkey(start, len, prot, -1); in SYSCALL_DEFINE3()
583 SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len, in SYSCALL_DEFINE4() argument
586 return do_mprotect_pkey(start, len, prot, pkey); in SYSCALL_DEFINE4()
msync.c
32 SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) in SYSCALL_DEFINE3() argument
49 len = (len + ~PAGE_MASK) & PAGE_MASK; in SYSCALL_DEFINE3()
50 end = start + len; in SYSCALL_DEFINE3()
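
Line 49 of the msync.c hit rounds len up to a page boundary by hand; with PAGE_MASK defined as ~(PAGE_SIZE - 1), "(len + ~PAGE_MASK) & PAGE_MASK" is the same operation as PAGE_ALIGN(len). A sketch assuming 4 KiB pages:

#include <stddef.h>

#define PAGE_SIZE_MS ((size_t)4096)        /* assumed page size */
#define PAGE_MASK_MS (~(PAGE_SIZE_MS - 1)) /* kernel-style PAGE_MASK */

/* Round a byte count up to a whole number of pages, as the msync() line does. */
static size_t round_up_to_page(size_t len)
{
        return (len + ~PAGE_MASK_MS) & PAGE_MASK_MS;
}
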
slub.c
1309 size_t len; in kmem_cache_flags() local
1315 len = strlen(name); in kmem_cache_flags()
1327 cmplen = max_t(size_t, len, (end - iter)); in kmem_cache_flags()
4614 int len = 0; in list_locations() local
4647 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) in list_locations()
4649 len += sprintf(buf + len, "%7ld ", l->count); in list_locations()
4652 len += sprintf(buf + len, "%pS", (void *)l->addr); in list_locations()
4654 len += sprintf(buf + len, "<not-available>"); in list_locations()
4657 len += sprintf(buf + len, " age=%ld/%ld/%ld", in list_locations()
4662 len += sprintf(buf + len, " age=%ld", in list_locations()
[all …]
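
The list_locations() lines in the slub.c hit build one report in a single buffer with the "len += sprintf(buf + len, ...)" idiom, checking for remaining headroom before each record. A sketch of the same accumulation with assumed buffer and headroom sizes:

#include <stdio.h>

#define BUF_SIZE_SK 4096   /* stands in for PAGE_SIZE in the excerpt */
#define HEADROOM_SK 128    /* stands in for the KSYM_SYMBOL_LEN + 100 slack */

/* Append records with len += sprintf(buf + len, ...), stopping while there is
 * still headroom; buf must hold at least BUF_SIZE_SK bytes. */
static int format_counts(char *buf, const long *counts, int n)
{
        int len = 0;

        for (int i = 0; i < n; i++) {
                if (len > BUF_SIZE_SK - HEADROOM_SK)
                        break;             /* keep room for one more record */
                len += sprintf(buf + len, "%7ld ", counts[i]);
        }
        len += sprintf(buf + len, "\n");
        return len;
}
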
shmem.c
2074 unsigned long uaddr, unsigned long len, in shmem_get_unmapped_area() argument
2085 if (len > TASK_SIZE) in shmem_get_unmapped_area()
2089 addr = get_area(file, uaddr, len, pgoff, flags); in shmem_get_unmapped_area()
2097 if (addr > TASK_SIZE - len) in shmem_get_unmapped_area()
2102 if (len < HPAGE_PMD_SIZE) in shmem_get_unmapped_area()
2135 if (offset && offset + len < 2 * HPAGE_PMD_SIZE) in shmem_get_unmapped_area()
2140 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; in shmem_get_unmapped_area()
2143 if (inflated_len < len) in shmem_get_unmapped_area()
2157 if (inflated_addr > TASK_SIZE - len) in shmem_get_unmapped_area()
2471 loff_t pos, unsigned len, unsigned flags, in shmem_write_begin() argument
[all …]
cleancache.c
148 int len = 0, maxlen = CLEANCACHE_KEY_MAX; in cleancache_get_key() local
155 len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL); in cleancache_get_key()
156 if (len <= FILEID_ROOT || len == FILEID_INVALID) in cleancache_get_key()
mincore.c
252 SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len, in SYSCALL_DEFINE3() argument
266 if (!access_ok((void __user *) start, len)) in SYSCALL_DEFINE3()
270 pages = len >> PAGE_SHIFT; in SYSCALL_DEFINE3()
271 pages += (offset_in_page(len)) != 0; in SYSCALL_DEFINE3()
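
Lines 270–271 of the mincore.c hit compute a ceiling page count: the whole pages in len, plus one more if len has a sub-page remainder. A sketch assuming 4 KiB pages (so pages_needed(4097) == 2):

#include <stddef.h>

#define PAGE_SHIFT_MC 12                          /* assumed: 4 KiB pages */
#define PAGE_SIZE_MC  ((size_t)1 << PAGE_SHIFT_MC)

/* Ceiling page count for len bytes, written the way the mincore() lines do it. */
static size_t pages_needed(size_t len)
{
        size_t pages = len >> PAGE_SHIFT_MC;

        pages += (len & (PAGE_SIZE_MC - 1)) != 0;  /* offset_in_page(len) != 0 */
        return pages;
}
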
mremap.c
124 unsigned long len = old_end - old_addr; in move_ptes() local
185 flush_tlb_range(vma, old_end - len, old_end); in move_ptes()
242 unsigned long new_addr, unsigned long len, in move_page_tables() argument
249 old_end = old_addr + len; in move_page_tables()
315 return len + old_addr - old_end; /* how much done */ in move_page_tables()
gup.c
1234 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) in __mm_populate() argument
1242 end = start + len; in __mm_populate()
2323 unsigned long len, end; in __get_user_pages_fast() local
2328 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
2329 end = start + len; in __get_user_pages_fast()
2333 if (unlikely(!access_ok((void __user *)start, len))) in __get_user_pages_fast()
2401 unsigned long addr, len, end; in get_user_pages_fast() local
2409 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
2410 end = start + len; in get_user_pages_fast()
2414 if (unlikely(!access_ok((void __user *)start, len))) in get_user_pages_fast()
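
The gup.c hits convert a page count back into a byte length by shifting, compute the end address, and validate the range before walking it. A sketch of that conversion with a simple limit check standing in for access_ok(); the explicit wrap test is an addition here, not shown in the excerpt:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT_GUP 12                          /* assumed: 4 KiB pages */
#define USER_LIMIT_SK  ((uint64_t)1 << 47)         /* assumed user-space limit */

/* len = nr_pages << PAGE_SHIFT; end = start + len; then bounds-check,
 * in the shape of the __get_user_pages_fast() lines above. */
static bool user_range_ok(uint64_t start, unsigned long nr_pages)
{
        uint64_t len = (uint64_t)nr_pages << PAGE_SHIFT_GUP;
        uint64_t end = start + len;

        if (end < start)                           /* wrapped (added check) */
                return false;
        return end <= USER_LIMIT_SK;               /* stand-in for access_ok() */
}
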
kmemleak.c
264 size_t len, bool ascii) in warn_or_seq_hex_dump() argument
268 buf, len, ascii); in warn_or_seq_hex_dump()
271 rowsize, groupsize, buf, len, ascii); in warn_or_seq_hex_dump()
284 size_t len; in hex_dump_object() local
287 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
289 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len); in hex_dump_object()
292 HEX_GROUP_SIZE, ptr, len, HEX_ASCII); in hex_dump_object()
mempolicy.c
1218 static long do_mbind(unsigned long start, unsigned long len, in do_mbind() argument
1240 len = (len + PAGE_SIZE - 1) & PAGE_MASK; in do_mbind()
1241 end = start + len; in do_mbind()
1263 start, start + len, mode, mode_flags, in do_mbind()
1405 static long kernel_mbind(unsigned long start, unsigned long len, in kernel_mbind() argument
1424 return do_mbind(start, len, mode, mode_flags, &nodes, flags); in kernel_mbind()
1427 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, in SYSCALL_DEFINE6() argument
1431 return kernel_mbind(start, len, mode, nmask, maxnode, flags); in SYSCALL_DEFINE6()
1646 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, in COMPAT_SYSCALL_DEFINE6() argument
1665 return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); in COMPAT_SYSCALL_DEFINE6()
memory.c
1989 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1994 if (start + len < start) in vm_iomap_memory()
2001 len += start & ~PAGE_MASK; in vm_iomap_memory()
2003 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; in vm_iomap_memory()
4303 void *buf, int len, int write) in generic_access_phys() argument
4313 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); in generic_access_phys()
4318 memcpy_toio(maddr + offset, buf, len); in generic_access_phys()
4320 memcpy_fromio(buf, maddr + offset, len); in generic_access_phys()
4323 return len; in generic_access_phys()
4333 unsigned long addr, void *buf, int len, unsigned int gup_flags) in __access_remote_vm() argument
[all …]
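
The vm_iomap_memory() lines in the memory.c hit first reject a wrapping physical range, then fold start's offset within its page into len and round up to a page count. A sketch assuming 4 KiB pages:

#include <stddef.h>

#define PG_SHIFT_IO 12                             /* assumed: 4 KiB pages */
#define PG_SIZE_IO  ((size_t)1 << PG_SHIFT_IO)
#define PG_MASK_IO  (~(PG_SIZE_IO - 1))

/* Page count covering [start, start + len), following the excerpt:
 * reject wraparound, include the first page's offset, round up. */
static int iomap_page_count(size_t start, size_t len, size_t *pages)
{
        if (start + len < start)
                return -1;                         /* range wraps */
        len += start & ~PG_MASK_IO;                /* offset within the first page */
        *pages = (len + ~PG_MASK_IO) >> PG_SHIFT_IO;
        return 0;
}
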
madvise.c
1054 size_t len; in SYSCALL_DEFINE3() local
1064 len = (len_in + ~PAGE_MASK) & PAGE_MASK; in SYSCALL_DEFINE3()
1067 if (len_in && !len) in SYSCALL_DEFINE3()
1070 end = start + len; in SYSCALL_DEFINE3()
huge_memory.c
531 unsigned long addr, unsigned long len, in __thp_get_unmapped_area() argument
534 loff_t off_end = off + len; in __thp_get_unmapped_area()
541 len_pad = len + size; in __thp_get_unmapped_area()
542 if (len_pad < len || (off + len_pad) < off) in __thp_get_unmapped_area()
567 unsigned long len, unsigned long pgoff, unsigned long flags) in thp_get_unmapped_area() argument
575 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE); in thp_get_unmapped_area()
579 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); in thp_get_unmapped_area()
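
Lines 541–542 of the huge_memory.c hit pad the requested length by the alignment unit (PMD_SIZE) and reject both overflow cases: the padded length wrapping, and the padded end offset wrapping. A sketch of those two checks, with the alignment passed in as a parameter:

#include <stdbool.h>
#include <stdint.h>

/* Pad len by an alignment unit and reject wraparound of either the padded
 * length or the padded end offset, as __thp_get_unmapped_area() does. */
static bool padded_len_ok(uint64_t off, uint64_t len, uint64_t align,
                          uint64_t *len_pad)
{
        *len_pad = len + align;
        if (*len_pad < len)                /* len + align overflowed */
                return false;
        if (off + *len_pad < off)          /* end offset overflowed */
                return false;
        return true;
}
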
hugetlb.c
2596 unsigned long count, size_t len) in __nr_hugepages_store_common() argument
2624 return err ? err : len; in __nr_hugepages_store_common()
2629 size_t len) in nr_hugepages_store_common() argument
2641 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
2651 struct kobj_attribute *attr, const char *buf, size_t len) in nr_hugepages_store() argument
2653 return nr_hugepages_store_common(false, kobj, buf, len); in nr_hugepages_store()
2670 struct kobj_attribute *attr, const char *buf, size_t len) in nr_hugepages_mempolicy_store() argument
2672 return nr_hugepages_store_common(true, kobj, buf, len); in nr_hugepages_mempolicy_store()
filemap.c
3123 loff_t pos, unsigned len, unsigned flags, in pagecache_write_begin() argument
3128 return aops->write_begin(file, mapping, pos, len, flags, in pagecache_write_begin()
3134 loff_t pos, unsigned len, unsigned copied, in pagecache_write_end() argument
3139 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
/mm/kasan/
common.c
103 void *memset(void *addr, int c, size_t len) in memset() argument
105 check_memory_region((unsigned long)addr, len, true, _RET_IP_); in memset()
107 return __memset(addr, c, len); in memset()
111 void *memmove(void *dest, const void *src, size_t len) in memmove() argument
113 check_memory_region((unsigned long)src, len, false, _RET_IP_); in memmove()
114 check_memory_region((unsigned long)dest, len, true, _RET_IP_); in memmove()
116 return __memmove(dest, src, len); in memmove()
120 void *memcpy(void *dest, const void *src, size_t len) in memcpy() argument
122 check_memory_region((unsigned long)src, len, false, _RET_IP_); in memcpy()
123 check_memory_region((unsigned long)dest, len, true, _RET_IP_); in memcpy()
[all …]
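
The kasan/common.c hit interposes on memset()/memmove()/memcpy(): each wrapper checks the source range as a read and the destination range as a write before delegating to the underlying __mem* routine. A userspace sketch of that interposition shape with a hypothetical, trivial checker in place of check_memory_region():

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for check_memory_region(): only rejects NULL with a
 * non-zero length; a real checker would consult shadow memory. */
static void check_region(const void *addr, size_t len, int is_write)
{
        (void)is_write;
        assert(len == 0 || addr != NULL);
}

/* Checked copy in the shape of the KASAN memcpy() excerpt: validate the
 * source as a read and the destination as a write, then do the real copy. */
static void *checked_memcpy(void *dest, const void *src, size_t len)
{
        check_region(src, len, 0);         /* source is read */
        check_region(dest, len, 1);        /* destination is written */
        return memcpy(dest, src, len);
}
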
