Searched refs:copied (Results 1 – 7 of 7) sorted by relevance
/mm/
mm/filemap_xip.c
      63  size_t copied = 0, error = 0;                            in do_xip_mapping_read() local
      93  if (nr > len - copied)                                   in do_xip_mapping_read()
      94          nr = len - copied;                               in do_xip_mapping_read()
     123  left = __copy_to_user(buf+copied, xip_mem+offset, nr);   in do_xip_mapping_read()
     125  left = __clear_user(buf + copied, nr);                   in do_xip_mapping_read()
     132  copied += (nr - left);                                   in do_xip_mapping_read()
     136  } while (copied < len);                                  in do_xip_mapping_read()
     139  *ppos = pos + copied;                                    in do_xip_mapping_read()
     143  return (copied ? copied : error);                        in do_xip_mapping_read()
     339  size_t copied;                                           in __xip_file_write() local
     [all …]
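The do_xip_mapping_read() hits above show the classic chunked-read shape: clamp each chunk to what is left, accumulate into copied, and return copied ? copied : error so a partial read is reported in preference to a late fault. A minimal userspace sketch of that shape; chunked_read() and CHUNK are invented for illustration, and memcpy() stands in for __copy_to_user(), which in the kernel can fail partway:

    #include <string.h>
    #include <sys/types.h>

    #define CHUNK 4096                          /* stand-in for one page */

    static ssize_t chunked_read(char *buf, const char *src, size_t src_len,
                                size_t len, size_t *ppos)
    {
        size_t pos = *ppos, copied = 0;
        ssize_t error = 0;

        do {
            size_t nr = CHUNK;

            if (nr > len - copied)              /* clamp the final chunk */
                nr = len - copied;
            if (pos + copied + nr > src_len) {  /* ran past the source */
                error = -1;
                break;
            }
            /* __copy_to_user() in the kernel; always succeeds here */
            memcpy(buf + copied, src + pos + copied, nr);
            copied += nr;
        } while (copied < len);

        *ppos = pos + copied;
        return copied ? (ssize_t)copied : error;
    }

The copied-before-error return is the important bit: once any bytes have reached the caller, the short count must be reported and the error deferred to the next call.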
mm/process_vm_access.c
      43  size_t copied;                                           in process_vm_rw_pages() local
      49  copied = copy_page_from_iter(page, offset, copy, iter);  in process_vm_rw_pages()
      52  copied = copy_page_to_iter(page, offset, copy, iter);    in process_vm_rw_pages()
      54  len -= copied;                                           in process_vm_rw_pages()
      55  if (copied < copy && iov_iter_count(iter))               in process_vm_rw_pages()
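Here copied serves short-copy detection: if copy_page_to_iter() moved fewer bytes than requested while the iterator still has room, the page faulted. A hedged sketch of that check, with struct cursor and copy_to_cursor() invented here as stand-ins for the iov_iter machinery:

    #include <stddef.h>
    #include <string.h>

    struct cursor {                     /* toy iov_iter: one flat buffer */
        char   *buf;
        size_t  remaining;
    };

    /* Stand-in for copy_page_to_iter(): may return fewer than `copy` bytes. */
    static size_t copy_to_cursor(struct cursor *c, const char *page,
                                 size_t offset, size_t copy)
    {
        if (copy > c->remaining)
            copy = c->remaining;
        memcpy(c->buf, page + offset, copy);
        c->buf       += copy;
        c->remaining -= copy;
        return copy;
    }

    static int rw_pages(struct cursor *c, const char *page, size_t len)
    {
        size_t offset = 0;

        while (len) {
            size_t copy = len;
            size_t copied = copy_to_cursor(c, page, offset, copy);

            len    -= copied;
            offset += copied;
            /* Short copy with data still wanted: in the kernel this means
             * the page faulted, so bail out with an error. (The toy cursor
             * only shortens when it is full, so it never fires here.) */
            if (copied < copy && c->remaining)
                return -1;
            if (!copied)
                break;              /* cursor full: stop rather than spin */
        }
        return 0;
    }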
mm/filemap.c
    2351  loff_t pos, unsigned len, unsigned copied,                               in pagecache_write_end() argument
    2356  return aops->write_end(file, mapping, pos, len, copied, page, fsdata);  in pagecache_write_end()
    2469  size_t copied;          /* Bytes copied from user */                     in generic_perform_write() local
    2505  copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);        in generic_perform_write()
    2508  status = a_ops->write_end(file, mapping, pos, bytes, copied,             in generic_perform_write()
    2512  copied = status;                                                         in generic_perform_write()
    2516  iov_iter_advance(i, copied);                                             in generic_perform_write()
    2517  if (unlikely(copied == 0)) {                                             in generic_perform_write()
    2530  pos += copied;                                                           in generic_perform_write()
    2531  written += copied;                                                       in generic_perform_write()
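The generic_perform_write() hits trace the buffered-write loop: copy user bytes into the page, let write_end() possibly shorten copied, advance by the shortened count, and treat copied == 0 as "fault the source in and retry". A compressed sketch of that control flow; atomic_copy() and commit_write() are hypothetical stand-ins for iov_iter_copy_from_user_atomic() and a_ops->write_end():

    #include <stddef.h>
    #include <string.h>

    /* Stand-in for iov_iter_copy_from_user_atomic(): the kernel runs it with
     * pagefaults disabled, so it may copy nothing if the source is absent. */
    static size_t atomic_copy(char *page, const char *src, size_t bytes)
    {
        memcpy(page, src, bytes);
        return bytes;
    }

    /* Stand-in for a_ops->write_end(): may commit fewer bytes than copied. */
    static size_t commit_write(size_t pos, size_t bytes, size_t copied)
    {
        (void)pos; (void)bytes;
        return copied;
    }

    static long perform_write(char *page, const char *src, size_t len)
    {
        size_t pos = 0, written = 0;

        while (written < len) {
            size_t bytes  = len - written;
            size_t copied = atomic_copy(page, src + written, bytes);

            /* Trust write_end(): it reports what actually hit the page. */
            copied = commit_write(pos, bytes, copied);

            if (copied == 0) {
                /* Kernel: fault in the source pages, then retry this chunk.
                 * Nothing to fault in here, so fail instead of spinning. */
                return -1;
            }
            pos     += copied;
            written += copied;
        }
        return (long)written;
    }

Advancing by what write_end() returned, not by what was asked for, is what keeps the loop correct when the filesystem commits a short write.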
mm/iov_iter.c
     306  size_t copied = 0, left = 0;                              in __iovec_copy_from_user_inatomic() local
     314  copied += copy;                                           in __iovec_copy_from_user_inatomic()
     322  return copied - left;                                     in __iovec_copy_from_user_inatomic()
     334  size_t copied;                                            in copy_from_user_atomic_iovec() local
     341  copied = bytes - left;                                    in copy_from_user_atomic_iovec()
     343  copied = __iovec_copy_from_user_inatomic(kaddr + offset,  in copy_from_user_atomic_iovec()
     348  return copied;                                            in copy_from_user_atomic_iovec()
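__iovec_copy_from_user_inatomic() accumulates copied across iovec segments and, on a partial segment, returns copied - left: the optimistic total minus the bytes the last copy failed to deliver. A sketch of that bookkeeping over a plain struct iovec array; memcpy() stands in for __copy_from_user_inatomic(), so in userspace left is always 0:

    #include <stddef.h>
    #include <string.h>
    #include <sys/uio.h>

    static size_t copy_from_iovecs(char *dst, const struct iovec *iov,
                                   size_t bytes)
    {
        size_t copied = 0, left = 0;

        while (bytes) {
            size_t copy = iov->iov_len < bytes ? iov->iov_len : bytes;

            if (copy) {
                /* Kernel: left = __copy_from_user_inatomic(...), the count
                 * of bytes that did NOT land. memcpy() delivers them all. */
                memcpy(dst + copied, iov->iov_base, copy);
                left = 0;

                copied += copy;     /* optimistically count the whole segment */
                bytes  -= copy;
            }
            iov++;                  /* caller guarantees iovecs cover `bytes` */
            if (left)
                break;              /* faulted partway: stop the walk */
        }
        return copied - left;       /* correct the optimistic count */
    }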
mm/vmalloc.c
    1879  int copied = 0;                                          in aligned_vread() local
    1909  copied += length;                                        in aligned_vread()
    1912  return copied;                                           in aligned_vread()
    1918  int copied = 0;                                          in aligned_vwrite() local
    1946  copied += length;                                        in aligned_vwrite()
    1949  return copied;                                           in aligned_vwrite()
    2065  int copied = 0;                                          in vwrite() local
    2096  copied++;                                                in vwrite()
    2104  if (!copied)                                             in vwrite()
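In vwrite(), copied is less a byte count than a did-anything-land flag: it is bumped once per mapped region written, and if it stays zero the function reports that nothing was written at all. A toy illustration of that all-or-nothing reporting; struct region and write_regions() are invented here:

    #include <stddef.h>

    struct region {
        int    mapped;              /* is this chunk of the range backed? */
        size_t len;
    };

    static long write_regions(struct region *r, size_t n, size_t buflen)
    {
        size_t remaining = buflen;
        int copied = 0;
        size_t i;

        for (i = 0; i < n && remaining; i++) {
            size_t chunk;

            if (!r[i].mapped)
                continue;           /* vwrite() silently skips unmapped holes */
            chunk = r[i].len < remaining ? r[i].len : remaining;
            /* ... copy `chunk` bytes into the region here ... */
            remaining -= chunk;
            copied++;               /* count regions touched, not bytes */
        }
        if (!copied)
            return 0;               /* entire range was unmapped */
        return (long)buflen;        /* like vwrite(): report the full request */
    }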
mm/shmem.c
    1499  loff_t pos, unsigned len, unsigned copied,               in shmem_write_end() argument
    1504  if (pos + copied > inode->i_size)                        in shmem_write_end()
    1505  i_size_write(inode, pos + copied);                       in shmem_write_end()
    1508  if (copied < PAGE_CACHE_SIZE) {                          in shmem_write_end()
    1511  from + copied, PAGE_CACHE_SIZE);                         in shmem_write_end()
    1519  return copied;                                           in shmem_write_end()
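shmem_write_end() shows the commit-side duties: grow i_size when pos + copied passes the old end of file, and, when less than a full page arrived, zero the bytes around the copied range so stale page contents never become visible file data. A userspace sketch of that bookkeeping, compressed (the real function only zeroes when the page was not already uptodate); struct toy_inode is invented and PAGE_SIZE stands in for PAGE_CACHE_SIZE:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096              /* stand-in for PAGE_CACHE_SIZE */

    struct toy_inode {
        size_t i_size;
    };

    /* Assumes from + copied <= PAGE_SIZE, as in a single-page write. */
    static size_t write_end(struct toy_inode *inode, char *page,
                            size_t pos, size_t copied)
    {
        size_t from = pos & (PAGE_SIZE - 1);    /* write offset within page */

        if (pos + copied > inode->i_size)
            inode->i_size = pos + copied;       /* i_size_write() analogue */

        if (copied < PAGE_SIZE) {
            /* Page not fully overwritten: zero the head and tail around
             * the copied range, as zero_user_segments() does. */
            memset(page, 0, from);
            memset(page + from + copied, 0, PAGE_SIZE - from - copied);
        }
        return copied;                  /* the caller advances by this much */
    }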
mm/Kconfig
     469  the page is copied into the kernel and a disk access is avoided.