/mm/
maccess.c:
    18  long __weak probe_kernel_read(void *dst, const void *src, size_t size)
    21  long __probe_kernel_read(void *dst, const void *src, size_t size)    in __probe_kernel_read() argument
    29  (__force const void __user *)src, size);    in __probe_kernel_read()
    46  long __weak probe_kernel_write(void *dst, const void *src, size_t size)
    49  long __probe_kernel_write(void *dst, const void *src, size_t size)    in __probe_kernel_write() argument
    56  ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);    in __probe_kernel_write()
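probe_kernel_read() and probe_kernel_write() copy to or from a kernel address that may be invalid, returning -EFAULT instead of oopsing when the access faults. A minimal usage sketch, assuming a hypothetical show_word() debug helper that is not part of mm/:

    #include <linux/uaccess.h>
    #include <linux/printk.h>

    /* Dump one word from an address that may be unmapped (for example a
     * pointer recovered from a crashed context) without risking an oops. */
    static void show_word(const void *addr)
    {
            unsigned long val;

            if (probe_kernel_read(&val, addr, sizeof(val)))
                    pr_info("%p: <unreadable>\n", addr);
            else
                    pr_info("%p: %#lx\n", addr, val);
    }

The __weak definitions in maccess.c let an architecture supply its own versions of these helpers.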
zswap.c:
   111  static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,    in zswap_comp_op() argument
   120  ret = crypto_comp_compress(tfm, src, slen, dst, dlen);    in zswap_comp_op()
   123  ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);    in zswap_comp_op()
   541  u8 *src, *dst;    in zswap_writeback_entry() local
   581  src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,    in zswap_writeback_entry()
   584  ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,    in zswap_writeback_entry()
   649  u8 *src, *dst;    in zswap_frontswap_store() local
   677  src = kmap_atomic(page);    in zswap_frontswap_store()
   678  ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);    in zswap_frontswap_store()
   679  kunmap_atomic(src);    in zswap_frontswap_store()
   [all …]
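zswap_comp_op() simply selects between crypto_comp_compress() and crypto_comp_decompress() on a compression transform. A stand-alone sketch of the compression call, assuming an "lzo" compressor and a one-off transform rather than zswap's long-lived per-CPU one (compress_buf() is a hypothetical name):

    #include <linux/crypto.h>
    #include <linux/err.h>

    /* Compress slen bytes from src into dst; *dlen is the destination
     * capacity on entry and the compressed size on return. */
    static int compress_buf(const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen)
    {
            struct crypto_comp *tfm;
            int ret;

            tfm = crypto_alloc_comp("lzo", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_comp_compress(tfm, src, slen, dst, dlen);

            crypto_free_comp(tfm);
            return ret;
    }

Allocating the transform per call is only for illustration; in the listing above the source page is mapped with kmap_atomic() just for the duration of the call (lines 677–679).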
migrate.c:
   475  static void __copy_gigantic_page(struct page *dst, struct page *src,    in __copy_gigantic_page() argument
   480  struct page *src_base = src;    in __copy_gigantic_page()
   484  copy_highpage(dst, src);    in __copy_gigantic_page()
   488  src = mem_map_next(src, src_base, i);    in __copy_gigantic_page()
   492  static void copy_huge_page(struct page *dst, struct page *src)    in copy_huge_page() argument
   497  if (PageHuge(src)) {    in copy_huge_page()
   499  struct hstate *h = page_hstate(src);    in copy_huge_page()
   503  __copy_gigantic_page(dst, src, nr_pages);    in copy_huge_page()
   508  BUG_ON(!PageTransHuge(src));    in copy_huge_page()
   509  nr_pages = hpage_nr_pages(src);    in copy_huge_page()
   [all …]
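copy_huge_page() copies a compound page one subpage at a time with copy_highpage(); only gigantic pages need the mem_map_next() indirection, because their struct pages may not be contiguous. A simplified sketch for the contiguous (THP-sized) case, under that assumption and with a hypothetical name:

    #include <linux/highmem.h>
    #include <linux/sched.h>

    /* Copy an order-N compound page subpage by subpage, yielding the CPU
     * between pages. Assumes dst and src struct pages are contiguous. */
    static void copy_compound_page(struct page *dst, struct page *src,
                                   int nr_pages)
    {
            int i;

            for (i = 0; i < nr_pages; i++) {
                    cond_resched();
                    copy_highpage(dst + i, src + i);
            }
    }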
util.c:
    71  void *kmemdup(const void *src, size_t len, gfp_t gfp)    in kmemdup() argument
    77  memcpy(p, src, len);    in kmemdup()
    90  void *memdup_user(const void __user *src, size_t len)    in memdup_user() argument
   103  if (copy_from_user(p, src, len)) {    in memdup_user()
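kmemdup() allocates and copies a kernel buffer in one step, returning NULL on allocation failure; memdup_user() does the same for a userspace buffer and returns an ERR_PTR() instead. A hypothetical caller, stash_config(), showing both error conventions:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/err.h>
    #include <linux/uaccess.h>

    /* Take a private kernel copy of a user buffer, then duplicate that
     * copy again on the kernel side. Purely illustrative. */
    static int stash_config(const void __user *ubuf, size_t len)
    {
            void *copy, *backup;

            copy = memdup_user(ubuf, len);           /* ERR_PTR() on failure */
            if (IS_ERR(copy))
                    return PTR_ERR(copy);

            backup = kmemdup(copy, len, GFP_KERNEL); /* NULL on failure */
            if (!backup) {
                    kfree(copy);
                    return -ENOMEM;
            }

            kfree(backup);
            kfree(copy);
            return 0;
    }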
memory.c:
  1948  static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_ar…    in cow_user_page() argument
  1950  debug_dma_assert_idle(src);    in cow_user_page()
  1958  if (unlikely(!src)) {    in cow_user_page()
  1973  copy_user_highpage(dst, src, va, vma);    in cow_user_page()
  3732  static void copy_user_gigantic_page(struct page *dst, struct page *src,    in copy_user_gigantic_page() argument
  3739  struct page *src_base = src;    in copy_user_gigantic_page()
  3743  copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);    in copy_user_gigantic_page()
  3747  src = mem_map_next(src, src_base, i);    in copy_user_gigantic_page()
  3751  void copy_user_huge_page(struct page *dst, struct page *src,    in copy_user_huge_page() argument
  3758  copy_user_gigantic_page(dst, src, addr, vma,    in copy_user_huge_page()
   [all …]
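cow_user_page() and copy_user_huge_page() are built on copy_user_highpage(), which copies one page of user memory while handling highmem mapping and architecture cache aliasing for the faulting virtual address. A trimmed-down sketch of the allocate-and-copy step of a copy-on-write fault (dup_user_page() is a hypothetical name; the real cow_user_page() also covers the case where there is no source struct page, see line 1958):

    #include <linux/highmem.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Allocate a fresh page for a write fault and fill it from the
     * original page. */
    static struct page *dup_user_page(struct page *src, unsigned long vaddr,
                                      struct vm_area_struct *vma)
    {
            struct page *dst;

            dst = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
            if (!dst)
                    return NULL;

            copy_user_highpage(dst, src, vaddr, vma);
            return dst;
    }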
hugetlb.c:
  2576  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,    in copy_hugetlb_page_range() argument
  2594  mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);    in copy_hugetlb_page_range()
  2598  src_pte = huge_pte_offset(src, addr);    in copy_hugetlb_page_range()
  2612  src_ptl = huge_pte_lockptr(h, src, src_pte);    in copy_hugetlb_page_range()
  2628  set_huge_pte_at(src, addr, src_pte, entry);    in copy_hugetlb_page_range()
  2633  huge_ptep_set_wrprotect(src, addr, src_pte);    in copy_hugetlb_page_range()
  2645  mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);    in copy_hugetlb_page_range()
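The copy loop in copy_hugetlb_page_range() is bracketed by mmu_notifier_invalidate_range_start()/_end() so that secondary MMUs (for example KVM) drop any translations they cache while the source PTEs are being modified. The bracketing pattern, sketched with the three-argument notifier API that this listing uses (with_invalidate() is a hypothetical helper):

    #include <linux/mm_types.h>
    #include <linux/mmu_notifier.h>

    /* Run 'work' on a range of mm while secondary MMUs are told to
     * invalidate mappings they cache for [start, end). */
    static void with_invalidate(struct mm_struct *mm,
                                unsigned long start, unsigned long end,
                                void (*work)(struct mm_struct *mm,
                                             unsigned long start,
                                             unsigned long end))
    {
            mmu_notifier_invalidate_range_start(mm, start, end);
            work(mm, start, end);
            mmu_notifier_invalidate_range_end(mm, start, end);
    }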
rmap.c:
   252  int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)    in anon_vma_clone() argument
   257  list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {    in anon_vma_clone()
   280  if (!dst->anon_vma && anon_vma != src->anon_vma &&    in anon_vma_clone()
vmscan.c:
  1315  struct list_head *src = &lruvec->lists[lru];    in isolate_lru_pages() local
  1319  for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {    in isolate_lru_pages()
  1323  page = lru_to_page(src);    in isolate_lru_pages()
  1324  prefetchw_prev_lru_page(page, src, flags);    in isolate_lru_pages()
  1338  list_move(&page->lru, src);    in isolate_lru_pages()
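isolate_lru_pages() repeatedly takes the page at the tail of the per-lruvec LRU list (the coldest end) and either isolates it or rotates it back with list_move(). The shape of that walk, reduced to plain list primitives (grab_from_tail() is a hypothetical stand-alone version; the real code holds the LRU lock and applies per-page isolation checks):

    #include <linux/list.h>
    #include <linux/mm_types.h>

    /* Move up to nr_to_scan pages from the tail of src onto dst. */
    static unsigned long grab_from_tail(struct list_head *src,
                                        struct list_head *dst,
                                        unsigned long nr_to_scan)
    {
            unsigned long taken = 0;

            while (taken < nr_to_scan && !list_empty(src)) {
                    struct page *page = list_last_entry(src, struct page, lru);

                    list_move(&page->lru, dst);
                    taken++;
            }
            return taken;
    }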
zsmalloc.c:
  1501  static void zs_object_copy(unsigned long src, unsigned long dst,    in zs_object_copy() argument
  1513  obj_to_location(src, &s_page, &s_objidx);    in zs_object_copy()
mempolicy.c:
  2084  int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)    in vma_dup_policy() argument
  2086  struct mempolicy *pol = mpol_dup(vma_policy(src));    in vma_dup_policy()