Occurrences of "dst" under mm/ (line number, source line, enclosing function):
mm/zswap.c
    112  u8 *dst, unsigned int *dlen)   in zswap_comp_op() argument
    120  ret = crypto_comp_compress(tfm, src, slen, dst, dlen);   in zswap_comp_op()
    123  ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);   in zswap_comp_op()
    345  u8 *dst;   in __zswap_cpu_notifier() local
    355  dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));   in __zswap_cpu_notifier()
    356  if (!dst) {   in __zswap_cpu_notifier()
    362  per_cpu(zswap_dstmem, cpu) = dst;   in __zswap_cpu_notifier()
    371  dst = per_cpu(zswap_dstmem, cpu);   in __zswap_cpu_notifier()
    372  kfree(dst);   in __zswap_cpu_notifier()
    541  u8 *src, *dst;   in zswap_writeback_entry() local
    [all …]
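
The __zswap_cpu_notifier() lines show zswap's per-CPU compression scratch buffer: each CPU gets a two-page destination buffer, published through the zswap_dstmem per-CPU pointer and freed again on CPU teardown. A minimal sketch of that allocate/publish/free pattern, with the actual CPU-notifier plumbing and error handling stripped out (the *_sketch function names are made up for illustration):

    /* Sketch only, modelled on the __zswap_cpu_notifier() lines above. */
    static DEFINE_PER_CPU(u8 *, zswap_dstmem);

    static int zswap_cpu_prepare_sketch(unsigned long cpu)
    {
        /* two pages of scratch: compressed output can exceed PAGE_SIZE
         * for poorly compressible data, so leave room before rejecting it */
        u8 *dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));

        if (!dst)
            return -ENOMEM;
        per_cpu(zswap_dstmem, cpu) = dst;
        return 0;
    }

    static void zswap_cpu_teardown_sketch(unsigned long cpu)
    {
        u8 *dst = per_cpu(zswap_dstmem, cpu);

        kfree(dst);
        per_cpu(zswap_dstmem, cpu) = NULL;
    }
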
mm/maccess.c
    18   long __weak probe_kernel_read(void *dst, const void *src, size_t size)
    21   long __probe_kernel_read(void *dst, const void *src, size_t size)   in __probe_kernel_read() argument
    28   ret = __copy_from_user_inatomic(dst,   in __probe_kernel_read()
    46   long __weak probe_kernel_write(void *dst, const void *src, size_t size)
    49   long __probe_kernel_write(void *dst, const void *src, size_t size)   in __probe_kernel_write() argument
    56   ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);   in __probe_kernel_write()
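
probe_kernel_read()/probe_kernel_write() copy to and from addresses that may not be valid without oopsing: page faults are disabled around an *_inatomic user-copy primitive, so a bad address surfaces as -EFAULT instead of a crash. A sketch of the read side, assembled from the lines above and the era's set_fs()/KERNEL_DS convention (which lets the user-copy helper accept a kernel pointer); treat it as illustrative rather than the current implementation:

    long __probe_kernel_read(void *dst, const void *src, size_t size)
    {
        long ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);       /* allow a kernel address in the "user" copy */
        pagefault_disable();     /* faults now fail the copy instead of oopsing */
        ret = __copy_from_user_inatomic(dst,
                (__force const void __user *)src, size);
        pagefault_enable();
        set_fs(old_fs);

        return ret ? -EFAULT : 0;
    }
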
mm/backing-dev.c
    497  struct bdi_writeback *dst = &default_backing_dev_info.wb;   in bdi_destroy() local
    499  bdi_lock_two(&bdi->wb, dst);   in bdi_destroy()
    500  list_splice(&bdi->wb.b_dirty, &dst->b_dirty);   in bdi_destroy()
    501  list_splice(&bdi->wb.b_io, &dst->b_io);   in bdi_destroy()
    502  list_splice(&bdi->wb.b_more_io, &dst->b_more_io);   in bdi_destroy()
    504  spin_unlock(&dst->list_lock);   in bdi_destroy()
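
Here dst is the default backing device's writeback state: when a bdi is destroyed, inodes still queued on its dirty/io/more_io lists are spliced onto the default bdi so pending writeback is not lost. A sketch of that hand-off under both list_locks, taken via bdi_lock_two() as the listing shows; the unlock of the source side is added for completeness:

    /* Sketch of the list hand-off in bdi_destroy(); surrounding checks omitted. */
    struct bdi_writeback *dst = &default_backing_dev_info.wb;

    bdi_lock_two(&bdi->wb, dst);    /* takes both list_locks in a safe order */
    list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
    list_splice(&bdi->wb.b_io, &dst->b_io);
    list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
    spin_unlock(&bdi->wb.list_lock);
    spin_unlock(&dst->list_lock);
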
mm/migrate.c
    475  static void __copy_gigantic_page(struct page *dst, struct page *src,   in __copy_gigantic_page() argument
    479  struct page *dst_base = dst;   in __copy_gigantic_page()
    484  copy_highpage(dst, src);   in __copy_gigantic_page()
    487  dst = mem_map_next(dst, dst_base, i);   in __copy_gigantic_page()
    492  static void copy_huge_page(struct page *dst, struct page *src)   in copy_huge_page() argument
    503  __copy_gigantic_page(dst, src, nr_pages);   in copy_huge_page()
    514  copy_highpage(dst + i, src + i);   in copy_huge_page()
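
copy_huge_page() duplicates a huge page one base page at a time. For ordinary huge pages the subpages are contiguous, so plain dst + i indexing works (line 514); gigantic pages can cross memory-section boundaries, so the __copy_gigantic_page() path walks the struct pages with mem_map_next() instead. A sketch of that walker, following the listed lines:

    static void __copy_gigantic_page(struct page *dst, struct page *src,
                                     int nr_pages)
    {
        int i;
        struct page *dst_base = dst;
        struct page *src_base = src;

        for (i = 0; i < nr_pages; ) {
            cond_resched();
            copy_highpage(dst, src);    /* copy one base page */

            i++;
            /* advance to the next struct page, even across section gaps */
            dst = mem_map_next(dst, dst_base, i);
            src = mem_map_next(src, src_base, i);
        }
    }
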
mm/rmap.c
    252  int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)   in anon_vma_clone() argument
    270  anon_vma_chain_link(dst, avc, anon_vma);   in anon_vma_clone()
    280  if (!dst->anon_vma && anon_vma != src->anon_vma &&   in anon_vma_clone()
    282  dst->anon_vma = anon_vma;   in anon_vma_clone()
    284  if (dst->anon_vma)   in anon_vma_clone()
    285  dst->anon_vma->degree++;   in anon_vma_clone()
    296  dst->anon_vma = NULL;   in anon_vma_clone()
    297  unlink_anon_vmas(dst);   in anon_vma_clone()
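
anon_vma_clone() gives dst (the new VMA) its own anon_vma_chain links to every anon_vma that src is attached to; where possible it also reuses one of those anon_vmas as dst->anon_vma (line 282) instead of forcing a later allocation, and on allocation failure it unwinds with unlink_anon_vmas(). A condensed sketch of the loop; the root-lock batching and GFP_NOWAIT retry of the real function are left out, and anon_vma_chain_alloc()/anon_vma_chain_link() are the file-local helpers the listing refers to:

    int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
    {
        struct anon_vma_chain *avc, *pavc;

        list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
            struct anon_vma *anon_vma = pavc->anon_vma;

            avc = anon_vma_chain_alloc(GFP_KERNEL);
            if (!avc)
                goto enomem_failure;
            anon_vma_chain_link(dst, avc, anon_vma);

            /* Reuse an existing anon_vma for dst if it is not src's own
             * and is still "shallow" enough (degree check). */
            if (!dst->anon_vma && anon_vma != src->anon_vma &&
                anon_vma->degree < 2)
                dst->anon_vma = anon_vma;
        }
        if (dst->anon_vma)
            dst->anon_vma->degree++;
        return 0;

    enomem_failure:
        dst->anon_vma = NULL;
        unlink_anon_vmas(dst);      /* drop whatever was linked so far */
        return -ENOMEM;
    }
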
mm/memory.c
    1948  static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_ar…   in cow_user_page() argument
    1959  void *kaddr = kmap_atomic(dst);   in cow_user_page()
    1971  flush_dcache_page(dst);   in cow_user_page()
    1973  copy_user_highpage(dst, src, va, vma);   in cow_user_page()
    3732  static void copy_user_gigantic_page(struct page *dst, struct page *src,   in copy_user_gigantic_page() argument
    3738  struct page *dst_base = dst;   in copy_user_gigantic_page()
    3743  copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);   in copy_user_gigantic_page()
    3746  dst = mem_map_next(dst, dst_base, i);   in copy_user_gigantic_page()
    3751  void copy_user_huge_page(struct page *dst, struct page *src,   in copy_user_huge_page() argument
    3758  copy_user_gigantic_page(dst, src, addr, vma,   in copy_user_huge_page()
    [all …]
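
cow_user_page() fills the freshly allocated page for a copy-on-write fault. The interesting branch is src == NULL, which happens when the source is a raw PFN mapping with no struct page behind it: the data is then copied through the faulting user virtual address, and if even that copy faults the new page is simply zeroed. A sketch consistent with the listed lines:

    static inline void cow_user_page(struct page *dst, struct page *src,
                                     unsigned long va, struct vm_area_struct *vma)
    {
        if (unlikely(!src)) {
            /* No source struct page: copy via the user mapping instead. */
            void *kaddr = kmap_atomic(dst);
            void __user *uaddr = (void __user *)(va & PAGE_MASK);

            /* If the user address cannot be read, hand back a zeroed page. */
            if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
                clear_page(kaddr);
            kunmap_atomic(kaddr);
            flush_dcache_page(dst);
        } else {
            copy_user_highpage(dst, src, va, vma);
        }
    }
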
mm/hugetlb.c
    2576  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,   in copy_hugetlb_page_range() argument
    2601  dst_pte = huge_pte_alloc(dst, addr, sz);   in copy_hugetlb_page_range()
    2611  dst_ptl = huge_pte_lock(h, dst, dst_pte);   in copy_hugetlb_page_range()
    2630  set_huge_pte_at(dst, addr, dst_pte, entry);   in copy_hugetlb_page_range()
    2638  set_huge_pte_at(dst, addr, dst_pte, entry);   in copy_hugetlb_page_range()
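
copy_hugetlb_page_range() is the fork-time copy of a hugetlb VMA's page table: for every huge page it allocates a destination PTE slot, takes the PTE lock, and either duplicates the mapping or, for private mappings, write-protects both sides first so a later write takes the COW path. A heavily simplified sketch of that loop; source-side locking, rmap duplication, shared-PMD and swap/migration-entry handling are all omitted, and the _sketch name is made up:

    /* Heavily simplified sketch of copy_hugetlb_page_range(); not complete. */
    int copy_hugetlb_page_range_sketch(struct mm_struct *dst, struct mm_struct *src,
                                       struct vm_area_struct *vma)
    {
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
        int cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
        unsigned long addr;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
            pte_t *src_pte = huge_pte_offset(src, addr);
            pte_t *dst_pte;
            spinlock_t *dst_ptl;
            pte_t entry;

            if (!src_pte)
                continue;                       /* nothing mapped here */
            dst_pte = huge_pte_alloc(dst, addr, sz);
            if (!dst_pte)
                return -ENOMEM;

            dst_ptl = huge_pte_lock(h, dst, dst_pte);
            if (!huge_pte_none(huge_ptep_get(src_pte))) {
                if (cow)    /* private mapping: make both copies read-only */
                    huge_ptep_set_wrprotect(src, addr, src_pte);
                entry = huge_ptep_get(src_pte);
                get_page(pte_page(entry));      /* real code also dups the rmap */
                set_huge_pte_at(dst, addr, dst_pte, entry);
            }
            spin_unlock(dst_ptl);
        }
        return 0;
    }
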
mm/zsmalloc.c
    1501  static void zs_object_copy(unsigned long src, unsigned long dst,   in zs_object_copy() argument
    1514  obj_to_location(dst, &d_page, &d_objidx);   in zs_object_copy()
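
In zs_object_copy() the src and dst arguments are encoded object handles; obj_to_location() decodes them into a page plus an object index, and because a zsmalloc object may straddle a page boundary the copy has to be done in chunks through kmap_atomic(). A purely illustrative sketch of that chunked copy (the function name and the two-page arrays are assumptions; the real code walks the pages of the zspage instead):

    /* Illustrative only: copy an object that may straddle one page boundary. */
    static void zs_copy_object_sketch(struct page *s_pages[2], unsigned int s_off,
                                      struct page *d_pages[2], unsigned int d_off,
                                      size_t size)
    {
        int s_idx = 0, d_idx = 0;

        while (size) {
            /* never copy past the end of either current page */
            size_t chunk = min3(size, (size_t)(PAGE_SIZE - s_off),
                                (size_t)(PAGE_SIZE - d_off));
            void *src = kmap_atomic(s_pages[s_idx]);
            void *dst = kmap_atomic(d_pages[d_idx]);

            memcpy(dst + d_off, src + s_off, chunk);

            kunmap_atomic(dst);     /* unmap in reverse order of mapping */
            kunmap_atomic(src);

            size -= chunk;
            s_off += chunk;
            d_off += chunk;
            if (s_off == PAGE_SIZE) {   /* object continues on the next page */
                s_off = 0;
                s_idx++;
            }
            if (d_off == PAGE_SIZE) {
                d_off = 0;
                d_idx++;
            }
        }
    }
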
mm/mempolicy.c
    2084  int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)   in vma_dup_policy() argument
    2090  dst->vm_policy = pol;   in vma_dup_policy()
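
vma_dup_policy() is small enough to show in full: it deep-copies the source VMA's NUMA memory policy with mpol_dup() and installs the copy on the destination VMA, which is how a split or duplicated VMA inherits its policy. A sketch matching the listed lines (likely very close to the real function):

    int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
    {
        struct mempolicy *pol = mpol_dup(vma_policy(src));

        if (IS_ERR(pol))
            return PTR_ERR(pol);
        dst->vm_policy = pol;
        return 0;
    }
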
mm/vmscan.c
    1311  struct lruvec *lruvec, struct list_head *dst,   in isolate_lru_pages() argument
    1332  list_move(&page->lru, dst);   in isolate_lru_pages()
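
In isolate_lru_pages() dst is the caller's private list: pages are taken off the tail of an LRU list and, when __isolate_lru_page() agrees they can be isolated, moved onto dst so the caller can reclaim or migrate them without holding the LRU lock for the whole scan. A simplified sketch of the scan loop; scan, nr_to_scan, src, mode and nr_taken are the surrounding function's parameters and locals, and the bookkeeping and failure handling of the real code are trimmed:

    /* Sketch of the core of isolate_lru_pages(); not the full function. */
    for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
        struct page *page = lru_to_page(src);   /* tail of the LRU list */

        switch (__isolate_lru_page(page, mode)) {
        case 0:
            nr_taken += hpage_nr_pages(page);
            list_move(&page->lru, dst);         /* hand the page to the caller */
            break;
        case -EBUSY:
            /* cannot isolate right now: rotate back to the head and move on */
            list_move(&page->lru, src);
            break;
        default:
            BUG();
        }
    }
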