/mm/
D | khugepaged.c |
     939  struct page *new_page;  in collapse_huge_page() local
     960  new_page = khugepaged_alloc_page(hpage, gfp, node);  in collapse_huge_page()
     961  if (!new_page) {  in collapse_huge_page()
     966  if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {  in collapse_huge_page()
     974  mem_cgroup_cancel_charge(new_page, memcg, true);  in collapse_huge_page()
     982  mem_cgroup_cancel_charge(new_page, memcg, true);  in collapse_huge_page()
     993  mem_cgroup_cancel_charge(new_page, memcg, true);  in collapse_huge_page()
    1057  __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);  in collapse_huge_page()
    1059  __SetPageUptodate(new_page);  in collapse_huge_page()
    1062  _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);  in collapse_huge_page()
    [all …]
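Taken together, the khugepaged.c matches trace the allocate-charge-copy sequence of THP collapse. A condensed sketch of that flow, built only from the calls listed above; locking, the failure paths behind the three mem_cgroup_cancel_charge() calls, and the final PMD installation are elided:

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page)
		return;				/* allocation failed */
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true)))
		return;				/* charge failed */
	/* any later failure must undo the charge:
	 * mem_cgroup_cancel_charge(new_page, memcg, true); */
	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	__SetPageUptodate(new_page);		/* contents are now valid */
	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);	/* entry to install */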
D | swap_state.c |
     302  struct page *found_page, *new_page = NULL;  in __read_swap_cache_async() local
     320  if (!new_page) {  in __read_swap_cache_async()
     321  new_page = alloc_page_vma(gfp_mask, vma, addr);  in __read_swap_cache_async()
     322  if (!new_page)  in __read_swap_cache_async()
     363  __SetPageLocked(new_page);  in __read_swap_cache_async()
     364  __SetPageSwapBacked(new_page);  in __read_swap_cache_async()
     365  err = __add_to_swap_cache(new_page, entry);  in __read_swap_cache_async()
     371  lru_cache_add_anon(new_page);  in __read_swap_cache_async()
     373  return new_page;  in __read_swap_cache_async()
     376  __ClearPageLocked(new_page);  in __read_swap_cache_async()
    [all …]
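The swap_state.c matches outline the miss path of __read_swap_cache_async(): allocate once, publish the page in the swap cache, and only then put it on the LRU. A condensed sketch using just the listed calls; the surrounding retry loop and error handling are elided:

	if (!new_page) {
		new_page = alloc_page_vma(gfp_mask, vma, addr);
		if (!new_page)
			break;			/* OOM: give up */
	}
	__SetPageLocked(new_page);		/* no other user can see it yet */
	__SetPageSwapBacked(new_page);
	err = __add_to_swap_cache(new_page, entry);
	if (!err) {
		lru_cache_add_anon(new_page);
		return new_page;		/* caller receives a locked page */
	}
	__ClearPageLocked(new_page);		/* lost a race; loop and retry */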
D | migrate.c |
    1924  struct page *new_page = NULL;  in migrate_misplaced_transhuge_page() local
    1938  new_page = alloc_pages_node(node,  in migrate_misplaced_transhuge_page()
    1941  if (!new_page)  in migrate_misplaced_transhuge_page()
    1943  prep_transhuge_page(new_page);  in migrate_misplaced_transhuge_page()
    1947  put_page(new_page);  in migrate_misplaced_transhuge_page()
    1958  __SetPageLocked(new_page);  in migrate_misplaced_transhuge_page()
    1959  __SetPageSwapBacked(new_page);  in migrate_misplaced_transhuge_page()
    1962  new_page->mapping = page->mapping;  in migrate_misplaced_transhuge_page()
    1963  new_page->index = page->index;  in migrate_misplaced_transhuge_page()
    1964  migrate_page_copy(new_page, page);  in migrate_misplaced_transhuge_page()
    [all …]
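migrate.c shows the same allocate-then-copy shape for moving a THP to another NUMA node: the new page takes over the old page's identity before the contents are copied. A condensed sketch; the allocation arguments after line 1938 are truncated in the listing, so `gfp` and `order` below stand in for them:

	new_page = alloc_pages_node(node, gfp, order);	/* real flags/order truncated above */
	if (!new_page)
		goto out;
	prep_transhuge_page(new_page);		/* set up compound/THP state */
	/* if the old page cannot be isolated: put_page(new_page); */
	__SetPageLocked(new_page);
	__SetPageSwapBacked(new_page);
	new_page->mapping = page->mapping;	/* inherit identity from the old page */
	new_page->index = page->index;
	migrate_page_copy(new_page, page);	/* copy contents and page state */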
D | memory.c |
    2148  struct page *new_page = NULL;  in wp_page_copy() local
    2159  new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);  in wp_page_copy()
    2160  if (!new_page)  in wp_page_copy()
    2163  new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,  in wp_page_copy()
    2165  if (!new_page)  in wp_page_copy()
    2167  cow_user_page(new_page, old_page, fe->address, vma);  in wp_page_copy()
    2170  if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))  in wp_page_copy()
    2173  __SetPageUptodate(new_page);  in wp_page_copy()
    2192  entry = mk_pte(new_page, vma->vm_page_prot);  in wp_page_copy()
    2201  page_add_new_anon_rmap(new_page, vma, fe->address, false);  in wp_page_copy()
    [all …]
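memory.c is the classic copy-on-write break in wp_page_copy(): zero-fill when there is nothing to copy, otherwise copy the old page, then charge, map, and add the anon rmap. A condensed sketch from the listed calls; the branch condition distinguishing the zero-page case does not appear in the listing, so the guard below is an assumption:

	if (source_is_zero_page) {		/* hypothetical guard; elided in the listing */
		new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, fe->address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, fe->address, vma);
	}
	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
		goto oom;			/* the real code frees new_page first */
	__SetPageUptodate(new_page);
	entry = mk_pte(new_page, vma->vm_page_prot);	/* new, private PTE */
	page_add_new_anon_rmap(new_page, vma, fe->address, false);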
D | huge_memory.c |
    1003  struct page *page = NULL, *new_page;  in do_huge_pmd_wp_page() local
    1040  new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);  in do_huge_pmd_wp_page()
    1042  new_page = NULL;  in do_huge_pmd_wp_page()
    1044  if (likely(new_page)) {  in do_huge_pmd_wp_page()
    1045  prep_transhuge_page(new_page);  in do_huge_pmd_wp_page()
    1062  if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,  in do_huge_pmd_wp_page()
    1064  put_page(new_page);  in do_huge_pmd_wp_page()
    1076  clear_huge_page(new_page, haddr, HPAGE_PMD_NR);  in do_huge_pmd_wp_page()
    1078  copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);  in do_huge_pmd_wp_page()
    1079  __SetPageUptodate(new_page);  in do_huge_pmd_wp_page()
    [all …]
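huge_memory.c repeats the CoW pattern at PMD granularity. Note the two content paths: when there is no source page (page stays NULL, the huge-zero-page case) the new THP is cleared rather than copied. A condensed sketch; the guard before the allocation and the trailing mem_cgroup_try_charge() arguments are truncated in the listing and marked as such:

	if (can_allocate_thp)			/* hypothetical guard; elided in the listing */
		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
	else
		new_page = NULL;
	if (likely(new_page))
		prep_transhuge_page(new_page);
	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
					   gfp, &memcg, true))) {	/* args truncated above */
		put_page(new_page);		/* drop the uncharged page */
		/* fall back to splitting / small-page CoW (elided) */
	}
	if (!page)				/* no source: huge zero page */
		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
	else
		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);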
D | ksm.c |
    1865  struct page *new_page;  in ksm_might_need_to_copy() local
    1880  new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);  in ksm_might_need_to_copy()
    1881  if (new_page) {  in ksm_might_need_to_copy()
    1882  copy_user_highpage(new_page, page, address, vma);  in ksm_might_need_to_copy()
    1884  SetPageDirty(new_page);  in ksm_might_need_to_copy()
    1885  __SetPageUptodate(new_page);  in ksm_might_need_to_copy()
    1886  __SetPageLocked(new_page);  in ksm_might_need_to_copy()
    1889  return new_page;  in ksm_might_need_to_copy()
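ksm.c is the simplest instance: ksm_might_need_to_copy() breaks sharing of a KSM page by handing back a private, locked copy. Every call below appears in the listing; only the checks deciding whether a copy is needed at all are elided:

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);
		SetPageDirty(new_page);		/* contents exist nowhere else yet */
		__SetPageUptodate(new_page);
		__SetPageLocked(new_page);	/* caller receives it locked */
	}
	return new_page;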
D | hugetlb.c |
    3485  struct page *old_page, *new_page;  in hugetlb_cow() local
    3522  new_page = alloc_huge_page(vma, address, outside_reserve);  in hugetlb_cow()
    3524  if (IS_ERR(new_page)) {  in hugetlb_cow()
    3549  ret = (PTR_ERR(new_page) == -ENOMEM) ?  in hugetlb_cow()
    3563  copy_user_huge_page(new_page, old_page, address, vma,  in hugetlb_cow()
    3565  __SetPageUptodate(new_page);  in hugetlb_cow()
    3566  set_page_huge_active(new_page);  in hugetlb_cow()
    3579  ClearPagePrivate(new_page);  in hugetlb_cow()
    3585  make_huge_pte(vma, new_page, 1));  in hugetlb_cow()
    3587  hugepage_add_new_anon_rmap(new_page, vma, address);  in hugetlb_cow()
    [all …]
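hugetlb.c mirrors the CoW break for hugetlb mappings, with reservation-aware allocation (which can fail with an ERR_PTR rather than NULL) and hugetlb-specific PTE construction. A condensed sketch; several call sites are truncated in the listing and are marked below rather than filled in:

	new_page = alloc_huge_page(vma, address, outside_reserve);
	if (IS_ERR(new_page)) {
		/* ret = (PTR_ERR(new_page) == -ENOMEM) ? ...
		 * the two alternatives are truncated in the listing */
		return ret;
	}
	copy_user_huge_page(new_page, old_page, address, vma,
			    npages /* final argument truncated in the listing */);
	__SetPageUptodate(new_page);
	set_page_huge_active(new_page);	/* page can now be isolated/migrated */
	ClearPagePrivate(new_page);	/* clear the restore-reserve-on-free marker */
	/* installed via make_huge_pte(vma, new_page, 1) — enclosing call truncated */
	hugepage_add_new_anon_rmap(new_page, vma, address);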
D | memory_hotplug.c |
    1565  struct page *new_page = NULL;  in new_node_page() local
    1583  new_page = __alloc_pages_nodemask(gfp_mask, 0,  in new_node_page()
    1585  if (!new_page)  in new_node_page()
    1586  new_page = __alloc_pages(gfp_mask, 0,  in new_node_page()
    1589  return new_page;  in new_node_page()
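memory_hotplug.c's new_node_page() is a migration allocation helper: during offlining it first tries to allocate away from the node being removed, then relaxes the restriction. A condensed sketch; the zonelist/nodemask arguments are truncated in the listing, so the placeholders below are assumptions:

	/* first attempt: order-0 page constrained by a nodemask that
	 * excludes the node being offlined (arguments truncated above) */
	new_page = __alloc_pages_nodemask(gfp_mask, 0, zonelist, &nmask);
	if (!new_page)			/* relax: allow any node */
		new_page = __alloc_pages(gfp_mask, 0, zonelist);
	return new_page;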
D | memory-failure.c |
    1500  static struct page *new_page(struct page *p, unsigned long private, int **x)  in new_page() function
    1603  ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,  in soft_offline_huge_page()
    1679  ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,  in __soft_offline_page()
D | mempolicy.c |
    1122  static struct page *new_page(struct page *page, unsigned long start, int **x)  in new_page() function
    1157  static struct page *new_page(struct page *page, unsigned long start, int **x)  in new_page() function
    1242  nr_failed = migrate_pages(&pagelist, new_page, NULL,  in do_mbind()
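The last two files, memory-failure.c and mempolicy.c, use new_page the other way round: not as a local variable but as the allocation callback handed to migrate_pages() (soft offline and mbind() respectively; the two definitions in mempolicy.c are presumably alternative build configurations). A minimal sketch of the callback shape, assuming this kernel generation's new_page_t signature; the policy-specific bodies and the trailing migrate_pages() arguments are truncated in the listing and left elided:

	/* new_page_t callback: invoked by migrate_pages() for each page it
	 * moves; the second argument carries caller-private data (the mbind()
	 * range start in mempolicy.c, a private cookie in memory-failure.c) */
	static struct page *new_page(struct page *page, unsigned long private, int **x)
	{
		/* choose a target node or policy for @page and allocate the
		 * replacement there (bodies elided in the listing) */
	}

	/* call sites, per the listing (trailing arguments truncated there): */
	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, /* ... */);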