
Searched refs: new_page (Results 1 – 25 of 31), sorted by relevance


/kernel/linux/linux-5.10/mm/
khugepaged.c:1066 struct page *new_page; in collapse_huge_page() local
1085 new_page = khugepaged_alloc_page(hpage, gfp, node); in collapse_huge_page()
1086 if (!new_page) { in collapse_huge_page()
1091 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) { in collapse_huge_page()
1095 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC); in collapse_huge_page()
1186 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl, in collapse_huge_page()
1189 __SetPageUptodate(new_page); in collapse_huge_page()
1192 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); in collapse_huge_page()
1204 page_add_new_anon_rmap(new_page, vma, address, true); in collapse_huge_page()
1205 lru_cache_add_inactive_or_unevictable(new_page, vma); in collapse_huge_page()
[all …]
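
Taken together, the khugepaged lines follow a fixed ordering: allocate the huge page, charge it to the memcg, copy the small pages' contents, then publish the mapping. A condensed, non-compilable reading sketch of that ordering (locking, error unwinding and TLB maintenance omitted):

    /* collapse_huge_page() ordering, mm/khugepaged.c (linux-5.10) */
    new_page = khugepaged_alloc_page(hpage, gfp, node);     /* 1. allocate THP */
    if (!new_page)
            goto out;                                       /* fall back to 4K pages */
    if (unlikely(mem_cgroup_charge(new_page, mm, gfp)))     /* 2. charge before  */
            goto out;                                       /*    it is mapped   */
    count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

    __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
                              &compound_pagelist);          /* 3. copy contents */
    __SetPageUptodate(new_page);

    _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);        /* 4. publish: huge PMD, */
    page_add_new_anon_rmap(new_page, vma, address, true);   /*    compound rmap,     */
    lru_cache_add_inactive_or_unevictable(new_page, vma);   /*    LRU visibility     */
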
purgeable.c:84 struct page *new_page = NULL; in lookup_uxpte_page() local
112 new_page = alloc_zeroed_user_highpage_movable(vma, addr); in lookup_uxpte_page()
113 if (!new_page) { in lookup_uxpte_page()
119 put_page(new_page); in lookup_uxpte_page()
127 put_page(new_page); in lookup_uxpte_page()
129 page = new_page; in lookup_uxpte_page()
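The purgeable.c hit is the optimistic-allocation shape: allocate a zeroed, movable user page up front, drop the reference on every path that loses a race or bails out, and publish the page only on success. A hedged sketch; the UXPTE locking between these steps is omitted and the exact failure conditions live in the OpenHarmony source:

    new_page = alloc_zeroed_user_highpage_movable(vma, addr);
    if (!new_page)
            goto out;                   /* allocation failed */
    /* ... re-check state under the lock ... */
    if (raced) {                        /* hypothetical condition: another   */
            put_page(new_page);         /* thread installed a page first, so */
            goto out;                   /* our copy is surplus               */
    }
    page = new_page;                    /* success: publish the new page */
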
migrate.c:1562 struct page *new_page = NULL; in alloc_migration_target() local
1592 new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask); in alloc_migration_target()
1594 if (new_page && PageTransHuge(new_page)) in alloc_migration_target()
1595 prep_transhuge_page(new_page); in alloc_migration_target()
1597 return new_page; in alloc_migration_target()
2154 struct page *new_page = NULL; in migrate_misplaced_transhuge_page() local
2158 new_page = alloc_pages_node(node, in migrate_misplaced_transhuge_page()
2161 if (!new_page) in migrate_misplaced_transhuge_page()
2163 prep_transhuge_page(new_page); in migrate_misplaced_transhuge_page()
2167 put_page(new_page); in migrate_misplaced_transhuge_page()
[all …]
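alloc_migration_target() is the generic "where do migrated pages come from" callback handed to migrate_pages(): allocate on the requested node with the caller's gfp mask, and if the source page was transparent-huge, prepare the fresh compound page the same way. Trimmed straight from the lines above (hugetlb and gfp/order setup elided):

    new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);

    if (new_page && PageTransHuge(new_page))
            prep_transhuge_page(new_page);  /* init THP state (deferred-split list) */

    return new_page;
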
memory.c:805 struct page *new_page; in copy_present_page() local
839 new_page = *prealloc; in copy_present_page()
840 if (!new_page) in copy_present_page()
848 copy_user_highpage(new_page, page, addr, src_vma); in copy_present_page()
849 __SetPageUptodate(new_page); in copy_present_page()
850 page_add_new_anon_rmap(new_page, dst_vma, addr, false); in copy_present_page()
851 lru_cache_add_inactive_or_unevictable(new_page, dst_vma); in copy_present_page()
852 rss[mm_counter(new_page)]++; in copy_present_page()
855 pte = mk_pte(new_page, dst_vma->vm_page_prot); in copy_present_page()
920 struct page *new_page; in page_copy_prealloc() local
[all …]
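copy_present_page() is the fork-time copy path (taken in 5.10 when the parent's page may be pinned): the pre-allocated page is filled from the parent and then wired into the child exactly like a fresh anonymous fault. Sketch of the lines above:

    new_page = *prealloc;               /* page_copy_prealloc() provided it */
    if (!new_page)
            return -EAGAIN;             /* caller preallocates and retries */

    copy_user_highpage(new_page, page, addr, src_vma);      /* copy parent data */
    __SetPageUptodate(new_page);
    page_add_new_anon_rmap(new_page, dst_vma, addr, false); /* not compound */
    lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
    rss[mm_counter(new_page)]++;        /* account the page to the child mm */

    pte = mk_pte(new_page, dst_vma->vm_page_prot);  /* child gets its own PTE */
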
ksm.c:2578 struct page *new_page; in ksm_might_need_to_copy() local
2593 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in ksm_might_need_to_copy()
2594 if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) { in ksm_might_need_to_copy()
2595 put_page(new_page); in ksm_might_need_to_copy()
2596 new_page = NULL; in ksm_might_need_to_copy()
2598 if (new_page) { in ksm_might_need_to_copy()
2599 copy_user_highpage(new_page, page, address, vma); in ksm_might_need_to_copy()
2601 SetPageDirty(new_page); in ksm_might_need_to_copy()
2602 __SetPageUptodate(new_page); in ksm_might_need_to_copy()
2603 __SetPageLocked(new_page); in ksm_might_need_to_copy()
[all …]
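ksm_might_need_to_copy() copies a shared KSM/swapcache page before it can be mapped privately; note the charge-failure rollback, put_page() plus NULL, before the page is ever visible to anyone else:

    new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
    if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
            put_page(new_page);         /* charge failed: undo the allocation */
            new_page = NULL;
    }
    if (new_page) {
            copy_user_highpage(new_page, page, address, vma);
            SetPageDirty(new_page);     /* the copy must reach swap eventually */
            __SetPageUptodate(new_page);
            __SetPageLocked(new_page);  /* hand it back locked, like the original */
    }
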
hugetlb.c:4151 struct page *old_page, *new_page; in hugetlb_cow() local
4189 new_page = alloc_huge_page(vma, haddr, outside_reserve); in hugetlb_cow()
4191 if (IS_ERR(new_page)) { in hugetlb_cow()
4236 ret = vmf_error(PTR_ERR(new_page)); in hugetlb_cow()
4249 copy_user_huge_page(new_page, old_page, address, vma, in hugetlb_cow()
4251 __SetPageUptodate(new_page); in hugetlb_cow()
4264 ClearPagePrivate(new_page); in hugetlb_cow()
4270 make_huge_pte(vma, new_page, 1)); in hugetlb_cow()
4272 hugepage_add_new_anon_rmap(new_page, vma, haddr); in hugetlb_cow()
4273 set_page_huge_active(new_page); in hugetlb_cow()
[all …]
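hugetlb_cow() is the same allocate/copy/publish dance at huge-page granularity, with one twist: alloc_huge_page() reports failure via ERR_PTR() rather than NULL, so the error path runs through vmf_error(). Sketch (the PTE store via set_huge_pte_at() is from the surrounding source):

    new_page = alloc_huge_page(vma, haddr, outside_reserve);
    if (IS_ERR(new_page)) {
            /* may retry once without the reserve; ultimately: */
            ret = vmf_error(PTR_ERR(new_page));
            goto out;
    }

    copy_user_huge_page(new_page, old_page, address, vma,
                        pages_per_huge_page(h));
    __SetPageUptodate(new_page);

    ClearPagePrivate(new_page);         /* reservation bookkeeping */
    set_huge_pte_at(mm, haddr, ptep,
                    make_huge_pte(vma, new_page, 1));       /* 1 = writable */
    hugepage_add_new_anon_rmap(new_page, vma, haddr);
    set_page_huge_active(new_page);     /* now eligible for isolation/migration */
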
/kernel/linux/linux-5.10/arch/s390/mm/
vmem.c:172 void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); in modify_pte_table() local
174 if (!new_page) in modify_pte_table()
176 pte_val(*pte) = __pa(new_page) | prot; in modify_pte_table()
248 void *new_page; in modify_pmd_table() local
257 new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE); in modify_pmd_table()
258 if (new_page) { in modify_pmd_table()
259 pmd_val(*pmd) = __pa(new_page) | prot; in modify_pmd_table()
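The s390 hits populate the vmemmap. modify_pte_table() backs one 4K slot; modify_pmd_table() first tries a single PMD-sized block so the vmemmap itself uses a large mapping, falling back to a PTE table when that allocation fails. A sketch, assuming the populate (add) direction:

    /* modify_pte_table(): back one vmemmap page */
    void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

    if (!new_page)
            return -ENOMEM;
    pte_val(*pte) = __pa(new_page) | prot;  /* s390 sets PTEs by direct assignment */

    /* modify_pmd_table(): prefer one PMD-sized block over 512 PTE-backed pages */
    new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
    if (new_page)
            pmd_val(*pmd) = __pa(new_page) | prot;
    /* else: fall through and populate a PTE table instead */
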
/kernel/linux/linux-5.10/kernel/events/
uprobes.c:155 struct page *old_page, struct page *new_page) in __replace_page() argument
169 if (new_page) { in __replace_page()
170 err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL); in __replace_page()
184 if (new_page) { in __replace_page()
185 get_page(new_page); in __replace_page()
186 page_add_new_anon_rmap(new_page, vma, addr, false); in __replace_page()
187 lru_cache_add_inactive_or_unevictable(new_page, vma); in __replace_page()
199 if (new_page) in __replace_page()
201 mk_pte(new_page, vma->vm_page_prot)); in __replace_page()
467 struct page *old_page, *new_page; in uprobe_write_opcode() local
[all …]
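In __replace_page() the new_page argument is allowed to be NULL: uprobe_write_opcode() passes a modified copy when installing a breakpoint and NULL when the original page contents can simply be restored, which is why every step is guarded. Sketch (the final PTE write is set_pte_at_notify() in the surrounding source):

    if (new_page) {
            /* charge early, before any page-table locks are held */
            err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
            if (err)
                    return err;
    }
    /* ... walk to the mapping of old_page ... */
    if (new_page) {
            get_page(new_page);         /* reference owned by the new mapping */
            page_add_new_anon_rmap(new_page, vma, addr, false);
            lru_cache_add_inactive_or_unevictable(new_page, vma);
    }
    /* ... clear the old PTE, then: */
    if (new_page)
            set_pte_at_notify(mm, addr, pvmw.pte,
                              mk_pte(new_page, vma->vm_page_prot));
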
/kernel/linux/linux-5.10/drivers/hyperhold/
hp_iotab.c:121 static bool hpio_fill_pages(struct hpio *hpio, u32 nr_page, gfp_t gfp, bool new_page) in hpio_fill_pages() argument
132 if (!new_page) in hpio_fill_pages()
160 struct hpio *hpio_alloc(u32 nr_page, gfp_t gfp, unsigned int op, bool new_page) in hpio_alloc() argument
168 if (!hpio_fill_pages(hpio, nr_page, gfp, new_page)) in hpio_alloc()
hp_iotab.h:46 struct hpio *hpio_alloc(u32 nr_page, gfp_t gfp, unsigned int op, bool new_page);
hyperhold.h:31 struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page);
hp_core.c:435 struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page) in hyperhold_io_alloc() argument
448 hpio = hpio_alloc(nr_page, gfp, op, new_page); in hyperhold_io_alloc()
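In hyperhold, new_page is not a page pointer but a flag threaded from the public allocator down to hpio_fill_pages(), which skips allocating backing pages when the caller will attach its own. A hypothetical call; the eid value, the REQ_OP_WRITE choice and the NULL-on-failure convention are illustrative assumptions, not from the source:

    /* new_page = true: let hpio_fill_pages() allocate the backing pages */
    struct hpio *hpio = hyperhold_io_alloc(eid, GFP_NOIO, REQ_OP_WRITE, true);

    if (!hpio)
            return -ENOMEM;             /* assumed failure convention */
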
/kernel/linux/linux-5.10/fs/f2fs/
namei.c:922 struct page *old_page, *new_page = NULL; in f2fs_rename() local
997 &new_page); in f2fs_rename()
999 if (IS_ERR(new_page)) in f2fs_rename()
1000 err = PTR_ERR(new_page); in f2fs_rename()
1012 f2fs_set_link(new_dir, new_entry, new_page, old_inode); in f2fs_rename()
1013 new_page = NULL; in f2fs_rename()
1093 f2fs_put_page(new_page, 0); in f2fs_rename()
1112 struct page *old_page, *new_page; in f2fs_cross_rename() local
1147 new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); in f2fs_cross_rename()
1149 if (IS_ERR(new_page)) in f2fs_cross_rename()
[all …]
/kernel/linux/linux-5.10/fs/minix/
namei.c:211 struct page * new_page; in minix_rename() local
219 new_de = minix_find_entry(new_dentry, &new_page); in minix_rename()
222 minix_set_link(new_de, new_page, old_inode); in minix_rename()
/kernel/linux/linux-5.10/fs/sysv/
namei.c:216 struct page * new_page; in sysv_rename() local
224 new_de = sysv_find_entry(new_dentry, &new_page); in sysv_rename()
227 sysv_set_link(new_de, new_page, old_inode); in sysv_rename()
/kernel/linux/linux-5.10/fs/ufs/
namei.c:270 struct page *new_page; in ufs_rename() local
278 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); in ufs_rename()
281 ufs_set_link(new_dir, new_de, new_page, old_inode, 1); in ufs_rename()
/kernel/linux/linux-5.10/fs/ubifs/
budget.c:367 znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) + in calc_idx_growth()
384 if (req->new_page) in calc_data_growth()
430 ubifs_assert(c, req->new_page <= 1); in ubifs_budget_space()
517 ubifs_assert(c, req->new_page <= 1); in ubifs_release_budget()
file.c:198 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 }; in release_new_page_budget()
224 struct ubifs_budget_req req = { .new_page = 1 }; in write_begin_slow()
365 req.new_page = 1; in allocate_budget()
1513 struct ubifs_budget_req req = { .new_page = 1 }; in ubifs_vm_page_mkwrite()
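UBIFS is the other non-pointer user: new_page is a field of struct ubifs_budget_req saying "this operation adds one brand-new page of data", and it feeds both the index-growth and data-growth estimates (hence the asserts that it is at most 1). A minimal sketch of the budget/release pairing around a page write, using the API visible above:

    struct ubifs_budget_req req = { .new_page = 1 };
    int err;

    err = ubifs_budget_space(c, &req);  /* may trigger commit/GC to free space */
    if (err)
            return err;                 /* e.g. -ENOSPC: budget could not be met */

    /* ... dirty the page; if the write is abandoned instead: */
    ubifs_release_budget(c, &req);
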
/kernel/linux/linux-5.10/fs/ext2/
namei.c:355 struct page *new_page; in ext2_rename() local
362 new_de = ext2_find_entry(new_dir, &new_dentry->d_name, &new_page); in ext2_rename()
367 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); in ext2_rename()
/kernel/linux/linux-5.10/fs/nilfs2/
namei.c:374 struct page *new_page; in nilfs_rename() local
382 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); in nilfs_rename()
385 nilfs_set_link(new_dir, new_de, new_page, old_inode); in nilfs_rename()
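The f2fs, minix, sysv, ufs, ext2 and nilfs hits are all the same classic overwrite-rename step: look up the victim entry in the new directory (the find helper pins the directory page into new_page) and repoint that entry at old_inode. In the ext2 flavour from above; the other filesystems differ only in helper names and error conventions (f2fs, for instance, checks IS_ERR(new_page)):

    struct page *new_page;
    struct ext2_dir_entry_2 *new_de;

    new_de = ext2_find_entry(new_dir, &new_dentry->d_name, &new_page);
    if (!new_de)
            goto out_dir;               /* victim entry vanished: fail the rename */
    /* repoint the existing entry; this consumes the pinned page */
    ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
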
/kernel/linux/linux-5.10/drivers/net/ethernet/ti/
cpsw.c:346 struct page *new_page, *page = token; in cpsw_rx_handler() local
378 new_page = page; in cpsw_rx_handler()
387 new_page = page_pool_dev_alloc_pages(pool); in cpsw_rx_handler()
388 if (unlikely(!new_page)) { in cpsw_rx_handler()
389 new_page = page; in cpsw_rx_handler()
449 xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; in cpsw_rx_handler()
453 dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; in cpsw_rx_handler()
454 ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, in cpsw_rx_handler()
458 page_pool_recycle_direct(pool, new_page); in cpsw_rx_handler()
cpsw_new.c:283 struct page *new_page, *page = token; in cpsw_rx_handler() local
321 new_page = page; in cpsw_rx_handler()
330 new_page = page_pool_dev_alloc_pages(pool); in cpsw_rx_handler()
331 if (unlikely(!new_page)) { in cpsw_rx_handler()
332 new_page = page; in cpsw_rx_handler()
392 xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; in cpsw_rx_handler()
396 dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; in cpsw_rx_handler()
397 ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, in cpsw_rx_handler()
401 page_pool_recycle_direct(pool, new_page); in cpsw_rx_handler()
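Both cpsw RX handlers use the page_pool "replace before re-arm" idiom: allocate a substitute page for the ring slot, and if that fails, sacrifice the just-received packet and put its own page back so the RX ring never runs dry. Condensed sketch covering both files (skb/XDP processing between the steps elided; pkt_size stands in for the driver's buffer length):

    new_page = page_pool_dev_alloc_pages(pool);
    if (unlikely(!new_page)) {
            new_page = page;            /* OOM: reuse the received page, drop pkt */
    }
    /* ... hand the received data up only if we did get a replacement ... */
    dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
    ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
                                   pkt_size, 0);
    if (ret < 0)
            page_pool_recycle_direct(pool, new_page);  /* slot dead: recycle page */
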
/kernel/linux/linux-5.10/drivers/tty/serial/
icom.c:348 unsigned char *new_page = NULL; in load_code() local
421 new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL); in load_code()
423 if (!new_page) { in load_code()
443 new_page[index] = fw->data[index]; in load_code()
500 if (new_page != NULL) in load_code()
501 dma_free_coherent(&dev->dev, 4096, new_page, temp_pci); in load_code()
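The icom hit is the plain coherent-DMA buffer lifecycle: allocate a page the adapter and CPU share, copy the firmware image in byte by byte, and free it on the common exit path (hence the NULL guard). Sketch:

    unsigned char *new_page = NULL;
    dma_addr_t temp_pci;

    new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);
    if (!new_page)
            goto error_out;             /* cannot stage the firmware */

    for (index = 0; index < fw->size; index++)
            new_page[index] = fw->data[index];  /* copy firmware into DMA page */
    /* ... point the adapter at temp_pci and let it fetch ... */
    if (new_page != NULL)               /* shared exit path */
            dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);
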
/kernel/linux/linux-5.10/fs/jbd2/
journal.c:348 struct page *new_page; in jbd2_journal_write_metadata_buffer() local
377 new_page = virt_to_page(jh_in->b_frozen_data); in jbd2_journal_write_metadata_buffer()
380 new_page = jh2bh(jh_in)->b_page; in jbd2_journal_write_metadata_buffer()
384 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
424 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
428 new_page = virt_to_page(tmp); in jbd2_journal_write_metadata_buffer()
445 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
450 set_bh_page(new_bh, new_page, new_offset); in jbd2_journal_write_metadata_buffer()
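jbd2_journal_write_metadata_buffer() picks which page supplies the block that goes to the journal: the transaction's frozen copy if one was made, otherwise the buffer's live page, switching to a freshly allocated copy only when the data must be escaped (its first word collides with the JBD2 magic). Each short-lived look at the contents goes through kmap_atomic(). Sketch of the selection:

    if (jh_in->b_frozen_data)
            new_page = virt_to_page(jh_in->b_frozen_data);  /* committed snapshot */
    else
            new_page = jh2bh(jh_in)->b_page;                /* live buffer page */

    mapped_data = kmap_atomic(new_page);
    /* ... check for the JBD2 magic; if found, copy to tmp and escape it,
     *     so new_page = virt_to_page(tmp) ... then kunmap_atomic() ... */

    set_bh_page(new_bh, new_page, new_offset);  /* aim the journal bh at it */
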
/kernel/linux/linux-5.10/Documentation/networking/
page_pool.rst:148 new_page = page_pool_dev_alloc_pages(page_pool);
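
The last hit is the page_pool documentation itself. A minimal driver-side sketch of that API as it stands in 5.10; local names are illustrative, and dev is assumed to be the driver's struct device:

    #include <net/page_pool.h>

    struct page_pool_params pp_params = {
            .order     = 0,             /* single 4K pages */
            .pool_size = 256,           /* pages cached in the pool's ring */
            .nid       = NUMA_NO_NODE,
            .dev       = dev,           /* needed if PP_FLAG_DMA_MAP is set */
    };
    struct page_pool *page_pool;
    struct page *new_page;

    page_pool = page_pool_create(&pp_params);
    if (IS_ERR(page_pool))
            return PTR_ERR(page_pool);

    new_page = page_pool_dev_alloc_pages(page_pool);       /* the call from the doc */
    if (new_page)
            page_pool_recycle_direct(page_pool, new_page); /* fast-path return */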
