
Lines matching references to folio in include/linux/rmap.h

174 struct anon_vma *folio_get_anon_vma(struct folio *folio);
197 static inline void __folio_rmap_sanity_checks(struct folio *folio,
201 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
204 VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);
216 VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
217 VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
228 VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
229 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
239 void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
240 void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
242 #define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
243 folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
244 void folio_add_anon_rmap_pmd(struct folio *, struct page *,
246 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
248 void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
250 #define folio_add_file_rmap_pte(folio, page, vma) \
251 folio_add_file_rmap_ptes(folio, page, 1, vma)
252 void folio_add_file_rmap_pmd(struct folio *, struct page *,
254 void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
256 #define folio_remove_rmap_pte(folio, page, vma) \
257 folio_remove_rmap_ptes(folio, page, 1, vma)
258 void folio_remove_rmap_pmd(struct folio *, struct page *,
261 void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
263 void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
267 static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
270 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
271 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
273 if (PageAnonExclusive(&folio->page)) {
274 if (unlikely(folio_needs_cow_for_dma(vma, folio)))
276 ClearPageAnonExclusive(&folio->page);
278 atomic_inc(&folio->_entire_mapcount);
279 atomic_inc(&folio->_large_mapcount);
284 static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
286 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
287 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
288 VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
294 if (unlikely(folio_maybe_dma_pinned(folio)))
296 ClearPageAnonExclusive(&folio->page);
307 static inline void hugetlb_add_file_rmap(struct folio *folio)
309 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
310 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
312 atomic_inc(&folio->_entire_mapcount);
313 atomic_inc(&folio->_large_mapcount);
316 static inline void hugetlb_remove_rmap(struct folio *folio)
318 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
320 atomic_dec(&folio->_entire_mapcount);
321 atomic_dec(&folio->_large_mapcount);
324 static __always_inline void __folio_dup_file_rmap(struct folio *folio,
329 __folio_rmap_sanity_checks(folio, page, nr_pages, level);
333 if (!folio_test_large(folio)) {
334 atomic_inc(&folio->_mapcount);
341 atomic_add(orig_nr_pages, &folio->_large_mapcount);
344 atomic_inc(&folio->_entire_mapcount);
345 atomic_inc(&folio->_large_mapcount);
360 static inline void folio_dup_file_rmap_ptes(struct folio *folio,
363 __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
366 static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
369 __folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE);
381 static inline void folio_dup_file_rmap_pmd(struct folio *folio,
385 __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE);
391 static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
399 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
400 __folio_rmap_sanity_checks(folio, page, nr_pages, level);
409 maybe_pinned = likely(!folio_is_device_private(folio)) &&
410 unlikely(folio_needs_cow_for_dma(src_vma, folio));
425 if (!folio_test_large(folio)) {
428 atomic_inc(&folio->_mapcount);
437 atomic_add(orig_nr_pages, &folio->_large_mapcount);
445 atomic_inc(&folio->_entire_mapcount);
446 atomic_inc(&folio->_large_mapcount);
475 static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
478 return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
482 static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
485 return __folio_try_dup_anon_rmap(folio, page, 1, src_vma,
511 static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
515 return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
523 static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
526 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
527 VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
528 __folio_rmap_sanity_checks(folio, page, nr_pages, level);
531 if (unlikely(folio_is_device_private(folio))) {
582 if (unlikely(folio_maybe_dma_pinned(folio)))
616 static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
619 return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
645 static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
649 return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
660 int folio_referenced(struct folio *, int is_locked,
663 void try_to_migrate(struct folio *folio, enum ttu_flags flags);
664 void try_to_unmap(struct folio *, enum ttu_flags flags);
743 int folio_mkclean(struct folio *);
753 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
774 bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
776 int (*done)(struct folio *folio);
777 struct anon_vma *(*anon_lock)(struct folio *folio,
782 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
783 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
784 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
792 static inline int folio_referenced(struct folio *folio, int is_locked,
800 static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
804 static inline int folio_mkclean(struct folio *folio)