
Searched refs:page (results 1–25 of 103), sorted by relevance

/mm/
swap.c
81 static void __page_cache_release(struct page *page) in __page_cache_release() argument
83 if (PageLRU(page)) { in __page_cache_release()
84 pg_data_t *pgdat = page_pgdat(page); in __page_cache_release()
89 lruvec = mem_cgroup_page_lruvec(page, pgdat); in __page_cache_release()
90 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
91 __ClearPageLRU(page); in __page_cache_release()
92 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
95 __ClearPageWaiters(page); in __page_cache_release()
98 static void __put_single_page(struct page *page) in __put_single_page() argument
100 __page_cache_release(page); in __put_single_page()
[all …]
filemap.c
126 struct page *page, void *shadow) in page_cache_delete() argument
128 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
134 if (!PageHuge(page)) { in page_cache_delete()
135 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
136 nr = compound_nr(page); in page_cache_delete()
139 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
140 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
141 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
146 page->mapping = NULL; in page_cache_delete()
163 struct page *page) in unaccount_page_cache_page() argument
[all …]
migrate.c
63 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
76 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
84 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
97 if (unlikely(!trylock_page(page))) in isolate_movable_page()
100 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
103 mapping = page_mapping(page); in isolate_movable_page()
104 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
106 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
110 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
111 SetPageIsolated(page); in isolate_movable_page()
[all …]
rmap.c
484 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
490 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
493 if (!page_mapped(page)) in page_get_anon_vma()
509 if (!page_mapped(page)) { in page_get_anon_vma()
528 struct anon_vma *page_lock_anon_vma_read(struct page *page, in page_lock_anon_vma_read() argument
537 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
540 if (!page_mapped(page)) in page_lock_anon_vma_read()
551 if (!page_mapped(page)) { in page_lock_anon_vma_read()
557 trace_android_vh_do_page_trylock(page, NULL, NULL, &success); in page_lock_anon_vma_read()
575 if (!page_mapped(page)) { in page_lock_anon_vma_read()
[all …]
truncate.c
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries() local
86 if (!xa_is_value(page)) { in truncate_exceptional_pvec_entries()
87 pvec->pages[j++] = page; in truncate_exceptional_pvec_entries()
99 __clear_shadow_entry(mapping, index, page); in truncate_exceptional_pvec_entries()
152 void do_invalidatepage(struct page *page, unsigned int offset, in do_invalidatepage() argument
155 void (*invalidatepage)(struct page *, unsigned int, unsigned int); in do_invalidatepage()
157 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
163 (*invalidatepage)(page, offset, length); in do_invalidatepage()
176 static void truncate_cleanup_page(struct page *page) in truncate_cleanup_page() argument
178 if (page_mapped(page)) in truncate_cleanup_page()
[all …]
page_io.c
31 struct page *page, bio_end_io_t end_io) in get_swap_bio() argument
39 bio->bi_iter.bi_sector = map_swap_page(page, &bdev); in get_swap_bio()
44 bio_add_page(bio, page, thp_size(page), 0); in get_swap_bio()
51 struct page *page = bio_first_page_all(bio); in end_swap_bio_write() local
54 SetPageError(page); in end_swap_bio_write()
63 set_page_dirty(page); in end_swap_bio_write()
67 ClearPageReclaim(page); in end_swap_bio_write()
69 end_page_writeback(page); in end_swap_bio_write()
75 struct page *page = bio_first_page_all(bio); in end_swap_bio_read() local
79 SetPageError(page); in end_swap_bio_read()
[all …]
page_isolation.c
18 static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags) in set_migratetype_isolate() argument
20 struct zone *zone = page_zone(page); in set_migratetype_isolate()
21 struct page *unmovable; in set_migratetype_isolate()
31 if (is_migrate_isolate_page(page)) { in set_migratetype_isolate()
40 unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags); in set_migratetype_isolate()
43 int mt = get_pageblock_migratetype(page); in set_migratetype_isolate()
45 set_pageblock_migratetype(page, MIGRATE_ISOLATE); in set_migratetype_isolate()
47 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, in set_migratetype_isolate()
67 static void unset_migratetype_isolate(struct page *page, unsigned migratetype) in unset_migratetype_isolate() argument
74 struct page *buddy; in unset_migratetype_isolate()
[all …]
page_alloc.c
213 static inline int get_pcppage_migratetype(struct page *page) in get_pcppage_migratetype() argument
215 return page->index; in get_pcppage_migratetype()
218 static inline void set_pcppage_migratetype(struct page *page, int migratetype) in set_pcppage_migratetype() argument
220 page->index = migratetype; in set_pcppage_migratetype()
265 static void __free_pages_ok(struct page *page, unsigned int order,
411 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) in should_skip_kasan_poison() argument
416 PageSkipKASanPoison(page); in should_skip_kasan_poison()
467 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) in should_skip_kasan_poison() argument
471 PageSkipKASanPoison(page); in should_skip_kasan_poison()
486 static inline unsigned long *get_pageblock_bitmap(struct page *page, in get_pageblock_bitmap() argument
[all …]
swap_state.c
115 struct page *page; in get_shadow_from_swap_cache() local
117 page = find_get_entry(address_space, idx); in get_shadow_from_swap_cache()
118 if (xa_is_value(page)) in get_shadow_from_swap_cache()
119 return page; in get_shadow_from_swap_cache()
120 if (page) in get_shadow_from_swap_cache()
121 put_page(page); in get_shadow_from_swap_cache()
129 int add_to_swap_cache(struct page *page, swp_entry_t entry, in add_to_swap_cache() argument
134 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
135 unsigned long i, nr = thp_nr_pages(page); in add_to_swap_cache()
138 VM_BUG_ON_PAGE(!PageLocked(page), page); in add_to_swap_cache()
[all …]
gup.c
33 static void hpage_pincount_add(struct page *page, int refs) in hpage_pincount_add() argument
35 VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); in hpage_pincount_add()
36 VM_BUG_ON_PAGE(page != compound_head(page), page); in hpage_pincount_add()
38 atomic_add(refs, compound_pincount_ptr(page)); in hpage_pincount_add()
41 static void hpage_pincount_sub(struct page *page, int refs) in hpage_pincount_sub() argument
43 VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); in hpage_pincount_sub()
44 VM_BUG_ON_PAGE(page != compound_head(page), page); in hpage_pincount_sub()
46 atomic_sub(refs, compound_pincount_ptr(page)); in hpage_pincount_sub()
50 static void put_page_refs(struct page *page, int refs) in put_page_refs() argument
53 if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page)) in put_page_refs()
[all …]
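
The gup.c hits above are compound-page pin accounting: hpage_pincount_add()/hpage_pincount_sub() adjust an atomic counter, and put_page_refs() warns when a caller drops more references than it holds. Below is a minimal userspace sketch of that add/sub-with-underflow-check idiom in C11 atomics; struct pin_tracker and both helper names are hypothetical stand-ins, not kernel API.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the compound page's pincount field. */
struct pin_tracker {
    atomic_int pincount;
};

static void pincount_add(struct pin_tracker *t, int refs)
{
    atomic_fetch_add_explicit(&t->pincount, refs, memory_order_relaxed);
}

static void pincount_sub(struct pin_tracker *t, int refs)
{
    int old = atomic_fetch_sub_explicit(&t->pincount, refs,
                                        memory_order_relaxed);

    /* Same spirit as VM_WARN_ON_ONCE_PAGE(): dropping more pins than
     * are held is a caller bug worth reporting, not a fatal error. */
    if (old < refs)
        fprintf(stderr, "pincount underflow: %d - %d\n", old, refs);
}

int main(void)
{
    struct pin_tracker t = { .pincount = 0 };

    pincount_add(&t, 3);
    pincount_sub(&t, 3);
    assert(atomic_load(&t.pincount) == 0);
    return 0;
}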
mlock.c
60 void clear_page_mlock(struct page *page) in clear_page_mlock() argument
64 if (!TestClearPageMlocked(page)) in clear_page_mlock()
67 nr_pages = thp_nr_pages(page); in clear_page_mlock()
68 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in clear_page_mlock()
76 if (!isolate_lru_page(page)) { in clear_page_mlock()
77 putback_lru_page(page); in clear_page_mlock()
82 if (PageUnevictable(page)) in clear_page_mlock()
91 void mlock_vma_page(struct page *page) in mlock_vma_page() argument
94 BUG_ON(!PageLocked(page)); in mlock_vma_page()
96 VM_BUG_ON_PAGE(PageTail(page), page); in mlock_vma_page()
[all …]
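
clear_page_mlock() above hinges on TestClearPageMlocked(): atomically clear a flag bit and learn whether it was set, so exactly one caller performs the NR_MLOCK accounting. A small sketch of the same test-and-clear idiom with C11 atomics, assuming a made-up PG_MLOCKED bit in a plain flags word:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PG_MLOCKED (1u << 0)    /* hypothetical flag bit */

/* Atomically clear the bit and report whether it was set beforehand:
 * the userspace shape of TestClearPageMlocked(). */
static bool test_and_clear_flag(atomic_uint *flags, unsigned int bit)
{
    unsigned int old = atomic_fetch_and_explicit(flags, ~bit,
                                                 memory_order_acq_rel);
    return old & bit;
}

int main(void)
{
    atomic_uint flags = PG_MLOCKED;

    if (test_and_clear_flag(&flags, PG_MLOCKED))
        printf("was set: do the accounting exactly once\n");
    if (!test_and_clear_flag(&flags, PG_MLOCKED))
        printf("second call is a no-op\n");
    return 0;
}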
internal.h
83 struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
84 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);
98 static inline bool page_evictable(struct page *page) in page_evictable() argument
104 ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); in page_evictable()
113 static inline void set_page_refcounted(struct page *page) in set_page_refcounted() argument
115 VM_BUG_ON_PAGE(PageTail(page), page); in set_page_refcounted()
116 VM_BUG_ON_PAGE(page_ref_count(page), page); in set_page_refcounted()
117 set_page_count(page, 1); in set_page_refcounted()
131 extern int isolate_lru_page(struct page *page);
132 extern void putback_lru_page(struct page *page);
[all …]
slub.c
341 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
343 VM_BUG_ON_PAGE(PageTail(page), page); in slab_lock()
344 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
347 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
349 VM_BUG_ON_PAGE(PageTail(page), page); in slab_unlock()
350 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
354 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
363 if (cmpxchg_double(&page->freelist, &page->counters, in __cmpxchg_double_slab()
370 slab_lock(page); in __cmpxchg_double_slab()
371 if (page->freelist == freelist_old && in __cmpxchg_double_slab()
[all …]
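
slab_lock()/slab_unlock() above embed a spinlock in a single bit of page->flags via bit_spin_lock(PG_locked, ...), and __cmpxchg_double_slab() falls back to that lock when a double-word cmpxchg is unavailable. The following is a userspace approximation of the bit spinlock only, using atomic fetch-or/fetch-and; PG_LOCKED is invented for the example, and no fairness or sleeping is attempted:

#include <stdatomic.h>

#define PG_LOCKED (1u << 0)     /* invented lock bit inside a flags word */

/* Spin until the lock bit transitions 0 -> 1, roughly what
 * bit_spin_lock(PG_locked, &page->flags) does in slab_lock(). */
static void flag_bit_lock(atomic_uint *flags)
{
    while (atomic_fetch_or_explicit(flags, PG_LOCKED,
                                    memory_order_acquire) & PG_LOCKED)
        ;                       /* already held: busy-wait */
}

static void flag_bit_unlock(atomic_uint *flags)
{
    atomic_fetch_and_explicit(flags, ~PG_LOCKED, memory_order_release);
}

int main(void)
{
    atomic_uint flags = 0;

    flag_bit_lock(&flags);
    /* critical section: freelist and counters are compared here
     * in the kernel's slow path */
    flag_bit_unlock(&flags);
    return 0;
}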
memory-failure.c
68 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) in page_handle_poison() argument
75 if (dissolve_free_huge_page(page) || !take_page_off_buddy(page)) in page_handle_poison()
86 SetPageHWPoison(page); in page_handle_poison()
88 put_page(page); in page_handle_poison()
89 page_ref_inc(page); in page_handle_poison()
108 static int hwpoison_filter_dev(struct page *p) in hwpoison_filter_dev()
138 static int hwpoison_filter_flags(struct page *p) in hwpoison_filter_flags()
163 static int hwpoison_filter_task(struct page *p) in hwpoison_filter_task()
174 static int hwpoison_filter_task(struct page *p) { return 0; } in hwpoison_filter_task()
177 int hwpoison_filter(struct page *p) in hwpoison_filter()
[all …]
huge_memory.c
63 struct page *huge_zero_page __read_mostly;
90 static struct page *get_huge_zero_page(void) in get_huge_zero_page()
92 struct page *zero_page; in get_huge_zero_page()
127 struct page *mm_get_huge_zero_page(struct mm_struct *mm) in mm_get_huge_zero_page()
158 struct page *zero_page = xchg(&huge_zero_page, NULL); in shrink_huge_zero_page_scan()
487 static inline struct deferred_split *get_deferred_split_queue(struct page *page) in get_deferred_split_queue() argument
489 struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; in get_deferred_split_queue()
490 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); in get_deferred_split_queue()
498 static inline struct deferred_split *get_deferred_split_queue(struct page *page) in get_deferred_split_queue() argument
500 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); in get_deferred_split_queue()
[all …]
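
The huge_memory.c excerpt shows the lazily allocated shared huge zero page: get_huge_zero_page() creates it on first demand, and the shrinker later xchg()s the pointer away. A plausible userspace sketch of the publish-on-first-use half, built on calloc() and a compare-and-swap; zero_page and PAGE_SIZE are local stand-ins, and error handling is elided:

#include <stdatomic.h>
#include <stdlib.h>

#define PAGE_SIZE 4096          /* local stand-in */

static _Atomic(void *) zero_page;

/* Allocate on first use and publish with one CAS; the loser of the
 * race frees its copy and adopts the winner's pointer. */
static void *get_zero_page(void)
{
    void *p = atomic_load_explicit(&zero_page, memory_order_acquire);
    void *expected = NULL;
    void *fresh;

    if (p)
        return p;
    fresh = calloc(1, PAGE_SIZE);
    if (atomic_compare_exchange_strong(&zero_page, &expected, fresh))
        return fresh;
    free(fresh);                /* another caller won the race */
    return expected;
}

int main(void)
{
    /* Every caller sees the same shared page. */
    return get_zero_page() == get_zero_page() ? 0 : 1;
}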
balloon_compaction.c
15 struct page *page) in balloon_page_enqueue_one() argument
23 BUG_ON(!trylock_page(page)); in balloon_page_enqueue_one()
24 balloon_page_insert(b_dev_info, page); in balloon_page_enqueue_one()
25 unlock_page(page); in balloon_page_enqueue_one()
43 struct page *page, *tmp; in balloon_page_list_enqueue() local
48 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
49 list_del(&page->lru); in balloon_page_list_enqueue()
50 balloon_page_enqueue_one(b_dev_info, page); in balloon_page_list_enqueue()
79 struct page *page, *tmp; in balloon_page_list_dequeue() local
84 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_list_dequeue()
[all …]
vmscan.c
169 struct page *prev; \
754 static inline int is_page_cache_freeable(struct page *page) in is_page_cache_freeable() argument
761 int page_cache_pins = thp_nr_pages(page); in is_page_cache_freeable()
762 return page_count(page) - page_has_private(page) == 1 + page_cache_pins; in is_page_cache_freeable()
789 struct page *page, int error) in handle_write_error() argument
791 lock_page(page); in handle_write_error()
792 if (page_mapping(page) == mapping) in handle_write_error()
794 unlock_page(page); in handle_write_error()
813 static pageout_t pageout(struct page *page, struct address_space *mapping) in pageout() argument
831 if (!is_page_cache_freeable(page)) in pageout()
[all …]
page_idle.c
32 static struct page *page_idle_get_page(unsigned long pfn) in page_idle_get_page()
34 struct page *page = pfn_to_online_page(pfn); in page_idle_get_page() local
37 if (!page || !PageLRU(page) || in page_idle_get_page()
38 !get_page_unless_zero(page)) in page_idle_get_page()
41 pgdat = page_pgdat(page); in page_idle_get_page()
43 if (unlikely(!PageLRU(page))) { in page_idle_get_page()
44 put_page(page); in page_idle_get_page()
45 page = NULL; in page_idle_get_page()
48 return page; in page_idle_get_page()
51 static bool page_idle_clear_pte_refs_one(struct page *page, in page_idle_clear_pte_refs_one() argument
[all …]
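
page_idle_get_page() relies on get_page_unless_zero(): take a reference only if the refcount has not already fallen to zero, i.e. only if the page is not mid-free. A compact sketch of that conditional-acquire loop with a C11 compare-exchange; get_unless_zero() is an illustrative name, not the kernel helper:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only while the count is still nonzero: the
 * get_page_unless_zero() idiom. */
static bool get_unless_zero(atomic_int *refcount)
{
    int cur = atomic_load_explicit(refcount, memory_order_relaxed);

    while (cur != 0) {
        if (atomic_compare_exchange_weak_explicit(refcount, &cur, cur + 1,
                                                  memory_order_acquire,
                                                  memory_order_relaxed))
            return true;        /* reference taken */
        /* cur was reloaded by the failed CAS; retry or give up */
    }
    return false;               /* too late: refcount already hit zero */
}

int main(void)
{
    atomic_int live = 1, dying = 0;

    assert(get_unless_zero(&live));     /* count is now 2 */
    assert(!get_unless_zero(&dying));   /* stays 0, caller backs off */
    return 0;
}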
khugepaged.c
559 static void release_pte_page(struct page *page) in release_pte_page() argument
561 mod_node_page_state(page_pgdat(page), in release_pte_page()
562 NR_ISOLATED_ANON + page_is_file_lru(page), in release_pte_page()
563 -compound_nr(page)); in release_pte_page()
564 unlock_page(page); in release_pte_page()
565 putback_lru_page(page); in release_pte_page()
571 struct page *page, *tmp; in release_pte_pages() local
576 page = pte_page(pteval); in release_pte_pages()
578 !PageCompound(page)) in release_pte_pages()
579 release_pte_page(page); in release_pte_pages()
[all …]
hugetlb.c
84 static inline bool PageHugeFreed(struct page *head) in PageHugeFreed()
89 static inline void SetPageHugeFreed(struct page *head) in SetPageHugeFreed()
94 static inline void ClearPageHugeFreed(struct page *head) in ClearPageHugeFreed()
1070 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
1072 int nid = page_to_nid(page); in enqueue_huge_page()
1073 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
1076 SetPageHugeFreed(page); in enqueue_huge_page()
1079 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact()
1081 struct page *page; in dequeue_huge_page_node_exact() local
1084 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { in dequeue_huge_page_node_exact()
[all …]
shmem.c
147 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
148 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
151 struct page **pagep, enum sgp_type sgp,
155 struct page **pagep, enum sgp_type sgp,
160 struct page **pagep, enum sgp_type sgp) in shmem_getpage()
535 struct page *page; in shmem_unused_huge_shrink() local
586 page = find_get_page(inode->i_mapping, in shmem_unused_huge_shrink()
588 if (!page) in shmem_unused_huge_shrink()
592 if (!PageTransHuge(page)) { in shmem_unused_huge_shrink()
593 put_page(page); in shmem_unused_huge_shrink()
[all …]
dmapool.c
70 struct dma_page *page; in show_pools() local
86 list_for_each_entry(page, &pool->page_list, page_list) { in show_pools()
88 blocks += page->in_use; in show_pools()
203 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) in pool_initialise_page() argument
214 *(int *)(page->vaddr + offset) = next; in pool_initialise_page()
221 struct dma_page *page; in pool_alloc_page() local
223 page = kmalloc(sizeof(*page), mem_flags); in pool_alloc_page()
224 if (!page) in pool_alloc_page()
226 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
227 &page->dma, mem_flags); in pool_alloc_page()
[all …]
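
pool_initialise_page() above threads the free list through the blocks themselves: `*(int *)(page->vaddr + offset) = next` stores the offset of the next free block inside each free block, so the list needs no side storage. A self-contained userspace pool using the same in-band trick; sizes and names are arbitrary for the example:

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 64
#define NBLOCKS    16

struct pool {
    unsigned char mem[BLOCK_SIZE * NBLOCKS];
    int next_free;              /* offset of first free block, -1 if full */
};

static void pool_init(struct pool *p)
{
    for (int i = 0; i < NBLOCKS; i++) {
        int off = i * BLOCK_SIZE;
        int next = (i == NBLOCKS - 1) ? -1 : off + BLOCK_SIZE;

        *(int *)(p->mem + off) = next;  /* link lives inside the block */
    }
    p->next_free = 0;
}

static void *pool_alloc(struct pool *p)
{
    void *block;

    if (p->next_free < 0)
        return NULL;
    block = p->mem + p->next_free;
    p->next_free = *(int *)block;       /* pop the in-band link */
    return block;
}

static void pool_free(struct pool *p, void *block)
{
    *(int *)block = p->next_free;       /* push back onto the list */
    p->next_free = (int)((unsigned char *)block - p->mem);
}

int main(void)
{
    static struct pool p;
    void *a, *b;

    pool_init(&p);
    a = pool_alloc(&p);
    b = pool_alloc(&p);
    pool_free(&p, a);
    printf("recycled: %s\n", pool_alloc(&p) == a ? "yes" : "no");
    (void)b;
    return 0;
}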
compaction.c
78 struct page *page, *next; in release_freepages() local
81 list_for_each_entry_safe(page, next, freelist, lru) { in release_freepages()
82 unsigned long pfn = page_to_pfn(page); in release_freepages()
83 list_del(&page->lru); in release_freepages()
84 __free_page(page); in release_freepages()
95 struct page *page, *next; in split_map_pages() local
98 list_for_each_entry_safe(page, next, list, lru) { in split_map_pages()
99 list_del(&page->lru); in split_map_pages()
101 order = page_private(page); in split_map_pages()
104 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
[all …]
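
Both release_freepages() and split_map_pages() iterate with list_for_each_entry_safe(), which caches the next element before the current one is deleted or freed. The same idiom for a plain singly linked list in userspace C: read ->next before free(), never after.

#include <stdlib.h>

struct node {
    int value;
    struct node *next;
};

/* Free every node. Like list_for_each_entry_safe(), the next pointer
 * is saved before the current node goes back to the allocator. */
static void release_all(struct node *head)
{
    struct node *n = head, *next;

    while (n) {
        next = n->next;         /* must be read before free() */
        free(n);
        n = next;
    }
}

int main(void)
{
    struct node *head = NULL;

    for (int i = 0; i < 4; i++) {
        struct node *n = malloc(sizeof(*n));

        n->value = i;
        n->next = head;
        head = n;
    }
    release_all(head);
    return 0;
}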
highmem.c
58 static inline unsigned int get_pkmap_color(struct page *page) in get_pkmap_color() argument
150 struct page *kmap_to_page(void *vaddr) in kmap_to_page()
171 struct page *page; in flush_all_zero_pkmaps() local
193 page = pte_page(pkmap_page_table[i]); in flush_all_zero_pkmaps()
196 set_page_address(page, NULL); in flush_all_zero_pkmaps()
213 static inline unsigned long map_new_virtual(struct page *page) in map_new_virtual() argument
218 unsigned int color = get_pkmap_color(page); in map_new_virtual()
250 if (page_address(page)) in map_new_virtual()
251 return (unsigned long)page_address(page); in map_new_virtual()
259 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); in map_new_virtual()
[all …]
/mm/damon/
paddr.c
19 static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma, in __damon_pa_mkold() argument
23 .page = page, in __damon_pa_mkold()
40 struct page *page = damon_get_page(PHYS_PFN(paddr)); in damon_pa_mkold() local
47 if (!page) in damon_pa_mkold()
50 if (!page_mapped(page) || !page_rmapping(page)) { in damon_pa_mkold()
51 set_page_idle(page); in damon_pa_mkold()
55 need_lock = !PageAnon(page) || PageKsm(page); in damon_pa_mkold()
56 if (need_lock && !trylock_page(page)) in damon_pa_mkold()
59 rmap_walk(page, &rwc); in damon_pa_mkold()
62 unlock_page(page); in damon_pa_mkold()
[all …]
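
damon_pa_mkold() takes the page lock only when the rmap walk requires it (non-anonymous or KSM pages) and uses trylock_page() so a contended page is skipped rather than waited on. A rough pthread analogue of that conditional-trylock shape; walk_one() and page_lock are invented for the sketch:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Only some cases need the lock, and a contended walk is skipped
 * instead of blocking, mirroring the damon_pa_mkold() shape. */
static void walk_one(bool need_lock)
{
    if (need_lock && pthread_mutex_trylock(&page_lock) != 0)
        return;                 /* contended: skip, don't sleep */

    /* ... perform the walk here ... */

    if (need_lock)
        pthread_mutex_unlock(&page_lock);
}

int main(void)
{
    walk_one(true);             /* locked path */
    walk_one(false);            /* lockless path */
    return 0;
}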
