
Searched refs:page (Results 1 – 25 of 74) sorted by relevance


/mm/
swap.c
51 static void __page_cache_release(struct page *page) in __page_cache_release() argument
53 if (PageLRU(page)) { in __page_cache_release()
54 struct zone *zone = page_zone(page); in __page_cache_release()
59 lruvec = mem_cgroup_page_lruvec(page, zone); in __page_cache_release()
60 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
61 __ClearPageLRU(page); in __page_cache_release()
62 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
65 mem_cgroup_uncharge(page); in __page_cache_release()
68 static void __put_single_page(struct page *page) in __put_single_page() argument
70 __page_cache_release(page); in __put_single_page()
[all …]
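The swap.c hits above sit in __page_cache_release()/__put_single_page(): when the last reference to a page goes away, the LRU flag is cleared under the zone's lock, the page is taken off the LRU list, and only then is it freed. A minimal user-space sketch of that put-then-release pattern, with invented names (struct buf, buf_put, buf_release) and a plain mutex standing in for the zone lock — not the kernel implementation:

/* Hypothetical user-space analogue of the put/release pattern above:
 * the last reference drops, a state flag is cleared under a lock, and the
 * object leaves whatever list still holds it before being freed. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
    atomic_int refcount;
    int on_list;                /* rough stand-in for PageLRU */
    pthread_mutex_t *list_lock; /* rough stand-in for the zone LRU lock */
};

static void buf_release(struct buf *b)
{
    if (b->on_list) {
        pthread_mutex_lock(b->list_lock);
        b->on_list = 0;         /* the __ClearPageLRU + del_page_from_lru_list step */
        pthread_mutex_unlock(b->list_lock);
    }
    free(b);
}

static void buf_put(struct buf *b)
{
    /* fetch_sub returns the old value; 1 means we held the last reference */
    if (atomic_fetch_sub(&b->refcount, 1) == 1)
        buf_release(b);
}

int main(void)
{
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    struct buf *b = malloc(sizeof(*b));

    atomic_init(&b->refcount, 1);
    b->on_list = 1;
    b->list_lock = &lock;
    buf_put(b);                 /* drops the only reference and frees */
    puts("released");
    return 0;
}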
filemap.c
113 struct page *page, void *shadow) in page_cache_tree_delete() argument
121 VM_BUG_ON(!PageLocked(page)); in page_cache_tree_delete()
123 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); in page_cache_tree_delete()
145 index = page->index; in page_cache_tree_delete()
180 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
182 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
184 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
190 if (PageUptodate(page) && PageMappedToDisk(page)) in __delete_from_page_cache()
191 cleancache_put_page(page); in __delete_from_page_cache()
193 cleancache_invalidate_page(mapping, page); in __delete_from_page_cache()
[all …]
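The filemap.c hits are the page-cache removal path: the page is looked up in mapping->page_tree by page->index and its slot may be left holding a shadow entry rather than being emptied. A toy sketch of "delete but leave a shadow", with a flat array standing in for the radix tree and an invented tag bit — purely illustrative, not the kernel data structure:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 16
#define SHADOW_TAG 0x1UL   /* low bit set marks a shadow, loosely like the
                            * kernel's exceptional radix-tree entries */

static void *slots[NSLOTS];

static void cache_delete(unsigned long index, unsigned long shadow_info)
{
    assert(index < NSLOTS && slots[index] != NULL);
    /* replace the object pointer with an encoded shadow value */
    slots[index] = (void *)((shadow_info << 1) | SHADOW_TAG);
}

static int slot_is_shadow(unsigned long index)
{
    return ((uintptr_t)slots[index] & SHADOW_TAG) != 0;
}

int main(void)
{
    int dummy_page = 42;

    slots[3] = &dummy_page;          /* "page" cached at index 3 */
    cache_delete(3, 7);              /* evict it, remember eviction info 7 */
    printf("index 3 is shadow: %d\n", slot_is_shadow(3));
    return 0;
}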
migrate.c
85 struct page *page; in putback_movable_pages() local
86 struct page *page2; in putback_movable_pages()
88 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
89 if (unlikely(PageHuge(page))) { in putback_movable_pages()
90 putback_active_hugepage(page); in putback_movable_pages()
93 list_del(&page->lru); in putback_movable_pages()
94 dec_zone_page_state(page, NR_ISOLATED_ANON + in putback_movable_pages()
95 page_is_file_cache(page)); in putback_movable_pages()
96 if (unlikely(isolated_balloon_page(page))) in putback_movable_pages()
97 balloon_page_putback(page); in putback_movable_pages()
[all …]
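putback_movable_pages() iterates with list_for_each_entry_safe(), which needs a second cursor (page2 above) because the current entry may be deleted mid-walk. The same idea in plain, self-contained C on a simple singly linked list — not the kernel's struct list_head:

#include <stdio.h>
#include <stdlib.h>

struct node {
    int value;
    struct node *next;
};

int main(void)
{
    struct node *head = NULL;

    /* build a small list: 2 -> 1 -> 0 */
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->value = i;
        n->next = head;
        head = n;
    }

    /* "safe" traversal: remember next before freeing the current node */
    for (struct node *cur = head, *next; cur; cur = next) {
        next = cur->next;        /* the role played by the page2 cursor */
        printf("putting back %d\n", cur->value);
        free(cur);
    }
    return 0;
}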
truncate.c
81 void do_invalidatepage(struct page *page, unsigned int offset, in do_invalidatepage() argument
84 void (*invalidatepage)(struct page *, unsigned int, unsigned int); in do_invalidatepage()
86 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
92 (*invalidatepage)(page, offset, length); in do_invalidatepage()
109 void cancel_dirty_page(struct page *page, unsigned int account_size) in cancel_dirty_page() argument
111 if (TestClearPageDirty(page)) { in cancel_dirty_page()
112 struct address_space *mapping = page->mapping; in cancel_dirty_page()
114 dec_zone_page_state(page, NR_FILE_DIRTY); in cancel_dirty_page()
135 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument
137 if (page->mapping != mapping) in truncate_complete_page()
[all …]
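do_invalidatepage() dispatches through the address_space's ops table: it reads ->invalidatepage and calls it through a function pointer (the kernel falls back to a default when the op is absent). A sketch of that ops-table-with-fallback shape, with invented names (struct object_ops, do_invalidate) — not the kernel API:

#include <stdio.h>

struct object;

struct object_ops {
    void (*invalidate)(struct object *, unsigned int offset, unsigned int length);
};

struct object {
    const struct object_ops *ops;
};

static void default_invalidate(struct object *obj, unsigned int offset,
                               unsigned int length)
{
    (void)obj;
    printf("default invalidate: offset=%u length=%u\n", offset, length);
}

static void do_invalidate(struct object *obj, unsigned int offset,
                          unsigned int length)
{
    void (*invalidate)(struct object *, unsigned int, unsigned int);

    invalidate = obj->ops ? obj->ops->invalidate : NULL;
    if (!invalidate)
        invalidate = default_invalidate;   /* fall back when no hook is set */
    (*invalidate)(obj, offset, length);
}

int main(void)
{
    struct object obj = { .ops = NULL };

    do_invalidate(&obj, 0, 4096);          /* takes the fallback path */
    return 0;
}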
rmap.c
453 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
459 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); in page_get_anon_vma()
462 if (!page_mapped(page)) in page_get_anon_vma()
478 if (!page_mapped(page)) { in page_get_anon_vma()
496 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
503 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); in page_lock_anon_vma_read()
506 if (!page_mapped(page)) in page_lock_anon_vma_read()
517 if (!page_mapped(page)) { in page_lock_anon_vma_read()
530 if (!page_mapped(page)) { in page_lock_anon_vma_read()
567 __vma_address(struct page *page, struct vm_area_struct *vma) in __vma_address() argument
[all …]
internal.h
20 static inline void set_page_count(struct page *page, int v) in set_page_count() argument
22 atomic_set(&page->_count, v); in set_page_count()
43 static inline void set_page_refcounted(struct page *page) in set_page_refcounted() argument
45 VM_BUG_ON_PAGE(PageTail(page), page); in set_page_refcounted()
46 VM_BUG_ON_PAGE(atomic_read(&page->_count), page); in set_page_refcounted()
47 set_page_count(page, 1); in set_page_refcounted()
50 static inline void __get_page_tail_foll(struct page *page, in __get_page_tail_foll() argument
64 VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page); in __get_page_tail_foll()
66 atomic_inc(&page->first_page->_count); in __get_page_tail_foll()
67 get_huge_page_tail(page); in __get_page_tail_foll()
[all …]
balloon_compaction.c
23 struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info) in balloon_page_enqueue()
26 struct page *page = alloc_page(balloon_mapping_gfp_mask() | in balloon_page_enqueue() local
28 if (!page) in balloon_page_enqueue()
36 BUG_ON(!trylock_page(page)); in balloon_page_enqueue()
38 balloon_page_insert(b_dev_info, page); in balloon_page_enqueue()
41 unlock_page(page); in balloon_page_enqueue()
42 return page; in balloon_page_enqueue()
57 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) in balloon_page_dequeue()
59 struct page *page, *tmp; in balloon_page_dequeue() local
65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_dequeue()
[all …]
swap_state.c
86 int __add_to_swap_cache(struct page *page, swp_entry_t entry) in __add_to_swap_cache() argument
91 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_swap_cache()
92 VM_BUG_ON_PAGE(PageSwapCache(page), page); in __add_to_swap_cache()
93 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in __add_to_swap_cache()
95 page_cache_get(page); in __add_to_swap_cache()
96 SetPageSwapCache(page); in __add_to_swap_cache()
97 set_page_private(page, entry.val); in __add_to_swap_cache()
102 entry.val, page); in __add_to_swap_cache()
105 __inc_zone_page_state(page, NR_FILE_PAGES); in __add_to_swap_cache()
117 set_page_private(page, 0UL); in __add_to_swap_cache()
[all …]
page_isolation.c
12 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages) in set_migratetype_isolate() argument
20 zone = page_zone(page); in set_migratetype_isolate()
24 pfn = page_to_pfn(page); in set_migratetype_isolate()
48 if (!has_unmovable_pages(zone, page, arg.pages_found, in set_migratetype_isolate()
60 int migratetype = get_pageblock_migratetype(page); in set_migratetype_isolate()
62 set_pageblock_migratetype(page, MIGRATE_ISOLATE); in set_migratetype_isolate()
64 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); in set_migratetype_isolate()
75 void unset_migratetype_isolate(struct page *page, unsigned migratetype) in unset_migratetype_isolate() argument
79 struct page *isolated_page = NULL; in unset_migratetype_isolate()
82 struct page *buddy; in unset_migratetype_isolate()
[all …]
slub.c
330 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
332 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
335 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
337 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
340 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new) in set_page_slub_counters() argument
342 struct page tmp; in set_page_slub_counters()
350 page->frozen = tmp.frozen; in set_page_slub_counters()
351 page->inuse = tmp.inuse; in set_page_slub_counters()
352 page->objects = tmp.objects; in set_page_slub_counters()
356 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
[all …]
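slab_lock()/slab_unlock() are bit spinlocks: one bit of the existing page->flags word doubles as a lock, so no extra storage is needed per slab page. A self-contained user-space sketch of the same trick with C11 atomics (no backoff, invented names, not the kernel's bit_spin_lock):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define LOCK_BIT (1UL << 0)

static _Atomic unsigned long obj_flags;   /* stands in for page->flags */
static long counter;

static void bit_lock(_Atomic unsigned long *word)
{
    /* spin until we are the ones who set the bit */
    while (atomic_fetch_or_explicit(word, LOCK_BIT, memory_order_acquire) & LOCK_BIT)
        ;
}

static void bit_unlock(_Atomic unsigned long *word)
{
    atomic_fetch_and_explicit(word, ~LOCK_BIT, memory_order_release);
}

static void *worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        bit_lock(&obj_flags);
        counter++;              /* protected by the bit lock */
        bit_unlock(&obj_flags);
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, worker, NULL);
    pthread_create(&b, NULL, worker, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("counter = %ld (expect 200000)\n", counter);
    return 0;
}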
page_io.c
28 struct page *page, bio_end_io_t end_io) in get_swap_bio() argument
34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); in get_swap_bio()
36 bio->bi_io_vec[0].bv_page = page; in get_swap_bio()
49 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_write() local
52 SetPageError(page); in end_swap_bio_write()
61 set_page_dirty(page); in end_swap_bio_write()
66 ClearPageReclaim(page); in end_swap_bio_write()
68 end_page_writeback(page); in end_swap_bio_write()
75 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_read() local
78 SetPageError(page); in end_swap_bio_read()
[all …]
zsmalloc.c
226 struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
397 static int is_first_page(struct page *page) in is_first_page() argument
399 return PagePrivate(page); in is_first_page()
402 static int is_last_page(struct page *page) in is_last_page() argument
404 return PagePrivate2(page); in is_last_page()
407 static void get_zspage_mapping(struct page *page, unsigned int *class_idx, in get_zspage_mapping() argument
411 BUG_ON(!is_first_page(page)); in get_zspage_mapping()
413 m = (unsigned long)page->mapping; in get_zspage_mapping()
418 static void set_zspage_mapping(struct page *page, unsigned int class_idx, in set_zspage_mapping() argument
422 BUG_ON(!is_first_page(page)); in set_zspage_mapping()
[all …]
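get_zspage_mapping()/set_zspage_mapping() reuse page->mapping as a plain unsigned long that packs two small integers (class index and fullness group). A stand-alone sketch of that encoding; the field width below is an assumption, not zsmalloc's actual layout:

#include <assert.h>
#include <stdio.h>

#define FULLNESS_BITS 4
#define FULLNESS_MASK ((1UL << FULLNESS_BITS) - 1)

static unsigned long pack(unsigned int class_idx, unsigned int fullness)
{
    assert(fullness <= FULLNESS_MASK);
    return ((unsigned long)class_idx << FULLNESS_BITS) | fullness;
}

static void unpack(unsigned long m, unsigned int *class_idx, unsigned int *fullness)
{
    *fullness  = m & FULLNESS_MASK;
    *class_idx = m >> FULLNESS_BITS;
}

int main(void)
{
    unsigned int ci, fg;
    unsigned long m = pack(42, 3);

    unpack(m, &ci, &fg);
    printf("class_idx=%u fullness=%u\n", ci, fg);
    return 0;
}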
page_alloc.c
166 static void __free_pages_ok(struct page *page, unsigned int order);
249 void set_pageblock_migratetype(struct page *page, int migratetype) in set_pageblock_migratetype() argument
255 set_pageblock_flags_group(page, (unsigned long)migratetype, in set_pageblock_migratetype()
262 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
266 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries()
285 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
287 if (!pfn_valid_within(page_to_pfn(page))) in page_is_consistent()
289 if (zone != page_zone(page)) in page_is_consistent()
297 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
299 if (page_outside_zone_boundaries(zone, page)) in bad_range()
[all …]
ksm.c
366 struct page *page; in break_ksm() local
371 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm()
372 if (IS_ERR_OR_NULL(page)) in break_ksm()
374 if (PageKsm(page)) in break_ksm()
379 put_page(page); in break_ksm()
445 static struct page *page_trans_compound_anon(struct page *page) in page_trans_compound_anon() argument
447 if (PageTransCompound(page)) { in page_trans_compound_anon()
448 struct page *head = compound_head(page); in page_trans_compound_anon()
459 static struct page *get_mergeable_page(struct rmap_item *rmap_item) in get_mergeable_page()
464 struct page *page; in get_mergeable_page() local
[all …]
memory-failure.c
79 static int hwpoison_filter_dev(struct page *p) in hwpoison_filter_dev()
109 static int hwpoison_filter_flags(struct page *p) in hwpoison_filter_flags()
134 static int hwpoison_filter_task(struct page *p) in hwpoison_filter_task()
157 static int hwpoison_filter_task(struct page *p) { return 0; } in hwpoison_filter_task()
160 int hwpoison_filter(struct page *p) in hwpoison_filter()
177 int hwpoison_filter(struct page *p) in hwpoison_filter()
191 unsigned long pfn, struct page *page, int flags) in kill_proc() argument
205 si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; in kill_proc()
230 void shake_page(struct page *p, int access) in shake_page()
301 static void add_to_kill(struct task_struct *tsk, struct page *p, in add_to_kill()
[all …]
hugetlb.c
515 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
517 int nid = page_to_nid(page); in enqueue_huge_page()
518 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
523 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) in dequeue_huge_page_node()
525 struct page *page; in dequeue_huge_page_node() local
527 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node()
528 if (!is_migrate_isolate_page(page)) in dequeue_huge_page_node()
534 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node()
536 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node()
537 set_page_refcounted(page); in dequeue_huge_page_node()
[all …]
mlock.c
57 void clear_page_mlock(struct page *page) in clear_page_mlock() argument
59 if (!TestClearPageMlocked(page)) in clear_page_mlock()
62 mod_zone_page_state(page_zone(page), NR_MLOCK, in clear_page_mlock()
63 -hpage_nr_pages(page)); in clear_page_mlock()
65 if (!isolate_lru_page(page)) { in clear_page_mlock()
66 putback_lru_page(page); in clear_page_mlock()
71 if (PageUnevictable(page)) in clear_page_mlock()
80 void mlock_vma_page(struct page *page) in mlock_vma_page() argument
83 BUG_ON(!PageLocked(page)); in mlock_vma_page()
85 if (!TestSetPageMlocked(page)) { in mlock_vma_page()
[all …]
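The mlock.c hits hinge on atomic test-and-set / test-and-clear of a flag bit (TestSetPageMlocked / TestClearPageMlocked): whichever caller actually flips the bit does the NR_MLOCK accounting, exactly once. The same pattern with C11 atomics on an ordinary flags word (names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_MLOCKED (1UL << 3)

static bool test_and_set_flag(_Atomic unsigned long *flags, unsigned long bit)
{
    /* returns the previous state of the bit, like TestSetPageMlocked() */
    return atomic_fetch_or(flags, bit) & bit;
}

static bool test_and_clear_flag(_Atomic unsigned long *flags, unsigned long bit)
{
    /* returns the previous state of the bit, like TestClearPageMlocked() */
    return atomic_fetch_and(flags, ~bit) & bit;
}

int main(void)
{
    _Atomic unsigned long flags = 0;

    if (!test_and_set_flag(&flags, FLAG_MLOCKED))
        puts("first setter does the accounting");
    if (test_and_clear_flag(&flags, FLAG_MLOCKED))
        puts("the clearer undoes it exactly once");
    return 0;
}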
vmscan.c
107 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
113 struct page *prev; \
127 struct page *prev; \
447 static inline int is_page_cache_freeable(struct page *page) in is_page_cache_freeable() argument
454 return page_count(page) - page_has_private(page) == 2; in is_page_cache_freeable()
482 struct page *page, int error) in handle_write_error() argument
484 lock_page(page); in handle_write_error()
485 if (page_mapping(page) == mapping) in handle_write_error()
487 unlock_page(page); in handle_write_error()
506 static pageout_t pageout(struct page *page, struct address_space *mapping, in pageout() argument
[all …]
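lru_to_page() is just list_entry(), i.e. container_of(): recover a pointer to the enclosing structure from a pointer to one of its embedded members. A minimal self-contained version (the struct names are invented, and this is not the kernel macro's exact definition):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_link { struct list_link *prev, *next; };

struct fake_page {
    int index;
    struct list_link lru;    /* embedded list node, like page->lru */
};

int main(void)
{
    struct fake_page p = { .index = 7 };
    struct list_link *link = &p.lru;     /* all we have is the member pointer */
    struct fake_page *back = container_of(link, struct fake_page, lru);

    printf("recovered index = %d\n", back->index);
    return 0;
}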
huge_memory.c
174 static struct page *huge_zero_page __read_mostly;
176 static inline bool is_huge_zero_page(struct page *page) in is_huge_zero_page() argument
178 return ACCESS_ONCE(huge_zero_page) == page; in is_huge_zero_page()
186 static struct page *get_huge_zero_page(void) in get_huge_zero_page()
188 struct page *zero_page; in get_huge_zero_page()
233 struct page *zero_page = xchg(&huge_zero_page, NULL); in shrink_huge_zero_page_scan()
705 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) in mk_huge_pmd() argument
708 entry = mk_pmd(page, prot); in mk_huge_pmd()
716 struct page *page) in __do_huge_pmd_anonymous_page() argument
722 VM_BUG_ON_PAGE(!PageCompound(page), page); in __do_huge_pmd_anonymous_page()
[all …]
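The huge zero page hits show a lazily created, globally shared object: it is installed with a compare-and-swap and later reclaimed by the shrinker with an atomic exchange (the xchg(&huge_zero_page, NULL) at line 233). A simplified sketch of that ownership handoff; the real code also refcounts the zero page, which is omitted here:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) shared_buf;

static void *get_shared_buf(void)
{
    void *p = atomic_load(&shared_buf);
    if (p)
        return p;

    p = calloc(1, 4096);                 /* build a candidate */
    void *expected = NULL;
    if (!atomic_compare_exchange_strong(&shared_buf, &expected, p)) {
        free(p);                         /* somebody else installed one first */
        p = expected;
    }
    return p;
}

static void shrink_shared_buf(void)
{
    /* atomically take ownership away from everyone else, then free */
    void *p = atomic_exchange(&shared_buf, (void *)NULL);
    free(p);                             /* free(NULL) is harmless if unset */
}

int main(void)
{
    printf("buf at %p\n", get_shared_buf());
    shrink_shared_buf();
    printf("after shrink: %p\n", atomic_load(&shared_buf));
    return 0;
}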
dmapool.c
73 struct dma_page *page; in show_pools() local
89 list_for_each_entry(page, &pool->page_list, page_list) { in show_pools()
91 blocks += page->in_use; in show_pools()
206 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) in pool_initialise_page() argument
217 *(int *)(page->vaddr + offset) = next; in pool_initialise_page()
224 struct dma_page *page; in pool_alloc_page() local
226 page = kmalloc(sizeof(*page), mem_flags); in pool_alloc_page()
227 if (!page) in pool_alloc_page()
229 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
230 &page->dma, mem_flags); in pool_alloc_page()
[all …]
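pool_initialise_page() threads a free list through the blocks themselves: each free block's first bytes hold the offset of the next free block (the *(int *)(page->vaddr + offset) = next at line 217). A self-contained sketch of that allocator trick; pool and block sizes are invented:

#include <stdio.h>

#define POOL_SIZE  256
#define BLOCK_SIZE 32

static _Alignas(int) unsigned char pool[POOL_SIZE];
static int free_off;                    /* offset of first free block, -1 if none */

static void pool_init(void)
{
    int off = 0;

    free_off = 0;
    do {
        int next = off + BLOCK_SIZE;
        /* store the next free offset inside the free block itself */
        *(int *)(pool + off) = (next < POOL_SIZE) ? next : -1;
        off = next;
    } while (off < POOL_SIZE);
}

static void *pool_alloc(void)
{
    if (free_off < 0)
        return NULL;
    void *p = pool + free_off;
    free_off = *(int *)p;               /* pop the head of the embedded list */
    return p;
}

int main(void)
{
    pool_init();
    printf("first  block at offset %ld\n", (long)((unsigned char *)pool_alloc() - pool));
    printf("second block at offset %ld\n", (long)((unsigned char *)pool_alloc() - pool));
    return 0;
}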
gup.c
19 static struct page *no_page_table(struct vm_area_struct *vma, in no_page_table()
45 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte()
49 struct page *page; in follow_page_pte() local
84 page = vm_normal_page(vma, address, pte); in follow_page_pte()
85 if (unlikely(!page)) { in follow_page_pte()
89 page = pte_page(pte); in follow_page_pte()
93 get_page_foll(page); in follow_page_pte()
96 !pte_dirty(pte) && !PageDirty(page)) in follow_page_pte()
97 set_page_dirty(page); in follow_page_pte()
103 mark_page_accessed(page); in follow_page_pte()
[all …]
highmem.c
57 static inline unsigned int get_pkmap_color(struct page *page) in get_pkmap_color() argument
154 struct page *kmap_to_page(void *vaddr) in kmap_to_page()
175 struct page *page; in flush_all_zero_pkmaps() local
197 page = pte_page(pkmap_page_table[i]); in flush_all_zero_pkmaps()
200 set_page_address(page, NULL); in flush_all_zero_pkmaps()
217 static inline unsigned long map_new_virtual(struct page *page) in map_new_virtual() argument
222 unsigned int color = get_pkmap_color(page); in map_new_virtual()
254 if (page_address(page)) in map_new_virtual()
255 return (unsigned long)page_address(page); in map_new_virtual()
263 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); in map_new_virtual()
[all …]
compaction.c
43 struct page *page, *next; in release_freepages() local
46 list_for_each_entry_safe(page, next, freelist, lru) { in release_freepages()
47 list_del(&page->lru); in release_freepages()
48 __free_page(page); in release_freepages()
57 struct page *page; in map_pages() local
59 list_for_each_entry(page, list, lru) { in map_pages()
60 arch_alloc_page(page, 0); in map_pages()
61 kernel_map_pages(page, 1, 1); in map_pages()
87 static struct page *pageblock_pfn_to_page(unsigned long start_pfn, in pageblock_pfn_to_page()
90 struct page *start_page; in pageblock_pfn_to_page()
[all …]
shmem.c
119 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
120 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
123 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
126 struct page **pagep, enum sgp_type sgp, int *fault_type) in shmem_getpage()
297 static int shmem_add_to_page_cache(struct page *page, in shmem_add_to_page_cache() argument
303 VM_BUG_ON_PAGE(!PageLocked(page), page); in shmem_add_to_page_cache()
304 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in shmem_add_to_page_cache()
306 page_cache_get(page); in shmem_add_to_page_cache()
307 page->mapping = mapping; in shmem_add_to_page_cache()
308 page->index = index; in shmem_add_to_page_cache()
[all …]
debug-pagealloc.c
9 static inline void set_page_poison(struct page *page) in set_page_poison() argument
11 __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags); in set_page_poison()
14 static inline void clear_page_poison(struct page *page) in clear_page_poison() argument
16 __clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags); in clear_page_poison()
19 static inline bool page_poison(struct page *page) in page_poison() argument
21 return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags); in page_poison()
24 static void poison_page(struct page *page) in poison_page() argument
26 void *addr = kmap_atomic(page); in poison_page()
28 set_page_poison(page); in poison_page()
33 static void poison_pages(struct page *page, int n) in poison_pages() argument
[all …]
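The debug-pagealloc hits implement page poisoning: a flag records that the page was poisoned and the freed memory is filled with a known byte pattern, so a later check can catch writes to memory that should be untouched. A user-space sketch of the same idea; the poison value and all names here are assumptions, not the kernel's:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE    4096
#define POISON_BYTE 0xaa     /* assumed fill pattern, chosen for illustration */

struct dbg_buf {
    bool poisoned;                   /* stands in for PAGE_DEBUG_FLAG_POISON */
    unsigned char data[BUF_SIZE];
};

static void poison_buf(struct dbg_buf *b)
{
    b->poisoned = true;
    memset(b->data, POISON_BYTE, sizeof(b->data));
}

static bool check_poison(const struct dbg_buf *b)
{
    if (!b->poisoned)
        return true;
    for (size_t i = 0; i < sizeof(b->data); i++)
        if (b->data[i] != POISON_BYTE)
            return false;            /* someone wrote to "freed" memory */
    return true;
}

int main(void)
{
    static struct dbg_buf b;

    poison_buf(&b);
    b.data[100] = 0;                 /* simulate a stray write after free */
    printf("poison intact: %s\n", check_poison(&b) ? "yes" : "no");
    return 0;
}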
