| /kernel/linux/linux-4.19/include/linux/ |
| D | page-flags.h |
|      3  * Macros for manipulating and testing page->flags
|     18  * Various page->flags bits:
|     24  * specific data (which is normally at page->private). It can be used by
|     31  * PG_locked also pins a page in pagecache, and blocks truncation of the file
|     34  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
|     37  * PG_uptodate tells whether the page's contents is valid. When a read
|     38  * completes, the page becomes uptodate, unless a disk I/O error happened.
|     40  * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
|     43  * PG_error is set to indicate that an I/O error occurred on this page.
|     45  * PG_arch_1 is an architecture specific page state bit. The generic code
|   [all …]
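The PG_locked, PG_uptodate and PG_error bits described above are manipulated through the accessors this header generates (SetPageUptodate(), SetPageError(), and so on). As a minimal hedged sketch, not code from this file, this is how a read-completion path typically reports status through those bits; my_read_end_io() is a hypothetical helper:

    #include <linux/page-flags.h>
    #include <linux/pagemap.h>      /* unlock_page() */

    /* Hypothetical end-of-read handler: report I/O status via page flags. */
    static void my_read_end_io(struct page *page, int err)
    {
            if (err) {
                    ClearPageUptodate(page);  /* contents are not valid */
                    SetPageError(page);       /* PG_error: I/O error on this page */
            } else {
                    SetPageUptodate(page);    /* PG_uptodate: contents now valid */
            }
            unlock_page(page);  /* drop PG_locked, wake page_waitqueue(page) sleepers */
    }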
|
| D | balloon_compaction.h |
|     11  * As the page isolation scanning step a compaction thread does is a lockless
|     12  * procedure (from a page standpoint), it might bring some racy situations while
|     13  * performing balloon page compaction. In order to sort out these racy scenarios
|     14  * and safely perform balloon's page compaction and migration we must, always,
|     17  * i. when updating a balloon's page ->mapping element, strictly do it under
|     20  * +-page_lock(page);
|     22  * ... page->mapping updates here ...
|     24  * ii. before isolating or dequeueing a balloon page from the balloon device
|     25  * pages list, the page reference counter must be raised by one and the
|     26  * extra refcount must be dropped when the page is enqueued back into
|   [all …]
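Rule (i) above amounts to nesting the balloon's pages_lock inside the page lock whenever page->mapping is touched. A hedged sketch of an insertion path written to those rules; b_dev_info is assumed to be the driver's struct balloon_dev_info, and the function is illustrative rather than code from this header:

    #include <linux/balloon_compaction.h>
    #include <linux/pagemap.h>      /* lock_page()/unlock_page() */

    /* Illustrative only: obey rule (i) when (re)setting page->mapping. */
    static void my_balloon_insert(struct balloon_dev_info *b_dev_info,
                                  struct page *page)
    {
            unsigned long flags;

            lock_page(page);                        /* rule (i): hold PG_locked */
            spin_lock_irqsave(&b_dev_info->pages_lock, flags);
            balloon_page_insert(b_dev_info, page);  /* sets page->mapping, links the page */
            spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
            unlock_page(page);
    }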
|
| D | page_ref.h |
|      7  #include <linux/page-flags.h>
|     29  extern void __page_ref_set(struct page *page, int v);
|     30  extern void __page_ref_mod(struct page *page, int v);
|     31  extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
|     32  extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
|     33  extern void __page_ref_mod_unless(struct page *page, int v, int u);
|     34  extern void __page_ref_freeze(struct page *page, int v, int ret);
|     35  extern void __page_ref_unfreeze(struct page *page, int v);
|     41  static inline void __page_ref_set(struct page *page, int v)
|     44  static inline void __page_ref_mod(struct page *page, int v)
|   [all …]
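Beyond the tracepoint hooks listed above, this header supplies the page_ref_* accessors that refcount manipulation funnels through. A small hedged usage sketch (the surrounding function is hypothetical):

    #include <linux/mm.h>           /* get_page_unless_zero(), put_page() */
    #include <linux/page_ref.h>

    /* Illustrative: take a temporary reference only if the page is still live. */
    static bool my_try_use_page(struct page *page)
    {
            if (!get_page_unless_zero(page))  /* fails if _refcount is 0 (being freed) */
                    return false;

            pr_debug("refcount is now %d\n", page_ref_count(page));
            /* ... use the page ... */
            put_page(page);                   /* drop our reference */
            return true;
    }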
|
| D | pagemap.h |
|    121  void release_pages(struct page **pages, int nr);
|    124  * speculatively take a reference to a page.
|    125  * If the page is free (_refcount == 0), then _refcount is untouched, and 0
|    129  * been used to lookup the page in the pagecache radix-tree (or page table):
|    135  * page has been finished with, no matter what it is subsequently allocated
|    142  * 1. find page in radix tree
|    144  * 3. check the page is still in pagecache (if no, goto 1)
|    149  * B. remove page from pagecache
|    150  * C. free the page
|    155  * subsequently, B will complete and 1 will find no page, causing the
|   [all …]
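Steps 1-3 quoted above are the lookup side of the protocol. A hedged sketch of that loop, loosely mirroring what find_get_page() ends up doing internally on 4.19 (illustrative, simplified, not this header's code):

    #include <linux/pagemap.h>
    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>

    /* Illustrative lookup following steps 1-3 of the comment above. */
    static struct page *my_lookup(struct address_space *mapping, pgoff_t index)
    {
            struct page *page;

            rcu_read_lock();
    repeat:
            page = radix_tree_lookup(&mapping->i_pages, index);   /* 1. find page */
            if (page) {
                    if (!page_cache_get_speculative(page))        /* 2. refcount was 0 */
                            goto repeat;
                    /* 3. the page may have been removed and reused meanwhile */
                    if (unlikely(page != radix_tree_lookup(&mapping->i_pages, index))) {
                            put_page(page);
                            goto repeat;
                    }
            }
            rcu_read_unlock();
            return page;
    }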
|
| D | page_idle.h |
|      6  #include <linux/page-flags.h>
|     12  static inline bool page_is_young(struct page *page)
|     14          return PageYoung(page);
|     17  static inline void set_page_young(struct page *page)
|     19          SetPageYoung(page);
|     22  static inline bool test_and_clear_page_young(struct page *page)
|     24          return TestClearPageYoung(page);
|     27  static inline bool page_is_idle(struct page *page)
|     29          return PageIdle(page);
|     32  static inline void set_page_idle(struct page *page)
|   [all …]
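These wrappers back the idle page tracking feature: mark a page idle, let time pass, then see whether it was referenced. A minimal hedged sketch of that pattern (both functions are hypothetical; the real consumer also clears referenced bits via rmap):

    #include <linux/page_idle.h>

    /* Illustrative: mark a page idle now ... */
    static void my_mark_idle(struct page *page)
    {
            set_page_idle(page);
    }

    /* ... and later decide whether it stayed idle. */
    static bool my_stayed_idle(struct page *page)
    {
            if (test_and_clear_page_young(page))  /* accessed since the mark */
                    return false;
            return page_is_idle(page);
    }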
|
| D | mm_inline.h |
|      9  * page_is_file_cache - should the page be on a file LRU or anon LRU?
|     10  * @page: the page to test
|     12  * Returns 1 if @page is page cache page backed by a regular filesystem,
|     13  * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
|     14  * Used by functions that manipulate the LRU lists, to sort a page
|     17  * We would like to get this info without a page flag, but the state
|     18  * needs to survive until the page is last deleted from the LRU, which
|     21  static inline int page_is_file_cache(struct page *page)
|     23          return !PageSwapBacked(page);
|     47  static __always_inline void add_page_to_lru_list(struct page *page,
|   [all …]
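The return value of page_is_file_cache() is what steers a page onto the file or anon LRU lists. A hedged sketch of that selection (illustrative; in the real header the equivalent logic lives in page_lru_base_type()/page_lru()):

    #include <linux/mm_inline.h>
    #include <linux/page-flags.h>

    /* Illustrative: pick the LRU list a page belongs on. */
    static enum lru_list my_pick_lru(struct page *page)
    {
            enum lru_list lru;

            lru = page_is_file_cache(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
            if (PageActive(page))
                    lru += LRU_ACTIVE;  /* the active list sits LRU_ACTIVE entries above */
            return lru;
    }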
|
| D | page_owner.h |
|     11  extern void __reset_page_owner(struct page *page, unsigned int order);
|     12  extern void __set_page_owner(struct page *page,
|     14  extern void __split_page_owner(struct page *page, unsigned int order);
|     15  extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
|     16  extern void __set_page_owner_migrate_reason(struct page *page, int reason);
|     17  extern void __dump_page_owner(struct page *page);
|     21  static inline void reset_page_owner(struct page *page, unsigned int order)
|     24          __reset_page_owner(page, order);
|     27  static inline void set_page_owner(struct page *page,
|     31          __set_page_owner(page, order, gfp_mask);
|   [all …]
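The elided wrapper bodies all follow the same shape: a static-key test so the tracking costs nothing when page_owner is left disabled at boot. A hedged sketch of that pattern, written from memory of the header rather than copied from it:

    #include <linux/gfp.h>
    #include <linux/jump_label.h>
    #include <linux/page_owner.h>

    /* Illustrative wrapper in the style of this header: only enter the
     * __set_page_owner() slow path when the feature was enabled at boot. */
    static inline void my_set_page_owner(struct page *page,
                                         unsigned int order, gfp_t gfp_mask)
    {
            if (static_branch_unlikely(&page_owner_inited))
                    __set_page_owner(page, order, gfp_mask);
    }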
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | page_ref.h |
|      7  #include <linux/page-flags.h>
|     29  extern void __page_ref_set(struct page *page, int v);
|     30  extern void __page_ref_mod(struct page *page, int v);
|     31  extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
|     32  extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
|     33  extern void __page_ref_mod_unless(struct page *page, int v, int u);
|     34  extern void __page_ref_freeze(struct page *page, int v, int ret);
|     35  extern void __page_ref_unfreeze(struct page *page, int v);
|     41  static inline void __page_ref_set(struct page *page, int v)
|     44  static inline void __page_ref_mod(struct page *page, int v)
|   [all …]
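Rather than repeat the basic get/put example given for the 4.19 copy above, here is a hedged sketch of the freeze/unfreeze pair this header also provides, which callers such as page migration use to take a refcount to zero temporarily while they own the page exclusively (illustrative only):

    #include <linux/page_ref.h>

    /* Illustrative: freeze the refcount if it still equals 'expected',
     * do exclusive work, then publish the page again. */
    static bool my_work_on_frozen_page(struct page *page, int expected)
    {
            if (!page_ref_freeze(page, expected))   /* someone else holds a reference */
                    return false;

            /* ... the page is now exclusively ours (refcount == 0) ... */

            page_ref_unfreeze(page, expected);      /* restore 'expected' references */
            return true;
    }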
|
| D | page-flags.h |
|      3  * Macros for manipulating and testing page->flags
|     18  * Various page->flags bits:
|     20  * PG_reserved is set for special pages. The "struct page" of such a page
|     25  * - Pages reserved or allocated early during boot (before the page allocator
|     27  * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
|     29  * be given to the page allocator.
|     32  * - The zero page(s)
|     33  * - Pages not added to the page allocator when onlining a section because
|     49  * Consequently, PG_reserved for a page mapped into user space can indicate
|     50  * the zero page, the vDSO, MMIO pages or device memory.
|   [all …]
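Because PG_reserved covers such a mixed bag (early boot memory, the zero page, MMIO, device memory), code that walks raw PFNs usually just skips reserved pages rather than interpret them. A minimal hedged sketch (the walker is hypothetical):

    #include <linux/mm.h>
    #include <linux/page-flags.h>

    /* Illustrative: skip pages a PFN walker must not touch. */
    static bool my_pfn_is_interesting(unsigned long pfn)
    {
            struct page *page;

            if (!pfn_valid(pfn))        /* no struct page backs this PFN */
                    return false;
            page = pfn_to_page(pfn);
            if (PageReserved(page))     /* boot memory, zero page, MMIO, ... */
                    return false;
            return true;
    }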
|
| D | balloon_compaction.h |
|      7  * Balloon page migration makes use of the general non-lru movable page
|     10  * page->private is used to reference the responsible balloon device.
|     11  * page->mapping is used in context of non-lru page migration to reference
|     12  * the address space operations for page isolation/migration/compaction.
|     14  * As the page isolation scanning step a compaction thread does is a lockless
|     15  * procedure (from a page standpoint), it might bring some racy situations while
|     16  * performing balloon page compaction. In order to sort out these racy scenarios
|     17  * and safely perform balloon's page compaction and migration we must, always,
|     20  * i. when updating a balloon's page ->mapping element, strictly do it under
|     23  * +-page_lock(page);
|   [all …]
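The page->private link mentioned above is how the generic migration code finds its way back to the owning balloon; the header's balloon_page_device() helper simply reads it back. A hedged, heavily simplified skeleton of a driver-side migratepage callback using it (illustrative; the callback signature is reproduced from memory):

    #include <linux/balloon_compaction.h>

    /* Illustrative skeleton: recover the owning device from page->private
     * and hand old and new page to the driver's backend. */
    static int my_balloon_migratepage(struct balloon_dev_info *b_dev_info,
                                      struct page *newpage, struct page *page,
                                      enum migrate_mode mode)
    {
            /* The core found b_dev_info via balloon_page_device(page),
             * i.e. the page->private reference described above. */
            if (WARN_ON(balloon_page_device(page) != b_dev_info))
                    return -EINVAL;

            /* ... tell the hypervisor/backend to swap 'page' for 'newpage' ... */
            return MIGRATEPAGE_SUCCESS;
    }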
|
| D | pagemap.h |
|    158  void release_pages(struct page **pages, int nr);
|    161  * speculatively take a reference to a page.
|    162  * If the page is free (_refcount == 0), then _refcount is untouched, and 0
|    166  * been used to lookup the page in the pagecache radix-tree (or page table):
|    172  * page has been finished with, no matter what it is subsequently allocated
|    179  * 1. find page in radix tree
|    181  * 3. check the page is still in pagecache (if no, goto 1)
|    186  * B. remove page from pagecache
|    187  * C. free the page
|    192  * subsequently, B will complete and 1 will find no page, causing the
|   [all …]
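Most callers never open-code that speculative-get dance; they go through the pagecache lookup helpers this header exports, which implement steps 1-3 internally. A hedged usage sketch:

    #include <linux/pagemap.h>

    /* Illustrative: look up a page cache page, drop the reference when done. */
    static void my_peek_at_index(struct address_space *mapping, pgoff_t index)
    {
            struct page *page;

            page = find_get_page(mapping, index);  /* NULL, or a page with an extra ref */
            if (!page)
                    return;

            if (PageUptodate(page))
                    pr_debug("index %lu is cached and uptodate\n", index);

            put_page(page);  /* balance the reference find_get_page() took */
    }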
|
| D | mm_inline.h |
|      9  * page_is_file_lru - should the page be on a file LRU or anon LRU?
|     10  * @page: the page to test
|     12  * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
|     13  * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
|     14  * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
|     15  * functions that manipulate the LRU lists, to sort a page onto the right LRU
|     18  * We would like to get this info without a page flag, but the state
|     19  * needs to survive until the page is last deleted from the LRU, which
|     22  static inline int page_is_file_lru(struct page *page)
|     24          return !PageSwapBacked(page);
|   [all …]
|
| D | page_idle.h |
|      6  #include <linux/page-flags.h>
|     12  static inline bool page_is_young(struct page *page)
|     14          return PageYoung(page);
|     17  static inline void set_page_young(struct page *page)
|     19          SetPageYoung(page);
|     22  static inline bool test_and_clear_page_young(struct page *page)
|     24          return TestClearPageYoung(page);
|     27  static inline bool page_is_idle(struct page *page)
|     29          return PageIdle(page);
|     32  static inline void set_page_idle(struct page *page)
|   [all …]
|
| D | migrate.h |
|     10  typedef struct page *new_page_t(struct page *page, unsigned long private);
|     11  typedef void free_page_t(struct page *page, unsigned long private);
|     17  * - negative errno on page migration failure;
|     18  * - zero on page migration success;
|     40          struct page *newpage, struct page *page,
|     43          struct page *newpage, struct page *page,
|     47  extern struct page *alloc_migration_target(struct page *page, unsigned long private);
|     48  extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
|     49  extern void putback_movable_page(struct page *page);
|     53  extern void migrate_page_states(struct page *newpage, struct page *page);
|   [all …]
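The two typedefs at the top are the allocation and free callbacks that migrate_pages() drives. A hedged sketch of a caller wiring them up (the step that isolates pages onto 'pagelist' is elided; the function itself is illustrative):

    #include <linux/gfp.h>
    #include <linux/migrate.h>

    /* Illustrative new_page_t callback: allocate a movable destination page. */
    static struct page *my_new_page(struct page *page, unsigned long private)
    {
            return alloc_page(GFP_HIGHUSER_MOVABLE);
    }

    /* Illustrative: migrate every page previously isolated onto 'pagelist'. */
    static int my_migrate_list(struct list_head *pagelist)
    {
            int err;

            err = migrate_pages(pagelist, my_new_page, NULL /* free_page_t */,
                                0 /* private */, MIGRATE_SYNC, MR_SYSCALL);
            if (err)
                    putback_movable_pages(pagelist);  /* give the pages back on failure */
            return err;
    }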
|
| D | page_owner.h |
|     11  extern void __reset_page_owner(struct page *page, unsigned int order);
|     12  extern void __set_page_owner(struct page *page,
|     14  extern void __split_page_owner(struct page *page, unsigned int nr);
|     15  extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
|     16  extern void __set_page_owner_migrate_reason(struct page *page, int reason);
|     17  extern void __dump_page_owner(struct page *page);
|     21  static inline void reset_page_owner(struct page *page, unsigned int order)
|     24          __reset_page_owner(page, order);
|     27  static inline void set_page_owner(struct page *page,
|     31          __set_page_owner(page, order, gfp_mask);
|   [all …]
|
| /kernel/linux/linux-4.19/mm/ |
| D | swap.c |
|     44  /* How many pages do we try to swap or page in/out together? */
|     59  static void __page_cache_release(struct page *page)
|     61          if (PageLRU(page)) {
|     62                  struct zone *zone = page_zone(page);
|     67                  lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
|     68                  VM_BUG_ON_PAGE(!PageLRU(page), page);
|     69                  __ClearPageLRU(page);
|     70                  del_page_from_lru_list(page, lruvec, page_off_lru(page));
|     73          __ClearPageWaiters(page);
|     74          mem_cgroup_uncharge(page);
|   [all …]
|
| D | migrate.c |
|      7  * Page migration was first developed in the context of the memory hotplug
|     84  int isolate_movable_page(struct page *page, isolate_mode_t mode)
|     92          * In case we 'win' a race for a movable page being freed under us and
|     95          * release this page, thus avoiding a nasty leakage.
|     97          if (unlikely(!get_page_unless_zero(page)))
|    101          * Check PageMovable before holding a PG_lock because page's owner
|    102          * assumes anybody doesn't touch PG_lock of newly allocated page
|    103          * so unconditionally grapping the lock ruins page's owner side.
|    105          if (unlikely(!__PageMovable(page)))
|    109          * compaction threads can race against page migration functions
|   [all …]
|
| D | filemap.c |
|     57  * finished 'unifying' the page and buffer cache and SMP-threaded the
|     58  * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
|    115                  struct page *page, void **shadowp)
|    121          error = __radix_tree_create(&mapping->i_pages, page->index, 0,
|    137          __radix_tree_replace(&mapping->i_pages, node, slot, page,
|    144                  struct page *page, void *shadow)
|    149          nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
|    151          VM_BUG_ON_PAGE(!PageLocked(page), page);
|    152          VM_BUG_ON_PAGE(PageTail(page), page);
|    153          VM_BUG_ON_PAGE(nr != 1 && shadow, page);
|   [all …]
|
| D | balloon_compaction.c |
|     14  * balloon_page_alloc - allocates a new page for insertion into the balloon
|     15  * page list.
|     17  * Driver must call it to properly allocate a new enlisted balloon page.
|     19  * the guest system. This function returns the page address for the recently
|     20  * allocated page or NULL in the case we fail to allocate a new page this turn.
|     22  struct page *balloon_page_alloc(void)
|     24          struct page *page = alloc_page(balloon_mapping_gfp_mask() |
|     26          return page;
|     31  * balloon_page_enqueue - allocates a new page and inserts it into the balloon
|     32  * page list.
|   [all …]
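A hedged sketch of the driver-side sequence these two helpers are meant for, in the 4.19 two-step form (allocate, then enqueue under the balloon's bookkeeping); the enqueue signature is reproduced from memory and the function is illustrative:

    #include <linux/balloon_compaction.h>

    /* Illustrative: inflate the balloon by one page (4.19-style API). */
    static int my_balloon_inflate_one(struct balloon_dev_info *b_dev_info)
    {
            struct page *page = balloon_page_alloc();

            if (!page)
                    return -ENOMEM;  /* try again later, as the comment above suggests */

            balloon_page_enqueue(b_dev_info, page);  /* link it into b_dev_info->pages */
            /* ... now tell the host/hypervisor about the page ... */
            return 0;
    }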
|
| D | rmap.c |
|     10  * Provides methods for unmapping each kind of mapped page:
|     25  * page->flags PG_locked (lock_page)
|    259  * page is mapped.
|    442  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
|    446  * have been relevant to this page.
|    448  * The page might have been remapped to a different anon_vma or the anon_vma
|    453  * ensure that any anon_vma obtained from the page will still be valid for as
|    457  * chain and verify that the page in question is indeed mapped in it
|    461  * that the anon_vma pointer from page->mapping is valid if there is a
|    464  struct anon_vma *page_get_anon_vma(struct page *page)
|   [all …]
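The point of the long comment above is that page_get_anon_vma() only pins the anon_vma structure itself; the caller still takes the anon_vma lock and re-checks the page under it. A hedged sketch of that calling pattern (illustrative; the real users are paths such as migration and THP split):

    #include <linux/rmap.h>

    /* Illustrative caller of page_get_anon_vma(): pin, lock, use, release. */
    static void my_walk_anon_page(struct page *page)
    {
            struct anon_vma *anon_vma;

            anon_vma = page_get_anon_vma(page);  /* NULL if not a mapped anon page */
            if (!anon_vma)
                    return;

            anon_vma_lock_read(anon_vma);
            /* ... walk the interval tree and re-check that 'page' is still
             *     mapped there, as the comment above requires ... */
            anon_vma_unlock_read(anon_vma);

            put_anon_vma(anon_vma);              /* drop the reference the get took */
    }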
|
| /kernel/linux/linux-5.10/mm/ |
| D | swap.c |
|     45  /* How many pages do we try to swap or page in/out together? */
|     79  static void __page_cache_release(struct page *page)
|     81          if (PageLRU(page)) {
|     82                  pg_data_t *pgdat = page_pgdat(page);
|     87                  lruvec = mem_cgroup_page_lruvec(page, pgdat);
|     88                  VM_BUG_ON_PAGE(!PageLRU(page), page);
|     89                  __ClearPageLRU(page);
|     90                  del_page_from_lru_list(page, lruvec, page_off_lru(page));
|     93          __ClearPageWaiters(page);
|     96  static void __put_single_page(struct page *page)
|   [all …]
|
| D | migrate.c |
|      7  * Page migration was first developed in the context of the memory hotplug
|     86  int isolate_movable_page(struct page *page, isolate_mode_t mode)
|     94          * In case we 'win' a race for a movable page being freed under us and
|     97          * release this page, thus avoiding a nasty leakage.
|     99          if (unlikely(!get_page_unless_zero(page)))
|    103          * Check PageMovable before holding a PG_lock because page's owner
|    104          * assumes anybody doesn't touch PG_lock of newly allocated page
|    105          * so unconditionally grabbing the lock ruins page's owner side.
|    107          if (unlikely(!__PageMovable(page)))
|    111          * compaction threads can race against page migration functions
|   [all …]
|
| D | filemap.c |
|     63  * finished 'unifying' the page and buffer cache and SMP-threaded the
|     64  * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
|    121                  struct page *page, void *shadow)
|    123          XA_STATE(xas, &mapping->i_pages, page->index);
|    129          if (!PageHuge(page)) {
|    130                  xas_set_order(&xas, page->index, compound_order(page));
|    131                  nr = compound_nr(page);
|    134          VM_BUG_ON_PAGE(!PageLocked(page), page);
|    135          VM_BUG_ON_PAGE(PageTail(page), page);
|    136          VM_BUG_ON_PAGE(nr != 1 && shadow, page);
|   [all …]
|
| /kernel/linux/linux-4.19/Documentation/vm/ |
| D | page_migration.rst |
|      4  Page migration
|      7  Page migration allows the moving of the physical location of pages between
|     12  The main intend of page migration is to reduce the latency of memory access
|     16  Page migration allows a process to manually relocate the node on which its
|     22  Page migration functions are provided by the numactl package by Andi Kleen
|     25  which provides an interface similar to other numa functionality for page
|     28  proc(5) man page.
|     34  manual page migration support. Automatic page migration may be implemented
|     51  Page migration allows the preservation of the relative location of pages
|     57  Page migration occurs in several steps. First a high level
|   [all …]
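As a concrete example of the numactl-provided interface mentioned above, a whole process's pages can be shifted between nodes through libnuma. A hedged userspace sketch, assuming libnuma's numa_migrate_pages() and linking with -lnuma:

    /* Illustrative userspace use of the libnuma process-migration API. */
    #include <numa.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            struct bitmask *from, *to;
            int pid;

            if (argc < 2 || numa_available() < 0)
                    return 1;
            pid = atoi(argv[1]);            /* pid whose pages to migrate */

            from = numa_allocate_nodemask();
            to = numa_allocate_nodemask();
            numa_bitmask_setbit(from, 0);   /* move pages now on node 0 ... */
            numa_bitmask_setbit(to, 1);     /* ... over to node 1 */

            if (numa_migrate_pages(pid, from, to) < 0)
                    perror("numa_migrate_pages");

            numa_bitmask_free(from);
            numa_bitmask_free(to);
            return 0;
    }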
|
| /kernel/linux/linux-5.10/Documentation/vm/ |
| D | page_migration.rst |
|      4  Page migration
|      7  Page migration allows moving the physical location of pages between
|     15  The main intent of page migration is to reduce the latency of memory accesses
|     19  Page migration allows a process to manually relocate the node on which its
|     25  Page migration functions are provided by the numactl package by Andi Kleen
|     28  which provides an interface similar to other NUMA functionality for page
|     31  proc(5) man page.
|     37  manual page migration support. Automatic page migration may be implemented
|     54  Page migration allows the preservation of the relative location of pages
|     60  Page migration occurs in several steps. First a high level
|   [all …]
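Complementing the whole-process example under the 4.19 copy above, individual pages can be placed with the move_pages(2) system call (also wrapped by libnuma). A hedged sketch:

    /* Illustrative: ask the kernel to move one of our pages to node 1. */
    #include <numaif.h>             /* move_pages(), MPOL_MF_MOVE; link with -lnuma */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            void *pages[1];
            int nodes[1] = { 1 };   /* destination node for pages[0] */
            int status[1];
            char *buf = malloc(4096);

            if (!buf)
                    return 1;
            buf[0] = 1;             /* touch it so the page really exists */
            pages[0] = buf;

            if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
                    perror("move_pages");
            else
                    printf("page is now on node %d (negative means errno)\n", status[0]);

            free(buf);
            return 0;
    }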
|