/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping of pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * In general, PG_reserved does not prevent anybody from dumping or swapping
 * the page, and it is no longer required for remap_pfn_range(); ioremap
 * might still require it. Consequently, PG_reserved on a page mapped into
 * user space can indicate the zero page, the vDSO, MMIO pages or device
 * memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage. These are
 * usually PageAnon or shmem pages, but note that even anonymous pages might
 * lose their PG_swapbacked flag when they can simply be dropped (e.g. as a
 * result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
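
/*
 * Illustrative sketch (not part of the original header): in the common
 * pagecache read path, PG_locked and PG_uptodate interact roughly like
 * this, assuming lock_page()/unlock_page() from <linux/pagemap.h>:
 *
 *	lock_page(page);		// take PG_locked; blocks truncation
 *	if (PageUptodate(page)) {
 *		unlock_page(page);	// clear PG_locked and wake waiters
 *	} else {
 *		mapping->a_ops->readpage(file, page);
 *		// the I/O completion handler calls SetPageUptodate()
 *		// on success and unlock_page() in either case
 *	}
 */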

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
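
/*
 * Illustrative sketch (an assumption mirroring how <linux/mm.h> consumes
 * this layout, not a quote): the upper fields are extracted with shifts
 * and masks, e.g.
 *
 *	static inline enum zone_type page_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 *
 * with the shift/mask widths derived from <linux/page-flags-layout.h>.
 */
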
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_PAGE_TRACING
	PG_skb,
	PG_zspage,
#endif
#ifdef CONFIG_MEM_PURGEABLE
	PG_purgeable,
#endif
#ifdef CONFIG_SECURITY_XPM
	PG_xpm_readonly,
	PG_xpm_writetainted,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	/*
	 * Bit 0 of compound_head tags a tail page; the remaining bits
	 * hold the address of the head page (see set_compound_head()).
	 */
	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif
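
/*
 * A minimal sketch of the CONFIG_DEBUG_VM side (an assumption about the
 * mm/debug.c implementation): poisoning fills the whole struct page with
 * 0xff bytes, which is exactly what PagePoisoned() detects by comparing
 * page->flags against PAGE_POISON_PATTERN (-1l, i.e. all bits set):
 *
 *	void page_init_poison(struct page *page, size_t size)
 *	{
 *		memset(page, PAGE_POISON_PATTERN, size);
 *	}
 */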

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

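/*
 * Illustrative expansion (derived from the macros above, shown for
 * clarity rather than quoted from generated code):
 * PAGEFLAG(Dirty, dirty, PF_HEAD) produces, roughly,
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * because the PF_HEAD() policy redirects every operation to the head page
 * (plus the PagePoisoned() sanity check elided here).
 */
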
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

#ifdef CONFIG_SECURITY_XPM
PAGEFLAG(XPMReadonly, xpm_readonly, PF_HEAD)
PAGEFLAG(XPMWritetainted, xpm_writetainted, PF_HEAD)
#else
PAGEFLAG_FALSE(XPMReadonly)
PAGEFLAG_FALSE(XPMWritetainted)
#endif

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_PAGE_TRACING
PAGEFLAG(SKB, skb, PF_ANY)
PAGEFLAG(Zspage, zspage, PF_ANY)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEM_PURGEABLE
PAGEFLAG(Purgeable, purgeable, PF_ANY)
#else
PAGEFLAG_FALSE(Purgeable)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON marks a non-lru movable
 * page; in that case page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to the user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

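/*
 * Illustrative decode (a sketch, not part of the original header): the
 * low two bits of page->mapping select the interpretation of the rest
 * of the pointer:
 *
 *	unsigned long m = (unsigned long)page->mapping;
 *
 *	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		;	// anon_vma pointer: PageAnon(), not KSM
 *	else if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM)
 *		;	// KSM stable-tree node: PageKsm() (see below)
 *	else if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE)
 *		;	// non-lru movable page: __PageMovable()
 *	else
 *		;	// plain struct address_space pointer
 */
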
#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

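/*
 * Illustrative pairing of the barriers above (a sketch, not part of the
 * original header); use() is a placeholder:
 *
 *	// writer side (e.g. read-completion handler)
 *	memcpy(page_address(page), buf, len);	// bring contents uptodate
 *	SetPageUptodate(page);			// smp_wmb(), then set_bit()
 *
 *	// reader side
 *	if (PageUptodate(page))			// test_bit(), then smp_rmb()
 *		use(page_address(page));	// stores above are visible
 */
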
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

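/*
 * Illustrative round trip (a sketch, not part of the original header):
 * since struct page is more than 2-byte aligned, bit 0 of compound_head
 * is free to tag tail pages, so the helpers above invert each other:
 *
 *	set_compound_head(&head[1], head);	// stores (ulong)head + 1
 *	PageTail(&head[1]);			// bit 0 set -> true
 *	compound_head(&head[1]);		// (stored - 1) -> head
 *	compound_head(head);			// bit 0 clear -> head itself
 */
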
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount check false
 * positives.
 *
 * We have to treat page cache THP differently since every subpage of it
 * would get _mapcount inc'ed once it is PMD mapped.  But it may be PTE
 * mapped in the current process, so we compare the subpage's _mapcount
 * with the compound_mapcount to filter out the PTE-mapped case.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	struct page *head;

	if (!PageTransCompound(page))
		return 0;

	if (PageAnon(page))
		return atomic_read(&page->_mapcount) < 0;

	head = compound_head(page);
	/* File THP is PMD mapped and not PTE mapped */
	return atomic_read(&page->_mapcount) ==
	       atomic_read(compound_mapcount_ptr(head));
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per-small-page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is offset
 * up by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSCFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_kmemcg	0x00000200
#define PG_table	0x00000400
#define PG_guard	0x00000800

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

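/*
 * Worked example (derived from the definitions above, shown as an
 * illustration, using PG_buddy; PageBuddy etc. are instantiated just
 * below): page_type starts out as -1 (0xffffffff), so a page has a
 * given type when the corresponding bit is *cleared*:
 *
 *	// page->page_type == 0xffffffff: no type, not used as mapcount
 *	__SetPageBuddy(page);	// page_type &= ~PG_buddy  -> 0xffffff7f
 *	PageBuddy(page);	// (0xffffff7f & 0xf0000080) == 0xf0000000
 *	__ClearPageBuddy(page);	// page_type |= PG_buddy   -> 0xffffffff
 */
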
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require the pages to be set PageOffline() again and not be
 * given to the buddy via online_page_callback_t.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#ifdef CONFIG_SECURITY_XPM
#define	__XPM_PAGE_FLAGS (1UL << PG_xpm_readonly | 1UL << PG_xpm_writetainted)
#else
#define	__XPM_PAGE_FLAGS 0
#endif

#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 __XPM_PAGE_FLAGS	|				\
	 1UL << PG_unevictable	| __PG_MLOCKED)

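/*
 * Illustrative use (a sketch of how the page allocator consumes this
 * mask, an approximation rather than a quote from mm/page_alloc.c):
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page, "page still has problematic flags set");
 */
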
/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */