/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (e.g. empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing I/O on the pages.  The
 * struct page itself (which holds these flag bits) is always mapped into
 * kernel address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}
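
/*
 * Encoding sketch (illustration, not part of this header): a tail page
 * stores its head page's address, plus one, in page->compound_head.
 * Since struct page is at least word-aligned, bit 0 of a genuine
 * pointer is always clear, so a set bit 0 unambiguously marks a tail:
 *
 *	compound_head(tail) == (struct page *)(tail->compound_head - 1);
 *	compound_head(head) == head;	(identity for non-tail pages)
 */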

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}
159 
160 /*
161  * Page flags policies wrt compound pages
162  *
163  * PF_ANY:
164  *     the page flag is relevant for small, head and tail pages.
165  *
166  * PF_HEAD:
167  *     for compound page all operations related to the page flag applied to
168  *     head page.
169  *
170  * PF_NO_TAIL:
171  *     modifications of the page flag must be done on small or head pages,
172  *     checks can be done on tail pages too.
173  *
174  * PF_NO_COMPOUND:
175  *     the page flag is not relevant for compound pages.
176  */
177 #define PF_ANY(page, enforce)	page
178 #define PF_HEAD(page, enforce)	compound_head(page)
179 #define PF_NO_TAIL(page, enforce) ({					\
180 		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
181 		compound_head(page);})
182 #define PF_NO_COMPOUND(page, enforce) ({				\
183 		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
184 		page;})
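
/*
 * Hypothetical example of how the policies behave (a sketch, not
 * generated code): with PF_NO_TAIL, a test such as PageSlab() passes
 * enforce=0 and quietly redirects a tail page to its head, while a
 * modification such as __SetPageSlab() passes enforce=1 and triggers
 * VM_BUG_ON_PGFLAGS() if handed a tail page:
 *
 *	PageSlab(tail);		resolves to test_bit() on the head page
 *	__SetPageSlab(tail);	BUGs (with CONFIG_DEBUG_VM_PGFLAGS=y)
 */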

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)		\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)		\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

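/*
 * For illustration only, a sketch of what one instantiation expands to:
 * PAGEFLAG(Dirty, dirty, PF_HEAD) generates roughly
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * i.e. a test/set/clear triple that always operates on the head page.
 */
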
#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
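
/*
 * The _FALSE/_NOOP variants above keep callers compiling when a flag is
 * configured out. For example (a sketch): with !CONFIG_MMU, PageMlocked()
 * is a stub returning 0, so a caller like
 *
 *	if (PageMlocked(page))
 *		count_vm_event(UNEVICTABLE_PGMLOCKED);
 *
 * becomes dead code the compiler can eliminate entirely.
 */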

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
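
/*
 * Usage sketch (hypothetical caller, for illustration only): a filesystem
 * attaching its own state to a pagecache page typically pairs
 * SetPagePrivate() with set_page_private() from <linux/mm.h>, e.g.
 *
 *	set_page_private(page, (unsigned long)my_fs_data);
 *	SetPagePrivate(page);
 *
 * so that page_has_private() below is true and releasepage() will be
 * asked to tear the state down before the page is freed.
 */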

/*
 * Only test-and-set exists for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
	TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
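
/*
 * Note the aliasing above: PageReadahead and PageReclaim share PG_reclaim
 * and are disambiguated purely by context. A sketch of the two sides:
 *
 *	SetPageReadahead(page);	marks a read-side readahead page
 *	SetPageReclaim(page);	hints writeback to reclaim on completion
 *
 * A page under read I/O is not simultaneously under writeback, which is
 * what makes sharing the bit workable.
 */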

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE on its own (i.e. PAGE_MAPPING_KSM without
 * PAGE_MAPPING_ANON) is used for non-lru movable pages; page->mapping
 * then points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
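
/*
 * Putting the bits together (illustrative summary of the helpers below):
 *
 *	page->mapping & PAGE_MAPPING_FLAGS == 0			pagecache/file
 *	page->mapping & PAGE_MAPPING_FLAGS == ANON		anonymous
 *	page->mapping & PAGE_MAPPING_FLAGS == ANON|MOVABLE	KSM
 *	page->mapping & PAGE_MAPPING_FLAGS == MOVABLE		non-lru movable
 */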

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}
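
/*
 * Example of the pairing (hypothetical read-completion path, for
 * illustration only): the writer publishes the data before the bit,
 * the reader checks the bit before touching the data.
 *
 *	writer (I/O completion)		reader
 *	copy data into page		if (PageUptodate(page))
 *	SetPageUptodate(page)			read data from page
 *	  [smp_wmb(), then set bit]	  [test bit, then smp_rmb()]
 */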

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
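
/*
 * Construction sketch (how mm/page_alloc.c's prep_compound_page() uses
 * these helpers, simplified for illustration):
 *
 *	__SetPageHead(page);
 *	for (i = 1; i < nr_pages; i++)
 *		set_compound_head(&page[i], page);
 *
 * The free path walks the tails calling clear_compound_head() before the
 * pages are returned to the allocator.
 */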

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can be
 * called only in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}
555 
556 /*
557  * PageTransTail returns true for both transparent huge pages
558  * and hugetlbfs pages, so it should only be called when it's known
559  * that hugetlbfs pages aren't involved.
560  */
PageTransTail(struct page * page)561 static inline int PageTransTail(struct page *page)
562 {
563 	return PageTail(page);
564 }
565 
566 /*
567  * PageDoubleMap indicates that the compound page is mapped with PTEs as well
568  * as PMDs.
569  *
570  * This is required for optimization of rmap operations for THP: we can postpone
571  * per small page mapcount accounting (and its overhead from atomic operations)
572  * until the first PMD split.
573  *
574  * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
575  * by one. This reference will go away with last compound_mapcount.
576  *
577  * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
578  */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace, page->_mapcount may be
 * used for storing extra information about page type. Any value used
 * for this purpose must be <= -2, but it's better to start not too close
 * to -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a special page.
 */
#define PAGE_MAPCOUNT_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return atomic_read(&page->_mapcount) ==				\
				PAGE_##lname##_MAPCOUNT_VALUE;		\
}									\
static __always_inline void __SetPage##uname(struct page *page)		\
{									\
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);	\
	atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);	\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	atomic_set(&page->_mapcount, -1);				\
}
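
/*
 * For illustration, PAGE_MAPCOUNT_OPS(Buddy, BUDDY) below expands to a
 * PageBuddy()/__SetPageBuddy()/__ClearPageBuddy() triple, used roughly
 * like this (a sketch of the mm/page_alloc.c lifecycle):
 *
 *	__SetPageBuddy(page);	_mapcount becomes PAGE_BUDDY_MAPCOUNT_VALUE
 *	PageBuddy(page);	true only while the page sits in the free lists
 *	__ClearPageBuddy(page);	_mapcount restored to -1 on allocation
 */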

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE		(-128)
PAGE_MAPCOUNT_OPS(Buddy, BUDDY)

/*
 * PageBalloon() is set on pages that are on the balloon page list
 * (see mm/balloon_compaction.c).
 */
#define PAGE_BALLOON_MAPCOUNT_VALUE		(-256)
PAGE_MAPCOUNT_OPS(Balloon, BALLOON)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
#define PAGE_KMEMCG_MAPCOUNT_VALUE		(-512)
PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}
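
/*
 * Design note with a usage sketch: slab pages are never on the LRU, so
 * PG_active is free for reuse here and no extra flag bit is spent. A slab
 * allocator marks a page it got from the emergency reserves roughly like
 *
 *	if (page_is_pfmemalloc(page))
 *		SetPageSlabPfmemalloc(page);
 *
 * and later withholds such objects from non-memalloc consumers.
 */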

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1UL << PG_lru	 | 1UL << PG_locked    | \
	 1UL << PG_private | 1UL << PG_private_2 | \
	 1UL << PG_writeback | 1UL << PG_reserved | \
	 1UL << PG_slab	 | 1UL << PG_swapcache | 1UL << PG_active | \
	 1UL << PG_unevictable | __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

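/*
 * Usage sketch (simplified from mm/page_alloc.c's free/prep checks, for
 * illustration only): a page is considered bad if any checked flag survives.
 *
 *	if (page->flags & PAGE_FLAGS_CHECK_AT_FREE)
 *		... report a bad page ...
 *
 * The allocator's real checks carry more detail, but this is the core test.
 */
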
#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
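
/*
 * Usage sketch (simplified from the pagecache truncation/reclaim paths,
 * illustration only): before dropping a pagecache page, callers give the
 * filesystem a chance to detach its private state.
 *
 *	if (page_has_private(page) && !try_to_release_page(page, 0))
 *		goto cannot_free;
 */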

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */