/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
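
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the PG_locked/PG_writeback lifecycle described above, expressed with the
 * folio helpers defined later in this file and in linux/pagemap.h:
 *
 *	folio_lock(folio);		// PG_locked set; truncation blocked
 *	folio_start_writeback(folio);	// PG_writeback set
 *	folio_unlock(folio);		// PG_locked cleared; I/O in flight
 *	...				// device completes the write
 *	folio_end_writeback(folio);	// PG_writeback cleared; waiters woken
 */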

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
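
/*
 * Illustrative sketch (editorial addition): linux/mm.h extracts the fields
 * area with shift/mask pairs generated from this layout, e.g. roughly:
 *
 *	static inline enum zone_type page_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 */
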
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_error,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_owner_2,		/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
	PG_dropbehind,		/* drop pages on IO completion */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	PG_arch_2,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	PG_arch_3,
#endif
#ifdef CONFIG_64BIT
	PG_oem_reserved_1,
	PG_oem_reserved_2,
	PG_oem_reserved_3,
	PG_oem_reserved_4,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/* Anonymous memory (and shmem) */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
	/* Some filesystems */
	PG_checked = PG_owner_priv_1,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_owner_2,

	/*
	 * Set if all buffer heads in the folio are mapped.
	 * Filesystems which do not use BHs can use it for their own purpose.
	 */
	PG_mappedtodisk = PG_owner_2,

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state. These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages. Stored in first tail page's
	 * flags word. Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_error,
	PG_large_rmappable = PG_workingset,	/* anon or file-backed */
	PG_partially_mapped = PG_reclaim,	/* was identified to be partially mapped */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page,
 * otherwise return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of @page[1]; it can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static __always_inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
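
/*
 * Illustrative usage (editorial addition): page_folio() and folio_page()
 * round-trip between the two views of the same memory, e.g.:
 *
 *	struct folio *folio = page_folio(page);
 *	struct page *head = folio_page(folio, 0);   // head page of the folio
 */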

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

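/*
 * Illustrative expansion (editorial addition): PAGEFLAG(Dirty, dirty,
 * PF_HEAD) below generates, among others, roughly:
 *
 *	static __always_inline int PageDirty(const struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 * plus the folio_test_dirty()/folio_set_dirty()/folio_clear_dirty()
 * variants via the FOLIO_*_FLAG() macros above.
 */
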
#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)	\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)

/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)

/*
 * Only test-and-set exists for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)

FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

/* Does kmap_local_folio() only allow access to one page of the folio? */
#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
#define folio_test_partial_kmap(f)	true
#else
#define folio_test_partial_kmap(f)	folio_test_highmem(f)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
	       test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(swapcache)
#endif

FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)

#ifdef CONFIG_MMU
FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(mlocked)
	__FOLIO_CLEAR_FLAG_NOOP(mlocked)
	FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
	FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous folio mapped into a user virtual memory area,
 * folio->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then folio->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then folio->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "folio_mapping" refers to the inode
 * address_space which maps the folio from disk; whereas "folio_mapped"
 * refers to user virtual address space into which the folio is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the folio->mapping does not exist as such, nor do
 * these flags below. So in order to avoid testing non-existent bits,
 * please make sure that folio_test_slab(folio) actually evaluates to
 * false before calling the following functions (e.g., folio_test_anon).
 * See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
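
/*
 * Illustrative sketch (editorial addition): consumers decode folio->mapping
 * by masking off the low bits, e.g. roughly how mm/internal.h reaches the
 * anon_vma of an anonymous folio:
 *
 *	(struct anon_vma *)((unsigned long)folio->mapping - PAGE_MAPPING_ANON)
 */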

/*
 * Different from the flags above, this flag is used only for fsdax mode. It
 * indicates that this page->mapping is now shared due to a reflink operation.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held. For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held. It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
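
/*
 * Illustrative usage (editorial addition): folio_unlock() in mm/filemap.c
 * uses this helper roughly as follows, waking waiters only when PG_waiters
 * was set:
 *
 *	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *		folio_wake_bit(folio, PG_locked);
 */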

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes
	 * true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
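
/*
 * Illustrative sketch (editorial addition): the barrier pairing described
 * above. A writer fills the folio and only then publishes it; folio_address()
 * here is from linux/mm.h and assumes a directly mappable folio:
 *
 *	memcpy(folio_address(folio), src, len);
 *	folio_mark_uptodate(folio);	// smp_wmb() orders the copy first
 *
 * This pairs with the smp_rmb() in folio_test_uptodate() on the reader side.
 */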

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
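
/*
 * Illustrative sketch (editorial addition): for a tail page, compound_head
 * stores the head page pointer with bit 0 set as a "this is a tail" tag,
 * which is why _compound_head() above returns (head - 1) when it finds the
 * low bit set:
 *
 *	set_compound_head(tail, head);	// tail->compound_head = head + 1
 *	compound_head(tail);		// decodes back to head
 */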

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
FOLIO_FLAG_FALSE(partially_mapped)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler. Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that do not use mapcount, page_type may be used.
 * The low 24 bits of pagetype may be used for your own purposes, as long
 * as you are careful to not affect the top 8 bits. The low bits of
 * pagetype will be overwritten when you clear the page_type from the page.
 */
enum pagetype {
	/* 0x00-0x7f are positive numbers, ie mapcount */
	/* Reserve 0x80-0xef for mapcount overflow. */
	PGTY_buddy	= 0xf0,
	PGTY_offline	= 0xf1,
	PGTY_table	= 0xf2,
	PGTY_guard	= 0xf3,
	PGTY_hugetlb	= 0xf4,
	PGTY_slab	= 0xf5,
	PGTY_zsmalloc	= 0xf6,
	PGTY_unaccepted	= 0xf7,

	PGTY_mapcount_underflow = 0xff
};

static inline bool page_type_has_type(int page_type)
{
	return page_type < (PGTY_mapcount_underflow << 24);
}

/* This takes a mapcount which is one more than page->_mapcount */
static inline bool page_mapcount_is_type(unsigned int mapcount)
{
	return page_type_has_type(mapcount - 1);
}

static inline bool page_has_type(const struct page *page)
{
	return page_mapcount_is_type(data_race(page->page_type));
}
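
/*
 * Illustrative sketch (editorial addition): page_type keeps one of the
 * PGTY_* values in its top byte, with UINT_MAX (0xffffffff) meaning
 * "no type", e.g.:
 *
 *	page->page_type = (unsigned int)PGTY_buddy << 24;	// 0xf0000000
 *	page_has_type(page);					// now true
 */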

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{									\
	return data_race(folio->page.page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	if (folio_test_##fname(folio))					\
		return;							\
	VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX,	\
			folio);						\
	folio->page.page_type = (unsigned int)PGTY_##lname << 24;	\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	if (folio->page.page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type = UINT_MAX;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return data_race(page->page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	if (Page##uname(page))						\
		return;							\
	VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page);	\
	page->page_type = (unsigned int)PGTY_##lname << 24;		\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	if (page->page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type = UINT_MAX;					\
}
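
/*
 * Illustrative expansion (editorial addition): PAGE_TYPE_OPS(Buddy, buddy,
 * buddy) below generates PageBuddy()/__SetPageBuddy()/__ClearPageBuddy()
 * plus the folio_test_buddy() variant, e.g. roughly:
 *
 *	static __always_inline int PageBuddy(const struct page *page)
 *	{
 *		return data_race(page->page_type >> 24) == PGTY_buddy;
 *	}
 */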

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * When a memory block gets onlined, all pages are initialized with a
 * refcount of 1 and PageOffline(). generic_online_page() will
 * take care of clearing PageOffline().
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free (unmanaged) pages, allowing the containing memory block to get
 * offlined. A driver that relies on this feature is aware that re-onlining
 * the memory block will require not giving them to the buddy via
 * generic_online_page().
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy using generic_online_page().
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)

/*
 * Mark pages that have to be accepted before being touched for the first
 * time.
 *
 * Serialized with zone lock.
 */
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
{
	return folio_test_hwpoison(folio) ||
	       (folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page.
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_active	|				\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP \
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page. They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_large_rmappable	| 1UL << PG_partially_mapped)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)

/**
 * folio_has_private - Determine if folio has private stuff
 * @folio: The folio to be checked
 *
 * Determine if a folio has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int folio_has_private(const struct folio *folio)
{
	return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */