/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved in general does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages, but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
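
/*
 * Illustrative sketch (not kernel code; lock_page()/unlock_page() live in
 * <linux/pagemap.h>): a buffered read moves a page through these flags
 * roughly as follows. In real code the I/O completion handler performs the
 * final two steps.
 *
 *        lock_page(page);                // PG_locked set; page pinned in cache
 *        if (!PageUptodate(page))
 *                ...issue read I/O...    // device fills the page
 *        SetPageUptodate(page);          // PG_uptodate: contents now valid
 *        unlock_page(page);              // PG_locked cleared; waiters woken
 */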

/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
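
/*
 * A minimal sketch of how the fields area is consumed (this is what
 * page_zonenum() in <linux/mm.h> does; ZONES_PGSHIFT and ZONES_MASK are
 * derived there from the generated bounds included above):
 *
 *        static inline enum zone_type page_zonenum(const struct page *page)
 *        {
 *                return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *        }
 */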
enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
        PG_workingset,
        PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_error,
        PG_slab,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
        PG_arch_1,
        PG_reserved,
        PG_private,             /* If pagecache, has fs-private data */
        PG_private_2,           /* If pagecache, has fs aux data */
        PG_writeback,           /* Page is under writeback */
        PG_head,                /* A head page */
        PG_mappedtodisk,        /* Has blocks allocated on-disk */
        PG_reclaim,             /* To be reclaimed asap */
        PG_swapbacked,          /* Page is backed by RAM/swap */
        PG_unevictable,         /* Page is "unevictable" */
#ifdef CONFIG_MMU
        PG_mlocked,             /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        PG_uncached,            /* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
        PG_hwpoison,            /* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
        PG_young,
        PG_idle,
#endif
#ifdef CONFIG_64BIT
        PG_arch_2,
#endif
#ifdef CONFIG_PAGE_TRACING
        PG_skb,
        PG_zspage,
#endif
#ifdef CONFIG_MEM_PURGEABLE
        PG_purgeable,
#endif
        __NR_PAGEFLAGS,

        /* Filesystems */
        PG_checked = PG_owner_priv_1,

        /* SwapBacked */
        PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */

        /* Two page bits are conscripted by FS-Cache to maintain local caching
         * state. These bits are set on pages belonging to the netfs's inodes
         * when those inodes are being locally cached.
         */
        PG_fscache = PG_private_2,      /* page backed by cache */

        /* XEN */
        /* Pinned in Xen as a read-only pagetable page. */
        PG_pinned = PG_owner_priv_1,
        /* Pinned as part of domain save (see xen_mm_pin_all()). */
        PG_savepinned = PG_dirty,
        /* Has a grant mapping of another (foreign) domain's page. */
        PG_foreign = PG_owner_priv_1,
        /* Remapped by swiotlb-xen. */
        PG_xen_remapped = PG_owner_priv_1,

        /* SLOB */
        PG_slob_free = PG_private,

        /* Compound pages. Stored in first tail page's flags */
        PG_double_map = PG_workingset,

        /* non-lru isolated movable page */
        PG_isolated = PG_reclaim,

        /* Only valid for buddy pages. Used to track pages that are reported */
        PG_reported = PG_uptodate,
};
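
/*
 * Note the aliases above: flags share a bit only because their users never
 * overlap. A hypothetical misuse, purely for illustration: PG_checked and
 * PG_swapcache both alias PG_owner_priv_1, so
 *
 *        SetPageChecked(page);           // sets PG_owner_priv_1
 *        if (PageSwapCache(page))        // can now read as true for a
 *                ...                     // PG_swapbacked page - a bug
 *
 * would be wrong on any page that is both filesystem-checked and swap
 * backed; aliased flags must be confined to disjoint page populations.
 */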

#ifndef __GENERATING_BOUNDS_H

struct page;    /* forward declaration */

static inline struct page *compound_head(struct page *page)
{
        unsigned long head = READ_ONCE(page->compound_head);

        if (unlikely(head & 1))
                return (struct page *) (head - 1);
        return page;
}

static __always_inline int PageTail(struct page *page)
{
        return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
        return test_bit(PG_head, &page->flags) || PageTail(page);
}
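
/*
 * A sketch of the encoding compound_head() decodes (see also
 * set_compound_head() below): for a compound page of order 2 starting at
 * head, each tail page stores the head pointer with bit 0 set as a tag:
 *
 *        head->flags has PG_head set
 *        head[1].compound_head == (unsigned long)head + 1
 *        head[2].compound_head == (unsigned long)head + 1
 *        head[3].compound_head == (unsigned long)head + 1
 *
 * struct page is at least word-aligned, so bit 0 of a real pointer is
 * always zero and can safely serve as the "this is a tail page" tag.
 */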

#define PAGE_POISON_PATTERN     -1l
static inline int PagePoisoned(const struct page *page)
{
        return page->flags == PAGE_POISON_PATTERN;
}
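
/*
 * Under CONFIG_DEBUG_VM, page_init_poison() below fills the struct page
 * with 0xff bytes (PAGE_POISON_PATTERN), so an all-ones flags word is a
 * reliable "nobody initialised me" marker. The PF_POISONED_CHECK() policy
 * wrapper further down turns any access to such a page into a VM_BUG_ON:
 *
 *        page_init_poison(page, sizeof(struct page));
 *        PageDirty(page);        // VM_BUG_ON_PGFLAGS fires via the policy
 */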

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({                                      \
                VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);            \
                page; })
#define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({                                  \
                VM_BUG_ON_PGFLAGS(PageTail(page), page);                \
                PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({                                    \
                VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
                PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({                                \
                VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
                PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({                                     \
                VM_BUG_ON_PGFLAGS(!PageHead(page), page);               \
                PF_POISONED_CHECK(&page[1]); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)                              \
static __always_inline int Page##uname(struct page *page)              \
        { return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)                               \
static __always_inline void SetPage##uname(struct page *page)          \
        { set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)                             \
static __always_inline void ClearPage##uname(struct page *page)        \
        { clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)                             \
static __always_inline void __SetPage##uname(struct page *page)        \
        { __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)                           \
static __always_inline void __ClearPage##uname(struct page *page)      \
        { __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)                               \
static __always_inline int TestSetPage##uname(struct page *page)       \
        { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)                             \
static __always_inline int TestClearPage##uname(struct page *page)     \
        { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)                                  \
        TESTPAGEFLAG(uname, lname, policy)                              \
        SETPAGEFLAG(uname, lname, policy)                               \
        CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)                                \
        TESTPAGEFLAG(uname, lname, policy)                              \
        __SETPAGEFLAG(uname, lname, policy)                             \
        __CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)                                \
        TESTSETFLAG(uname, lname, policy)                               \
        TESTCLEARFLAG(uname, lname, policy)
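
/*
 * As an illustration (expansion sketch, not literal preprocessor output),
 * PAGEFLAG(Dirty, dirty, PF_HEAD) generates roughly:
 *
 *        static __always_inline int PageDirty(struct page *page)
 *        { return test_bit(PG_dirty, &compound_head(page)->flags); }
 *        static __always_inline void SetPageDirty(struct page *page)
 *        { set_bit(PG_dirty, &compound_head(page)->flags); }
 *        static __always_inline void ClearPageDirty(struct page *page)
 *        { clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * i.e. the PF_HEAD policy redirects every operation to the head page
 * (after the poison check).
 */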

#define TESTPAGEFLAG_FALSE(uname)                                       \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)                                         \
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)                                       \
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)                                     \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)                                        \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)                                      \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)                 \
        SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)                                         \
        TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
        __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
        TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
        TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)      /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
        TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
        TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
        __CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
        TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
        TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
        TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
        TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
        page = compound_head(page);
#endif
        return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
        __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
        TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
        TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_PAGE_TRACING
PAGEFLAG(SKB, skb, PF_ANY)
PAGEFLAG(Zspage, zspage, PF_ANY)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEM_PURGEABLE
PAGEFLAG(Purgeable, purgeable, PF_ANY)
#else
PAGEFLAG_FALSE(Purgeable)
#endif
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON       0x1
#define PAGE_MAPPING_MOVABLE    0x2
#define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

static __always_inline int PageMappingFlags(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_MOVABLE;
}
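
/*
 * Decoding sketch: the low two bits of page->mapping tag an otherwise
 * aligned pointer (page_mapping() in mm/util.c does the real decode):
 *
 *        00 -> struct address_space *  (pagecache)
 *        01 -> struct anon_vma *       (PageAnon)
 *        10 -> struct address_space *  (non-lru movable, __PageMovable)
 *        11 -> KSM stable-tree node    (PageKsm, see below)
 *
 * so consumers strip the tag before dereferencing:
 *
 *        mapping = (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 */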

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
        int ret;
        page = compound_head(page);
        ret = test_bit(PG_uptodate, &(page)->flags);
        /*
         * Must ensure that the data we read out of the page is loaded
         * _after_ we've loaded page->flags to check for PageUptodate.
         * We can skip the barrier if the page is not uptodate, because
         * we wouldn't be reading anything from it.
         *
         * See SetPageUptodate() for the other side of the story.
         */
        if (ret)
                smp_rmb();

        return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        smp_wmb();
        __set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        /*
         * Memory barrier must be issued before setting the PG_uptodate bit,
         * so that all previous stores issued in order to bring the page
         * uptodate are actually visible before PageUptodate becomes true.
         */
        smp_wmb();
        set_bit(PG_uptodate, &page->flags);
}
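
/*
 * A sketch of the barrier pairing (illustrative, not kernel code). The
 * writer's smp_wmb() in SetPageUptodate() pairs with the reader's
 * smp_rmb() in PageUptodate():
 *
 *        writer                          reader
 *        ------                          ------
 *        fill page contents
 *        SetPageUptodate(page)           if (PageUptodate(page))
 *          -> smp_wmb(); set bit           -> test bit; smp_rmb()
 *                                          ... safely read contents ...
 *
 * Without the pair, the reader could observe PG_uptodate set while still
 * seeing stale page contents on a weakly ordered CPU.
 */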

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)                   \
        __test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)         \
        __test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
        test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
        test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
        WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
        WRITE_ONCE(page->compound_head, 0);
}
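
/*
 * A minimal usage sketch (modelled on what prep_compound_page() in
 * mm/page_alloc.c does; details omitted): tag every tail page of a freshly
 * allocated order-N page with its head.
 *
 *        __SetPageHead(head);
 *        for (i = 1; i < (1 << order); i++)
 *                set_compound_head(&head[i], head);
 */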

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
        BUG_ON(!PageHead(page));
        ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
        return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. It should only be called
 * in core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
        return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is only safe to call while
 * split_huge_pmd() cannot run from under us, e.g. if protected by the
 * MMU notifier; otherwise it may result in page->_mapcount check false
 * positives.
 *
 * We have to treat page cache THP differently, since every subpage of it
 * gets its _mapcount incremented once it is PMD mapped. But it may also be
 * PTE mapped in the current process, so we compare the subpage's _mapcount
 * with the compound_mapcount to filter out the PTE-mapped case.
 */
static inline int PageTransCompoundMap(struct page *page)
{
        struct page *head;

        if (!PageTransCompound(page))
                return 0;

        if (PageAnon(page))
                return atomic_read(&page->_mapcount) < 0;

        head = compound_head(page);
        /* File THP is PMD mapped and not PTE mapped */
        return atomic_read(&page->_mapcount) ==
               atomic_read(compound_mapcount_ptr(head));
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
        return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
        TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
        TESTSCFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE  0xf0000000
/* Reserve          0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE   -128
#define PG_buddy        0x00000080
#define PG_offline      0x00000100
#define PG_kmemcg       0x00000200
#define PG_table        0x00000400
#define PG_guard        0x00000800

#define PageType(page, flag)                                            \
        ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
        return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}
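
/*
 * Worked example for page_has_type(): with PG_buddy set, page_type is
 * 0xffffff7f, which as a signed int is -129 < PAGE_MAPCOUNT_RESERVE (-128),
 * so the page "has a type". A _mapcount (which shares this word) that
 * underflows from 0 to -1 reads as 0xffffffff == -1, which is not below
 * -128, so it is not mistaken for a typed page.
 */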

#define PAGE_TYPE_OPS(uname, lname)                                     \
static __always_inline int Page##uname(struct page *page)              \
{                                                                       \
        return PageType(page, PG_##lname);                              \
}                                                                       \
static __always_inline void __SetPage##uname(struct page *page)        \
{                                                                       \
        VM_BUG_ON_PAGE(!PageType(page, 0), page);                       \
        page->page_type &= ~PG_##lname;                                 \
}                                                                       \
static __always_inline void __ClearPage##uname(struct page *page)      \
{                                                                       \
        VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
        page->page_type |= PG_##lname;                                  \
}
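
/*
 * Worked example of the inverted encoding (values, not code): a freshly
 * initialised page has page_type == 0xffffffff. __SetPageBuddy() clears
 * PG_buddy, giving 0xffffff7f, and PageBuddy() then checks
 *
 *        (0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *        (0xffffff7f & 0xf0000080) == 0xf0000000  -> true
 *
 * while on a page with no type set the same test yields 0xf0000080 and
 * fails. __ClearPageBuddy() sets the bit again, restoring 0xffffffff.
 */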

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require setting the pages PageOffline() again and not giving
 * them to the buddy via online_page_callback_t.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        __ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED            (1UL << PG_mlocked)
#else
#define __PG_MLOCKED            0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE                                \
        (1UL << PG_lru          | 1UL << PG_locked      |       \
         1UL << PG_private      | 1UL << PG_private_2   |       \
         1UL << PG_writeback    | 1UL << PG_reserved    |       \
         1UL << PG_slab         | 1UL << PG_active      |       \
         1UL << PG_unevictable  | __PG_MLOCKED)
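
/*
 * A sketch of how such a mask is typically consumed (modelled on the
 * free-path checks in mm/page_alloc.c; names here are illustrative):
 *
 *        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *                bad_page(page, "page still has problematic flags set");
 */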

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP        \
        (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE                              \
        (1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
        return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */