
Lines matching "+full:compound +full:- +full:device" in include/linux/page-flags.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized)
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else
 *   who does not respect the caching strategy)
 * - Pages part of an offline section (struct pages of offline sections
 *   should not be trusted as they will be initialized when first onlined)
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 *
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 */
/* From enum pageflags: */
	PG_private,		/* If pagecache, has fs-private data */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached. */

	/* Pinned in Xen as a read-only pagetable page. */

	/* Remapped by swiotlb-xen. */

	/* Compound pages. Stored in first tail page's flags */

	/* non-lru isolated movable page */
	unsigned long head = READ_ONCE(page->compound_head);	/* in compound_head() */
	if (unlikely(head & 1))
		return (struct page *) (head - 1);

	return READ_ONCE(page->compound_head) & 1;		/* in PageTail() */

	return test_bit(PG_head, &page->flags) || PageTail(page);	/* in PageCompound() */
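/*
 * Illustrative sketch, not from this header: the tagged-pointer encoding
 * used above, demonstrated standalone in userspace. A tail page stores the
 * head page's address plus 1; bit 0 doubles as the PageTail marker because
 * struct page is always word-aligned, so a real pointer never has bit 0 set.
 */
#include <assert.h>

struct demo_page { unsigned long compound_head; };

int main(void)
{
	struct demo_page head = { 0 };
	struct demo_page tail;

	tail.compound_head = (unsigned long)&head + 1;	/* cf. set_compound_head() */
	assert(tail.compound_head & 1);			/* cf. PageTail() */
	assert((struct demo_page *)(tail.compound_head - 1) == &head);	/* cf. compound_head() */
	return 0;
}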
#define PAGE_POISON_PATTERN	-1l

	return page->flags == PAGE_POISON_PATTERN;	/* in PagePoisoned() */
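/*
 * Userspace sketch of the poison test above: not-yet-initialised struct
 * pages are filled with all-ones bytes (cf. page_init_poison() elsewhere in
 * the kernel), so a flags word equal to -1l marks a struct page that nothing
 * has initialised yet.
 */
#include <assert.h>
#include <string.h>

struct demo_page { unsigned long flags; };

int main(void)
{
	struct demo_page p;

	memset(&p, -1, sizeof(p));		/* all-ones poison fill */
	assert(p.flags == (unsigned long)-1l);	/* PagePoisoned() would be true */
	return 0;
}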
/*
 * Page flags policies wrt compound pages
 *
 * PF_HEAD:
 *     for compound page all operations related to the page flag applied to
 *     the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound page, callers only ever operate on the head page.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
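/*
 * Hedged sketch of the policy helpers that the flag accessors below plug in
 * as "policy"; the real definitions in this header also fold in struct-page
 * poison checks. The second argument ("enforce") is 1 for writes and 0 for
 * reads, matching the macro bodies that follow.
 */
#define PF_ANY(page, enforce)	(page)			/* valid on any page */
#define PF_HEAD(page, enforce)	compound_head(page)	/* operate on the head */
#define PF_ONLY_HEAD(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);	\
		page; })		/* callers pass the head themselves */
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		compound_head(page); })	/* modifications on head only */
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		page; })		/* not for compound pages */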
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }	/* TESTPAGEFLAG */

	{ set_bit(PG_##lname, &policy(page, 1)->flags); }		/* SETPAGEFLAG */

	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }		/* CLEARPAGEFLAG */

	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }		/* __SETPAGEFLAG */

	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }		/* __CLEARPAGEFLAG */

	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }	/* TESTSETFLAG */

	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }	/* TESTCLEARFLAG */
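/*
 * Illustration of what one invocation of these generator macros produces.
 * Given the conventional PAGEFLAG(Dirty, dirty, PF_HEAD) in this header, the
 * expansion is roughly (modulo the exact inline attributes):
 */
static inline int PageDirty(struct page *page)
	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
static inline void SetPageDirty(struct page *page)
	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
static inline void ClearPageDirty(struct page *page)
	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }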
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);	/* in PageSwapCache() */
 * allocator. We can use the non-atomic version of the test and set

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On a KSM page, the PAGE_MAPPING_MOVABLE bit is set along with the ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * page and then page->mapping points to a struct address_space.
 */
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;	/* in PageMappingFlags() */

	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;		/* in PageAnon() */

	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==		/* in __PageMovable() */
				PAGE_MAPPING_MOVABLE;

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, on behalf of identical anonymous pages.
 */
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==		/* in PageKsm() */
				PAGE_MAPPING_KSM;
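/*
 * The constants decoded above, as conventionally defined earlier in this
 * header (reproduced here so the checks read standalone): the two low bits
 * of page->mapping are free for tagging because mapping pointers are
 * word-aligned.
 */
#define PAGE_MAPPING_ANON	0x1	/* mapping is an anon_vma */
#define PAGE_MAPPING_MOVABLE	0x2	/* non-lru movable page */
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)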
	ret = test_bit(PG_uptodate, &(page)->flags);	/* in PageUptodate() */
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 */

	__set_bit(PG_uptodate, &page->flags);		/* in __SetPageUptodate() */

	set_bit(PG_uptodate, &page->flags);		/* in SetPageUptodate() */
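/*
 * Sketch of the barrier pairing behind the fragments above: the writer must
 * publish the page contents before the uptodate bit, and the reader must
 * load the bit before touching the contents. Function names here are
 * illustrative, not from this header.
 */
static inline void sketch_mark_uptodate(struct page *page)
{
	/* ... page contents fully written here ... */
	smp_wmb();			/* order contents before the flag */
	set_bit(PG_uptodate, &page->flags);
}

static inline int sketch_check_uptodate(struct page *page)
{
	int ret = test_bit(PG_uptodate, &page->flags);

	if (ret)
		smp_rmb();		/* order the flag before contents */
	return ret;
}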
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);	/* in set_compound_head() */

	WRITE_ONCE(page->compound_head, 0);				/* in clear_compound_head() */
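/*
 * Hedged sketch of how the two helpers above are used when a compound page
 * is assembled (compare prep_compound_page() in mm/page_alloc.c, which also
 * sets the destructor, the order and the compound mapcount): the head gets
 * PG_head, and every tail gets a tagged pointer back to the head.
 */
static inline void sketch_prep_compound(struct page *head, unsigned int nr)
{
	unsigned int i;

	__SetPageHead(head);
	for (i = 1; i < nr; i++)
		set_compound_head(&head[i], head);
}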
/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault.
 *
 * This is safe to be called only while split_huge_pmd() cannot run
 * from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount check false
 * positives.
 */
		return atomic_read(&page->_mapcount) < 0;	/* in PageTransCompoundMap(), anon case */

		return atomic_read(&page->_mapcount) ==		/* in PageTransCompoundMap(), file case */
		       atomic_read(compound_mapcount_ptr(head));
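/*
 * Context for the two returns above: _mapcount is biased, starting at -1
 * for an unmapped page, so "< 0" on an anon page means no PTE mapping has
 * ever raised it, i.e. the page is mapped only by the PMD. A minimal sketch
 * of the convention (this helper name is illustrative; the real
 * page_mapcount() also handles compound pages):
 */
static inline int sketch_mapcount(struct page *page)
{
	return atomic_read(&page->_mapcount) + 1;	/* -1 means unmapped */
}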
/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This additional reference is required to get race-free detection
 * of unmap of subpages when we have them mapped with both PMDs and PTEs.
 */
/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used by PageFoo.
 */

#define PAGE_MAPCOUNT_RESERVE	-128

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;	/* in page_has_type() */

	page->page_type &= ~PG_##lname;	/* in __SetPage##uname() */	\

	page->page_type |= PG_##lname;	/* in __ClearPage##uname() */	\
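/*
 * Runnable userspace sketch of the inverted page_type scheme above: the
 * field starts at -1 (all ones), "setting" a type clears its bit, and the
 * type reads as set while the PAGE_TYPE_BASE pattern is still intact. The
 * two constants mirror the conventional values in this header.
 */
#include <assert.h>

#define PAGE_TYPE_BASE	0xf0000000
#define PG_buddy	0x00000080

int main(void)
{
	unsigned int page_type = -1;	/* freshly initialised page */

	page_type &= ~PG_buddy;		/* __SetPageBuddy */
	assert((page_type & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE);

	page_type |= PG_buddy;		/* __ClearPageBuddy */
	assert((page_type & (PAGE_TYPE_BASE | PG_buddy)) != PAGE_TYPE_BASE);
	return 0;
}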
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require re-setting the pages PageOffline() rather than giving
 * them to the buddy via online_page_callback_t.

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */

/*
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 */
	return !!(page->flags & PAGE_FLAGS_PRIVATE);	/* in page_has_private() */
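/*
 * The mask tested above is, per the convention in this header, simply the
 * pair of filesystem-private bits (reproduced for context):
 */
#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)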