// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as a
			 * special swap entry; nonetheless it still counts as
			 * a valid regular mapping for the page (and is
			 * accounted for as such in the page map counts).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	if (pfn < page_to_pfn(pvmw->page))
		return false;

	/* THP can be referenced by any subpage */
	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
		return false;

	return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}

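/*
 * Example usage (illustrative sketch only, not part of this file): rmap-style
 * walkers typically call page_vma_mapped_walk() in a loop until it returns
 * false, so that every PTE of a PTE-mapped THP is visited, as the comment
 * above describes.  The helper name below is hypothetical.
 */
#if 0
static int example_count_mappings(struct page *page, struct vm_area_struct *vma,
				  unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		/* flags, pmd, pte and ptl are left zeroed, as required */
	};
	int nr = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		/* Here pvmw.pte (or pvmw.pmd) is valid and pvmw.ptl is held */
		nr++;
	}
	/* The final false return already unlocked pvmw.ptl and unmapped pvmw.pte */
	return nr;
}
#endif
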
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
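
/*
 * Example usage (illustrative sketch only, not part of this file): a caller
 * that just needs a synchronous yes/no answer for a single VMA can test the
 * return value directly; PVMW_SYNC is set internally, so the walk waits on
 * pte locks.  The helper name below is hypothetical.
 */
#if 0
static void example_report_mapping(struct page *page, struct vm_area_struct *vma)
{
	if (page_mapped_in_vma(page, vma))
		pr_info("pfn %lx is mapped in this vma\n", page_to_pfn(page));
}
#endif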