// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

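/*
 * map_pte - map the PTE at @pvmw->address and decide whether this walk
 * should look at it. Returns false (without taking the PTE lock) for
 * entries the walk can skip; otherwise takes pvmw->ptl and returns true.
 */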
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as a
			 * special swap entry; nonetheless it still counts as
			 * a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}
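
/*
 * For example (an illustrative case, assuming 2MB THPs with 512 subpages):
 * a THP whose head page sits at pfn 0x1000 is matched by any pfn in
 * [0x1000, 0x11ff], while an ordinary 4kB page only matches its own pfn.
 */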

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * a THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of a THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

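/*
 * step_forward - advance to the first 'size'-aligned address above the
 * current one. If that would wrap past the end of the address space, park
 * the walk at ULONG_MAX so the caller's "while (pvmw->address < end)" loop
 * terminates.
 */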
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP; see
 * the usage sketch after the function body.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	/*
	 * Seeking to the next pte only makes sense for a THP.
	 * But more important than that optimization is to filter out
	 * any PageKsm page, whose page->index misleads vma_address()
	 * and vma_address_end() to disaster.
	 */
	end = PageTransCompound(page) ?
		vma_address_end(page, pvmw->vma) :
		pvmw->address + PAGE_SIZE;
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    migration_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

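/*
 * A minimal usage sketch for page_vma_mapped_walk(), as a hypothetical rmap
 * walk callback might use it. "page", "vma" and "address" come from the
 * caller; "should_stop_early" stands in for whatever early-exit condition
 * the caller cares about; leaving .flags zero-initialized means no special
 * flags. This is an illustration only, not code used by this file.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte) {
 *			handle the PTE mapping: *pvmw.pte, with pvmw.ptl held
 *		} else {
 *			handle the PMD-mapped THP: *pvmw.pmd
 *		}
 *		if (should_stop_early) {
 *			page_vma_mapped_walk_done(&pvmw);
 *			break;
 *		}
 *	}
 */
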
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}