// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006 Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/page_size_compat.h>

#include <linux/uaccess.h>
#include "swap.h"

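/*
 * Hugetlb entry handler for the page table walk: a huge page is either
 * entirely resident or entirely absent, so report the same state for
 * every PAGE_SIZE-sized entry of the result vector that it spans.
 */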
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under user process are always in RAM and never
	 * swapped out, but theoretically it needs to be checked.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_incore_folio(mapping, index);
	if (!IS_ERR(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

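/*
 * pte_hole callback: delegate to __mincore_unmapped_range(), which checks
 * the page cache for file-backed VMAs and reports anonymous holes as not
 * resident, then advance the result vector past the hole.
 */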
static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swap_cache_index(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
		file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry = mincore_pte_range,
	.pte_hole = mincore_unmapped_range,
	.hugetlb_entry = mincore_hugetlb,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);

		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

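/*
 * When the kernel emulates a larger page size (__PAGE_SIZE > PAGE_SIZE,
 * see <linux/page_size_compat.h>), each emulated page is backed by
 * nr_subpages native pages. Collapse the per-native-page results by
 * OR-ing them together, so an emulated page is reported resident if any
 * of its subpages is resident. With nr_subpages == 1 the source and
 * result vectors alias each other and no work is needed.
 */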
static inline void __collapse_mincore_result(unsigned char *src_vec,
					     unsigned char *res_vec,
					     unsigned long pages,
					     unsigned long nr_subpages)
{
	unsigned long i;

	if (nr_subpages == 1)
		return;

	for (i = 0; i < pages; i++)
		res_vec[i / nr_subpages] |= src_vec[i];
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes. The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information. Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;
	unsigned char *res;
	unsigned long nr_subpages = __PAGE_SIZE / PAGE_SIZE;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~__PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages / nr_subpages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	if (unlikely(nr_subpages > 1)) {
		res = (void *) __get_free_page(GFP_USER|__GFP_ZERO);
		if (!res) {
			free_page((unsigned long) tmp);
			return -EAGAIN;
		}
	} else
		res = tmp;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;

		__collapse_mincore_result(tmp, res, retval, nr_subpages);

		if (copy_to_user(vec, res, retval / nr_subpages)) {
			retval = -EFAULT;
			break;
		}

		/*
		 * If emulating the page size, clear the old results, to avoid
		 * corrupting the next __collapse_mincore_result().
		 */
		if (nr_subpages > 1)
			memset(res, 0, retval / nr_subpages);

		pages -= retval;
		vec += retval / nr_subpages;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	if (unlikely(nr_subpages > 1))
		free_page((unsigned long) res);
	free_page((unsigned long) tmp);
	return retval;
}
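
/*
 * Illustrative userspace usage (a sketch, not part of this file; assumes
 * the libc mincore() wrapper and queries a single anonymous page):
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		unsigned char vec[1];
 *		void *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		*(char *)buf = 1;	// fault the page in
 *		if (mincore(buf, page, vec) == 0)
 *			printf("resident: %d\n", vec[0] & 1);
 *		return 0;
 *	}
 */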