/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

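/*
 * Which per-mm RSS counter a page is accounted under: anonymous pages
 * live in MM_ANONPAGES, everything else in MM_FILEPAGES.
 */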
static int mm_counter(struct page *page)
{
	return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
}

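/*
 * Tear down whatever is currently mapped at @addr before a file pte is
 * installed over it: a present page is flushed and its rmap, refcount
 * and RSS accounting dropped; a non-present, non-file entry has its
 * swap or migration bookkeeping released instead.
 */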
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page;
	swp_entry_t entry;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, mm_counter(page));
			page_remove_rmap(page);
			page_cache_release(page);
		}
	} else {	/* zap_pte() is not called when pte_none() */
		if (!pte_file(pte)) {
			update_hiwater_rss(mm);
			entry = pte_to_swp_entry(pte);
			if (non_swap_entry(entry)) {
				if (is_migration_entry(entry)) {
					page = migration_entry_to_page(entry);
					dec_mm_counter(mm, mm_counter(page));
				}
			} else {
				free_swap_and_cache(entry);
				dec_mm_counter(mm, MM_SWAPENTS);
			}
		}
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte at a given virtual memory address, releasing any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte, ptfile;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	ptfile = pgoff_to_pte(pgoff);

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

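/*
 * Default ->remap_pages implementation: walk the range one page at a
 * time, installing a file pte for each successive file offset starting
 * at @pgoff.
 */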
int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);
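
/*
 * A filesystem opts in by pointing its vm_operations_struct at this
 * helper. A minimal sketch (foo_file_vm_ops is a hypothetical name;
 * the hookup mirrors what mm/filemap.c does for page-cache files):
 *
 *	static const struct vm_operations_struct foo_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 */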
/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no I/O.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
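/*
 * Example (hypothetical userspace sketch, not part of this file): remap
 * the first page of a MAP_SHARED window so it shows file page 3 instead
 * of file page 0, without creating a new vma:
 *
 *	char *win = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	remap_file_pages(win, page_size, 0, 3, 0);
 */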
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;
	vm_flags_t vm_flags = 0;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
			"See Documentation/vm/remap_file_pages.txt.\n",
			current->comm, current->pid);
	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/*
	 * We need down_write() to change vma->vm_flags, so start with the
	 * read lock and upgrade only once that becomes necessary.
	 */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/*
		 * vm_private_data is used as a swapout cursor
		 * in a VM_NONLINEAR vma.
		 */
		if (vma->vm_private_data)
			goto out;

		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
get_write_lock:
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = get_file(vma->vm_file);
			/* mmap_region may free vma; grab the info now */
			vm_flags = vma->vm_flags;

			addr = mmap_region(file, start, size, vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out_freed;
		}
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop PG_Mlocked flag for over-mapped range
		 */
		if (!has_write_lock)
			goto get_write_lock;
		vm_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = vm_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (vma)
		vm_flags = vma->vm_flags;
out_freed:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

	return err;
}