/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * Tear down a single pte: flush and unmap a present page (transferring
 * pte dirty state to the page), or release the swap reference of a
 * non-present, non-file entry.
 */
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, MM_FILEPAGES);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte at a given virtual memory address, releasing any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
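	/*
	 * Illustrative sketch of the other half of this scheme: on a later
	 * fault, the fault path (do_nonlinear_fault() in mm/memory.c)
	 * recognizes such an entry with pte_file() and recovers the encoded
	 * offset via the arch-provided inverse of pgoff_to_pte(), roughly:
	 *
	 *	if (pte_file(orig_pte))
	 *		pgoff = pte_to_pgoff(orig_pte);
	 *
	 * before asking the vma's ->fault handler for that page of the file.
	 */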
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

/*
 * Install file ptes covering @size bytes starting at @addr, one page at
 * a time, beginning at file page @pgoff.
 */
int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);
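
/*
 * Usage sketch (illustrative, modeled on how in-tree filesystems wire
 * this up): a filesystem opts into nonlinear remapping by pointing its
 * vm_operations_struct at the generic helper, e.g.:
 *
 *	static const struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 */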

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;
	vm_flags_t vm_flags = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's ptes? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
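	/*
	 * For instance (illustrative numbers): with PTE_FILE_MAX_BITS == 32
	 * and 4 KiB pages, offsets up to 2^32 pages can be encoded in a
	 * file pte, i.e. files up to 2^44 bytes (16 TiB).
	 */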

	/*
	 * We need down_write() to change vma->vm_flags; start with the
	 * read lock and upgrade via get_write_lock below only if needed.
	 */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/*
		 * vm_private_data is used as a swapout cursor
		 * in a VM_NONLINEAR vma.
		 */
		if (vma->vm_private_data)
			goto out;

		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
get_write_lock:
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = get_file(vma->vm_file);

			addr = mmap_region(file, start, size,
					vma->vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop PG_mlocked flag for over-mapped range
		 */
		if (!has_write_lock)
			goto get_write_lock;
		vm_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = vm_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (vma)
		vm_flags = vma->vm_flags;
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

	return err;
}
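
/*
 * Userspace usage sketch (illustrative, assuming a file named "data"
 * exists): remap the third page of the file over the first page of an
 * existing MAP_SHARED window. @prot must be 0; @pgoff is in units of
 * pages.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	long page = sysconf(_SC_PAGESIZE);
 *	int fd = open("data", O_RDWR);
 *	char *win = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// after this call, window page 0 shows file page 2:
 *	remap_file_pages(win, page, 0, 2, 0);
 */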