/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;
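
/*
 * Concurrency: xip_sparse_mutex serializes block allocation and the
 * instantiation of __xip_sparse_page; xip_sparse_seq lets __xip_unmap()
 * detect a fault handler racing with it and repeat its rmap walk while
 * holding the mutex.  See __xip_unmap() and xip_file_fault() below.
 */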

/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
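			/*
			 * On the last page only the bytes up to isize are
			 * valid.  For example, with 4K pages and isize ==
			 * 5000, end_index is 1 and nr becomes 904 here.
			 */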
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;
		if (nr > len)
			nr = len;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
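		/*
		 * __copy_to_user()/__clear_user() skip the access_ok()
		 * check; the caller (xip_file_read) has already verified
		 * the user buffer.
		 */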
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
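
/*
 * Example (a sketch, not part of this file): a filesystem that provides
 * a get_xip_mem() address_space operation wires these helpers into its
 * file_operations, roughly the way ext2 does with CONFIG_EXT2_FS_XIP:
 *
 *	const struct file_operations ext2_xip_file_operations = {
 *		.llseek	= generic_file_llseek,
 *		.read	= xip_file_read,
 *		.write	= xip_file_write,
 *		.mmap	= xip_file_mmap,
 *		...
 *	};
 */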

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write.
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping,
		     unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);

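	/*
	 * The first pass ran without xip_sparse_mutex.  If a fault handler
	 * raced with us (the seqcount changed), take the mutex and walk the
	 * vmas again; no new sparse mappings can appear while we hold it.
	 */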
	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but adapted for execute
 * in place.
 */
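/*
 * Three cases are handled below: the backing block exists (map its pfn
 * directly), the block is a hole in a shared writable mapping (allocate
 * it, unmap the sparse page elsewhere, then map the new pfn), or the
 * block is a hole in a read-only mapping (insert the shared zeroed
 * sparse page).
 */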
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

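		/*
		 * Reached by falling through after allocating a block, or
		 * via the goto above when get_xip_mem() succeeded right
		 * away.
		 */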
found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

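		/*
		 * Write side of the seqcount: a concurrent __xip_unmap()
		 * that sampled the count before we insert the sparse page
		 * will notice the change and retry under the mutex.
		 */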
		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
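	/*
	 * VM_MIXEDMAP allows vm_insert_mixed() to install raw pfns (the
	 * XIP backing memory has no struct page) next to the struct-page
	 * backed sparse page inserted by vm_insert_page().
	 */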
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode	*inode = mapping->host;
	long		status = 0;
	size_t		bytes;
	ssize_t		written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block, then unmap the sparse page */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

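		/*
		 * Copy with non-temporal stores where available: the
		 * destination is XIP backing memory, not page cache, so
		 * there is little point in pulling it into the CPU cache.
		 */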
		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}


ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page(), but uses
 * get_xip_mem() to get at the data instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

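	/*
	 * Zero from 'from' to the end of its block.  For example, with a
	 * 1K blocksize and from == 5000, offset is 904 within the page,
	 * length starts as 904 & 1023 == 904 and becomes 1024 - 904 == 120
	 * bytes to clear.
	 */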
	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
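
/*
 * For reference, the address_space operation that all of the helpers in
 * this file rely on has this shape (a sketch inferred from the call
 * sites above; see struct address_space_operations in
 * include/linux/fs.h for the authoritative prototype):
 *
 *	int (*get_xip_mem)(struct address_space *mapping, pgoff_t pgoff,
 *			   int create, void **kmem, unsigned long *pfn);
 *
 * It returns 0 and fills in the kernel mapping and pfn of the backing
 * memory for the page at pgoff, -ENODATA for a hole when create == 0,
 * or a negative error.  With create != 0 it is expected to allocate a
 * backing block for a hole.
 */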