// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

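/*
 * Typical userspace usage (an illustrative sketch only; exact headers and
 * error handling are up to the caller):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, region_size);
 *	void *p = mmap(NULL, region_size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// later, pages may be offered back to the kernel:
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);
 */
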
#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/page_size_compat.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	         The entry in the LRU list
 * @unpinned:	         The entry in its area's unpinned list
 * @asma:	         The associated anonymous shared memory area.
 * @pgstart:	         The starting page (inclusive)
 * @pgend:	         The ending page (inclusive)
 * @purged:	         The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

/*
 * lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

/*
 * A separate lockdep class for the backing shmem inodes to resolve the lockdep
 * warning about the race between kswapd taking fs_reclaim before inode_lock
 * and write syscall taking inode_lock and then fs_reclaim.
 * Note that such a race is impossible because ashmem does not support write
 * syscalls operating on the backing shmem.
 */
static struct lock_class_key backing_shmem_inode_class;

static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range,
				     size_t page)
{
	return range->pgend < page;
}

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:     The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:     The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	   The associated ashmem_area
 * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	   Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	   The starting page (inclusive)
 * @end:	   The ending page (inclusive)
 * @new_range:	   The placeholder for the new range
 *
 * This function is protected by ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
			struct ashmem_range *prev_range, unsigned int purged,
			size_t start, size_t end,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range = *new_range;

	*new_range = NULL;
	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	 The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	    The associated ashmem_range being shrunk
 * @start:	    The starting page of the new range
 * @end:	    The ending page of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	   The backing file's inode
 * @file:	   The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (!asma)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	      The backing file's inode, which is ignored here
 * @file:	      The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

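/*
 * ashmem_read_iter() - Reads from the backing shmem file on behalf of the
 * ashmem file. Returns 0 (EOF) if no size has been set, -EBADF if the
 * backing file has not been created yet (i.e. the area was never mmapped),
 * and otherwise forwards the read via vfs_iter_read().
 */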
static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here.  We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

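/*
 * ashmem_llseek() - Seeks within the ashmem file by delegating to the
 * backing shmem file with vfs_llseek(), then mirrors the resulting file
 * position back into the ashmem file itself.
 */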
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;
	return ret;
}

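/*
 * calc_vm_may_flags() - Translates PROT_READ/WRITE/EXEC bits into the
 * corresponding VM_MAYREAD/VM_MAYWRITE/VM_MAYEXEC vm_flags.
 */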
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

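/*
 * f_op overrides installed on the backing shmem file by ashmem_mmap(); they
 * keep userspace from mapping the backing file directly and thereby bypassing
 * the asma->prot_mask checks.
 */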
static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* do not allow the ashmem backing shmem file to be mmapped directly */
	return -EPERM;
}

static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}

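/*
 * ashmem_mmap() - Maps the ashmem area into userspace. On the first mmap the
 * backing shmem file is created (sized via ASHMEM_SET_SIZE) and its f_op is
 * swapped for the overrides above; subsequent mappings reuse that file. The
 * requested protection bits are validated against asma->prot_mask.
 */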
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	static struct file_operations vmfile_fops;
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (!asma->size) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > __PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
	    calc_vm_prot_bits(PROT_MASK, 0)) {
		ret = -EPERM;
		goto out;
	}
	vm_flags_clear(vma, calc_vm_may_flags(~asma->prot_mask));

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;
		struct inode *inode;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		inode = file_inode(vmfile);
		lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
		asma->file = vmfile;
		/*
		 * override mmap operation of the vmfile so that it can't be
		 * remapped which would lead to creation of a new vma with no
		 * asma permission checks. Have to override get_unmapped_area
		 * as well to prevent VM_BUG_ON check for f_ops modification.
		 */
		if (!vmfile_fops.mmap) {
			vmfile_fops = *vmfile->f_op;
			vmfile_fops.mmap = ashmem_vmfile_mmap;
			vmfile_fops.get_unmapped_area =
					ashmem_vmfile_get_unmapped_area;
		}
		vmfile->f_op = &vmfile_fops;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	} else {
		vma_set_anonymous(vma);
	}

	vma_set_file(vma, asma->file);
	/* XXX: merge this with the get_file() above if possible */
	fput(asma->file);

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	while (!list_empty(&ashmem_lru_list)) {
		struct ashmem_range *range =
			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;
		struct file *f = range->asma->file;

		get_file(f);
		atomic_inc(&ashmem_shrink_inflight);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		mutex_unlock(&ashmem_mutex);
		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, end - start);
		fput(f);
		if (atomic_dec_and_test(&ashmem_shrink_inflight))
			wake_up_all(&ashmem_shrink_wait);
		if (!mutex_trylock(&ashmem_mutex))
			goto out;
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
out:
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker *ashmem_shrinker;

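/*
 * ashmem_init_shrinker() - Allocates and registers the "android-ashmem"
 * shrinker, wiring up ashmem_shrink_count()/ashmem_shrink_scan().
 */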
static int __init ashmem_init_shrinker(void)
{
	ashmem_shrinker = shrinker_alloc(0, "android-ashmem");
	if (!ashmem_shrinker)
		return -ENOMEM;

	ashmem_shrinker->count_objects = ashmem_shrink_count;
	ashmem_shrinker->scan_objects = ashmem_shrink_scan;
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	ashmem_shrinker->seeks = DEFAULT_SEEKS * 4;

	shrinker_register(ashmem_shrinker);

	return 0;
}

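/*
 * set_prot_mask() - Restricts the allowed protection bits for the area.
 * Bits may only be removed from the mask, never added back.
 */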
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if ((asma->prot_mask & prot) != prot) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_lock. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;

	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (asma->file)
		ret = -EINVAL;
	else
		strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
			ASHMEM_NAME_LEN);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_lock, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland;
	 * no lock held.
	 */
	if (copy_to_user(name, local_name, len))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
		      struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend, new_range);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range = NULL, *iter, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(iter, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(iter, pgstart)) {
			range = iter;
			break;
		}

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially unpinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(iter, pgstart, pgend))
			return 0;
		if (page_range_in_range(iter, pgstart, pgend)) {
			pgstart = min(iter->pgstart, pgstart);
			pgend = max(iter->pgend, pgend);
			purged |= iter->purged;
			range_del(iter);
			goto restart;
		}
	}

	range = list_prepare_entry(range, &asma->unpinned_list, unpinned);
	range_alloc(asma, range, purged, pgstart, pgend, new_range);
	return 0;
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

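/*
 * ashmem_pin_unpin() - Common entry point for the ASHMEM_PIN, ASHMEM_UNPIN
 * and ASHMEM_GET_PIN_STATUS ioctls: copies the struct ashmem_pin argument
 * from userspace, validates that offset/len describe a page-aligned range
 * within the area, converts it to a page range and dispatches to the helpers
 * above.
 */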
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;
	struct ashmem_range *range = NULL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
		if (!range)
			return -ENOMEM;
	}

	mutex_lock(&ashmem_mutex);
	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

	if (!asma->file)
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if ((pin.offset | pin.len) & ~PAGE_MASK)
		goto out_unlock;

	if (((__u32)-1) - pin.offset < pin.len)
		goto out_unlock;

	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);
	if (range)
		kmem_cache_free(ashmem_range_cachep, range);

	return ret;
}

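/*
 * ashmem_ioctl() - Handles the ashmem ioctl interface: naming and sizing the
 * area, adjusting the protection mask, pin/unpin requests, purging all
 * caches (CAP_SYS_ADMIN only) and querying the backing file's inode number.
 */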
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	unsigned long ino;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			ret = ashmem_shrink_count(ashmem_shrinker, &sc);
			ashmem_shrink_scan(ashmem_shrinker, &sc);
		}
		break;
	case ASHMEM_GET_FILE_ID:
		/* Lock around our check to avoid racing with ashmem_mmap(). */
		mutex_lock(&ashmem_mutex);
		if (!asma || !asma->file) {
			mutex_unlock(&ashmem_mutex);
			ret = -EINVAL;
			break;
		}
		ino = file_inode(asma->file)->i_ino;
		mutex_unlock(&ashmem_mutex);

		if (copy_to_user((void __user *)arg, &ino, sizeof(ino))) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	}

	return ret;
}

/* support for 32-bit userspace on 64-bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif
#ifdef CONFIG_PROC_FS
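/*
 * ashmem_show_fdinfo() - Emits the backing inode number, region name and size
 * to /proc/<pid>/fdinfo/<fd> for this ashmem file.
 */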
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	seq_printf(m, "size:\t%zu\n", asma->size);

	mutex_unlock(&ashmem_mutex);
}
#endif
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

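/*
 * ashmem_init() - Creates the ashmem_area/ashmem_range slab caches, registers
 * the /dev/ashmem misc device and then the shrinker, unwinding in reverse
 * order on failure.
 */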
static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (!ashmem_area_cachep) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!ashmem_range_cachep) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (ret) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = ashmem_init_shrinker();
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);