/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
	struct list_head unpinned_list;	 /* this area's unpinned ranges */
	struct file *file;		 /* the shmem-based backing file */
	size_t size;			 /* size of the mapping, in bytes */
	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
};
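
/*
 * Illustrative sketch (not part of the original file): a userspace client
 * typically creates and maps a region through the misc device, assuming
 * the ioctl interface from the Android <linux/ashmem.h> header:
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * Both SET_NAME and SET_SIZE must precede mmap(), since the backing
 * shmem file is created at mmap() time (see ashmem_mmap() below).
 */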

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;		/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};
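
/*
 * Illustrative sketch (not part of the original file): ranges come into
 * being on ASHMEM_UNPIN and die on ASHMEM_PIN. Assuming the struct
 * ashmem_pin layout from <linux/ashmem.h>, and with regenerate() as a
 * hypothetical recovery routine, a client round-trip looks roughly like:
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);
 *	...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		regenerate(p);
 */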

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of ashmem areas and each individual
 * ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))
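
/*
 * Worked example (illustrative): for a range covering pages 4..9,
 * range_size() is 6; page_in_range(range, 9) holds;
 * page_range_in_range(range, 8, 12) holds because the request overlaps
 * the tail of the range; and range_before_page(range, 10) holds because
 * the range ends strictly before page 10.
 */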

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here.  We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret >= 0) {
		/* Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	}
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	if (vma->vm_flags & VM_SHARED)
		shmem_set_file(vma, asma->file);
	else {
		if (vma->vm_file)
			fput(vma->vm_file);
		vma->vm_file = asma->file;
	}

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		range->asma->file->f_op->fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}
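
/*
 * Illustrative sketch (mirrors ASHMEM_PURGE_ALL_CACHES below): this old
 * shrinker API is driven in two phases -- a query call with
 * nr_to_scan == 0 that returns the page count, then a scan call asking
 * for that many pages to be purged:
 *
 *	struct shrink_control sc = {
 *		.gfp_mask = GFP_KERNEL,
 *		.nr_to_scan = 0,
 *	};
 *	int total = ashmem_shrink(&ashmem_shrinker, &sc);
 *	sc.nr_to_scan = total;
 *	ashmem_shrink(&ashmem_shrinker, &sc);
 */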

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
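
/*
 * Illustrative sketch (not part of the original file): since bits can
 * only be removed, a producer can make a region read-only before handing
 * the fd to another process; a later attempt to restore PROT_WRITE then
 * fails with -EINVAL:
 *
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ | PROT_WRITE);
 */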

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding ashmem_mutex across copy_from_user might cause a data
	 * abort, which would try to access mmap_sem. If another thread has
	 * invoked ashmem_mmap, it will be holding that semaphore while
	 * waiting for ashmem_mutex, leading to deadlock. So copy the name
	 * into a local variable without any lock held, and only take
	 * ashmem_mutex to commit it to the structure member.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Use a local variable into which we copy the name from asma with
	 * the lock held. We can then copy it out to userspace without
	 * holding any locks, so even if we block on mmap_sem it cannot
	 * lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland,
	 * no lock held.
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
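
/*
 * Worked example (illustrative): with a single unpinned range covering
 * pages 10..19, ASHMEM_PIN requests map onto the four cases above as:
 *
 *	pin 10..19 (or 5..25)  ->  case 1: the range is deleted
 *	pin  5..12             ->  case 2: range becomes 13..19
 *	pin 17..25             ->  case 3: range becomes 10..16
 *	pin 13..15             ->  case 4: range splits into 10..12, 16..19
 */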

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially unpinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
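
/*
 * Worked example (illustrative): with existing unpinned ranges 10..12
 * and 16..19, unpinning 11..17 deletes both overlapping ranges, widens
 * the request to 10..19, and inserts one coalesced range, which is
 * marked ASHMEM_WAS_PURGED if either old range had been purged.
 */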

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
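
/*
 * Worked example (illustrative, assuming 4 KiB pages): for a 16 KiB
 * region, pin.offset = 4096 with pin.len = 0 expands to len = 12288
 * ("everything onward"), giving pgstart = 1 and pgend = 3. The overflow
 * check rejects requests where offset + len would wrap a __u32, e.g.
 * offset = 0xfffff000 with len = 0x2000.
 */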

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support for 32-bit userspace on 64-bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		kmem_cache_destroy(ashmem_area_cachep);
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		kmem_cache_destroy(ashmem_range_cachep);
		kmem_cache_destroy(ashmem_area_cachep);
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");