/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/ashmem.h>

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */
	struct list_head unpinned_list;	/* list of this area's unpinned ranges */
	struct file *file;		/* the shmem-based backing file */
	size_t size;			/* size of the mapping, in bytes */
	unsigned long prot_mask;	/* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;		/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
	 page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
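
/*
 * A quick worked example of the interval macros above (hypothetical
 * page numbers, purely illustrative): for a range covering pages 2..5,
 * range_size() is 4, since both endpoints are inclusive.
 * page_range_in_range(range, 4, 9) is true because page 4 lies inside
 * the range; page_range_subsumes_range(range, 0, 7) is true because
 * 2..5 fits entirely within 0..7; and range_before_page(range, 6) is
 * true because the range ends at page 5.
 */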

/* lru_add - add a range to the LRU list; caller must hold ashmem_mutex */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/* lru_del - remove a range from the LRU list; caller must hold ashmem_mutex */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/*
 * range_del - remove and free a range; caller must hold ashmem_mutex.
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = nonseekable_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~asma->prot_mask) & PROT_MASK)) {
		ret = -EPERM;
		goto out;
	}

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	if (vma->vm_flags & VM_SHARED) {
		shmem_set_file(vma, asma->file);
	} else {
		if (vma->vm_file)
			fput(vma->vm_file);
		vma->vm_file = asma->file;
	}
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
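
/*
 * For reference, a minimal sketch of the userspace side of the mmap
 * path above, assuming the usual /dev/ashmem device node (illustrative
 * only; error handling omitted, and "example-region" is a made-up name):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");  // optional, pre-mmap
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);              // required, pre-mmap
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The fd can then be passed to another process (e.g. over a unix
 * socket) and mmap'd there to share the region.
 */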

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (nr_to_scan && !(gfp_mask & __GFP_FS))
		return -1;
	if (!nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		struct inode *inode = range->asma->file->f_dentry->d_inode;
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

		/* punch out the unpinned pages and drop the range off the LRU */
		vmtruncate_range(inode, start, end);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		nr_to_scan -= range_size(range);
		if (nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};
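
/*
 * Note: a 'seeks' value of four times DEFAULT_SEEKS tells the VM that
 * our objects are relatively expensive to recreate, so shrink_slab()
 * will ask us to scan proportionally fewer pages per reclaim pass.
 */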

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
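
/*
 * To make the four cases above concrete (hypothetical page numbers):
 * if pages 0..9 sit in a single unpinned range and the user pins pages
 * 4..5, we hit case #4: a new unpinned range 6..9 is allocated and the
 * existing range is shrunk to 0..3. Pinning 0..1 next would hit case
 * #2 (0..3 shrinks to 2..3), and pinning 2..3 after that would hit
 * case #1 and delete the range outright.
 */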

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially unpinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
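
/*
 * Note that unpinning coalesces overlapping ranges rather than
 * stacking them: e.g. (hypothetical page numbers) if pages 0..4 are
 * already unpinned and the user unpins 3..6, the 0..4 range is
 * deleted, its purged flag is OR'd into the new range's, and a single
 * range covering 0..6 is inserted in its place.
 */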

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	/* guard against overflow and against ranges past the end of the area */
	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
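
/*
 * A minimal sketch of driving these ioctls from userspace (illustrative
 * only; per the checks above, offset and len must be page-aligned):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// page may now be purged anytime
 *	...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		;	// contents were reclaimed; caller must regenerate them
 *
 * Zero for len is shorthand for "from offset to the end of the region".
 */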

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			/* query the LRU page count, then free that many */
			ret = ashmem_shrink(0, GFP_KERNEL);
			ashmem_shrink(ret, GFP_KERNEL);
		}
		break;
	}

	return ret;
}

static struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		ret = -ENOMEM;
		goto out_free_area;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "ashmem: failed to register misc device!\n");
		goto out_free_range;
	}

	register_shrinker(&ashmem_shrinker);

	printk(KERN_INFO "ashmem: initialized\n");

	return 0;

	/* unwind allocations on failure so a failed load does not leak */
out_free_range:
	kmem_cache_destroy(ashmem_range_cachep);
out_free_area:
	kmem_cache_destroy(ashmem_area_cachep);
	return ret;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");