// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/memcheck.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

#ifdef CONFIG_PURGEABLE_ASHMEM
#define PURGEABLE_ASHMEM_INIT_REFCOUNT 1
#define PURGEABLE_ASHMEM_UNPIN_REFCOUNT 0
#define PURGEABLE_ASHMEM_PIN_OFFSET 0
#define PURGEABLE_ASHMEM_PIN_LEN 0
#endif

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned (evictable) ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
#ifdef CONFIG_PURGEABLE_ASHMEM
	bool is_purgeable;
	bool purged;
	unsigned int id;
	unsigned int create_time;
	int ref_count;
#endif
};
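
/*
 * The CONFIG_PURGEABLE_ASHMEM members above belong to the purgeable-ashmem
 * extension carried in this tree: is_purgeable is set via the
 * ASHMEM_SET_PURGEABLE ioctl, purged records that the shrinker discarded
 * the area's contents (cleared again by PURGEABLE_ASHMEM_REBUILD_SUCCESS),
 * ref_count is a whole-area pin count driven by ASHMEM_PIN/ASHMEM_UNPIN,
 * and id/create_time (recorded at open()) let ashmem_shrink_by_id() find
 * a specific area on the LRU list.
 */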

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};
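
/*
 * Page indices in an ashmem_range are inclusive at both ends. For example
 * (illustrative), unpinning bytes [PAGE_SIZE, 3 * PAGE_SIZE) of a region
 * yields pgstart = 1 and pgend = 2, so range_size() reports two pages.
 */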

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

/*
 * A separate lockdep class for the backing shmem inodes to resolve the lockdep
 * warning about the race between kswapd taking fs_reclaim before inode_lock
 * and a write syscall taking inode_lock and then fs_reclaim.
 * Note that such a race is impossible because ashmem does not support write
 * syscalls operating on the backing shmem.
 */
static struct lock_class_key backing_shmem_inode_class;

void ashmem_mutex_lock(void)
{
	mutex_lock(&ashmem_mutex);
}

void ashmem_mutex_unlock(void)
{
	mutex_unlock(&ashmem_mutex);
}

static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range,
				     size_t page)
{
	return range->pgend < page;
}
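
/*
 * Note: each area's unpinned list is kept sorted by page index in
 * descending order; range_before_page() is the early-termination test the
 * pin/unpin walks below rely on.
 */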

#ifdef CONFIG_PURGEABLE_ASHMEM
static inline bool is_purgeable_ashmem(const struct ashmem_area *asma)
{
	return (asma && asma->is_purgeable);
}
#endif

#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	   The associated ashmem_area
 * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	   Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	   The starting page (inclusive)
 * @end:	   The ending page (inclusive)
 * @new_range:	   The placeholder for the new range
 *
 * This function is protected by ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
			struct ashmem_range *prev_range, unsigned int purged,
			size_t start, size_t end,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range = *new_range;

	*new_range = NULL;
	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	 The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting page (inclusive) of the new range
 * @end:	The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - It
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The inode of the backing file
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function - It is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (!asma)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;
#ifdef CONFIG_PURGEABLE_ASHMEM
	asma->ref_count = PURGEABLE_ASHMEM_INIT_REFCOUNT;
	asma->is_purgeable = false;
	asma->purged = false;
	asma->id = current->pid;
	asma->create_time = ktime_get();
#endif
	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The backing file's inode, ignored here
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}
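
/*
 * e.g. (illustrative): if prot_mask has been narrowed to PROT_READ,
 * ashmem_mmap() below clears VM_MAYWRITE and VM_MAYEXEC from the vma, so
 * a later mprotect(PROT_WRITE) on the mapping should fail as well.
 */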

static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* do not allow the ashmem backing shmem file to be mmapped directly */
	return -EPERM;
}

static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	static struct file_operations vmfile_fops;
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (!asma->size) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
	    calc_vm_prot_bits(PROT_MASK, 0)) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;
		struct inode *inode;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		inode = file_inode(vmfile);
		lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
		asma->file = vmfile;
		/*
		 * override mmap operation of the vmfile so that it can't be
		 * remapped which would lead to creation of a new vma with no
		 * asma permission checks. Have to override get_unmapped_area
		 * as well to prevent VM_BUG_ON check for f_ops modification.
		 */
		if (!vmfile_fops.mmap) {
			vmfile_fops = *vmfile->f_op;
			vmfile_fops.mmap = ashmem_vmfile_mmap;
			vmfile_fops.get_unmapped_area =
					ashmem_vmfile_get_unmapped_area;
		}
		vmfile->f_op = &vmfile_fops;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	} else {
		vma_set_anonymous(vma);
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
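
/*
 * Illustrative userspace lifecycle (a sketch, not part of this file; the
 * ASHMEM_* ioctls come from the ashmem uapi header):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");	// optional, before mmap
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);		// required before mmap
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The first mmap() creates the backing shmem file above; after that,
 * ASHMEM_SET_NAME and ASHMEM_SET_SIZE are rejected with -EINVAL.
 */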

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	while (!list_empty(&ashmem_lru_list)) {
		struct ashmem_range *range =
			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;
		struct file *f = range->asma->file;

		get_file(f);
		atomic_inc(&ashmem_shrink_inflight);
		range->purged = ASHMEM_WAS_PURGED;
#ifdef CONFIG_PURGEABLE_ASHMEM
		if (is_purgeable_ashmem(range->asma))
			range->asma->purged = true;
#endif
		lru_del(range);

		freed += range_size(range);
		mutex_unlock(&ashmem_mutex);
		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, end - start);
		fput(f);
		if (atomic_dec_and_test(&ashmem_shrink_inflight))
			wake_up_all(&ashmem_shrink_wait);
		if (!mutex_trylock(&ashmem_mutex))
			goto out;
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
out:
	return freed;
}
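
/*
 * Note: "purging" a range punches a hole in the backing shmem file, so its
 * page contents are discarded and later faults in that range read back
 * zero-filled pages. The next ASHMEM_PIN covering the range reports
 * ASHMEM_WAS_PURGED so userspace knows it must regenerate the data.
 */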

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return the
	 * number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if ((asma->prot_mask & prot) != prot) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
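
/*
 * Example (illustrative): a fresh area allows PROT_READ | PROT_WRITE |
 * PROT_EXEC. After ASHMEM_SET_PROT_MASK(PROT_READ), mapping the region
 * with PROT_WRITE fails with -EPERM, and a later attempt to widen the
 * mask back to PROT_READ | PROT_WRITE fails with -EINVAL.
 */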

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_lock. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;

	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (asma->file)
		ret = -EINVAL;
	else
		strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
			ASHMEM_NAME_LEN);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_lock, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 * No lock held
	 */
	if (copy_to_user(name, local_name, len))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
		      struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

#ifdef CONFIG_PURGEABLE_ASHMEM
	if (is_purgeable_ashmem(asma)) {
		asma->ref_count++;
		if (asma->ref_count > 1)
			return PM_SUCCESS;
	}
#endif
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend, new_range);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
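
/*
 * Example (illustrative): with pages 0-9 of an area unpinned as a single
 * range, ASHMEM_PIN of pages 3-5 hits case #4 above: the range is split
 * into two unpinned ranges, 0-2 and 6-9, while pages 3-5 become pinned.
 */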

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

#ifdef CONFIG_PURGEABLE_ASHMEM
	if (is_purgeable_ashmem(asma)) {
		if (asma->ref_count > PURGEABLE_ASHMEM_UNPIN_REFCOUNT &&
		    !(--asma->ref_count == PURGEABLE_ASHMEM_UNPIN_REFCOUNT))
			return PM_SUCCESS;
		if (asma->ref_count < PURGEABLE_ASHMEM_UNPIN_REFCOUNT) {
			asma->ref_count = PURGEABLE_ASHMEM_UNPIN_REFCOUNT;
			return PM_FAIL;
		}
	}
#endif
restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially unpinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	range_alloc(asma, range, purged, pgstart, pgend, new_range);
	return 0;
}
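
/*
 * Example (illustrative): if pages 0-3 and 6-9 are already unpinned,
 * ASHMEM_UNPIN of pages 2-7 deletes both ranges and inserts a single
 * unpinned range covering pages 0-9 (inheriting any purged status).
 */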

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

#ifdef CONFIG_PURGEABLE_ASHMEM
	if (is_purgeable_ashmem(asma))
		return asma->ref_count;
#endif
	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;
	struct ashmem_range *range = NULL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
		if (!range)
			return -ENOMEM;
	}

	mutex_lock(&ashmem_mutex);
	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

#ifdef CONFIG_PURGEABLE_ASHMEM
	if (is_purgeable_ashmem(asma)) {
		if (pin.offset != PURGEABLE_ASHMEM_PIN_OFFSET ||
		    pin.len != PURGEABLE_ASHMEM_PIN_LEN) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}
#endif
	if (!asma->file)
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if ((pin.offset | pin.len) & ~PAGE_MASK)
		goto out_unlock;

	if (((__u32)-1) - pin.offset < pin.len)
		goto out_unlock;

	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);
	if (range)
		kmem_cache_free(ashmem_range_cachep, range);

	return ret;
}
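
/*
 * Illustrative userspace use of the pin interface (a sketch; struct
 * ashmem_pin comes from the ashmem uapi header; offset and len are in
 * bytes and must be page-aligned, len == 0 meaning "to the end"):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// contents may now be reclaimed
 *	...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		regenerate_contents();	// hypothetical helper
 */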

#ifdef CONFIG_PURGEABLE_ASHMEM
void ashmem_shrinkall(void)
{
	struct shrink_control sc = {
		.gfp_mask = GFP_KERNEL,
		.nr_to_scan = LONG_MAX,
	};

	ashmem_shrink_scan(&ashmem_shrinker, &sc);
}

void ashmem_shrink_by_id(const unsigned int ashmem_id,
			 const unsigned int create_time)
{
	struct ashmem_range *range, *next;
	struct file *f;
	loff_t start, end;
	bool found = false;

	if (!mutex_trylock(&ashmem_mutex))
		return;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		if (!is_purgeable_ashmem(range->asma))
			continue;
		if (range->asma->id != ashmem_id ||
		    range->asma->create_time != create_time)
			continue;
		found = true;
		range->asma->purged = true;
		break;
	}
	if (!found)
		goto out_unlock;

	start = range->pgstart * PAGE_SIZE;
	end = (range->pgend + 1) * PAGE_SIZE;
	f = range->asma->file;
	if (!f)
		goto out_unlock;

	get_file(f);
	atomic_inc(&ashmem_shrink_inflight);
	range->purged = ASHMEM_WAS_PURGED;
	lru_del(range);
	mutex_unlock(&ashmem_mutex);

	f->f_op->fallocate(f, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			   start, end - start);
	fput(f);
	if (atomic_dec_and_test(&ashmem_shrink_inflight))
		wake_up_all(&ashmem_shrink_wait);
	return;

out_unlock:
	mutex_unlock(&ashmem_mutex);
}

static bool is_ashmem_unpin(struct ashmem_area *asma)
{
	struct ashmem_range *range, *next;
	int count = 0;

	mutex_lock(&ashmem_mutex);
	if (!asma) {
		mutex_unlock(&ashmem_mutex);
		return false;
	}
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		count++;
	mutex_unlock(&ashmem_mutex);
	return count > 0;
}

static long purgeable_ashmem_cmd(struct ashmem_area *asma, unsigned int cmd)
{
	int ret = -EINVAL;

	if (!is_purgeable_ashmem(asma))
		return ret;

	mutex_lock(&ashmem_mutex);
	switch (cmd) {
	case ASHMEM_GET_PURGEABLE:
		ret = asma->is_purgeable;
		break;
	case PURGEABLE_ASHMEM_IS_PURGED:
		ret = asma->purged;
		break;
	case PURGEABLE_ASHMEM_REBUILD_SUCCESS:
		asma->purged = false;
		ret = PM_SUCCESS;
		break;
	}
	mutex_unlock(&ashmem_mutex);
	return ret;
}

bool get_purgeable_ashmem_metadata(struct file *f,
				   struct purgeable_ashmem_metadata *pmdata)
{
	struct ashmem_area *asma = f->private_data;

	if (!asma)
		return false;

	pmdata->name = asma->name;
	pmdata->size = asma->size;
	pmdata->refc = asma->ref_count;
	pmdata->purged = asma->purged;
	pmdata->is_purgeable = asma->is_purgeable;
	pmdata->id = asma->id;
	pmdata->create_time = asma->create_time;
	return true;
}
#endif

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
#ifdef CONFIG_PURGEABLE_ASHMEM
	case ASHMEM_SET_PURGEABLE:
		if (is_ashmem_unpin(asma)) {
			ret = PM_FAIL;
			break;
		}
		mutex_lock(&ashmem_mutex);
		if (asma) {
			asma->is_purgeable = true;
			ret = PM_SUCCESS;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_PURGEABLE:
		fallthrough;
	case PURGEABLE_ASHMEM_IS_PURGED:
		fallthrough;
	case PURGEABLE_ASHMEM_REBUILD_SUCCESS:
		ret = purgeable_ashmem_cmd(asma, cmd);
		break;
#endif
	}

	return ret;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	mutex_unlock(&ashmem_mutex);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

int is_ashmem_file(struct file *file)
{
	return file->f_op == &ashmem_fops;
}

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

size_t get_ashmem_size_by_file(struct file *f)
{
	struct ashmem_area *asma = f->private_data;

	if (asma)
		return asma->size;
	return 0;
}

char *get_ashmem_name_by_file(struct file *f)
{
	struct ashmem_area *asma = f->private_data;

	if (asma)
		return asma->name;
	return NULL;
}

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (!ashmem_area_cachep) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (!ashmem_range_cachep) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (ret) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = register_shrinker(&ashmem_shrinker);
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	init_ashmem_process_info();
#ifdef CONFIG_PURGEABLE_ASHMEM
	init_purgeable_ashmem_trigger();
#endif
	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);