// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_internal.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

ANDROID_KABI_DECLONLY(binder_node);
ANDROID_KABI_DECLONLY(binder_transaction);

struct list_lru binder_freelist;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

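/*
 * Usage note (a sketch, assuming the standard module-param sysfs layout
 * and that this file is built into the "binder" module):
 *
 *   # enable buffer-allocation tracing in addition to user errors
 *   echo 0x5 > /sys/module/binder/parameters/debug_mask
 *
 * Output goes through pr_info_ratelimited(), so message bursts are
 * throttled by the printk ratelimit.
 */
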
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

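/*
 * Note that buffer sizes are implicit: binder_alloc_buffer_size() above
 * derives a buffer's size from the gap between its start address and the
 * start of the next buffer in the alloc->buffers list (or the end of the
 * mapping for the last buffer). For example, with buffers starting at
 * offsets 0x0, 0x40 and 0x100 in a 0x1000-byte mapping, their sizes are
 * 0x40, 0xc0 and 0xf00 bytes respectively.
 */
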
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > buffer->user_data) {
			n = n->rb_right;
		} else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   unsigned long user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
	return buffer;
}

static inline void
binder_set_installed_page(struct binder_alloc *alloc,
			  unsigned long index,
			  struct page *page)
{
	/* Pairs with acquire in binder_get_installed_page() */
	smp_store_release(&alloc_to_wrap(alloc)->pages[index], page);
}

static inline struct page *
binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
	/* Pairs with release in binder_set_installed_page() */
	return smp_load_acquire(&alloc_to_wrap(alloc)->pages[index]);
}

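/*
 * The release/acquire pair above guarantees that a reader which observes
 * a non-NULL entry in pages[] also observes the fully initialized page,
 * including the shrinker metadata stashed in page->private by
 * binder_page_alloc() below.
 */
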
static void binder_lru_freelist_add(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	unsigned long page_addr;
	struct page *page;

	trace_binder_update_page_range(alloc, false, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		size_t index;
		int ret;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = binder_get_installed_page(alloc, index);
		if (!page)
			continue;

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_freelist,
				   page_to_lru(page),
				   page_to_nid(page),
				   NULL);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
	}
}

static inline
void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
	/* pairs with smp_load_acquire in binder_alloc_is_mapped() */
	smp_store_release(&alloc_to_wrap(alloc)->mapped, state);
}

static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_mapped() */
	return smp_load_acquire(&alloc_to_wrap(alloc)->mapped);
}

static struct page *binder_page_lookup(struct binder_alloc *alloc,
				       unsigned long addr)
{
	struct mm_struct *mm = alloc->mm;
	struct page *page;
	long npages = 0;

	/*
	 * Find an existing page in the remote mm. If missing,
	 * don't attempt to fault-in; just propagate an error.
	 */
	mmap_read_lock(mm);
	if (binder_alloc_is_mapped(alloc))
		npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
					       &page, NULL);
	mmap_read_unlock(mm);

	return npages > 0 ? page : NULL;
}

static int binder_page_insert(struct binder_alloc *alloc,
			      unsigned long addr,
			      struct page *page)
{
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	int ret = -ESRCH;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma) {
		if (binder_alloc_is_mapped(alloc))
			ret = vm_insert_page(vma, addr, page);
		vma_end_read(vma);
		return ret;
	}

	/* fall back to mmap_lock */
	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma && binder_alloc_is_mapped(alloc))
		ret = vm_insert_page(vma, addr, page);
	mmap_read_unlock(mm);

	return ret;
}

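/*
 * Design note: binder_page_insert() tries the per-VMA read lock before
 * falling back to mmap_lock. Pages are installed into the target
 * process's address space, so avoiding its mmap_lock keeps binder from
 * serializing against unrelated mmap/munmap activity in that process.
 */
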
static struct page *binder_page_alloc(struct binder_alloc *alloc,
				      unsigned long index)
{
	struct binder_shrinker_mdata *mdata;
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	/* allocate and install shrinker metadata under page->private */
	mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
	if (!mdata) {
		__free_page(page);
		return NULL;
	}

	mdata->alloc = alloc;
	mdata->page_index = index;
	INIT_LIST_HEAD(&mdata->lru);
	set_page_private(page, (unsigned long)mdata);

	return page;
}

static void binder_free_page(struct page *page)
{
	kfree((struct binder_shrinker_mdata *)page_private(page));
	__free_page(page);
}

static int binder_install_single_page(struct binder_alloc *alloc,
				      unsigned long index,
				      unsigned long addr)
{
	struct page *page;
	int ret;

	if (!mmget_not_zero(alloc->mm))
		return -ESRCH;

	page = binder_page_alloc(alloc, index);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = binder_page_insert(alloc, addr, page);
	switch (ret) {
	case -EBUSY:
		/*
		 * EBUSY is ok. Someone installed the pte first but the
		 * alloc->pages[index] has not been updated yet. Discard
		 * our page and look up the one already installed.
		 */
		ret = 0;
		binder_free_page(page);
		page = binder_page_lookup(alloc, addr);
		if (!page) {
			pr_err("%d: failed to find page at offset %lx\n",
			       alloc->pid, addr - alloc->buffer);
			ret = -ESRCH;
			break;
		}
		fallthrough;
	case 0:
		/* Mark page installation complete and safe to use */
		binder_set_installed_page(alloc, index, page);
		break;
	default:
		binder_free_page(page);
		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
		       alloc->pid, __func__, addr - alloc->buffer, ret);
		break;
	}
out:
	mmput_async(alloc->mm);
	return ret;
}

static int binder_install_buffer_pages(struct binder_alloc *alloc,
				       struct binder_buffer *buffer,
				       size_t size)
{
	unsigned long start, final;
	unsigned long page_addr;

	start = buffer->user_data & PAGE_MASK;
	final = PAGE_ALIGN(buffer->user_data + size);

	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
		unsigned long index;
		int ret;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (binder_get_installed_page(alloc, index))
			continue;

		trace_binder_alloc_page_start(alloc, index);

		ret = binder_install_single_page(alloc, index, page_addr);
		if (ret)
			return ret;

		trace_binder_alloc_page_end(alloc, index);
	}

	return 0;
}

/* The range of pages should exclude those shared with other buffers */
static void binder_lru_freelist_del(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	unsigned long page_addr;
	struct page *page;

	trace_binder_update_page_range(alloc, true, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		unsigned long index;
		bool on_lru;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = binder_get_installed_page(alloc, index);

		if (page) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_freelist,
					      page_to_lru(page),
					      page_to_nid(page),
					      NULL);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;
	}
}

static void debug_no_space_locked(struct binder_alloc *alloc)
{
	size_t largest_alloc_size = 0;
	struct binder_buffer *buffer;
	size_t allocated_buffers = 0;
	size_t largest_free_size = 0;
	size_t total_alloc_size = 0;
	size_t total_free_size = 0;
	size_t free_buffers = 0;
	size_t buffer_size;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		allocated_buffers++;
		total_alloc_size += buffer_size;
		if (buffer_size > largest_alloc_size)
			largest_alloc_size = buffer_size;
	}

	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		free_buffers++;
		total_free_size += buffer_size;
		if (buffer_size > largest_free_size)
			largest_free_size = buffer_size;
	}

	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
			   total_alloc_size, allocated_buffers,
			   largest_alloc_size, total_free_size,
			   free_buffers, largest_free_size);
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	/*
	 * Find the number and total size of buffers allocated by the current
	 * caller; the idea is that once we cross the threshold, whoever is
	 * responsible for the low async space is likely to try to send
	 * another async txn, and at some point we'll catch them in the act.
	 * This is more efficient than keeping a map per pid.
	 */
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	/*
	 * Only start detecting spammers once we have less than 20% of async
	 * space left (which is less than 10% of total buffer size).
	 */
	if (alloc->free_async_space >= alloc->buffer_size / 10) {
		alloc->oneway_spam_detected = false;
		return false;
	}

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		 n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
			      alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}

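/*
 * Worked example for the thresholds above, assuming the maximum 4 MiB
 * mapping: free_async_space starts at 2 MiB (half of buffer_size).
 * Detection only engages once free_async_space drops below
 * buffer_size / 10 = ~410 KiB, i.e. under 20% of the async space.
 * A pid is then flagged if it holds more than 50 async buffers, or more
 * than buffer_size / 4 = 1 MiB, i.e. over 50% of the async space.
 */
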
/* Callers preallocate @new_buffer, it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				struct binder_buffer *new_buffer,
				size_t size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct rb_node *best_fit = NULL;
	struct binder_buffer *buffer;
	unsigned long next_used_page;
	unsigned long curr_last_page;
	bool should_fail = false;
	size_t buffer_size;

	trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async,
			&should_fail);
	if (should_fail) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf failed, not allowed to alloc more async space\n",
			      alloc->pid);
		buffer = ERR_PTR(-EPERM);
		goto out;
	}

	if (is_async && alloc->free_async_space < size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;
			break;
		}
	}

	if (unlikely(!best_fit)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		debug_no_space_locked(alloc);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	if (buffer_size != size) {
		/* Found an oversized buffer that needs to be split */
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		WARN_ON(n || buffer_size == size);
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
		new_buffer = NULL;
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	/*
	 * Now we remove the pages from the freelist. A clever calculation
	 * with buffer_size determines if the last page is shared with an
	 * adjacent in-use buffer. In such case, the page has been already
	 * removed from the freelist so we trim our range short.
	 */
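	/*
	 * Worked example (illustrative numbers, 4 KiB pages): a free chunk
	 * at user_data 0x1800 with buffer_size 0x2000 serves a request of
	 * size 0x1400. next_used_page = (0x1800 + 0x2000) & PAGE_MASK =
	 * 0x3000 and curr_last_page = PAGE_ALIGN(0x1800 + 0x1400) = 0x3000,
	 * so only the page at 0x2000 is pulled off the freelist; the pages
	 * at 0x1000 and 0x3000 are shared with adjacent in-use buffers and
	 * are already off the freelist.
	 */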
	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
				min(next_used_page, curr_last_page));

	rb_erase(&buffer->rb_node, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
		if (debug_low_async_space_locked(alloc))
			buffer->oneway_spam_suspect = true;
	}

out:
	/* Discard possibly unused new_buffer */
	kfree(new_buffer);
	return buffer;
}

/* Calculate the sanitized total size, returns 0 for invalid request */
static inline size_t sanitized_size(size_t data_size,
				    size_t offsets_size,
				    size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}
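
/*
 * For example, on a 64-bit kernel a request with data_size = 3,
 * offsets_size = 8 and extra_buffers_size = 0 yields ALIGN(3, 8) +
 * ALIGN(8, 8) = 16 bytes, while an all-zero request is padded up to
 * sizeof(void *) = 8 bytes so the buffer still gets a unique address.
 * A request whose aligned sum wraps around SIZE_MAX returns 0 and is
 * rejected by the caller as -EINVAL.
 */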

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer, *next;
	size_t size;
	int ret;

	/* Check binder_alloc is fully initialized */
	if (!binder_alloc_is_mapped(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
	if (unlikely(!size)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
				   alloc->pid, data_size, offsets_size,
				   extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}

	/* Preallocate the next buffer */
	next = kzalloc(sizeof(*next), GFP_KERNEL);
	if (!next)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
	if (IS_ERR(buffer)) {
		mutex_unlock(&alloc_to_wrap(alloc)->mutex);
		goto out;
	}

	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = current->tgid;
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);

	ret = binder_install_buffer_pages(alloc, buffer, size);
	if (ret) {
		binder_alloc_free_buf(alloc, buffer);
		buffer = ERR_PTR(ret);
	}
out:
	return buffer;
}

static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
	return buffer->user_data & PAGE_MASK;
}

static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (buffer->user_data - 1) & PAGE_MASK;
}

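/*
 * When a free buffer is deleted (merged into a neighbor), its page can
 * only go back on the freelist if no remaining buffer still overlaps it.
 * The helpers above reduce this to page-boundary comparisons: the first
 * page is shared if the previous buffer ends on it, and likewise if the
 * following buffer starts on it. Only a page used exclusively by the
 * deleted buffer is handed to binder_lru_freelist_add().
 */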
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next;

	if (PAGE_ALIGNED(buffer->user_data))
		goto skip_freelist;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
		goto skip_freelist;

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer))
			goto skip_freelist;
	}

	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
				buffer_start_page(buffer) + PAGE_SIZE);
skip_freelist:
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
				(buffer->user_data + buffer_size) & PAGE_MASK);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;

	*pgoffp = pgoff;

	return alloc_to_wrap(alloc)->pages[index];
}
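
/*
 * For example, with 4 KiB pages, a buffer whose user_data sits 0x2100
 * bytes into the mapping and a buffer_offset of 0x80 give
 * buffer_space_offset = 0x2180, so index = 2 and pgoff = 0x180.
 */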

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			    struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 *      -EINVAL = vma->vm_mm does not match this proc's mm
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	struct binder_buffer *buffer;
	const char *failure_string;
	int ret;

	if (unlikely(vma->vm_mm != alloc->mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = vma->vm_start;

	alloc_to_wrap(alloc)->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
				sizeof(alloc_to_wrap(alloc)->pages[0]),
				GFP_KERNEL);
	if (!alloc_to_wrap(alloc)->pages) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_mapped(alloc, true);

	return 0;

err_alloc_buf_struct_failed:
	kvfree(alloc_to_wrap(alloc)->pages);
	alloc_to_wrap(alloc)->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = 0;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}


void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	BUG_ON(alloc_to_wrap(alloc)->mapped);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc_to_wrap(alloc)->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			struct page *page;
			bool on_lru;

			page = binder_get_installed_page(alloc, i);
			if (!page)
				continue;

			on_lru = list_lru_del(&binder_freelist,
					      page_to_lru(page),
					      page_to_nid(page),
					      NULL);
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d %s\n",
				     __func__, alloc->pid, i,
				     on_lru ? "on lru" : "active");
			binder_free_page(page);
			page_count++;
		}
	}
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
	kvfree(alloc_to_wrap(alloc)->pages);
	if (alloc->mm)
		mmdrop(alloc->mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct binder_buffer *buffer;
	struct rb_node *n;

	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
			   buffer->debug_id,
			   buffer->user_data - alloc->buffer,
			   buffer->data_size, buffer->offsets_size,
			   buffer->extra_buffers_size,
			   buffer->transaction ? "active" : "delivered");
	}
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_is_mapped(alloc)) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = binder_get_installed_page(alloc, i);
			if (!page)
				free++;
			else if (list_empty(page_to_lru(page)))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->mapped to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_mapped(alloc, false);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of @item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
	struct binder_alloc *alloc = mdata->alloc;
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	struct page *page_to_free;
	unsigned long page_addr;
	int mm_locked = 0;
	size_t index;

	if (!mmget_not_zero(mm))
		goto err_mmget;

	index = mdata->page_index;
	page_addr = alloc->buffer + index * PAGE_SIZE;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, page_addr);
	if (!vma) {
		/* fall back to mmap_lock */
		if (!mmap_read_trylock(mm))
			goto err_mmap_read_lock_failed;
		mm_locked = 1;
		vma = vma_lookup(mm, page_addr);
	}

	if (!mutex_trylock(&alloc_to_wrap(alloc)->mutex))
		goto err_get_alloc_mutex_failed;

	/*
	 * Since a binder_alloc can only be mapped once, we ensure
	 * the vma corresponds to this mapping by checking whether
	 * the binder_alloc is still mapped.
	 */
	if (vma && !binder_alloc_is_mapped(alloc))
		goto err_invalid_vma;

	trace_binder_unmap_kernel_start(alloc, index);

	page_to_free = alloc_to_wrap(alloc)->pages[index];
	binder_set_installed_page(alloc, index, NULL);

	trace_binder_unmap_kernel_end(alloc, index);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);
	}

	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
	if (mm_locked)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
	mmput_async(mm);
	binder_free_page(page_to_free);

	spin_lock(lock);
	return LRU_REMOVED_RETRY;

err_invalid_vma:
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
err_get_alloc_mutex_failed:
	if (mm_locked)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_freelist);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
}

static struct shrinker *binder_shrinker;

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
	mmgrab(alloc->mm);
	mutex_init(&alloc_to_wrap(alloc)->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret;

	ret = list_lru_init(&binder_freelist);
	if (ret)
		return ret;

	binder_shrinker = shrinker_alloc(0, "android-binder");
	if (!binder_shrinker) {
		list_lru_destroy(&binder_freelist);
		return -ENOMEM;
	}

	binder_shrinker->count_objects = binder_shrink_count;
	binder_shrinker->scan_objects = binder_shrink_scan;

	shrinker_register(binder_shrinker);

	return 0;
}

void binder_alloc_shrinker_exit(void)
{
	shrinker_free(binder_shrinker);
	list_lru_destroy(&binder_freelist);
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}
EXPORT_SYMBOL_GPL(binder_alloc_copy_from_buffer);
1420