1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder_alloc.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2017 Google, Inc.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/list.h>
12 #include <linux/sched/mm.h>
13 #include <linux/module.h>
14 #include <linux/rtmutex.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/list_lru.h>
21 #include <linux/ratelimit.h>
22 #include <asm/cacheflush.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/sizes.h>
26 #include "binder_alloc.h"
27 #include "binder_trace.h"
28 #include <trace/hooks/binder.h>
29 
30 struct list_lru binder_freelist;
31 
32 static DEFINE_MUTEX(binder_alloc_mmap_lock);
33 
34 enum {
35 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
36 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
37 	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
38 	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
39 };
40 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
41 
42 module_param_named(debug_mask, binder_alloc_debug_mask,
43 		   uint, 0644);
44 
45 #define binder_alloc_debug(mask, x...) \
46 	do { \
47 		if (binder_alloc_debug_mask & mask) \
48 			pr_info_ratelimited(x); \
49 	} while (0)
50 
51 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
52 {
53 	return list_entry(buffer->entry.next, struct binder_buffer, entry);
54 }
55 
56 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
57 {
58 	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
59 }
60 
61 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
62 				       struct binder_buffer *buffer)
63 {
64 	if (list_is_last(&buffer->entry, &alloc->buffers))
65 		return alloc->buffer + alloc->buffer_size - buffer->user_data;
66 	return binder_buffer_next(buffer)->user_data - buffer->user_data;
67 }
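/*
 * Illustration with hypothetical addresses: buffer sizes are implicit in
 * the gap to the next buffer. With alloc->buffer = 0x1000 and
 * alloc->buffer_size = 0x4000, a buffer starting at 0x1800 followed by one
 * at 0x2000 is 0x800 bytes; if that buffer is instead the last in the list
 * it spans 0x1000 + 0x4000 - 0x1800 = 0x3800 bytes.
 */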
68 
69 static void binder_insert_free_buffer(struct binder_alloc *alloc,
70 				      struct binder_buffer *new_buffer)
71 {
72 	struct rb_node **p = &alloc->free_buffers.rb_node;
73 	struct rb_node *parent = NULL;
74 	struct binder_buffer *buffer;
75 	size_t buffer_size;
76 	size_t new_buffer_size;
77 
78 	BUG_ON(!new_buffer->free);
79 
80 	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
81 
82 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
83 		     "%d: add free buffer, size %zd, at %pK\n",
84 		      alloc->pid, new_buffer_size, new_buffer);
85 
86 	while (*p) {
87 		parent = *p;
88 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
89 		BUG_ON(!buffer->free);
90 
91 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
92 
93 		if (new_buffer_size < buffer_size)
94 			p = &parent->rb_left;
95 		else
96 			p = &parent->rb_right;
97 	}
98 	rb_link_node(&new_buffer->rb_node, parent, p);
99 	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
100 }
101 
102 static void binder_insert_allocated_buffer_locked(
103 		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
104 {
105 	struct rb_node **p = &alloc->allocated_buffers.rb_node;
106 	struct rb_node *parent = NULL;
107 	struct binder_buffer *buffer;
108 
109 	BUG_ON(new_buffer->free);
110 
111 	while (*p) {
112 		parent = *p;
113 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
114 		BUG_ON(buffer->free);
115 
116 		if (new_buffer->user_data < buffer->user_data)
117 			p = &parent->rb_left;
118 		else if (new_buffer->user_data > buffer->user_data)
119 			p = &parent->rb_right;
120 		else
121 			BUG();
122 	}
123 	rb_link_node(&new_buffer->rb_node, parent, p);
124 	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
125 }
126 
127 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
128 		struct binder_alloc *alloc,
129 		unsigned long user_ptr)
130 {
131 	struct rb_node *n = alloc->allocated_buffers.rb_node;
132 	struct binder_buffer *buffer;
133 
134 	while (n) {
135 		buffer = rb_entry(n, struct binder_buffer, rb_node);
136 		BUG_ON(buffer->free);
137 
138 		if (user_ptr < buffer->user_data) {
139 			n = n->rb_left;
140 		} else if (user_ptr > buffer->user_data) {
141 			n = n->rb_right;
142 		} else {
143 			/*
144 			 * Guard against user threads attempting to
145 			 * free the buffer when in use by kernel or
146 			 * after it's already been freed.
147 			 */
148 			if (!buffer->allow_user_free)
149 				return ERR_PTR(-EPERM);
150 			buffer->allow_user_free = 0;
151 			return buffer;
152 		}
153 	}
154 	return NULL;
155 }
156 
157 /**
158  * binder_alloc_prepare_to_free() - get buffer given user ptr
159  * @alloc:	binder_alloc for this proc
160  * @user_ptr:	User pointer to buffer data
161  *
162  * Validate the userspace pointer to buffer data and return the buffer
163  * corresponding to that pointer, searching the rb tree for a buffer whose
164  * user data pointer matches.
165  *
166  * Return:	Pointer to buffer or NULL
167  */
168 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
169 						   unsigned long user_ptr)
170 {
171 	struct binder_buffer *buffer;
172 
173 	spin_lock(&alloc->lock);
174 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
175 	spin_unlock(&alloc->lock);
176 	return buffer;
177 }
178 
179 static inline void
180 binder_set_installed_page(struct binder_lru_page *lru_page,
181 			  struct page *page)
182 {
183 	/* Pairs with acquire in binder_get_installed_page() */
184 	smp_store_release(&lru_page->page_ptr, page);
185 }
186 
187 static inline struct page *
188 binder_get_installed_page(struct binder_lru_page *lru_page)
189 {
190 	/* Pairs with release in binder_set_installed_page() */
191 	return smp_load_acquire(&lru_page->page_ptr);
192 }
193 
194 static void binder_lru_freelist_add(struct binder_alloc *alloc,
195 				    unsigned long start, unsigned long end)
196 {
197 	struct binder_lru_page *page;
198 	unsigned long page_addr;
199 
200 	trace_binder_update_page_range(alloc, false, start, end);
201 
202 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
203 		size_t index;
204 		int ret;
205 
206 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
207 		page = &alloc->pages[index];
208 
209 		if (!binder_get_installed_page(page))
210 			continue;
211 
212 		trace_binder_free_lru_start(alloc, index);
213 
214 		ret = list_lru_add(&binder_freelist, &page->lru);
215 		WARN_ON(!ret);
216 
217 		trace_binder_free_lru_end(alloc, index);
218 	}
219 }
220 
221 static int binder_install_single_page(struct binder_alloc *alloc,
222 				      struct binder_lru_page *lru_page,
223 				      unsigned long addr)
224 {
225 	struct page *page;
226 	int ret = 0;
227 
228 	if (!mmget_not_zero(alloc->mm))
229 		return -ESRCH;
230 
231 	/*
232 	 * Protected with mmap_lock in write mode as multiple tasks
233 	 * might race to install the same page.
234 	 */
235 	mmap_write_lock(alloc->mm);
236 	if (binder_get_installed_page(lru_page))
237 		goto out;
238 
239 	if (!alloc->vma) {
240 		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
241 		ret = -ESRCH;
242 		goto out;
243 	}
244 
245 	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
246 	if (!page) {
247 		pr_err("%d: failed to allocate page\n", alloc->pid);
248 		ret = -ENOMEM;
249 		goto out;
250 	}
251 
252 	ret = vm_insert_page(alloc->vma, addr, page);
253 	if (ret) {
254 		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
255 		       alloc->pid, __func__, addr - alloc->buffer, ret);
256 		__free_page(page);
257 		ret = -ENOMEM;
258 		goto out;
259 	}
260 
261 	/* Mark page installation complete and safe to use */
262 	binder_set_installed_page(lru_page, page);
263 out:
264 	mmap_write_unlock(alloc->mm);
265 	mmput_async(alloc->mm);
266 	return ret;
267 }
268 
269 static int binder_install_buffer_pages(struct binder_alloc *alloc,
270 				       struct binder_buffer *buffer,
271 				       size_t size)
272 {
273 	struct binder_lru_page *page;
274 	unsigned long start, final;
275 	unsigned long page_addr;
276 
277 	start = buffer->user_data & PAGE_MASK;
278 	final = PAGE_ALIGN(buffer->user_data + size);
279 
280 	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
281 		unsigned long index;
282 		int ret;
283 
284 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
285 		page = &alloc->pages[index];
286 
287 		if (binder_get_installed_page(page))
288 			continue;
289 
290 		trace_binder_alloc_page_start(alloc, index);
291 
292 		ret = binder_install_single_page(alloc, page, page_addr);
293 		if (ret)
294 			return ret;
295 
296 		trace_binder_alloc_page_end(alloc, index);
297 	}
298 
299 	return 0;
300 }
301 
302 /* The range of pages should exclude those shared with other buffers */
303 static void binder_lru_freelist_del(struct binder_alloc *alloc,
304 				    unsigned long start, unsigned long end)
305 {
306 	struct binder_lru_page *page;
307 	unsigned long page_addr;
308 
309 	trace_binder_update_page_range(alloc, true, start, end);
310 
311 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
312 		unsigned long index;
313 		bool on_lru;
314 
315 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
316 		page = &alloc->pages[index];
317 
318 		if (page->page_ptr) {
319 			trace_binder_alloc_lru_start(alloc, index);
320 
321 			on_lru = list_lru_del(&binder_freelist, &page->lru);
322 			WARN_ON(!on_lru);
323 
324 			trace_binder_alloc_lru_end(alloc, index);
325 			continue;
326 		}
327 
328 		if (index + 1 > alloc->pages_high)
329 			alloc->pages_high = index + 1;
330 	}
331 }
332 
333 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
334 		struct vm_area_struct *vma)
335 {
336 	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
337 	smp_store_release(&alloc->vma, vma);
338 }
339 
340 static inline struct vm_area_struct *binder_alloc_get_vma(
341 		struct binder_alloc *alloc)
342 {
343 	/* pairs with smp_store_release in binder_alloc_set_vma() */
344 	return smp_load_acquire(&alloc->vma);
345 }
346 
347 static void debug_no_space_locked(struct binder_alloc *alloc)
348 {
349 	size_t largest_alloc_size = 0;
350 	struct binder_buffer *buffer;
351 	size_t allocated_buffers = 0;
352 	size_t largest_free_size = 0;
353 	size_t total_alloc_size = 0;
354 	size_t total_free_size = 0;
355 	size_t free_buffers = 0;
356 	size_t buffer_size;
357 	struct rb_node *n;
358 
359 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
360 		buffer = rb_entry(n, struct binder_buffer, rb_node);
361 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
362 		allocated_buffers++;
363 		total_alloc_size += buffer_size;
364 		if (buffer_size > largest_alloc_size)
365 			largest_alloc_size = buffer_size;
366 	}
367 
368 	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
369 		buffer = rb_entry(n, struct binder_buffer, rb_node);
370 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
371 		free_buffers++;
372 		total_free_size += buffer_size;
373 		if (buffer_size > largest_free_size)
374 			largest_free_size = buffer_size;
375 	}
376 
377 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
378 			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
379 			   total_alloc_size, allocated_buffers,
380 			   largest_alloc_size, total_free_size,
381 			   free_buffers, largest_free_size);
382 }
383 
384 static bool debug_low_async_space_locked(struct binder_alloc *alloc)
385 {
386 	/*
387 	 * Find the number and total size of buffers allocated by the current caller.
388 	 * The idea is that once we cross the threshold, whoever is responsible
389 	 * for the low async space is likely to try to send another async txn,
390 	 * and at some point we'll catch them in the act. This is more efficient
391 	 * than keeping a map per pid.
392 	 */
393 	struct binder_buffer *buffer;
394 	size_t total_alloc_size = 0;
395 	int pid = current->tgid;
396 	size_t num_buffers = 0;
397 	struct rb_node *n;
398 
399 	/*
400 	 * Only start detecting spammers once we have less than 20% of async
401 	 * space left (which is less than 10% of total buffer size).
402 	 */
403 	if (alloc->free_async_space >= alloc->buffer_size / 10) {
404 		alloc->oneway_spam_detected = false;
405 		return false;
406 	}
407 
408 	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
409 		 n = rb_next(n)) {
410 		buffer = rb_entry(n, struct binder_buffer, rb_node);
411 		if (buffer->pid != pid)
412 			continue;
413 		if (!buffer->async_transaction)
414 			continue;
415 		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
416 		num_buffers++;
417 	}
418 
419 	/*
420 	 * Warn if this pid has more than 50 transactions, or more than 50% of
421 	 * async space (which is 25% of total buffer size). Oneway spam is only
422 	 * detected when the threshold is exceeded.
423 	 */
424 	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
425 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
426 			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
427 			      alloc->pid, pid, num_buffers, total_alloc_size);
428 		if (!alloc->oneway_spam_detected) {
429 			alloc->oneway_spam_detected = true;
430 			return true;
431 		}
432 	}
433 	return false;
434 }
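/*
 * Worked example of the thresholds above, assuming a 4MB mapping: async
 * space starts at 2MB (half of buffer_size, see binder_alloc_mmap_handler).
 * Spammer detection only runs once free_async_space drops below
 * buffer_size / 10 (~410KB), and a pid is flagged once it holds more than
 * 50 async buffers or more than buffer_size / 4 (1MB) of async space.
 */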
435 
436 /* Callers preallocate @new_buffer, it is freed by this function if unused */
437 static struct binder_buffer *binder_alloc_new_buf_locked(
438 				struct binder_alloc *alloc,
439 				struct binder_buffer *new_buffer,
440 				size_t size,
441 				int is_async)
442 {
443 	struct rb_node *n = alloc->free_buffers.rb_node;
444 	struct rb_node *best_fit = NULL;
445 	struct binder_buffer *buffer;
446 	unsigned long next_used_page;
447 	unsigned long curr_last_page;
448 	bool should_fail = false;
449 	size_t buffer_size;
450 
451 	trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async,
452 			&should_fail);
453 	if (should_fail) {
454 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
455 			     "%d: binder_alloc_buf failed, not allowed to alloc more async space\n",
456 			      alloc->pid);
457 		buffer = ERR_PTR(-EPERM);
458 		goto out;
459 	}
460 
461 	if (is_async && alloc->free_async_space < size) {
462 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
463 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
464 			      alloc->pid, size);
465 		buffer = ERR_PTR(-ENOSPC);
466 		goto out;
467 	}
468 
469 	while (n) {
470 		buffer = rb_entry(n, struct binder_buffer, rb_node);
471 		BUG_ON(!buffer->free);
472 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
473 
474 		if (size < buffer_size) {
475 			best_fit = n;
476 			n = n->rb_left;
477 		} else if (size > buffer_size) {
478 			n = n->rb_right;
479 		} else {
480 			best_fit = n;
481 			break;
482 		}
483 	}
484 
485 	if (unlikely(!best_fit)) {
486 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
487 				   "%d: binder_alloc_buf size %zd failed, no address space\n",
488 				   alloc->pid, size);
489 		debug_no_space_locked(alloc);
490 		buffer = ERR_PTR(-ENOSPC);
491 		goto out;
492 	}
493 
494 	if (buffer_size != size) {
495 		/* Found an oversized buffer; it needs to be split */
496 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
497 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
498 
499 		WARN_ON(n || buffer_size == size);
500 		new_buffer->user_data = buffer->user_data + size;
501 		list_add(&new_buffer->entry, &buffer->entry);
502 		new_buffer->free = 1;
503 		binder_insert_free_buffer(alloc, new_buffer);
504 		new_buffer = NULL;
505 	}
506 
507 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
508 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
509 		      alloc->pid, size, buffer, buffer_size);
510 
511 	/*
512 	 * Now we remove the pages from the freelist. A clever calculation
513 	 * with buffer_size determines if the last page is shared with an
514 	 * adjacent in-use buffer. In that case, the page has already been
515 	 * removed from the freelist so we trim our range short.
516 	 */
517 	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
518 	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
519 	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
520 				min(next_used_page, curr_last_page));
521 
522 	rb_erase(&buffer->rb_node, &alloc->free_buffers);
523 	buffer->free = 0;
524 	buffer->allow_user_free = 0;
525 	binder_insert_allocated_buffer_locked(alloc, buffer);
526 	buffer->async_transaction = is_async;
527 	buffer->oneway_spam_suspect = false;
528 	if (is_async) {
529 		alloc->free_async_space -= size;
530 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
531 			     "%d: binder_alloc_buf size %zd async free %zd\n",
532 			      alloc->pid, size, alloc->free_async_space);
533 		if (debug_low_async_space_locked(alloc))
534 			buffer->oneway_spam_suspect = true;
535 	}
536 
537 out:
538 	/* Discard possibly unused new_buffer */
539 	kfree(new_buffer);
540 	return buffer;
541 }
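/*
 * Example of the best-fit/split path above (hypothetical sizes): a
 * 512-byte request that lands on a 4096-byte free buffer keeps the first
 * 512 bytes for the allocation and links the preallocated new_buffer at
 * user_data + 512, so the remaining 3584 bytes go back into the free tree
 * as a smaller free buffer.
 */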
542 
543 /* Calculate the sanitized total size, returns 0 for invalid request */
544 static inline size_t sanitized_size(size_t data_size,
545 				    size_t offsets_size,
546 				    size_t extra_buffers_size)
547 {
548 	size_t total, tmp;
549 
550 	/* Align to pointer size and check for overflows */
551 	tmp = ALIGN(data_size, sizeof(void *)) +
552 		ALIGN(offsets_size, sizeof(void *));
553 	if (tmp < data_size || tmp < offsets_size)
554 		return 0;
555 	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
556 	if (total < tmp || total < extra_buffers_size)
557 		return 0;
558 
559 	/* Pad 0-sized buffers so they get a unique address */
560 	total = max(total, sizeof(void *));
561 
562 	return total;
563 }
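/*
 * Example of the rounding above, assuming sizeof(void *) == 8: data_size
 * = 60, offsets_size = 16 and extra_buffers_size = 0 give
 * ALIGN(60, 8) + ALIGN(16, 8) + ALIGN(0, 8) = 64 + 16 + 0 = 80 bytes.
 * An all-zero request is padded up to sizeof(void *) so that every
 * buffer still gets a unique address.
 */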
564 
565 /**
566  * binder_alloc_new_buf() - Allocate a new binder buffer
567  * @alloc:              binder_alloc for this proc
568  * @data_size:          size of user data buffer
569  * @offsets_size:       user specified buffer offset
570  * @extra_buffers_size: size of extra space for meta-data (eg, security context)
571  * @is_async:           buffer for async transaction
572  *
573  * Allocate a new buffer given the requested sizes. Returns
574  * the kernel version of the buffer pointer. The size allocated
575  * is the sum of the three given sizes (each rounded up to
576  * pointer-sized boundary)
577  * a pointer-sized boundary).
578  * Return:	The allocated buffer or %ERR_PTR(-errno) if error
579  */
580 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
581 					   size_t data_size,
582 					   size_t offsets_size,
583 					   size_t extra_buffers_size,
584 					   int is_async)
585 {
586 	struct binder_buffer *buffer, *next;
587 	size_t size;
588 	int ret;
589 
590 	/* Check binder_alloc is fully initialized */
591 	if (!binder_alloc_get_vma(alloc)) {
592 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
593 				   "%d: binder_alloc_buf, no vma\n",
594 				   alloc->pid);
595 		return ERR_PTR(-ESRCH);
596 	}
597 
598 	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
599 	if (unlikely(!size)) {
600 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
601 				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
602 				   alloc->pid, data_size, offsets_size,
603 				   extra_buffers_size);
604 		return ERR_PTR(-EINVAL);
605 	}
606 
607 	/* Preallocate the next buffer */
608 	next = kzalloc(sizeof(*next), GFP_KERNEL);
609 	if (!next)
610 		return ERR_PTR(-ENOMEM);
611 
612 	spin_lock(&alloc->lock);
613 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
614 	if (IS_ERR(buffer)) {
615 		spin_unlock(&alloc->lock);
616 		goto out;
617 	}
618 
619 	buffer->data_size = data_size;
620 	buffer->offsets_size = offsets_size;
621 	buffer->extra_buffers_size = extra_buffers_size;
622 	buffer->pid = current->tgid;
623 	spin_unlock(&alloc->lock);
624 
625 	ret = binder_install_buffer_pages(alloc, buffer, size);
626 	if (ret) {
627 		binder_alloc_free_buf(alloc, buffer);
628 		buffer = ERR_PTR(ret);
629 	}
630 out:
631 	return buffer;
632 }
633 
634 static unsigned long buffer_start_page(struct binder_buffer *buffer)
635 {
636 	return buffer->user_data & PAGE_MASK;
637 }
638 
639 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
640 {
641 	return (buffer->user_data - 1) & PAGE_MASK;
642 }
643 
644 static void binder_delete_free_buffer(struct binder_alloc *alloc,
645 				      struct binder_buffer *buffer)
646 {
647 	struct binder_buffer *prev, *next;
648 
649 	if (PAGE_ALIGNED(buffer->user_data))
650 		goto skip_freelist;
651 
652 	BUG_ON(alloc->buffers.next == &buffer->entry);
653 	prev = binder_buffer_prev(buffer);
654 	BUG_ON(!prev->free);
655 	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
656 		goto skip_freelist;
657 
658 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
659 		next = binder_buffer_next(buffer);
660 		if (buffer_start_page(next) == buffer_start_page(buffer))
661 			goto skip_freelist;
662 	}
663 
664 	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
665 				buffer_start_page(buffer) + PAGE_SIZE);
666 skip_freelist:
667 	list_del(&buffer->entry);
668 	kfree(buffer);
669 }
670 
671 static void binder_free_buf_locked(struct binder_alloc *alloc,
672 				   struct binder_buffer *buffer)
673 {
674 	size_t size, buffer_size;
675 
676 	buffer_size = binder_alloc_buffer_size(alloc, buffer);
677 
678 	size = ALIGN(buffer->data_size, sizeof(void *)) +
679 		ALIGN(buffer->offsets_size, sizeof(void *)) +
680 		ALIGN(buffer->extra_buffers_size, sizeof(void *));
681 
682 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
683 		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
684 		      alloc->pid, buffer, size, buffer_size);
685 
686 	BUG_ON(buffer->free);
687 	BUG_ON(size > buffer_size);
688 	BUG_ON(buffer->transaction != NULL);
689 	BUG_ON(buffer->user_data < alloc->buffer);
690 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
691 
692 	if (buffer->async_transaction) {
693 		alloc->free_async_space += buffer_size;
694 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
695 			     "%d: binder_free_buf size %zd async free %zd\n",
696 			      alloc->pid, size, alloc->free_async_space);
697 	}
698 
699 	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
700 				(buffer->user_data + buffer_size) & PAGE_MASK);
701 
702 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
703 	buffer->free = 1;
704 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
705 		struct binder_buffer *next = binder_buffer_next(buffer);
706 
707 		if (next->free) {
708 			rb_erase(&next->rb_node, &alloc->free_buffers);
709 			binder_delete_free_buffer(alloc, next);
710 		}
711 	}
712 	if (alloc->buffers.next != &buffer->entry) {
713 		struct binder_buffer *prev = binder_buffer_prev(buffer);
714 
715 		if (prev->free) {
716 			binder_delete_free_buffer(alloc, buffer);
717 			rb_erase(&prev->rb_node, &alloc->free_buffers);
718 			buffer = prev;
719 		}
720 	}
721 	binder_insert_free_buffer(alloc, buffer);
722 }
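/*
 * Note on the merging above: freeing a buffer coalesces it with a free
 * successor and/or predecessor by deleting the redundant descriptors, so
 * the free tree never holds two adjacent free entries and the surviving
 * entry's size (derived from the gap to its neighbour) grows accordingly.
 */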
723 
724 /**
725  * binder_alloc_get_page() - get kernel pointer for given buffer offset
726  * @alloc: binder_alloc for this proc
727  * @buffer: binder buffer to be accessed
728  * @buffer_offset: offset into @buffer data
729  * @pgoffp: address to copy final page offset to
730  *
731  * Lookup the struct page corresponding to the address
732  * at @buffer_offset into @buffer->user_data. If @pgoffp is not
733  * NULL, the byte-offset into the page is written there.
734  *
735  * The caller is responsible for ensuring that the offset points
736  * to a valid address within the @buffer and that @buffer is
737  * not freeable by the user. Since it can't be freed, we are
738  * guaranteed that the corresponding elements of @alloc->pages[]
739  * cannot change.
740  *
741  * Return: struct page
742  */
743 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
744 					  struct binder_buffer *buffer,
745 					  binder_size_t buffer_offset,
746 					  pgoff_t *pgoffp)
747 {
748 	binder_size_t buffer_space_offset = buffer_offset +
749 		(buffer->user_data - alloc->buffer);
750 	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
751 	size_t index = buffer_space_offset >> PAGE_SHIFT;
752 	struct binder_lru_page *lru_page;
753 
754 	lru_page = &alloc->pages[index];
755 	*pgoffp = pgoff;
756 	return lru_page->page_ptr;
757 }
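/*
 * Index/offset example for the lookup above, assuming 4K pages: a buffer
 * whose user_data lies 0x1804 bytes into the mapping, accessed at
 * buffer_offset 0x10, gives buffer_space_offset = 0x1814, hence
 * index = 1 and *pgoffp = 0x814.
 */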
758 
759 /**
760  * binder_alloc_clear_buf() - zero out buffer
761  * @alloc: binder_alloc for this proc
762  * @buffer: binder buffer to be cleared
763  *
764  * memset the given buffer to 0
765  */
766 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
767 				   struct binder_buffer *buffer)
768 {
769 	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
770 	binder_size_t buffer_offset = 0;
771 
772 	while (bytes) {
773 		unsigned long size;
774 		struct page *page;
775 		pgoff_t pgoff;
776 
777 		page = binder_alloc_get_page(alloc, buffer,
778 					     buffer_offset, &pgoff);
779 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
780 		memset_page(page, pgoff, 0, size);
781 		bytes -= size;
782 		buffer_offset += size;
783 	}
784 }
785 
786 /**
787  * binder_alloc_free_buf() - free a binder buffer
788  * @alloc:	binder_alloc for this proc
789  * @buffer:	kernel pointer to buffer
790  *
791  * Free the buffer allocated via binder_alloc_new_buf()
792  */
793 void binder_alloc_free_buf(struct binder_alloc *alloc,
794 			    struct binder_buffer *buffer)
795 {
796 	/*
797 	 * We could eliminate the call to binder_alloc_clear_buf()
798 	 * from binder_alloc_deferred_release() by moving this to
799 	 * binder_free_buf_locked(). However, that could
800 	 * increase contention for the alloc->lock if clear_on_free
801 	 * is used frequently for large buffers. This lock is not
802 	 * needed for correctness here.
803 	 */
804 	if (buffer->clear_on_free) {
805 		binder_alloc_clear_buf(alloc, buffer);
806 		buffer->clear_on_free = false;
807 	}
808 	spin_lock(&alloc->lock);
809 	binder_free_buf_locked(alloc, buffer);
810 	spin_unlock(&alloc->lock);
811 }
812 
813 /**
814  * binder_alloc_mmap_handler() - map virtual address space for proc
815  * @alloc:	alloc structure for this proc
816  * @vma:	vma passed to mmap()
817  *
818  * Called by binder_mmap() to initialize the space specified in
819  * vma for allocating binder buffers
820  *
821  * Return:
822  *      0 = success
823  *      -EBUSY = address space already mapped
824  *      -ENOMEM = failed to map memory to given address space
825  */
826 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
827 			      struct vm_area_struct *vma)
828 {
829 	struct binder_buffer *buffer;
830 	const char *failure_string;
831 	int ret, i;
832 
833 	if (unlikely(vma->vm_mm != alloc->mm)) {
834 		ret = -EINVAL;
835 		failure_string = "invalid vma->vm_mm";
836 		goto err_invalid_mm;
837 	}
838 
839 	mutex_lock(&binder_alloc_mmap_lock);
840 	if (alloc->buffer_size) {
841 		ret = -EBUSY;
842 		failure_string = "already mapped";
843 		goto err_already_mapped;
844 	}
845 	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
846 				   SZ_4M);
847 	mutex_unlock(&binder_alloc_mmap_lock);
848 
849 	alloc->buffer = vma->vm_start;
850 
851 	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
852 				sizeof(alloc->pages[0]),
853 				GFP_KERNEL);
854 	if (alloc->pages == NULL) {
855 		ret = -ENOMEM;
856 		failure_string = "alloc page array";
857 		goto err_alloc_pages_failed;
858 	}
859 
860 	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
861 		alloc->pages[i].alloc = alloc;
862 		INIT_LIST_HEAD(&alloc->pages[i].lru);
863 	}
864 
865 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
866 	if (!buffer) {
867 		ret = -ENOMEM;
868 		failure_string = "alloc buffer struct";
869 		goto err_alloc_buf_struct_failed;
870 	}
871 
872 	buffer->user_data = alloc->buffer;
873 	list_add(&buffer->entry, &alloc->buffers);
874 	buffer->free = 1;
875 	binder_insert_free_buffer(alloc, buffer);
876 	alloc->free_async_space = alloc->buffer_size / 2;
877 
878 	/* Signal binder_alloc is fully initialized */
879 	binder_alloc_set_vma(alloc, vma);
880 
881 	return 0;
882 
883 err_alloc_buf_struct_failed:
884 	kvfree(alloc->pages);
885 	alloc->pages = NULL;
886 err_alloc_pages_failed:
887 	alloc->buffer = 0;
888 	mutex_lock(&binder_alloc_mmap_lock);
889 	alloc->buffer_size = 0;
890 err_already_mapped:
891 	mutex_unlock(&binder_alloc_mmap_lock);
892 err_invalid_mm:
893 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
894 			   "%s: %d %lx-%lx %s failed %d\n", __func__,
895 			   alloc->pid, vma->vm_start, vma->vm_end,
896 			   failure_string, ret);
897 	return ret;
898 }
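/*
 * Sizing summary for the handler above: buffer_size is the smaller of the
 * vma span and SZ_4M, the whole range initially forms a single free
 * buffer, and half of it becomes the budget for async transactions
 * (free_async_space).
 */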
899 
900 
901 void binder_alloc_deferred_release(struct binder_alloc *alloc)
902 {
903 	struct rb_node *n;
904 	int buffers, page_count;
905 	struct binder_buffer *buffer;
906 
907 	buffers = 0;
908 	spin_lock(&alloc->lock);
909 	BUG_ON(alloc->vma);
910 
911 	while ((n = rb_first(&alloc->allocated_buffers))) {
912 		buffer = rb_entry(n, struct binder_buffer, rb_node);
913 
914 		/* Transaction should already have been freed */
915 		BUG_ON(buffer->transaction);
916 
917 		if (buffer->clear_on_free) {
918 			binder_alloc_clear_buf(alloc, buffer);
919 			buffer->clear_on_free = false;
920 		}
921 		binder_free_buf_locked(alloc, buffer);
922 		buffers++;
923 	}
924 
925 	while (!list_empty(&alloc->buffers)) {
926 		buffer = list_first_entry(&alloc->buffers,
927 					  struct binder_buffer, entry);
928 		WARN_ON(!buffer->free);
929 
930 		list_del(&buffer->entry);
931 		WARN_ON_ONCE(!list_empty(&alloc->buffers));
932 		kfree(buffer);
933 	}
934 
935 	page_count = 0;
936 	if (alloc->pages) {
937 		int i;
938 
939 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
940 			bool on_lru;
941 
942 			if (!alloc->pages[i].page_ptr)
943 				continue;
944 
945 			on_lru = list_lru_del(&binder_freelist,
946 					      &alloc->pages[i].lru);
947 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
948 				     "%s: %d: page %d %s\n",
949 				     __func__, alloc->pid, i,
950 				     on_lru ? "on lru" : "active");
951 			__free_page(alloc->pages[i].page_ptr);
952 			page_count++;
953 		}
954 	}
955 	spin_unlock(&alloc->lock);
956 	kvfree(alloc->pages);
957 	if (alloc->mm)
958 		mmdrop(alloc->mm);
959 
960 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
961 		     "%s: %d buffers %d, pages %d\n",
962 		     __func__, alloc->pid, buffers, page_count);
963 }
964 
965 /**
966  * binder_alloc_print_allocated() - print buffer info
967  * @m:     seq_file for output via seq_printf()
968  * @alloc: binder_alloc for this proc
969  *
970  * Prints information about every buffer associated with
971  * the binder_alloc state to the given seq_file
972  */
973 void binder_alloc_print_allocated(struct seq_file *m,
974 				  struct binder_alloc *alloc)
975 {
976 	struct binder_buffer *buffer;
977 	struct rb_node *n;
978 
979 	spin_lock(&alloc->lock);
980 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
981 		buffer = rb_entry(n, struct binder_buffer, rb_node);
982 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
983 			   buffer->debug_id,
984 			   buffer->user_data - alloc->buffer,
985 			   buffer->data_size, buffer->offsets_size,
986 			   buffer->extra_buffers_size,
987 			   buffer->transaction ? "active" : "delivered");
988 	}
989 	spin_unlock(&alloc->lock);
990 }
991 
992 /**
993  * binder_alloc_print_pages() - print page usage
994  * @m:     seq_file for output via seq_printf()
995  * @alloc: binder_alloc for this proc
996  */
997 void binder_alloc_print_pages(struct seq_file *m,
998 			      struct binder_alloc *alloc)
999 {
1000 	struct binder_lru_page *page;
1001 	int i;
1002 	int active = 0;
1003 	int lru = 0;
1004 	int free = 0;
1005 
1006 	spin_lock(&alloc->lock);
1007 	/*
1008 	 * Make sure the binder_alloc is fully initialized, otherwise we might
1009 	 * read inconsistent state.
1010 	 */
1011 	if (binder_alloc_get_vma(alloc) != NULL) {
1012 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
1013 			page = &alloc->pages[i];
1014 			if (!page->page_ptr)
1015 				free++;
1016 			else if (list_empty(&page->lru))
1017 				active++;
1018 			else
1019 				lru++;
1020 		}
1021 	}
1022 	spin_unlock(&alloc->lock);
1023 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
1024 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
1025 }
1026 
1027 /**
1028  * binder_alloc_get_allocated_count() - return count of buffers
1029  * @alloc: binder_alloc for this proc
1030  *
1031  * Return: count of allocated buffers
1032  */
1033 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1034 {
1035 	struct rb_node *n;
1036 	int count = 0;
1037 
1038 	spin_lock(&alloc->lock);
1039 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
1040 		count++;
1041 	spin_unlock(&alloc->lock);
1042 	return count;
1043 }
1044 
1045 
1046 /**
1047  * binder_alloc_vma_close() - invalidate address space
1048  * @alloc: binder_alloc for this proc
1049  *
1050  * Called from binder_vma_close() when releasing address space.
1051  * Clears alloc->vma to prevent new incoming transactions from
1052  * allocating more buffers.
1053  */
1054 void binder_alloc_vma_close(struct binder_alloc *alloc)
1055 {
1056 	binder_alloc_set_vma(alloc, NULL);
1057 }
1058 
1059 /**
1060  * binder_alloc_free_page() - shrinker callback to free pages
1061  * @item:   item to free
1062  * @lock:   lock protecting the item
1063  * @cb_arg: callback argument
1064  *
1065  * Called from list_lru_walk() in binder_shrink_scan() to free
1066  * up pages when the system is under memory pressure.
1067  */
1068 enum lru_status binder_alloc_free_page(struct list_head *item,
1069 				       struct list_lru_one *lru,
1070 				       spinlock_t *lock,
1071 				       void *cb_arg)
1072 	__must_hold(lock)
1073 {
1074 	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
1075 	struct binder_alloc *alloc = page->alloc;
1076 	struct mm_struct *mm = alloc->mm;
1077 	struct vm_area_struct *vma;
1078 	struct page *page_to_free;
1079 	unsigned long page_addr;
1080 	size_t index;
1081 
1082 	if (!mmget_not_zero(mm))
1083 		goto err_mmget;
1084 	if (!mmap_read_trylock(mm))
1085 		goto err_mmap_read_lock_failed;
1086 	if (!spin_trylock(&alloc->lock))
1087 		goto err_get_alloc_lock_failed;
1088 	if (!page->page_ptr)
1089 		goto err_page_already_freed;
1090 
1091 	index = page - alloc->pages;
1092 	page_addr = alloc->buffer + index * PAGE_SIZE;
1093 
1094 	vma = vma_lookup(mm, page_addr);
1095 	if (vma && vma != binder_alloc_get_vma(alloc))
1096 		goto err_invalid_vma;
1097 
1098 	trace_binder_unmap_kernel_start(alloc, index);
1099 
1100 	page_to_free = page->page_ptr;
1101 	page->page_ptr = NULL;
1102 
1103 	trace_binder_unmap_kernel_end(alloc, index);
1104 
1105 	list_lru_isolate(lru, item);
1106 	spin_unlock(&alloc->lock);
1107 	spin_unlock(lock);
1108 
1109 	if (vma) {
1110 		trace_binder_unmap_user_start(alloc, index);
1111 
1112 		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
1113 
1114 		trace_binder_unmap_user_end(alloc, index);
1115 	}
1116 
1117 	mmap_read_unlock(mm);
1118 	mmput_async(mm);
1119 	__free_page(page_to_free);
1120 
1121 	spin_lock(lock);
1122 	return LRU_REMOVED_RETRY;
1123 
1124 err_invalid_vma:
1125 err_page_already_freed:
1126 	spin_unlock(&alloc->lock);
1127 err_get_alloc_lock_failed:
1128 	mmap_read_unlock(mm);
1129 err_mmap_read_lock_failed:
1130 	mmput_async(mm);
1131 err_mmget:
1132 	return LRU_SKIP;
1133 }
1134 
1135 static unsigned long
1136 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1137 {
1138 	return list_lru_count(&binder_freelist);
1139 }
1140 
1141 static unsigned long
1142 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1143 {
1144 	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
1145 			    NULL, sc->nr_to_scan);
1146 }
1147 
1148 static struct shrinker binder_shrinker = {
1149 	.count_objects = binder_shrink_count,
1150 	.scan_objects = binder_shrink_scan,
1151 	.seeks = DEFAULT_SEEKS,
1152 };
1153 
1154 /**
1155  * binder_alloc_init() - called by binder_open() for per-proc initialization
1156  * @alloc: binder_alloc for this proc
1157  *
1158  * Called from binder_open() to initialize binder_alloc fields for
1159  * new binder proc
1160  */
1161 void binder_alloc_init(struct binder_alloc *alloc)
1162 {
1163 	alloc->pid = current->group_leader->pid;
1164 	alloc->mm = current->mm;
1165 	mmgrab(alloc->mm);
1166 	spin_lock_init(&alloc->lock);
1167 	INIT_LIST_HEAD(&alloc->buffers);
1168 }
1169 
1170 int binder_alloc_shrinker_init(void)
1171 {
1172 	int ret = list_lru_init(&binder_freelist);
1173 
1174 	if (ret == 0) {
1175 		ret = register_shrinker(&binder_shrinker, "android-binder");
1176 		if (ret)
1177 			list_lru_destroy(&binder_freelist);
1178 	}
1179 	return ret;
1180 }
1181 
1182 void binder_alloc_shrinker_exit(void)
1183 {
1184 	unregister_shrinker(&binder_shrinker);
1185 	list_lru_destroy(&binder_freelist);
1186 }
1187 
1188 /**
1189  * check_buffer() - verify that buffer/offset is safe to access
1190  * @alloc: binder_alloc for this proc
1191  * @buffer: binder buffer to be accessed
1192  * @offset: offset into @buffer data
1193  * @bytes: bytes to access from offset
1194  *
1195  * Check that the @offset/@bytes are within the size of the given
1196  * @buffer and that the buffer is currently active and not freeable.
1197  * Offsets must also be multiples of sizeof(u32). The kernel is
1198  * allowed to touch the buffer in two cases:
1199  *
1200  * 1) when the buffer is being created:
1201  *     (buffer->free == 0 && buffer->allow_user_free == 0)
1202  * 2) when the buffer is being torn down:
1203  *     (buffer->free == 0 && buffer->transaction == NULL).
1204  *
1205  * Return: true if the buffer is safe to access
1206  */
1207 static inline bool check_buffer(struct binder_alloc *alloc,
1208 				struct binder_buffer *buffer,
1209 				binder_size_t offset, size_t bytes)
1210 {
1211 	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1212 
1213 	return buffer_size >= bytes &&
1214 		offset <= buffer_size - bytes &&
1215 		IS_ALIGNED(offset, sizeof(u32)) &&
1216 		!buffer->free &&
1217 		(!buffer->allow_user_free || !buffer->transaction);
1218 }
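/*
 * Example for the checks above (hypothetical numbers): with buffer_size =
 * 128, an access at offset 120 for 8 bytes passes (4-byte aligned and
 * 120 + 8 <= 128), whereas offset 126 for 4 bytes fails both the
 * alignment and the bounds test.
 */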
1219 
1220 /**
1221  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1222  * @alloc: binder_alloc for this proc
1223  * @buffer: binder buffer to be accessed
1224  * @buffer_offset: offset into @buffer data
1225  * @from: userspace pointer to source buffer
1226  * @bytes: bytes to copy
1227  *
1228  * Copy bytes from source userspace to target buffer.
1229  *
1230  * Return: bytes remaining to be copied
1231  */
1232 unsigned long
1233 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1234 				 struct binder_buffer *buffer,
1235 				 binder_size_t buffer_offset,
1236 				 const void __user *from,
1237 				 size_t bytes)
1238 {
1239 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1240 		return bytes;
1241 
1242 	while (bytes) {
1243 		unsigned long size;
1244 		unsigned long ret;
1245 		struct page *page;
1246 		pgoff_t pgoff;
1247 		void *kptr;
1248 
1249 		page = binder_alloc_get_page(alloc, buffer,
1250 					     buffer_offset, &pgoff);
1251 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1252 		kptr = kmap_local_page(page) + pgoff;
1253 		ret = copy_from_user(kptr, from, size);
1254 		kunmap_local(kptr);
1255 		if (ret)
1256 			return bytes - size + ret;
1257 		bytes -= size;
1258 		from += size;
1259 		buffer_offset += size;
1260 	}
1261 	return 0;
1262 }
1263 
1264 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1265 				       bool to_buffer,
1266 				       struct binder_buffer *buffer,
1267 				       binder_size_t buffer_offset,
1268 				       void *ptr,
1269 				       size_t bytes)
1270 {
1271 	/* All copies must be 32-bit aligned and sized in 32-bit multiples */
1272 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1273 		return -EINVAL;
1274 
1275 	while (bytes) {
1276 		unsigned long size;
1277 		struct page *page;
1278 		pgoff_t pgoff;
1279 
1280 		page = binder_alloc_get_page(alloc, buffer,
1281 					     buffer_offset, &pgoff);
1282 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1283 		if (to_buffer)
1284 			memcpy_to_page(page, pgoff, ptr, size);
1285 		else
1286 			memcpy_from_page(ptr, page, pgoff, size);
1287 		bytes -= size;
1288 		pgoff = 0;
1289 		ptr = ptr + size;
1290 		buffer_offset += size;
1291 	}
1292 	return 0;
1293 }
1294 
1295 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1296 				struct binder_buffer *buffer,
1297 				binder_size_t buffer_offset,
1298 				void *src,
1299 				size_t bytes)
1300 {
1301 	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1302 					   src, bytes);
1303 }
1304 
1305 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1306 				  void *dest,
1307 				  struct binder_buffer *buffer,
1308 				  binder_size_t buffer_offset,
1309 				  size_t bytes)
1310 {
1311 	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1312 					   dest, bytes);
1313 }
1314 EXPORT_SYMBOL_GPL(binder_alloc_copy_from_buffer);
1315