1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder_alloc.c
3 *
4 * Android IPC Subsystem
5 *
6 * Copyright (C) 2007-2017 Google, Inc.
7 */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/list.h>
12 #include <linux/sched/mm.h>
13 #include <linux/module.h>
14 #include <linux/rtmutex.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/list_lru.h>
21 #include <linux/ratelimit.h>
22 #include <asm/cacheflush.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/sizes.h>
26 #include "binder_internal.h"
27 #include "binder_trace.h"
28 #include <trace/hooks/binder.h>
29
30 struct list_lru binder_freelist;
31
32 static DEFINE_MUTEX(binder_alloc_mmap_lock);
33
34 enum {
35 BINDER_DEBUG_USER_ERROR = 1U << 0,
36 BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
37 BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
38 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
39 };
40 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
41
42 module_param_named(debug_mask, binder_alloc_debug_mask,
43 uint, 0644);
44
45 #define binder_alloc_debug(mask, x...) \
46 do { \
47 if (binder_alloc_debug_mask & mask) \
48 pr_info_ratelimited(x); \
49 } while (0)
50
51 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
52 {
53 return list_entry(buffer->entry.next, struct binder_buffer, entry);
54 }
55
56 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
57 {
58 return list_entry(buffer->entry.prev, struct binder_buffer, entry);
59 }
60
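/*
 * A buffer's size is never stored explicitly: it is the distance from its
 * user_data start to the start of the next buffer in the address-ordered
 * alloc->buffers list, or to the end of the mapping for the last buffer.
 */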
61 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
62 struct binder_buffer *buffer)
63 {
64 if (list_is_last(&buffer->entry, &alloc->buffers))
65 return alloc->buffer + alloc->buffer_size - buffer->user_data;
66 return binder_buffer_next(buffer)->user_data - buffer->user_data;
67 }
68
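/*
 * alloc->free_buffers is an rbtree keyed by buffer size (equal sizes go to
 * the right); binder_alloc_new_buf_locked() walks it to find a best fit.
 */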
69 static void binder_insert_free_buffer(struct binder_alloc *alloc,
70 struct binder_buffer *new_buffer)
71 {
72 struct rb_node **p = &alloc->free_buffers.rb_node;
73 struct rb_node *parent = NULL;
74 struct binder_buffer *buffer;
75 size_t buffer_size;
76 size_t new_buffer_size;
77
78 BUG_ON(!new_buffer->free);
79
80 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
81
82 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
83 "%d: add free buffer, size %zd, at %pK\n",
84 alloc->pid, new_buffer_size, new_buffer);
85
86 while (*p) {
87 parent = *p;
88 buffer = rb_entry(parent, struct binder_buffer, rb_node);
89 BUG_ON(!buffer->free);
90
91 buffer_size = binder_alloc_buffer_size(alloc, buffer);
92
93 if (new_buffer_size < buffer_size)
94 p = &parent->rb_left;
95 else
96 p = &parent->rb_right;
97 }
98 rb_link_node(&new_buffer->rb_node, parent, p);
99 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
100 }
101
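/*
 * alloc->allocated_buffers is keyed by userspace address, so a buffer can
 * be found again from the pointer userspace hands back when freeing it.
 */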
102 static void binder_insert_allocated_buffer_locked(
103 struct binder_alloc *alloc, struct binder_buffer *new_buffer)
104 {
105 struct rb_node **p = &alloc->allocated_buffers.rb_node;
106 struct rb_node *parent = NULL;
107 struct binder_buffer *buffer;
108
109 BUG_ON(new_buffer->free);
110
111 while (*p) {
112 parent = *p;
113 buffer = rb_entry(parent, struct binder_buffer, rb_node);
114 BUG_ON(buffer->free);
115
116 if (new_buffer->user_data < buffer->user_data)
117 p = &parent->rb_left;
118 else if (new_buffer->user_data > buffer->user_data)
119 p = &parent->rb_right;
120 else
121 BUG();
122 }
123 rb_link_node(&new_buffer->rb_node, parent, p);
124 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
125 }
126
127 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
128 struct binder_alloc *alloc,
129 unsigned long user_ptr)
130 {
131 struct rb_node *n = alloc->allocated_buffers.rb_node;
132 struct binder_buffer *buffer;
133
134 while (n) {
135 buffer = rb_entry(n, struct binder_buffer, rb_node);
136 BUG_ON(buffer->free);
137
138 if (user_ptr < (uintptr_t)buffer->user_data) {
139 n = n->rb_left;
140 } else if (user_ptr > (uintptr_t)buffer->user_data) {
141 n = n->rb_right;
142 } else {
143 /*
144 * Guard against user threads attempting to
145 * free the buffer when in use by kernel or
146 * after it's already been freed.
147 */
148 if (!buffer->allow_user_free)
149 return ERR_PTR(-EPERM);
150 buffer->allow_user_free = 0;
151 return buffer;
152 }
153 }
154 return NULL;
155 }
156
157 /**
158 * binder_alloc_prepare_to_free() - get buffer given user ptr
159 * @alloc: binder_alloc for this proc
160 * @user_ptr: User pointer to buffer data
161 *
162 * Validate userspace pointer to buffer data and return buffer corresponding to
163 * that user pointer. Search the rb tree for buffer that matches user data
164 * pointer.
165 *
166 * Return: Pointer to buffer, NULL if no match, or ERR_PTR(-EPERM) if the
buffer is not currently eligible for user free
167 */
168 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
169 unsigned long user_ptr)
170 {
171 struct binder_buffer *buffer;
172
173 binder_alloc_lock(alloc);
174 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
175 binder_alloc_unlock(alloc);
176 return buffer;
177 }
178
179 static inline void
180 binder_set_installed_page(struct binder_lru_page *lru_page,
181 struct page *page)
182 {
183 /* Pairs with acquire in binder_get_installed_page() */
184 smp_store_release(&lru_page->page_ptr, page);
185 }
186
187 static inline struct page *
188 binder_get_installed_page(struct binder_lru_page *lru_page)
189 {
190 /* Pairs with release in binder_set_installed_page() */
191 return smp_load_acquire(&lru_page->page_ptr);
192 }
193
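/*
 * Pages backing freed buffers are not released immediately. They are put on
 * the global binder_freelist and only freed by the shrinker callback
 * (binder_alloc_free_page()) under memory pressure, so the next allocation
 * can reuse them cheaply via binder_lru_freelist_del().
 */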
194 static void binder_lru_freelist_add(struct binder_alloc *alloc,
195 unsigned long start, unsigned long end)
196 {
197 struct binder_lru_page *page;
198 unsigned long page_addr;
199
200 trace_binder_update_page_range(alloc, false, start, end);
201
202 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
203 size_t index;
204 int ret;
205
206 index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
207 page = &alloc->pages[index];
208
209 if (!binder_get_installed_page(page))
210 continue;
211
212 trace_binder_free_lru_start(alloc, index);
213
214 ret = list_lru_add(&binder_freelist, &page->lru);
215 WARN_ON(!ret);
216
217 trace_binder_free_lru_end(alloc, index);
218 }
219 }
220
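/*
 * Install a single zeroed page into the task's binder vma. This runs without
 * the alloc lock held; the mmap write lock plus the installed-page check
 * below resolve races between tasks installing the same page.
 */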
221 static int binder_install_single_page(struct binder_alloc *alloc,
222 struct binder_lru_page *lru_page,
223 unsigned long addr)
224 {
225 struct page *page;
226 int ret = 0;
227
228 if (!mmget_not_zero(alloc->vma_vm_mm))
229 return -ESRCH;
230
231 /*
232 * Protected with the mmap write lock as multiple tasks
233 * might race to install the same page.
234 */
235 mmap_write_lock(alloc->vma_vm_mm);
236 if (binder_get_installed_page(lru_page))
237 goto out;
238
239 if (!alloc->vma) {
240 pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
241 ret = -ESRCH;
242 goto out;
243 }
244
245 page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
246 if (!page) {
247 pr_err("%d: failed to allocate page\n", alloc->pid);
248 ret = -ENOMEM;
249 goto out;
250 }
251
252 ret = vm_insert_page(alloc->vma, addr, page);
253 if (ret) {
254 pr_err("%d: %s failed to insert page at offset %lx with %d\n",
255 alloc->pid, __func__, addr - (uintptr_t)alloc->buffer,
256 ret);
257 __free_page(page);
258 ret = -ENOMEM;
259 goto out;
260 }
261
262 /* Mark page installation complete and safe to use */
263 binder_set_installed_page(lru_page, page);
264 out:
265 mmap_write_unlock(alloc->vma_vm_mm);
266 mmput_async(alloc->vma_vm_mm);
267 return ret;
268 }
269
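/*
 * Make sure every page spanned by the new buffer is present. Pages that are
 * already installed (e.g. shared with a neighbouring buffer) are skipped.
 */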
270 static int binder_install_buffer_pages(struct binder_alloc *alloc,
271 struct binder_buffer *buffer,
272 size_t size)
273 {
274 struct binder_lru_page *page;
275 unsigned long start, final;
276 unsigned long page_addr;
277
278 start = (uintptr_t)buffer->user_data & PAGE_MASK;
279 final = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
280
281 for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
282 unsigned long index;
283 int ret;
284
285 index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
286 page = &alloc->pages[index];
287
288 if (binder_get_installed_page(page))
289 continue;
290
291 trace_binder_alloc_page_start(alloc, index);
292
293 ret = binder_install_single_page(alloc, page, page_addr);
294 if (ret)
295 return ret;
296
297 trace_binder_alloc_page_end(alloc, index);
298 }
299
300 return 0;
301 }
302
303 /* The range of pages should exclude those shared with other buffers */
304 static void binder_lru_freelist_del(struct binder_alloc *alloc,
305 unsigned long start, unsigned long end)
306 {
307 struct binder_lru_page *page;
308 unsigned long page_addr;
309
310 trace_binder_update_page_range(alloc, true, start, end);
311
312 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
313 unsigned long index;
314 bool on_lru;
315
316 index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
317 page = &alloc->pages[index];
318
319 if (page->page_ptr) {
320 trace_binder_alloc_lru_start(alloc, index);
321
322 on_lru = list_lru_del(&binder_freelist, &page->lru);
323 WARN_ON(!on_lru);
324
325 trace_binder_alloc_lru_end(alloc, index);
326 continue;
327 }
328
329 if (index + 1 > alloc->pages_high)
330 alloc->pages_high = index + 1;
331 }
332 }
333
334
335 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
336 struct vm_area_struct *vma)
337 {
338 if (vma)
339 alloc->vma_vm_mm = vma->vm_mm;
340 /*
341 * If we see alloc->vma is not NULL, the buffer data structures are set up
342 * completely. See the paired smp_rmb() in binder_alloc_get_vma().
343 * We also want to guarantee new alloc->vma_vm_mm is always visible
344 * if alloc->vma is set.
345 */
346 smp_wmb();
347 alloc->vma = vma;
348 }
349
350 static inline struct vm_area_struct *binder_alloc_get_vma(
351 struct binder_alloc *alloc)
352 {
353 struct vm_area_struct *vma = NULL;
354
355 if (alloc->vma) {
356 /* Look at description in binder_alloc_set_vma */
357 smp_rmb();
358 vma = alloc->vma;
359 }
360 return vma;
361 }
362
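/* Log a summary of allocated and free buffers when a request cannot be met. */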
363 static void debug_no_space_locked(struct binder_alloc *alloc)
364 {
365 size_t largest_alloc_size = 0;
366 struct binder_buffer *buffer;
367 size_t allocated_buffers = 0;
368 size_t largest_free_size = 0;
369 size_t total_alloc_size = 0;
370 size_t total_free_size = 0;
371 size_t free_buffers = 0;
372 size_t buffer_size;
373 struct rb_node *n;
374
375 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
376 buffer = rb_entry(n, struct binder_buffer, rb_node);
377 buffer_size = binder_alloc_buffer_size(alloc, buffer);
378 allocated_buffers++;
379 total_alloc_size += buffer_size;
380 if (buffer_size > largest_alloc_size)
381 largest_alloc_size = buffer_size;
382 }
383
384 for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
385 buffer = rb_entry(n, struct binder_buffer, rb_node);
386 buffer_size = binder_alloc_buffer_size(alloc, buffer);
387 free_buffers++;
388 total_free_size += buffer_size;
389 if (buffer_size > largest_free_size)
390 largest_free_size = buffer_size;
391 }
392
393 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
394 "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
395 total_alloc_size, allocated_buffers,
396 largest_alloc_size, total_free_size,
397 free_buffers, largest_free_size);
398 }
399
400 static bool debug_low_async_space_locked(struct binder_alloc *alloc)
401 {
402 /*
403 * Find the number and size of buffers allocated by the current caller;
404 * the idea is that once we cross the threshold, whoever is responsible
405 * for the low async space is likely to try to send another async txn,
406 * and at some point we'll catch them in the act. This is more efficient
407 * than keeping a map per pid.
408 */
409 struct binder_buffer *buffer;
410 size_t total_alloc_size = 0;
411 int pid = current->tgid;
412 size_t num_buffers = 0;
413 struct rb_node *n;
414
415 /*
416 * Only start detecting spammers once we have less than 20% of async
417 * space left (which is less than 10% of total buffer size).
418 */
419 if (alloc->free_async_space >= alloc->buffer_size / 10) {
420 alloc->oneway_spam_detected = false;
421 return false;
422 }
423
424 for (n = rb_first(&alloc->allocated_buffers); n != NULL;
425 n = rb_next(n)) {
426 buffer = rb_entry(n, struct binder_buffer, rb_node);
427 if (buffer->pid != pid)
428 continue;
429 if (!buffer->async_transaction)
430 continue;
431 total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
432 num_buffers++;
433 }
434
435 /*
436 * Warn if this pid has more than 50 transactions, or more than 50% of
437 * async space (which is 25% of total buffer size). Oneway spam is only
438 * detected when the threshold is exceeded.
439 */
440 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
441 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
442 "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
443 alloc->pid, pid, num_buffers, total_alloc_size);
444 if (!alloc->oneway_spam_detected) {
445 alloc->oneway_spam_detected = true;
446 return true;
447 }
448 }
449 return false;
450 }
451
452 /* Callers preallocate @new_buffer; it is freed by this function if unused */
453 static struct binder_buffer *binder_alloc_new_buf_locked(
454 struct binder_alloc *alloc,
455 struct binder_buffer *new_buffer,
456 size_t size,
457 int is_async)
458 {
459 struct rb_node *n = alloc->free_buffers.rb_node;
460 struct rb_node *best_fit = NULL;
461 struct binder_buffer *buffer;
462 unsigned long next_used_page;
463 unsigned long curr_last_page;
464 size_t buffer_size;
465
466 trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
467
468 if (is_async && alloc->free_async_space < size) {
469 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
470 "%d: binder_alloc_buf size %zd failed, no async space left\n",
471 alloc->pid, size);
472 buffer = ERR_PTR(-ENOSPC);
473 goto out;
474 }
475
476 while (n) {
477 buffer = rb_entry(n, struct binder_buffer, rb_node);
478 BUG_ON(!buffer->free);
479 buffer_size = binder_alloc_buffer_size(alloc, buffer);
480
481 if (size < buffer_size) {
482 best_fit = n;
483 n = n->rb_left;
484 } else if (size > buffer_size) {
485 n = n->rb_right;
486 } else {
487 best_fit = n;
488 break;
489 }
490 }
491
492 if (unlikely(!best_fit)) {
493 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
494 "%d: binder_alloc_buf size %zd failed, no address space\n",
495 alloc->pid, size);
496 debug_no_space_locked(alloc);
497 buffer = ERR_PTR(-ENOSPC);
498 goto out;
499 }
500
501 if (buffer_size != size) {
502 /* Found an oversized buffer which needs to be split */
503 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
504 buffer_size = binder_alloc_buffer_size(alloc, buffer);
505
506 WARN_ON(n || buffer_size == size);
507 new_buffer->user_data = buffer->user_data + size;
508 list_add(&new_buffer->entry, &buffer->entry);
509 new_buffer->free = 1;
510 binder_insert_free_buffer(alloc, new_buffer);
511 new_buffer = NULL;
512 }
513
514 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
515 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
516 alloc->pid, size, buffer, buffer_size);
517
518 /*
519 * Now we remove the pages from the freelist. A clever calculation
520 * with buffer_size determines if the last page is shared with an
521 * adjacent in-use buffer. In that case, the page has already been
522 * removed from the freelist so we trim our range short.
523 */
524 next_used_page = ((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK;
525 curr_last_page = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
526 binder_lru_freelist_del(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
527 min(next_used_page, curr_last_page));
528
529 rb_erase(&buffer->rb_node, &alloc->free_buffers);
530 buffer->free = 0;
531 buffer->allow_user_free = 0;
532 binder_insert_allocated_buffer_locked(alloc, buffer);
533 buffer->async_transaction = is_async;
534 buffer->oneway_spam_suspect = false;
535 if (is_async) {
536 alloc->free_async_space -= size;
537 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
538 "%d: binder_alloc_buf size %zd async free %zd\n",
539 alloc->pid, size, alloc->free_async_space);
540 if (debug_low_async_space_locked(alloc))
541 buffer->oneway_spam_suspect = true;
542 }
543
544 out:
545 /* Discard possibly unused new_buffer */
546 kfree(new_buffer);
547 return buffer;
548 }
549
550 /* Calculate the sanitized total size, returns 0 for invalid request */
551 static inline size_t sanitized_size(size_t data_size,
552 size_t offsets_size,
553 size_t extra_buffers_size)
554 {
555 size_t total, tmp;
556
557 /* Align to pointer size and check for overflows */
558 tmp = ALIGN(data_size, sizeof(void *)) +
559 ALIGN(offsets_size, sizeof(void *));
560 if (tmp < data_size || tmp < offsets_size)
561 return 0;
562 total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
563 if (total < tmp || total < extra_buffers_size)
564 return 0;
565
566 /* Pad 0-sized buffers so they get a unique address */
567 total = max(total, sizeof(void *));
568
569 return total;
570 }
571
572 /**
573 * binder_alloc_new_buf() - Allocate a new binder buffer
574 * @alloc: binder_alloc for this proc
575 * @data_size: size of user data buffer
576 * @offsets_size: size of the user-specified offsets area
577 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
578 * @is_async: buffer for async transaction
579 *
580 * Allocate a new buffer given the requested sizes. Returns
581 * the kernel version of the buffer pointer. The size allocated
582 * is the sum of the three given sizes (each rounded up to
583 * pointer-sized boundary)
584 *
585 * Return: The allocated buffer or %ERR_PTR(-errno) if error
586 */
587 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
588 size_t data_size,
589 size_t offsets_size,
590 size_t extra_buffers_size,
591 int is_async)
592 {
593 struct binder_buffer *buffer, *next;
594 size_t size;
595 int ret;
596
597 /* Check binder_alloc is fully initialized */
598 if (!binder_alloc_get_vma(alloc)) {
599 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
600 "%d: binder_alloc_buf, no vma\n",
601 alloc->pid);
602 return ERR_PTR(-ESRCH);
603 }
604
605 size = sanitized_size(data_size, offsets_size, extra_buffers_size);
606 if (unlikely(!size)) {
607 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
608 "%d: got transaction with invalid size %zd-%zd-%zd\n",
609 alloc->pid, data_size, offsets_size,
610 extra_buffers_size);
611 return ERR_PTR(-EINVAL);
612 }
613
614 /* Preallocate a buffer struct in case the free buffer found must be split */
615 next = kzalloc(sizeof(*next), GFP_KERNEL);
616 if (!next)
617 return ERR_PTR(-ENOMEM);
618
619 binder_alloc_lock(alloc);
620 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
621 if (IS_ERR(buffer)) {
622 binder_alloc_unlock(alloc);
623 goto out;
624 }
625
626 buffer->data_size = data_size;
627 buffer->offsets_size = offsets_size;
628 buffer->extra_buffers_size = extra_buffers_size;
629 buffer->pid = current->tgid;
630 binder_alloc_unlock(alloc);
631
632 ret = binder_install_buffer_pages(alloc, buffer, size);
633 if (ret) {
634 binder_alloc_free_buf(alloc, buffer);
635 buffer = ERR_PTR(ret);
636 }
637 out:
638 return buffer;
639 }
640
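/*
 * Helpers used by binder_delete_free_buffer() to decide whether the first
 * page of a buffer is shared with the previous or next buffer; shared pages
 * must not be put back on the freelist.
 */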
641 static unsigned long buffer_start_page(struct binder_buffer *buffer)
642 {
643 return (uintptr_t)buffer->user_data & PAGE_MASK;
644 }
645
646 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
647 {
648 return ((uintptr_t)buffer->user_data - 1) & PAGE_MASK;
649 }
650
651 static void binder_delete_free_buffer(struct binder_alloc *alloc,
652 struct binder_buffer *buffer)
653 {
654 struct binder_buffer *prev, *next;
655
656 if (PAGE_ALIGNED(buffer->user_data))
657 goto skip_freelist;
658
659 BUG_ON(alloc->buffers.next == &buffer->entry);
660 prev = binder_buffer_prev(buffer);
661 BUG_ON(!prev->free);
662 if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
663 goto skip_freelist;
664
665 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
666 next = binder_buffer_next(buffer);
667 if (buffer_start_page(next) == buffer_start_page(buffer))
668 goto skip_freelist;
669 }
670
671 binder_lru_freelist_add(alloc, buffer_start_page(buffer),
672 buffer_start_page(buffer) + PAGE_SIZE);
673 skip_freelist:
674 list_del(&buffer->entry);
675 kfree(buffer);
676 }
677
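/*
 * Return a buffer to the free rbtree: put the pages it fully covers back on
 * the freelist and merge it with free neighbours so that adjacent free space
 * coalesces into a single buffer.
 */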
678 static void binder_free_buf_locked(struct binder_alloc *alloc,
679 struct binder_buffer *buffer)
680 {
681 size_t size, buffer_size;
682
683 buffer_size = binder_alloc_buffer_size(alloc, buffer);
684
685 size = ALIGN(buffer->data_size, sizeof(void *)) +
686 ALIGN(buffer->offsets_size, sizeof(void *)) +
687 ALIGN(buffer->extra_buffers_size, sizeof(void *));
688
689 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
690 "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
691 alloc->pid, buffer, size, buffer_size);
692
693 BUG_ON(buffer->free);
694 BUG_ON(size > buffer_size);
695 BUG_ON(buffer->transaction != NULL);
696 BUG_ON(buffer->user_data < alloc->buffer);
697 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
698
699 if (buffer->async_transaction) {
700 alloc->free_async_space += buffer_size;
701 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
702 "%d: binder_free_buf size %zd async free %zd\n",
703 alloc->pid, size, alloc->free_async_space);
704 }
705
706 binder_lru_freelist_add(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
707 ((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
708
709 rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
710 buffer->free = 1;
711 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
712 struct binder_buffer *next = binder_buffer_next(buffer);
713
714 if (next->free) {
715 rb_erase(&next->rb_node, &alloc->free_buffers);
716 binder_delete_free_buffer(alloc, next);
717 }
718 }
719 if (alloc->buffers.next != &buffer->entry) {
720 struct binder_buffer *prev = binder_buffer_prev(buffer);
721
722 if (prev->free) {
723 binder_delete_free_buffer(alloc, buffer);
724 rb_erase(&prev->rb_node, &alloc->free_buffers);
725 buffer = prev;
726 }
727 }
728 binder_insert_free_buffer(alloc, buffer);
729 }
730
731 /**
732 * binder_alloc_get_page() - get kernel pointer for given buffer offset
733 * @alloc: binder_alloc for this proc
734 * @buffer: binder buffer to be accessed
735 * @buffer_offset: offset into @buffer data
736 * @pgoffp: address to copy final page offset to
737 *
738 * Lookup the struct page corresponding to the address
739 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
740 * NULL, the byte-offset into the page is written there.
741 *
742 * The caller is responsible for ensuring that the offset points
743 * to a valid address within the @buffer and that @buffer is
744 * not freeable by the user. Since it can't be freed, we are
745 * guaranteed that the corresponding elements of @alloc->pages[]
746 * cannot change.
747 *
748 * Return: struct page
749 */
750 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
751 struct binder_buffer *buffer,
752 binder_size_t buffer_offset,
753 pgoff_t *pgoffp)
754 {
755 binder_size_t buffer_space_offset = buffer_offset +
756 (buffer->user_data - alloc->buffer);
757 pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
758 size_t index = buffer_space_offset >> PAGE_SHIFT;
759 struct binder_lru_page *lru_page;
760
761 lru_page = &alloc->pages[index];
762 *pgoffp = pgoff;
763 return lru_page->page_ptr;
764 }
765
766 /**
767 * binder_alloc_clear_buf() - zero out buffer
768 * @alloc: binder_alloc for this proc
769 * @buffer: binder buffer to be cleared
770 *
771 * memset the given buffer to 0
772 */
773 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
774 struct binder_buffer *buffer)
775 {
776 size_t bytes = binder_alloc_buffer_size(alloc, buffer);
777 binder_size_t buffer_offset = 0;
778
779 while (bytes) {
780 unsigned long size;
781 struct page *page;
782 pgoff_t pgoff;
783 void *kptr;
784
785 page = binder_alloc_get_page(alloc, buffer,
786 buffer_offset, &pgoff);
787 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
788 kptr = kmap(page) + pgoff;
789 memset(kptr, 0, size);
790 kunmap(page);
791 bytes -= size;
792 buffer_offset += size;
793 }
794 }
795
796
797 /**
798 * binder_alloc_free_buf() - free a binder buffer
799 * @alloc: binder_alloc for this proc
800 * @buffer: kernel pointer to buffer
801 *
802 * Free the buffer allocated via binder_alloc_new_buf()
803 */
804 void binder_alloc_free_buf(struct binder_alloc *alloc,
805 struct binder_buffer *buffer)
806 {
807 /*
808 * We could eliminate the call to binder_alloc_clear_buf()
809 * from binder_alloc_deferred_release() by moving this to
810 * binder_free_buf_locked(). However, that could
811 * increase contention for the alloc->lock if clear_on_free
812 * is used frequently for large buffers. This lock is not
813 * needed for correctness here.
814 */
815 if (buffer->clear_on_free) {
816 binder_alloc_clear_buf(alloc, buffer);
817 buffer->clear_on_free = false;
818 }
819 binder_alloc_lock(alloc);
820 binder_free_buf_locked(alloc, buffer);
821 binder_alloc_unlock(alloc);
822 }
823
824 /**
825 * binder_alloc_mmap_handler() - map virtual address space for proc
826 * @alloc: alloc structure for this proc
827 * @vma: vma passed to mmap()
828 *
829 * Called by binder_mmap() to initialize the space specified in
830 * vma for allocating binder buffers
831 *
832 * Return:
833 * 0 = success
834 * -EBUSY = address space already mapped
835 * -ENOMEM = failed to map memory to given address space
836 */
837 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
838 struct vm_area_struct *vma)
839 {
840 struct binder_buffer *buffer;
841 const char *failure_string;
842 int ret, i;
843
844 mutex_lock(&binder_alloc_mmap_lock);
845 if (alloc->buffer_size) {
846 ret = -EBUSY;
847 failure_string = "already mapped";
848 goto err_already_mapped;
849 }
850 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
851 SZ_4M);
852 mutex_unlock(&binder_alloc_mmap_lock);
853
854 alloc->buffer = (void __user *)vma->vm_start;
855
856 alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
857 sizeof(alloc->pages[0]),
858 GFP_KERNEL);
859 if (alloc->pages == NULL) {
860 ret = -ENOMEM;
861 failure_string = "alloc page array";
862 goto err_alloc_pages_failed;
863 }
864
865 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
866 alloc->pages[i].alloc = alloc;
867 INIT_LIST_HEAD(&alloc->pages[i].lru);
868 }
869
870 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
871 if (!buffer) {
872 ret = -ENOMEM;
873 failure_string = "alloc buffer struct";
874 goto err_alloc_buf_struct_failed;
875 }
876
877 buffer->user_data = alloc->buffer;
878 list_add(&buffer->entry, &alloc->buffers);
879 buffer->free = 1;
880 binder_insert_free_buffer(alloc, buffer);
881 alloc->free_async_space = alloc->buffer_size / 2;
882 binder_alloc_set_vma(alloc, vma);
883 mmgrab(alloc->vma_vm_mm);
884
885 return 0;
886
887 err_alloc_buf_struct_failed:
888 kfree(alloc->pages);
889 alloc->pages = NULL;
890 err_alloc_pages_failed:
891 alloc->buffer = NULL;
892 mutex_lock(&binder_alloc_mmap_lock);
893 alloc->buffer_size = 0;
894 err_already_mapped:
895 mutex_unlock(&binder_alloc_mmap_lock);
896 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
897 "%s: %d %lx-%lx %s failed %d\n", __func__,
898 alloc->pid, vma->vm_start, vma->vm_end,
899 failure_string, ret);
900 return ret;
901 }
902
903
904 void binder_alloc_deferred_release(struct binder_alloc *alloc)
905 {
906 struct rb_node *n;
907 int buffers, page_count;
908 struct binder_buffer *buffer;
909
910 buffers = 0;
911 binder_alloc_lock(alloc);
912 BUG_ON(alloc->vma);
913
914 while ((n = rb_first(&alloc->allocated_buffers))) {
915 buffer = rb_entry(n, struct binder_buffer, rb_node);
916
917 /* Transaction should already have been freed */
918 BUG_ON(buffer->transaction);
919
920 if (buffer->clear_on_free) {
921 binder_alloc_clear_buf(alloc, buffer);
922 buffer->clear_on_free = false;
923 }
924 binder_free_buf_locked(alloc, buffer);
925 buffers++;
926 }
927
928 while (!list_empty(&alloc->buffers)) {
929 buffer = list_first_entry(&alloc->buffers,
930 struct binder_buffer, entry);
931 WARN_ON(!buffer->free);
932
933 list_del(&buffer->entry);
934 WARN_ON_ONCE(!list_empty(&alloc->buffers));
935 kfree(buffer);
936 }
937
938 page_count = 0;
939 if (alloc->pages) {
940 int i;
941
942 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
943 unsigned long page_addr;
944 bool on_lru;
945
946 if (!alloc->pages[i].page_ptr)
947 continue;
948
949 on_lru = list_lru_del(&binder_freelist,
950 &alloc->pages[i].lru);
951 page_addr = (uintptr_t)alloc->buffer + i * PAGE_SIZE;
952 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
953 "%s: %d: page %d %s\n",
954 __func__, alloc->pid, i,
955 on_lru ? "on lru" : "active");
956 __free_page(alloc->pages[i].page_ptr);
957 page_count++;
958 }
959 kfree(alloc->pages);
960 }
961 binder_alloc_unlock(alloc);
962 if (alloc->vma_vm_mm)
963 mmdrop(alloc->vma_vm_mm);
964
965 binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
966 "%s: %d buffers %d, pages %d\n",
967 __func__, alloc->pid, buffers, page_count);
968 }
969
970 /**
971 * binder_alloc_print_allocated() - print buffer info
972 * @m: seq_file for output via seq_printf()
973 * @alloc: binder_alloc for this proc
974 *
975 * Prints information about every buffer associated with
976 * the binder_alloc state to the given seq_file
977 */
978 void binder_alloc_print_allocated(struct seq_file *m,
979 struct binder_alloc *alloc)
980 {
981 struct binder_buffer *buffer;
982 struct rb_node *n;
983
984 binder_alloc_lock(alloc);
985 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
986 buffer = rb_entry(n, struct binder_buffer, rb_node);
987 seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
988 buffer->debug_id,
989 buffer->user_data - alloc->buffer,
990 buffer->data_size, buffer->offsets_size,
991 buffer->extra_buffers_size,
992 buffer->transaction ? "active" : "delivered");
993 }
994 binder_alloc_unlock(alloc);
995 }
996
997 /**
998 * binder_alloc_print_pages() - print page usage
999 * @m: seq_file for output via seq_printf()
1000 * @alloc: binder_alloc for this proc
1001 */
1002 void binder_alloc_print_pages(struct seq_file *m,
1003 struct binder_alloc *alloc)
1004 {
1005 struct binder_lru_page *page;
1006 int i;
1007 int active = 0;
1008 int lru = 0;
1009 int free = 0;
1010
1011 binder_alloc_lock(alloc);
1012 /*
1013 * Make sure the binder_alloc is fully initialized, otherwise we might
1014 * read inconsistent state.
1015 */
1016 if (binder_alloc_get_vma(alloc) != NULL) {
1017 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
1018 page = &alloc->pages[i];
1019 if (!page->page_ptr)
1020 free++;
1021 else if (list_empty(&page->lru))
1022 active++;
1023 else
1024 lru++;
1025 }
1026 }
1027 binder_alloc_unlock(alloc);
1028 seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
1029 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
1030 }
1031
1032 /**
1033 * binder_alloc_get_allocated_count() - return count of buffers
1034 * @alloc: binder_alloc for this proc
1035 *
1036 * Return: count of allocated buffers
1037 */
1038 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1039 {
1040 struct rb_node *n;
1041 int count = 0;
1042
1043 binder_alloc_lock(alloc);
1044 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
1045 count++;
1046 binder_alloc_unlock(alloc);
1047 return count;
1048 }
1049
1050
1051 /**
1052 * binder_alloc_vma_close() - invalidate address space
1053 * @alloc: binder_alloc for this proc
1054 *
1055 * Called from binder_vma_close() when releasing address space.
1056 * Clears alloc->vma to prevent new incoming transactions from
1057 * allocating more buffers.
1058 */
1059 void binder_alloc_vma_close(struct binder_alloc *alloc)
1060 {
1061 binder_alloc_set_vma(alloc, NULL);
1062 }
1063
1064 /**
1065 * binder_alloc_free_page() - shrinker callback to free pages
1066 * @item: item to free
1067 * @lru: list_lru_one that @item is on
 * @lock: lock protecting @lru
1068 * @cb_arg: callback argument
1069 *
1070 * Called from list_lru_walk() in binder_shrink_scan() to free
1071 * up pages when the system is under memory pressure.
1072 */
1073 enum lru_status binder_alloc_free_page(struct list_head *item,
1074 struct list_lru_one *lru,
1075 spinlock_t *lock,
1076 void *cb_arg)
1077 __must_hold(lock)
1078 {
1079 struct binder_lru_page *page = container_of(item, typeof(*page), lru);
1080 struct binder_alloc *alloc = page->alloc;
1081 struct mm_struct *mm = alloc->vma_vm_mm;
1082 struct vm_area_struct *vma;
1083 struct page *page_to_free;
1084 unsigned long page_addr;
1085 size_t index;
1086
1087 if (!mmget_not_zero(mm))
1088 goto err_mmget;
1089 if (!mmap_read_trylock(mm))
1090 goto err_mmap_read_lock_failed;
1091 if (!binder_alloc_trylock(alloc))
1092 goto err_get_alloc_lock_failed;
1093 if (!page->page_ptr)
1094 goto err_page_already_freed;
1095
1096 index = page - alloc->pages;
1097 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
1098
1099 vma = find_vma(mm, page_addr);
1100 if (vma && vma != binder_alloc_get_vma(alloc))
1101 goto err_invalid_vma;
1102
1103 trace_binder_unmap_kernel_start(alloc, index);
1104
1105 page_to_free = page->page_ptr;
1106 page->page_ptr = NULL;
1107
1108 trace_binder_unmap_kernel_end(alloc, index);
1109
1110 list_lru_isolate(lru, item);
1111 binder_alloc_unlock(alloc);
1112 spin_unlock(lock);
1113
1114 if (vma) {
1115 trace_binder_unmap_user_start(alloc, index);
1116
1117 zap_page_range(vma, page_addr, PAGE_SIZE);
1118
1119 trace_binder_unmap_user_end(alloc, index);
1120 }
1121
1122 mmap_read_unlock(mm);
1123 mmput_async(mm);
1124 __free_page(page_to_free);
1125
1126 spin_lock(lock);
1127 return LRU_REMOVED_RETRY;
1128
1129 err_invalid_vma:
1130 err_page_already_freed:
1131 binder_alloc_unlock(alloc);
1132 err_get_alloc_lock_failed:
1133 mmap_read_unlock(mm);
1134 err_mmap_read_lock_failed:
1135 mmput_async(mm);
1136 err_mmget:
1137 return LRU_SKIP;
1138 }
1139
1140 static unsigned long
1141 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1142 {
1143 return list_lru_count(&binder_freelist);
1144 }
1145
1146 static unsigned long
1147 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1148 {
1149 return list_lru_walk(&binder_freelist, binder_alloc_free_page,
1150 NULL, sc->nr_to_scan);
1151 }
1152
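/*
 * The binder shrinker: "count" reports how many pages sit on binder_freelist
 * and "scan" walks the freelist, unmapping and freeing pages through
 * binder_alloc_free_page().
 */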
1153 static struct shrinker binder_shrinker = {
1154 .count_objects = binder_shrink_count,
1155 .scan_objects = binder_shrink_scan,
1156 .seeks = DEFAULT_SEEKS,
1157 };
1158
1159 /**
1160 * binder_alloc_init() - called by binder_open() for per-proc initialization
1161 * @alloc: binder_alloc for this proc
1162 *
1163 * Called from binder_open() to initialize binder_alloc fields for
1164 * new binder proc
1165 */
1166 void binder_alloc_init(struct binder_alloc *alloc)
1167 {
1168 alloc->pid = current->group_leader->pid;
1169 binder_alloc_lock_init(alloc);
1170 INIT_LIST_HEAD(&alloc->buffers);
1171 }
1172
1173 int binder_alloc_shrinker_init(void)
1174 {
1175 int ret = list_lru_init(&binder_freelist);
1176
1177 if (ret == 0) {
1178 ret = register_shrinker(&binder_shrinker);
1179 if (ret)
1180 list_lru_destroy(&binder_freelist);
1181 }
1182 return ret;
1183 }
1184
1185 void binder_alloc_shrinker_exit(void)
1186 {
1187 unregister_shrinker(&binder_shrinker);
1188 list_lru_destroy(&binder_freelist);
1189 }
1190
1191 /**
1192 * check_buffer() - verify that buffer/offset is safe to access
1193 * @alloc: binder_alloc for this proc
1194 * @buffer: binder buffer to be accessed
1195 * @offset: offset into @buffer data
1196 * @bytes: bytes to access from offset
1197 *
1198 * Check that the @offset/@bytes are within the size of the given
1199 * @buffer and that the buffer is currently active and not freeable.
1200 * Offsets must also be multiples of sizeof(u32). The kernel is
1201 * allowed to touch the buffer in two cases:
1202 *
1203 * 1) when the buffer is being created:
1204 * (buffer->free == 0 && buffer->allow_user_free == 0)
1205 * 2) when the buffer is being torn down:
1206 * (buffer->free == 0 && buffer->transaction == NULL).
1207 *
1208 * Return: true if the buffer is safe to access
1209 */
1210 static inline bool check_buffer(struct binder_alloc *alloc,
1211 struct binder_buffer *buffer,
1212 binder_size_t offset, size_t bytes)
1213 {
1214 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1215
1216 return buffer_size >= bytes &&
1217 offset <= buffer_size - bytes &&
1218 IS_ALIGNED(offset, sizeof(u32)) &&
1219 !buffer->free &&
1220 (!buffer->allow_user_free || !buffer->transaction);
1221 }
1222
1223 /**
1224 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1225 * @alloc: binder_alloc for this proc
1226 * @buffer: binder buffer to be accessed
1227 * @buffer_offset: offset into @buffer data
1228 * @from: userspace pointer to source buffer
1229 * @bytes: bytes to copy
1230 *
1231 * Copy bytes from source userspace to target buffer.
1232 *
1233 * Return: bytes remaining to be copied
1234 */
1235 unsigned long
1236 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1237 struct binder_buffer *buffer,
1238 binder_size_t buffer_offset,
1239 const void __user *from,
1240 size_t bytes)
1241 {
1242 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1243 return bytes;
1244
1245 while (bytes) {
1246 unsigned long size;
1247 unsigned long ret;
1248 struct page *page;
1249 pgoff_t pgoff;
1250 void *kptr;
1251
1252 page = binder_alloc_get_page(alloc, buffer,
1253 buffer_offset, &pgoff);
1254 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1255 kptr = kmap(page) + pgoff;
1256 ret = copy_from_user(kptr, from, size);
1257 kunmap(page);
1258 if (ret)
1259 return bytes - size + ret;
1260 bytes -= size;
1261 from += size;
1262 buffer_offset += size;
1263 }
1264 return 0;
1265 }
1266
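/*
 * Copy between a kernel buffer and a binder buffer one page at a time,
 * mapping each page with kmap_atomic() so that highmem pages are handled.
 */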
1267 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1268 bool to_buffer,
1269 struct binder_buffer *buffer,
1270 binder_size_t buffer_offset,
1271 void *ptr,
1272 size_t bytes)
1273 {
1274 /* All copies must be 32-bit aligned and 32-bit size */
1275 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1276 return -EINVAL;
1277
1278 while (bytes) {
1279 unsigned long size;
1280 struct page *page;
1281 pgoff_t pgoff;
1282 void *tmpptr;
1283 void *base_ptr;
1284
1285 page = binder_alloc_get_page(alloc, buffer,
1286 buffer_offset, &pgoff);
1287 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1288 base_ptr = kmap_atomic(page);
1289 tmpptr = base_ptr + pgoff;
1290 if (to_buffer)
1291 memcpy(tmpptr, ptr, size);
1292 else
1293 memcpy(ptr, tmpptr, size);
1294 /*
1295 * kunmap_atomic() takes care of flushing the cache
1296 * if this device has VIVT cache arch
1297 */
1298 kunmap_atomic(base_ptr);
1299 bytes -= size;
1300 pgoff = 0;
1301 ptr = ptr + size;
1302 buffer_offset += size;
1303 }
1304 return 0;
1305 }
1306
1307 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1308 struct binder_buffer *buffer,
1309 binder_size_t buffer_offset,
1310 void *src,
1311 size_t bytes)
1312 {
1313 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1314 src, bytes);
1315 }
1316
1317 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1318 void *dest,
1319 struct binder_buffer *buffer,
1320 binder_size_t buffer_offset,
1321 size_t bytes)
1322 {
1323 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1324 dest, bytes);
1325 }
1326
1327