Lines Matching full:buffer
56 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) in binder_buffer_next() argument
58 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
61 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) in binder_buffer_prev() argument
63 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
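These two helpers walk the address-ordered doubly linked list that threads every binder_buffer in the mapped region; list_entry() is container_of(), recovering the containing struct from the embedded list node. A minimal userspace model of the same navigation (hypothetical names, not the kernel API):

    #include <stddef.h>

    struct list_node { struct list_node *next, *prev; };

    struct buf {                      /* stand-in for struct binder_buffer */
            struct list_node entry;   /* node on the address-ordered list  */
            unsigned char *data;      /* start of this buffer's payload    */
    };

    #define node_to_buf(ptr) \
            ((struct buf *)((char *)(ptr) - offsetof(struct buf, entry)))

    static struct buf *buf_next(struct buf *b)
    {
            return node_to_buf(b->entry.next);
    }

    static struct buf *buf_prev(struct buf *b)
    {
            return node_to_buf(b->entry.prev);
    }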
67 struct binder_buffer *buffer) in binder_alloc_buffer_size() argument
69 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
70 return (u8 *)alloc->buffer + in binder_alloc_buffer_size()
71 alloc->buffer_size - (u8 *)buffer->data; in binder_alloc_buffer_size()
72 return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data; in binder_alloc_buffer_size()
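Note that struct binder_buffer carries no length field: binder_alloc_buffer_size() derives a buffer's extent from the gap to the next buffer's data pointer, or to the end of the mapped region for the last buffer on the list. A sketch of the same arithmetic, with base and region_size standing in for alloc->buffer and alloc->buffer_size:

    #include <stddef.h>

    /* Sketch: a buffer's size is implicit in the address-ordered list. */
    static size_t buf_size(unsigned char *base, size_t region_size,
                           unsigned char *data, unsigned char *next_data)
    {
            if (!next_data)                    /* last buffer in the list */
                    return (size_t)(base + region_size - data);
            return (size_t)(next_data - data); /* gap to the next buffer  */
    }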
80 struct binder_buffer *buffer; in binder_insert_free_buffer() local
89 "%d: add free buffer, size %zd, at %pK\n", in binder_insert_free_buffer()
94 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
95 BUG_ON(!buffer->free); in binder_insert_free_buffer()
97 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
113 struct binder_buffer *buffer; in binder_insert_allocated_buffer_locked() local
119 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer_locked()
120 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
122 if (new_buffer->data < buffer->data) in binder_insert_allocated_buffer_locked()
124 else if (new_buffer->data > buffer->data) in binder_insert_allocated_buffer_locked()
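Two red-black trees index the same buffers: the free tree is keyed by the (implicit) size above, so allocation can binary-search for a best fit, and the allocated tree is keyed by the buffer's data address, so a user pointer can be looked up on free. A plain-BST sketch of the two descend loops (the kernel links struct rb_node with rb_link_node()/rb_insert_color()):

    #include <stddef.h>

    struct node { struct node *l, *r; unsigned char *data; size_t size; };

    /* Free tree: ordered by size; equal sizes go right, as in the kernel. */
    static struct node **free_slot(struct node **root, size_t size)
    {
            while (*root)
                    root = size < (*root)->size ? &(*root)->l : &(*root)->r;
            return root;    /* insertion point preserving size order */
    }

    /* Allocated tree: ordered by buffer start address; duplicates are a bug. */
    static struct node **alloc_slot(struct node **root, unsigned char *data)
    {
            while (*root)
                    root = data < (*root)->data ? &(*root)->l : &(*root)->r;
            return root;    /* insertion point preserving address order */
    }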
138 struct binder_buffer *buffer; in binder_alloc_prepare_to_free_locked() local
144 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_prepare_to_free_locked()
145 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
147 if (kern_ptr < buffer->data) in binder_alloc_prepare_to_free_locked()
149 else if (kern_ptr > buffer->data) in binder_alloc_prepare_to_free_locked()
154 * free the buffer when in use by kernel or in binder_alloc_prepare_to_free_locked()
157 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
159 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
160 return buffer; in binder_alloc_prepare_to_free_locked()
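The locked lookup doubles as a claim: allow_user_free is tested and cleared under the allocator lock, so of two racing BC_FREE_BUFFER commands for the same buffer only one gets the buffer back and the other gets NULL. A sketch of that guard, assuming the caller holds the lock:

    #include <stddef.h>

    /* Sketch of the single-claim guard (field name as in the kernel). */
    struct guarded_buf { int allow_user_free; };

    static struct guarded_buf *prepare_to_free(struct guarded_buf *b)
    {
            if (!b || !b->allow_user_free)
                    return NULL;          /* kernel still owns it, or we raced */
            b->allow_user_free = 0;       /* claimed exactly once              */
            return b;
    }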
167 * binder_alloc_prepare_to_free() - get buffer given user ptr
169 * @user_ptr: User pointer to buffer data
171 * Validate the userspace pointer to buffer data and return the buffer corresponding to
172 * that user pointer. Search the rb tree for the buffer that matches the user data
175 * Return: Pointer to buffer or NULL
180 struct binder_buffer *buffer; in binder_alloc_prepare_to_free() local
183 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
185 return buffer; in binder_alloc_prepare_to_free()
211 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
238 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
302 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
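All three hits in binder_update_page_range() compute the same thing: the slot of a page in alloc->pages[] is its offset from the start of the mapped region, in pages. Model of the arithmetic:

    #include <stdint.h>
    #include <stddef.h>

    #define PG_SIZE 4096UL    /* stand-in for PAGE_SIZE */

    /* Sketch: page slot = offset of the page from the region start, in
     * pages; only valid for addresses inside the mapped region. */
    static size_t page_index(uintptr_t region_start, uintptr_t page_addr)
    {
            return (page_addr - region_start) / PG_SIZE;
    }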
340 * If we see alloc->vma is not NULL, buffer data structures set up in binder_alloc_set_vma()
370 struct binder_buffer *buffer; in binder_alloc_new_buf_locked() local
413 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
414 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
415 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
437 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
438 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
446 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
447 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
464 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
465 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
469 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", in binder_alloc_new_buf_locked()
470 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
473 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); in binder_alloc_new_buf_locked()
476 (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); in binder_alloc_new_buf_locked()
480 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); in binder_alloc_new_buf_locked()
487 new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_new_buf_locked()
489 pr_err("%s: %d failed to alloc new buffer struct\n", in binder_alloc_new_buf_locked()
493 new_buffer->data = (u8 *)buffer->data + size; in binder_alloc_new_buf_locked()
494 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
500 buffer->free = 0; in binder_alloc_new_buf_locked()
501 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
502 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
505 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
506 buffer->data_size = data_size; in binder_alloc_new_buf_locked()
507 buffer->offsets_size = offsets_size; in binder_alloc_new_buf_locked()
508 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
509 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf_locked()
516 return buffer; in binder_alloc_new_buf_locked()
520 (void *)PAGE_ALIGN((uintptr_t)buffer->data), in binder_alloc_new_buf_locked()
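binder_alloc_new_buf_locked() selects the smallest free buffer that still fits (best fit), backs just the whole pages the payload spans, and, when the fit is loose, splits the tail off as a new free buffer so that large free chunks are not swallowed by small requests. A compressed userspace model of the split step (sizes explicit here, implicit in the kernel; tree and page bookkeeping omitted):

    #include <stdlib.h>

    struct sbuf {
            unsigned char *data;   /* start of payload                      */
            size_t size;           /* explicit here, implicit in the kernel */
            int free;
    };

    /* Sketch: carve 'size' bytes off the front of a free buffer; the
     * remainder, if any, becomes a new free buffer at data + size. */
    static struct sbuf *take_from_free_buf(struct sbuf *victim, size_t size)
    {
            if (victim->size > size) {
                    struct sbuf *rest = calloc(1, sizeof(*rest));

                    if (!rest)
                            return NULL;
                    rest->data = victim->data + size;  /* new_buffer->data */
                    rest->size = victim->size - size;
                    rest->free = 1;     /* goes back into the free tree   */
                    victim->size = size;
            }
            victim->free = 0;           /* now an allocated buffer        */
            return victim;
    }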
526 * binder_alloc_new_buf() - Allocate a new binder buffer
528 * @data_size: size of user data buffer
529 * @offsets_size: user-specified buffer offset
531 * @is_async: buffer for async transaction
533 * Allocate a new buffer given the requested sizes. Returns
534 * the kernel version of the buffer pointer. The size allocated
538 * Return: The allocated buffer or %NULL if error
546 struct binder_buffer *buffer; in binder_alloc_new_buf() local
549 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, in binder_alloc_new_buf()
552 return buffer; in binder_alloc_new_buf()
555 static void *buffer_start_page(struct binder_buffer *buffer) in buffer_start_page() argument
557 return (void *)((uintptr_t)buffer->data & PAGE_MASK); in buffer_start_page()
560 static void *prev_buffer_end_page(struct binder_buffer *buffer) in prev_buffer_end_page() argument
562 return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK); in prev_buffer_end_page()
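Given contiguous buffers, the byte just before a buffer's data is the last byte of its predecessor, so masking data - 1 with PAGE_MASK yields the page the previous buffer ends on, while masking data itself yields the page this buffer starts on. Model:

    #include <stdint.h>

    #define PG_SIZE 4096UL
    #define PG_MASK (~(PG_SIZE - 1))

    static uintptr_t start_page(uintptr_t data)
    {
            return data & PG_MASK;        /* page this buffer starts on       */
    }

    static uintptr_t prev_end_page(uintptr_t data)
    {
            return (data - 1) & PG_MASK;  /* page the previous buffer ends on */
    }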
566 struct binder_buffer *buffer) in binder_delete_free_buffer() argument
570 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
571 prev = binder_buffer_prev(buffer); in binder_delete_free_buffer()
573 if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { in binder_delete_free_buffer()
576 "%d: merge free, buffer %pK share page with %pK\n", in binder_delete_free_buffer()
577 alloc->pid, buffer->data, prev->data); in binder_delete_free_buffer()
580 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
581 next = binder_buffer_next(buffer); in binder_delete_free_buffer()
582 if (buffer_start_page(next) == buffer_start_page(buffer)) { in binder_delete_free_buffer()
585 "%d: merge free, buffer %pK share page with %pK\n", in binder_delete_free_buffer()
587 buffer->data, in binder_delete_free_buffer()
592 if (PAGE_ALIGNED(buffer->data)) { in binder_delete_free_buffer()
594 "%d: merge free, buffer start %pK is page aligned\n", in binder_delete_free_buffer()
595 alloc->pid, buffer->data); in binder_delete_free_buffer()
601 "%d: merge free, buffer %pK do not share page with %pK or %pK\n", in binder_delete_free_buffer()
602 alloc->pid, buffer->data, in binder_delete_free_buffer()
604 binder_update_page_range(alloc, 0, buffer_start_page(buffer), in binder_delete_free_buffer()
605 buffer_start_page(buffer) + PAGE_SIZE); in binder_delete_free_buffer()
607 list_del(&buffer->entry); in binder_delete_free_buffer()
608 kfree(buffer); in binder_delete_free_buffer()
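binder_delete_free_buffer() unlinks a free node and releases the page under its start only when no neighbour still shares that page; the three debug branches above cover sharing with the previous buffer, sharing with the next buffer, and a page-aligned start, where no partial page exists to release. The core of each branch is an overlap test of this shape (illustrative names, not the kernel's):

    #include <stdint.h>

    #define PG_MASK (~4095UL)

    /* Sketch: a page may be unmapped only if no byte range that must
     * stay mapped intersects it.  'lo'/'hi' are the inclusive bounds
     * of such a range, e.g. a neighbouring buffer still in use. */
    static int range_touches_page(uintptr_t lo, uintptr_t hi, uintptr_t page)
    {
            return (lo & PG_MASK) <= page && (hi & PG_MASK) >= page;
    }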
612 struct binder_buffer *buffer) in binder_free_buf_locked() argument
616 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
618 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
619 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
620 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
624 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
626 BUG_ON(buffer->free); in binder_free_buf_locked()
628 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
629 BUG_ON(buffer->data < alloc->buffer); in binder_free_buf_locked()
630 BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
632 if (buffer->async_transaction) { in binder_free_buf_locked()
641 (void *)PAGE_ALIGN((uintptr_t)buffer->data), in binder_free_buf_locked()
642 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); in binder_free_buf_locked()
644 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
645 buffer->free = 1; in binder_free_buf_locked()
646 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
647 struct binder_buffer *next = binder_buffer_next(buffer); in binder_free_buf_locked()
654 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
655 struct binder_buffer *prev = binder_buffer_prev(buffer); in binder_free_buf_locked()
658 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
660 buffer = prev; in binder_free_buf_locked()
663 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
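Freeing coalesces in a fixed order: mark the buffer free, absorb a free successor first, then let a free predecessor absorb the buffer itself, and reinsert whichever node survives into the free tree. A doubly-linked-list sketch of the neighbour merging (the kernel also erases absorbed nodes from the rbtrees and releases their backing pages):

    #include <stdlib.h>

    struct cbuf { struct cbuf *prev, *next; int free; };

    static struct cbuf *coalesce(struct cbuf *b)
    {
            b->free = 1;
            if (b->next && b->next->free) {  /* absorb the free successor  */
                    struct cbuf *n = b->next;

                    b->next = n->next;
                    if (n->next)
                            n->next->prev = b;
                    free(n);                 /* kernel: delete_free_buffer */
            }
            if (b->prev && b->prev->free) {  /* predecessor absorbs us     */
                    struct cbuf *p = b->prev;

                    p->next = b->next;
                    if (b->next)
                            b->next->prev = p;
                    free(b);
                    b = p;
            }
            return b;    /* caller reinserts b into the free rbtree */
    }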
667 * binder_alloc_free_buf() - free a binder buffer
669 * @buffer: kernel pointer to buffer
671 * Free the buffer allocated via binder_alloc_new_buf()
674 struct binder_buffer *buffer) in binder_alloc_free_buf() argument
677 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
700 struct binder_buffer *buffer; in binder_alloc_mmap_handler() local
703 if (alloc->buffer) { in binder_alloc_mmap_handler()
715 alloc->buffer = area->addr; in binder_alloc_mmap_handler()
717 vma->vm_start - (uintptr_t)alloc->buffer; in binder_alloc_mmap_handler()
723 (vma->vm_start ^ (uint32_t)alloc->buffer))) { in binder_alloc_mmap_handler()
726 vma->vm_end, alloc->buffer); in binder_alloc_mmap_handler()
741 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_mmap_handler()
742 if (!buffer) { in binder_alloc_mmap_handler()
744 failure_string = "alloc buffer struct"; in binder_alloc_mmap_handler()
748 buffer->data = alloc->buffer; in binder_alloc_mmap_handler()
749 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
750 buffer->free = 1; in binder_alloc_mmap_handler()
751 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
763 vfree(alloc->buffer); in binder_alloc_mmap_handler()
764 alloc->buffer = NULL; in binder_alloc_mmap_handler()
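The mmap handler fixes the relationship between the two views of the region: user_buffer_offset is the constant difference between the userspace mapping at vma->vm_start and the kernel mapping at alloc->buffer, so translating an address either way is a single addition. Model:

    #include <stdint.h>

    /* Sketch: one constant offset relates the user and kernel mappings
     * of the same pages for the life of the mmap. */
    static uintptr_t kern_to_user(uintptr_t kern, intptr_t user_buffer_offset)
    {
            return (uintptr_t)((intptr_t)kern + user_buffer_offset);
    }

    static uintptr_t user_to_kern(uintptr_t user, intptr_t user_buffer_offset)
    {
            return (uintptr_t)((intptr_t)user - user_buffer_offset);
    }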
780 struct binder_buffer *buffer; in binder_alloc_deferred_release() local
787 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_deferred_release()
790 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
792 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
797 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
799 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
801 list_del(&buffer->entry); in binder_alloc_deferred_release()
803 kfree(buffer); in binder_alloc_deferred_release()
819 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
829 vfree(alloc->buffer); in binder_alloc_deferred_release()
841 struct binder_buffer *buffer) in print_binder_buffer() argument
844 prefix, buffer->debug_id, buffer->data, in print_binder_buffer()
845 buffer->data_size, buffer->offsets_size, in print_binder_buffer()
846 buffer->extra_buffers_size, in print_binder_buffer()
847 buffer->transaction ? "active" : "delivered"); in print_binder_buffer()
851 * binder_alloc_print_allocated() - print buffer info
855 * Prints information about every buffer associated with
865 print_binder_buffer(m, " buffer", in binder_alloc_print_allocated()
968 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
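The shrinker path in binder_alloc_free_page() inverts the index computation seen in binder_update_page_range(): a slot in alloc->pages[] maps back to the page's kernel address before the page is unmapped and freed. Model:

    #include <stdint.h>
    #include <stddef.h>

    #define PG_SIZE 4096UL

    /* Sketch: inverse of page_index(); slot number back to address. */
    static uintptr_t page_addr_of(uintptr_t region_start, size_t index)
    {
            return region_start + index * PG_SIZE;
    }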