Lines matching refs:buffer in the binder allocator (drivers/android/binder_alloc.c in the Linux kernel). Each entry gives the source line number, the matched code, and the enclosing function; "argument" and "local" mark lines where buffer is declared as a function parameter or a local variable.
54 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) in binder_buffer_next() argument
56 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
59 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) in binder_buffer_prev() argument
61 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
65 struct binder_buffer *buffer) in binder_alloc_buffer_size() argument
67 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
68 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
69 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
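The matched lines above fully determine the two list helpers and the size computation. Buffers carry no explicit length field: a buffer's size is the gap between its user_data and the next buffer's user_data, or the end of the mapping for the last entry. Reconstructed from the lines above (the size_t return type is assumed, since line 65 only shows the argument list):

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	/* alloc->buffers is the list of all buffers, sorted by address */
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	/* the last buffer extends to the end of the mapped region */
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	/* otherwise the next buffer's start bounds this one */
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}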
77 struct binder_buffer *buffer; in binder_insert_free_buffer() local
91 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
92 BUG_ON(!buffer->free); in binder_insert_free_buffer()
94 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
110 struct binder_buffer *buffer; in binder_insert_allocated_buffer_locked() local
116 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer_locked()
117 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
119 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
121 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
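The allocator keeps two red-black trees: alloc->free_buffers, ordered by buffer size so allocation can find a best fit, and alloc->allocated_buffers, ordered by user_data so a userspace pointer can be mapped back to its buffer. The matched lines show the address comparison for the allocated tree; a sketch with the descent loop filled in from the standard rbtree idiom:

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc,
		struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();	/* no two buffers share a start address */
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

binder_insert_free_buffer() (lines 77-94) has the same shape but compares binder_alloc_buffer_size() of the two buffers instead of their addresses.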
135 struct binder_buffer *buffer; in binder_alloc_prepare_to_free_locked() local
138 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_prepare_to_free_locked()
139 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
141 if (user_ptr < buffer->user_data) { in binder_alloc_prepare_to_free_locked()
143 } else if (user_ptr > buffer->user_data) { in binder_alloc_prepare_to_free_locked()
151 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
153 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
154 return buffer; in binder_alloc_prepare_to_free_locked()
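To free a buffer, userspace hands back the pointer it received, and the allocator resolves it with an exact-match search of the allocated tree. The allow_user_free flag is cleared on success so a duplicate BC_FREE_BUFFER for the same pointer is rejected. A sketch; the failure value for a disallowed free is not in the listing (recent kernels return ERR_PTR(-EPERM)):

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc, unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > buffer->user_data) {
			n = n->rb_right;
		} else {
			/* guard against a double free from userspace */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);	/* assumed value */
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;	/* no buffer starts at user_ptr */
}

binder_alloc_prepare_to_free() (lines 174-179) is just this call made under the allocator lock.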
174 struct binder_buffer *buffer; in binder_alloc_prepare_to_free() local
177 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
179 return buffer; in binder_alloc_prepare_to_free()
210 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_lru_freelist_add()
347 alloc->pid, addr - alloc->buffer); in binder_install_single_page()
359 alloc->pid, __func__, addr - alloc->buffer, ret); in binder_install_single_page()
368 struct binder_buffer *buffer, in binder_install_buffer_pages() argument
374 start = buffer->user_data & PAGE_MASK; in binder_install_buffer_pages()
375 final = PAGE_ALIGN(buffer->user_data + size); in binder_install_buffer_pages()
381 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_install_buffer_pages()
410 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_lru_freelist_del()
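Physical pages backing the mapping live in a per-alloc array indexed by page offset from alloc->buffer, which is why the expression (page_addr - alloc->buffer) / PAGE_SIZE recurs in the freelist add/del paths above. binder_install_buffer_pages() walks every page a new buffer touches, including partial first and last pages. A sketch; install_page_at() is a hypothetical stand-in for the mostly elided binder_install_single_page() step seen at lines 347/359:

static int binder_install_buffer_pages(struct binder_alloc *alloc,
				       struct binder_buffer *buffer,
				       size_t size)
{
	unsigned long start, final, page_addr;

	/* a buffer may begin and end mid-page: cover every page it touches */
	start = buffer->user_data & PAGE_MASK;
	final = PAGE_ALIGN(buffer->user_data + size);

	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
		unsigned long index = (page_addr - alloc->buffer) / PAGE_SIZE;
		int ret;

		/* allocate and map a page at alloc->pages[index] if absent */
		ret = install_page_at(alloc, index, page_addr);
		if (ret)
			return ret;
	}
	return 0;
}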
434 struct binder_buffer *buffer; in debug_no_space_locked() local
444 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_no_space_locked()
445 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
453 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_no_space_locked()
454 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
477 struct binder_buffer *buffer; in debug_low_async_space_locked() local
494 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_low_async_space_locked()
495 if (buffer->pid != pid) in debug_low_async_space_locked()
497 if (!buffer->async_transaction) in debug_low_async_space_locked()
499 total_alloc_size += binder_alloc_buffer_size(alloc, buffer); in debug_low_async_space_locked()
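debug_low_async_space_locked() walks the whole allocated tree and sums the buffer sizes of one sender's outstanding async (oneway) transactions; the result feeds the oneway-spam detection that sets buffer->oneway_spam_suspect further down. Only the accounting loop is visible in the listing, so this sketch stops at the sum:

	size_t total_alloc_size = 0;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;	/* another sender's buffer */
		if (!buffer->async_transaction)
			continue;	/* sync buffers don't count */
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
	}
	/* compare total_alloc_size against a limit (elided from the match) */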
529 struct binder_buffer *buffer; in binder_alloc_new_buf_locked() local
541 buffer = ERR_PTR(-EPERM); in binder_alloc_new_buf_locked()
549 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
554 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
555 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
556 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
574 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
580 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
581 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
584 new_buffer->user_data = buffer->user_data + size; in binder_alloc_new_buf_locked()
585 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
593 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
601 next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; in binder_alloc_new_buf_locked()
602 curr_last_page = PAGE_ALIGN(buffer->user_data + size); in binder_alloc_new_buf_locked()
603 binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), in binder_alloc_new_buf_locked()
606 rb_erase(&buffer->rb_node, &alloc->free_buffers); in binder_alloc_new_buf_locked()
607 buffer->free = 0; in binder_alloc_new_buf_locked()
608 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
609 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
610 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
611 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
618 buffer->oneway_spam_suspect = true; in binder_alloc_new_buf_locked()
624 return buffer; in binder_alloc_new_buf_locked()
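binder_alloc_new_buf_locked() is the heart of the allocator: a best-fit search of the free tree, followed by an optional split. The descent keeps the smallest free buffer that still fits; if that buffer is strictly larger than the request, the caller-preallocated new_buffer becomes a free entry covering the unused tail. The buffer then migrates from the free tree to the allocated tree, and the pages it needs are pulled off the shrinker's freelist (lines 601-603). A condensed sketch, with debug output and the async-space accounting omitted and the size comparison in the walk inferred:

	struct rb_node *n = alloc->free_buffers.rb_node;
	struct rb_node *best_fit = NULL;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;	/* fits; keep looking for tighter */
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;	/* exact fit */
			break;
		}
	}
	if (!best_fit)
		return ERR_PTR(-ENOSPC);	/* address space exhausted */

	buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	if (buffer_size != size) {
		/* split: the unused tail becomes its own free buffer */
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
		new_buffer = NULL;	/* consumed by the split */
	}

	rb_erase(&buffer->rb_node, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;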
670 struct binder_buffer *buffer, *next; in binder_alloc_new_buf() local
697 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); in binder_alloc_new_buf()
698 if (IS_ERR(buffer)) { in binder_alloc_new_buf()
703 buffer->data_size = data_size; in binder_alloc_new_buf()
704 buffer->offsets_size = offsets_size; in binder_alloc_new_buf()
705 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf()
706 buffer->pid = current->tgid; in binder_alloc_new_buf()
709 ret = binder_install_buffer_pages(alloc, buffer, size); in binder_alloc_new_buf()
711 binder_alloc_free_buf(alloc, buffer); in binder_alloc_new_buf()
712 buffer = ERR_PTR(ret); in binder_alloc_new_buf()
715 return buffer; in binder_alloc_new_buf()
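binder_alloc_new_buf() wraps the locked allocator. The 'next' buffer struct is allocated up front, plausibly so the locked path never has to sleep in kzalloc(); page installation happens after the lock is dropped, and a failure there unwinds through the normal free path. A sketch of the flow (the specific lock primitive varies across kernel versions and is only marked in comments):

	/* preallocate the split candidate outside the lock */
	next = kzalloc(sizeof(*next), GFP_KERNEL);
	if (!next)
		return ERR_PTR(-ENOMEM);

	/* --- allocator lock taken --- */
	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
	if (IS_ERR(buffer)) {
		/* --- allocator lock dropped --- */
		goto out;	/* locked path consumed or freed 'next' */
	}

	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = current->tgid;
	/* --- allocator lock dropped --- */

	ret = binder_install_buffer_pages(alloc, buffer, size);
	if (ret) {
		binder_alloc_free_buf(alloc, buffer);
		buffer = ERR_PTR(ret);
	}
out:
	return buffer;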
718 static unsigned long buffer_start_page(struct binder_buffer *buffer) in buffer_start_page() argument
720 return buffer->user_data & PAGE_MASK; in buffer_start_page()
723 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer) in prev_buffer_end_page() argument
725 return (buffer->user_data - 1) & PAGE_MASK; in prev_buffer_end_page()
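Two page-arithmetic helpers, fully visible above. buffer_start_page() is the page holding a buffer's first byte. prev_buffer_end_page() exploits the back-to-back layout: the buffer laid out before this one ends at user_data - 1, so the page containing that byte is the predecessor's last page:

static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
	/* page containing this buffer's first byte */
	return buffer->user_data & PAGE_MASK;
}

static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
	/* page containing the last byte of the buffer laid out before
	 * this one (buffers are contiguous, so it ends at user_data - 1) */
	return (buffer->user_data - 1) & PAGE_MASK;
}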
729 struct binder_buffer *buffer) in binder_delete_free_buffer() argument
733 if (PAGE_ALIGNED(buffer->user_data)) in binder_delete_free_buffer()
736 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
737 prev = binder_buffer_prev(buffer); in binder_delete_free_buffer()
739 if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) in binder_delete_free_buffer()
742 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
743 next = binder_buffer_next(buffer); in binder_delete_free_buffer()
744 if (buffer_start_page(next) == buffer_start_page(buffer)) in binder_delete_free_buffer()
748 binder_lru_freelist_add(alloc, buffer_start_page(buffer), in binder_delete_free_buffer()
749 buffer_start_page(buffer) + PAGE_SIZE); in binder_delete_free_buffer()
751 list_del(&buffer->entry); in binder_delete_free_buffer()
752 kfree(buffer); in binder_delete_free_buffer()
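binder_delete_free_buffer() drops a redundant list entry after a merge and decides whether the page under its start byte can go back on the shrinker freelist: not if the buffer starts page-aligned (the range add in binder_free_buf_locked() already covered that page), and not if a neighbour's data still touches it. The conditions all appear above; the branch targets are inferred:

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next;

	if (PAGE_ALIGNED(buffer->user_data))
		goto skip_freelist;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	/* does the buffer before prev still end on our start page? */
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
		goto skip_freelist;

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		/* does the next buffer begin on our start page? */
		if (buffer_start_page(next) == buffer_start_page(buffer))
			goto skip_freelist;
	}

	/* nothing else touches this page: make it reclaimable */
	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
				buffer_start_page(buffer) + PAGE_SIZE);
skip_freelist:
	list_del(&buffer->entry);
	kfree(buffer);
}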
756 struct binder_buffer *buffer) in binder_free_buf_locked() argument
760 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
762 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
763 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
764 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
768 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
770 BUG_ON(buffer->free); in binder_free_buf_locked()
772 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
773 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
774 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
776 if (buffer->async_transaction) { in binder_free_buf_locked()
783 binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), in binder_free_buf_locked()
784 (buffer->user_data + buffer_size) & PAGE_MASK); in binder_free_buf_locked()
786 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
787 buffer->free = 1; in binder_free_buf_locked()
788 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
789 struct binder_buffer *next = binder_buffer_next(buffer); in binder_free_buf_locked()
796 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
797 struct binder_buffer *prev = binder_buffer_prev(buffer); in binder_free_buf_locked()
800 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
802 buffer = prev; in binder_free_buf_locked()
805 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
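The free path is the allocation path in reverse: return fully covered pages to the shrinker freelist, move the buffer from the allocated tree back to free, and coalesce with free neighbours so fragmentation cannot accumulate. The next->free and prev->free guards are inferred; everything else appears above:

	/* pages wholly inside this buffer become reclaimable */
	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
				(buffer->user_data + buffer_size) & PAGE_MASK);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;

	/* merge a free successor into this buffer */
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	/* merge this buffer into a free predecessor */
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);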
828 struct binder_buffer *buffer, in binder_alloc_get_page() argument
833 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
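binder_alloc_get_page() turns a (buffer, offset) pair into the backing page plus an offset within it, by rebasing against the start of the whole mapping. Only the rebasing expression is visible in the listing; the array shape of alloc->pages is an assumption here:

static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	/* offset of the byte within the whole mapped region */
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);

	*pgoffp = buffer_space_offset & ~PAGE_MASK;	/* offset in page */
	return alloc->pages[buffer_space_offset >> PAGE_SHIFT];
}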
850 struct binder_buffer *buffer) in binder_alloc_clear_buf() argument
852 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
860 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
877 struct binder_buffer *buffer) in binder_alloc_free_buf() argument
887 if (buffer->clear_on_free) { in binder_alloc_free_buf()
888 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
889 buffer->clear_on_free = false; in binder_alloc_free_buf()
892 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
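Buffers that carried sensitive data (clear_on_free) are zeroed before release. Note the ordering: the clear happens before the allocator lock is taken, since wiping the payload may touch many pages and would otherwise lengthen the critical section. The lock primitive shown is an assumption (older kernels use alloc->mutex, newer ones a spinlock):

void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/* zero out sensitive payloads before freeing the range */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);	/* assumed lock primitive */
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}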
912 struct binder_buffer *buffer; in binder_alloc_mmap_handler() local
932 alloc->buffer = vma->vm_start; in binder_alloc_mmap_handler()
943 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_mmap_handler()
944 if (!buffer) { in binder_alloc_mmap_handler()
950 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
951 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
952 buffer->free = 1; in binder_alloc_mmap_handler()
953 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
965 alloc->buffer = 0; in binder_alloc_mmap_handler()
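At mmap time the allocator records the VMA base and seeds its structures with a single free buffer spanning the whole mapping; every later buffer is carved out of it by the split logic above. A sketch of the success path; the error label and the async-space line are assumed, not shown in the listing:

	alloc->buffer = vma->vm_start;	/* base of the userspace mapping */

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_alloc_buf_struct_failed;	/* label name assumed */
	}

	/* initially, one free buffer covers the entire mapping */
	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);

	/* async transactions may consume at most half the space (assumed) */
	alloc->free_async_space = alloc->buffer_size / 2;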
983 struct binder_buffer *buffer; in binder_alloc_deferred_release() local
990 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_deferred_release()
993 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
995 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
996 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
997 buffer->clear_on_free = false; in binder_alloc_deferred_release()
999 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
1004 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
1006 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
1008 list_del(&buffer->entry); in binder_alloc_deferred_release()
1010 kfree(buffer); in binder_alloc_deferred_release()
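Teardown mirrors the structures: first every still-allocated buffer is forcibly freed (clearing it if requested), then the address-ordered list is drained, at which point only free entries should remain. Reassembled from the lines above, with the loop forms inferred:

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* the owning transaction must already be gone */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);
		list_del(&buffer->entry);
		kfree(buffer);
	}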
1058 struct binder_buffer *buffer; in binder_alloc_print_allocated() local
1063 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_print_allocated()
1065 buffer->debug_id, in binder_alloc_print_allocated()
1066 buffer->user_data - alloc->buffer, in binder_alloc_print_allocated()
1067 buffer->data_size, buffer->offsets_size, in binder_alloc_print_allocated()
1068 buffer->extra_buffers_size, in binder_alloc_print_allocated()
1069 buffer->transaction ? "active" : "delivered"); in binder_alloc_print_allocated()
1169 page_addr = alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
1313 struct binder_buffer *buffer, in check_buffer() argument
1316 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1321 !buffer->free && in check_buffer()
1322 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
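check_buffer() validates a copy region before any of the copy helpers touch memory. Lines 1321-1322 supply the last two conditions; the size, overflow, and alignment checks are inferred from the mainline function:

static bool check_buffer(struct binder_alloc *alloc,
			 struct binder_buffer *buffer,
			 binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&		/* region fits the buffer */
		offset <= buffer_size - bytes &&	/* no overflow past end */
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		/* once userspace may free it, only in-flight txns may copy */
		(!buffer->allow_user_free || !buffer->transaction);
}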
1339 struct binder_buffer *buffer, in binder_alloc_copy_user_to_buffer() argument
1344 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1354 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1371 struct binder_buffer *buffer, in binder_alloc_do_buffer_copy() argument
1377 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1385 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1401 struct binder_buffer *buffer, in binder_alloc_copy_to_buffer() argument
1406 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1412 struct binder_buffer *buffer, in binder_alloc_copy_from_buffer() argument
1416 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
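Because the backing pages are never mapped contiguously in the kernel, every copy helper loops page by page: resolve the page with binder_alloc_get_page(), copy up to the page boundary, advance. A sketch of the user-to-buffer direction; the kmap_local_page() mapping step is an assumption about how the page is made addressable:

unsigned long binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
					       struct binder_buffer *buffer,
					       binder_size_t buffer_offset,
					       const void __user *from,
					       size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;	/* nothing copied */

	while (bytes) {
		unsigned long size, ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;	/* bytes not copied */
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

binder_alloc_do_buffer_copy() follows the same per-page loop but memcpy()s between a kernel pointer and the mapped page, with its bool flag selecting the to/from direction, as the two thin wrappers at lines 1401-1416 show.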