Lines Matching refs:alloc (all hits fall in the Android binder buffer allocator, binder_alloc.c; each entry shows the source line number, the matched code, and the enclosing function)

66 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,  in binder_alloc_buffer_size()  argument
69 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
70 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
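
A buffer carries no explicit size field: its size is the distance to the next buffer in the alloc->buffers list, or to the end of the mapped region for the last one. A minimal reconstruction of the whole function from the two matched lines; binder_buffer_next() stands in for the elided list-walking step and is an assumption:

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        /* Last buffer: runs to the end of the mapped region. */
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return alloc->buffer + alloc->buffer_size - buffer->user_data;
        /* Otherwise: the gap up to the next buffer's start. */
        return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
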
74 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument
77 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
85 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer()
89 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
96 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
104 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
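
Free buffers live in an rb-tree keyed by size, so allocation can do a best-fit search; the matches show the tree root, the size computations, and the final recolor. A sketch of the insertion walk, with the comparison logic in the gap (source lines 97-103) assumed:

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t new_buffer_size, buffer_size;

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                /* Order by size; equal sizes descend right. */
                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

binder_insert_allocated_buffer_locked() (next group) is the same walk over alloc->allocated_buffers, but keyed by user address rather than size, which is what lets binder_alloc_prepare_to_free_locked() find a buffer from a raw user pointer.
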
108 struct binder_alloc *alloc, struct binder_buffer *new_buffer) in binder_insert_allocated_buffer_locked() argument
110 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
129 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
133 struct binder_alloc *alloc, in binder_alloc_prepare_to_free_locked() argument
136 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
176 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, in binder_alloc_prepare_to_free() argument
181 mutex_lock(&alloc->mutex); in binder_alloc_prepare_to_free()
182 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
183 mutex_unlock(&alloc->mutex); in binder_alloc_prepare_to_free()
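
These matches show the locking convention used throughout the file: the public entry point takes alloc->mutex and delegates to a *_locked variant (binder_alloc_new_buf() and binder_alloc_free_buf() below repeat the pattern). Reconstructed wrapper; the uintptr_t type of user_ptr is an assumption inferred from the address lookup it feeds:

struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
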
187 static int binder_update_page_range(struct binder_alloc *alloc, int allocate, in binder_update_page_range() argument
198 "%d: %s pages %pK-%pK\n", alloc->pid, in binder_update_page_range()
204 trace_binder_update_page_range(alloc, allocate, start, end); in binder_update_page_range()
210 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
217 if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) in binder_update_page_range()
218 mm = alloc->vma_vm_mm; in binder_update_page_range()
222 vma = alloc->vma; in binder_update_page_range()
227 alloc->pid); in binder_update_page_range()
236 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
237 page = &alloc->pages[index]; in binder_update_page_range()
240 trace_binder_alloc_lru_start(alloc, index); in binder_update_page_range()
245 trace_binder_alloc_lru_end(alloc, index); in binder_update_page_range()
252 trace_binder_alloc_page_start(alloc, index); in binder_update_page_range()
258 alloc->pid, page_addr); in binder_update_page_range()
261 page->alloc = alloc; in binder_update_page_range()
268 alloc->pid, user_page_addr); in binder_update_page_range()
272 if (index + 1 > alloc->pages_high) in binder_update_page_range()
273 alloc->pages_high = index + 1; in binder_update_page_range()
275 trace_binder_alloc_page_end(alloc, index); in binder_update_page_range()
289 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
290 page = &alloc->pages[index]; in binder_update_page_range()
292 trace_binder_free_lru_start(alloc, index); in binder_update_page_range()
297 trace_binder_free_lru_end(alloc, index); in binder_update_page_range()
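
binder_update_page_range() lazily populates (allocate == 1) or releases (allocate == 0) the physical pages behind an address range; a page's bookkeeping slot is found from its offset relative to alloc->buffer. On allocation, a page still cached on the binder LRU is simply delisted; otherwise a fresh page is allocated and inserted into the user VMA, and pages_high tracks the high-water mark. A condensed sketch of the allocate loop (tracing, the mmget/vma acquisition, and error unwinding elided; glue between the matched lines is assumed):

for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        size_t index = (page_addr - alloc->buffer) / PAGE_SIZE;
        struct binder_lru_page *page = &alloc->pages[index];

        if (page->page_ptr) {
                /* Still resident: just pull it off the reclaim LRU. */
                list_lru_del(&binder_alloc_lru, &page->lru);
                continue;
        }

        page->page_ptr = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page->page_ptr)
                goto err_alloc_page_failed;     /* label assumed */
        page->alloc = alloc;
        INIT_LIST_HEAD(&page->lru);

        /* Make the page visible at the matching user address. */
        if (vm_insert_page(vma, (uintptr_t)page_addr, page->page_ptr))
                goto err_vm_insert_page_failed; /* label assumed */

        if (index + 1 > alloc->pages_high)
                alloc->pages_high = index + 1;  /* high-water mark */
}

The free path (source lines 289-297) runs the same index computation but, rather than freeing immediately, parks each page on the binder LRU so the shrinker callback binder_alloc_free_page() can reclaim it later.
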
318 static inline void binder_alloc_set_vma(struct binder_alloc *alloc, in binder_alloc_set_vma() argument
322 alloc->vma_vm_mm = vma->vm_mm; in binder_alloc_set_vma()
330 alloc->vma = vma; in binder_alloc_set_vma()
334 struct binder_alloc *alloc) in binder_alloc_get_vma() argument
338 if (alloc->vma) { in binder_alloc_get_vma()
341 vma = alloc->vma; in binder_alloc_get_vma()
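
These two helpers publish and observe alloc->vma. The lines between the matches (source lines 323-329 and 339-340) contain no "alloc" reference and are elided from this listing; in this publish/observe pattern they would hold a write barrier paired with a read barrier, so the barriers below are an assumption:

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                                        struct vm_area_struct *vma)
{
        if (vma)
                alloc->vma_vm_mm = vma->vm_mm;
        /* Publish fully initialised state before the vma pointer. */
        smp_wmb();                              /* assumed */
        alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
                struct binder_alloc *alloc)
{
        struct vm_area_struct *vma = NULL;

        if (alloc->vma) {
                /* Pairs with the assumed smp_wmb() above. */
                smp_rmb();
                vma = alloc->vma;
        }
        return vma;
}
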
347 struct binder_alloc *alloc, in binder_alloc_new_buf_locked() argument
353 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
362 if (!binder_alloc_get_vma(alloc)) { in binder_alloc_new_buf_locked()
364 alloc->pid); in binder_alloc_new_buf_locked()
374 alloc->pid, data_size, offsets_size); in binder_alloc_new_buf_locked()
381 alloc->pid, extra_buffers_size); in binder_alloc_new_buf_locked()
385 alloc->free_async_space < size + sizeof(struct binder_buffer)) { in binder_alloc_new_buf_locked()
388 alloc->pid, size); in binder_alloc_new_buf_locked()
398 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
418 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in binder_alloc_new_buf_locked()
421 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
427 for (n = rb_first(&alloc->free_buffers); n != NULL; in binder_alloc_new_buf_locked()
430 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
437 alloc->pid, size); in binder_alloc_new_buf_locked()
445 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
450 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
459 ret = binder_update_page_range(alloc, 1, (void __user *) in binder_alloc_new_buf_locked()
470 __func__, alloc->pid); in binder_alloc_new_buf_locked()
476 binder_insert_free_buffer(alloc, new_buffer); in binder_alloc_new_buf_locked()
479 rb_erase(best_fit, &alloc->free_buffers); in binder_alloc_new_buf_locked()
482 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
485 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
491 alloc->free_async_space -= size + sizeof(struct binder_buffer); in binder_alloc_new_buf_locked()
494 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
499 binder_update_page_range(alloc, 0, (void __user *) in binder_alloc_new_buf_locked()
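
binder_alloc_new_buf_locked() is the allocator core. After validating the sizes and the async space budget, it walks the size-ordered free tree for a best fit: descend left when the request fits (remembering the candidate), right when it does not. A sketch of the search, reconstructed from the matches:

        struct rb_node *n = alloc->free_buffers.rb_node;
        struct rb_node *best_fit = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;           /* fits; try a tighter one */
                        n = n->rb_left;
                } else if (size > buffer_size) {
                        n = n->rb_right;        /* too small; go bigger */
                } else {
                        best_fit = n;           /* exact fit */
                        break;
                }
        }
        if (best_fit == NULL)
                return ERR_PTR(-ENOSPC);        /* error value assumed */

The remaining matches show the follow-through: the chosen range's pages are populated with binder_update_page_range(alloc, 1, ...), the node moves from free_buffers to allocated_buffers, any surplus tail is re-inserted as a new free buffer, and for async transactions free_async_space is debited by size + sizeof(struct binder_buffer).
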
520 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, in binder_alloc_new_buf() argument
528 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
529 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, in binder_alloc_new_buf()
531 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
546 static void binder_delete_free_buffer(struct binder_alloc *alloc, in binder_delete_free_buffer() argument
551 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
558 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
562 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
568 alloc->pid, in binder_delete_free_buffer()
577 alloc->pid, buffer->user_data); in binder_delete_free_buffer()
584 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
587 binder_update_page_range(alloc, 0, buffer_start_page(buffer), in binder_delete_free_buffer()
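
Adjacent buffers can share a physical page, so binder_delete_free_buffer() only releases a page that no neighbour still touches; the buffer_start_page() call in the last match hints at the helpers involved. Their bodies are not in this listing; a plausible shape, assuming plain PAGE_MASK rounding:

/* First page touched by this buffer. */
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
        return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

/* Last page touched by the data that ends where this buffer starts. */
static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void __user *)(((uintptr_t)buffer->user_data - 1) & PAGE_MASK);
}

Only if neither the previous buffer's end nor the next buffer's start lands on the same page is the page under the stale header handed back via binder_update_page_range(alloc, 0, ...).
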
594 static void binder_free_buf_locked(struct binder_alloc *alloc, in binder_free_buf_locked() argument
599 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
607 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
612 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
613 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
616 alloc->free_async_space += size + sizeof(struct binder_buffer); in binder_free_buf_locked()
620 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
623 binder_update_page_range(alloc, 0, in binder_free_buf_locked()
628 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
630 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
634 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
635 binder_delete_free_buffer(alloc, next); in binder_free_buf_locked()
638 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
642 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
643 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
647 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
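
binder_free_buf_locked() returns the buffer's pages to the page-range machinery, removes it from the allocated tree, and then coalesces it with free neighbours so the free list never holds two adjacent free chunks. A sketch of the merge step (reconstructed; the next/prev lookups use list_entry() directly where the original may use helpers):

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;

        /* Merge with the following buffer if it is also free. */
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = list_entry(buffer->entry.next,
                                struct binder_buffer, entry);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }

        /* Merge into the preceding buffer if that one is free. */
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = list_entry(buffer->entry.prev,
                                struct binder_buffer, entry);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);

For async buffers the space debited at allocation time is credited back first (free_async_space += size + sizeof(struct binder_buffer), source line 616).
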
657 void binder_alloc_free_buf(struct binder_alloc *alloc, in binder_alloc_free_buf() argument
660 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
661 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
662 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
678 int binder_alloc_mmap_handler(struct binder_alloc *alloc, in binder_alloc_mmap_handler() argument
686 if (alloc->buffer) { in binder_alloc_mmap_handler()
692 alloc->buffer = (void __user *)vma->vm_start; in binder_alloc_mmap_handler()
695 alloc->pages = kzalloc(sizeof(alloc->pages[0]) * in binder_alloc_mmap_handler()
698 if (alloc->pages == NULL) { in binder_alloc_mmap_handler()
703 alloc->buffer_size = vma->vm_end - vma->vm_start; in binder_alloc_mmap_handler()
712 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
713 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
715 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
716 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
717 binder_alloc_set_vma(alloc, vma); in binder_alloc_mmap_handler()
718 mmgrab(alloc->vma_vm_mm); in binder_alloc_mmap_handler()
723 kfree(alloc->pages); in binder_alloc_mmap_handler()
724 alloc->pages = NULL; in binder_alloc_mmap_handler()
727 alloc->buffer = NULL; in binder_alloc_mmap_handler()
731 alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); in binder_alloc_mmap_handler()
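
binder_alloc_mmap_handler() wires a fresh mmap to the allocator: it records the base address, sizes the per-page bookkeeping array, seeds the free list with a single buffer spanning the whole range, and reserves half the space for async transactions. A condensed sketch (the original's failure_string/goto unwinding is elided; error values are assumptions):

int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        struct binder_buffer *buffer;

        if (alloc->buffer)
                return -EBUSY;          /* already mapped; errno assumed */

        alloc->buffer = (void __user *)vma->vm_start;
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        /* One bookkeeping slot per page of the mapping. */
        alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
                               (alloc->buffer_size / PAGE_SIZE), GFP_KERNEL);
        if (alloc->pages == NULL)
                return -ENOMEM;

        /* The whole range starts life as a single free buffer. */
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;         /* real code also unwinds pages[] */
        buffer->user_data = alloc->buffer;
        buffer->free = 1;
        list_add(&buffer->entry, &alloc->buffers);
        binder_insert_free_buffer(alloc, buffer);

        /* Async transactions may consume at most half the space. */
        alloc->free_async_space = alloc->buffer_size / 2;
        binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);       /* pinned until deferred release */
        return 0;
}
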
736 void binder_alloc_deferred_release(struct binder_alloc *alloc) in binder_alloc_deferred_release() argument
743 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
744 BUG_ON(alloc->vma); in binder_alloc_deferred_release()
746 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
752 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
756 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
757 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
762 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
767 if (alloc->pages) { in binder_alloc_deferred_release()
770 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
774 if (!alloc->pages[i].page_ptr) in binder_alloc_deferred_release()
778 &alloc->pages[i].lru); in binder_alloc_deferred_release()
779 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
782 __func__, alloc->pid, i, page_addr, in binder_alloc_deferred_release()
784 __free_page(alloc->pages[i].page_ptr); in binder_alloc_deferred_release()
787 kfree(alloc->pages); in binder_alloc_deferred_release()
789 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
790 if (alloc->vma_vm_mm) in binder_alloc_deferred_release()
791 mmdrop(alloc->vma_vm_mm); in binder_alloc_deferred_release()
795 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
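
Teardown mirrors setup. With the vma already gone (BUG_ON(alloc->vma)), any buffer a transaction still held is force-freed, the headers are destroyed, resident pages come off the binder LRU and are released, and the mm reference taken at mmap time is dropped. Condensed sketch (the buffers/page_count counters and debug prints visible in the matches are omitted):

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct binder_buffer *buffer;
        struct rb_node *n;
        int i;

        mutex_lock(&alloc->mutex);
        BUG_ON(alloc->vma);

        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                binder_free_buf_locked(alloc, buffer);
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                list_del(&buffer->entry);
                kfree(buffer);
        }

        if (alloc->pages) {
                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        if (!alloc->pages[i].page_ptr)
                                continue;
                        list_lru_del(&binder_alloc_lru,
                                     &alloc->pages[i].lru);
                        __free_page(alloc->pages[i].page_ptr);
                }
                kfree(alloc->pages);
        }
        mutex_unlock(&alloc->mutex);

        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);       /* pairs with mmgrab() */
}
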
817 struct binder_alloc *alloc) in binder_alloc_print_allocated() argument
821 mutex_lock(&alloc->mutex); in binder_alloc_print_allocated()
822 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_print_allocated()
825 mutex_unlock(&alloc->mutex); in binder_alloc_print_allocated()
834 struct binder_alloc *alloc) in binder_alloc_print_pages() argument
842 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
843 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
844 page = &alloc->pages[i]; in binder_alloc_print_pages()
852 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
854 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
863 int binder_alloc_get_allocated_count(struct binder_alloc *alloc) in binder_alloc_get_allocated_count() argument
868 mutex_lock(&alloc->mutex); in binder_alloc_get_allocated_count()
869 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
871 mutex_unlock(&alloc->mutex); in binder_alloc_get_allocated_count()
884 void binder_alloc_vma_close(struct binder_alloc *alloc) in binder_alloc_vma_close() argument
886 binder_alloc_set_vma(alloc, NULL); in binder_alloc_vma_close()
907 struct binder_alloc *alloc; in binder_alloc_free_page() local
912 alloc = page->alloc; in binder_alloc_free_page()
913 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
919 index = page - alloc->pages; in binder_alloc_free_page()
920 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
922 mm = alloc->vma_vm_mm; in binder_alloc_free_page()
927 vma = binder_alloc_get_vma(alloc); in binder_alloc_free_page()
933 trace_binder_unmap_user_start(alloc, index); in binder_alloc_free_page()
937 trace_binder_unmap_user_end(alloc, index); in binder_alloc_free_page()
942 trace_binder_unmap_kernel_start(alloc, index); in binder_alloc_free_page()
947 trace_binder_unmap_kernel_end(alloc, index); in binder_alloc_free_page()
950 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
957 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
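
binder_alloc_free_page() is the list_lru walk callback behind the binder shrinker. Because it runs in reclaim context it must not sleep on alloc->mutex, hence the mutex_trylock(); the page's user address is recovered purely from its slot index. Condensed sketch (the lru-lock and mmget/mmput juggling around the zap are elided; the callback signature follows the list_lru walk API of this kernel generation and is an assumption):

static enum lru_status binder_alloc_free_page(struct list_head *item,
                                              struct list_lru_one *lru,
                                              spinlock_t *lock, void *cb_arg)
{
        struct binder_lru_page *page = container_of(item,
                                        struct binder_lru_page, lru);
        struct binder_alloc *alloc = page->alloc;
        struct vm_area_struct *vma;
        uintptr_t page_addr;
        size_t index;

        /* Reclaim must never block on a busy allocator. */
        if (!mutex_trylock(&alloc->mutex))
                return LRU_SKIP;

        index = page - alloc->pages;            /* slot -> user address */
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

        vma = binder_alloc_get_vma(alloc);
        if (vma)                                /* drop the user mapping */
                zap_page_range(vma, page_addr, PAGE_SIZE);

        list_lru_isolate(lru, item);            /* leave the LRU */
        __free_page(page->page_ptr);            /* then free the page */
        page->page_ptr = NULL;

        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED;
}
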
992 void binder_alloc_init(struct binder_alloc *alloc) in binder_alloc_init() argument
994 alloc->pid = current->group_leader->pid; in binder_alloc_init()
995 mutex_init(&alloc->mutex); in binder_alloc_init()
996 INIT_LIST_HEAD(&alloc->buffers); in binder_alloc_init()
1030 static inline bool check_buffer(struct binder_alloc *alloc, in check_buffer() argument
1034 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1062 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, in binder_alloc_get_page() argument
1068 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
1073 lru_page = &alloc->pages[index]; in binder_alloc_get_page()
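
binder_alloc_get_page() translates a (buffer, offset) pair into the backing struct page plus the offset within that page: the buffer's own offset from alloc->buffer is added first, then split into a page index and an in-page offset. Reconstruction from the matches; the pgoffp out-parameter is inferred from the copy loops below:

static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
                                          struct binder_buffer *buffer,
                                          binder_size_t buffer_offset,
                                          pgoff_t *pgoffp)
{
        binder_size_t buffer_space_offset = buffer_offset +
                (buffer->user_data - alloc->buffer);
        pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
        size_t index = buffer_space_offset >> PAGE_SHIFT;
        struct binder_lru_page *lru_page = &alloc->pages[index];

        *pgoffp = pgoff;
        return lru_page->page_ptr;
}
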
1091 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_user_to_buffer() argument
1097 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1107 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1122 static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc, in binder_alloc_do_buffer_copy() argument
1130 BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes)); in binder_alloc_do_buffer_copy()
1139 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1160 void binder_alloc_copy_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_to_buffer() argument
1166 binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1170 void binder_alloc_copy_from_buffer(struct binder_alloc *alloc, in binder_alloc_copy_from_buffer() argument
1176 binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
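
All of the copy helpers share one loop shape: resolve the current page with binder_alloc_get_page(), copy at most up to the page boundary, advance, repeat. binder_alloc_copy_user_to_buffer() does this with copy_from_user(); the kernel-side binder_alloc_copy_to_buffer()/binder_alloc_copy_from_buffer() pair funnels through binder_alloc_do_buffer_copy() with a direction flag. Sketch of the kernel-side loop (kmap_atomic() for the temporary mapping is an assumption):

        while (bytes) {
                struct page *page;
                pgoff_t pgoff;
                size_t size;
                void *base;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);

                base = kmap_atomic(page);       /* short-lived mapping */
                if (to_buffer)
                        memcpy(base + pgoff, ptr, size);
                else
                        memcpy(ptr, base + pgoff, size);
                kunmap_atomic(base);

                bytes -= size;
                ptr += size;
                buffer_offset += size;
        }
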