Lines Matching +full:async +full:-prefix
1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 2007-2017 Google, Inc.
52 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
57 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
63 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
64 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
65 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
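A buffer's size is never stored; it is implied by the gap to the next buffer in the address-ordered alloc->buffers list. A sketch of the size helper, reconstructed from the matched lines above (signature assumed from context):

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	/* Last buffer: its size runs to the end of the mapped region. */
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	/* Otherwise: the gap up to the next buffer's start address. */
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}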
71 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
77 BUG_ON(!new_buffer->free); in binder_insert_free_buffer()
83 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
88 BUG_ON(!buffer->free); in binder_insert_free_buffer()
93 p = &parent->rb_left; in binder_insert_free_buffer()
95 p = &parent->rb_right; in binder_insert_free_buffer()
97 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_free_buffer()
98 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
104 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
108 BUG_ON(new_buffer->free); in binder_insert_allocated_buffer_locked()
113 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
115 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
116 p = &parent->rb_left; in binder_insert_allocated_buffer_locked()
117 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
118 p = &parent->rb_right; in binder_insert_allocated_buffer_locked()
122 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_allocated_buffer_locked()
123 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
130 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
138 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
140 if (uptr < buffer->user_data) in binder_alloc_prepare_to_free_locked()
141 n = n->rb_left; in binder_alloc_prepare_to_free_locked()
142 else if (uptr > buffer->user_data) in binder_alloc_prepare_to_free_locked()
143 n = n->rb_right; in binder_alloc_prepare_to_free_locked()
150 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
151 return ERR_PTR(-EPERM); in binder_alloc_prepare_to_free_locked()
152 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
160 * binder_alloc_prepare_to_free() - get buffer given user ptr
175 mutex_lock(&alloc->mutex); in binder_alloc_prepare_to_free()
177 mutex_unlock(&alloc->mutex); in binder_alloc_prepare_to_free()
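Reconstructed from the two matched lines, the unlocked entry point is likely just a mutex wrapper around the _locked lookup (a sketch; the exact signature may differ by kernel version):

struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}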
192 "%d: %s pages %pK-%pK\n", alloc->pid, in binder_update_page_range()
204 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
205 if (!page->page_ptr) { in binder_update_page_range()
211 if (need_mm && mmget_not_zero(alloc->mm)) in binder_update_page_range()
212 mm = alloc->mm; in binder_update_page_range()
216 vma = alloc->vma; in binder_update_page_range()
222 alloc->pid); in binder_update_page_range()
231 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
232 page = &alloc->pages[index]; in binder_update_page_range()
234 if (page->page_ptr) { in binder_update_page_range()
237 on_lru = list_lru_del(&binder_alloc_lru, &page->lru); in binder_update_page_range()
248 page->page_ptr = alloc_page(GFP_KERNEL | in binder_update_page_range()
251 if (!page->page_ptr) { in binder_update_page_range()
253 alloc->pid, page_addr); in binder_update_page_range()
256 page->alloc = alloc; in binder_update_page_range()
257 INIT_LIST_HEAD(&page->lru); in binder_update_page_range()
263 alloc->pid, user_page_addr); in binder_update_page_range()
267 if (index + 1 > alloc->pages_high) in binder_update_page_range()
268 alloc->pages_high = index + 1; in binder_update_page_range()
279 for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) { in binder_update_page_range()
283 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
284 page = &alloc->pages[index]; in binder_update_page_range()
288 ret = list_lru_add(&binder_alloc_lru, &page->lru); in binder_update_page_range()
297 __free_page(page->page_ptr); in binder_update_page_range()
298 page->page_ptr = NULL; in binder_update_page_range()
309 return vma ? -ENOMEM : -ESRCH; in binder_update_page_range()
316 smp_store_release(&alloc->vma, vma); in binder_alloc_set_vma()
323 return smp_load_acquire(&alloc->vma); in binder_alloc_get_vma()
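These two accessors form an acquire/release pair so that a reader of alloc->vma on another CPU observes a fully published VMA. A sketch of the pair (the inline qualifiers and comments are assumptions from context):

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	/* pairs with smp_load_acquire() in binder_alloc_get_vma() */
	smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	/* pairs with smp_store_release() in binder_alloc_set_vma() */
	return smp_load_acquire(&alloc->vma);
}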
331 * for the low async space is likely to try to send another async txn, in debug_low_async_space_locked()
340 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
343 if (buffer->pid != pid) in debug_low_async_space_locked()
345 if (!buffer->async_transaction) in debug_low_async_space_locked()
353 * async space (which is 25% of total buffer size). Oneway spam is only in debug_low_async_space_locked()
356 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
359 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
360 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
361 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
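Putting the matched fragments together, the spam check walks every allocated buffer, counts only async buffers owned by the offending pid, and trips when the caller holds more than 50 such buffers or more than a quarter of the whole mapping. A condensed sketch (debug print omitted; loop body assembled from the matched lines):

static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
	size_t total_alloc_size = 0, num_buffers = 0;
	struct binder_buffer *buffer;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid || !buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/* >50 outstanding async buffers, or over 25% of the whole map */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}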
376 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
389 alloc->pid); in binder_alloc_new_buf_locked()
390 return ERR_PTR(-ESRCH); in binder_alloc_new_buf_locked()
398 "%d: got transaction with invalid size %zd-%zd\n", in binder_alloc_new_buf_locked()
399 alloc->pid, data_size, offsets_size); in binder_alloc_new_buf_locked()
400 return ERR_PTR(-EINVAL); in binder_alloc_new_buf_locked()
406 alloc->pid, extra_buffers_size); in binder_alloc_new_buf_locked()
407 return ERR_PTR(-EINVAL); in binder_alloc_new_buf_locked()
410 /* Pad 0-size buffers so they get assigned unique addresses */ in binder_alloc_new_buf_locked()
413 if (is_async && alloc->free_async_space < size) { in binder_alloc_new_buf_locked()
415 "%d: binder_alloc_buf size %zd failed, no async space left\n", in binder_alloc_new_buf_locked()
416 alloc->pid, size); in binder_alloc_new_buf_locked()
417 return ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
422 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
427 n = n->rb_left; in binder_alloc_new_buf_locked()
429 n = n->rb_right; in binder_alloc_new_buf_locked()
443 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in binder_alloc_new_buf_locked()
452 for (n = rb_first(&alloc->free_buffers); n != NULL; in binder_alloc_new_buf_locked()
463 alloc->pid, size); in binder_alloc_new_buf_locked()
469 return ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
478 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
481 (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); in binder_alloc_new_buf_locked()
484 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); in binder_alloc_new_buf_locked()
488 PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); in binder_alloc_new_buf_locked()
498 __func__, alloc->pid); in binder_alloc_new_buf_locked()
501 new_buffer->user_data = (u8 __user *)buffer->user_data + size; in binder_alloc_new_buf_locked()
502 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
503 new_buffer->free = 1; in binder_alloc_new_buf_locked()
507 rb_erase(best_fit, &alloc->free_buffers); in binder_alloc_new_buf_locked()
508 buffer->free = 0; in binder_alloc_new_buf_locked()
509 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
513 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
514 buffer->data_size = data_size; in binder_alloc_new_buf_locked()
515 buffer->offsets_size = offsets_size; in binder_alloc_new_buf_locked()
516 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
517 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf_locked()
518 buffer->pid = pid; in binder_alloc_new_buf_locked()
519 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
521 alloc->free_async_space -= size; in binder_alloc_new_buf_locked()
523 "%d: binder_alloc_buf size %zd async free %zd\n", in binder_alloc_new_buf_locked()
524 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
525 if (alloc->free_async_space < alloc->buffer_size / 10) { in binder_alloc_new_buf_locked()
528 * of async space left (which is less than 10% of total in binder_alloc_new_buf_locked()
531 buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); in binder_alloc_new_buf_locked()
533 alloc->oneway_spam_detected = false; in binder_alloc_new_buf_locked()
540 PAGE_ALIGN((uintptr_t)buffer->user_data), in binder_alloc_new_buf_locked()
542 return ERR_PTR(-ENOMEM); in binder_alloc_new_buf_locked()
546 * binder_alloc_new_buf() - Allocate a new binder buffer
550 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
551 * @is_async: buffer for async transaction
557 * pointer-sized boundary)
559 * Return: The allocated buffer or %ERR_PTR(-errno) if error
570 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
573 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
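As with prepare-to-free, the public allocator is likely a thin lock wrapper; a sketch reconstructed around the matched mutex lines (parameter list inferred from the kernel-doc above and the matched pid assignment):

struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async,
					   int pid)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async, pid);
	mutex_unlock(&alloc->mutex);
	return buffer;
}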
579 return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); in buffer_start_page()
585 (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); in prev_buffer_end_page()
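These page helpers mark the boundary cases used when deleting a free buffer: buffer_start_page() is the page containing the buffer's first byte, while prev_buffer_end_page() masks user_data - 1, i.e. the page holding the previous buffer's last byte. Reconstructed from the matched lines:

static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	/* user_data - 1 is the last byte of the previous buffer */
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}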
594 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
596 BUG_ON(!prev->free); in binder_delete_free_buffer()
601 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
602 prev->user_data); in binder_delete_free_buffer()
605 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
611 alloc->pid, in binder_delete_free_buffer()
612 buffer->user_data, in binder_delete_free_buffer()
613 next->user_data); in binder_delete_free_buffer()
617 if (PAGE_ALIGNED(buffer->user_data)) { in binder_delete_free_buffer()
620 alloc->pid, buffer->user_data); in binder_delete_free_buffer()
627 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
628 prev->user_data, in binder_delete_free_buffer()
629 next ? next->user_data : NULL); in binder_delete_free_buffer()
633 list_del(&buffer->entry); in binder_delete_free_buffer()
644 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
645 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
646 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
650 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
652 BUG_ON(buffer->free); in binder_free_buf_locked()
654 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
655 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
656 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
658 if (buffer->async_transaction) { in binder_free_buf_locked()
659 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
661 "%d: binder_free_buf size %zd async free %zd\n", in binder_free_buf_locked()
662 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
666 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), in binder_free_buf_locked()
668 buffer->user_data + buffer_size) & PAGE_MASK)); in binder_free_buf_locked()
670 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
671 buffer->free = 1; in binder_free_buf_locked()
672 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
675 if (next->free) { in binder_free_buf_locked()
676 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
680 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
683 if (prev->free) { in binder_free_buf_locked()
685 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
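A worked example of the size computation at the top of binder_free_buf_locked(), using hypothetical sizes on a 64-bit kernel where sizeof(void *) == 8:

/*
 * data_size = 20          -> ALIGN(20, 8) = 24
 * offsets_size = 16       -> ALIGN(16, 8) = 16
 * extra_buffers_size = 4  -> ALIGN(4, 8)  = 8
 * size = 24 + 16 + 8 = 48 bytes
 * (buffer_size from binder_alloc_buffer_size() may be larger, since a
 *  buffer implicitly spans up to the start of its list neighbour.)
 */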
695 * binder_alloc_free_buf() - free a binder buffer
712 if (buffer->clear_on_free) { in binder_alloc_free_buf()
714 buffer->clear_on_free = false; in binder_alloc_free_buf()
716 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
718 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
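A sketch of the full free path reconstructed from the matched lines; note that the clear_on_free scrub runs before taking the mutex, since zeroing a large buffer under alloc->mutex would only add contention and the lock is not needed for correctness there:

void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/* scrub sensitive payloads outside the allocator lock */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}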
722 * binder_alloc_mmap_handler() - map virtual address space for proc
731 * -EBUSY = address space already mapped
732 * -ENOMEM = failed to map memory to given address space
741 if (unlikely(vma->vm_mm != alloc->mm)) { in binder_alloc_mmap_handler()
742 ret = -EINVAL; in binder_alloc_mmap_handler()
743 failure_string = "invalid vma->vm_mm"; in binder_alloc_mmap_handler()
748 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
749 ret = -EBUSY; in binder_alloc_mmap_handler()
753 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
757 alloc->buffer = (void __user *)vma->vm_start; in binder_alloc_mmap_handler()
759 alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
760 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
762 if (alloc->pages == NULL) { in binder_alloc_mmap_handler()
763 ret = -ENOMEM; in binder_alloc_mmap_handler()
770 ret = -ENOMEM; in binder_alloc_mmap_handler()
775 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
776 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
777 buffer->free = 1; in binder_alloc_mmap_handler()
779 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
787 kfree(alloc->pages); in binder_alloc_mmap_handler()
788 alloc->pages = NULL; in binder_alloc_mmap_handler()
790 alloc->buffer = NULL; in binder_alloc_mmap_handler()
792 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
797 "%s: %d %lx-%lx %s failed %d\n", __func__, in binder_alloc_mmap_handler()
798 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
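For context, the caller side (a simplified sketch of binder_mmap() in binder.c; flag fixups, vm_ops setup, and error handling omitted) hands the VMA straight to this handler:

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	/* vm_flags and vm_ops setup omitted for brevity */
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}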
811 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
812 BUG_ON(alloc->vma); in binder_alloc_deferred_release()
814 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
818 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
820 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
822 buffer->clear_on_free = false; in binder_alloc_deferred_release()
828 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
829 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
831 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
833 list_del(&buffer->entry); in binder_alloc_deferred_release()
834 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
839 if (alloc->pages) { in binder_alloc_deferred_release()
842 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
846 if (!alloc->pages[i].page_ptr) in binder_alloc_deferred_release()
850 &alloc->pages[i].lru); in binder_alloc_deferred_release()
851 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
854 __func__, alloc->pid, i, page_addr, in binder_alloc_deferred_release()
856 __free_page(alloc->pages[i].page_ptr); in binder_alloc_deferred_release()
859 kfree(alloc->pages); in binder_alloc_deferred_release()
861 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
862 if (alloc->mm) in binder_alloc_deferred_release()
863 mmdrop(alloc->mm); in binder_alloc_deferred_release()
867 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
870 static void print_binder_buffer(struct seq_file *m, const char *prefix, in print_binder_buffer() argument
874 prefix, buffer->debug_id, buffer->user_data, in print_binder_buffer()
875 buffer->data_size, buffer->offsets_size, in print_binder_buffer()
876 buffer->extra_buffers_size, in print_binder_buffer()
877 buffer->transaction ? "active" : "delivered"); in print_binder_buffer()
881 * binder_alloc_print_allocated() - print buffer info
893 mutex_lock(&alloc->mutex); in binder_alloc_print_allocated()
894 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_print_allocated()
897 mutex_unlock(&alloc->mutex); in binder_alloc_print_allocated()
901 * binder_alloc_print_pages() - print page usage
914 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
920 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
921 page = &alloc->pages[i]; in binder_alloc_print_pages()
922 if (!page->page_ptr) in binder_alloc_print_pages()
924 else if (list_empty(&page->lru)) in binder_alloc_print_pages()
930 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
932 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
936 * binder_alloc_get_allocated_count() - return count of buffers
946 mutex_lock(&alloc->mutex); in binder_alloc_get_allocated_count()
947 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
949 mutex_unlock(&alloc->mutex); in binder_alloc_get_allocated_count()
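The counter is a straight walk of the allocated rbtree under the mutex; reconstructed in full around the matched lines:

int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}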
955 * binder_alloc_vma_close() - invalidate address space
959 * Clears alloc->vma to prevent new incoming transactions from
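Given the doc text, the close hook is likely a one-liner that republishes NULL through the release-store helper shown earlier:

void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}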
968 * binder_alloc_free_page() - shrinker callback to free pages
991 alloc = page->alloc; in binder_alloc_free_page()
992 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
995 if (!page->page_ptr) in binder_alloc_free_page()
998 index = page - alloc->pages; in binder_alloc_free_page()
999 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
1001 mm = alloc->mm; in binder_alloc_free_page()
1025 __free_page(page->page_ptr); in binder_alloc_free_page()
1026 page->page_ptr = NULL; in binder_alloc_free_page()
1031 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1040 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1055 NULL, sc->nr_to_scan); in binder_shrink_scan()
1065 * binder_alloc_init() - called by binder_open() for per-proc initialization
1073 alloc->pid = current->group_leader->pid; in binder_alloc_init()
1074 alloc->mm = current->mm; in binder_alloc_init()
1075 mmgrab(alloc->mm); in binder_alloc_init()
1076 mutex_init(&alloc->mutex); in binder_alloc_init()
1077 INIT_LIST_HEAD(&alloc->buffers); in binder_alloc_init()
1085 ret = register_shrinker(&binder_shrinker, "android-binder"); in binder_alloc_shrinker_init()
1099 * check_buffer() - verify that buffer/offset is safe to access
1111 * (buffer->free == 0 && buffer->allow_user_free == 0)
1113 * (buffer->free == 0 && buffer->transaction == NULL).
1124 offset <= buffer_size - bytes && in check_buffer()
1126 !buffer->free && in check_buffer()
1127 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
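Assembling the matched conditions, check_buffer() likely reads as below. The leading buffer_size >= bytes clause and the IS_ALIGNED check are assumptions from upstream context (the former also guarantees buffer_size - bytes cannot underflow):

static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}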
1131 * binder_alloc_get_page() - get kernel pointer for given buffer offset
1138 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
1139 * NULL, the byte-offset into the page is written there.
1144 * guaranteed that the corresponding elements of @alloc->pages[]
1155 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
1160 lru_page = &alloc->pages[index]; in binder_alloc_get_page()
1162 return lru_page->page_ptr; in binder_alloc_get_page()
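Filling in around the matched lines, the page lookup converts a buffer-relative offset into an index into alloc->pages[] plus an in-page offset (struct binder_lru_page and the exact signature are assumptions from context):

static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}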
1166 * binder_alloc_clear_buf() - zero out buffer
1185 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_clear_buf()
1187 bytes -= size; in binder_alloc_clear_buf()
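The zeroing loop walks the buffer one page at a time using the matched min_t stride; a sketch assuming the memset_page() helper of recent kernels (older kernels open-code a kmap):

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		struct page *page;
		pgoff_t pgoff;
		size_t size;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		/* never cross a page boundary in one step */
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}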
1193 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1223 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_copy_user_to_buffer()
1228 return bytes - size + ret; in binder_alloc_copy_user_to_buffer()
1229 bytes -= size; in binder_alloc_copy_user_to_buffer()
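The user-to-buffer copy follows the same page-walk pattern; reconstructed below (kmap_local_page() is assumed, as in recent kernels), with the matched early return reporting how many bytes remain uncopied, mirroring copy_from_user() semantics:

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size, ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;	/* bytes not copied */
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}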
1243 /* All copies must be 32-bit aligned and 32-bit size */ in binder_alloc_do_buffer_copy()
1245 return -EINVAL; in binder_alloc_do_buffer_copy()
1254 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_do_buffer_copy()
1259 bytes -= size; in binder_alloc_do_buffer_copy()
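The kernel-buffer copy helper shares the same stride; a condensed sketch (memcpy_to_page()/memcpy_from_page() assumed from recent highmem helpers) showing how the matched -EINVAL guard enforces the 32-bit alignment rule through check_buffer():

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr, size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		struct page *page;
		pgoff_t pgoff;
		size_t size;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		ptr += size;
		buffer_offset += size;
	}
	return 0;
}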