Lines Matching +full:async +full:- +full:prefix

1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 2007-2008 Google, Inc.
15 * 1) proc->outer_lock : protects binder_ref
18 * 2) node->lock : protects most fields of binder_node.
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
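
For illustration, a minimal sketch of the suffix convention above (this helper is hypothetical, not part of binder.c): a *_nilocked() routine expects the caller to already hold node->lock and, when the node is attached to a process, proc->inner_lock as well.

/* hypothetical example of the _nilocked suffix convention */
static void binder_example_nilocked(struct binder_node *node)
{
	assert_spin_locked(&node->lock);
	if (node->proc)
		assert_spin_locked(&node->proc->inner_lock);
	/* ... modify fields protected by both locks ... */
}
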
199 (ee)->id = _id; \
200 (ee)->command = _command; \
201 (ee)->param = _param; \
258 unsigned int cur = atomic_inc_return(&log->cur); in binder_transaction_log_add()
260 if (cur >= ARRAY_SIZE(log->entry)) in binder_transaction_log_add()
261 log->full = true; in binder_transaction_log_add()
262 e = &log->entry[cur % ARRAY_SIZE(log->entry)]; in binder_transaction_log_add()
263 WRITE_ONCE(e->debug_id_done, 0); in binder_transaction_log_add()
265 * write-barrier to synchronize access to e->debug_id_done. in binder_transaction_log_add()
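
The write-barrier noted above is paired with a read-barrier on the consumer side. Below is a hedged sketch (not the exact debugfs reader) of how a reader could tell whether an entry has been fully written before trusting its fields.

/* illustrative reader pairing with the log writer's write-barrier */
static bool binder_log_entry_complete(struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);

	/* order the debug_id_done load before the other field loads */
	smp_rmb();
	return debug_id && debug_id == e->debug_id;
}
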
289 * binder_proc_lock() - Acquire outer lock for given binder_proc
292 * Acquires proc->outer_lock. Used to protect binder_ref
298 __acquires(&proc->outer_lock) in _binder_proc_lock()
302 spin_lock(&proc->outer_lock); in _binder_proc_lock()
306 * binder_proc_unlock() - Release spinlock for given binder_proc
314 __releases(&proc->outer_lock) in _binder_proc_unlock()
318 spin_unlock(&proc->outer_lock); in _binder_proc_unlock()
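
A hedged usage sketch of this lock pair; the lookup mirrors binder_get_ref_olocked(), which appears further down in this listing, and the helper name is illustrative.

/* illustrative: resolve a descriptor's debug id under the outer lock */
static int binder_example_ref_debug_id(struct binder_proc *proc, u32 desc)
{
	struct binder_ref *ref;
	int debug_id = -1;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, true);
	if (ref)
		debug_id = ref->data.debug_id;
	binder_proc_unlock(proc);
	return debug_id;
}
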
322 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
325 * Acquires proc->inner_lock. Used to protect todo lists
330 __acquires(&proc->inner_lock) in _binder_inner_proc_lock()
334 spin_lock(&proc->inner_lock); in _binder_inner_proc_lock()
338 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
346 __releases(&proc->inner_lock) in _binder_inner_proc_unlock()
350 spin_unlock(&proc->inner_lock); in _binder_inner_proc_unlock()
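
Similarly, a short sketch of bracketing todo-list access with the inner lock (illustrative helper, modeled on binder_has_work() shown later in this listing):

static bool binder_example_proc_has_work(struct binder_proc *proc)
{
	bool has_work;

	binder_inner_proc_lock(proc);
	has_work = !binder_worklist_empty_ilocked(&proc->todo);
	binder_inner_proc_unlock(proc);
	return has_work;
}
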
354 * binder_node_lock() - Acquire spinlock for given binder_node
357 * Acquires node->lock. Used to protect binder_node fields
362 __acquires(&node->lock) in _binder_node_lock()
366 spin_lock(&node->lock); in _binder_node_lock()
370 * binder_node_unlock() - Release spinlock for given binder_node
378 __releases(&node->lock) in _binder_node_unlock()
382 spin_unlock(&node->lock); in _binder_node_unlock()
386 * binder_node_inner_lock() - Acquire node and inner locks
389 * Acquires node->lock. If node->proc is non-NULL, also acquires
390 * proc->inner_lock. Used to protect binder_node fields
395 __acquires(&node->lock) __acquires(&node->proc->inner_lock) in _binder_node_inner_lock()
399 spin_lock(&node->lock); in _binder_node_inner_lock()
400 if (node->proc) in _binder_node_inner_lock()
401 binder_inner_proc_lock(node->proc); in _binder_node_inner_lock()
404 __acquire(&node->proc->inner_lock); in _binder_node_inner_lock()
408 * binder_node_inner_unlock() - Release node and inner locks
416 __releases(&node->lock) __releases(&node->proc->inner_lock) in _binder_node_inner_unlock()
418 struct binder_proc *proc = node->proc; in _binder_node_inner_unlock()
426 __release(&node->proc->inner_lock); in _binder_node_inner_unlock()
427 spin_unlock(&node->lock); in _binder_node_inner_unlock()
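
Callers needing both the node fields and its owning proc's state take the combined lock; this sketch mirrors how binder_inc_node() wraps its _nilocked variant (the wrapper name here is illustrative):

static int binder_example_inc_node(struct binder_node *node, int strong,
				   int internal, struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);
	return ret;
}
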
436 * binder_worklist_empty() - Check if no items on the work list
454 * binder_enqueue_work_ilocked() - Add an item to the work list
461 * Requires the proc->inner_lock to be held.
468 BUG_ON(work->entry.next && !list_empty(&work->entry)); in binder_enqueue_work_ilocked()
469 list_add_tail(&work->entry, target_list); in binder_enqueue_work_ilocked()
473 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
481 * Requires the proc->inner_lock to be held.
487 WARN_ON(!list_empty(&thread->waiting_thread_node)); in binder_enqueue_deferred_thread_work_ilocked()
488 binder_enqueue_work_ilocked(work, &thread->todo); in binder_enqueue_deferred_thread_work_ilocked()
492 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
499 * Requires the proc->inner_lock to be held.
505 WARN_ON(!list_empty(&thread->waiting_thread_node)); in binder_enqueue_thread_work_ilocked()
506 binder_enqueue_work_ilocked(work, &thread->todo); in binder_enqueue_thread_work_ilocked()
508 /* (e)poll-based threads require an explicit wakeup signal when in binder_enqueue_thread_work_ilocked()
513 if (thread->looper & BINDER_LOOPER_STATE_POLL && in binder_enqueue_thread_work_ilocked()
514 thread->pid == current->pid && !thread->process_todo) in binder_enqueue_thread_work_ilocked()
515 wake_up_interruptible_sync(&thread->wait); in binder_enqueue_thread_work_ilocked()
517 thread->process_todo = true; in binder_enqueue_thread_work_ilocked()
521 * binder_enqueue_thread_work() - Add an item to the thread work list
532 binder_inner_proc_lock(thread->proc); in binder_enqueue_thread_work()
534 binder_inner_proc_unlock(thread->proc); in binder_enqueue_thread_work()
540 list_del_init(&work->entry); in binder_dequeue_work_ilocked()
544 * binder_dequeue_work() - Removes an item from the work list
566 list_del_init(&w->entry); in binder_dequeue_work_head_ilocked()
589 return thread->process_todo || in binder_has_work_ilocked()
590 thread->looper_need_return || in binder_has_work_ilocked()
592 !binder_worklist_empty_ilocked(&thread->proc->todo)); in binder_has_work_ilocked()
599 binder_inner_proc_lock(thread->proc); in binder_has_work()
601 binder_inner_proc_unlock(thread->proc); in binder_has_work()
608 return !thread->transaction_stack && in binder_available_for_proc_work_ilocked()
609 binder_worklist_empty_ilocked(&thread->todo); in binder_available_for_proc_work_ilocked()
618 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { in binder_wakeup_poll_threads_ilocked()
620 if (thread->looper & BINDER_LOOPER_STATE_POLL && in binder_wakeup_poll_threads_ilocked()
623 wake_up_interruptible_sync(&thread->wait); in binder_wakeup_poll_threads_ilocked()
625 wake_up_interruptible(&thread->wait); in binder_wakeup_poll_threads_ilocked()
631 * binder_select_thread_ilocked() - selects a thread for doing proc work.
647 assert_spin_locked(&proc->inner_lock); in binder_select_thread_ilocked()
648 thread = list_first_entry_or_null(&proc->waiting_threads, in binder_select_thread_ilocked()
653 list_del_init(&thread->waiting_thread_node); in binder_select_thread_ilocked()
659 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
661 * @thread: specific thread to wake-up (may be NULL)
662 * @sync: whether to do a synchronous wake-up
665 * The caller may provide a specific thread to wake-up in
678 assert_spin_locked(&proc->inner_lock); in binder_wakeup_thread_ilocked()
682 wake_up_interruptible_sync(&thread->wait); in binder_wakeup_thread_ilocked()
684 wake_up_interruptible(&thread->wait); in binder_wakeup_thread_ilocked()
698 * a thread that called into (e)poll is handling non-binder in binder_wakeup_thread_ilocked()
722 current->pid, nice, min_nice); in binder_set_nice()
726 binder_user_error("%d RLIMIT_NICE not set\n", current->pid); in binder_set_nice()
732 struct rb_node *n = proc->nodes.rb_node; in binder_get_node_ilocked()
735 assert_spin_locked(&proc->inner_lock); in binder_get_node_ilocked()
740 if (ptr < node->ptr) in binder_get_node_ilocked()
741 n = n->rb_left; in binder_get_node_ilocked()
742 else if (ptr > node->ptr) in binder_get_node_ilocked()
743 n = n->rb_right; in binder_get_node_ilocked()
773 struct rb_node **p = &proc->nodes.rb_node; in binder_init_node_ilocked()
776 binder_uintptr_t ptr = fp ? fp->binder : 0; in binder_init_node_ilocked()
777 binder_uintptr_t cookie = fp ? fp->cookie : 0; in binder_init_node_ilocked()
778 __u32 flags = fp ? fp->flags : 0; in binder_init_node_ilocked()
780 assert_spin_locked(&proc->inner_lock); in binder_init_node_ilocked()
787 if (ptr < node->ptr) in binder_init_node_ilocked()
788 p = &(*p)->rb_left; in binder_init_node_ilocked()
789 else if (ptr > node->ptr) in binder_init_node_ilocked()
790 p = &(*p)->rb_right; in binder_init_node_ilocked()
803 node->tmp_refs++; in binder_init_node_ilocked()
804 rb_link_node(&node->rb_node, parent, p); in binder_init_node_ilocked()
805 rb_insert_color(&node->rb_node, &proc->nodes); in binder_init_node_ilocked()
806 node->debug_id = atomic_inc_return(&binder_last_id); in binder_init_node_ilocked()
807 node->proc = proc; in binder_init_node_ilocked()
808 node->ptr = ptr; in binder_init_node_ilocked()
809 node->cookie = cookie; in binder_init_node_ilocked()
810 node->work.type = BINDER_WORK_NODE; in binder_init_node_ilocked()
811 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; in binder_init_node_ilocked()
812 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); in binder_init_node_ilocked()
813 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); in binder_init_node_ilocked()
814 spin_lock_init(&node->lock); in binder_init_node_ilocked()
815 INIT_LIST_HEAD(&node->work.entry); in binder_init_node_ilocked()
816 INIT_LIST_HEAD(&node->async_todo); in binder_init_node_ilocked()
819 proc->pid, current->pid, node->debug_id, in binder_init_node_ilocked()
820 (u64)node->ptr, (u64)node->cookie); in binder_init_node_ilocked()
855 struct binder_proc *proc = node->proc; in binder_inc_node_nilocked()
857 assert_spin_locked(&node->lock); in binder_inc_node_nilocked()
859 assert_spin_locked(&proc->inner_lock); in binder_inc_node_nilocked()
863 node->internal_strong_refs == 0 && in binder_inc_node_nilocked()
864 !(node->proc && in binder_inc_node_nilocked()
865 node == node->proc->context->binder_context_mgr_node && in binder_inc_node_nilocked()
866 node->has_strong_ref)) { in binder_inc_node_nilocked()
868 node->debug_id); in binder_inc_node_nilocked()
869 return -EINVAL; in binder_inc_node_nilocked()
871 node->internal_strong_refs++; in binder_inc_node_nilocked()
873 node->local_strong_refs++; in binder_inc_node_nilocked()
874 if (!node->has_strong_ref && target_list) { in binder_inc_node_nilocked()
877 binder_dequeue_work_ilocked(&node->work); in binder_inc_node_nilocked()
878 BUG_ON(&thread->todo != target_list); in binder_inc_node_nilocked()
880 &node->work); in binder_inc_node_nilocked()
884 node->local_weak_refs++; in binder_inc_node_nilocked()
885 if (!node->has_weak_ref && list_empty(&node->work.entry)) { in binder_inc_node_nilocked()
888 node->debug_id); in binder_inc_node_nilocked()
889 return -EINVAL; in binder_inc_node_nilocked()
894 binder_enqueue_work_ilocked(&node->work, target_list); in binder_inc_node_nilocked()
915 struct binder_proc *proc = node->proc; in binder_dec_node_nilocked()
917 assert_spin_locked(&node->lock); in binder_dec_node_nilocked()
919 assert_spin_locked(&proc->inner_lock); in binder_dec_node_nilocked()
922 node->internal_strong_refs--; in binder_dec_node_nilocked()
924 node->local_strong_refs--; in binder_dec_node_nilocked()
925 if (node->local_strong_refs || node->internal_strong_refs) in binder_dec_node_nilocked()
929 node->local_weak_refs--; in binder_dec_node_nilocked()
930 if (node->local_weak_refs || node->tmp_refs || in binder_dec_node_nilocked()
931 !hlist_empty(&node->refs)) in binder_dec_node_nilocked()
935 if (proc && (node->has_strong_ref || node->has_weak_ref)) { in binder_dec_node_nilocked()
936 if (list_empty(&node->work.entry)) { in binder_dec_node_nilocked()
937 binder_enqueue_work_ilocked(&node->work, &proc->todo); in binder_dec_node_nilocked()
941 if (hlist_empty(&node->refs) && !node->local_strong_refs && in binder_dec_node_nilocked()
942 !node->local_weak_refs && !node->tmp_refs) { in binder_dec_node_nilocked()
944 binder_dequeue_work_ilocked(&node->work); in binder_dec_node_nilocked()
945 rb_erase(&node->rb_node, &proc->nodes); in binder_dec_node_nilocked()
948 node->debug_id); in binder_dec_node_nilocked()
950 BUG_ON(!list_empty(&node->work.entry)); in binder_dec_node_nilocked()
956 if (node->tmp_refs) { in binder_dec_node_nilocked()
960 hlist_del(&node->dead_node); in binder_dec_node_nilocked()
964 node->debug_id); in binder_dec_node_nilocked()
990 node->tmp_refs++; in binder_inc_node_tmpref_ilocked()
994 * binder_inc_node_tmpref() - take a temporary reference on node
1001 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1002 * node->tmp_refs against dead-node-only cases where the node
1009 if (node->proc) in binder_inc_node_tmpref()
1010 binder_inner_proc_lock(node->proc); in binder_inc_node_tmpref()
1014 if (node->proc) in binder_inc_node_tmpref()
1015 binder_inner_proc_unlock(node->proc); in binder_inc_node_tmpref()
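
A temporary reference brackets a short window in which the node must stay allocated; a hedged usage sketch (the helper in the middle is a placeholder):

	binder_inc_node_tmpref(node);
	/* node cannot be freed in this window, even if all other refs drop */
	inspect_node(node);		/* hypothetical placeholder */
	binder_dec_node_tmpref(node);
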
1022 * binder_dec_node_tmpref() - remove a temporary reference on node
1032 if (!node->proc) in binder_dec_node_tmpref()
1036 node->tmp_refs--; in binder_dec_node_tmpref()
1037 BUG_ON(node->tmp_refs < 0); in binder_dec_node_tmpref()
1038 if (!node->proc) in binder_dec_node_tmpref()
1062 struct rb_node *n = proc->refs_by_desc.rb_node; in binder_get_ref_olocked()
1068 if (desc < ref->data.desc) { in binder_get_ref_olocked()
1069 n = n->rb_left; in binder_get_ref_olocked()
1070 } else if (desc > ref->data.desc) { in binder_get_ref_olocked()
1071 n = n->rb_right; in binder_get_ref_olocked()
1072 } else if (need_strong_ref && !ref->data.strong) { in binder_get_ref_olocked()
1083 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1096 * returned ref would be different than the passed-in
1105 struct binder_context *context = proc->context; in binder_get_ref_for_node_olocked()
1106 struct rb_node **p = &proc->refs_by_node.rb_node; in binder_get_ref_for_node_olocked()
1115 if (node < ref->node) in binder_get_ref_for_node_olocked()
1116 p = &(*p)->rb_left; in binder_get_ref_for_node_olocked()
1117 else if (node > ref->node) in binder_get_ref_for_node_olocked()
1118 p = &(*p)->rb_right; in binder_get_ref_for_node_olocked()
1126 new_ref->data.debug_id = atomic_inc_return(&binder_last_id); in binder_get_ref_for_node_olocked()
1127 new_ref->proc = proc; in binder_get_ref_for_node_olocked()
1128 new_ref->node = node; in binder_get_ref_for_node_olocked()
1129 rb_link_node(&new_ref->rb_node_node, parent, p); in binder_get_ref_for_node_olocked()
1130 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); in binder_get_ref_for_node_olocked()
1132 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1; in binder_get_ref_for_node_olocked()
1133 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { in binder_get_ref_for_node_olocked()
1135 if (ref->data.desc > new_ref->data.desc) in binder_get_ref_for_node_olocked()
1137 new_ref->data.desc = ref->data.desc + 1; in binder_get_ref_for_node_olocked()
1140 p = &proc->refs_by_desc.rb_node; in binder_get_ref_for_node_olocked()
1145 if (new_ref->data.desc < ref->data.desc) in binder_get_ref_for_node_olocked()
1146 p = &(*p)->rb_left; in binder_get_ref_for_node_olocked()
1147 else if (new_ref->data.desc > ref->data.desc) in binder_get_ref_for_node_olocked()
1148 p = &(*p)->rb_right; in binder_get_ref_for_node_olocked()
1152 rb_link_node(&new_ref->rb_node_desc, parent, p); in binder_get_ref_for_node_olocked()
1153 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); in binder_get_ref_for_node_olocked()
1156 hlist_add_head(&new_ref->node_entry, &node->refs); in binder_get_ref_for_node_olocked()
1160 proc->pid, new_ref->data.debug_id, new_ref->data.desc, in binder_get_ref_for_node_olocked()
1161 node->debug_id); in binder_get_ref_for_node_olocked()
1172 ref->proc->pid, ref->data.debug_id, ref->data.desc, in binder_cleanup_ref_olocked()
1173 ref->node->debug_id); in binder_cleanup_ref_olocked()
1175 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); in binder_cleanup_ref_olocked()
1176 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); in binder_cleanup_ref_olocked()
1178 binder_node_inner_lock(ref->node); in binder_cleanup_ref_olocked()
1179 if (ref->data.strong) in binder_cleanup_ref_olocked()
1180 binder_dec_node_nilocked(ref->node, 1, 1); in binder_cleanup_ref_olocked()
1182 hlist_del(&ref->node_entry); in binder_cleanup_ref_olocked()
1183 delete_node = binder_dec_node_nilocked(ref->node, 0, 1); in binder_cleanup_ref_olocked()
1184 binder_node_inner_unlock(ref->node); in binder_cleanup_ref_olocked()
1186 * Clear ref->node unless we want the caller to free the node in binder_cleanup_ref_olocked()
1190 * The caller uses ref->node to determine in binder_cleanup_ref_olocked()
1194 ref->node = NULL; in binder_cleanup_ref_olocked()
1197 if (ref->death) { in binder_cleanup_ref_olocked()
1200 ref->proc->pid, ref->data.debug_id, in binder_cleanup_ref_olocked()
1201 ref->data.desc); in binder_cleanup_ref_olocked()
1202 binder_dequeue_work(ref->proc, &ref->death->work); in binder_cleanup_ref_olocked()
1209 * binder_inc_ref_olocked() - increment the ref for given handle
1214 * Increment the ref. @ref->proc->outer_lock must be held on entry
1224 if (ref->data.strong == 0) { in binder_inc_ref_olocked()
1225 ret = binder_inc_node(ref->node, 1, 1, target_list); in binder_inc_ref_olocked()
1229 ref->data.strong++; in binder_inc_ref_olocked()
1231 if (ref->data.weak == 0) { in binder_inc_ref_olocked()
1232 ret = binder_inc_node(ref->node, 0, 1, target_list); in binder_inc_ref_olocked()
1236 ref->data.weak++; in binder_inc_ref_olocked()
1242 * binder_dec_ref_olocked() - dec the ref for given handle
1253 if (ref->data.strong == 0) { in binder_dec_ref_olocked()
1255 ref->proc->pid, ref->data.debug_id, in binder_dec_ref_olocked()
1256 ref->data.desc, ref->data.strong, in binder_dec_ref_olocked()
1257 ref->data.weak); in binder_dec_ref_olocked()
1260 ref->data.strong--; in binder_dec_ref_olocked()
1261 if (ref->data.strong == 0) in binder_dec_ref_olocked()
1262 binder_dec_node(ref->node, strong, 1); in binder_dec_ref_olocked()
1264 if (ref->data.weak == 0) { in binder_dec_ref_olocked()
1266 ref->proc->pid, ref->data.debug_id, in binder_dec_ref_olocked()
1267 ref->data.desc, ref->data.strong, in binder_dec_ref_olocked()
1268 ref->data.weak); in binder_dec_ref_olocked()
1271 ref->data.weak--; in binder_dec_ref_olocked()
1273 if (ref->data.strong == 0 && ref->data.weak == 0) { in binder_dec_ref_olocked()
1281 * binder_get_node_from_ref() - get the node from the given proc/desc
1303 node = ref->node; in binder_get_node_from_ref()
1310 *rdata = ref->data; in binder_get_node_from_ref()
1321 * binder_free_ref() - free the binder_ref
1324 * Free the binder_ref. Free the binder_node indicated by ref->node
1325 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1329 if (ref->node) in binder_free_ref()
1330 binder_free_node(ref->node); in binder_free_ref()
1331 kfree(ref->death); in binder_free_ref()
1336 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1359 ret = -EINVAL; in binder_update_ref_for_handle()
1368 *rdata = ref->data; in binder_update_ref_for_handle()
1381 * binder_dec_ref_for_handle() - dec the ref for given handle
1399 * binder_inc_ref_for_node() - increment the ref for given proc/node
1427 return -ENOMEM; in binder_inc_ref_for_node()
1432 *rdata = ref->data; in binder_inc_ref_for_node()
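
A hedged sketch of how a translation path uses this helper, modeled on the binder_translate_binder() call that appears later in this listing:

	ret = binder_inc_ref_for_node(target_proc, node,
				      fp->hdr.type == BINDER_TYPE_BINDER,
				      &thread->todo, &rdata);
	if (ret)
		return ret;
	fp->handle = rdata.desc;	/* hand the new descriptor to the target */
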
1459 assert_spin_locked(&target_thread->proc->inner_lock); in binder_pop_transaction_ilocked()
1460 BUG_ON(target_thread->transaction_stack != t); in binder_pop_transaction_ilocked()
1461 BUG_ON(target_thread->transaction_stack->from != target_thread); in binder_pop_transaction_ilocked()
1462 target_thread->transaction_stack = in binder_pop_transaction_ilocked()
1463 target_thread->transaction_stack->from_parent; in binder_pop_transaction_ilocked()
1464 t->from = NULL; in binder_pop_transaction_ilocked()
1468 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1473 * extract t->from from a binder_transaction and keep the thread
1474 * indicated by t->from from being freed. When done with that
1483 * it cannot reach zero or thread->is_dead is false in binder_thread_dec_tmpref()
1485 binder_inner_proc_lock(thread->proc); in binder_thread_dec_tmpref()
1486 atomic_dec(&thread->tmp_ref); in binder_thread_dec_tmpref()
1487 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) { in binder_thread_dec_tmpref()
1488 binder_inner_proc_unlock(thread->proc); in binder_thread_dec_tmpref()
1492 binder_inner_proc_unlock(thread->proc); in binder_thread_dec_tmpref()
1496 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1500 * handle a transaction. proc->tmp_ref is incremented when
1501 * creating a new transaction or the binder_proc is currently in-use
1505 * been released and not currently in-use to process a transaction).
1510 proc->tmp_ref--; in binder_proc_dec_tmpref()
1511 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && in binder_proc_dec_tmpref()
1512 !proc->tmp_ref) { in binder_proc_dec_tmpref()
1521 * binder_get_txn_from() - safely extract the "from" thread in transaction
1522 * @t: binder transaction for t->from
1528 * Return: the value of t->from
1535 spin_lock(&t->lock); in binder_get_txn_from()
1536 from = t->from; in binder_get_txn_from()
1538 atomic_inc(&from->tmp_ref); in binder_get_txn_from()
1539 spin_unlock(&t->lock); in binder_get_txn_from()
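
Per the comment above binder_thread_dec_tmpref(), the usual pattern pairs binder_get_txn_from() with a matching decrement once the sending thread is no longer needed; a hedged sketch:

	struct binder_thread *from = binder_get_txn_from(t);

	if (from) {
		/* from->tmp_ref keeps the thread alive across this window */
		pr_debug("transaction %d sent by pid %d\n",
			 t->debug_id, from->pid);
		binder_thread_dec_tmpref(from);
	}
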
1544 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1545 * @t: binder transaction for t->from
1547 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1552 * Return: the value of t->from
1556 __acquires(&t->from->proc->inner_lock) in binder_get_txn_from_and_acq_inner()
1562 __acquire(&from->proc->inner_lock); in binder_get_txn_from_and_acq_inner()
1565 binder_inner_proc_lock(from->proc); in binder_get_txn_from_and_acq_inner()
1566 if (t->from) { in binder_get_txn_from_and_acq_inner()
1567 BUG_ON(from != t->from); in binder_get_txn_from_and_acq_inner()
1570 binder_inner_proc_unlock(from->proc); in binder_get_txn_from_and_acq_inner()
1571 __acquire(&from->proc->inner_lock); in binder_get_txn_from_and_acq_inner()
1577 * binder_free_txn_fixups() - free unprocessed fd fixups
1578 * @t: binder transaction for t->from
1584 * processed -- in that case, the list will be empty.
1590 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { in binder_free_txn_fixups()
1591 fput(fixup->file); in binder_free_txn_fixups()
1592 if (fixup->target_fd >= 0) in binder_free_txn_fixups()
1593 put_unused_fd(fixup->target_fd); in binder_free_txn_fixups()
1594 list_del(&fixup->fixup_entry); in binder_free_txn_fixups()
1603 spin_lock(&t->lock); in binder_txn_latency_free()
1604 from_proc = t->from ? t->from->proc->pid : 0; in binder_txn_latency_free()
1605 from_thread = t->from ? t->from->pid : 0; in binder_txn_latency_free()
1606 to_proc = t->to_proc ? t->to_proc->pid : 0; in binder_txn_latency_free()
1607 to_thread = t->to_thread ? t->to_thread->pid : 0; in binder_txn_latency_free()
1608 spin_unlock(&t->lock); in binder_txn_latency_free()
1615 struct binder_proc *target_proc = t->to_proc; in binder_free_transaction()
1619 target_proc->outstanding_txns--; in binder_free_transaction()
1620 if (target_proc->outstanding_txns < 0) in binder_free_transaction()
1622 __func__, target_proc->outstanding_txns); in binder_free_transaction()
1623 if (!target_proc->outstanding_txns && target_proc->is_frozen) in binder_free_transaction()
1624 wake_up_interruptible_all(&target_proc->freeze_wait); in binder_free_transaction()
1625 if (t->buffer) in binder_free_transaction()
1626 t->buffer->transaction = NULL; in binder_free_transaction()
1633 * t->buffer->transaction has already been cleared. in binder_free_transaction()
1646 BUG_ON(t->flags & TF_ONE_WAY); in binder_send_failed_reply()
1652 t->debug_id, in binder_send_failed_reply()
1653 target_thread->proc->pid, in binder_send_failed_reply()
1654 target_thread->pid); in binder_send_failed_reply()
1657 if (target_thread->reply_error.cmd == BR_OK) { in binder_send_failed_reply()
1658 target_thread->reply_error.cmd = error_code; in binder_send_failed_reply()
1661 &target_thread->reply_error.work); in binder_send_failed_reply()
1662 wake_up_interruptible(&target_thread->wait); in binder_send_failed_reply()
1671 target_thread->reply_error.cmd); in binder_send_failed_reply()
1673 binder_inner_proc_unlock(target_thread->proc); in binder_send_failed_reply()
1678 __release(&target_thread->proc->inner_lock); in binder_send_failed_reply()
1679 next = t->from_parent; in binder_send_failed_reply()
1683 t->debug_id); in binder_send_failed_reply()
1693 "reply failed, no target thread -- retry %d\n", in binder_send_failed_reply()
1694 t->debug_id); in binder_send_failed_reply()
1699 * binder_cleanup_transaction() - cleans up undelivered transaction
1708 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { in binder_cleanup_transaction()
1713 t->debug_id, reason); in binder_cleanup_transaction()
1719 * binder_get_object() - gets object and checks for valid metadata
1744 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); in binder_get_object()
1745 if (offset > buffer->data_size || read_size < sizeof(*hdr) || in binder_get_object()
1753 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, in binder_get_object()
1759 hdr = &object->hdr; in binder_get_object()
1760 switch (hdr->type) { in binder_get_object()
1779 if (offset <= buffer->data_size - object_size && in binder_get_object()
1780 buffer->data_size >= object_size) in binder_get_object()
1787 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1805 * If @object_offsetp is non-NULL, then the offset within
1825 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, in binder_validate_ptr()
1830 if (!object_size || object->hdr.type != BINDER_TYPE_PTR) in binder_validate_ptr()
1835 return &object->bbo; in binder_validate_ptr()
1839 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1905 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) in binder_validate_fixup()
1907 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); in binder_validate_fixup()
1909 sizeof(binder_size_t) * last_bbo->parent; in binder_validate_fixup()
1910 if (binder_alloc_copy_from_buffer(&proc->alloc, in binder_validate_fixup()
1920 * struct binder_task_work_cb - for deferred close
1934 * binder_do_fd_close() - close list of file descriptors
1951 fput(twcb->file); in binder_do_fd_close()
1956 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1957 * @fd: file-descriptor to close
1960 * a file-descriptor to be closed after returning from binder_ioctl().
1969 init_task_work(&twcb->twork, binder_do_fd_close); in binder_deferred_fd_close()
1970 twcb->file = close_fd_get_file(fd); in binder_deferred_fd_close()
1971 if (twcb->file) { in binder_deferred_fd_close()
1973 get_file(twcb->file); in binder_deferred_fd_close()
1974 filp_close(twcb->file, current->files); in binder_deferred_fd_close()
1975 task_work_add(current, &twcb->twork, TWA_RESUME); in binder_deferred_fd_close()
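
A hedged sketch of an error path that cannot close an already-installed fd synchronously and defers it instead (the condition is illustrative):

	/* illustrative unwind: fd was installed but the transaction failed */
	if (err && fd >= 0)
		binder_deferred_fd_close(fd);
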
1987 int debug_id = buffer->debug_id; in binder_transaction_buffer_release()
1991 "%d buffer release %d, size %zd-%zd, failed at %llx\n", in binder_transaction_buffer_release()
1992 proc->pid, buffer->debug_id, in binder_transaction_buffer_release()
1993 buffer->data_size, buffer->offsets_size, in binder_transaction_buffer_release()
1996 if (buffer->target_node) in binder_transaction_buffer_release()
1997 binder_dec_node(buffer->target_node, 1, 0); in binder_transaction_buffer_release()
1999 off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); in binder_transaction_buffer_release()
2008 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, in binder_transaction_buffer_release()
2015 debug_id, (u64)object_offset, buffer->data_size); in binder_transaction_buffer_release()
2019 switch (hdr->type) { in binder_transaction_buffer_release()
2026 node = binder_get_node(proc, fp->binder); in binder_transaction_buffer_release()
2029 debug_id, (u64)fp->binder); in binder_transaction_buffer_release()
2034 node->debug_id, (u64)node->ptr); in binder_transaction_buffer_release()
2035 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, in binder_transaction_buffer_release()
2046 ret = binder_dec_ref_for_handle(proc, fp->handle, in binder_transaction_buffer_release()
2047 hdr->type == BINDER_TYPE_HANDLE, &rdata); in binder_transaction_buffer_release()
2051 debug_id, fp->handle, ret); in binder_transaction_buffer_release()
2061 * No need to close the file here since user-space in binder_transaction_buffer_release()
2093 num_valid = (buffer_offset - off_start_offset) / in binder_transaction_buffer_release()
2097 fda->parent, in binder_transaction_buffer_release()
2106 fd_buf_size = sizeof(u32) * fda->num_fds; in binder_transaction_buffer_release()
2107 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { in binder_transaction_buffer_release()
2109 debug_id, (u64)fda->num_fds); in binder_transaction_buffer_release()
2112 if (fd_buf_size > parent->length || in binder_transaction_buffer_release()
2113 fda->parent_offset > parent->length - fd_buf_size) { in binder_transaction_buffer_release()
2116 debug_id, (u64)fda->num_fds); in binder_transaction_buffer_release()
2121 * to user-space and the @buffer element is the user in binder_transaction_buffer_release()
2127 (parent->buffer - (uintptr_t)buffer->user_data) + in binder_transaction_buffer_release()
2128 fda->parent_offset; in binder_transaction_buffer_release()
2129 for (fd_index = 0; fd_index < fda->num_fds; in binder_transaction_buffer_release()
2137 &proc->alloc, &fd, buffer, in binder_transaction_buffer_release()
2148 thread->looper_need_return = true; in binder_transaction_buffer_release()
2154 debug_id, hdr->type); in binder_transaction_buffer_release()
2168 off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); in binder_release_entire_buffer()
2169 off_end_offset += buffer->offsets_size; in binder_release_entire_buffer()
2180 struct binder_proc *proc = thread->proc; in binder_translate_binder()
2181 struct binder_proc *target_proc = t->to_proc; in binder_translate_binder()
2185 node = binder_get_node(proc, fp->binder); in binder_translate_binder()
2189 return -ENOMEM; in binder_translate_binder()
2191 if (fp->cookie != node->cookie) { in binder_translate_binder()
2193 proc->pid, thread->pid, (u64)fp->binder, in binder_translate_binder()
2194 node->debug_id, (u64)fp->cookie, in binder_translate_binder()
2195 (u64)node->cookie); in binder_translate_binder()
2196 ret = -EINVAL; in binder_translate_binder()
2199 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { in binder_translate_binder()
2200 ret = -EPERM; in binder_translate_binder()
2205 fp->hdr.type == BINDER_TYPE_BINDER, in binder_translate_binder()
2206 &thread->todo, &rdata); in binder_translate_binder()
2210 if (fp->hdr.type == BINDER_TYPE_BINDER) in binder_translate_binder()
2211 fp->hdr.type = BINDER_TYPE_HANDLE; in binder_translate_binder()
2213 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; in binder_translate_binder()
2214 fp->binder = 0; in binder_translate_binder()
2215 fp->handle = rdata.desc; in binder_translate_binder()
2216 fp->cookie = 0; in binder_translate_binder()
2220 " node %d u%016llx -> ref %d desc %d\n", in binder_translate_binder()
2221 node->debug_id, (u64)node->ptr, in binder_translate_binder()
2232 struct binder_proc *proc = thread->proc; in binder_translate_handle()
2233 struct binder_proc *target_proc = t->to_proc; in binder_translate_handle()
2238 node = binder_get_node_from_ref(proc, fp->handle, in binder_translate_handle()
2239 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); in binder_translate_handle()
2242 proc->pid, thread->pid, fp->handle); in binder_translate_handle()
2243 return -EINVAL; in binder_translate_handle()
2245 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { in binder_translate_handle()
2246 ret = -EPERM; in binder_translate_handle()
2251 if (node->proc == target_proc) { in binder_translate_handle()
2252 if (fp->hdr.type == BINDER_TYPE_HANDLE) in binder_translate_handle()
2253 fp->hdr.type = BINDER_TYPE_BINDER; in binder_translate_handle()
2255 fp->hdr.type = BINDER_TYPE_WEAK_BINDER; in binder_translate_handle()
2256 fp->binder = node->ptr; in binder_translate_handle()
2257 fp->cookie = node->cookie; in binder_translate_handle()
2258 if (node->proc) in binder_translate_handle()
2259 binder_inner_proc_lock(node->proc); in binder_translate_handle()
2261 __acquire(&node->proc->inner_lock); in binder_translate_handle()
2263 fp->hdr.type == BINDER_TYPE_BINDER, in binder_translate_handle()
2265 if (node->proc) in binder_translate_handle()
2266 binder_inner_proc_unlock(node->proc); in binder_translate_handle()
2268 __release(&node->proc->inner_lock); in binder_translate_handle()
2271 " ref %d desc %d -> node %d u%016llx\n", in binder_translate_handle()
2272 src_rdata.debug_id, src_rdata.desc, node->debug_id, in binder_translate_handle()
2273 (u64)node->ptr); in binder_translate_handle()
2280 fp->hdr.type == BINDER_TYPE_HANDLE, in binder_translate_handle()
2285 fp->binder = 0; in binder_translate_handle()
2286 fp->handle = dest_rdata.desc; in binder_translate_handle()
2287 fp->cookie = 0; in binder_translate_handle()
2291 " ref %d desc %d -> ref %d desc %d (node %d)\n", in binder_translate_handle()
2294 node->debug_id); in binder_translate_handle()
2306 struct binder_proc *proc = thread->proc; in binder_translate_fd()
2307 struct binder_proc *target_proc = t->to_proc; in binder_translate_fd()
2314 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); in binder_translate_fd()
2316 target_allows_fd = t->buffer->target_node->accept_fds; in binder_translate_fd()
2319 proc->pid, thread->pid, in binder_translate_fd()
2322 ret = -EPERM; in binder_translate_fd()
2329 proc->pid, thread->pid, fd); in binder_translate_fd()
2330 ret = -EBADF; in binder_translate_fd()
2333 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); in binder_translate_fd()
2335 ret = -EPERM; in binder_translate_fd()
2346 ret = -ENOMEM; in binder_translate_fd()
2349 fixup->file = file; in binder_translate_fd()
2350 fixup->offset = fd_offset; in binder_translate_fd()
2351 fixup->target_fd = -1; in binder_translate_fd()
2352 trace_binder_transaction_fd_send(t, fd, fixup->offset); in binder_translate_fd()
2353 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); in binder_translate_fd()
2366 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2386 * struct binder_sg_copy - scatter-gather data to be copied
2406 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2409 * @sgc_head: list_head of scatter-gather copy list
2413 * and copying the scatter-gather data from the source process' user
2418 * Return: 0=success, else -errno
2435 while (bytes_copied < sgc->length) { in binder_do_deferred_txn_copies()
2437 size_t bytes_left = sgc->length - bytes_copied; in binder_do_deferred_txn_copies()
2438 size_t offset = sgc->offset + bytes_copied; in binder_do_deferred_txn_copies()
2443 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) in binder_do_deferred_txn_copies()
2449 sgc->sender_uaddr + bytes_copied, in binder_do_deferred_txn_copies()
2455 if (pf->skip_size) { in binder_do_deferred_txn_copies()
2462 bytes_copied += pf->skip_size; in binder_do_deferred_txn_copies()
2468 pf->offset, in binder_do_deferred_txn_copies()
2469 &pf->fixup_data, in binder_do_deferred_txn_copies()
2470 sizeof(pf->fixup_data)); in binder_do_deferred_txn_copies()
2471 bytes_copied += sizeof(pf->fixup_data); in binder_do_deferred_txn_copies()
2473 list_del(&pf->node); in binder_do_deferred_txn_copies()
2479 list_del(&sgc->node); in binder_do_deferred_txn_copies()
2483 BUG_ON(pf->skip_size == 0); in binder_do_deferred_txn_copies()
2484 list_del(&pf->node); in binder_do_deferred_txn_copies()
2489 return ret > 0 ? -EINVAL : ret; in binder_do_deferred_txn_copies()
2493 * binder_cleanup_deferred_txn_lists() - free specified lists
2494 * @sgc_head: list_head of scatter-gather copy list
2507 list_del(&sgc->node); in binder_cleanup_deferred_txn_lists()
2511 list_del(&pf->node); in binder_cleanup_deferred_txn_lists()
2517 * binder_defer_copy() - queue a scatter-gather buffer for copy
2518 * @sgc_head: list_head of scatter-gather copy list
2523 * Specify a scatter-gather block to be copied. The actual copy must
2525 * Then the copy and fixups are done together so un-translated values
2532 * Return: 0=success, else -errno
2540 return -ENOMEM; in binder_defer_copy()
2542 bc->offset = offset; in binder_defer_copy()
2543 bc->sender_uaddr = sender_uaddr; in binder_defer_copy()
2544 bc->length = length; in binder_defer_copy()
2545 INIT_LIST_HEAD(&bc->node); in binder_defer_copy()
2548 * We are guaranteed that the deferred copies are in-order in binder_defer_copy()
2551 list_add_tail(&bc->node, sgc_head); in binder_defer_copy()
2557 * binder_add_fixup() - queue a fixup to be applied to sg copy
2564 * the scatter-gather buffers, the fixup will be copied instead of
2571 * exceptions. Since out-of-order inserts are relatively uncommon,
2575 * Return: 0=success, else -errno
2584 return -ENOMEM; in binder_add_fixup()
2586 pf->offset = offset; in binder_add_fixup()
2587 pf->fixup_data = fixup; in binder_add_fixup()
2588 pf->skip_size = skip_size; in binder_add_fixup()
2589 INIT_LIST_HEAD(&pf->node); in binder_add_fixup()
2591 /* Fixups are *mostly* added in-order, but there are some in binder_add_fixup()
2595 if (tmppf->offset < pf->offset) { in binder_add_fixup()
2596 list_add(&pf->node, &tmppf->node); in binder_add_fixup()
2604 list_add(&pf->node, pf_head); in binder_add_fixup()
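
Taken together, a scatter-gather block is first queued with binder_defer_copy() and any pointer slot that must be rewritten inside it is queued with binder_add_fixup(); a hedged sketch of the sequence, modeled on the binder_transaction() and binder_fixup_parent() calls shown later in this listing:

	/* queue the sg block itself for a deferred copy ... */
	ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
	if (!ret)
		/* ... and register the pointer slot that must be rewritten */
		ret = binder_add_fixup(&pf_head, buffer_offset, bp->buffer, 0);
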
2620 struct binder_proc *proc = thread->proc; in binder_translate_fd_array()
2623 if (fda->num_fds == 0) in binder_translate_fd_array()
2626 fd_buf_size = sizeof(u32) * fda->num_fds; in binder_translate_fd_array()
2627 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { in binder_translate_fd_array()
2629 proc->pid, thread->pid, (u64)fda->num_fds); in binder_translate_fd_array()
2630 return -EINVAL; in binder_translate_fd_array()
2632 if (fd_buf_size > parent->length || in binder_translate_fd_array()
2633 fda->parent_offset > parent->length - fd_buf_size) { in binder_translate_fd_array()
2636 proc->pid, thread->pid, (u64)fda->num_fds); in binder_translate_fd_array()
2637 return -EINVAL; in binder_translate_fd_array()
2641 * to user-space and the @buffer element is the user in binder_translate_fd_array()
2646 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + in binder_translate_fd_array()
2647 fda->parent_offset; in binder_translate_fd_array()
2648 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + in binder_translate_fd_array()
2649 fda->parent_offset; in binder_translate_fd_array()
2654 proc->pid, thread->pid); in binder_translate_fd_array()
2655 return -EINVAL; in binder_translate_fd_array()
2657 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); in binder_translate_fd_array()
2661 for (fdi = 0; fdi < fda->num_fds; fdi++) { in binder_translate_fd_array()
2671 return ret > 0 ? -EINVAL : ret; in binder_translate_fd_array()
2686 struct binder_buffer *b = t->buffer; in binder_fixup_parent()
2687 struct binder_proc *proc = thread->proc; in binder_fixup_parent()
2688 struct binder_proc *target_proc = t->to_proc; in binder_fixup_parent()
2693 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) in binder_fixup_parent()
2696 parent = binder_validate_ptr(target_proc, b, &object, bp->parent, in binder_fixup_parent()
2701 proc->pid, thread->pid); in binder_fixup_parent()
2702 return -EINVAL; in binder_fixup_parent()
2706 parent_offset, bp->parent_offset, in binder_fixup_parent()
2709 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", in binder_fixup_parent()
2710 proc->pid, thread->pid); in binder_fixup_parent()
2711 return -EINVAL; in binder_fixup_parent()
2714 if (parent->length < sizeof(binder_uintptr_t) || in binder_fixup_parent()
2715 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { in binder_fixup_parent()
2718 proc->pid, thread->pid); in binder_fixup_parent()
2719 return -EINVAL; in binder_fixup_parent()
2721 buffer_offset = bp->parent_offset + in binder_fixup_parent()
2722 (uintptr_t)parent->buffer - (uintptr_t)b->user_data; in binder_fixup_parent()
2723 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); in binder_fixup_parent()
2727 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2728 * @t1: the pending async txn in the frozen process
2729 * @t2: the new async txn to supersede the outdated pending one
2737 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) != in binder_can_update_transaction()
2738 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc) in binder_can_update_transaction()
2740 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && in binder_can_update_transaction()
2741 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid && in binder_can_update_transaction()
2742 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr && in binder_can_update_transaction()
2743 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie) in binder_can_update_transaction()
2749 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2750 * @t: new async transaction
2756 * Requires the proc->inner_lock to be held.
2767 if (w->type != BINDER_WORK_TRANSACTION) in binder_find_outdated_transaction_ilocked()
2777 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2795 * and the async transaction was successfully queued
2801 struct binder_node *node = t->buffer->target_node; in binder_proc_transaction()
2802 bool oneway = !!(t->flags & TF_ONE_WAY); in binder_proc_transaction()
2811 if (node->has_async_transaction) in binder_proc_transaction()
2814 node->has_async_transaction = true; in binder_proc_transaction()
2818 if (proc->is_frozen) { in binder_proc_transaction()
2820 proc->sync_recv |= !oneway; in binder_proc_transaction()
2821 proc->async_recv |= oneway; in binder_proc_transaction()
2824 if ((frozen && !oneway) || proc->is_dead || in binder_proc_transaction()
2825 (thread && thread->is_dead)) { in binder_proc_transaction()
2835 binder_enqueue_thread_work_ilocked(thread, &t->work); in binder_proc_transaction()
2837 binder_enqueue_work_ilocked(&t->work, &proc->todo); in binder_proc_transaction()
2839 if ((t->flags & TF_UPDATE_TXN) && frozen) { in binder_proc_transaction()
2841 &node->async_todo); in binder_proc_transaction()
2845 t->debug_id, t_outdated->debug_id); in binder_proc_transaction()
2846 list_del_init(&t_outdated->work.entry); in binder_proc_transaction()
2847 proc->outstanding_txns--; in binder_proc_transaction()
2850 binder_enqueue_work_ilocked(&t->work, &node->async_todo); in binder_proc_transaction()
2856 proc->outstanding_txns++; in binder_proc_transaction()
2865 struct binder_buffer *buffer = t_outdated->buffer; in binder_proc_transaction()
2867 t_outdated->buffer = NULL; in binder_proc_transaction()
2868 buffer->transaction = NULL; in binder_proc_transaction()
2871 binder_alloc_free_buf(&proc->alloc, buffer); in binder_proc_transaction()
2883 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2885 * @procp: returns @node->proc if valid
2888 * User-space normally keeps the node alive when creating a transaction
2894 * Since user-space can cause the local strong ref to go away, we also take
2899 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2900 * Also sets @procp if valid. If the @node->proc is NULL indicating that the
2911 if (node->proc) { in binder_get_node_refs_for_txn()
2915 node->proc->tmp_ref++; in binder_get_node_refs_for_txn()
2916 *procp = node->proc; in binder_get_node_refs_for_txn()
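
A hedged sketch of the caller side, modeled on the binder_transaction() call visible later in this listing:

	target_node = binder_get_node_refs_for_txn(ref->node, &target_proc,
						   &return_error);
	if (!target_node)
		/* the node's proc has died; return_error carries the BR_* code */
		goto err_dead_binder;		/* illustrative label */
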
2931 __release(&from->proc->inner_lock); in binder_set_txn_from_error()
2936 if (from->ee.command == BR_OK) in binder_set_txn_from_error()
2937 binder_set_extended_error(&from->ee, id, command, param); in binder_set_txn_from_error()
2938 binder_inner_proc_unlock(from->proc); in binder_set_txn_from_error()
2966 struct binder_context *context = proc->context; in binder_transaction()
2974 (uintptr_t)tr->data.ptr.buffer; in binder_transaction()
2979 e->debug_id = t_debug_id; in binder_transaction()
2980 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); in binder_transaction()
2981 e->from_proc = proc->pid; in binder_transaction()
2982 e->from_thread = thread->pid; in binder_transaction()
2983 e->target_handle = tr->target.handle; in binder_transaction()
2984 e->data_size = tr->data_size; in binder_transaction()
2985 e->offsets_size = tr->offsets_size; in binder_transaction()
2986 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); in binder_transaction()
2989 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); in binder_transaction()
2994 in_reply_to = thread->transaction_stack; in binder_transaction()
2998 proc->pid, thread->pid); in binder_transaction()
3000 return_error_param = -EPROTO; in binder_transaction()
3004 if (in_reply_to->to_thread != thread) { in binder_transaction()
3005 spin_lock(&in_reply_to->lock); in binder_transaction()
3007 proc->pid, thread->pid, in_reply_to->debug_id, in binder_transaction()
3008 in_reply_to->to_proc ? in binder_transaction()
3009 in_reply_to->to_proc->pid : 0, in binder_transaction()
3010 in_reply_to->to_thread ? in binder_transaction()
3011 in_reply_to->to_thread->pid : 0); in binder_transaction()
3012 spin_unlock(&in_reply_to->lock); in binder_transaction()
3015 return_error_param = -EPROTO; in binder_transaction()
3020 thread->transaction_stack = in_reply_to->to_parent; in binder_transaction()
3022 binder_set_nice(in_reply_to->saved_priority); in binder_transaction()
3026 __release(&target_thread->proc->inner_lock); in binder_transaction()
3028 thread->pid, proc->pid); in binder_transaction()
3033 if (target_thread->transaction_stack != in_reply_to) { in binder_transaction()
3035 proc->pid, thread->pid, in binder_transaction()
3036 target_thread->transaction_stack ? in binder_transaction()
3037 target_thread->transaction_stack->debug_id : 0, in binder_transaction()
3038 in_reply_to->debug_id); in binder_transaction()
3039 binder_inner_proc_unlock(target_thread->proc); in binder_transaction()
3041 return_error_param = -EPROTO; in binder_transaction()
3047 target_proc = target_thread->proc; in binder_transaction()
3048 target_proc->tmp_ref++; in binder_transaction()
3049 binder_inner_proc_unlock(target_thread->proc); in binder_transaction()
3051 if (tr->target.handle) { in binder_transaction()
3062 ref = binder_get_ref_olocked(proc, tr->target.handle, in binder_transaction()
3066 ref->node, &target_proc, in binder_transaction()
3070 proc->pid, thread->pid, tr->target.handle); in binder_transaction()
3075 mutex_lock(&context->context_mgr_node_lock); in binder_transaction()
3076 target_node = context->binder_context_mgr_node; in binder_transaction()
3083 mutex_unlock(&context->context_mgr_node_lock); in binder_transaction()
3084 if (target_node && target_proc->pid == proc->pid) { in binder_transaction()
3086 proc->pid, thread->pid); in binder_transaction()
3088 return_error_param = -EINVAL; in binder_transaction()
3095 thread->pid, proc->pid); in binder_transaction()
3099 return_error_param = -EINVAL; in binder_transaction()
3103 e->to_node = target_node->debug_id; in binder_transaction()
3106 thread->pid, proc->pid); in binder_transaction()
3108 return_error_param = -EINVAL; in binder_transaction()
3112 if (security_binder_transaction(proc->cred, in binder_transaction()
3113 target_proc->cred) < 0) { in binder_transaction()
3115 thread->pid, proc->pid); in binder_transaction()
3117 return_error_param = -EPERM; in binder_transaction()
3123 w = list_first_entry_or_null(&thread->todo, in binder_transaction()
3125 if (!(tr->flags & TF_ONE_WAY) && w && in binder_transaction()
3126 w->type == BINDER_WORK_TRANSACTION) { in binder_transaction()
3132 * thread from proc->waiting_threads to enqueue in binder_transaction()
3137 proc->pid, thread->pid); in binder_transaction()
3140 return_error_param = -EPROTO; in binder_transaction()
3145 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { in binder_transaction()
3148 tmp = thread->transaction_stack; in binder_transaction()
3149 if (tmp->to_thread != thread) { in binder_transaction()
3150 spin_lock(&tmp->lock); in binder_transaction()
3152 proc->pid, thread->pid, tmp->debug_id, in binder_transaction()
3153 tmp->to_proc ? tmp->to_proc->pid : 0, in binder_transaction()
3154 tmp->to_thread ? in binder_transaction()
3155 tmp->to_thread->pid : 0); in binder_transaction()
3156 spin_unlock(&tmp->lock); in binder_transaction()
3159 return_error_param = -EPROTO; in binder_transaction()
3166 spin_lock(&tmp->lock); in binder_transaction()
3167 from = tmp->from; in binder_transaction()
3168 if (from && from->proc == target_proc) { in binder_transaction()
3169 atomic_inc(&from->tmp_ref); in binder_transaction()
3171 spin_unlock(&tmp->lock); in binder_transaction()
3174 spin_unlock(&tmp->lock); in binder_transaction()
3175 tmp = tmp->from_parent; in binder_transaction()
3181 e->to_thread = target_thread->pid; in binder_transaction()
3182 e->to_proc = target_proc->pid; in binder_transaction()
3188 thread->pid, proc->pid); in binder_transaction()
3190 return_error_param = -ENOMEM; in binder_transaction()
3194 INIT_LIST_HEAD(&t->fd_fixups); in binder_transaction()
3196 spin_lock_init(&t->lock); in binder_transaction()
3201 thread->pid, proc->pid); in binder_transaction()
3203 return_error_param = -ENOMEM; in binder_transaction()
3209 t->debug_id = t_debug_id; in binder_transaction()
3210 t->start_time = t_start_time; in binder_transaction()
3214 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", in binder_transaction()
3215 proc->pid, thread->pid, t->debug_id, in binder_transaction()
3216 target_proc->pid, target_thread->pid, in binder_transaction()
3217 (u64)tr->data.ptr.buffer, in binder_transaction()
3218 (u64)tr->data.ptr.offsets, in binder_transaction()
3219 (u64)tr->data_size, (u64)tr->offsets_size, in binder_transaction()
3223 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", in binder_transaction()
3224 proc->pid, thread->pid, t->debug_id, in binder_transaction()
3225 target_proc->pid, target_node->debug_id, in binder_transaction()
3226 (u64)tr->data.ptr.buffer, in binder_transaction()
3227 (u64)tr->data.ptr.offsets, in binder_transaction()
3228 (u64)tr->data_size, (u64)tr->offsets_size, in binder_transaction()
3231 if (!reply && !(tr->flags & TF_ONE_WAY)) { in binder_transaction()
3232 t->from = thread; in binder_transaction()
3234 t->from_pid = -1; in binder_transaction()
3235 t->from_tid = -1; in binder_transaction()
3238 t->from = NULL; in binder_transaction()
3240 t->from_pid = thread->proc->pid; in binder_transaction()
3241 t->from_tid = thread->pid; in binder_transaction()
3245 t->sender_euid = task_euid(proc->tsk); in binder_transaction()
3247 t->sender_tokenid = current->token; in binder_transaction()
3248 t->first_tokenid = current->ftoken; in binder_transaction()
3250 t->to_proc = target_proc; in binder_transaction()
3251 t->to_thread = target_thread; in binder_transaction()
3252 t->code = tr->code; in binder_transaction()
3253 t->flags = tr->flags; in binder_transaction()
3254 t->priority = task_nice(current); in binder_transaction()
3256 if (target_node && target_node->txn_security_ctx) { in binder_transaction()
3260 security_cred_getsecid(proc->cred, &secid); in binder_transaction()
3264 thread->pid, proc->pid); in binder_transaction()
3274 thread->pid, proc->pid); in binder_transaction()
3276 return_error_param = -EINVAL; in binder_transaction()
3284 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, in binder_transaction()
3285 tr->offsets_size, extra_buffers_size, in binder_transaction()
3286 !reply && (t->flags & TF_ONE_WAY), current->tgid); in binder_transaction()
3287 if (IS_ERR(t->buffer)) { in binder_transaction()
3290 ret = PTR_ERR(t->buffer); in binder_transaction()
3291 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying" in binder_transaction()
3292 : (ret == -ENOSPC) ? ": no space left" in binder_transaction()
3293 : (ret == -ENOMEM) ? ": memory allocation failed" in binder_transaction()
3297 return_error_param = PTR_ERR(t->buffer); in binder_transaction()
3298 return_error = return_error_param == -ESRCH ? in binder_transaction()
3301 t->buffer = NULL; in binder_transaction()
3306 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + in binder_transaction()
3307 ALIGN(tr->offsets_size, sizeof(void *)) + in binder_transaction()
3308 ALIGN(extra_buffers_size, sizeof(void *)) - in binder_transaction()
3311 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; in binder_transaction()
3312 err = binder_alloc_copy_to_buffer(&target_proc->alloc, in binder_transaction()
3313 t->buffer, buf_offset, in binder_transaction()
3316 t->security_ctx = 0; in binder_transaction()
3322 t->buffer->debug_id = t->debug_id; in binder_transaction()
3323 t->buffer->transaction = t; in binder_transaction()
3324 t->buffer->target_node = target_node; in binder_transaction()
3325 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); in binder_transaction()
3326 trace_binder_transaction_alloc_buf(t->buffer); in binder_transaction()
3329 &target_proc->alloc, in binder_transaction()
3330 t->buffer, in binder_transaction()
3331 ALIGN(tr->data_size, sizeof(void *)), in binder_transaction()
3333 (uintptr_t)tr->data.ptr.offsets, in binder_transaction()
3334 tr->offsets_size)) { in binder_transaction()
3336 proc->pid, thread->pid); in binder_transaction()
3338 return_error_param = -EFAULT; in binder_transaction()
3342 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { in binder_transaction()
3344 proc->pid, thread->pid, (u64)tr->offsets_size); in binder_transaction()
3346 return_error_param = -EINVAL; in binder_transaction()
3352 proc->pid, thread->pid, in binder_transaction()
3355 return_error_param = -EINVAL; in binder_transaction()
3359 off_start_offset = ALIGN(tr->data_size, sizeof(void *)); in binder_transaction()
3361 off_end_offset = off_start_offset + tr->offsets_size; in binder_transaction()
3363 sg_buf_end_offset = sg_buf_offset + extra_buffers_size - in binder_transaction()
3374 if (binder_alloc_copy_from_buffer(&target_proc->alloc, in binder_transaction()
3376 t->buffer, in binder_transaction()
3380 thread->pid, proc->pid); in binder_transaction()
3382 return_error_param = -EINVAL; in binder_transaction()
3391 copy_size = object_offset - user_offset; in binder_transaction()
3393 object_offset > tr->data_size || in binder_transaction()
3395 &target_proc->alloc, in binder_transaction()
3396 t->buffer, user_offset, in binder_transaction()
3400 proc->pid, thread->pid); in binder_transaction()
3402 return_error_param = -EFAULT; in binder_transaction()
3407 t->buffer, object_offset, &object); in binder_transaction()
3410 proc->pid, thread->pid, in binder_transaction()
3413 (u64)t->buffer->data_size); in binder_transaction()
3415 return_error_param = -EINVAL; in binder_transaction()
3427 switch (hdr->type) { in binder_transaction()
3436 binder_alloc_copy_to_buffer(&target_proc->alloc, in binder_transaction()
3437 t->buffer, in binder_transaction()
3441 thread->pid, proc->pid); in binder_transaction()
3455 binder_alloc_copy_to_buffer(&target_proc->alloc, in binder_transaction()
3456 t->buffer, in binder_transaction()
3460 thread->pid, proc->pid); in binder_transaction()
3471 (uintptr_t)&fp->fd - (uintptr_t)fp; in binder_transaction()
3472 int ret = binder_translate_fd(fp->fd, fd_offset, t, in binder_transaction()
3475 fp->pad_binder = 0; in binder_transaction()
3477 binder_alloc_copy_to_buffer(&target_proc->alloc, in binder_transaction()
3478 t->buffer, in binder_transaction()
3482 thread->pid, proc->pid); in binder_transaction()
3496 size_t num_valid = (buffer_offset - off_start_offset) / in binder_transaction()
3499 binder_validate_ptr(target_proc, t->buffer, in binder_transaction()
3500 &ptr_object, fda->parent, in binder_transaction()
3506 proc->pid, thread->pid); in binder_transaction()
3508 return_error_param = -EINVAL; in binder_transaction()
3512 if (!binder_validate_fixup(target_proc, t->buffer, in binder_transaction()
3515 fda->parent_offset, in binder_transaction()
3518 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", in binder_transaction()
3519 proc->pid, thread->pid); in binder_transaction()
3521 return_error_param = -EINVAL; in binder_transaction()
3530 binder_get_object(proc, user_buffer, t->buffer, in binder_transaction()
3534 proc->pid, thread->pid, in binder_transaction()
3538 return_error_param = -EINVAL; in binder_transaction()
3547 ret = binder_alloc_copy_to_buffer(&target_proc->alloc, in binder_transaction()
3548 t->buffer, in binder_transaction()
3553 thread->pid, proc->pid); in binder_transaction()
3555 return_error_param = ret > 0 ? -EINVAL : ret; in binder_transaction()
3561 fda->parent_offset + sizeof(u32) * fda->num_fds; in binder_transaction()
3566 size_t buf_left = sg_buf_end_offset - sg_buf_offset; in binder_transaction()
3569 if (bp->length > buf_left) { in binder_transaction()
3571 proc->pid, thread->pid); in binder_transaction()
3573 return_error_param = -EINVAL; in binder_transaction()
3578 (const void __user *)(uintptr_t)bp->buffer, in binder_transaction()
3579 bp->length); in binder_transaction()
3582 thread->pid, proc->pid); in binder_transaction()
3589 bp->buffer = (uintptr_t) in binder_transaction()
3590 t->buffer->user_data + sg_buf_offset; in binder_transaction()
3591 sg_buf_offset += ALIGN(bp->length, sizeof(u64)); in binder_transaction()
3593 num_valid = (buffer_offset - off_start_offset) / in binder_transaction()
3602 binder_alloc_copy_to_buffer(&target_proc->alloc, in binder_transaction()
3603 t->buffer, in binder_transaction()
3607 thread->pid, proc->pid); in binder_transaction()
3618 proc->pid, thread->pid, hdr->type); in binder_transaction()
3620 return_error_param = -EINVAL; in binder_transaction()
3627 &target_proc->alloc, in binder_transaction()
3628 t->buffer, user_offset, in binder_transaction()
3630 tr->data_size - user_offset)) { in binder_transaction()
3632 proc->pid, thread->pid); in binder_transaction()
3634 return_error_param = -EFAULT; in binder_transaction()
3639 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, in binder_transaction()
3643 proc->pid, thread->pid); in binder_transaction()
3649 if (t->buffer->oneway_spam_suspect) in binder_transaction()
3650 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; in binder_transaction()
3652 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; in binder_transaction()
3653 t->work.type = BINDER_WORK_TRANSACTION; in binder_transaction()
3658 if (target_thread->is_dead) { in binder_transaction()
3663 BUG_ON(t->buffer->async_transaction != 0); in binder_transaction()
3665 t->timestamp = in_reply_to->timestamp; in binder_transaction()
3668 binder_enqueue_thread_work_ilocked(target_thread, &t->work); in binder_transaction()
3669 target_proc->outstanding_txns++; in binder_transaction()
3671 wake_up_interruptible_sync(&target_thread->wait); in binder_transaction()
3673 } else if (!(t->flags & TF_ONE_WAY)) { in binder_transaction()
3674 BUG_ON(t->buffer->async_transaction != 0); in binder_transaction()
3684 t->need_reply = 1; in binder_transaction()
3685 t->from_parent = thread->transaction_stack; in binder_transaction()
3686 thread->transaction_stack = t; in binder_transaction()
3688 t->timestamp = binder_clock(); in binder_transaction()
3701 BUG_ON(t->buffer->async_transaction != 1); in binder_transaction()
3703 t->timestamp = binder_clock(); in binder_transaction()
3707 * Let the caller know when async transaction reaches a frozen in binder_transaction()
3712 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING; in binder_transaction()
3728 WRITE_ONCE(e->debug_id_done, t_debug_id); in binder_transaction()
3733 thread->pid, proc->pid); in binder_transaction()
3743 trace_binder_transaction_failed_buffer_release(t->buffer); in binder_transaction()
3744 binder_transaction_buffer_release(target_proc, NULL, t->buffer, in binder_transaction()
3749 t->buffer->transaction = NULL; in binder_transaction()
3750 binder_alloc_free_buf(&target_proc->alloc, t->buffer); in binder_transaction()
3775 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n", in binder_transaction()
3776 proc->pid, thread->pid, reply ? "reply" : in binder_transaction()
3777 (tr->flags & TF_ONE_WAY ? "async" : "call"), in binder_transaction()
3778 target_proc ? target_proc->pid : 0, in binder_transaction()
3779 target_thread ? target_thread->pid : 0, in binder_transaction()
3781 (u64)tr->data_size, (u64)tr->offsets_size, in binder_transaction()
3792 e->return_error = return_error; in binder_transaction()
3793 e->return_error_param = return_error_param; in binder_transaction()
3794 e->return_error_line = return_error_line; in binder_transaction()
3802 WRITE_ONCE(e->debug_id_done, t_debug_id); in binder_transaction()
3803 WRITE_ONCE(fe->debug_id_done, t_debug_id); in binder_transaction()
3806 BUG_ON(thread->return_error.cmd != BR_OK); in binder_transaction()
3810 thread->return_error.cmd = BR_TRANSACTION_COMPLETE; in binder_transaction()
3811 binder_enqueue_thread_work(thread, &thread->return_error.work); in binder_transaction()
3815 binder_set_extended_error(&thread->ee, t_debug_id, in binder_transaction()
3818 thread->return_error.cmd = return_error; in binder_transaction()
3819 binder_enqueue_thread_work(thread, &thread->return_error.work); in binder_transaction()
3824 * binder_free_buf() - free the specified buffer
3829 * If buffer for an async transaction, enqueue the next async
3840 if (buffer->transaction) { in binder_free_buf()
3841 buffer->transaction->buffer = NULL; in binder_free_buf()
3842 buffer->transaction = NULL; in binder_free_buf()
3845 if (buffer->async_transaction && buffer->target_node) { in binder_free_buf()
3849 buf_node = buffer->target_node; in binder_free_buf()
3851 BUG_ON(!buf_node->has_async_transaction); in binder_free_buf()
3852 BUG_ON(buf_node->proc != proc); in binder_free_buf()
3854 &buf_node->async_todo); in binder_free_buf()
3856 buf_node->has_async_transaction = false; in binder_free_buf()
3859 w, &proc->todo); in binder_free_buf()
3866 binder_alloc_free_buf(&proc->alloc, buffer); in binder_free_buf()
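binder_free_buf() runs when user space hands a received buffer back with BC_FREE_BUFFER. A sketch of that hand-back, assuming buffer_ptr is the value previously delivered in trd->data.ptr.buffer; the helper name is illustrative.

#include <linux/android/binder.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdint.h>

/* Sketch: return a received transaction buffer to the driver. */
static int free_binder_buffer(int binder_fd, binder_uintptr_t buffer_ptr)
{
	struct {
		uint32_t cmd;
		binder_uintptr_t ptr;
	} __attribute__((packed)) writebuf = {
		.cmd = BC_FREE_BUFFER,
		.ptr = buffer_ptr,	/* value delivered in trd->data.ptr.buffer */
	};
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(writebuf);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&writebuf;
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}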
3875 struct binder_context *context = proc->context; in binder_thread_write()
3880 while (ptr < end && thread->return_error.cmd == BR_OK) { in binder_thread_write()
3884 return -EFAULT; in binder_thread_write()
3889 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); in binder_thread_write()
3890 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); in binder_thread_write()
3904 return -EFAULT; in binder_thread_write()
3907 ret = -1; in binder_thread_write()
3911 mutex_lock(&context->context_mgr_node_lock); in binder_thread_write()
3912 ctx_mgr_node = context->binder_context_mgr_node; in binder_thread_write()
3914 if (ctx_mgr_node->proc == proc) { in binder_thread_write()
3916 proc->pid, thread->pid); in binder_thread_write()
3917 mutex_unlock(&context->context_mgr_node_lock); in binder_thread_write()
3918 return -EINVAL; in binder_thread_write()
3924 mutex_unlock(&context->context_mgr_node_lock); in binder_thread_write()
3932 proc->pid, thread->pid, in binder_thread_write()
3952 proc->pid, thread->pid, debug_string, in binder_thread_write()
3958 proc->pid, thread->pid, debug_string, in binder_thread_write()
3971 return -EFAULT; in binder_thread_write()
3974 return -EFAULT; in binder_thread_write()
3979 proc->pid, thread->pid, in binder_thread_write()
3986 if (cookie != node->cookie) { in binder_thread_write()
3988 proc->pid, thread->pid, in binder_thread_write()
3991 (u64)node_ptr, node->debug_id, in binder_thread_write()
3992 (u64)cookie, (u64)node->cookie); in binder_thread_write()
3998 if (node->pending_strong_ref == 0) { in binder_thread_write()
4000 proc->pid, thread->pid, in binder_thread_write()
4001 node->debug_id); in binder_thread_write()
4006 node->pending_strong_ref = 0; in binder_thread_write()
4008 if (node->pending_weak_ref == 0) { in binder_thread_write()
4010 proc->pid, thread->pid, in binder_thread_write()
4011 node->debug_id); in binder_thread_write()
4016 node->pending_weak_ref = 0; in binder_thread_write()
4023 proc->pid, thread->pid, in binder_thread_write()
4025 node->debug_id, node->local_strong_refs, in binder_thread_write()
4026 node->local_weak_refs, node->tmp_refs); in binder_thread_write()
4033 return -EINVAL; in binder_thread_write()
4036 return -EINVAL; in binder_thread_write()
4043 return -EFAULT; in binder_thread_write()
4046 buffer = binder_alloc_prepare_to_free(&proc->alloc, in binder_thread_write()
4049 if (PTR_ERR(buffer) == -EPERM) { in binder_thread_write()
4052 proc->pid, thread->pid, in binder_thread_write()
4057 proc->pid, thread->pid, in binder_thread_write()
4064 proc->pid, thread->pid, (u64)data_ptr, in binder_thread_write()
4065 buffer->debug_id, in binder_thread_write()
4066 buffer->transaction ? "active" : "finished"); in binder_thread_write()
4076 return -EFAULT; in binder_thread_write()
4087 return -EFAULT; in binder_thread_write()
4097 proc->pid, thread->pid); in binder_thread_write()
4099 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { in binder_thread_write()
4100 thread->looper |= BINDER_LOOPER_STATE_INVALID; in binder_thread_write()
4102 proc->pid, thread->pid); in binder_thread_write()
4103 } else if (proc->requested_threads == 0) { in binder_thread_write()
4104 thread->looper |= BINDER_LOOPER_STATE_INVALID; in binder_thread_write()
4106 proc->pid, thread->pid); in binder_thread_write()
4108 proc->requested_threads--; in binder_thread_write()
4109 proc->requested_threads_started++; in binder_thread_write()
4111 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; in binder_thread_write()
4117 proc->pid, thread->pid); in binder_thread_write()
4118 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { in binder_thread_write()
4119 thread->looper |= BINDER_LOOPER_STATE_INVALID; in binder_thread_write()
4121 proc->pid, thread->pid); in binder_thread_write()
4123 thread->looper |= BINDER_LOOPER_STATE_ENTERED; in binder_thread_write()
4128 proc->pid, thread->pid); in binder_thread_write()
4129 thread->looper |= BINDER_LOOPER_STATE_EXITED; in binder_thread_write()
4140 return -EFAULT; in binder_thread_write()
4143 return -EFAULT; in binder_thread_write()
4152 WARN_ON(thread->return_error.cmd != in binder_thread_write()
4154 thread->return_error.cmd = BR_ERROR; in binder_thread_write()
4157 &thread->return_error.work); in binder_thread_write()
4161 proc->pid, thread->pid); in binder_thread_write()
4169 proc->pid, thread->pid, in binder_thread_write()
4181 proc->pid, thread->pid, in binder_thread_write()
4185 (u64)cookie, ref->data.debug_id, in binder_thread_write()
4186 ref->data.desc, ref->data.strong, in binder_thread_write()
4187 ref->data.weak, ref->node->debug_id); in binder_thread_write()
4189 binder_node_lock(ref->node); in binder_thread_write()
4191 if (ref->death) { in binder_thread_write()
4193 proc->pid, thread->pid); in binder_thread_write()
4194 binder_node_unlock(ref->node); in binder_thread_write()
4200 INIT_LIST_HEAD(&death->work.entry); in binder_thread_write()
4201 death->cookie = cookie; in binder_thread_write()
4202 ref->death = death; in binder_thread_write()
4203 if (ref->node->proc == NULL) { in binder_thread_write()
4204 ref->death->work.type = BINDER_WORK_DEAD_BINDER; in binder_thread_write()
4208 &ref->death->work, &proc->todo); in binder_thread_write()
4213 if (ref->death == NULL) { in binder_thread_write()
4215 proc->pid, thread->pid); in binder_thread_write()
4216 binder_node_unlock(ref->node); in binder_thread_write()
4220 death = ref->death; in binder_thread_write()
4221 if (death->cookie != cookie) { in binder_thread_write()
4223 proc->pid, thread->pid, in binder_thread_write()
4224 (u64)death->cookie, in binder_thread_write()
4226 binder_node_unlock(ref->node); in binder_thread_write()
4230 ref->death = NULL; in binder_thread_write()
4232 if (list_empty(&death->work.entry)) { in binder_thread_write()
4233 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; in binder_thread_write()
4234 if (thread->looper & in binder_thread_write()
4239 &death->work); in binder_thread_write()
4242 &death->work, in binder_thread_write()
4243 &proc->todo); in binder_thread_write()
4248 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); in binder_thread_write()
4249 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; in binder_thread_write()
4253 binder_node_unlock(ref->node); in binder_thread_write()
4262 return -EFAULT; in binder_thread_write()
4266 list_for_each_entry(w, &proc->delivered_death, in binder_thread_write()
4273 if (tmp_death->cookie == cookie) { in binder_thread_write()
4280 proc->pid, thread->pid, (u64)cookie, in binder_thread_write()
4284 proc->pid, thread->pid, (u64)cookie); in binder_thread_write()
4288 binder_dequeue_work_ilocked(&death->work); in binder_thread_write()
4289 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { in binder_thread_write()
4290 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; in binder_thread_write()
4291 if (thread->looper & in binder_thread_write()
4295 thread, &death->work); in binder_thread_write()
4298 &death->work, in binder_thread_write()
4299 &proc->todo); in binder_thread_write()
4308 proc->pid, thread->pid, cmd); in binder_thread_write()
4309 return -EINVAL; in binder_thread_write()
4311 *consumed = ptr - buffer; in binder_thread_write()
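The BC_REQUEST_DEATH_NOTIFICATION / BC_CLEAR_DEATH_NOTIFICATION / BC_DEAD_BINDER_DONE cases decoded above are driven from user space roughly as follows; a sketch assuming an already-acquired handle and a caller-chosen cookie, with an illustrative helper name.

#include <linux/android/binder.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdint.h>

/* Sketch: subscribe to (or clear) a death notification for a handle. */
static int request_death_notification(int binder_fd, uint32_t handle,
				      binder_uintptr_t cookie, int clear)
{
	struct {
		uint32_t cmd;
		struct binder_handle_cookie hc;
	} __attribute__((packed)) writebuf;
	struct binder_write_read bwr;

	memset(&writebuf, 0, sizeof(writebuf));
	writebuf.cmd = clear ? BC_CLEAR_DEATH_NOTIFICATION
			     : BC_REQUEST_DEATH_NOTIFICATION;
	writebuf.hc.handle = handle;
	writebuf.hc.cookie = cookie;	/* echoed back in BR_DEAD_BINDER */

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(writebuf);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&writebuf;

	/* After consuming BR_DEAD_BINDER, user space must answer with
	 * BC_DEAD_BINDER_DONE carrying the same cookie, which is what the
	 * delivered_death handling above waits for. */
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}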
4322 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); in binder_stat_br()
4323 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); in binder_stat_br()
4338 return -EFAULT; in binder_put_node_cmd()
4342 return -EFAULT; in binder_put_node_cmd()
4346 return -EFAULT; in binder_put_node_cmd()
4351 proc->pid, thread->pid, cmd_name, node_debug_id, in binder_put_node_cmd()
4362 struct binder_proc *proc = thread->proc; in binder_wait_for_work()
4367 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE); in binder_wait_for_work()
4371 list_add(&thread->waiting_thread_node, in binder_wait_for_work()
4372 &proc->waiting_threads); in binder_wait_for_work()
4376 list_del_init(&thread->waiting_thread_node); in binder_wait_for_work()
4378 ret = -EINTR; in binder_wait_for_work()
4382 finish_wait(&thread->wait, &wait); in binder_wait_for_work()
4389 * binder_apply_fd_fixups() - finish fd translation
4390 * @proc: binder_proc associated with @t->buffer in binder_apply_fd_fixups()

4407 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { in binder_apply_fd_fixups()
4413 t->debug_id, fd); in binder_apply_fd_fixups()
4414 ret = -ENOMEM; in binder_apply_fd_fixups()
4419 t->debug_id, fd); in binder_apply_fd_fixups()
4420 trace_binder_transaction_fd_recv(t, fd, fixup->offset); in binder_apply_fd_fixups()
4421 fixup->target_fd = fd; in binder_apply_fd_fixups()
4422 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, in binder_apply_fd_fixups()
4423 fixup->offset, &fd, in binder_apply_fd_fixups()
4425 ret = -EINVAL; in binder_apply_fd_fixups()
4429 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { in binder_apply_fd_fixups()
4430 fd_install(fixup->target_fd, fixup->file); in binder_apply_fd_fixups()
4431 list_del(&fixup->fixup_entry); in binder_apply_fd_fixups()
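The fixups applied here originate from BINDER_TYPE_FD objects embedded by the sender. A sketch of filling one such object, assuming its offset is also listed in the transaction's offsets array and that the target accepts file descriptors (otherwise binder_translate_fd() rejects the object).

#include <linux/android/binder.h>
#include <string.h>

/* Sketch: describe one file descriptor to be translated into the target. */
static void fill_fd_object(struct binder_fd_object *obj, int fd)
{
	memset(obj, 0, sizeof(*obj));
	obj->hdr.type = BINDER_TYPE_FD;
	obj->fd = fd;		/* replaced with a target-side fd by the driver */
	obj->cookie = 0;
	/* The byte offset of this object inside data.ptr.buffer must appear
	 * in data.ptr.offsets so binder_transaction() can find and fix it up. */
}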
4456 return -EFAULT; in binder_thread_read()
4465 thread->looper |= BINDER_LOOPER_STATE_WAITING; in binder_thread_read()
4468 !!thread->transaction_stack, in binder_thread_read()
4469 !binder_worklist_empty(proc, &thread->todo)); in binder_thread_read()
4471 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | in binder_thread_read()
4474 proc->pid, thread->pid, thread->looper); in binder_thread_read()
4478 binder_set_nice(proc->default_priority); in binder_thread_read()
4483 ret = -EAGAIN; in binder_thread_read()
4488 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; in binder_thread_read()
4504 if (!binder_worklist_empty_ilocked(&thread->todo)) in binder_thread_read()
4505 list = &thread->todo; in binder_thread_read()
4506 else if (!binder_worklist_empty_ilocked(&proc->todo) && in binder_thread_read()
4508 list = &proc->todo; in binder_thread_read()
4513 if (ptr - buffer == 4 && !thread->looper_need_return) in binder_thread_read()
4518 if (end - ptr < sizeof(tr) + 4) { in binder_thread_read()
4523 if (binder_worklist_empty_ilocked(&thread->todo)) in binder_thread_read()
4524 thread->process_todo = false; in binder_thread_read()
4526 switch (w->type) { in binder_thread_read()
4535 WARN_ON(e->cmd == BR_OK); in binder_thread_read()
4537 if (put_user(e->cmd, (uint32_t __user *)ptr)) in binder_thread_read()
4538 return -EFAULT; in binder_thread_read()
4539 cmd = e->cmd; in binder_thread_read()
4540 e->cmd = BR_OK; in binder_thread_read()
4548 if (proc->oneway_spam_detection_enabled && in binder_thread_read()
4549 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT) in binder_thread_read()
4551 else if (w->type == BINDER_WORK_TRANSACTION_PENDING) in binder_thread_read()
4559 return -EFAULT; in binder_thread_read()
4565 proc->pid, thread->pid); in binder_thread_read()
4570 binder_uintptr_t node_ptr = node->ptr; in binder_thread_read()
4571 binder_uintptr_t node_cookie = node->cookie; in binder_thread_read()
4572 int node_debug_id = node->debug_id; in binder_thread_read()
4577 BUG_ON(proc != node->proc); in binder_thread_read()
4578 strong = node->internal_strong_refs || in binder_thread_read()
4579 node->local_strong_refs; in binder_thread_read()
4580 weak = !hlist_empty(&node->refs) || in binder_thread_read()
4581 node->local_weak_refs || in binder_thread_read()
4582 node->tmp_refs || strong; in binder_thread_read()
4583 has_strong_ref = node->has_strong_ref; in binder_thread_read()
4584 has_weak_ref = node->has_weak_ref; in binder_thread_read()
4587 node->has_weak_ref = 1; in binder_thread_read()
4588 node->pending_weak_ref = 1; in binder_thread_read()
4589 node->local_weak_refs++; in binder_thread_read()
4592 node->has_strong_ref = 1; in binder_thread_read()
4593 node->pending_strong_ref = 1; in binder_thread_read()
4594 node->local_strong_refs++; in binder_thread_read()
4597 node->has_strong_ref = 0; in binder_thread_read()
4599 node->has_weak_ref = 0; in binder_thread_read()
4603 proc->pid, thread->pid, in binder_thread_read()
4607 rb_erase(&node->rb_node, &proc->nodes); in binder_thread_read()
4647 proc->pid, thread->pid, in binder_thread_read()
4662 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) in binder_thread_read()
4666 cookie = death->cookie; in binder_thread_read()
4670 proc->pid, thread->pid, in binder_thread_read()
4675 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { in binder_thread_read()
4681 w, &proc->delivered_death); in binder_thread_read()
4685 return -EFAULT; in binder_thread_read()
4689 return -EFAULT; in binder_thread_read()
4698 proc->pid, thread->pid, w->type); in binder_thread_read()
4705 BUG_ON(t->buffer == NULL); in binder_thread_read()
4706 if (t->buffer->target_node) { in binder_thread_read()
4707 struct binder_node *target_node = t->buffer->target_node; in binder_thread_read()
4709 trd->target.ptr = target_node->ptr; in binder_thread_read()
4710 trd->cookie = target_node->cookie; in binder_thread_read()
4711 t->saved_priority = task_nice(current); in binder_thread_read()
4712 if (t->priority < target_node->min_priority && in binder_thread_read()
4713 !(t->flags & TF_ONE_WAY)) in binder_thread_read()
4714 binder_set_nice(t->priority); in binder_thread_read()
4715 else if (!(t->flags & TF_ONE_WAY) || in binder_thread_read()
4716 t->saved_priority > target_node->min_priority) in binder_thread_read()
4717 binder_set_nice(target_node->min_priority); in binder_thread_read()
4720 trd->target.ptr = 0; in binder_thread_read()
4721 trd->cookie = 0; in binder_thread_read()
4724 trd->code = t->code; in binder_thread_read()
4725 trd->flags = t->flags; in binder_thread_read()
4726 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); in binder_thread_read()
4730 struct task_struct *sender = t_from->proc->tsk; in binder_thread_read()
4732 trd->sender_pid = in binder_thread_read()
4736 binder_inner_proc_lock(thread->proc); in binder_thread_read()
4737 thread->sender_pid_nr = task_tgid_nr(sender); in binder_thread_read()
4738 binder_inner_proc_unlock(thread->proc); in binder_thread_read()
4741 trd->sender_pid = 0; in binder_thread_read()
4743 binder_inner_proc_lock(thread->proc); in binder_thread_read()
4744 thread->sender_pid_nr = 0; in binder_thread_read()
4745 binder_inner_proc_unlock(thread->proc); in binder_thread_read()
4751 struct binder_buffer *buffer = t->buffer; in binder_thread_read()
4752 bool oneway = !!(t->flags & TF_ONE_WAY); in binder_thread_read()
4753 int tid = t->debug_id; in binder_thread_read()
4757 buffer->transaction = NULL; in binder_thread_read()
4763 proc->pid, thread->pid, in binder_thread_read()
4764 oneway ? "async " : in binder_thread_read()
4770 return -EFAULT; in binder_thread_read()
4777 trd->data_size = t->buffer->data_size; in binder_thread_read()
4778 trd->offsets_size = t->buffer->offsets_size; in binder_thread_read()
4779 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; in binder_thread_read()
4780 trd->data.ptr.offsets = trd->data.ptr.buffer + in binder_thread_read()
4781 ALIGN(t->buffer->data_size, in binder_thread_read()
4784 tr.secctx = t->security_ctx; in binder_thread_read()
4785 if (t->security_ctx) { in binder_thread_read()
4796 return -EFAULT; in binder_thread_read()
4806 return -EFAULT; in binder_thread_read()
4813 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n", in binder_thread_read()
4814 proc->pid, thread->pid, in binder_thread_read()
4818 t->debug_id, t_from ? t_from->proc->pid : 0, in binder_thread_read()
4819 t_from ? t_from->pid : 0, cmd, in binder_thread_read()
4820 t->buffer->data_size, t->buffer->offsets_size, in binder_thread_read()
4821 (u64)trd->data.ptr.buffer, in binder_thread_read()
4822 (u64)trd->data.ptr.offsets); in binder_thread_read()
4826 t->buffer->allow_user_free = 1; in binder_thread_read()
4828 binder_inner_proc_lock(thread->proc); in binder_thread_read()
4829 thread->tokens.sender_tokenid = t->sender_tokenid; in binder_thread_read()
4830 thread->tokens.first_tokenid = t->first_tokenid; in binder_thread_read()
4831 binder_inner_proc_unlock(thread->proc); in binder_thread_read()
4833 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { in binder_thread_read()
4834 binder_inner_proc_lock(thread->proc); in binder_thread_read()
4835 t->to_parent = thread->transaction_stack; in binder_thread_read()
4836 t->to_thread = thread; in binder_thread_read()
4837 thread->transaction_stack = t; in binder_thread_read()
4838 binder_inner_proc_unlock(thread->proc); in binder_thread_read()
4847 *consumed = ptr - buffer; in binder_thread_read()
4849 if (proc->requested_threads == 0 && in binder_thread_read()
4850 list_empty(&thread->proc->waiting_threads) && in binder_thread_read()
4851 proc->requested_threads_started < proc->max_threads && in binder_thread_read()
4852 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | in binder_thread_read()
4853 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ in binder_thread_read()
4855 proc->requested_threads++; in binder_thread_read()
4859 proc->pid, thread->pid); in binder_thread_read()
4861 return -EFAULT; in binder_thread_read()
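A sketch of the user-space read side that consumes what binder_thread_read() produces: enter the looper once, then drain BR_* command words, starting a new thread when BR_SPAWN_LOOPER arrives. Parsing is deliberately partial; only the command words this sketch understands are handled, and the function name is illustrative.

#include <linux/android/binder.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdint.h>

/* Sketch: a looper thread enters the loop once, then blocks in reads. */
static void binder_looper(int binder_fd)
{
	uint32_t enter = BC_ENTER_LOOPER;
	struct binder_write_read bwr;
	uint32_t readbuf[128];

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(enter);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&enter;
	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);	/* mark thread as looper */

	for (;;) {
		size_t off = 0;

		memset(&bwr, 0, sizeof(bwr));
		bwr.read_size = sizeof(readbuf);
		bwr.read_buffer = (binder_uintptr_t)(uintptr_t)readbuf;
		if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
			break;

		while (off + sizeof(uint32_t) <= bwr.read_consumed) {
			uint32_t cmd = *(uint32_t *)((char *)readbuf + off);

			off += sizeof(uint32_t);
			switch (cmd) {
			case BR_NOOP:
			case BR_TRANSACTION_COMPLETE:
				break;
			case BR_SPAWN_LOOPER:
				/* start another thread; it must answer with
				 * BC_REGISTER_LOOPER before reading */
				break;
			case BR_TRANSACTION:
			case BR_REPLY:
				/* a binder_transaction_data follows the word;
				 * dispatch it, then send BC_FREE_BUFFER */
				off += sizeof(struct binder_transaction_data);
				break;
			default:
				/* other BR_* codes carry payloads this sketch
				 * does not parse */
				return;
			}
		}
	}
}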
4877 wtype = w ? w->type : 0; in binder_release_work()
4897 e->cmd); in binder_release_work()
4914 (u64)death->cookie); in binder_release_work()
4934 struct rb_node **p = &proc->threads.rb_node; in binder_get_thread_ilocked()
4940 if (current->pid < thread->pid) in binder_get_thread_ilocked()
4941 p = &(*p)->rb_left; in binder_get_thread_ilocked()
4942 else if (current->pid > thread->pid) in binder_get_thread_ilocked()
4943 p = &(*p)->rb_right; in binder_get_thread_ilocked()
4951 thread->proc = proc; in binder_get_thread_ilocked()
4952 thread->pid = current->pid; in binder_get_thread_ilocked()
4953 atomic_set(&thread->tmp_ref, 0); in binder_get_thread_ilocked()
4954 init_waitqueue_head(&thread->wait); in binder_get_thread_ilocked()
4955 INIT_LIST_HEAD(&thread->todo); in binder_get_thread_ilocked()
4956 rb_link_node(&thread->rb_node, parent, p); in binder_get_thread_ilocked()
4957 rb_insert_color(&thread->rb_node, &proc->threads); in binder_get_thread_ilocked()
4958 thread->looper_need_return = true; in binder_get_thread_ilocked()
4959 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; in binder_get_thread_ilocked()
4960 thread->return_error.cmd = BR_OK; in binder_get_thread_ilocked()
4961 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; in binder_get_thread_ilocked()
4962 thread->reply_error.cmd = BR_OK; in binder_get_thread_ilocked()
4963 thread->ee.command = BR_OK; in binder_get_thread_ilocked()
4964 INIT_LIST_HEAD(&new_thread->waiting_thread_node); in binder_get_thread_ilocked()
4993 BUG_ON(!list_empty(&proc->todo)); in binder_free_proc()
4994 BUG_ON(!list_empty(&proc->delivered_death)); in binder_free_proc()
4995 if (proc->outstanding_txns) in binder_free_proc()
4997 __func__, proc->outstanding_txns); in binder_free_proc()
4998 device = container_of(proc->context, struct binder_device, context); in binder_free_proc()
4999 if (refcount_dec_and_test(&device->ref)) { in binder_free_proc()
5000 kfree(proc->context->name); in binder_free_proc()
5003 binder_alloc_deferred_release(&proc->alloc); in binder_free_proc()
5004 put_task_struct(proc->tsk); in binder_free_proc()
5005 put_cred(proc->cred); in binder_free_proc()
5012 BUG_ON(!list_empty(&thread->todo)); in binder_free_thread()
5014 binder_proc_dec_tmpref(thread->proc); in binder_free_thread()
5026 binder_inner_proc_lock(thread->proc); in binder_thread_release()
5029 * after we remove this thread from proc->threads. in binder_thread_release()
5033 proc->tmp_ref++; in binder_thread_release()
5038 atomic_inc(&thread->tmp_ref); in binder_thread_release()
5039 rb_erase(&thread->rb_node, &proc->threads); in binder_thread_release()
5040 t = thread->transaction_stack; in binder_thread_release()
5042 spin_lock(&t->lock); in binder_thread_release()
5043 if (t->to_thread == thread) in binder_thread_release()
5046 __acquire(&t->lock); in binder_thread_release()
5048 thread->is_dead = true; in binder_thread_release()
5055 proc->pid, thread->pid, in binder_thread_release()
5056 t->debug_id, in binder_thread_release()
5057 (t->to_thread == thread) ? "in" : "out"); in binder_thread_release()
5059 if (t->to_thread == thread) { in binder_thread_release()
5060 thread->proc->outstanding_txns--; in binder_thread_release()
5061 t->to_proc = NULL; in binder_thread_release()
5062 t->to_thread = NULL; in binder_thread_release()
5063 if (t->buffer) { in binder_thread_release()
5064 t->buffer->transaction = NULL; in binder_thread_release()
5065 t->buffer = NULL; in binder_thread_release()
5067 t = t->to_parent; in binder_thread_release()
5068 } else if (t->from == thread) { in binder_thread_release()
5069 t->from = NULL; in binder_thread_release()
5071 t->from_pid = -1; in binder_thread_release()
5072 t->from_tid = -1; in binder_thread_release()
5074 t = t->from_parent; in binder_thread_release()
5077 spin_unlock(&last_t->lock); in binder_thread_release()
5079 spin_lock(&t->lock); in binder_thread_release()
5081 __acquire(&t->lock); in binder_thread_release()
5084 __release(&t->lock); in binder_thread_release()
5090 if (thread->looper & BINDER_LOOPER_STATE_POLL) in binder_thread_release()
5091 wake_up_pollfree(&thread->wait); in binder_thread_release()
5093 binder_inner_proc_unlock(thread->proc); in binder_thread_release()
5102 if (thread->looper & BINDER_LOOPER_STATE_POLL) in binder_thread_release()
5107 binder_release_work(proc, &thread->todo); in binder_thread_release()
5115 struct binder_proc *proc = filp->private_data; in binder_poll()
5123 binder_inner_proc_lock(thread->proc); in binder_poll()
5124 thread->looper |= BINDER_LOOPER_STATE_POLL; in binder_poll()
5127 binder_inner_proc_unlock(thread->proc); in binder_poll()
5129 poll_wait(filp, &thread->wait, wait); in binder_poll()
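Because binder_poll() wires thread->wait into the poll table, user space can wait for incoming work without blocking inside BINDER_WRITE_READ; a small sketch.

#include <poll.h>

/* Sketch: wait for binder work with poll() instead of a blocking read. */
static int wait_for_binder_work(int binder_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	/* POLLIN means a subsequent BINDER_WRITE_READ read will not block. */
	return (n == 1 && (pfd.revents & POLLIN)) ? 0 : -1;
}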
5142 struct binder_proc *proc = filp->private_data; in binder_ioctl_write_read()
5148 ret = -EINVAL; in binder_ioctl_write_read()
5153 ret = -EFAULT; in binder_ioctl_write_read()
5158 proc->pid, thread->pid, in binder_ioctl_write_read()
5171 ret = -EFAULT; in binder_ioctl_write_read()
5179 filp->f_flags & O_NONBLOCK); in binder_ioctl_write_read()
5182 if (!binder_worklist_empty_ilocked(&proc->todo)) in binder_ioctl_write_read()
5187 ret = -EFAULT; in binder_ioctl_write_read()
5193 proc->pid, thread->pid, in binder_ioctl_write_read()
5197 ret = -EFAULT; in binder_ioctl_write_read()
5208 struct binder_proc *proc = filp->private_data; in binder_ioctl_set_ctx_mgr()
5209 struct binder_context *context = proc->context; in binder_ioctl_set_ctx_mgr()
5213 mutex_lock(&context->context_mgr_node_lock); in binder_ioctl_set_ctx_mgr()
5214 if (context->binder_context_mgr_node) { in binder_ioctl_set_ctx_mgr()
5216 ret = -EBUSY; in binder_ioctl_set_ctx_mgr()
5219 ret = security_binder_set_context_mgr(proc->cred); in binder_ioctl_set_ctx_mgr()
5222 if (uid_valid(context->binder_context_mgr_uid)) { in binder_ioctl_set_ctx_mgr()
5223 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { in binder_ioctl_set_ctx_mgr()
5227 context->binder_context_mgr_uid)); in binder_ioctl_set_ctx_mgr()
5228 ret = -EPERM; in binder_ioctl_set_ctx_mgr()
5232 context->binder_context_mgr_uid = curr_euid; in binder_ioctl_set_ctx_mgr()
5236 ret = -ENOMEM; in binder_ioctl_set_ctx_mgr()
5240 new_node->local_weak_refs++; in binder_ioctl_set_ctx_mgr()
5241 new_node->local_strong_refs++; in binder_ioctl_set_ctx_mgr()
5242 new_node->has_strong_ref = 1; in binder_ioctl_set_ctx_mgr()
5243 new_node->has_weak_ref = 1; in binder_ioctl_set_ctx_mgr()
5244 context->binder_context_mgr_node = new_node; in binder_ioctl_set_ctx_mgr()
5248 mutex_unlock(&context->context_mgr_node_lock); in binder_ioctl_set_ctx_mgr()
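A context manager (for example servicemanager) claims node 0 with a single ioctl; a sketch, assuming the security hook checked above permits the caller.

#include <linux/android/binder.h>
#include <sys/ioctl.h>

/* Sketch: register the calling process as the context manager. */
static int become_context_manager(int binder_fd)
{
	/* Fails with -EBUSY if another manager is already registered, or
	 * -EPERM if security_binder_set_context_mgr() denies the caller. */
	return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
}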
5256 struct binder_context *context = proc->context; in binder_ioctl_get_node_info_for_ref()
5257 __u32 handle = info->handle; in binder_ioctl_get_node_info_for_ref()
5259 if (info->strong_count || info->weak_count || info->reserved1 || in binder_ioctl_get_node_info_for_ref()
5260 info->reserved2 || info->reserved3) { in binder_ioctl_get_node_info_for_ref()
5261 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", in binder_ioctl_get_node_info_for_ref()
5262 proc->pid); in binder_ioctl_get_node_info_for_ref()
5263 return -EINVAL; in binder_ioctl_get_node_info_for_ref()
5267 mutex_lock(&context->context_mgr_node_lock); in binder_ioctl_get_node_info_for_ref()
5268 if (!context->binder_context_mgr_node || in binder_ioctl_get_node_info_for_ref()
5269 context->binder_context_mgr_node->proc != proc) { in binder_ioctl_get_node_info_for_ref()
5270 mutex_unlock(&context->context_mgr_node_lock); in binder_ioctl_get_node_info_for_ref()
5271 return -EPERM; in binder_ioctl_get_node_info_for_ref()
5273 mutex_unlock(&context->context_mgr_node_lock); in binder_ioctl_get_node_info_for_ref()
5277 return -EINVAL; in binder_ioctl_get_node_info_for_ref()
5279 info->strong_count = node->local_strong_refs + in binder_ioctl_get_node_info_for_ref()
5280 node->internal_strong_refs; in binder_ioctl_get_node_info_for_ref()
5281 info->weak_count = node->local_weak_refs; in binder_ioctl_get_node_info_for_ref()
5292 binder_uintptr_t ptr = info->ptr; in binder_ioctl_get_node_debug_info()
5297 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { in binder_ioctl_get_node_debug_info()
5300 if (node->ptr > ptr) { in binder_ioctl_get_node_debug_info()
5301 info->ptr = node->ptr; in binder_ioctl_get_node_debug_info()
5302 info->cookie = node->cookie; in binder_ioctl_get_node_debug_info()
5303 info->has_strong_ref = node->has_strong_ref; in binder_ioctl_get_node_debug_info()
5304 info->has_weak_ref = node->has_weak_ref; in binder_ioctl_get_node_debug_info()
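BINDER_GET_NODE_DEBUG_INFO returns the first node whose ptr is strictly greater than the one passed in, so user space can walk its own nodes by feeding each result back; a sketch.

#include <linux/android/binder.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>

/* Sketch: enumerate this process's binder nodes for debugging. */
static void dump_own_nodes(int binder_fd)
{
	struct binder_node_debug_info info;

	memset(&info, 0, sizeof(info));
	do {
		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
			break;
		if (info.ptr)
			printf("node ptr %llx cookie %llx strong %u weak %u\n",
			       (unsigned long long)info.ptr,
			       (unsigned long long)info.cookie,
			       info.has_strong_ref, info.has_weak_ref);
	} while (info.ptr);	/* ptr == 0 once no node remains above the input */
}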
5318 if (proc->outstanding_txns > 0) in binder_txns_pending_ilocked()
5321 for (n = rb_first(&proc->threads); n; n = rb_next(n)) { in binder_txns_pending_ilocked()
5323 if (thread->transaction_stack) in binder_txns_pending_ilocked()
5334 if (!info->enable) { in binder_ioctl_freeze()
5336 target_proc->sync_recv = false; in binder_ioctl_freeze()
5337 target_proc->async_recv = false; in binder_ioctl_freeze()
5338 target_proc->is_frozen = false; in binder_ioctl_freeze()
5349 target_proc->sync_recv = false; in binder_ioctl_freeze()
5350 target_proc->async_recv = false; in binder_ioctl_freeze()
5351 target_proc->is_frozen = true; in binder_ioctl_freeze()
5354 if (info->timeout_ms > 0) in binder_ioctl_freeze()
5356 target_proc->freeze_wait, in binder_ioctl_freeze()
5357 (!target_proc->outstanding_txns), in binder_ioctl_freeze()
5358 msecs_to_jiffies(info->timeout_ms)); in binder_ioctl_freeze()
5364 ret = -EAGAIN; in binder_ioctl_freeze()
5370 target_proc->is_frozen = false; in binder_ioctl_freeze()
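A sketch of the user-space side of BINDER_FREEZE, which the code above implements; the 100 ms timeout is an arbitrary example value, and the helper name is illustrative.

#include <linux/android/binder.h>
#include <sys/ioctl.h>

/* Sketch: freeze (or unfreeze) a target process's binder interface. */
static int binder_freeze_pid(int binder_fd, int pid, int enable)
{
	struct binder_freeze_info info = {
		.pid = pid,
		.enable = enable,
		/* wait up to 100 ms for outstanding synchronous txns to drain */
		.timeout_ms = enable ? 100 : 0,
	};

	/* Returns -EAGAIN while transactions are still pending, as above. */
	return ioctl(binder_fd, BINDER_FREEZE, &info);
}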
5384 info->sync_recv = 0; in binder_ioctl_get_freezer_info()
5385 info->async_recv = 0; in binder_ioctl_get_freezer_info()
5389 if (target_proc->pid == info->pid) { in binder_ioctl_get_freezer_info()
5393 info->sync_recv |= target_proc->sync_recv | in binder_ioctl_get_freezer_info()
5395 info->async_recv |= target_proc->async_recv; in binder_ioctl_get_freezer_info()
5402 return -EINVAL; in binder_ioctl_get_freezer_info()
5412 binder_inner_proc_lock(thread->proc); in binder_ioctl_get_extended_error()
5413 ee = thread->ee; in binder_ioctl_get_extended_error()
5414 binder_set_extended_error(&thread->ee, 0, BR_OK, 0); in binder_ioctl_get_extended_error()
5415 binder_inner_proc_unlock(thread->proc); in binder_ioctl_get_extended_error()
5418 return -EFAULT; in binder_ioctl_get_extended_error()
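After a failed transaction, the extended error captured via binder_set_extended_error() can be read back from the same thread; a sketch.

#include <linux/android/binder.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Sketch: fetch the extended error recorded for the last failed transaction. */
static void report_last_binder_error(int binder_fd)
{
	struct binder_extended_error ee;

	if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
		fprintf(stderr, "binder txn %u failed: cmd %u param %d\n",
			ee.id, ee.command, ee.param);
}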
5426 struct binder_proc *proc = filp->private_data; in binder_ioctl()
5432 proc->pid, current->pid, cmd, arg);*/ in binder_ioctl()
5434 binder_selftest_alloc(&proc->alloc); in binder_ioctl()
5444 ret = -ENOMEM; in binder_ioctl()
5459 ret = -EINVAL; in binder_ioctl()
5463 proc->max_threads = max_threads; in binder_ioctl()
5471 ret = -EINVAL; in binder_ioctl()
5486 proc->pid, thread->pid); in binder_ioctl()
5494 ret = -EINVAL; in binder_ioctl()
5498 &ver->protocol_version)) { in binder_ioctl()
5499 ret = -EINVAL; in binder_ioctl()
5508 ret = -EFAULT; in binder_ioctl()
5517 ret = -EFAULT; in binder_ioctl()
5527 ret = -EFAULT; in binder_ioctl()
5536 ret = -EFAULT; in binder_ioctl()
5549 ret = -EFAULT; in binder_ioctl()
5555 if (target_proc->pid == info.pid) in binder_ioctl()
5561 ret = -EINVAL; in binder_ioctl()
5571 ret = -ENOMEM; in binder_ioctl()
5576 if (target_proc->pid != info.pid) in binder_ioctl()
5580 target_proc->tmp_ref++; in binder_ioctl()
5605 ret = -EFAULT; in binder_ioctl()
5614 ret = -EFAULT; in binder_ioctl()
5623 ret = -EFAULT; in binder_ioctl()
5627 proc->oneway_spam_detection_enabled = (bool)enable; in binder_ioctl()
5640 ret = -EINVAL; in binder_ioctl()
5643 if (put_user(BINDER_CURRENT_FEATURE_SET, &features->feature_set)) { in binder_ioctl()
5644 ret = -EINVAL; in binder_ioctl()
5655 ret = -EINVAL; in binder_ioctl()
5659 token = thread->tokens.sender_tokenid; in binder_ioctl()
5660 ftoken = thread->tokens.first_tokenid; in binder_ioctl()
5662 if (put_user(token, &tokens->sender_tokenid)) { in binder_ioctl()
5663 ret = -EINVAL; in binder_ioctl()
5666 if (put_user(ftoken, &tokens->first_tokenid)) { in binder_ioctl()
5667 ret = -EINVAL; in binder_ioctl()
5679 ret = -EINVAL; in binder_ioctl()
5684 token = thread->tokens.sender_tokenid; in binder_ioctl()
5685 ftoken = thread->tokens.first_tokenid; in binder_ioctl()
5687 sender_pid_nr = thread->sender_pid_nr; in binder_ioctl()
5690 if (put_user(token, &sender->tokens.sender_tokenid)) { in binder_ioctl()
5691 ret = -EFAULT; in binder_ioctl()
5694 if (put_user(ftoken, &sender->tokens.first_tokenid)) { in binder_ioctl()
5695 ret = -EFAULT; in binder_ioctl()
5699 if (put_user(sender_pid_nr, &sender->sender_pid_nr)) { in binder_ioctl()
5700 ret = -EFAULT; in binder_ioctl()
5707 ret = -EINVAL; in binder_ioctl()
5713 thread->looper_need_return = false; in binder_ioctl()
5715 if (ret && ret != -EINTR) in binder_ioctl()
5716 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); in binder_ioctl()
5724 struct binder_proc *proc = vma->vm_private_data; in binder_vma_open()
5727 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", in binder_vma_open()
5728 proc->pid, vma->vm_start, vma->vm_end, in binder_vma_open()
5729 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, in binder_vma_open()
5730 (unsigned long)pgprot_val(vma->vm_page_prot)); in binder_vma_open()
5735 struct binder_proc *proc = vma->vm_private_data; in binder_vma_close()
5738 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", in binder_vma_close()
5739 proc->pid, vma->vm_start, vma->vm_end, in binder_vma_close()
5740 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, in binder_vma_close()
5741 (unsigned long)pgprot_val(vma->vm_page_prot)); in binder_vma_close()
5742 binder_alloc_vma_close(&proc->alloc); in binder_vma_close()
5758 struct binder_proc *proc = filp->private_data; in binder_mmap()
5760 if (proc->tsk != current->group_leader) in binder_mmap()
5761 return -EINVAL; in binder_mmap()
5764 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", in binder_mmap()
5765 __func__, proc->pid, vma->vm_start, vma->vm_end, in binder_mmap()
5766 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, in binder_mmap()
5767 (unsigned long)pgprot_val(vma->vm_page_prot)); in binder_mmap()
5769 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { in binder_mmap()
5770 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, in binder_mmap()
5771 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM); in binder_mmap()
5772 return -EPERM; in binder_mmap()
5776 vma->vm_ops = &binder_vm_ops; in binder_mmap()
5777 vma->vm_private_data = proc; in binder_mmap()
5779 return binder_alloc_mmap_handler(&proc->alloc, vma); in binder_mmap()
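A sketch of the user-space open/mmap sequence that ends up in binder_mmap(); the mapping size and device path are illustrative (libbinder maps roughly 1 MB, read-only), and the helper name is not part of this file.

#include <linux/android/binder.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

#define BINDER_MMAP_SIZE (1024 * 1024)	/* illustrative size */

/* Sketch: open a binder device node and map its receive buffer. */
static int open_binder(const char *path, void **map_out)
{
	struct binder_version vers;
	int fd = open(path, O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		goto fail;
	/* PROT_READ only and MAP_PRIVATE: writes go through BINDER_WRITE_READ,
	 * never through this mapping (see FORBIDDEN_MMAP_FLAGS above). */
	*map_out = mmap(NULL, BINDER_MMAP_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
	if (*map_out == MAP_FAILED)
		goto fail;
	return fd;
fail:
	close(fd);
	return -1;
}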
5791 current->group_leader->pid, current->pid); in binder_open()
5795 return -ENOMEM; in binder_open()
5796 spin_lock_init(&proc->inner_lock); in binder_open()
5797 spin_lock_init(&proc->outer_lock); in binder_open()
5798 get_task_struct(current->group_leader); in binder_open()
5799 proc->tsk = current->group_leader; in binder_open()
5800 proc->cred = get_cred(filp->f_cred); in binder_open()
5801 INIT_LIST_HEAD(&proc->todo); in binder_open()
5802 init_waitqueue_head(&proc->freeze_wait); in binder_open()
5803 proc->default_priority = task_nice(current); in binder_open()
5806 binder_dev = nodp->i_private; in binder_open()
5807 info = nodp->i_sb->s_fs_info; in binder_open()
5808 binder_binderfs_dir_entry_proc = info->proc_log_dir; in binder_open()
5810 binder_dev = container_of(filp->private_data, in binder_open()
5813 refcount_inc(&binder_dev->ref); in binder_open()
5814 proc->context = &binder_dev->context; in binder_open()
5815 binder_alloc_init(&proc->alloc); in binder_open()
5818 proc->pid = current->group_leader->pid; in binder_open()
5819 INIT_LIST_HEAD(&proc->delivered_death); in binder_open()
5820 INIT_LIST_HEAD(&proc->waiting_threads); in binder_open()
5821 filp->private_data = proc; in binder_open()
5825 if (itr->pid == proc->pid) { in binder_open()
5830 hlist_add_head(&proc->proc_node, &binder_procs); in binder_open()
5836 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); in binder_open()
5843 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, in binder_open()
5845 (void *)(unsigned long)proc->pid, in binder_open()
5853 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); in binder_open()
5861 strbuf, &proc_fops, (void *)(unsigned long)proc->pid); in binder_open()
5863 proc->binderfs_entry = binderfs_entry; in binder_open()
5878 struct binder_proc *proc = filp->private_data; in binder_flush()
5891 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { in binder_deferred_flush()
5894 thread->looper_need_return = true; in binder_deferred_flush()
5895 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { in binder_deferred_flush()
5896 wake_up_interruptible(&thread->wait); in binder_deferred_flush()
5903 "binder_flush: %d woke %d threads\n", proc->pid, in binder_deferred_flush()
5909 struct binder_proc *proc = filp->private_data; in binder_release()
5911 debugfs_remove(proc->debugfs_entry); in binder_release()
5913 if (proc->binderfs_entry) { in binder_release()
5914 binderfs_remove_file(proc->binderfs_entry); in binder_release()
5915 proc->binderfs_entry = NULL; in binder_release()
5927 struct binder_proc *proc = node->proc; in binder_node_release()
5929 binder_release_work(proc, &node->async_todo); in binder_node_release()
5933 binder_dequeue_work_ilocked(&node->work); in binder_node_release()
5937 BUG_ON(!node->tmp_refs); in binder_node_release()
5938 if (hlist_empty(&node->refs) && node->tmp_refs == 1) { in binder_node_release()
5946 node->proc = NULL; in binder_node_release()
5947 node->local_strong_refs = 0; in binder_node_release()
5948 node->local_weak_refs = 0; in binder_node_release()
5952 hlist_add_head(&node->dead_node, &binder_dead_nodes); in binder_node_release()
5955 hlist_for_each_entry(ref, &node->refs, node_entry) { in binder_node_release()
5963 binder_inner_proc_lock(ref->proc); in binder_node_release()
5964 if (!ref->death) { in binder_node_release()
5965 binder_inner_proc_unlock(ref->proc); in binder_node_release()
5971 BUG_ON(!list_empty(&ref->death->work.entry)); in binder_node_release()
5972 ref->death->work.type = BINDER_WORK_DEAD_BINDER; in binder_node_release()
5973 binder_enqueue_work_ilocked(&ref->death->work, in binder_node_release()
5974 &ref->proc->todo); in binder_node_release()
5975 binder_wakeup_proc_ilocked(ref->proc); in binder_node_release()
5976 binder_inner_proc_unlock(ref->proc); in binder_node_release()
5981 node->debug_id, refs, death); in binder_node_release()
5990 struct binder_context *context = proc->context; in binder_deferred_release()
5995 hlist_del(&proc->proc_node); in binder_deferred_release()
5998 mutex_lock(&context->context_mgr_node_lock); in binder_deferred_release()
5999 if (context->binder_context_mgr_node && in binder_deferred_release()
6000 context->binder_context_mgr_node->proc == proc) { in binder_deferred_release()
6003 __func__, proc->pid); in binder_deferred_release()
6004 context->binder_context_mgr_node = NULL; in binder_deferred_release()
6006 mutex_unlock(&context->context_mgr_node_lock); in binder_deferred_release()
6012 proc->tmp_ref++; in binder_deferred_release()
6014 proc->is_dead = true; in binder_deferred_release()
6015 proc->is_frozen = false; in binder_deferred_release()
6016 proc->sync_recv = false; in binder_deferred_release()
6017 proc->async_recv = false; in binder_deferred_release()
6020 while ((n = rb_first(&proc->threads))) { in binder_deferred_release()
6032 while ((n = rb_first(&proc->nodes))) { in binder_deferred_release()
6043 rb_erase(&node->rb_node, &proc->nodes); in binder_deferred_release()
6052 while ((n = rb_first(&proc->refs_by_desc))) { in binder_deferred_release()
6064 binder_release_work(proc, &proc->todo); in binder_deferred_release()
6065 binder_release_work(proc, &proc->delivered_death); in binder_deferred_release()
6069 __func__, proc->pid, threads, nodes, incoming_refs, in binder_deferred_release()
6086 hlist_del_init(&proc->deferred_work_node); in binder_deferred_func()
6087 defer = proc->deferred_work; in binder_deferred_func()
6088 proc->deferred_work = 0; in binder_deferred_func()
6108 proc->deferred_work |= defer; in binder_defer_work()
6109 if (hlist_unhashed(&proc->deferred_work_node)) { in binder_defer_work()
6110 hlist_add_head(&proc->deferred_work_node, in binder_defer_work()
6119 const char *prefix, in print_binder_transaction_ilocked() argument
6123 struct binder_buffer *buffer = t->buffer; in print_binder_transaction_ilocked()
6126 spin_lock(&t->lock); in print_binder_transaction_ilocked()
6127 to_proc = t->to_proc; in print_binder_transaction_ilocked()
6130 prefix, t->debug_id, t, in print_binder_transaction_ilocked()
6131 t->from_pid, in print_binder_transaction_ilocked()
6132 t->from_tid, in print_binder_transaction_ilocked()
6133 to_proc ? to_proc->pid : 0, in print_binder_transaction_ilocked()
6134 t->to_thread ? t->to_thread->pid : 0, in print_binder_transaction_ilocked()
6135 t->code, t->flags, t->priority, t->need_reply, in print_binder_transaction_ilocked()
6136 ktime_ms_delta(current_time, t->start_time)); in print_binder_transaction_ilocked()
6137 spin_unlock(&t->lock); in print_binder_transaction_ilocked()
6152 if (buffer->target_node) in print_binder_transaction_ilocked()
6153 seq_printf(m, " node %d", buffer->target_node->debug_id); in print_binder_transaction_ilocked()
6155 buffer->data_size, buffer->offsets_size, in print_binder_transaction_ilocked()
6156 buffer->user_data); in print_binder_transaction_ilocked()
6161 const char *prefix, in print_binder_work_ilocked() argument
6168 switch (w->type) { in print_binder_work_ilocked()
6179 prefix, e->cmd); in print_binder_work_ilocked()
6182 seq_printf(m, "%stransaction complete\n", prefix); in print_binder_work_ilocked()
6187 prefix, node->debug_id, in print_binder_work_ilocked()
6188 (u64)node->ptr, (u64)node->cookie); in print_binder_work_ilocked()
6191 seq_printf(m, "%shas dead binder\n", prefix); in print_binder_work_ilocked()
6194 seq_printf(m, "%shas cleared dead binder\n", prefix); in print_binder_work_ilocked()
6197 seq_printf(m, "%shas cleared death notification\n", prefix); in print_binder_work_ilocked()
6200 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); in print_binder_work_ilocked()
6211 size_t start_pos = m->count; in print_binder_thread_ilocked()
6215 thread->pid, thread->looper, in print_binder_thread_ilocked()
6216 thread->looper_need_return, in print_binder_thread_ilocked()
6217 atomic_read(&thread->tmp_ref)); in print_binder_thread_ilocked()
6218 header_pos = m->count; in print_binder_thread_ilocked()
6219 t = thread->transaction_stack; in print_binder_thread_ilocked()
6221 if (t->from == thread) { in print_binder_thread_ilocked()
6222 print_binder_transaction_ilocked(m, thread->proc, in print_binder_thread_ilocked()
6224 t = t->from_parent; in print_binder_thread_ilocked()
6225 } else if (t->to_thread == thread) { in print_binder_thread_ilocked()
6226 print_binder_transaction_ilocked(m, thread->proc, in print_binder_thread_ilocked()
6228 t = t->to_parent; in print_binder_thread_ilocked()
6230 print_binder_transaction_ilocked(m, thread->proc, in print_binder_thread_ilocked()
6235 list_for_each_entry(w, &thread->todo, entry) { in print_binder_thread_ilocked()
6236 print_binder_work_ilocked(m, thread->proc, " ", in print_binder_thread_ilocked()
6239 if (!print_always && m->count == header_pos) in print_binder_thread_ilocked()
6240 m->count = start_pos; in print_binder_thread_ilocked()
6251 hlist_for_each_entry(ref, &node->refs, node_entry) in print_binder_node_nilocked()
6255 node->debug_id, (u64)node->ptr, (u64)node->cookie, in print_binder_node_nilocked()
6256 node->has_strong_ref, node->has_weak_ref, in print_binder_node_nilocked()
6257 node->local_strong_refs, node->local_weak_refs, in print_binder_node_nilocked()
6258 node->internal_strong_refs, count, node->tmp_refs); in print_binder_node_nilocked()
6261 hlist_for_each_entry(ref, &node->refs, node_entry) in print_binder_node_nilocked()
6262 seq_printf(m, " %d", ref->proc->pid); in print_binder_node_nilocked()
6265 if (node->proc) { in print_binder_node_nilocked()
6266 list_for_each_entry(w, &node->async_todo, entry) in print_binder_node_nilocked()
6267 print_binder_work_ilocked(m, node->proc, " ", in print_binder_node_nilocked()
6268 " pending async transaction", w); in print_binder_node_nilocked()
6275 binder_node_lock(ref->node); in print_binder_ref_olocked()
6277 ref->data.debug_id, ref->data.desc, in print_binder_ref_olocked()
6278 ref->node->proc ? "" : "dead ", in print_binder_ref_olocked()
6279 ref->node->debug_id, ref->data.strong, in print_binder_ref_olocked()
6280 ref->data.weak, ref->death); in print_binder_ref_olocked()
6281 binder_node_unlock(ref->node); in print_binder_ref_olocked()
6289 size_t start_pos = m->count; in print_binder_proc()
6293 seq_printf(m, "proc %d\n", proc->pid); in print_binder_proc()
6294 seq_printf(m, "context %s\n", proc->context->name); in print_binder_proc()
6295 header_pos = m->count; in print_binder_proc()
6298 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) in print_binder_proc()
6302 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { in print_binder_proc()
6305 if (!print_all && !node->has_async_transaction) in print_binder_proc()
6330 for (n = rb_first(&proc->refs_by_desc); in print_binder_proc()
6338 binder_alloc_print_allocated(m, &proc->alloc); in print_binder_proc()
6340 list_for_each_entry(w, &proc->todo, entry) in print_binder_proc()
6343 list_for_each_entry(w, &proc->delivered_death, entry) { in print_binder_proc()
6348 if (!print_all && m->count == header_pos) in print_binder_proc()
6349 m->count = start_pos; in print_binder_proc()
6408 static void print_binder_stats(struct seq_file *m, const char *prefix, in print_binder_stats() argument
6413 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != in print_binder_stats()
6415 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { in print_binder_stats()
6416 int temp = atomic_read(&stats->bc[i]); in print_binder_stats()
6419 seq_printf(m, "%s%s: %d\n", prefix, in print_binder_stats()
6423 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != in print_binder_stats()
6425 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { in print_binder_stats()
6426 int temp = atomic_read(&stats->br[i]); in print_binder_stats()
6429 seq_printf(m, "%s%s: %d\n", prefix, in print_binder_stats()
6433 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != in print_binder_stats()
6435 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != in print_binder_stats()
6436 ARRAY_SIZE(stats->obj_deleted)); in print_binder_stats()
6437 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { in print_binder_stats()
6438 int created = atomic_read(&stats->obj_created[i]); in print_binder_stats()
6439 int deleted = atomic_read(&stats->obj_deleted[i]); in print_binder_stats()
6443 prefix, in print_binder_stats()
6445 created - deleted, in print_binder_stats()
6458 binder_alloc_get_free_async_space(&proc->alloc); in print_binder_proc_stats()
6460 seq_printf(m, "proc %d\n", proc->pid); in print_binder_proc_stats()
6461 seq_printf(m, "context %s\n", proc->context->name); in print_binder_proc_stats()
6465 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) in print_binder_proc_stats()
6468 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) in print_binder_proc_stats()
6474 " free async space %zd\n", proc->requested_threads, in print_binder_proc_stats()
6475 proc->requested_threads_started, proc->max_threads, in print_binder_proc_stats()
6479 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) in print_binder_proc_stats()
6487 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { in print_binder_proc_stats()
6491 strong += ref->data.strong; in print_binder_proc_stats()
6492 weak += ref->data.weak; in print_binder_proc_stats()
6497 count = binder_alloc_get_allocated_count(&proc->alloc); in print_binder_proc_stats()
6500 binder_alloc_print_pages(m, &proc->alloc); in print_binder_proc_stats()
6504 list_for_each_entry(w, &proc->todo, entry) { in print_binder_proc_stats()
6505 if (w->type == BINDER_WORK_TRANSACTION) in print_binder_proc_stats()
6511 print_binder_stats(m, " ", &proc->stats); in print_binder_proc_stats()
6531 node->tmp_refs++; in state_show()
6585 int pid = (unsigned long)m->private; in proc_show()
6589 if (itr->pid == pid) { in proc_show()
6602 int debug_id = READ_ONCE(e->debug_id_done); in print_binder_transaction_log_entry()
6610 e->debug_id, (e->call_type == 2) ? "reply" : in print_binder_transaction_log_entry()
6611 ((e->call_type == 1) ? "async" : "call "), e->from_proc, in print_binder_transaction_log_entry()
6612 e->from_thread, e->to_proc, e->to_thread, e->context_name, in print_binder_transaction_log_entry()
6613 e->to_node, e->target_handle, e->data_size, e->offsets_size, in print_binder_transaction_log_entry()
6614 e->return_error, e->return_error_param, in print_binder_transaction_log_entry()
6615 e->return_error_line); in print_binder_transaction_log_entry()
6617 * read-barrier to guarantee read of debug_id_done after in print_binder_transaction_log_entry()
6621 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? in print_binder_transaction_log_entry()
6627 struct binder_transaction_log *log = m->private; in transaction_log_show()
6628 unsigned int log_cur = atomic_read(&log->cur); in transaction_log_show()
6634 cur = count < ARRAY_SIZE(log->entry) && !log->full ? in transaction_log_show()
6635 0 : count % ARRAY_SIZE(log->entry); in transaction_log_show()
6636 if (count > ARRAY_SIZE(log->entry) || log->full) in transaction_log_show()
6637 count = ARRAY_SIZE(log->entry); in transaction_log_show()
6639 unsigned int index = cur++ % ARRAY_SIZE(log->entry); in transaction_log_show()
6641 print_binder_transaction_log_entry(m, &log->entry[index]); in transaction_log_show()
6699 const char *prefix, struct binder_transaction *t, in print_binder_transaction_brief_ilocked() argument
6709 spin_lock(&t->lock); in print_binder_transaction_brief_ilocked()
6710 to_proc = t->to_proc; in print_binder_transaction_brief_ilocked()
6711 from_pid = t->from ? (t->from->proc ? t->from->proc->pid : 0) : t->from_pid; in print_binder_transaction_brief_ilocked()
6712 from_tid = t->from ? t->from->pid : t->from_tid; in print_binder_transaction_brief_ilocked()
6713 to_pid = to_proc ? to_proc->pid : 0; in print_binder_transaction_brief_ilocked()
6714 sec = div_u64_rem((timestamp - t->timestamp), 1000000000, &nsec); in print_binder_transaction_brief_ilocked()
6718 prefix, in print_binder_transaction_brief_ilocked()
6720 to_pid, t->to_thread ? t->to_thread->pid : 0, in print_binder_transaction_brief_ilocked()
6721 t->code, in print_binder_transaction_brief_ilocked()
6722 timestamp > t->timestamp ? sec : 0, in print_binder_transaction_brief_ilocked()
6723 timestamp > t->timestamp ? nsec : 0); in print_binder_transaction_brief_ilocked()
6724 spin_unlock(&t->lock); in print_binder_transaction_brief_ilocked()
6728 const char *prefix, struct binder_work *w, in print_binder_work_transaction_nilocked() argument
6733 switch (w->type) { in print_binder_work_transaction_nilocked()
6736 print_binder_transaction_brief_ilocked(m, prefix, t, timestamp); in print_binder_work_transaction_nilocked()
6751 size_t start_pos = m->count; in print_binder_transaction_brief()
6752 size_t header_pos = m->count; in print_binder_transaction_brief()
6756 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { in print_binder_transaction_brief()
6758 struct binder_transaction *t = thread->transaction_stack; in print_binder_transaction_brief()
6760 if (t->from == thread) { in print_binder_transaction_brief()
6762 t = t->from_parent; in print_binder_transaction_brief()
6763 } else if (t->to_thread == thread) { in print_binder_transaction_brief()
6764 t = t->to_parent; in print_binder_transaction_brief()
6771 /* async binder / one way */ in print_binder_transaction_brief()
6772 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { in print_binder_transaction_brief()
6785 list_for_each_entry(w, &node->async_todo, entry) in print_binder_transaction_brief()
6786 print_binder_work_transaction_nilocked(m, "async\t", w, timestamp); in print_binder_transaction_brief()
6796 if (m->count == header_pos) in print_binder_transaction_brief()
6797 m->count = start_pos; in print_binder_transaction_brief()
6805 size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc); in print_binder_proc_brief()
6807 seq_printf(m, "%d\t", proc->pid); in print_binder_proc_brief()
6808 seq_printf(m, "%s\t", proc->context->name); in print_binder_proc_brief()
6811 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) in print_binder_proc_brief()
6815 "\t%zd\n", proc->requested_threads, in print_binder_proc_brief()
6816 proc->requested_threads_started, proc->max_threads, in print_binder_proc_brief()
6848 return -ENOMEM; in init_binder_device()
6850 binder_device->miscdev.fops = &binder_fops; in init_binder_device()
6851 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; in init_binder_device()
6852 binder_device->miscdev.name = name; in init_binder_device()
6854 refcount_set(&binder_device->ref, 1); in init_binder_device()
6855 binder_device->context.binder_context_mgr_uid = INVALID_UID; in init_binder_device()
6856 binder_device->context.name = name; in init_binder_device()
6857 mutex_init(&binder_device->context.context_mgr_node_lock); in init_binder_device()
6859 ret = misc_register(&binder_device->miscdev); in init_binder_device()
6865 hlist_add_head(&binder_device->hlist, &binder_devices); in init_binder_device()
6889 debugfs_create_file(db_entry->name, in binder_init()
6890 db_entry->mode, in binder_init()
6892 db_entry->data, in binder_init()
6893 db_entry->fops); in binder_init()
6912 * tokenize it in-place. in binder_init()
6916 ret = -ENOMEM; in binder_init()
6936 misc_deregister(&device->miscdev); in binder_init()
6937 hlist_del(&device->hlist); in binder_init()