1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3 *
4 * Android IPC Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 */
8
9 /*
10 * Locking overview
11 *
12 * There are 3 main spinlocks which must be acquired in the
13 * order shown:
14 *
15 * 1) proc->outer_lock : protects binder_ref
16 * binder_proc_lock() and binder_proc_unlock() are
17 * used to acq/rel.
18 * 2) node->lock : protects most fields of binder_node.
19 * binder_node_lock() and binder_node_unlock() are
20 * used to acq/rel
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
23 * and all todo lists associated with the binder_proc
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
26 * binder_inner_proc_lock() and binder_inner_proc_unlock()
27 * are used to acq/rel
28 *
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
31 *
32 * Functions that require a lock held on entry indicate which lock
33 * is needed in the suffix of the function name:
34 *
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
40 * ...
41 */
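
/*
 * A minimal sketch (not compiled) of the nesting order described above,
 * for a node that still has an owning proc. examine_node() is a
 * hypothetical stand-in for code that needs both the node fields and the
 * proc's work lists:
 *
 *	binder_node_lock(node);              // 2) node->lock
 *	binder_inner_proc_lock(node->proc);  // 3) proc->inner_lock
 *	examine_node(node);
 *	binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * The binder_node_inner_lock()/binder_node_inner_unlock() helpers defined
 * later in this file wrap exactly this pairing.
 */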
42
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68
69 #include <uapi/linux/sched/types.h>
70 #include <uapi/linux/android/binder.h>
71 #include <uapi/linux/android/binderfs.h>
72
73 #include <asm/cacheflush.h>
74
75 #include "binder_alloc.h"
76 #include "binder_internal.h"
77 #include "binder_trace.h"
78
79 static HLIST_HEAD(binder_deferred_list);
80 static DEFINE_MUTEX(binder_deferred_lock);
81
82 static HLIST_HEAD(binder_devices);
83 static HLIST_HEAD(binder_procs);
84 static DEFINE_MUTEX(binder_procs_lock);
85
86 static HLIST_HEAD(binder_dead_nodes);
87 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
88
89 static struct dentry *binder_debugfs_dir_entry_root;
90 static struct dentry *binder_debugfs_dir_entry_proc;
91 static atomic_t binder_last_id;
92
93 static int proc_show(struct seq_file *m, void *unused);
94 DEFINE_SHOW_ATTRIBUTE(proc);
95
96 /* This is only defined in include/asm-arm/sizes.h */
97 #ifndef SZ_1K
98 #define SZ_1K 0x400
99 #endif
100
101 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
102
103 enum {
104 BINDER_DEBUG_USER_ERROR = 1U << 0,
105 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
106 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
107 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
108 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
109 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
110 BINDER_DEBUG_READ_WRITE = 1U << 6,
111 BINDER_DEBUG_USER_REFS = 1U << 7,
112 BINDER_DEBUG_THREADS = 1U << 8,
113 BINDER_DEBUG_TRANSACTION = 1U << 9,
114 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
115 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
116 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
117 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
118 BINDER_DEBUG_SPINLOCKS = 1U << 14,
119 };
120 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
121 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
122 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
123
124 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
125 module_param_named(devices, binder_devices_param, charp, 0444);
126
127 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
128 static int binder_stop_on_user_error;
129
130 static int binder_set_stop_on_user_error(const char *val,
131 const struct kernel_param *kp)
132 {
133 int ret;
134
135 ret = param_set_int(val, kp);
136 if (binder_stop_on_user_error < 2)
137 wake_up(&binder_user_error_wait);
138 return ret;
139 }
140 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
141 param_get_int, &binder_stop_on_user_error, 0644);
142
143 #define binder_debug(mask, x...) \
144 do { \
145 if (binder_debug_mask & mask) \
146 pr_info_ratelimited(x); \
147 } while (0)
148
149 #define binder_user_error(x...) \
150 do { \
151 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
152 pr_info_ratelimited(x); \
153 if (binder_stop_on_user_error) \
154 binder_stop_on_user_error = 2; \
155 } while (0)
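
/*
 * Example usage of the logging macros above (sketch; the format strings
 * are illustrative, not taken from a specific call site). Output is
 * rate-limited and gated on the debug_mask module parameter:
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d exit\n", proc->pid, thread->pid);
 *
 *	binder_user_error("%d:%d got transaction with invalid handle\n",
 *			  proc->pid, thread->pid);
 */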
156
157 #define to_flat_binder_object(hdr) \
158 container_of(hdr, struct flat_binder_object, hdr)
159
160 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
161
162 #define to_binder_buffer_object(hdr) \
163 container_of(hdr, struct binder_buffer_object, hdr)
164
165 #define to_binder_fd_array_object(hdr) \
166 container_of(hdr, struct binder_fd_array_object, hdr)
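
/*
 * Example (sketch): the helpers above recover the full object from a
 * pointer to its embedded binder_object_header, assuming hdr was obtained
 * from an object copied out of a transaction buffer:
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		// fdo->fd is the sender's file descriptor
 *	}
 */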
167
168 enum binder_stat_types {
169 BINDER_STAT_PROC,
170 BINDER_STAT_THREAD,
171 BINDER_STAT_NODE,
172 BINDER_STAT_REF,
173 BINDER_STAT_DEATH,
174 BINDER_STAT_TRANSACTION,
175 BINDER_STAT_TRANSACTION_COMPLETE,
176 BINDER_STAT_COUNT
177 };
178
179 struct binder_stats {
180 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
181 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
182 atomic_t obj_created[BINDER_STAT_COUNT];
183 atomic_t obj_deleted[BINDER_STAT_COUNT];
184 };
185
186 static struct binder_stats binder_stats;
187
188 static inline void binder_stats_deleted(enum binder_stat_types type)
189 {
190 atomic_inc(&binder_stats.obj_deleted[type]);
191 }
192
193 static inline void binder_stats_created(enum binder_stat_types type)
194 {
195 atomic_inc(&binder_stats.obj_created[type]);
196 }
197
198 struct binder_transaction_log binder_transaction_log;
199 struct binder_transaction_log binder_transaction_log_failed;
200
201 static struct binder_transaction_log_entry *binder_transaction_log_add(
202 struct binder_transaction_log *log)
203 {
204 struct binder_transaction_log_entry *e;
205 unsigned int cur = atomic_inc_return(&log->cur);
206
207 if (cur >= ARRAY_SIZE(log->entry))
208 log->full = true;
209 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
210 WRITE_ONCE(e->debug_id_done, 0);
211 /*
212 * write-barrier to synchronize access to e->debug_id_done.
213 * We make sure the initialized 0 value is seen before
214 * the other fields are zeroed by memset().
215 */
216 smp_wmb();
217 memset(e, 0, sizeof(*e));
218 return e;
219 }
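
/*
 * The smp_wmb() above pairs with an smp_rmb() on the reader side (the
 * debugfs dump of the transaction log). Roughly, the reader does:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// ... print the entry fields ...
 *	// entry was stable iff debug_id && debug_id == READ_ONCE(e->debug_id_done)
 */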
220
221 /**
222 * struct binder_work - work enqueued on a worklist
223 * @entry: node enqueued on list
224 * @type: type of work to be performed
225 *
226 * There are separate work lists for proc, thread, and node (async).
227 */
228 struct binder_work {
229 struct list_head entry;
230
231 enum {
232 BINDER_WORK_TRANSACTION = 1,
233 BINDER_WORK_TRANSACTION_COMPLETE,
234 BINDER_WORK_RETURN_ERROR,
235 BINDER_WORK_NODE,
236 BINDER_WORK_DEAD_BINDER,
237 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
238 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
239 } type;
240 };
241
242 struct binder_error {
243 struct binder_work work;
244 uint32_t cmd;
245 };
246
247 /**
248 * struct binder_node - binder node bookkeeping
249 * @debug_id: unique ID for debugging
250 * (invariant after initialized)
251 * @lock: lock for node fields
252 * @work: worklist element for node work
253 * (protected by @proc->inner_lock)
254 * @rb_node: element for proc->nodes tree
255 * (protected by @proc->inner_lock)
256 * @dead_node: element for binder_dead_nodes list
257 * (protected by binder_dead_nodes_lock)
258 * @proc: binder_proc that owns this node
259 * (invariant after initialized)
260 * @refs: list of references on this node
261 * (protected by @lock)
262 * @internal_strong_refs: used to take strong references when
263 * initiating a transaction
264 * (protected by @proc->inner_lock if @proc
265 * and by @lock)
266 * @local_weak_refs: weak user refs from local process
267 * (protected by @proc->inner_lock if @proc
268 * and by @lock)
269 * @local_strong_refs: strong user refs from local process
270 * (protected by @proc->inner_lock if @proc
271 * and by @lock)
272 * @tmp_refs: temporary kernel refs
273 * (protected by @proc->inner_lock while @proc
274 * is valid, and by binder_dead_nodes_lock
275 * if @proc is NULL. During inc/dec and node release
276 * it is also protected by @lock to provide safety
277 * as the node dies and @proc becomes NULL)
278 * @ptr: userspace pointer for node
279 * (invariant, no lock needed)
280 * @cookie: userspace cookie for node
281 * (invariant, no lock needed)
282 * @has_strong_ref: userspace notified of strong ref
283 * (protected by @proc->inner_lock if @proc
284 * and by @lock)
285 * @pending_strong_ref: userspace has acked notification of strong ref
286 * (protected by @proc->inner_lock if @proc
287 * and by @lock)
288 * @has_weak_ref: userspace notified of weak ref
289 * (protected by @proc->inner_lock if @proc
290 * and by @lock)
291 * @pending_weak_ref: userspace has acked notification of weak ref
292 * (protected by @proc->inner_lock if @proc
293 * and by @lock)
294 * @has_async_transaction: async transaction to node in progress
295 * (protected by @lock)
296 * @sched_policy: minimum scheduling policy for node
297 * (invariant after initialized)
298 * @accept_fds: file descriptor operations supported for node
299 * (invariant after initialized)
300 * @min_priority: minimum scheduling priority
301 * (invariant after initialized)
302 * @inherit_rt: inherit RT scheduling policy from caller
303 * @txn_security_ctx: require sender's security context
304 * (invariant after initialized)
305 * @async_todo: list of async work items
306 * (protected by @proc->inner_lock)
307 *
308 * Bookkeeping structure for binder nodes.
309 */
310 struct binder_node {
311 int debug_id;
312 spinlock_t lock;
313 struct binder_work work;
314 union {
315 struct rb_node rb_node;
316 struct hlist_node dead_node;
317 };
318 struct binder_proc *proc;
319 struct hlist_head refs;
320 int internal_strong_refs;
321 int local_weak_refs;
322 int local_strong_refs;
323 int tmp_refs;
324 binder_uintptr_t ptr;
325 binder_uintptr_t cookie;
326 struct {
327 /*
328 * bitfield elements protected by
329 * proc inner_lock
330 */
331 u8 has_strong_ref:1;
332 u8 pending_strong_ref:1;
333 u8 has_weak_ref:1;
334 u8 pending_weak_ref:1;
335 };
336 struct {
337 /*
338 * invariant after initialization
339 */
340 u8 sched_policy:2;
341 u8 inherit_rt:1;
342 u8 accept_fds:1;
343 u8 txn_security_ctx:1;
344 u8 min_priority;
345 };
346 bool has_async_transaction;
347 struct list_head async_todo;
348 };
349
350 struct binder_ref_death {
351 /**
352 * @work: worklist element for death notifications
353 * (protected by inner_lock of the proc that
354 * this ref belongs to)
355 */
356 struct binder_work work;
357 binder_uintptr_t cookie;
358 };
359
360 /**
361 * struct binder_ref_data - binder_ref counts and id
362 * @debug_id: unique ID for the ref
363 * @desc: unique userspace handle for ref
364 * @strong: strong ref count (debugging only if not locked)
365 * @weak: weak ref count (debugging only if not locked)
366 *
367 * Structure to hold ref count and ref id information. Since
368 * the actual ref can only be accessed with a lock, this structure
369 * is used to return information about the ref to callers of
370 * ref inc/dec functions.
371 */
372 struct binder_ref_data {
373 int debug_id;
374 uint32_t desc;
375 int strong;
376 int weak;
377 };
378
379 /**
380 * struct binder_ref - struct to track references on nodes
381 * @data: binder_ref_data containing id, handle, and current refcounts
382 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
383 * @rb_node_node: node for lookup by @node in proc's rb_tree
384 * @node_entry: list entry for node->refs list in target node
385 * (protected by @node->lock)
386 * @proc: binder_proc containing ref
387 * @node: binder_node of target node. When cleaning up a
388 * ref for deletion in binder_cleanup_ref, a non-NULL
389 * @node indicates the node must be freed
390 * @death: pointer to death notification (ref_death) if requested
391 * (protected by @node->lock)
392 *
393 * Structure to track references from procA to target node (on procB). This
394 * structure is unsafe to access without holding @proc->outer_lock.
395 */
396 struct binder_ref {
397 /* Lookups needed: */
398 /* node + proc => ref (transaction) */
399 /* desc + proc => ref (transaction, inc/dec ref) */
400 /* node => refs + procs (proc exit) */
401 struct binder_ref_data data;
402 struct rb_node rb_node_desc;
403 struct rb_node rb_node_node;
404 struct hlist_node node_entry;
405 struct binder_proc *proc;
406 struct binder_node *node;
407 struct binder_ref_death *death;
408 };
409
410 enum binder_deferred_state {
411 BINDER_DEFERRED_FLUSH = 0x01,
412 BINDER_DEFERRED_RELEASE = 0x02,
413 };
414
415 /**
416 * struct binder_priority - scheduler policy and priority
417 * @sched_policy: scheduler policy
418 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
419 *
420 * The binder driver supports inheriting the following scheduler policies:
421 * SCHED_NORMAL
422 * SCHED_BATCH
423 * SCHED_FIFO
424 * SCHED_RR
425 */
426 struct binder_priority {
427 unsigned int sched_policy;
428 int prio;
429 };
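
/*
 * Example values (sketch): a SCHED_NORMAL task at nice 0 corresponds to
 * .prio == NICE_TO_PRIO(0) == 120, while a SCHED_FIFO task at RT
 * priority 50 corresponds to .prio == MAX_USER_RT_PRIO - 1 - 50 == 49:
 *
 *	struct binder_priority normal_prio = {
 *		.sched_policy = SCHED_NORMAL,
 *		.prio = NICE_TO_PRIO(0),
 *	};
 */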
430
431 /**
432 * struct binder_proc - binder process bookkeeping
433 * @proc_node: element for binder_procs list
434 * @threads: rbtree of binder_threads in this proc
435 * (protected by @inner_lock)
436 * @nodes: rbtree of binder nodes associated with
437 * this proc ordered by node->ptr
438 * (protected by @inner_lock)
439 * @refs_by_desc: rbtree of refs ordered by ref->desc
440 * (protected by @outer_lock)
441 * @refs_by_node: rbtree of refs ordered by ref->node
442 * (protected by @outer_lock)
443 * @waiting_threads: threads currently waiting for proc work
444 * (protected by @inner_lock)
445 * @pid: PID of group_leader of process
446 * (invariant after initialized)
447 * @tsk: task_struct for group_leader of process
448 * (invariant after initialized)
449 * @deferred_work_node: element for binder_deferred_list
450 * (protected by binder_deferred_lock)
451 * @deferred_work: bitmap of deferred work to perform
452 * (protected by binder_deferred_lock)
453 * @is_dead: process is dead and awaiting free
454 * when outstanding transactions are cleaned up
455 * (protected by @inner_lock)
456 * @todo: list of work for this process
457 * (protected by @inner_lock)
458 * @stats: per-process binder statistics
459 * (atomics, no lock needed)
460 * @delivered_death: list of delivered death notifications
461 * (protected by @inner_lock)
462 * @max_threads: cap on number of binder threads
463 * (protected by @inner_lock)
464 * @requested_threads: number of binder threads requested but not
465 * yet started. In current implementation, can
466 * only be 0 or 1.
467 * (protected by @inner_lock)
468 * @requested_threads_started: number of binder threads started
469 * (protected by @inner_lock)
470 * @tmp_ref: temporary reference to indicate proc is in use
471 * (protected by @inner_lock)
472 * @default_priority: default scheduler priority
473 * (invariant after initialized)
474 * @debugfs_entry: debugfs node
475 * @alloc: binder allocator bookkeeping
476 * @context: binder_context for this proc
477 * (invariant after initialized)
478 * @inner_lock: can nest under outer_lock and/or node lock
479 * @outer_lock: no nesting under inner or node lock
480 * Lock order: 1) outer, 2) node, 3) inner
481 * @binderfs_entry: process-specific binderfs log file
482 *
483 * Bookkeeping structure for binder processes
484 */
485 struct binder_proc {
486 struct hlist_node proc_node;
487 struct rb_root threads;
488 struct rb_root nodes;
489 struct rb_root refs_by_desc;
490 struct rb_root refs_by_node;
491 struct list_head waiting_threads;
492 int pid;
493 struct task_struct *tsk;
494 struct hlist_node deferred_work_node;
495 int deferred_work;
496 bool is_dead;
497
498 struct list_head todo;
499 struct binder_stats stats;
500 struct list_head delivered_death;
501 int max_threads;
502 int requested_threads;
503 int requested_threads_started;
504 int tmp_ref;
505 struct binder_priority default_priority;
506 struct dentry *debugfs_entry;
507 struct binder_alloc alloc;
508 struct binder_context *context;
509 spinlock_t inner_lock;
510 spinlock_t outer_lock;
511 struct dentry *binderfs_entry;
512 };
513
514 enum {
515 BINDER_LOOPER_STATE_REGISTERED = 0x01,
516 BINDER_LOOPER_STATE_ENTERED = 0x02,
517 BINDER_LOOPER_STATE_EXITED = 0x04,
518 BINDER_LOOPER_STATE_INVALID = 0x08,
519 BINDER_LOOPER_STATE_WAITING = 0x10,
520 BINDER_LOOPER_STATE_POLL = 0x20,
521 };
522
523 /**
524 * struct binder_thread - binder thread bookkeeping
525 * @proc: binder process for this thread
526 * (invariant after initialization)
527 * @rb_node: element for proc->threads rbtree
528 * (protected by @proc->inner_lock)
529 * @waiting_thread_node: element for @proc->waiting_threads list
530 * (protected by @proc->inner_lock)
531 * @pid: PID for this thread
532 * (invariant after initialization)
533 * @looper: bitmap of looping state
534 * (only accessed by this thread)
535 * @looper_need_return: looping thread needs to exit driver
536 * (no lock needed)
537 * @transaction_stack: stack of in-progress transactions for this thread
538 * (protected by @proc->inner_lock)
539 * @todo: list of work to do for this thread
540 * (protected by @proc->inner_lock)
541 * @process_todo: whether work in @todo should be processed
542 * (protected by @proc->inner_lock)
543 * @return_error: transaction errors reported by this thread
544 * (only accessed by this thread)
545 * @reply_error: transaction errors reported by target thread
546 * (protected by @proc->inner_lock)
547 * @wait: wait queue for thread work
548 * @stats: per-thread statistics
549 * (atomics, no lock needed)
550 * @tmp_ref: temporary reference to indicate thread is in use
551 * (atomic since @proc->inner_lock cannot
552 * always be acquired)
553 * @is_dead: thread is dead and awaiting free
554 * when outstanding transactions are cleaned up
555 * (protected by @proc->inner_lock)
556 * @task: struct task_struct for this thread
557 *
558 * Bookkeeping structure for binder threads.
559 */
560 struct binder_thread {
561 struct binder_proc *proc;
562 struct rb_node rb_node;
563 struct list_head waiting_thread_node;
564 int pid;
565 int looper; /* only modified by this thread */
566 bool looper_need_return; /* can be written by other thread */
567 struct binder_transaction *transaction_stack;
568 struct list_head todo;
569 bool process_todo;
570 struct binder_error return_error;
571 struct binder_error reply_error;
572 wait_queue_head_t wait;
573 struct binder_stats stats;
574 atomic_t tmp_ref;
575 bool is_dead;
576 struct task_struct *task;
577 };
578
579 /**
580 * struct binder_txn_fd_fixup - transaction fd fixup list element
581 * @fixup_entry: list entry
582 * @file: struct file to be associated with new fd
583 * @offset: offset in buffer data to this fixup
584 *
585 * List element for fd fixups in a transaction. Since file
586 * descriptors need to be allocated in the context of the
587 * target process, we pass each fd to be processed in this
588 * struct.
589 */
590 struct binder_txn_fd_fixup {
591 struct list_head fixup_entry;
592 struct file *file;
593 size_t offset;
594 };
595
596 struct binder_transaction {
597 int debug_id;
598 struct binder_work work;
599 struct binder_thread *from;
600 struct binder_transaction *from_parent;
601 struct binder_proc *to_proc;
602 struct binder_thread *to_thread;
603 struct binder_transaction *to_parent;
604 unsigned need_reply:1;
605 /* unsigned is_dead:1; */ /* not used at the moment */
606
607 struct binder_buffer *buffer;
608 unsigned int code;
609 unsigned int flags;
610 struct binder_priority priority;
611 struct binder_priority saved_priority;
612 bool set_priority_called;
613 kuid_t sender_euid;
614 struct list_head fd_fixups;
615 binder_uintptr_t security_ctx;
616 /**
617 * @lock: protects @from, @to_proc, and @to_thread
618 *
619 * @from, @to_proc, and @to_thread can be set to NULL
620 * during thread teardown
621 */
622 spinlock_t lock;
623 };
624
625 /**
626 * struct binder_object - union of flat binder object types
627 * @hdr: generic object header
628 * @fbo: binder object (nodes and refs)
629 * @fdo: file descriptor object
630 * @bbo: binder buffer pointer
631 * @fdao: file descriptor array
632 *
633 * Used for type-independent object copies
634 */
635 struct binder_object {
636 union {
637 struct binder_object_header hdr;
638 struct flat_binder_object fbo;
639 struct binder_fd_object fdo;
640 struct binder_buffer_object bbo;
641 struct binder_fd_array_object fdao;
642 };
643 };
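
/*
 * Example (sketch): after copying an object out of a transaction buffer
 * into a struct binder_object, callers dispatch on the shared header:
 *
 *	struct binder_object object;
 *	// ... copy the flat object from the buffer into &object ...
 *
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *		// object.fbo describes a local node
 *		break;
 *	case BINDER_TYPE_FD:
 *		// object.fdo describes a file descriptor
 *		break;
 *	}
 */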
644
645 /**
646 * binder_proc_lock() - Acquire outer lock for given binder_proc
647 * @proc: struct binder_proc to acquire
648 *
649 * Acquires proc->outer_lock. Used to protect binder_ref
650 * structures associated with the given proc.
651 */
652 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
653 static void
654 _binder_proc_lock(struct binder_proc *proc, int line)
655 __acquires(&proc->outer_lock)
656 {
657 binder_debug(BINDER_DEBUG_SPINLOCKS,
658 "%s: line=%d\n", __func__, line);
659 spin_lock(&proc->outer_lock);
660 }
661
662 /**
663 * binder_proc_unlock() - Release spinlock for given binder_proc
664 * @proc: struct binder_proc being released
665 *
666 * Release lock acquired via binder_proc_lock()
667 */
668 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
669 static void
670 _binder_proc_unlock(struct binder_proc *proc, int line)
671 __releases(&proc->outer_lock)
672 {
673 binder_debug(BINDER_DEBUG_SPINLOCKS,
674 "%s: line=%d\n", __func__, line);
675 spin_unlock(&proc->outer_lock);
676 }
677
678 /**
679 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
680 * @proc: struct binder_proc to acquire
681 *
682 * Acquires proc->inner_lock. Used to protect todo lists
683 */
684 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
685 static void
686 _binder_inner_proc_lock(struct binder_proc *proc, int line)
687 __acquires(&proc->inner_lock)
688 {
689 binder_debug(BINDER_DEBUG_SPINLOCKS,
690 "%s: line=%d\n", __func__, line);
691 spin_lock(&proc->inner_lock);
692 }
693
694 /**
695 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
696 * @proc: struct binder_proc being released
697 *
698 * Release lock acquired via binder_inner_proc_lock()
699 */
700 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
701 static void
702 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
703 __releases(&proc->inner_lock)
704 {
705 binder_debug(BINDER_DEBUG_SPINLOCKS,
706 "%s: line=%d\n", __func__, line);
707 spin_unlock(&proc->inner_lock);
708 }
709
710 /**
711 * binder_node_lock() - Acquire spinlock for given binder_node
712 * @node: struct binder_node to acquire
713 *
714 * Acquires node->lock. Used to protect binder_node fields
715 */
716 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
717 static void
718 _binder_node_lock(struct binder_node *node, int line)
719 __acquires(&node->lock)
720 {
721 binder_debug(BINDER_DEBUG_SPINLOCKS,
722 "%s: line=%d\n", __func__, line);
723 spin_lock(&node->lock);
724 }
725
726 /**
727 * binder_node_unlock() - Release spinlock for given binder_node
728 * @node: struct binder_node being released
729 *
730 * Release lock acquired via binder_node_lock()
731 */
732 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
733 static void
734 _binder_node_unlock(struct binder_node *node, int line)
735 __releases(&node->lock)
736 {
737 binder_debug(BINDER_DEBUG_SPINLOCKS,
738 "%s: line=%d\n", __func__, line);
739 spin_unlock(&node->lock);
740 }
741
742 /**
743 * binder_node_inner_lock() - Acquire node and inner locks
744 * @node: struct binder_node to acquire
745 *
746 * Acquires node->lock. If node->proc is non-NULL, also acquires
747 * proc->inner_lock. Used to protect binder_node fields
748 */
749 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
750 static void
751 _binder_node_inner_lock(struct binder_node *node, int line)
752 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
753 {
754 binder_debug(BINDER_DEBUG_SPINLOCKS,
755 "%s: line=%d\n", __func__, line);
756 spin_lock(&node->lock);
757 if (node->proc)
758 binder_inner_proc_lock(node->proc);
759 else
760 /* annotation for sparse */
761 __acquire(&node->proc->inner_lock);
762 }
763
764 /**
765 * binder_node_inner_unlock() - Release node and inner locks
766 * @node: struct binder_node being released
767 *
768 * Release locks acquired via binder_node_inner_lock()
769 */
770 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
771 static void
772 _binder_node_inner_unlock(struct binder_node *node, int line)
773 __releases(&node->lock) __releases(&node->proc->inner_lock)
774 {
775 struct binder_proc *proc = node->proc;
776
777 binder_debug(BINDER_DEBUG_SPINLOCKS,
778 "%s: line=%d\n", __func__, line);
779 if (proc)
780 binder_inner_proc_unlock(proc);
781 else
782 /* annotation for sparse */
783 __release(&node->proc->inner_lock);
784 spin_unlock(&node->lock);
785 }
786
787 static bool binder_worklist_empty_ilocked(struct list_head *list)
788 {
789 return list_empty(list);
790 }
791
792 /**
793 * binder_worklist_empty() - Check if no items on the work list
794 * @proc: binder_proc associated with list
795 * @list: list to check
796 *
797 * Return: true if there are no items on list, else false
798 */
799 static bool binder_worklist_empty(struct binder_proc *proc,
800 struct list_head *list)
801 {
802 bool ret;
803
804 binder_inner_proc_lock(proc);
805 ret = binder_worklist_empty_ilocked(list);
806 binder_inner_proc_unlock(proc);
807 return ret;
808 }
809
810 /**
811 * binder_enqueue_work_ilocked() - Add an item to the work list
812 * @work: struct binder_work to add to list
813 * @target_list: list to add work to
814 *
815 * Adds the work to the specified list. Asserts that work
816 * is not already on a list.
817 *
818 * Requires the proc->inner_lock to be held.
819 */
820 static void
821 binder_enqueue_work_ilocked(struct binder_work *work,
822 struct list_head *target_list)
823 {
824 BUG_ON(target_list == NULL);
825 BUG_ON(work->entry.next && !list_empty(&work->entry));
826 list_add_tail(&work->entry, target_list);
827 }
828
829 /**
830 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
831 * @thread: thread to queue work to
832 * @work: struct binder_work to add to list
833 *
834 * Adds the work to the todo list of the thread. Doesn't set the process_todo
835 * flag, which means that (if it wasn't already set) the thread will go to
836 * sleep without handling this work when it calls read.
837 *
838 * Requires the proc->inner_lock to be held.
839 */
840 static void
841 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
842 struct binder_work *work)
843 {
844 WARN_ON(!list_empty(&thread->waiting_thread_node));
845 binder_enqueue_work_ilocked(work, &thread->todo);
846 }
847
848 /**
849 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
850 * @thread: thread to queue work to
851 * @work: struct binder_work to add to list
852 *
853 * Adds the work to the todo list of the thread, and enables processing
854 * of the todo queue.
855 *
856 * Requires the proc->inner_lock to be held.
857 */
858 static void
859 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
860 struct binder_work *work)
861 {
862 WARN_ON(!list_empty(&thread->waiting_thread_node));
863 binder_enqueue_work_ilocked(work, &thread->todo);
864 thread->process_todo = true;
865 }
866
867 /**
868 * binder_enqueue_thread_work() - Add an item to the thread work list
869 * @thread: thread to queue work to
870 * @work: struct binder_work to add to list
871 *
872 * Adds the work to the todo list of the thread, and enables processing
873 * of the todo queue.
874 */
875 static void
876 binder_enqueue_thread_work(struct binder_thread *thread,
877 struct binder_work *work)
878 {
879 binder_inner_proc_lock(thread->proc);
880 binder_enqueue_thread_work_ilocked(thread, work);
881 binder_inner_proc_unlock(thread->proc);
882 }
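
/*
 * Sketch of the difference between the two thread enqueue variants above:
 * the deferred variant leaves process_todo untouched, so the thread may
 * go to sleep in read without handling the work, while the normal variant
 * marks the todo list as needing processing.
 *
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *	binder_inner_proc_unlock(thread->proc);
 *	// process_todo unchanged: the work waits for the next wakeup
 *
 *	binder_enqueue_thread_work(thread, &t->work);
 *	// process_todo set: handled on the thread's next read
 */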
883
884 static void
885 binder_dequeue_work_ilocked(struct binder_work *work)
886 {
887 list_del_init(&work->entry);
888 }
889
890 /**
891 * binder_dequeue_work() - Removes an item from the work list
892 * @proc: binder_proc associated with list
893 * @work: struct binder_work to remove from list
894 *
895 * Removes the specified work item from whatever list it is on.
896 * Can safely be called if work is not on any list.
897 */
898 static void
899 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
900 {
901 binder_inner_proc_lock(proc);
902 binder_dequeue_work_ilocked(work);
903 binder_inner_proc_unlock(proc);
904 }
905
906 static struct binder_work *binder_dequeue_work_head_ilocked(
907 struct list_head *list)
908 {
909 struct binder_work *w;
910
911 w = list_first_entry_or_null(list, struct binder_work, entry);
912 if (w)
913 list_del_init(&w->entry);
914 return w;
915 }
916
917 /**
918 * binder_dequeue_work_head() - Dequeues the item at head of list
919 * @proc: binder_proc associated with list
920 * @list: list to dequeue head
921 *
922 * Removes the head of the list if there are items on the list
923 *
924 * Return: pointer to the dequeued binder_work, NULL if list was empty
925 */
926 static struct binder_work *binder_dequeue_work_head(
927 struct binder_proc *proc,
928 struct list_head *list)
929 {
930 struct binder_work *w;
931
932 binder_inner_proc_lock(proc);
933 w = binder_dequeue_work_head_ilocked(list);
934 binder_inner_proc_unlock(proc);
935 return w;
936 }
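
/*
 * Example consumer pattern (sketch): draining a work list, as the
 * release and cleanup paths later in this file do.
 *
 *	struct binder_work *w;
 *
 *	while ((w = binder_dequeue_work_head(proc, list)) != NULL) {
 *		// handle or free the item depending on w->type
 *	}
 */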
937
938 static void
939 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
940 static void binder_free_thread(struct binder_thread *thread);
941 static void binder_free_proc(struct binder_proc *proc);
942 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
943
944 static bool binder_has_work_ilocked(struct binder_thread *thread,
945 bool do_proc_work)
946 {
947 return thread->process_todo ||
948 thread->looper_need_return ||
949 (do_proc_work &&
950 !binder_worklist_empty_ilocked(&thread->proc->todo));
951 }
952
953 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
954 {
955 bool has_work;
956
957 binder_inner_proc_lock(thread->proc);
958 has_work = binder_has_work_ilocked(thread, do_proc_work);
959 binder_inner_proc_unlock(thread->proc);
960
961 return has_work;
962 }
963
964 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
965 {
966 return !thread->transaction_stack &&
967 binder_worklist_empty_ilocked(&thread->todo) &&
968 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
969 BINDER_LOOPER_STATE_REGISTERED));
970 }
971
972 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
973 bool sync)
974 {
975 struct rb_node *n;
976 struct binder_thread *thread;
977
978 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
979 thread = rb_entry(n, struct binder_thread, rb_node);
980 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
981 binder_available_for_proc_work_ilocked(thread)) {
982 if (sync)
983 wake_up_interruptible_sync(&thread->wait);
984 else
985 wake_up_interruptible(&thread->wait);
986 }
987 }
988 }
989
990 /**
991 * binder_select_thread_ilocked() - selects a thread for doing proc work.
992 * @proc: process to select a thread from
993 *
994 * Note that calling this function moves the thread off the waiting_threads
995 * list, so it can only be woken up by the caller of this function, or a
996 * signal. Therefore, callers *should* always wake up the thread this function
997 * returns.
998 *
999 * Return: If there's a thread currently waiting for process work,
1000 * returns that thread. Otherwise returns NULL.
1001 */
1002 static struct binder_thread *
1003 binder_select_thread_ilocked(struct binder_proc *proc)
1004 {
1005 struct binder_thread *thread;
1006
1007 assert_spin_locked(&proc->inner_lock);
1008 thread = list_first_entry_or_null(&proc->waiting_threads,
1009 struct binder_thread,
1010 waiting_thread_node);
1011
1012 if (thread)
1013 list_del_init(&thread->waiting_thread_node);
1014
1015 return thread;
1016 }
1017
1018 /**
1019 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1020 * @proc: process to wake up a thread in
1021 * @thread: specific thread to wake-up (may be NULL)
1022 * @sync: whether to do a synchronous wake-up
1023 *
1024 * This function wakes up a thread in the @proc process.
1025 * The caller may provide a specific thread to wake-up in
1026 * the @thread parameter. If @thread is NULL, this function
1027 * will wake up threads that have called poll().
1028 *
1029 * Note that for this function to work as expected, callers
1030 * should first call binder_select_thread() to find a thread
1031 * to handle the work (if they don't have a thread already),
1032 * and pass the result into the @thread parameter.
1033 */
1034 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1035 struct binder_thread *thread,
1036 bool sync)
1037 {
1038 assert_spin_locked(&proc->inner_lock);
1039
1040 if (thread) {
1041 if (sync)
1042 wake_up_interruptible_sync(&thread->wait);
1043 else
1044 wake_up_interruptible(&thread->wait);
1045 return;
1046 }
1047
1048 /* Didn't find a thread waiting for proc work; this can happen
1049 * in two scenarios:
1050 * 1. All threads are busy handling transactions
1051 * In that case, one of those threads should call back into
1052 * the kernel driver soon and pick up this work.
1053 * 2. Threads are using the (e)poll interface, in which case
1054 * they may be blocked on the waitqueue without having been
1055 * added to waiting_threads. For this case, we just iterate
1056 * over all threads not handling transaction work, and
1057 * wake them all up. We wake all because we don't know whether
1058 * a thread that called into (e)poll is handling non-binder
1059 * work currently.
1060 */
1061 binder_wakeup_poll_threads_ilocked(proc, sync);
1062 }
1063
1064 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1065 {
1066 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1067
1068 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1069 }
1070
1071 static bool is_rt_policy(int policy)
1072 {
1073 return policy == SCHED_FIFO || policy == SCHED_RR;
1074 }
1075
1076 static bool is_fair_policy(int policy)
1077 {
1078 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1079 }
1080
1081 static bool binder_supported_policy(int policy)
1082 {
1083 return is_fair_policy(policy) || is_rt_policy(policy);
1084 }
1085
1086 static int to_userspace_prio(int policy, int kernel_priority)
1087 {
1088 if (is_fair_policy(policy))
1089 return PRIO_TO_NICE(kernel_priority);
1090 else
1091 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1092 }
1093
1094 static int to_kernel_prio(int policy, int user_priority)
1095 {
1096 if (is_fair_policy(policy))
1097 return NICE_TO_PRIO(user_priority);
1098 else
1099 return MAX_USER_RT_PRIO - 1 - user_priority;
1100 }
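
/*
 * Example conversions (sketch): SCHED_NORMAL priorities map to/from nice
 * values, RT priorities are mirrored within [0, MAX_USER_RT_PRIO - 1]:
 *
 *	to_userspace_prio(SCHED_NORMAL, 120) == 0	// nice 0
 *	to_kernel_prio(SCHED_NORMAL, 0)      == 120
 *	to_userspace_prio(SCHED_FIFO, 49)    == 50	// RT prio 50
 *	to_kernel_prio(SCHED_FIFO, 50)       == 49
 */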
1101
1102 static void binder_do_set_priority(struct task_struct *task,
1103 struct binder_priority desired,
1104 bool verify)
1105 {
1106 int priority; /* user-space prio value */
1107 bool has_cap_nice;
1108 unsigned int policy = desired.sched_policy;
1109
1110 if (task->policy == policy && task->normal_prio == desired.prio)
1111 return;
1112
1113 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1114
1115 priority = to_userspace_prio(policy, desired.prio);
1116
1117 if (verify && is_rt_policy(policy) && !has_cap_nice) {
1118 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1119
1120 if (max_rtprio == 0) {
1121 policy = SCHED_NORMAL;
1122 priority = MIN_NICE;
1123 } else if (priority > max_rtprio) {
1124 priority = max_rtprio;
1125 }
1126 }
1127
1128 if (verify && is_fair_policy(policy) && !has_cap_nice) {
1129 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
1130
1131 if (min_nice > MAX_NICE) {
1132 binder_user_error("%d RLIMIT_NICE not set\n",
1133 task->pid);
1134 return;
1135 } else if (priority < min_nice) {
1136 priority = min_nice;
1137 }
1138 }
1139
1140 if (policy != desired.sched_policy ||
1141 to_kernel_prio(policy, priority) != desired.prio)
1142 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1143 "%d: priority %d not allowed, using %d instead\n",
1144 task->pid, desired.prio,
1145 to_kernel_prio(policy, priority));
1146
1147 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1148 to_kernel_prio(policy, priority),
1149 desired.prio);
1150
1151 /* Set the actual priority */
1152 if (task->policy != policy || is_rt_policy(policy)) {
1153 struct sched_param params;
1154
1155 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1156
1157 sched_setscheduler_nocheck(task,
1158 policy | SCHED_RESET_ON_FORK,
1159 &params);
1160 }
1161 if (is_fair_policy(policy))
1162 set_user_nice(task, priority);
1163 }
1164
1165 static void binder_set_priority(struct task_struct *task,
1166 struct binder_priority desired)
1167 {
1168 binder_do_set_priority(task, desired, /* verify = */ true);
1169 }
1170
1171 static void binder_restore_priority(struct task_struct *task,
1172 struct binder_priority desired)
1173 {
1174 binder_do_set_priority(task, desired, /* verify = */ false);
1175 }
1176
1177 static void binder_transaction_priority(struct task_struct *task,
1178 struct binder_transaction *t,
1179 struct binder_priority node_prio,
1180 bool inherit_rt)
1181 {
1182 struct binder_priority desired_prio = t->priority;
1183
1184 if (t->set_priority_called)
1185 return;
1186
1187 t->set_priority_called = true;
1188 t->saved_priority.sched_policy = task->policy;
1189 t->saved_priority.prio = task->normal_prio;
1190
1191 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1192 desired_prio.prio = NICE_TO_PRIO(0);
1193 desired_prio.sched_policy = SCHED_NORMAL;
1194 }
1195
1196 if (node_prio.prio < t->priority.prio ||
1197 (node_prio.prio == t->priority.prio &&
1198 node_prio.sched_policy == SCHED_FIFO)) {
1199 /*
1200 * In case the minimum priority on the node is
1201 * higher (lower value), use that priority. If
1202 * the priority is the same, but the node uses
1203 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1204 * run unbounded, unlike SCHED_RR.
1205 */
1206 desired_prio = node_prio;
1207 }
1208
1209 binder_set_priority(task, desired_prio);
1210 }
1211
1212 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1213 binder_uintptr_t ptr)
1214 {
1215 struct rb_node *n = proc->nodes.rb_node;
1216 struct binder_node *node;
1217
1218 assert_spin_locked(&proc->inner_lock);
1219
1220 while (n) {
1221 node = rb_entry(n, struct binder_node, rb_node);
1222
1223 if (ptr < node->ptr)
1224 n = n->rb_left;
1225 else if (ptr > node->ptr)
1226 n = n->rb_right;
1227 else {
1228 /*
1229 * take an implicit weak reference
1230 * to ensure node stays alive until
1231 * call to binder_put_node()
1232 */
1233 binder_inc_node_tmpref_ilocked(node);
1234 return node;
1235 }
1236 }
1237 return NULL;
1238 }
1239
1240 static struct binder_node *binder_get_node(struct binder_proc *proc,
1241 binder_uintptr_t ptr)
1242 {
1243 struct binder_node *node;
1244
1245 binder_inner_proc_lock(proc);
1246 node = binder_get_node_ilocked(proc, ptr);
1247 binder_inner_proc_unlock(proc);
1248 return node;
1249 }
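
/*
 * Example lookup pattern (sketch): binder_get_node() returns the node
 * with an extra tmp_ref held, so callers must drop it with
 * binder_put_node() (defined below) once they are done with it:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		// ... use node ...
 *		binder_put_node(node);
 *	}
 */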
1250
1251 static struct binder_node *binder_init_node_ilocked(
1252 struct binder_proc *proc,
1253 struct binder_node *new_node,
1254 struct flat_binder_object *fp)
1255 {
1256 struct rb_node **p = &proc->nodes.rb_node;
1257 struct rb_node *parent = NULL;
1258 struct binder_node *node;
1259 binder_uintptr_t ptr = fp ? fp->binder : 0;
1260 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1261 __u32 flags = fp ? fp->flags : 0;
1262 s8 priority;
1263
1264 assert_spin_locked(&proc->inner_lock);
1265
1266 while (*p) {
1267
1268 parent = *p;
1269 node = rb_entry(parent, struct binder_node, rb_node);
1270
1271 if (ptr < node->ptr)
1272 p = &(*p)->rb_left;
1273 else if (ptr > node->ptr)
1274 p = &(*p)->rb_right;
1275 else {
1276 /*
1277 * A matching node is already in
1278 * the rb tree. Abandon the init
1279 * and return it.
1280 */
1281 binder_inc_node_tmpref_ilocked(node);
1282 return node;
1283 }
1284 }
1285 node = new_node;
1286 binder_stats_created(BINDER_STAT_NODE);
1287 node->tmp_refs++;
1288 rb_link_node(&node->rb_node, parent, p);
1289 rb_insert_color(&node->rb_node, &proc->nodes);
1290 node->debug_id = atomic_inc_return(&binder_last_id);
1291 node->proc = proc;
1292 node->ptr = ptr;
1293 node->cookie = cookie;
1294 node->work.type = BINDER_WORK_NODE;
1295 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1296 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
1297 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1298 node->min_priority = to_kernel_prio(node->sched_policy, priority);
1299 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1300 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
1301 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1302 spin_lock_init(&node->lock);
1303 INIT_LIST_HEAD(&node->work.entry);
1304 INIT_LIST_HEAD(&node->async_todo);
1305 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1306 "%d:%d node %d u%016llx c%016llx created\n",
1307 proc->pid, current->pid, node->debug_id,
1308 (u64)node->ptr, (u64)node->cookie);
1309
1310 return node;
1311 }
1312
1313 static struct binder_node *binder_new_node(struct binder_proc *proc,
1314 struct flat_binder_object *fp)
1315 {
1316 struct binder_node *node;
1317 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1318
1319 if (!new_node)
1320 return NULL;
1321 binder_inner_proc_lock(proc);
1322 node = binder_init_node_ilocked(proc, new_node, fp);
1323 binder_inner_proc_unlock(proc);
1324 if (node != new_node)
1325 /*
1326 * The node was already added by another thread
1327 */
1328 kfree(new_node);
1329
1330 return node;
1331 }
1332
1333 static void binder_free_node(struct binder_node *node)
1334 {
1335 kfree(node);
1336 binder_stats_deleted(BINDER_STAT_NODE);
1337 }
1338
1339 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1340 int internal,
1341 struct list_head *target_list)
1342 {
1343 struct binder_proc *proc = node->proc;
1344
1345 assert_spin_locked(&node->lock);
1346 if (proc)
1347 assert_spin_locked(&proc->inner_lock);
1348 if (strong) {
1349 if (internal) {
1350 if (target_list == NULL &&
1351 node->internal_strong_refs == 0 &&
1352 !(node->proc &&
1353 node == node->proc->context->binder_context_mgr_node &&
1354 node->has_strong_ref)) {
1355 pr_err("invalid inc strong node for %d\n",
1356 node->debug_id);
1357 return -EINVAL;
1358 }
1359 node->internal_strong_refs++;
1360 } else
1361 node->local_strong_refs++;
1362 if (!node->has_strong_ref && target_list) {
1363 struct binder_thread *thread = container_of(target_list,
1364 struct binder_thread, todo);
1365 binder_dequeue_work_ilocked(&node->work);
1366 BUG_ON(&thread->todo != target_list);
1367 binder_enqueue_deferred_thread_work_ilocked(thread,
1368 &node->work);
1369 }
1370 } else {
1371 if (!internal)
1372 node->local_weak_refs++;
1373 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1374 if (target_list == NULL) {
1375 pr_err("invalid inc weak node for %d\n",
1376 node->debug_id);
1377 return -EINVAL;
1378 }
1379 /*
1380 * See comment above
1381 */
1382 binder_enqueue_work_ilocked(&node->work, target_list);
1383 }
1384 }
1385 return 0;
1386 }
1387
1388 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1389 struct list_head *target_list)
1390 {
1391 int ret;
1392
1393 binder_node_inner_lock(node);
1394 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1395 binder_node_inner_unlock(node);
1396
1397 return ret;
1398 }
1399
1400 static bool binder_dec_node_nilocked(struct binder_node *node,
1401 int strong, int internal)
1402 {
1403 struct binder_proc *proc = node->proc;
1404
1405 assert_spin_locked(&node->lock);
1406 if (proc)
1407 assert_spin_locked(&proc->inner_lock);
1408 if (strong) {
1409 if (internal)
1410 node->internal_strong_refs--;
1411 else
1412 node->local_strong_refs--;
1413 if (node->local_strong_refs || node->internal_strong_refs)
1414 return false;
1415 } else {
1416 if (!internal)
1417 node->local_weak_refs--;
1418 if (node->local_weak_refs || node->tmp_refs ||
1419 !hlist_empty(&node->refs))
1420 return false;
1421 }
1422
1423 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1424 if (list_empty(&node->work.entry)) {
1425 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1426 binder_wakeup_proc_ilocked(proc);
1427 }
1428 } else {
1429 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1430 !node->local_weak_refs && !node->tmp_refs) {
1431 if (proc) {
1432 binder_dequeue_work_ilocked(&node->work);
1433 rb_erase(&node->rb_node, &proc->nodes);
1434 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1435 "refless node %d deleted\n",
1436 node->debug_id);
1437 } else {
1438 BUG_ON(!list_empty(&node->work.entry));
1439 spin_lock(&binder_dead_nodes_lock);
1440 /*
1441 * tmp_refs could have changed so
1442 * check it again
1443 */
1444 if (node->tmp_refs) {
1445 spin_unlock(&binder_dead_nodes_lock);
1446 return false;
1447 }
1448 hlist_del(&node->dead_node);
1449 spin_unlock(&binder_dead_nodes_lock);
1450 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1451 "dead node %d deleted\n",
1452 node->debug_id);
1453 }
1454 return true;
1455 }
1456 }
1457 return false;
1458 }
1459
1460 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1461 {
1462 bool free_node;
1463
1464 binder_node_inner_lock(node);
1465 free_node = binder_dec_node_nilocked(node, strong, internal);
1466 binder_node_inner_unlock(node);
1467 if (free_node)
1468 binder_free_node(node);
1469 }
1470
1471 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1472 {
1473 /*
1474 * No call to binder_inc_node() is needed since we
1475 * don't need to inform userspace of any changes to
1476 * tmp_refs
1477 */
1478 node->tmp_refs++;
1479 }
1480
1481 /**
1482 * binder_inc_node_tmpref() - take a temporary reference on node
1483 * @node: node to reference
1484 *
1485 * Take reference on node to prevent the node from being freed
1486 * while referenced only by a local variable. The inner lock is
1487 * needed to serialize with the node work on the queue (which
1488 * isn't needed after the node is dead). If the node is dead
1489 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1490 * node->tmp_refs against dead-node-only cases where the node
1491 * lock cannot be acquired (eg traversing the dead node list to
1492 * print nodes)
1493 */
1494 static void binder_inc_node_tmpref(struct binder_node *node)
1495 {
1496 binder_node_lock(node);
1497 if (node->proc)
1498 binder_inner_proc_lock(node->proc);
1499 else
1500 spin_lock(&binder_dead_nodes_lock);
1501 binder_inc_node_tmpref_ilocked(node);
1502 if (node->proc)
1503 binder_inner_proc_unlock(node->proc);
1504 else
1505 spin_unlock(&binder_dead_nodes_lock);
1506 binder_node_unlock(node);
1507 }
1508
1509 /**
1510 * binder_dec_node_tmpref() - remove a temporary reference on node
1511 * @node: node to reference
1512 *
1513 * Release temporary reference on node taken via binder_inc_node_tmpref()
1514 */
1515 static void binder_dec_node_tmpref(struct binder_node *node)
1516 {
1517 bool free_node;
1518
1519 binder_node_inner_lock(node);
1520 if (!node->proc)
1521 spin_lock(&binder_dead_nodes_lock);
1522 else
1523 __acquire(&binder_dead_nodes_lock);
1524 node->tmp_refs--;
1525 BUG_ON(node->tmp_refs < 0);
1526 if (!node->proc)
1527 spin_unlock(&binder_dead_nodes_lock);
1528 else
1529 __release(&binder_dead_nodes_lock);
1530 /*
1531 * Call binder_dec_node() to check if all refcounts are 0
1532 * and cleanup is needed. Calling with strong=0 and internal=1
1533 * causes no actual reference to be released in binder_dec_node().
1534 * If that changes, a change is needed here too.
1535 */
1536 free_node = binder_dec_node_nilocked(node, 0, 1);
1537 binder_node_inner_unlock(node);
1538 if (free_node)
1539 binder_free_node(node);
1540 }
1541
1542 static void binder_put_node(struct binder_node *node)
1543 {
1544 binder_dec_node_tmpref(node);
1545 }
1546
1547 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1548 u32 desc, bool need_strong_ref)
1549 {
1550 struct rb_node *n = proc->refs_by_desc.rb_node;
1551 struct binder_ref *ref;
1552
1553 while (n) {
1554 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1555
1556 if (desc < ref->data.desc) {
1557 n = n->rb_left;
1558 } else if (desc > ref->data.desc) {
1559 n = n->rb_right;
1560 } else if (need_strong_ref && !ref->data.strong) {
1561 binder_user_error("tried to use weak ref as strong ref\n");
1562 return NULL;
1563 } else {
1564 return ref;
1565 }
1566 }
1567 return NULL;
1568 }
1569
1570 /**
1571 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1572 * @proc: binder_proc that owns the ref
1573 * @node: binder_node of target
1574 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1575 *
1576 * Look up the ref for the given node and return it if it exists
1577 *
1578 * If it doesn't exist and the caller provides a newly allocated
1579 * ref, initialize the fields of the newly allocated ref and insert
1580 * into the given proc rb_trees and node refs list.
1581 *
1582 * Return: the ref for node. It is possible that another thread
1583 * allocated/initialized the ref first in which case the
1584 * returned ref would be different than the passed-in
1585 * new_ref. new_ref must be kfree'd by the caller in
1586 * this case.
1587 */
1588 static struct binder_ref *binder_get_ref_for_node_olocked(
1589 struct binder_proc *proc,
1590 struct binder_node *node,
1591 struct binder_ref *new_ref)
1592 {
1593 struct binder_context *context = proc->context;
1594 struct rb_node **p = &proc->refs_by_node.rb_node;
1595 struct rb_node *parent = NULL;
1596 struct binder_ref *ref;
1597 struct rb_node *n;
1598
1599 while (*p) {
1600 parent = *p;
1601 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1602
1603 if (node < ref->node)
1604 p = &(*p)->rb_left;
1605 else if (node > ref->node)
1606 p = &(*p)->rb_right;
1607 else
1608 return ref;
1609 }
1610 if (!new_ref)
1611 return NULL;
1612
1613 binder_stats_created(BINDER_STAT_REF);
1614 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1615 new_ref->proc = proc;
1616 new_ref->node = node;
1617 rb_link_node(&new_ref->rb_node_node, parent, p);
1618 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1619
1620 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1621 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1622 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1623 if (ref->data.desc > new_ref->data.desc)
1624 break;
1625 new_ref->data.desc = ref->data.desc + 1;
1626 }
1627
1628 p = &proc->refs_by_desc.rb_node;
1629 while (*p) {
1630 parent = *p;
1631 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1632
1633 if (new_ref->data.desc < ref->data.desc)
1634 p = &(*p)->rb_left;
1635 else if (new_ref->data.desc > ref->data.desc)
1636 p = &(*p)->rb_right;
1637 else
1638 BUG();
1639 }
1640 rb_link_node(&new_ref->rb_node_desc, parent, p);
1641 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1642
1643 binder_node_lock(node);
1644 hlist_add_head(&new_ref->node_entry, &node->refs);
1645
1646 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1647 "%d new ref %d desc %d for node %d\n",
1648 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1649 node->debug_id);
1650 binder_node_unlock(node);
1651 return new_ref;
1652 }
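
/*
 * Example caller pattern (sketch), following the kernel-doc above: the
 * caller pre-allocates a ref and frees it if another thread raced and
 * inserted one first.
 *
 *	new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	binder_proc_unlock(proc);
 *	if (ref != new_ref)
 *		kfree(new_ref);
 */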
1653
1654 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1655 {
1656 bool delete_node = false;
1657
1658 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1659 "%d delete ref %d desc %d for node %d\n",
1660 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1661 ref->node->debug_id);
1662
1663 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1664 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1665
1666 binder_node_inner_lock(ref->node);
1667 if (ref->data.strong)
1668 binder_dec_node_nilocked(ref->node, 1, 1);
1669
1670 hlist_del(&ref->node_entry);
1671 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1672 binder_node_inner_unlock(ref->node);
1673 /*
1674 * Clear ref->node unless we want the caller to free the node
1675 */
1676 if (!delete_node) {
1677 /*
1678 * The caller uses ref->node to determine
1679 * whether the node needs to be freed. Clear
1680 * it since the node is still alive.
1681 */
1682 ref->node = NULL;
1683 }
1684
1685 if (ref->death) {
1686 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1687 "%d delete ref %d desc %d has death notification\n",
1688 ref->proc->pid, ref->data.debug_id,
1689 ref->data.desc);
1690 binder_dequeue_work(ref->proc, &ref->death->work);
1691 binder_stats_deleted(BINDER_STAT_DEATH);
1692 }
1693 binder_stats_deleted(BINDER_STAT_REF);
1694 }
1695
1696 /**
1697 * binder_inc_ref_olocked() - increment the ref for given handle
1698 * @ref: ref to be incremented
1699 * @strong: if true, strong increment, else weak
1700 * @target_list: list to queue node work on
1701 *
1702 * Increment the ref. @ref->proc->outer_lock must be held on entry
1703 *
1704 * Return: 0, if successful, else errno
1705 */
1706 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1707 struct list_head *target_list)
1708 {
1709 int ret;
1710
1711 if (strong) {
1712 if (ref->data.strong == 0) {
1713 ret = binder_inc_node(ref->node, 1, 1, target_list);
1714 if (ret)
1715 return ret;
1716 }
1717 ref->data.strong++;
1718 } else {
1719 if (ref->data.weak == 0) {
1720 ret = binder_inc_node(ref->node, 0, 1, target_list);
1721 if (ret)
1722 return ret;
1723 }
1724 ref->data.weak++;
1725 }
1726 return 0;
1727 }
1728
1729 /**
1730 * binder_dec_ref_olocked() - dec the ref for given handle
1731 * @ref: ref to be decremented
1732 * @strong: if true, strong decrement, else weak
1733 *
1734 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1735 *
1736 * Return: true if ref is cleaned up and ready to be freed
1737 */
1738 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1739 {
1740 if (strong) {
1741 if (ref->data.strong == 0) {
1742 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1743 ref->proc->pid, ref->data.debug_id,
1744 ref->data.desc, ref->data.strong,
1745 ref->data.weak);
1746 return false;
1747 }
1748 ref->data.strong--;
1749 if (ref->data.strong == 0)
1750 binder_dec_node(ref->node, strong, 1);
1751 } else {
1752 if (ref->data.weak == 0) {
1753 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1754 ref->proc->pid, ref->data.debug_id,
1755 ref->data.desc, ref->data.strong,
1756 ref->data.weak);
1757 return false;
1758 }
1759 ref->data.weak--;
1760 }
1761 if (ref->data.strong == 0 && ref->data.weak == 0) {
1762 binder_cleanup_ref_olocked(ref);
1763 return true;
1764 }
1765 return false;
1766 }
1767
1768 /**
1769 * binder_get_node_from_ref() - get the node from the given proc/desc
1770 * @proc: proc containing the ref
1771 * @desc: the handle associated with the ref
1772 * @need_strong_ref: if true, only return node if ref is strong
1773 * @rdata: the id/refcount data for the ref
1774 *
1775 * Given a proc and ref handle, return the associated binder_node
1776 *
1777 * Return: a binder_node, or NULL if not found or if the ref is not strong when a strong ref is required
1778 */
1779 static struct binder_node *binder_get_node_from_ref(
1780 struct binder_proc *proc,
1781 u32 desc, bool need_strong_ref,
1782 struct binder_ref_data *rdata)
1783 {
1784 struct binder_node *node;
1785 struct binder_ref *ref;
1786
1787 binder_proc_lock(proc);
1788 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1789 if (!ref)
1790 goto err_no_ref;
1791 node = ref->node;
1792 /*
1793 * Take an implicit reference on the node to ensure
1794 * it stays alive until the call to binder_put_node()
1795 */
1796 binder_inc_node_tmpref(node);
1797 if (rdata)
1798 *rdata = ref->data;
1799 binder_proc_unlock(proc);
1800
1801 return node;
1802
1803 err_no_ref:
1804 binder_proc_unlock(proc);
1805 return NULL;
1806 }
1807
1808 /**
1809 * binder_free_ref() - free the binder_ref
1810 * @ref: ref to free
1811 *
1812 * Free the binder_ref. Free the binder_node indicated by ref->node
1813 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1814 */
1815 static void binder_free_ref(struct binder_ref *ref)
1816 {
1817 if (ref->node)
1818 binder_free_node(ref->node);
1819 kfree(ref->death);
1820 kfree(ref);
1821 }
1822
1823 /**
1824 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1825 * @proc: proc containing the ref
1826 * @desc: the handle associated with the ref
1827 * @increment: true=inc reference, false=dec reference
1828 * @strong: true=strong reference, false=weak reference
1829 * @rdata: the id/refcount data for the ref
1830 *
1831 * Given a proc and ref handle, increment or decrement the ref
1832 * according to "increment" arg.
1833 *
1834 * Return: 0 if successful, else errno
1835 */
1836 static int binder_update_ref_for_handle(struct binder_proc *proc,
1837 uint32_t desc, bool increment, bool strong,
1838 struct binder_ref_data *rdata)
1839 {
1840 int ret = 0;
1841 struct binder_ref *ref;
1842 bool delete_ref = false;
1843
1844 binder_proc_lock(proc);
1845 ref = binder_get_ref_olocked(proc, desc, strong);
1846 if (!ref) {
1847 ret = -EINVAL;
1848 goto err_no_ref;
1849 }
1850 if (increment)
1851 ret = binder_inc_ref_olocked(ref, strong, NULL);
1852 else
1853 delete_ref = binder_dec_ref_olocked(ref, strong);
1854
1855 if (rdata)
1856 *rdata = ref->data;
1857 binder_proc_unlock(proc);
1858
1859 if (delete_ref)
1860 binder_free_ref(ref);
1861 return ret;
1862
1863 err_no_ref:
1864 binder_proc_unlock(proc);
1865 return ret;
1866 }
1867
1868 /**
1869 * binder_dec_ref_for_handle() - dec the ref for given handle
1870 * @proc: proc containing the ref
1871 * @desc: the handle associated with the ref
1872 * @strong: true=strong reference, false=weak reference
1873 * @rdata: the id/refcount data for the ref
1874 *
1875 * Just calls binder_update_ref_for_handle() to decrement the ref.
1876 *
1877 * Return: 0 if successful, else errno
1878 */
1879 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1880 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1881 {
1882 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1883 }
1884
1885
1886 /**
1887 * binder_inc_ref_for_node() - increment the ref for given proc/node
1888 * @proc: proc containing the ref
1889 * @node: target node
1890 * @strong: true=strong reference, false=weak reference
1891 * @target_list: worklist to use if node is incremented
1892 * @rdata: the id/refcount data for the ref
1893 *
1894 * Given a proc and node, increment the ref. Create the ref if it
1895 * doesn't already exist
1896 *
1897 * Return: 0 if successful, else errno
1898 */
1899 static int binder_inc_ref_for_node(struct binder_proc *proc,
1900 struct binder_node *node,
1901 bool strong,
1902 struct list_head *target_list,
1903 struct binder_ref_data *rdata)
1904 {
1905 struct binder_ref *ref;
1906 struct binder_ref *new_ref = NULL;
1907 int ret = 0;
1908
1909 binder_proc_lock(proc);
1910 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1911 if (!ref) {
1912 binder_proc_unlock(proc);
1913 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1914 if (!new_ref)
1915 return -ENOMEM;
1916 binder_proc_lock(proc);
1917 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1918 }
1919 ret = binder_inc_ref_olocked(ref, strong, target_list);
1920 *rdata = ref->data;
1921 binder_proc_unlock(proc);
1922 if (new_ref && ref != new_ref)
1923 /*
1924 * Another thread created the ref first so
1925 * free the one we allocated
1926 */
1927 kfree(new_ref);
1928 return ret;
1929 }
1930
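/**
 * binder_pop_transaction_ilocked() - pop transaction from thread's stack
 * @target_thread: thread owning the transaction stack
 * @t:             transaction expected at the top of the stack
 *
 * Pops @t off @target_thread's transaction stack and clears @t->from.
 * Requires @target_thread->proc->inner_lock to be held on entry.
 */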
1931 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1932 struct binder_transaction *t)
1933 {
1934 BUG_ON(!target_thread);
1935 assert_spin_locked(&target_thread->proc->inner_lock);
1936 BUG_ON(target_thread->transaction_stack != t);
1937 BUG_ON(target_thread->transaction_stack->from != target_thread);
1938 target_thread->transaction_stack =
1939 target_thread->transaction_stack->from_parent;
1940 t->from = NULL;
1941 }
1942
1943 /**
1944 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1945 * @thread: thread to decrement
1946 *
1947 * A thread needs to be kept alive while being used to create or
1948 * handle a transaction. binder_get_txn_from() is used to safely
1949 * extract t->from from a binder_transaction and keep the thread
1950 * indicated by t->from from being freed. When done with that
1951 * binder_thread, this function is called to decrement the
1952 * tmp_ref and free if appropriate (thread has been released
1953 * and no transaction is being processed by the driver).
1954 */
1955 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1956 {
1957 /*
1958 * atomic is used to protect the counter value while
1959 * it cannot reach zero or thread->is_dead is false
1960 */
1961 binder_inner_proc_lock(thread->proc);
1962 atomic_dec(&thread->tmp_ref);
1963 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1964 binder_inner_proc_unlock(thread->proc);
1965 binder_free_thread(thread);
1966 return;
1967 }
1968 binder_inner_proc_unlock(thread->proc);
1969 }
1970
1971 /**
1972 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1973 * @proc: proc to decrement
1974 *
1975 * A binder_proc needs to be kept alive while being used to create or
1976 * handle a transaction. proc->tmp_ref is incremented when
1977 * creating a new transaction or when the binder_proc is in use
1978 * by threads that are being released. When done with the binder_proc,
1979 * this function is called to decrement the counter and free the
1980 * proc if appropriate (proc has been released, all threads have
1981 * been released and it is not currently in use to process a transaction).
1982 */
1983 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1984 {
1985 binder_inner_proc_lock(proc);
1986 proc->tmp_ref--;
1987 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1988 !proc->tmp_ref) {
1989 binder_inner_proc_unlock(proc);
1990 binder_free_proc(proc);
1991 return;
1992 }
1993 binder_inner_proc_unlock(proc);
1994 }
1995
1996 /**
1997 * binder_get_txn_from() - safely extract the "from" thread in transaction
1998 * @t: binder transaction for t->from
1999 *
2000 * Atomically return the "from" thread and increment the tmp_ref
2001 * count for the thread to ensure it stays alive until
2002 * binder_thread_dec_tmpref() is called.
2003 *
2004 * Return: the value of t->from
2005 */
2006 static struct binder_thread *binder_get_txn_from(
2007 struct binder_transaction *t)
2008 {
2009 struct binder_thread *from;
2010
2011 spin_lock(&t->lock);
2012 from = t->from;
2013 if (from)
2014 atomic_inc(&from->tmp_ref);
2015 spin_unlock(&t->lock);
2016 return from;
2017 }
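
/*
 * Example usage (illustrative sketch, not a driver code path): a caller
 * that only needs to look at the sending thread pairs the temporary
 * reference with binder_thread_dec_tmpref() when done:
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		pr_info("transaction %d sent by %d:%d\n",
 *			t->debug_id, from->proc->pid, from->pid);
 *		binder_thread_dec_tmpref(from);
 *	}
 */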
2018
2019 /**
2020 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2021 * @t: binder transaction for t->from
2022 *
2023 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2024 * to guarantee that the thread cannot be released while operating on it.
2025 * The caller must call binder_inner_proc_unlock() to release the inner lock
2026 * as well as call binder_thread_dec_tmpref() to release the reference.
2027 *
2028 * Return: the value of t->from
2029 */
2030 static struct binder_thread *binder_get_txn_from_and_acq_inner(
2031 struct binder_transaction *t)
2032 __acquires(&t->from->proc->inner_lock)
2033 {
2034 struct binder_thread *from;
2035
2036 from = binder_get_txn_from(t);
2037 if (!from) {
2038 __acquire(&from->proc->inner_lock);
2039 return NULL;
2040 }
2041 binder_inner_proc_lock(from->proc);
2042 if (t->from) {
2043 BUG_ON(from != t->from);
2044 return from;
2045 }
2046 binder_inner_proc_unlock(from->proc);
2047 __acquire(&from->proc->inner_lock);
2048 binder_thread_dec_tmpref(from);
2049 return NULL;
2050 }
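
/*
 * Example usage (illustrative sketch, mirroring binder_send_failed_reply()):
 * on success the caller holds from->proc->inner_lock plus a tmp_ref on the
 * thread, and both must be released:
 *
 *	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
 *
 *	if (from) {
 *		... work on the thread under the inner lock ...
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	}
 */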
2051
2052 /**
2053 * binder_free_txn_fixups() - free unprocessed fd fixups
2054 * @t: binder transaction whose fd fixups are to be freed
2055 *
2056 * If the transaction is being torn down prior to being
2057 * processed by the target process, free all of the
2058 * fd fixups and fput the file structs. It is safe to
2059 * call this function after the fixups have been
2060 * processed -- in that case, the list will be empty.
2061 */
2062 static void binder_free_txn_fixups(struct binder_transaction *t)
2063 {
2064 struct binder_txn_fd_fixup *fixup, *tmp;
2065
2066 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
2067 fput(fixup->file);
2068 list_del(&fixup->fixup_entry);
2069 kfree(fixup);
2070 }
2071 }
2072
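/**
 * binder_free_transaction() - free a transaction and its bookkeeping
 * @t: transaction to free
 *
 * Clears t->buffer->transaction under the target proc's inner lock (if
 * the target proc is still known), frees any unprocessed fd fixups and
 * then frees the transaction itself.
 */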
2073 static void binder_free_transaction(struct binder_transaction *t)
2074 {
2075 struct binder_proc *target_proc = t->to_proc;
2076
2077 if (target_proc) {
2078 binder_inner_proc_lock(target_proc);
2079 if (t->buffer)
2080 t->buffer->transaction = NULL;
2081 binder_inner_proc_unlock(target_proc);
2082 }
2083 /*
2084 * If the transaction has no target_proc, then
2085 * t->buffer->transaction has already been cleared.
2086 */
2087 binder_free_txn_fixups(t);
2088 kfree(t);
2089 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2090 }
2091
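/**
 * binder_send_failed_reply() - report a failed transaction to its sender
 * @t:          failed transaction
 * @error_code: BR_* error to deliver to the waiting sender thread
 *
 * Walks the chain of synchronous transactions rooted at @t, queues
 * @error_code to the first sending thread that is still alive and frees
 * each transaction along the way. Must not be called for one-way
 * transactions.
 */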
2092 static void binder_send_failed_reply(struct binder_transaction *t,
2093 uint32_t error_code)
2094 {
2095 struct binder_thread *target_thread;
2096 struct binder_transaction *next;
2097
2098 BUG_ON(t->flags & TF_ONE_WAY);
2099 while (1) {
2100 target_thread = binder_get_txn_from_and_acq_inner(t);
2101 if (target_thread) {
2102 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2103 "send failed reply for transaction %d to %d:%d\n",
2104 t->debug_id,
2105 target_thread->proc->pid,
2106 target_thread->pid);
2107
2108 binder_pop_transaction_ilocked(target_thread, t);
2109 if (target_thread->reply_error.cmd == BR_OK) {
2110 target_thread->reply_error.cmd = error_code;
2111 binder_enqueue_thread_work_ilocked(
2112 target_thread,
2113 &target_thread->reply_error.work);
2114 wake_up_interruptible(&target_thread->wait);
2115 } else {
2116 /*
2117 * Cannot get here for normal operation, but
2118 * we can if multiple synchronous transactions
2119 * are sent without blocking for responses.
2120 * Just ignore the 2nd error in this case.
2121 */
2122 pr_warn("Unexpected reply error: %u\n",
2123 target_thread->reply_error.cmd);
2124 }
2125 binder_inner_proc_unlock(target_thread->proc);
2126 binder_thread_dec_tmpref(target_thread);
2127 binder_free_transaction(t);
2128 return;
2129 } else {
2130 __release(&target_thread->proc->inner_lock);
2131 }
2132 next = t->from_parent;
2133
2134 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2135 "send failed reply for transaction %d, target dead\n",
2136 t->debug_id);
2137
2138 binder_free_transaction(t);
2139 if (next == NULL) {
2140 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2141 "reply failed, no target thread at root\n");
2142 return;
2143 }
2144 t = next;
2145 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2146 "reply failed, no target thread -- retry %d\n",
2147 t->debug_id);
2148 }
2149 }
2150
2151 /**
2152 * binder_cleanup_transaction() - cleans up undelivered transaction
2153 * @t: transaction that needs to be cleaned up
2154 * @reason: reason the transaction wasn't delivered
2155 * @error_code: error to return to caller (if synchronous call)
2156 */
2157 static void binder_cleanup_transaction(struct binder_transaction *t,
2158 const char *reason,
2159 uint32_t error_code)
2160 {
2161 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2162 binder_send_failed_reply(t, error_code);
2163 } else {
2164 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2165 "undelivered transaction %d, %s\n",
2166 t->debug_id, reason);
2167 binder_free_transaction(t);
2168 }
2169 }
2170
2171 /**
2172 * binder_get_object() - gets object and checks for valid metadata
2173 * @proc: binder_proc owning the buffer
2174 * @buffer: binder_buffer that we're parsing.
2175 * @offset: offset in the @buffer at which to validate an object.
2176 * @object: struct binder_object to read into
2177 *
2178 * Return: If there's a valid metadata object at @offset in @buffer, the
2179 * size of that object. Otherwise, it returns zero. The object
2180 * is read into the struct binder_object pointed to by @object.
2181 */
2182 static size_t binder_get_object(struct binder_proc *proc,
2183 struct binder_buffer *buffer,
2184 unsigned long offset,
2185 struct binder_object *object)
2186 {
2187 size_t read_size;
2188 struct binder_object_header *hdr;
2189 size_t object_size = 0;
2190
2191 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2192 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2193 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2194 offset, read_size))
2195 return 0;
2196
2197 /* Ok, now see if we read a complete object. */
2198 hdr = &object->hdr;
2199 switch (hdr->type) {
2200 case BINDER_TYPE_BINDER:
2201 case BINDER_TYPE_WEAK_BINDER:
2202 case BINDER_TYPE_HANDLE:
2203 case BINDER_TYPE_WEAK_HANDLE:
2204 object_size = sizeof(struct flat_binder_object);
2205 break;
2206 case BINDER_TYPE_FD:
2207 object_size = sizeof(struct binder_fd_object);
2208 break;
2209 case BINDER_TYPE_PTR:
2210 object_size = sizeof(struct binder_buffer_object);
2211 break;
2212 case BINDER_TYPE_FDA:
2213 object_size = sizeof(struct binder_fd_array_object);
2214 break;
2215 default:
2216 return 0;
2217 }
2218 if (offset <= buffer->data_size - object_size &&
2219 buffer->data_size >= object_size)
2220 return object_size;
2221 else
2222 return 0;
2223 }
2224
2225 /**
2226 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2227 * @proc: binder_proc owning the buffer
2228 * @b: binder_buffer containing the object
2229 * @object: struct binder_object to read into
2230 * @index: index in offset array at which the binder_buffer_object is
2231 * located
2232 * @start_offset: points to the start of the offset array
2233 * @object_offsetp: offset of @object read from @b
2234 * @num_valid: the number of valid offsets in the offset array
2235 *
2236 * Return: If @index is within the valid range of the offset array
2237 * described by @start_offset and @num_valid, and if there's a valid
2238 * binder_buffer_object at the offset found in index @index
2239 * of the offset array, that object is returned. Otherwise,
2240 * %NULL is returned.
2241 * Note that the offset found in index @index itself is not
2242 * verified; this function assumes that @num_valid elements
2243 * from @start_offset were previously verified to have valid offsets.
2244 * If @object_offsetp is non-NULL, then the offset within
2245 * @b is written to it.
2246 */
2247 static struct binder_buffer_object *binder_validate_ptr(
2248 struct binder_proc *proc,
2249 struct binder_buffer *b,
2250 struct binder_object *object,
2251 binder_size_t index,
2252 binder_size_t start_offset,
2253 binder_size_t *object_offsetp,
2254 binder_size_t num_valid)
2255 {
2256 size_t object_size;
2257 binder_size_t object_offset;
2258 unsigned long buffer_offset;
2259
2260 if (index >= num_valid)
2261 return NULL;
2262
2263 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2264 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2265 b, buffer_offset,
2266 sizeof(object_offset)))
2267 return NULL;
2268 object_size = binder_get_object(proc, b, object_offset, object);
2269 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2270 return NULL;
2271 if (object_offsetp)
2272 *object_offsetp = object_offset;
2273
2274 return &object->bbo;
2275 }
2276
2277 /**
2278 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2279 * @proc: binder_proc owning the buffer
2280 * @b: transaction buffer
2281 * @objects_start_offset: offset to start of objects buffer
2282 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2283 * @fixup_offset: start offset in @buffer to fix up
2284 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2285 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2286 *
2287 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
2288 * allowed.
2289 *
2290 * For safety reasons, we only allow fixups inside a buffer to happen
2291 * at increasing offsets; additionally, we only allow fixup on the last
2292 * buffer object that was verified, or one of its parents.
2293 *
2294 * Example of what is allowed:
2295 *
2296 * A
2297 * B (parent = A, offset = 0)
2298 * C (parent = A, offset = 16)
2299 * D (parent = C, offset = 0)
2300 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2301 *
2302 * Examples of what is not allowed:
2303 *
2304 * Decreasing offsets within the same parent:
2305 * A
2306 * C (parent = A, offset = 16)
2307 * B (parent = A, offset = 0) // decreasing offset within A
2308 *
2309 * Referring to a parent that wasn't the last object or any of its parents:
2310 * A
2311 * B (parent = A, offset = 0)
2312 * C (parent = A, offset = 0)
2313 * C (parent = A, offset = 16)
2314 * D (parent = B, offset = 0) // B is not A or any of A's parents
2315 */
2316 static bool binder_validate_fixup(struct binder_proc *proc,
2317 struct binder_buffer *b,
2318 binder_size_t objects_start_offset,
2319 binder_size_t buffer_obj_offset,
2320 binder_size_t fixup_offset,
2321 binder_size_t last_obj_offset,
2322 binder_size_t last_min_offset)
2323 {
2324 if (!last_obj_offset) {
2325 /* Nothing to fix up in this buffer */
2326 return false;
2327 }
2328
2329 while (last_obj_offset != buffer_obj_offset) {
2330 unsigned long buffer_offset;
2331 struct binder_object last_object;
2332 struct binder_buffer_object *last_bbo;
2333 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2334 &last_object);
2335 if (object_size != sizeof(*last_bbo))
2336 return false;
2337
2338 last_bbo = &last_object.bbo;
2339 /*
2340 * Safe to retrieve the parent of last_obj, since it
2341 * was already previously verified by the driver.
2342 */
2343 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2344 return false;
2345 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2346 buffer_offset = objects_start_offset +
2347 sizeof(binder_size_t) * last_bbo->parent;
2348 if (binder_alloc_copy_from_buffer(&proc->alloc,
2349 &last_obj_offset,
2350 b, buffer_offset,
2351 sizeof(last_obj_offset)))
2352 return false;
2353 }
2354 return (fixup_offset >= last_min_offset);
2355 }
2356
2357 /**
2358 * struct binder_task_work_cb - for deferred close
2359 *
2360 * @twork: callback_head for task work
2361 * @file: file to close
2362 *
2363 * Structure to pass task work to be handled after
2364 * returning from binder_ioctl() via task_work_add().
2365 */
2366 struct binder_task_work_cb {
2367 struct callback_head twork;
2368 struct file *file;
2369 };
2370
2371 /**
2372 * binder_do_fd_close() - close the file from a deferred fd close
2373 * @twork: callback head for task work
2374 *
2375 * It is not safe to call ksys_close() during the binder_ioctl()
2376 * function if there is a chance that binder's own file descriptor
2377 * might be closed. This is to meet the requirements for using
2378 * fdget() (see comments for __fget_light()). Therefore use
2379 * task_work_add() to schedule the close operation once we have
2380 * returned from binder_ioctl(). This function is a callback
2381 * for that mechanism and does the actual fput() on the
2382 * file that backed the given file descriptor.
2383 */
2384 static void binder_do_fd_close(struct callback_head *twork)
2385 {
2386 struct binder_task_work_cb *twcb = container_of(twork,
2387 struct binder_task_work_cb, twork);
2388
2389 fput(twcb->file);
2390 kfree(twcb);
2391 }
2392
2393 /**
2394 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2395 * @fd: file-descriptor to close
2396 *
2397 * See comments in binder_do_fd_close(). This function is used to schedule
2398 * a file-descriptor to be closed after returning from binder_ioctl().
2399 */
2400 static void binder_deferred_fd_close(int fd)
2401 {
2402 struct binder_task_work_cb *twcb;
2403
2404 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2405 if (!twcb)
2406 return;
2407 init_task_work(&twcb->twork, binder_do_fd_close);
2408 __close_fd_get_file(fd, &twcb->file);
2409 if (twcb->file)
2410 task_work_add(current, &twcb->twork, true);
2411 else
2412 kfree(twcb);
2413 }
2414
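/**
 * binder_transaction_buffer_release() - release refs held by a buffer
 * @proc:       binder_proc owning the buffer
 * @buffer:     transaction buffer being released
 * @failed_at:  offset at which object translation failed, if @is_failure
 * @is_failure: true if releasing after a failed translation
 *
 * Walks the object offsets in @buffer and undoes the side effects of
 * translating them: node and ref counts are dropped and, when running in
 * the target process, file descriptors from fd arrays are scheduled for
 * a deferred close.
 */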
2415 static void binder_transaction_buffer_release(struct binder_proc *proc,
2416 struct binder_buffer *buffer,
2417 binder_size_t failed_at,
2418 bool is_failure)
2419 {
2420 int debug_id = buffer->debug_id;
2421 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2422
2423 binder_debug(BINDER_DEBUG_TRANSACTION,
2424 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2425 proc->pid, buffer->debug_id,
2426 buffer->data_size, buffer->offsets_size,
2427 (unsigned long long)failed_at);
2428
2429 if (buffer->target_node)
2430 binder_dec_node(buffer->target_node, 1, 0);
2431
2432 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2433 off_end_offset = is_failure ? failed_at :
2434 off_start_offset + buffer->offsets_size;
2435 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2436 buffer_offset += sizeof(binder_size_t)) {
2437 struct binder_object_header *hdr;
2438 size_t object_size = 0;
2439 struct binder_object object;
2440 binder_size_t object_offset;
2441
2442 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2443 buffer, buffer_offset,
2444 sizeof(object_offset)))
2445 object_size = binder_get_object(proc, buffer,
2446 object_offset, &object);
2447 if (object_size == 0) {
2448 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2449 debug_id, (u64)object_offset, buffer->data_size);
2450 continue;
2451 }
2452 hdr = &object.hdr;
2453 switch (hdr->type) {
2454 case BINDER_TYPE_BINDER:
2455 case BINDER_TYPE_WEAK_BINDER: {
2456 struct flat_binder_object *fp;
2457 struct binder_node *node;
2458
2459 fp = to_flat_binder_object(hdr);
2460 node = binder_get_node(proc, fp->binder);
2461 if (node == NULL) {
2462 pr_err("transaction release %d bad node %016llx\n",
2463 debug_id, (u64)fp->binder);
2464 break;
2465 }
2466 binder_debug(BINDER_DEBUG_TRANSACTION,
2467 " node %d u%016llx\n",
2468 node->debug_id, (u64)node->ptr);
2469 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2470 0);
2471 binder_put_node(node);
2472 } break;
2473 case BINDER_TYPE_HANDLE:
2474 case BINDER_TYPE_WEAK_HANDLE: {
2475 struct flat_binder_object *fp;
2476 struct binder_ref_data rdata;
2477 int ret;
2478
2479 fp = to_flat_binder_object(hdr);
2480 ret = binder_dec_ref_for_handle(proc, fp->handle,
2481 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2482
2483 if (ret) {
2484 pr_err("transaction release %d bad handle %d, ret = %d\n",
2485 debug_id, fp->handle, ret);
2486 break;
2487 }
2488 binder_debug(BINDER_DEBUG_TRANSACTION,
2489 " ref %d desc %d\n",
2490 rdata.debug_id, rdata.desc);
2491 } break;
2492
2493 case BINDER_TYPE_FD: {
2494 /*
2495 * No need to close the file here since user-space
2496 * closes it for successfully delivered
2497 * transactions. For transactions that weren't
2498 * delivered, the new fd was never allocated so
2499 * there is no need to close and the fput on the
2500 * file is done when the transaction is torn
2501 * down.
2502 */
2503 WARN_ON(failed_at &&
2504 proc->tsk == current->group_leader);
2505 } break;
2506 case BINDER_TYPE_PTR:
2507 /*
2508 * Nothing to do here, this will get cleaned up when the
2509 * transaction buffer gets freed
2510 */
2511 break;
2512 case BINDER_TYPE_FDA: {
2513 struct binder_fd_array_object *fda;
2514 struct binder_buffer_object *parent;
2515 struct binder_object ptr_object;
2516 binder_size_t fda_offset;
2517 size_t fd_index;
2518 binder_size_t fd_buf_size;
2519 binder_size_t num_valid;
2520
2521 if (proc->tsk != current->group_leader) {
2522 /*
2523 * Nothing to do if running in sender context
2524 * The fd fixups have not been applied so no
2525 * fds need to be closed.
2526 */
2527 continue;
2528 }
2529
2530 num_valid = (buffer_offset - off_start_offset) /
2531 sizeof(binder_size_t);
2532 fda = to_binder_fd_array_object(hdr);
2533 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2534 fda->parent,
2535 off_start_offset,
2536 NULL,
2537 num_valid);
2538 if (!parent) {
2539 pr_err("transaction release %d bad parent offset\n",
2540 debug_id);
2541 continue;
2542 }
2543 fd_buf_size = sizeof(u32) * fda->num_fds;
2544 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2545 pr_err("transaction release %d invalid number of fds (%lld)\n",
2546 debug_id, (u64)fda->num_fds);
2547 continue;
2548 }
2549 if (fd_buf_size > parent->length ||
2550 fda->parent_offset > parent->length - fd_buf_size) {
2551 /* No space for all file descriptors here. */
2552 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2553 debug_id, (u64)fda->num_fds);
2554 continue;
2555 }
2556 /*
2557 * the source data for binder_buffer_object is visible
2558 * to user-space and the @buffer element is the user
2559 * pointer to the buffer_object containing the fd_array.
2560 * Convert the address to an offset relative to
2561 * the base of the transaction buffer.
2562 */
2563 fda_offset =
2564 (parent->buffer - (uintptr_t)buffer->user_data) +
2565 fda->parent_offset;
2566 for (fd_index = 0; fd_index < fda->num_fds;
2567 fd_index++) {
2568 u32 fd;
2569 int err;
2570 binder_size_t offset = fda_offset +
2571 fd_index * sizeof(fd);
2572
2573 err = binder_alloc_copy_from_buffer(
2574 &proc->alloc, &fd, buffer,
2575 offset, sizeof(fd));
2576 WARN_ON(err);
2577 if (!err)
2578 binder_deferred_fd_close(fd);
2579 }
2580 } break;
2581 default:
2582 pr_err("transaction release %d bad object type %x\n",
2583 debug_id, hdr->type);
2584 break;
2585 }
2586 }
2587 }
2588
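/**
 * binder_translate_binder() - convert a local binder object into a handle
 * @fp:     flat_binder_object to translate in place
 * @t:      transaction the object belongs to
 * @thread: sending thread
 *
 * Looks up (or creates) the node for @fp->binder in the sending process,
 * takes a ref on it for the target process and rewrites @fp as a
 * BINDER_TYPE_HANDLE or BINDER_TYPE_WEAK_HANDLE carrying the new desc.
 */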
2589 static int binder_translate_binder(struct flat_binder_object *fp,
2590 struct binder_transaction *t,
2591 struct binder_thread *thread)
2592 {
2593 struct binder_node *node;
2594 struct binder_proc *proc = thread->proc;
2595 struct binder_proc *target_proc = t->to_proc;
2596 struct binder_ref_data rdata;
2597 int ret = 0;
2598
2599 node = binder_get_node(proc, fp->binder);
2600 if (!node) {
2601 node = binder_new_node(proc, fp);
2602 if (!node)
2603 return -ENOMEM;
2604 }
2605 if (fp->cookie != node->cookie) {
2606 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2607 proc->pid, thread->pid, (u64)fp->binder,
2608 node->debug_id, (u64)fp->cookie,
2609 (u64)node->cookie);
2610 ret = -EINVAL;
2611 goto done;
2612 }
2613 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2614 ret = -EPERM;
2615 goto done;
2616 }
2617
2618 ret = binder_inc_ref_for_node(target_proc, node,
2619 fp->hdr.type == BINDER_TYPE_BINDER,
2620 &thread->todo, &rdata);
2621 if (ret)
2622 goto done;
2623
2624 if (fp->hdr.type == BINDER_TYPE_BINDER)
2625 fp->hdr.type = BINDER_TYPE_HANDLE;
2626 else
2627 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2628 fp->binder = 0;
2629 fp->handle = rdata.desc;
2630 fp->cookie = 0;
2631
2632 trace_binder_transaction_node_to_ref(t, node, &rdata);
2633 binder_debug(BINDER_DEBUG_TRANSACTION,
2634 " node %d u%016llx -> ref %d desc %d\n",
2635 node->debug_id, (u64)node->ptr,
2636 rdata.debug_id, rdata.desc);
2637 done:
2638 binder_put_node(node);
2639 return ret;
2640 }
2641
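/**
 * binder_translate_handle() - translate a handle object for the target
 * @fp:     flat_binder_object to translate in place
 * @t:      transaction the object belongs to
 * @thread: sending thread
 *
 * If the handle refers to a node owned by the target process, @fp is
 * converted back into a (weak) binder object; otherwise a ref on the node
 * is taken for the target process and @fp is rewritten with the new desc.
 */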
2642 static int binder_translate_handle(struct flat_binder_object *fp,
2643 struct binder_transaction *t,
2644 struct binder_thread *thread)
2645 {
2646 struct binder_proc *proc = thread->proc;
2647 struct binder_proc *target_proc = t->to_proc;
2648 struct binder_node *node;
2649 struct binder_ref_data src_rdata;
2650 int ret = 0;
2651
2652 node = binder_get_node_from_ref(proc, fp->handle,
2653 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2654 if (!node) {
2655 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2656 proc->pid, thread->pid, fp->handle);
2657 return -EINVAL;
2658 }
2659 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2660 ret = -EPERM;
2661 goto done;
2662 }
2663
2664 binder_node_lock(node);
2665 if (node->proc == target_proc) {
2666 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2667 fp->hdr.type = BINDER_TYPE_BINDER;
2668 else
2669 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2670 fp->binder = node->ptr;
2671 fp->cookie = node->cookie;
2672 if (node->proc)
2673 binder_inner_proc_lock(node->proc);
2674 else
2675 __acquire(&node->proc->inner_lock);
2676 binder_inc_node_nilocked(node,
2677 fp->hdr.type == BINDER_TYPE_BINDER,
2678 0, NULL);
2679 if (node->proc)
2680 binder_inner_proc_unlock(node->proc);
2681 else
2682 __release(&node->proc->inner_lock);
2683 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2684 binder_debug(BINDER_DEBUG_TRANSACTION,
2685 " ref %d desc %d -> node %d u%016llx\n",
2686 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2687 (u64)node->ptr);
2688 binder_node_unlock(node);
2689 } else {
2690 struct binder_ref_data dest_rdata;
2691
2692 binder_node_unlock(node);
2693 ret = binder_inc_ref_for_node(target_proc, node,
2694 fp->hdr.type == BINDER_TYPE_HANDLE,
2695 NULL, &dest_rdata);
2696 if (ret)
2697 goto done;
2698
2699 fp->binder = 0;
2700 fp->handle = dest_rdata.desc;
2701 fp->cookie = 0;
2702 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2703 &dest_rdata);
2704 binder_debug(BINDER_DEBUG_TRANSACTION,
2705 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2706 src_rdata.debug_id, src_rdata.desc,
2707 dest_rdata.debug_id, dest_rdata.desc,
2708 node->debug_id);
2709 }
2710 done:
2711 binder_put_node(node);
2712 return ret;
2713 }
2714
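/**
 * binder_translate_fd() - prepare an fd object for the target process
 * @fd:          file descriptor in the sending process
 * @fd_offset:   offset of the fd field within the transaction buffer
 * @t:           transaction the fd belongs to
 * @thread:      sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * Checks that the target accepts file descriptors and that security
 * policy allows the transfer, then records an fd fixup so the new
 * descriptor can be installed later from a thread of the target process.
 */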
2715 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2716 struct binder_transaction *t,
2717 struct binder_thread *thread,
2718 struct binder_transaction *in_reply_to)
2719 {
2720 struct binder_proc *proc = thread->proc;
2721 struct binder_proc *target_proc = t->to_proc;
2722 struct binder_txn_fd_fixup *fixup;
2723 struct file *file;
2724 int ret = 0;
2725 bool target_allows_fd;
2726
2727 if (in_reply_to)
2728 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2729 else
2730 target_allows_fd = t->buffer->target_node->accept_fds;
2731 if (!target_allows_fd) {
2732 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2733 proc->pid, thread->pid,
2734 in_reply_to ? "reply" : "transaction",
2735 fd);
2736 ret = -EPERM;
2737 goto err_fd_not_accepted;
2738 }
2739
2740 file = fget(fd);
2741 if (!file) {
2742 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2743 proc->pid, thread->pid, fd);
2744 ret = -EBADF;
2745 goto err_fget;
2746 }
2747 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2748 if (ret < 0) {
2749 ret = -EPERM;
2750 goto err_security;
2751 }
2752
2753 /*
2754 * Add fixup record for this transaction. The allocation
2755 * of the fd in the target needs to be done from a
2756 * target thread.
2757 */
2758 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2759 if (!fixup) {
2760 ret = -ENOMEM;
2761 goto err_alloc;
2762 }
2763 fixup->file = file;
2764 fixup->offset = fd_offset;
2765 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2766 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2767
2768 return ret;
2769
2770 err_alloc:
2771 err_security:
2772 fput(file);
2773 err_fget:
2774 err_fd_not_accepted:
2775 return ret;
2776 }
2777
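/**
 * binder_translate_fd_array() - translate every fd in an fd array
 * @fda:         fd array object describing the fds to translate
 * @parent:      buffer object containing the fd array
 * @t:           transaction the array belongs to
 * @thread:      sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * Validates the size and alignment of the fd array inside @parent and
 * calls binder_translate_fd() for each descriptor in it.
 */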
2778 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2779 struct binder_buffer_object *parent,
2780 struct binder_transaction *t,
2781 struct binder_thread *thread,
2782 struct binder_transaction *in_reply_to)
2783 {
2784 binder_size_t fdi, fd_buf_size;
2785 binder_size_t fda_offset;
2786 struct binder_proc *proc = thread->proc;
2787 struct binder_proc *target_proc = t->to_proc;
2788
2789 fd_buf_size = sizeof(u32) * fda->num_fds;
2790 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2791 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2792 proc->pid, thread->pid, (u64)fda->num_fds);
2793 return -EINVAL;
2794 }
2795 if (fd_buf_size > parent->length ||
2796 fda->parent_offset > parent->length - fd_buf_size) {
2797 /* No space for all file descriptors here. */
2798 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2799 proc->pid, thread->pid, (u64)fda->num_fds);
2800 return -EINVAL;
2801 }
2802 /*
2803 * the source data for binder_buffer_object is visible
2804 * to user-space and the @buffer element is the user
2805 * pointer to the buffer_object containing the fd_array.
2806 * Convert the address to an offset relative to
2807 * the base of the transaction buffer.
2808 */
2809 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2810 fda->parent_offset;
2811 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2812 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2813 proc->pid, thread->pid);
2814 return -EINVAL;
2815 }
2816 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2817 u32 fd;
2818 int ret;
2819 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2820
2821 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2822 &fd, t->buffer,
2823 offset, sizeof(fd));
2824 if (!ret)
2825 ret = binder_translate_fd(fd, offset, t, thread,
2826 in_reply_to);
2827 if (ret < 0)
2828 return ret;
2829 }
2830 return 0;
2831 }
2832
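/**
 * binder_fixup_parent() - fix up a buffer object's pointer in its parent
 * @t:                  transaction the buffer object belongs to
 * @thread:             sending thread
 * @bp:                 buffer object whose parent pointer is fixed up
 * @off_start_offset:   offset to the start of the offsets array
 * @num_valid:          number of already-validated offsets
 * @last_fixup_obj_off: offset of the last buffer object that was fixed up
 * @last_fixup_min_off: minimum allowed fixup offset in that object
 *
 * If @bp has a parent, validates the parent and the fixup ordering, then
 * writes @bp->buffer into the parent buffer at @bp->parent_offset.
 */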
2833 static int binder_fixup_parent(struct binder_transaction *t,
2834 struct binder_thread *thread,
2835 struct binder_buffer_object *bp,
2836 binder_size_t off_start_offset,
2837 binder_size_t num_valid,
2838 binder_size_t last_fixup_obj_off,
2839 binder_size_t last_fixup_min_off)
2840 {
2841 struct binder_buffer_object *parent;
2842 struct binder_buffer *b = t->buffer;
2843 struct binder_proc *proc = thread->proc;
2844 struct binder_proc *target_proc = t->to_proc;
2845 struct binder_object object;
2846 binder_size_t buffer_offset;
2847 binder_size_t parent_offset;
2848
2849 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2850 return 0;
2851
2852 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2853 off_start_offset, &parent_offset,
2854 num_valid);
2855 if (!parent) {
2856 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2857 proc->pid, thread->pid);
2858 return -EINVAL;
2859 }
2860
2861 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2862 parent_offset, bp->parent_offset,
2863 last_fixup_obj_off,
2864 last_fixup_min_off)) {
2865 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2866 proc->pid, thread->pid);
2867 return -EINVAL;
2868 }
2869
2870 if (parent->length < sizeof(binder_uintptr_t) ||
2871 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2872 /* No space for a pointer here! */
2873 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2874 proc->pid, thread->pid);
2875 return -EINVAL;
2876 }
2877 buffer_offset = bp->parent_offset +
2878 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2879 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2880 &bp->buffer, sizeof(bp->buffer))) {
2881 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2882 proc->pid, thread->pid);
2883 return -EINVAL;
2884 }
2885
2886 return 0;
2887 }
2888
2889 /**
2890 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2891 * @t: transaction to send
2892 * @proc: process to send the transaction to
2893 * @thread: thread in @proc to send the transaction to (may be NULL)
2894 *
2895 * This function queues a transaction to the specified process. It will try
2896 * to find a thread in the target process to handle the transaction and
2897 * wake it up. If no thread is found, the work is queued to the proc
2898 * waitqueue.
2899 *
2900 * If the @thread parameter is not NULL, the transaction is always queued
2901 * to the waitlist of that specific thread.
2902 *
2903 * Return: true if the transaction was successfully queued
2904 * false if the target process or thread is dead
2905 */
2906 static bool binder_proc_transaction(struct binder_transaction *t,
2907 struct binder_proc *proc,
2908 struct binder_thread *thread)
2909 {
2910 struct binder_node *node = t->buffer->target_node;
2911 struct binder_priority node_prio;
2912 bool oneway = !!(t->flags & TF_ONE_WAY);
2913 bool pending_async = false;
2914 bool retry = false;
2915
2916 BUG_ON(!node);
2917
2918 set_thread_prio:
2919 node_prio.prio = node->min_priority;
2920 node_prio.sched_policy = node->sched_policy;
2921 if (thread) {
2922 /*
2923 * Priority must be set outside of lock, but must be
2924 * done before enqueuing the transaction.
2925 */
2926 binder_transaction_priority(thread->task, t, node_prio,
2927 node->inherit_rt);
2928 }
2929
2930 retry_after_prio_restore:
2931 binder_node_lock(node);
2932
2933 if (oneway) {
2934 BUG_ON(!retry && thread);
2935 if (node->has_async_transaction) {
2936 pending_async = true;
2937 } else {
2938 node->has_async_transaction = true;
2939 }
2940 if (thread && pending_async) {
2941 /*
2942 * The node state has changed since we selected
2943 * the thread. Return the thread to the
2944 * waiting_threads list. We have to drop
2945 * the node lock to restore priority so we
2946 * have to re-check the node state.
2947 */
2948 binder_node_unlock(node);
2949 binder_restore_priority(thread->task,
2950 proc->default_priority);
2951 binder_inner_proc_lock(proc);
2952 list_add(&thread->waiting_thread_node,
2953 &proc->waiting_threads);
2954 binder_inner_proc_unlock(proc);
2955 thread = NULL;
2956 goto retry_after_prio_restore;
2957 }
2958 }
2959
2960 binder_inner_proc_lock(proc);
2961
2962 if (proc->is_dead || (thread && thread->is_dead)) {
2963 binder_inner_proc_unlock(proc);
2964 binder_node_unlock(node);
2965 return false;
2966 }
2967
2968 if (!thread && !pending_async) {
2969 thread = binder_select_thread_ilocked(proc);
2970 if (thread) {
2971 if (oneway)
2972 node->has_async_transaction = false;
2973 binder_inner_proc_unlock(proc);
2974 binder_node_unlock(node);
2975 retry = true;
2976 goto set_thread_prio;
2977 }
2978 }
2979
2980 if (thread)
2981 binder_enqueue_thread_work_ilocked(thread, &t->work);
2982 else if (!pending_async)
2983 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2984 else
2985 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2986
2987 if (!pending_async)
2988 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2989
2990 binder_inner_proc_unlock(proc);
2991 binder_node_unlock(node);
2992
2993 return true;
2994 }
2995
2996 /**
2997 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2998 * @node: struct binder_node for which to get refs
2999 * @procp: returns @node->proc if valid
3000 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
3001 *
3002 * User-space normally keeps the node alive when creating a transaction
3003 * since it has a reference to the target. The local strong ref keeps it
3004 * alive if the sending process dies before the target process processes
3005 * the transaction. If the source process is malicious or has a reference
3006 * counting bug, relying on the local strong ref can fail.
3007 *
3008 * Since user-space can cause the local strong ref to go away, we also take
3009 * a tmpref on the node to ensure it survives while we are constructing
3010 * the transaction. We also need a tmpref on the proc while we are
3011 * constructing the transaction, so we take that here as well.
3012 *
3013 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3014 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3015 * target proc has died, @error is set to BR_DEAD_REPLY.
3016 */
3017 static struct binder_node *binder_get_node_refs_for_txn(
3018 struct binder_node *node,
3019 struct binder_proc **procp,
3020 uint32_t *error)
3021 {
3022 struct binder_node *target_node = NULL;
3023
3024 binder_node_inner_lock(node);
3025 if (node->proc) {
3026 target_node = node;
3027 binder_inc_node_nilocked(node, 1, 0, NULL);
3028 binder_inc_node_tmpref_ilocked(node);
3029 node->proc->tmp_ref++;
3030 *procp = node->proc;
3031 } else
3032 *error = BR_DEAD_REPLY;
3033 binder_node_inner_unlock(node);
3034
3035 return target_node;
3036 }
3037
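/**
 * binder_transaction() - process a BC_TRANSACTION or BC_REPLY from userspace
 * @proc:               sending process
 * @thread:             sending thread
 * @tr:                 transaction data copied in from userspace
 * @reply:              nonzero if this is a BC_REPLY
 * @extra_buffers_size: size of extra (scatter-gather) buffer space
 *
 * Resolves the target process and thread, allocates and fills the
 * transaction buffer, translates all embedded objects and queues the
 * transaction (or reply) to the target, reporting failures back to the
 * sender.
 */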
3038 static void binder_transaction(struct binder_proc *proc,
3039 struct binder_thread *thread,
3040 struct binder_transaction_data *tr, int reply,
3041 binder_size_t extra_buffers_size)
3042 {
3043 int ret;
3044 struct binder_transaction *t;
3045 struct binder_work *w;
3046 struct binder_work *tcomplete;
3047 binder_size_t buffer_offset = 0;
3048 binder_size_t off_start_offset, off_end_offset;
3049 binder_size_t off_min;
3050 binder_size_t sg_buf_offset, sg_buf_end_offset;
3051 struct binder_proc *target_proc = NULL;
3052 struct binder_thread *target_thread = NULL;
3053 struct binder_node *target_node = NULL;
3054 struct binder_transaction *in_reply_to = NULL;
3055 struct binder_transaction_log_entry *e;
3056 uint32_t return_error = 0;
3057 uint32_t return_error_param = 0;
3058 uint32_t return_error_line = 0;
3059 binder_size_t last_fixup_obj_off = 0;
3060 binder_size_t last_fixup_min_off = 0;
3061 struct binder_context *context = proc->context;
3062 int t_debug_id = atomic_inc_return(&binder_last_id);
3063 char *secctx = NULL;
3064 u32 secctx_sz = 0;
3065
3066 e = binder_transaction_log_add(&binder_transaction_log);
3067 e->debug_id = t_debug_id;
3068 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3069 e->from_proc = proc->pid;
3070 e->from_thread = thread->pid;
3071 e->target_handle = tr->target.handle;
3072 e->data_size = tr->data_size;
3073 e->offsets_size = tr->offsets_size;
3074 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3075
3076 if (reply) {
3077 binder_inner_proc_lock(proc);
3078 in_reply_to = thread->transaction_stack;
3079 if (in_reply_to == NULL) {
3080 binder_inner_proc_unlock(proc);
3081 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3082 proc->pid, thread->pid);
3083 return_error = BR_FAILED_REPLY;
3084 return_error_param = -EPROTO;
3085 return_error_line = __LINE__;
3086 goto err_empty_call_stack;
3087 }
3088 if (in_reply_to->to_thread != thread) {
3089 spin_lock(&in_reply_to->lock);
3090 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3091 proc->pid, thread->pid, in_reply_to->debug_id,
3092 in_reply_to->to_proc ?
3093 in_reply_to->to_proc->pid : 0,
3094 in_reply_to->to_thread ?
3095 in_reply_to->to_thread->pid : 0);
3096 spin_unlock(&in_reply_to->lock);
3097 binder_inner_proc_unlock(proc);
3098 return_error = BR_FAILED_REPLY;
3099 return_error_param = -EPROTO;
3100 return_error_line = __LINE__;
3101 in_reply_to = NULL;
3102 goto err_bad_call_stack;
3103 }
3104 thread->transaction_stack = in_reply_to->to_parent;
3105 binder_inner_proc_unlock(proc);
3106 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3107 if (target_thread == NULL) {
3108 /* annotation for sparse */
3109 __release(&target_thread->proc->inner_lock);
3110 return_error = BR_DEAD_REPLY;
3111 return_error_line = __LINE__;
3112 goto err_dead_binder;
3113 }
3114 if (target_thread->transaction_stack != in_reply_to) {
3115 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3116 proc->pid, thread->pid,
3117 target_thread->transaction_stack ?
3118 target_thread->transaction_stack->debug_id : 0,
3119 in_reply_to->debug_id);
3120 binder_inner_proc_unlock(target_thread->proc);
3121 return_error = BR_FAILED_REPLY;
3122 return_error_param = -EPROTO;
3123 return_error_line = __LINE__;
3124 in_reply_to = NULL;
3125 target_thread = NULL;
3126 goto err_dead_binder;
3127 }
3128 target_proc = target_thread->proc;
3129 target_proc->tmp_ref++;
3130 binder_inner_proc_unlock(target_thread->proc);
3131 } else {
3132 if (tr->target.handle) {
3133 struct binder_ref *ref;
3134
3135 /*
3136 * There must already be a strong ref
3137 * on this node. If so, do a strong
3138 * increment on the node to ensure it
3139 * stays alive until the transaction is
3140 * done.
3141 */
3142 binder_proc_lock(proc);
3143 ref = binder_get_ref_olocked(proc, tr->target.handle,
3144 true);
3145 if (ref) {
3146 target_node = binder_get_node_refs_for_txn(
3147 ref->node, &target_proc,
3148 &return_error);
3149 } else {
3150 binder_user_error("%d:%d got transaction to invalid handle\n",
3151 proc->pid, thread->pid);
3152 return_error = BR_FAILED_REPLY;
3153 }
3154 binder_proc_unlock(proc);
3155 } else {
3156 mutex_lock(&context->context_mgr_node_lock);
3157 target_node = context->binder_context_mgr_node;
3158 if (target_node)
3159 target_node = binder_get_node_refs_for_txn(
3160 target_node, &target_proc,
3161 &return_error);
3162 else
3163 return_error = BR_DEAD_REPLY;
3164 mutex_unlock(&context->context_mgr_node_lock);
3165 if (target_node && target_proc->pid == proc->pid) {
3166 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3167 proc->pid, thread->pid);
3168 return_error = BR_FAILED_REPLY;
3169 return_error_param = -EINVAL;
3170 return_error_line = __LINE__;
3171 goto err_invalid_target_handle;
3172 }
3173 }
3174 if (!target_node) {
3175 /*
3176 * return_error is set above
3177 */
3178 return_error_param = -EINVAL;
3179 return_error_line = __LINE__;
3180 goto err_dead_binder;
3181 }
3182 e->to_node = target_node->debug_id;
3183 if (security_binder_transaction(proc->tsk,
3184 target_proc->tsk) < 0) {
3185 return_error = BR_FAILED_REPLY;
3186 return_error_param = -EPERM;
3187 return_error_line = __LINE__;
3188 goto err_invalid_target_handle;
3189 }
3190 binder_inner_proc_lock(proc);
3191
3192 w = list_first_entry_or_null(&thread->todo,
3193 struct binder_work, entry);
3194 if (!(tr->flags & TF_ONE_WAY) && w &&
3195 w->type == BINDER_WORK_TRANSACTION) {
3196 /*
3197 * Do not allow new outgoing transaction from a
3198 * thread that has a transaction at the head of
3199 * its todo list. Only need to check the head
3200 * because binder_select_thread_ilocked picks a
3201 * thread from proc->waiting_threads to enqueue
3202 * the transaction, and nothing is queued to the
3203 * todo list while the thread is on waiting_threads.
3204 */
3205 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3206 proc->pid, thread->pid);
3207 binder_inner_proc_unlock(proc);
3208 return_error = BR_FAILED_REPLY;
3209 return_error_param = -EPROTO;
3210 return_error_line = __LINE__;
3211 goto err_bad_todo_list;
3212 }
3213
3214 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3215 struct binder_transaction *tmp;
3216
3217 tmp = thread->transaction_stack;
3218 if (tmp->to_thread != thread) {
3219 spin_lock(&tmp->lock);
3220 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3221 proc->pid, thread->pid, tmp->debug_id,
3222 tmp->to_proc ? tmp->to_proc->pid : 0,
3223 tmp->to_thread ?
3224 tmp->to_thread->pid : 0);
3225 spin_unlock(&tmp->lock);
3226 binder_inner_proc_unlock(proc);
3227 return_error = BR_FAILED_REPLY;
3228 return_error_param = -EPROTO;
3229 return_error_line = __LINE__;
3230 goto err_bad_call_stack;
3231 }
3232 while (tmp) {
3233 struct binder_thread *from;
3234
3235 spin_lock(&tmp->lock);
3236 from = tmp->from;
3237 if (from && from->proc == target_proc) {
3238 atomic_inc(&from->tmp_ref);
3239 target_thread = from;
3240 spin_unlock(&tmp->lock);
3241 break;
3242 }
3243 spin_unlock(&tmp->lock);
3244 tmp = tmp->from_parent;
3245 }
3246 }
3247 binder_inner_proc_unlock(proc);
3248 }
3249 if (target_thread)
3250 e->to_thread = target_thread->pid;
3251 e->to_proc = target_proc->pid;
3252
3253 /* TODO: reuse incoming transaction for reply */
3254 t = kzalloc(sizeof(*t), GFP_KERNEL);
3255 if (t == NULL) {
3256 return_error = BR_FAILED_REPLY;
3257 return_error_param = -ENOMEM;
3258 return_error_line = __LINE__;
3259 goto err_alloc_t_failed;
3260 }
3261 INIT_LIST_HEAD(&t->fd_fixups);
3262 binder_stats_created(BINDER_STAT_TRANSACTION);
3263 spin_lock_init(&t->lock);
3264
3265 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3266 if (tcomplete == NULL) {
3267 return_error = BR_FAILED_REPLY;
3268 return_error_param = -ENOMEM;
3269 return_error_line = __LINE__;
3270 goto err_alloc_tcomplete_failed;
3271 }
3272 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3273
3274 t->debug_id = t_debug_id;
3275
3276 if (reply)
3277 binder_debug(BINDER_DEBUG_TRANSACTION,
3278 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3279 proc->pid, thread->pid, t->debug_id,
3280 target_proc->pid, target_thread->pid,
3281 (u64)tr->data.ptr.buffer,
3282 (u64)tr->data.ptr.offsets,
3283 (u64)tr->data_size, (u64)tr->offsets_size,
3284 (u64)extra_buffers_size);
3285 else
3286 binder_debug(BINDER_DEBUG_TRANSACTION,
3287 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3288 proc->pid, thread->pid, t->debug_id,
3289 target_proc->pid, target_node->debug_id,
3290 (u64)tr->data.ptr.buffer,
3291 (u64)tr->data.ptr.offsets,
3292 (u64)tr->data_size, (u64)tr->offsets_size,
3293 (u64)extra_buffers_size);
3294
3295 if (!reply && !(tr->flags & TF_ONE_WAY))
3296 t->from = thread;
3297 else
3298 t->from = NULL;
3299 t->sender_euid = task_euid(proc->tsk);
3300 t->to_proc = target_proc;
3301 t->to_thread = target_thread;
3302 t->code = tr->code;
3303 t->flags = tr->flags;
3304 if (!(t->flags & TF_ONE_WAY) &&
3305 binder_supported_policy(current->policy)) {
3306 /* Inherit supported policies for synchronous transactions */
3307 t->priority.sched_policy = current->policy;
3308 t->priority.prio = current->normal_prio;
3309 } else {
3310 /* Otherwise, fall back to the default priority */
3311 t->priority = target_proc->default_priority;
3312 }
3313
3314 if (target_node && target_node->txn_security_ctx) {
3315 u32 secid;
3316 size_t added_size;
3317
3318 security_task_getsecid(proc->tsk, &secid);
3319 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3320 if (ret) {
3321 return_error = BR_FAILED_REPLY;
3322 return_error_param = ret;
3323 return_error_line = __LINE__;
3324 goto err_get_secctx_failed;
3325 }
3326 added_size = ALIGN(secctx_sz, sizeof(u64));
3327 extra_buffers_size += added_size;
3328 if (extra_buffers_size < added_size) {
3329 /* integer overflow of extra_buffers_size */
3330 return_error = BR_FAILED_REPLY;
3331 return_error_param = -EINVAL;
3332 return_error_line = __LINE__;
3333 goto err_bad_extra_size;
3334 }
3335 }
3336
3337 trace_binder_transaction(reply, t, target_node);
3338
3339 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3340 tr->offsets_size, extra_buffers_size,
3341 !reply && (t->flags & TF_ONE_WAY));
3342 if (IS_ERR(t->buffer)) {
3343 /*
3344 * -ESRCH indicates VMA cleared. The target is dying.
3345 */
3346 return_error_param = PTR_ERR(t->buffer);
3347 return_error = return_error_param == -ESRCH ?
3348 BR_DEAD_REPLY : BR_FAILED_REPLY;
3349 return_error_line = __LINE__;
3350 t->buffer = NULL;
3351 goto err_binder_alloc_buf_failed;
3352 }
3353 if (secctx) {
3354 int err;
3355 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3356 ALIGN(tr->offsets_size, sizeof(void *)) +
3357 ALIGN(extra_buffers_size, sizeof(void *)) -
3358 ALIGN(secctx_sz, sizeof(u64));
3359
3360 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3361 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3362 t->buffer, buf_offset,
3363 secctx, secctx_sz);
3364 if (err) {
3365 t->security_ctx = 0;
3366 WARN_ON(1);
3367 }
3368 security_release_secctx(secctx, secctx_sz);
3369 secctx = NULL;
3370 }
3371 t->buffer->debug_id = t->debug_id;
3372 t->buffer->transaction = t;
3373 t->buffer->target_node = target_node;
3374 trace_binder_transaction_alloc_buf(t->buffer);
3375
3376 if (binder_alloc_copy_user_to_buffer(
3377 &target_proc->alloc,
3378 t->buffer, 0,
3379 (const void __user *)
3380 (uintptr_t)tr->data.ptr.buffer,
3381 tr->data_size)) {
3382 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3383 proc->pid, thread->pid);
3384 return_error = BR_FAILED_REPLY;
3385 return_error_param = -EFAULT;
3386 return_error_line = __LINE__;
3387 goto err_copy_data_failed;
3388 }
3389 if (binder_alloc_copy_user_to_buffer(
3390 &target_proc->alloc,
3391 t->buffer,
3392 ALIGN(tr->data_size, sizeof(void *)),
3393 (const void __user *)
3394 (uintptr_t)tr->data.ptr.offsets,
3395 tr->offsets_size)) {
3396 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3397 proc->pid, thread->pid);
3398 return_error = BR_FAILED_REPLY;
3399 return_error_param = -EFAULT;
3400 return_error_line = __LINE__;
3401 goto err_copy_data_failed;
3402 }
3403 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3404 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3405 proc->pid, thread->pid, (u64)tr->offsets_size);
3406 return_error = BR_FAILED_REPLY;
3407 return_error_param = -EINVAL;
3408 return_error_line = __LINE__;
3409 goto err_bad_offset;
3410 }
3411 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3412 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3413 proc->pid, thread->pid,
3414 (u64)extra_buffers_size);
3415 return_error = BR_FAILED_REPLY;
3416 return_error_param = -EINVAL;
3417 return_error_line = __LINE__;
3418 goto err_bad_offset;
3419 }
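/*
 * Layout of the target buffer as computed below (regions aligned per
 * the ALIGN() calls): payload data, then the offsets array, then any
 * scatter-gather buffers carried by BINDER_TYPE_PTR objects, with the
 * security context (if any) occupying the tail of the extra buffers:
 *
 *   | data | offsets | sg buffers | secctx |
 *          ^off_start_offset
 *                    ^sg_buf_offset
 *                                 ^sg_buf_end_offset
 */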
3420 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3421 buffer_offset = off_start_offset;
3422 off_end_offset = off_start_offset + tr->offsets_size;
3423 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3424 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3425 ALIGN(secctx_sz, sizeof(u64));
3426 off_min = 0;
3427 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3428 buffer_offset += sizeof(binder_size_t)) {
3429 struct binder_object_header *hdr;
3430 size_t object_size;
3431 struct binder_object object;
3432 binder_size_t object_offset;
3433
3434 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3435 &object_offset,
3436 t->buffer,
3437 buffer_offset,
3438 sizeof(object_offset))) {
3439 return_error = BR_FAILED_REPLY;
3440 return_error_param = -EINVAL;
3441 return_error_line = __LINE__;
3442 goto err_bad_offset;
3443 }
3444 object_size = binder_get_object(target_proc, t->buffer,
3445 object_offset, &object);
3446 if (object_size == 0 || object_offset < off_min) {
3447 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3448 proc->pid, thread->pid,
3449 (u64)object_offset,
3450 (u64)off_min,
3451 (u64)t->buffer->data_size);
3452 return_error = BR_FAILED_REPLY;
3453 return_error_param = -EINVAL;
3454 return_error_line = __LINE__;
3455 goto err_bad_offset;
3456 }
3457
3458 hdr = &object.hdr;
3459 off_min = object_offset + object_size;
3460 switch (hdr->type) {
3461 case BINDER_TYPE_BINDER:
3462 case BINDER_TYPE_WEAK_BINDER: {
3463 struct flat_binder_object *fp;
3464
3465 fp = to_flat_binder_object(hdr);
3466 ret = binder_translate_binder(fp, t, thread);
3467
3468 if (ret < 0 ||
3469 binder_alloc_copy_to_buffer(&target_proc->alloc,
3470 t->buffer,
3471 object_offset,
3472 fp, sizeof(*fp))) {
3473 return_error = BR_FAILED_REPLY;
3474 return_error_param = ret;
3475 return_error_line = __LINE__;
3476 goto err_translate_failed;
3477 }
3478 } break;
3479 case BINDER_TYPE_HANDLE:
3480 case BINDER_TYPE_WEAK_HANDLE: {
3481 struct flat_binder_object *fp;
3482
3483 fp = to_flat_binder_object(hdr);
3484 ret = binder_translate_handle(fp, t, thread);
3485 if (ret < 0 ||
3486 binder_alloc_copy_to_buffer(&target_proc->alloc,
3487 t->buffer,
3488 object_offset,
3489 fp, sizeof(*fp))) {
3490 return_error = BR_FAILED_REPLY;
3491 return_error_param = ret;
3492 return_error_line = __LINE__;
3493 goto err_translate_failed;
3494 }
3495 } break;
3496
3497 case BINDER_TYPE_FD: {
3498 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3499 binder_size_t fd_offset = object_offset +
3500 (uintptr_t)&fp->fd - (uintptr_t)fp;
3501 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3502 thread, in_reply_to);
3503
3504 fp->pad_binder = 0;
3505 if (ret < 0 ||
3506 binder_alloc_copy_to_buffer(&target_proc->alloc,
3507 t->buffer,
3508 object_offset,
3509 fp, sizeof(*fp))) {
3510 return_error = BR_FAILED_REPLY;
3511 return_error_param = ret;
3512 return_error_line = __LINE__;
3513 goto err_translate_failed;
3514 }
3515 } break;
3516 case BINDER_TYPE_FDA: {
3517 struct binder_object ptr_object;
3518 binder_size_t parent_offset;
3519 struct binder_fd_array_object *fda =
3520 to_binder_fd_array_object(hdr);
3521 size_t num_valid = (buffer_offset - off_start_offset) /
3522 sizeof(binder_size_t);
3523 struct binder_buffer_object *parent =
3524 binder_validate_ptr(target_proc, t->buffer,
3525 &ptr_object, fda->parent,
3526 off_start_offset,
3527 &parent_offset,
3528 num_valid);
3529 if (!parent) {
3530 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3531 proc->pid, thread->pid);
3532 return_error = BR_FAILED_REPLY;
3533 return_error_param = -EINVAL;
3534 return_error_line = __LINE__;
3535 goto err_bad_parent;
3536 }
3537 if (!binder_validate_fixup(target_proc, t->buffer,
3538 off_start_offset,
3539 parent_offset,
3540 fda->parent_offset,
3541 last_fixup_obj_off,
3542 last_fixup_min_off)) {
3543 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3544 proc->pid, thread->pid);
3545 return_error = BR_FAILED_REPLY;
3546 return_error_param = -EINVAL;
3547 return_error_line = __LINE__;
3548 goto err_bad_parent;
3549 }
3550 ret = binder_translate_fd_array(fda, parent, t, thread,
3551 in_reply_to);
3552 if (ret < 0) {
3553 return_error = BR_FAILED_REPLY;
3554 return_error_param = ret;
3555 return_error_line = __LINE__;
3556 goto err_translate_failed;
3557 }
3558 last_fixup_obj_off = parent_offset;
3559 last_fixup_min_off =
3560 fda->parent_offset + sizeof(u32) * fda->num_fds;
3561 } break;
3562 case BINDER_TYPE_PTR: {
3563 struct binder_buffer_object *bp =
3564 to_binder_buffer_object(hdr);
3565 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3566 size_t num_valid;
3567
3568 if (bp->length > buf_left) {
3569 binder_user_error("%d:%d got transaction with too large buffer\n",
3570 proc->pid, thread->pid);
3571 return_error = BR_FAILED_REPLY;
3572 return_error_param = -EINVAL;
3573 return_error_line = __LINE__;
3574 goto err_bad_offset;
3575 }
3576 if (binder_alloc_copy_user_to_buffer(
3577 &target_proc->alloc,
3578 t->buffer,
3579 sg_buf_offset,
3580 (const void __user *)
3581 (uintptr_t)bp->buffer,
3582 bp->length)) {
3583 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3584 proc->pid, thread->pid);
3585 return_error_param = -EFAULT;
3586 return_error = BR_FAILED_REPLY;
3587 return_error_line = __LINE__;
3588 goto err_copy_data_failed;
3589 }
3590 /* Fix up buffer pointer to target proc address space */
3591 bp->buffer = (uintptr_t)
3592 t->buffer->user_data + sg_buf_offset;
3593 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3594
3595 num_valid = (buffer_offset - off_start_offset) /
3596 sizeof(binder_size_t);
3597 ret = binder_fixup_parent(t, thread, bp,
3598 off_start_offset,
3599 num_valid,
3600 last_fixup_obj_off,
3601 last_fixup_min_off);
3602 if (ret < 0 ||
3603 binder_alloc_copy_to_buffer(&target_proc->alloc,
3604 t->buffer,
3605 object_offset,
3606 bp, sizeof(*bp))) {
3607 return_error = BR_FAILED_REPLY;
3608 return_error_param = ret;
3609 return_error_line = __LINE__;
3610 goto err_translate_failed;
3611 }
3612 last_fixup_obj_off = object_offset;
3613 last_fixup_min_off = 0;
3614 } break;
3615 default:
3616 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3617 proc->pid, thread->pid, hdr->type);
3618 return_error = BR_FAILED_REPLY;
3619 return_error_param = -EINVAL;
3620 return_error_line = __LINE__;
3621 goto err_bad_object_type;
3622 }
3623 }
3624 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3625 t->work.type = BINDER_WORK_TRANSACTION;
3626
3627 if (reply) {
3628 binder_enqueue_thread_work(thread, tcomplete);
3629 binder_inner_proc_lock(target_proc);
3630 if (target_thread->is_dead) {
3631 binder_inner_proc_unlock(target_proc);
3632 goto err_dead_proc_or_thread;
3633 }
3634 BUG_ON(t->buffer->async_transaction != 0);
3635 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3636 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3637 binder_inner_proc_unlock(target_proc);
3638 wake_up_interruptible_sync(&target_thread->wait);
3639 binder_restore_priority(current, in_reply_to->saved_priority);
3640 binder_free_transaction(in_reply_to);
3641 } else if (!(t->flags & TF_ONE_WAY)) {
3642 BUG_ON(t->buffer->async_transaction != 0);
3643 binder_inner_proc_lock(proc);
3644 /*
3645 * Defer the TRANSACTION_COMPLETE, so we don't return to
3646 * userspace immediately; this allows the target process to
3647 * immediately start processing this transaction, reducing
3648 * latency. We will then return the TRANSACTION_COMPLETE when
3649 * the target replies (or there is an error).
3650 */
3651 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3652 t->need_reply = 1;
3653 t->from_parent = thread->transaction_stack;
3654 thread->transaction_stack = t;
3655 binder_inner_proc_unlock(proc);
3656 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3657 binder_inner_proc_lock(proc);
3658 binder_pop_transaction_ilocked(thread, t);
3659 binder_inner_proc_unlock(proc);
3660 goto err_dead_proc_or_thread;
3661 }
3662 } else {
3663 BUG_ON(target_node == NULL);
3664 BUG_ON(t->buffer->async_transaction != 1);
3665 binder_enqueue_thread_work(thread, tcomplete);
3666 if (!binder_proc_transaction(t, target_proc, NULL))
3667 goto err_dead_proc_or_thread;
3668 }
3669 if (target_thread)
3670 binder_thread_dec_tmpref(target_thread);
3671 binder_proc_dec_tmpref(target_proc);
3672 if (target_node)
3673 binder_dec_node_tmpref(target_node);
3674 /*
3675 * write barrier to synchronize with initialization
3676 * of log entry
3677 */
3678 smp_wmb();
3679 WRITE_ONCE(e->debug_id_done, t_debug_id);
3680 return;
3681
3682 err_dead_proc_or_thread:
3683 return_error = BR_DEAD_REPLY;
3684 return_error_line = __LINE__;
3685 binder_dequeue_work(proc, tcomplete);
3686 err_translate_failed:
3687 err_bad_object_type:
3688 err_bad_offset:
3689 err_bad_parent:
3690 err_copy_data_failed:
3691 binder_free_txn_fixups(t);
3692 trace_binder_transaction_failed_buffer_release(t->buffer);
3693 binder_transaction_buffer_release(target_proc, t->buffer,
3694 buffer_offset, true);
3695 if (target_node)
3696 binder_dec_node_tmpref(target_node);
3697 target_node = NULL;
3698 t->buffer->transaction = NULL;
3699 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3700 err_binder_alloc_buf_failed:
3701 err_bad_extra_size:
3702 if (secctx)
3703 security_release_secctx(secctx, secctx_sz);
3704 err_get_secctx_failed:
3705 kfree(tcomplete);
3706 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3707 err_alloc_tcomplete_failed:
3708 kfree(t);
3709 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3710 err_alloc_t_failed:
3711 err_bad_todo_list:
3712 err_bad_call_stack:
3713 err_empty_call_stack:
3714 err_dead_binder:
3715 err_invalid_target_handle:
3716 if (target_thread)
3717 binder_thread_dec_tmpref(target_thread);
3718 if (target_proc)
3719 binder_proc_dec_tmpref(target_proc);
3720 if (target_node) {
3721 binder_dec_node(target_node, 1, 0);
3722 binder_dec_node_tmpref(target_node);
3723 }
3724
3725 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3726 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3727 proc->pid, thread->pid, return_error, return_error_param,
3728 (u64)tr->data_size, (u64)tr->offsets_size,
3729 return_error_line);
3730
3731 {
3732 struct binder_transaction_log_entry *fe;
3733
3734 e->return_error = return_error;
3735 e->return_error_param = return_error_param;
3736 e->return_error_line = return_error_line;
3737 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3738 *fe = *e;
3739 /*
3740 * write barrier to synchronize with initialization
3741 * of log entry
3742 */
3743 smp_wmb();
3744 WRITE_ONCE(e->debug_id_done, t_debug_id);
3745 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3746 }
3747
3748 BUG_ON(thread->return_error.cmd != BR_OK);
3749 if (in_reply_to) {
3750 binder_restore_priority(current, in_reply_to->saved_priority);
3751 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3752 binder_enqueue_thread_work(thread, &thread->return_error.work);
3753 binder_send_failed_reply(in_reply_to, return_error);
3754 } else {
3755 thread->return_error.cmd = return_error;
3756 binder_enqueue_thread_work(thread, &thread->return_error.work);
3757 }
3758 }
3759
3760 /**
3761 * binder_free_buf() - free the specified buffer
3762 * @proc: binder proc that owns buffer
3763 * @buffer: buffer to be freed
3764 *
3765 * If the buffer is for an async transaction, enqueue the next async
3766 * transaction from the node.
3767 *
3768 * Clean up the buffer and free it.
3769 */
3770 static void
3771 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3772 {
3773 binder_inner_proc_lock(proc);
3774 if (buffer->transaction) {
3775 buffer->transaction->buffer = NULL;
3776 buffer->transaction = NULL;
3777 }
3778 binder_inner_proc_unlock(proc);
3779 if (buffer->async_transaction && buffer->target_node) {
3780 struct binder_node *buf_node;
3781 struct binder_work *w;
3782
3783 buf_node = buffer->target_node;
3784 binder_node_inner_lock(buf_node);
3785 BUG_ON(!buf_node->has_async_transaction);
3786 BUG_ON(buf_node->proc != proc);
3787 w = binder_dequeue_work_head_ilocked(
3788 &buf_node->async_todo);
3789 if (!w) {
3790 buf_node->has_async_transaction = false;
3791 } else {
3792 binder_enqueue_work_ilocked(
3793 w, &proc->todo);
3794 binder_wakeup_proc_ilocked(proc);
3795 }
3796 binder_node_inner_unlock(buf_node);
3797 }
3798 trace_binder_transaction_buffer_release(buffer);
3799 binder_transaction_buffer_release(proc, buffer, 0, false);
3800 binder_alloc_free_buf(&proc->alloc, buffer);
3801 }
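/*
 * Note: binder_free_buf() is reached from BC_FREE_BUFFER in
 * binder_thread_write() below, once user space is done with the data
 * delivered in a BR_TRANSACTION/BR_REPLY, and from the fd-fixup error
 * path in binder_thread_read(). For async buffers it is also the point
 * at which the next queued async transaction on the node is released.
 */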
3802
3803 static int binder_thread_write(struct binder_proc *proc,
3804 struct binder_thread *thread,
3805 binder_uintptr_t binder_buffer, size_t size,
3806 binder_size_t *consumed)
3807 {
3808 uint32_t cmd;
3809 struct binder_context *context = proc->context;
3810 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3811 void __user *ptr = buffer + *consumed;
3812 void __user *end = buffer + size;
3813
3814 while (ptr < end && thread->return_error.cmd == BR_OK) {
3815 int ret;
3816
3817 if (get_user(cmd, (uint32_t __user *)ptr))
3818 return -EFAULT;
3819 ptr += sizeof(uint32_t);
3820 trace_binder_command(cmd);
3821 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3822 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3823 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3824 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3825 }
3826 switch (cmd) {
3827 case BC_INCREFS:
3828 case BC_ACQUIRE:
3829 case BC_RELEASE:
3830 case BC_DECREFS: {
3831 uint32_t target;
3832 const char *debug_string;
3833 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3834 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3835 struct binder_ref_data rdata;
3836
3837 if (get_user(target, (uint32_t __user *)ptr))
3838 return -EFAULT;
3839
3840 ptr += sizeof(uint32_t);
3841 ret = -1;
3842 if (increment && !target) {
3843 struct binder_node *ctx_mgr_node;
3844 mutex_lock(&context->context_mgr_node_lock);
3845 ctx_mgr_node = context->binder_context_mgr_node;
3846 if (ctx_mgr_node)
3847 ret = binder_inc_ref_for_node(
3848 proc, ctx_mgr_node,
3849 strong, NULL, &rdata);
3850 mutex_unlock(&context->context_mgr_node_lock);
3851 }
3852 if (ret)
3853 ret = binder_update_ref_for_handle(
3854 proc, target, increment, strong,
3855 &rdata);
3856 if (!ret && rdata.desc != target) {
3857 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3858 proc->pid, thread->pid,
3859 target, rdata.desc);
3860 }
3861 switch (cmd) {
3862 case BC_INCREFS:
3863 debug_string = "IncRefs";
3864 break;
3865 case BC_ACQUIRE:
3866 debug_string = "Acquire";
3867 break;
3868 case BC_RELEASE:
3869 debug_string = "Release";
3870 break;
3871 case BC_DECREFS:
3872 default:
3873 debug_string = "DecRefs";
3874 break;
3875 }
3876 if (ret) {
3877 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3878 proc->pid, thread->pid, debug_string,
3879 strong, target, ret);
3880 break;
3881 }
3882 binder_debug(BINDER_DEBUG_USER_REFS,
3883 "%d:%d %s ref %d desc %d s %d w %d\n",
3884 proc->pid, thread->pid, debug_string,
3885 rdata.debug_id, rdata.desc, rdata.strong,
3886 rdata.weak);
3887 break;
3888 }
3889 case BC_INCREFS_DONE:
3890 case BC_ACQUIRE_DONE: {
3891 binder_uintptr_t node_ptr;
3892 binder_uintptr_t cookie;
3893 struct binder_node *node;
3894 bool free_node;
3895
3896 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3897 return -EFAULT;
3898 ptr += sizeof(binder_uintptr_t);
3899 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3900 return -EFAULT;
3901 ptr += sizeof(binder_uintptr_t);
3902 node = binder_get_node(proc, node_ptr);
3903 if (node == NULL) {
3904 binder_user_error("%d:%d %s u%016llx no match\n",
3905 proc->pid, thread->pid,
3906 cmd == BC_INCREFS_DONE ?
3907 "BC_INCREFS_DONE" :
3908 "BC_ACQUIRE_DONE",
3909 (u64)node_ptr);
3910 break;
3911 }
3912 if (cookie != node->cookie) {
3913 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3914 proc->pid, thread->pid,
3915 cmd == BC_INCREFS_DONE ?
3916 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3917 (u64)node_ptr, node->debug_id,
3918 (u64)cookie, (u64)node->cookie);
3919 binder_put_node(node);
3920 break;
3921 }
3922 binder_node_inner_lock(node);
3923 if (cmd == BC_ACQUIRE_DONE) {
3924 if (node->pending_strong_ref == 0) {
3925 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3926 proc->pid, thread->pid,
3927 node->debug_id);
3928 binder_node_inner_unlock(node);
3929 binder_put_node(node);
3930 break;
3931 }
3932 node->pending_strong_ref = 0;
3933 } else {
3934 if (node->pending_weak_ref == 0) {
3935 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3936 proc->pid, thread->pid,
3937 node->debug_id);
3938 binder_node_inner_unlock(node);
3939 binder_put_node(node);
3940 break;
3941 }
3942 node->pending_weak_ref = 0;
3943 }
3944 free_node = binder_dec_node_nilocked(node,
3945 cmd == BC_ACQUIRE_DONE, 0);
3946 WARN_ON(free_node);
3947 binder_debug(BINDER_DEBUG_USER_REFS,
3948 "%d:%d %s node %d ls %d lw %d tr %d\n",
3949 proc->pid, thread->pid,
3950 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3951 node->debug_id, node->local_strong_refs,
3952 node->local_weak_refs, node->tmp_refs);
3953 binder_node_inner_unlock(node);
3954 binder_put_node(node);
3955 break;
3956 }
3957 case BC_ATTEMPT_ACQUIRE:
3958 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3959 return -EINVAL;
3960 case BC_ACQUIRE_RESULT:
3961 pr_err("BC_ACQUIRE_RESULT not supported\n");
3962 return -EINVAL;
3963
3964 case BC_FREE_BUFFER: {
3965 binder_uintptr_t data_ptr;
3966 struct binder_buffer *buffer;
3967
3968 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3969 return -EFAULT;
3970 ptr += sizeof(binder_uintptr_t);
3971
3972 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3973 data_ptr);
3974 if (IS_ERR_OR_NULL(buffer)) {
3975 if (PTR_ERR(buffer) == -EPERM) {
3976 binder_user_error(
3977 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3978 proc->pid, thread->pid,
3979 (u64)data_ptr);
3980 } else {
3981 binder_user_error(
3982 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3983 proc->pid, thread->pid,
3984 (u64)data_ptr);
3985 }
3986 break;
3987 }
3988 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3989 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3990 proc->pid, thread->pid, (u64)data_ptr,
3991 buffer->debug_id,
3992 buffer->transaction ? "active" : "finished");
3993 binder_free_buf(proc, buffer);
3994 break;
3995 }
3996
3997 case BC_TRANSACTION_SG:
3998 case BC_REPLY_SG: {
3999 struct binder_transaction_data_sg tr;
4000
4001 if (copy_from_user(&tr, ptr, sizeof(tr)))
4002 return -EFAULT;
4003 ptr += sizeof(tr);
4004 binder_transaction(proc, thread, &tr.transaction_data,
4005 cmd == BC_REPLY_SG, tr.buffers_size);
4006 break;
4007 }
4008 case BC_TRANSACTION:
4009 case BC_REPLY: {
4010 struct binder_transaction_data tr;
4011
4012 if (copy_from_user(&tr, ptr, sizeof(tr)))
4013 return -EFAULT;
4014 ptr += sizeof(tr);
4015 binder_transaction(proc, thread, &tr,
4016 cmd == BC_REPLY, 0);
4017 break;
4018 }
4019
4020 case BC_REGISTER_LOOPER:
4021 binder_debug(BINDER_DEBUG_THREADS,
4022 "%d:%d BC_REGISTER_LOOPER\n",
4023 proc->pid, thread->pid);
4024 binder_inner_proc_lock(proc);
4025 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4026 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4027 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4028 proc->pid, thread->pid);
4029 } else if (proc->requested_threads == 0) {
4030 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4031 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4032 proc->pid, thread->pid);
4033 } else {
4034 proc->requested_threads--;
4035 proc->requested_threads_started++;
4036 }
4037 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4038 binder_inner_proc_unlock(proc);
4039 break;
4040 case BC_ENTER_LOOPER:
4041 binder_debug(BINDER_DEBUG_THREADS,
4042 "%d:%d BC_ENTER_LOOPER\n",
4043 proc->pid, thread->pid);
4044 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4045 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4046 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4047 proc->pid, thread->pid);
4048 }
4049 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4050 break;
4051 case BC_EXIT_LOOPER:
4052 binder_debug(BINDER_DEBUG_THREADS,
4053 "%d:%d BC_EXIT_LOOPER\n",
4054 proc->pid, thread->pid);
4055 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4056 break;
4057
4058 case BC_REQUEST_DEATH_NOTIFICATION:
4059 case BC_CLEAR_DEATH_NOTIFICATION: {
4060 uint32_t target;
4061 binder_uintptr_t cookie;
4062 struct binder_ref *ref;
4063 struct binder_ref_death *death = NULL;
4064
4065 if (get_user(target, (uint32_t __user *)ptr))
4066 return -EFAULT;
4067 ptr += sizeof(uint32_t);
4068 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4069 return -EFAULT;
4070 ptr += sizeof(binder_uintptr_t);
4071 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4072 /*
4073 * Allocate memory for death notification
4074 * before taking lock
4075 */
4076 death = kzalloc(sizeof(*death), GFP_KERNEL);
4077 if (death == NULL) {
4078 WARN_ON(thread->return_error.cmd !=
4079 BR_OK);
4080 thread->return_error.cmd = BR_ERROR;
4081 binder_enqueue_thread_work(
4082 thread,
4083 &thread->return_error.work);
4084 binder_debug(
4085 BINDER_DEBUG_FAILED_TRANSACTION,
4086 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4087 proc->pid, thread->pid);
4088 break;
4089 }
4090 }
4091 binder_proc_lock(proc);
4092 ref = binder_get_ref_olocked(proc, target, false);
4093 if (ref == NULL) {
4094 binder_user_error("%d:%d %s invalid ref %d\n",
4095 proc->pid, thread->pid,
4096 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4097 "BC_REQUEST_DEATH_NOTIFICATION" :
4098 "BC_CLEAR_DEATH_NOTIFICATION",
4099 target);
4100 binder_proc_unlock(proc);
4101 kfree(death);
4102 break;
4103 }
4104
4105 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4106 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4107 proc->pid, thread->pid,
4108 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4109 "BC_REQUEST_DEATH_NOTIFICATION" :
4110 "BC_CLEAR_DEATH_NOTIFICATION",
4111 (u64)cookie, ref->data.debug_id,
4112 ref->data.desc, ref->data.strong,
4113 ref->data.weak, ref->node->debug_id);
4114
4115 binder_node_lock(ref->node);
4116 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4117 if (ref->death) {
4118 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4119 proc->pid, thread->pid);
4120 binder_node_unlock(ref->node);
4121 binder_proc_unlock(proc);
4122 kfree(death);
4123 break;
4124 }
4125 binder_stats_created(BINDER_STAT_DEATH);
4126 INIT_LIST_HEAD(&death->work.entry);
4127 death->cookie = cookie;
4128 ref->death = death;
4129 if (ref->node->proc == NULL) {
4130 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4131
4132 binder_inner_proc_lock(proc);
4133 binder_enqueue_work_ilocked(
4134 &ref->death->work, &proc->todo);
4135 binder_wakeup_proc_ilocked(proc);
4136 binder_inner_proc_unlock(proc);
4137 }
4138 } else {
4139 if (ref->death == NULL) {
4140 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4141 proc->pid, thread->pid);
4142 binder_node_unlock(ref->node);
4143 binder_proc_unlock(proc);
4144 break;
4145 }
4146 death = ref->death;
4147 if (death->cookie != cookie) {
4148 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4149 proc->pid, thread->pid,
4150 (u64)death->cookie,
4151 (u64)cookie);
4152 binder_node_unlock(ref->node);
4153 binder_proc_unlock(proc);
4154 break;
4155 }
4156 ref->death = NULL;
4157 binder_inner_proc_lock(proc);
4158 if (list_empty(&death->work.entry)) {
4159 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4160 if (thread->looper &
4161 (BINDER_LOOPER_STATE_REGISTERED |
4162 BINDER_LOOPER_STATE_ENTERED))
4163 binder_enqueue_thread_work_ilocked(
4164 thread,
4165 &death->work);
4166 else {
4167 binder_enqueue_work_ilocked(
4168 &death->work,
4169 &proc->todo);
4170 binder_wakeup_proc_ilocked(
4171 proc);
4172 }
4173 } else {
4174 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4175 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4176 }
4177 binder_inner_proc_unlock(proc);
4178 }
4179 binder_node_unlock(ref->node);
4180 binder_proc_unlock(proc);
4181 } break;
4182 case BC_DEAD_BINDER_DONE: {
4183 struct binder_work *w;
4184 binder_uintptr_t cookie;
4185 struct binder_ref_death *death = NULL;
4186
4187 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4188 return -EFAULT;
4189
4190 ptr += sizeof(cookie);
4191 binder_inner_proc_lock(proc);
4192 list_for_each_entry(w, &proc->delivered_death,
4193 entry) {
4194 struct binder_ref_death *tmp_death =
4195 container_of(w,
4196 struct binder_ref_death,
4197 work);
4198
4199 if (tmp_death->cookie == cookie) {
4200 death = tmp_death;
4201 break;
4202 }
4203 }
4204 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4205 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4206 proc->pid, thread->pid, (u64)cookie,
4207 death);
4208 if (death == NULL) {
4209 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4210 proc->pid, thread->pid, (u64)cookie);
4211 binder_inner_proc_unlock(proc);
4212 break;
4213 }
4214 binder_dequeue_work_ilocked(&death->work);
4215 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4216 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4217 if (thread->looper &
4218 (BINDER_LOOPER_STATE_REGISTERED |
4219 BINDER_LOOPER_STATE_ENTERED))
4220 binder_enqueue_thread_work_ilocked(
4221 thread, &death->work);
4222 else {
4223 binder_enqueue_work_ilocked(
4224 &death->work,
4225 &proc->todo);
4226 binder_wakeup_proc_ilocked(proc);
4227 }
4228 }
4229 binder_inner_proc_unlock(proc);
4230 } break;
4231
4232 default:
4233 pr_err("%d:%d unknown command %d\n",
4234 proc->pid, thread->pid, cmd);
4235 return -EINVAL;
4236 }
4237 *consumed = ptr - buffer;
4238 }
4239 return 0;
4240 }
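/*
 * Illustrative sketch (not driver code): the write buffer parsed by
 * binder_thread_write() is a packed stream of 32-bit BC_* command
 * words, each immediately followed by that command's payload. User
 * space normally builds this through libbinder; shown raw here with
 * hypothetical "handle", "method_code", "data" and "data_len" names,
 * a transaction could be queued roughly like:
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_transaction_data txn;
 *	} __attribute__((packed)) wb = {
 *		.cmd = BC_TRANSACTION,
 *		.txn = {
 *			.target.handle	 = handle,
 *			.code		 = method_code,
 *			.data_size	 = data_len,
 *			.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data,
 *		},
 *	};
 *
 * with binder_write_read.write_buffer/write_size pointing at it; the
 * BC_TRANSACTION case above then copies the payload with
 * copy_from_user() and hands it to binder_transaction().
 */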
4241
4242 static void binder_stat_br(struct binder_proc *proc,
4243 struct binder_thread *thread, uint32_t cmd)
4244 {
4245 trace_binder_return(cmd);
4246 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4247 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4248 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4249 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4250 }
4251 }
4252
4253 static int binder_put_node_cmd(struct binder_proc *proc,
4254 struct binder_thread *thread,
4255 void __user **ptrp,
4256 binder_uintptr_t node_ptr,
4257 binder_uintptr_t node_cookie,
4258 int node_debug_id,
4259 uint32_t cmd, const char *cmd_name)
4260 {
4261 void __user *ptr = *ptrp;
4262
4263 if (put_user(cmd, (uint32_t __user *)ptr))
4264 return -EFAULT;
4265 ptr += sizeof(uint32_t);
4266
4267 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4268 return -EFAULT;
4269 ptr += sizeof(binder_uintptr_t);
4270
4271 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4272 return -EFAULT;
4273 ptr += sizeof(binder_uintptr_t);
4274
4275 binder_stat_br(proc, thread, cmd);
4276 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4277 proc->pid, thread->pid, cmd_name, node_debug_id,
4278 (u64)node_ptr, (u64)node_cookie);
4279
4280 *ptrp = ptr;
4281 return 0;
4282 }
4283
4284 static int binder_wait_for_work(struct binder_thread *thread,
4285 bool do_proc_work)
4286 {
4287 DEFINE_WAIT(wait);
4288 struct binder_proc *proc = thread->proc;
4289 int ret = 0;
4290
4291 freezer_do_not_count();
4292 binder_inner_proc_lock(proc);
4293 for (;;) {
4294 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4295 if (binder_has_work_ilocked(thread, do_proc_work))
4296 break;
4297 if (do_proc_work)
4298 list_add(&thread->waiting_thread_node,
4299 &proc->waiting_threads);
4300 binder_inner_proc_unlock(proc);
4301 schedule();
4302 binder_inner_proc_lock(proc);
4303 list_del_init(&thread->waiting_thread_node);
4304 if (signal_pending(current)) {
4305 ret = -ERESTARTSYS;
4306 break;
4307 }
4308 }
4309 finish_wait(&thread->wait, &wait);
4310 binder_inner_proc_unlock(proc);
4311 freezer_count();
4312
4313 return ret;
4314 }
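/*
 * While blocked here for process-wide work, the thread parks itself on
 * proc->waiting_threads; that list is what the driver consults when it
 * needs an idle thread to hand new work to (see the
 * binder_wakeup_proc_ilocked() callers). The node is removed again on
 * every wakeup before the work check is repeated.
 */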
4315
4316 /**
4317 * binder_apply_fd_fixups() - finish fd translation
4318 * @proc: binder_proc associated with @t->buffer
4319 * @t: binder transaction with list of fd fixups
4320 *
4321 * Now that we are in the context of the transaction target
4322 * process, we can allocate and install fds. Process the
4323 * list of fds to translate and fix up the buffer with the
4324 * new fds.
4325 *
4326 * If we fail to allocate an fd, then free the resources by
4327 * fput'ing files that have not been processed and closing (via
4328 * binder_deferred_fd_close()) any fds that have already been allocated.
4329 */
4330 static int binder_apply_fd_fixups(struct binder_proc *proc,
4331 struct binder_transaction *t)
4332 {
4333 struct binder_txn_fd_fixup *fixup, *tmp;
4334 int ret = 0;
4335
4336 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4337 int fd = get_unused_fd_flags(O_CLOEXEC);
4338
4339 if (fd < 0) {
4340 binder_debug(BINDER_DEBUG_TRANSACTION,
4341 "failed fd fixup txn %d fd %d\n",
4342 t->debug_id, fd);
4343 ret = -ENOMEM;
4344 break;
4345 }
4346 binder_debug(BINDER_DEBUG_TRANSACTION,
4347 "fd fixup txn %d fd %d\n",
4348 t->debug_id, fd);
4349 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4350 fd_install(fd, fixup->file);
4351 fixup->file = NULL;
4352 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4353 fixup->offset, &fd,
4354 sizeof(u32))) {
4355 ret = -EINVAL;
4356 break;
4357 }
4358 }
4359 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4360 if (fixup->file) {
4361 fput(fixup->file);
4362 } else if (ret) {
4363 u32 fd;
4364 int err;
4365
4366 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4367 t->buffer,
4368 fixup->offset,
4369 sizeof(fd));
4370 WARN_ON(err);
4371 if (!err)
4372 binder_deferred_fd_close(fd);
4373 }
4374 list_del(&fixup->fixup_entry);
4375 kfree(fixup);
4376 }
4377
4378 return ret;
4379 }
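/*
 * The two loops above act as a small two-phase commit: fds are reserved
 * and installed first, and the second loop always frees the fixup list,
 * fput()ing files that were never installed and, only if an earlier
 * step failed, closing already-installed fds via
 * binder_deferred_fd_close().
 */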
4380
4381 static int binder_thread_read(struct binder_proc *proc,
4382 struct binder_thread *thread,
4383 binder_uintptr_t binder_buffer, size_t size,
4384 binder_size_t *consumed, int non_block)
4385 {
4386 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4387 void __user *ptr = buffer + *consumed;
4388 void __user *end = buffer + size;
4389
4390 int ret = 0;
4391 int wait_for_proc_work;
4392
4393 if (*consumed == 0) {
4394 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4395 return -EFAULT;
4396 ptr += sizeof(uint32_t);
4397 }
4398
4399 retry:
4400 binder_inner_proc_lock(proc);
4401 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4402 binder_inner_proc_unlock(proc);
4403
4404 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4405
4406 trace_binder_wait_for_work(wait_for_proc_work,
4407 !!thread->transaction_stack,
4408 !binder_worklist_empty(proc, &thread->todo));
4409 if (wait_for_proc_work) {
4410 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4411 BINDER_LOOPER_STATE_ENTERED))) {
4412 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4413 proc->pid, thread->pid, thread->looper);
4414 wait_event_interruptible(binder_user_error_wait,
4415 binder_stop_on_user_error < 2);
4416 }
4417 binder_restore_priority(current, proc->default_priority);
4418 }
4419
4420 if (non_block) {
4421 if (!binder_has_work(thread, wait_for_proc_work))
4422 ret = -EAGAIN;
4423 } else {
4424 ret = binder_wait_for_work(thread, wait_for_proc_work);
4425 }
4426
4427 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4428
4429 if (ret)
4430 return ret;
4431
4432 while (1) {
4433 uint32_t cmd;
4434 struct binder_transaction_data_secctx tr;
4435 struct binder_transaction_data *trd = &tr.transaction_data;
4436 struct binder_work *w = NULL;
4437 struct list_head *list = NULL;
4438 struct binder_transaction *t = NULL;
4439 struct binder_thread *t_from;
4440 size_t trsize = sizeof(*trd);
4441
4442 binder_inner_proc_lock(proc);
4443 if (!binder_worklist_empty_ilocked(&thread->todo))
4444 list = &thread->todo;
4445 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4446 wait_for_proc_work)
4447 list = &proc->todo;
4448 else {
4449 binder_inner_proc_unlock(proc);
4450
4451 /* no data added */
4452 if (ptr - buffer == 4 && !thread->looper_need_return)
4453 goto retry;
4454 break;
4455 }
4456
4457 if (end - ptr < sizeof(tr) + 4) {
4458 binder_inner_proc_unlock(proc);
4459 break;
4460 }
4461 w = binder_dequeue_work_head_ilocked(list);
4462 if (binder_worklist_empty_ilocked(&thread->todo))
4463 thread->process_todo = false;
4464
4465 switch (w->type) {
4466 case BINDER_WORK_TRANSACTION: {
4467 binder_inner_proc_unlock(proc);
4468 t = container_of(w, struct binder_transaction, work);
4469 } break;
4470 case BINDER_WORK_RETURN_ERROR: {
4471 struct binder_error *e = container_of(
4472 w, struct binder_error, work);
4473
4474 WARN_ON(e->cmd == BR_OK);
4475 binder_inner_proc_unlock(proc);
4476 if (put_user(e->cmd, (uint32_t __user *)ptr))
4477 return -EFAULT;
4478 cmd = e->cmd;
4479 e->cmd = BR_OK;
4480 ptr += sizeof(uint32_t);
4481
4482 binder_stat_br(proc, thread, cmd);
4483 } break;
4484 case BINDER_WORK_TRANSACTION_COMPLETE: {
4485 binder_inner_proc_unlock(proc);
4486 cmd = BR_TRANSACTION_COMPLETE;
4487 kfree(w);
4488 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4489 if (put_user(cmd, (uint32_t __user *)ptr))
4490 return -EFAULT;
4491 ptr += sizeof(uint32_t);
4492
4493 binder_stat_br(proc, thread, cmd);
4494 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4495 "%d:%d BR_TRANSACTION_COMPLETE\n",
4496 proc->pid, thread->pid);
4497 } break;
4498 case BINDER_WORK_NODE: {
4499 struct binder_node *node = container_of(w, struct binder_node, work);
4500 int strong, weak;
4501 binder_uintptr_t node_ptr = node->ptr;
4502 binder_uintptr_t node_cookie = node->cookie;
4503 int node_debug_id = node->debug_id;
4504 int has_weak_ref;
4505 int has_strong_ref;
4506 void __user *orig_ptr = ptr;
4507
4508 BUG_ON(proc != node->proc);
4509 strong = node->internal_strong_refs ||
4510 node->local_strong_refs;
4511 weak = !hlist_empty(&node->refs) ||
4512 node->local_weak_refs ||
4513 node->tmp_refs || strong;
4514 has_strong_ref = node->has_strong_ref;
4515 has_weak_ref = node->has_weak_ref;
4516
4517 if (weak && !has_weak_ref) {
4518 node->has_weak_ref = 1;
4519 node->pending_weak_ref = 1;
4520 node->local_weak_refs++;
4521 }
4522 if (strong && !has_strong_ref) {
4523 node->has_strong_ref = 1;
4524 node->pending_strong_ref = 1;
4525 node->local_strong_refs++;
4526 }
4527 if (!strong && has_strong_ref)
4528 node->has_strong_ref = 0;
4529 if (!weak && has_weak_ref)
4530 node->has_weak_ref = 0;
4531 if (!weak && !strong) {
4532 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4533 "%d:%d node %d u%016llx c%016llx deleted\n",
4534 proc->pid, thread->pid,
4535 node_debug_id,
4536 (u64)node_ptr,
4537 (u64)node_cookie);
4538 rb_erase(&node->rb_node, &proc->nodes);
4539 binder_inner_proc_unlock(proc);
4540 binder_node_lock(node);
4541 /*
4542 * Acquire the node lock before freeing the
4543 * node to serialize with other threads that
4544 * may have been holding the node lock while
4545 * decrementing this node (avoids race where
4546 * this thread frees while the other thread
4547 * is unlocking the node after the final
4548 * decrement)
4549 */
4550 binder_node_unlock(node);
4551 binder_free_node(node);
4552 } else
4553 binder_inner_proc_unlock(proc);
4554
4555 if (weak && !has_weak_ref)
4556 ret = binder_put_node_cmd(
4557 proc, thread, &ptr, node_ptr,
4558 node_cookie, node_debug_id,
4559 BR_INCREFS, "BR_INCREFS");
4560 if (!ret && strong && !has_strong_ref)
4561 ret = binder_put_node_cmd(
4562 proc, thread, &ptr, node_ptr,
4563 node_cookie, node_debug_id,
4564 BR_ACQUIRE, "BR_ACQUIRE");
4565 if (!ret && !strong && has_strong_ref)
4566 ret = binder_put_node_cmd(
4567 proc, thread, &ptr, node_ptr,
4568 node_cookie, node_debug_id,
4569 BR_RELEASE, "BR_RELEASE");
4570 if (!ret && !weak && has_weak_ref)
4571 ret = binder_put_node_cmd(
4572 proc, thread, &ptr, node_ptr,
4573 node_cookie, node_debug_id,
4574 BR_DECREFS, "BR_DECREFS");
4575 if (orig_ptr == ptr)
4576 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4577 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4578 proc->pid, thread->pid,
4579 node_debug_id,
4580 (u64)node_ptr,
4581 (u64)node_cookie);
4582 if (ret)
4583 return ret;
4584 } break;
4585 case BINDER_WORK_DEAD_BINDER:
4586 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4587 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4588 struct binder_ref_death *death;
4589 uint32_t cmd;
4590 binder_uintptr_t cookie;
4591
4592 death = container_of(w, struct binder_ref_death, work);
4593 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4594 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4595 else
4596 cmd = BR_DEAD_BINDER;
4597 cookie = death->cookie;
4598
4599 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4600 "%d:%d %s %016llx\n",
4601 proc->pid, thread->pid,
4602 cmd == BR_DEAD_BINDER ?
4603 "BR_DEAD_BINDER" :
4604 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4605 (u64)cookie);
4606 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4607 binder_inner_proc_unlock(proc);
4608 kfree(death);
4609 binder_stats_deleted(BINDER_STAT_DEATH);
4610 } else {
4611 binder_enqueue_work_ilocked(
4612 w, &proc->delivered_death);
4613 binder_inner_proc_unlock(proc);
4614 }
4615 if (put_user(cmd, (uint32_t __user *)ptr))
4616 return -EFAULT;
4617 ptr += sizeof(uint32_t);
4618 if (put_user(cookie,
4619 (binder_uintptr_t __user *)ptr))
4620 return -EFAULT;
4621 ptr += sizeof(binder_uintptr_t);
4622 binder_stat_br(proc, thread, cmd);
4623 if (cmd == BR_DEAD_BINDER)
4624 goto done; /* DEAD_BINDER notifications can cause transactions */
4625 } break;
4626 default:
4627 binder_inner_proc_unlock(proc);
4628 pr_err("%d:%d: bad work type %d\n",
4629 proc->pid, thread->pid, w->type);
4630 break;
4631 }
4632
4633 if (!t)
4634 continue;
4635
4636 BUG_ON(t->buffer == NULL);
4637 if (t->buffer->target_node) {
4638 struct binder_node *target_node = t->buffer->target_node;
4639 struct binder_priority node_prio;
4640
4641 trd->target.ptr = target_node->ptr;
4642 trd->cookie = target_node->cookie;
4643 node_prio.sched_policy = target_node->sched_policy;
4644 node_prio.prio = target_node->min_priority;
4645 binder_transaction_priority(current, t, node_prio,
4646 target_node->inherit_rt);
4647 cmd = BR_TRANSACTION;
4648 } else {
4649 trd->target.ptr = 0;
4650 trd->cookie = 0;
4651 cmd = BR_REPLY;
4652 }
4653 trd->code = t->code;
4654 trd->flags = t->flags;
4655 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4656
4657 t_from = binder_get_txn_from(t);
4658 if (t_from) {
4659 struct task_struct *sender = t_from->proc->tsk;
4660
4661 trd->sender_pid =
4662 task_tgid_nr_ns(sender,
4663 task_active_pid_ns(current));
4664 } else {
4665 trd->sender_pid = 0;
4666 }
4667
4668 ret = binder_apply_fd_fixups(proc, t);
4669 if (ret) {
4670 struct binder_buffer *buffer = t->buffer;
4671 bool oneway = !!(t->flags & TF_ONE_WAY);
4672 int tid = t->debug_id;
4673
4674 if (t_from)
4675 binder_thread_dec_tmpref(t_from);
4676 buffer->transaction = NULL;
4677 binder_cleanup_transaction(t, "fd fixups failed",
4678 BR_FAILED_REPLY);
4679 binder_free_buf(proc, buffer);
4680 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4681 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4682 proc->pid, thread->pid,
4683 oneway ? "async " :
4684 (cmd == BR_REPLY ? "reply " : ""),
4685 tid, BR_FAILED_REPLY, ret, __LINE__);
4686 if (cmd == BR_REPLY) {
4687 cmd = BR_FAILED_REPLY;
4688 if (put_user(cmd, (uint32_t __user *)ptr))
4689 return -EFAULT;
4690 ptr += sizeof(uint32_t);
4691 binder_stat_br(proc, thread, cmd);
4692 break;
4693 }
4694 continue;
4695 }
4696 trd->data_size = t->buffer->data_size;
4697 trd->offsets_size = t->buffer->offsets_size;
4698 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4699 trd->data.ptr.offsets = trd->data.ptr.buffer +
4700 ALIGN(t->buffer->data_size,
4701 sizeof(void *));
4702
4703 tr.secctx = t->security_ctx;
4704 if (t->security_ctx) {
4705 cmd = BR_TRANSACTION_SEC_CTX;
4706 trsize = sizeof(tr);
4707 }
4708 if (put_user(cmd, (uint32_t __user *)ptr)) {
4709 if (t_from)
4710 binder_thread_dec_tmpref(t_from);
4711
4712 binder_cleanup_transaction(t, "put_user failed",
4713 BR_FAILED_REPLY);
4714
4715 return -EFAULT;
4716 }
4717 ptr += sizeof(uint32_t);
4718 if (copy_to_user(ptr, &tr, trsize)) {
4719 if (t_from)
4720 binder_thread_dec_tmpref(t_from);
4721
4722 binder_cleanup_transaction(t, "copy_to_user failed",
4723 BR_FAILED_REPLY);
4724
4725 return -EFAULT;
4726 }
4727 ptr += trsize;
4728
4729 trace_binder_transaction_received(t);
4730 binder_stat_br(proc, thread, cmd);
4731 binder_debug(BINDER_DEBUG_TRANSACTION,
4732 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4733 proc->pid, thread->pid,
4734 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4735 (cmd == BR_TRANSACTION_SEC_CTX) ?
4736 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4737 t->debug_id, t_from ? t_from->proc->pid : 0,
4738 t_from ? t_from->pid : 0, cmd,
4739 t->buffer->data_size, t->buffer->offsets_size,
4740 (u64)trd->data.ptr.buffer,
4741 (u64)trd->data.ptr.offsets);
4742
4743 if (t_from)
4744 binder_thread_dec_tmpref(t_from);
4745 t->buffer->allow_user_free = 1;
4746 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4747 binder_inner_proc_lock(thread->proc);
4748 t->to_parent = thread->transaction_stack;
4749 t->to_thread = thread;
4750 thread->transaction_stack = t;
4751 binder_inner_proc_unlock(thread->proc);
4752 } else {
4753 binder_free_transaction(t);
4754 }
4755 break;
4756 }
4757
4758 done:
4759
4760 *consumed = ptr - buffer;
4761 binder_inner_proc_lock(proc);
4762 if (proc->requested_threads == 0 &&
4763 list_empty(&thread->proc->waiting_threads) &&
4764 proc->requested_threads_started < proc->max_threads &&
4765 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4766 BINDER_LOOPER_STATE_ENTERED))
4767 /* the user-space code fails to spawn a new thread if we leave this out */) {
4768 proc->requested_threads++;
4769 binder_inner_proc_unlock(proc);
4770 binder_debug(BINDER_DEBUG_THREADS,
4771 "%d:%d BR_SPAWN_LOOPER\n",
4772 proc->pid, thread->pid);
4773 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4774 return -EFAULT;
4775 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4776 } else
4777 binder_inner_proc_unlock(proc);
4778 return 0;
4779 }
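/*
 * The read buffer filled above mirrors the write side: a stream of
 * 32-bit BR_* return codes, each followed by its payload (a
 * binder_transaction_data or binder_transaction_data_secctx for the
 * transaction and reply codes, a cookie for the death notification
 * codes, nothing for BR_NOOP and the other simple codes).
 */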
4780
4781 static void binder_release_work(struct binder_proc *proc,
4782 struct list_head *list)
4783 {
4784 struct binder_work *w;
4785
4786 while (1) {
4787 w = binder_dequeue_work_head(proc, list);
4788 if (!w)
4789 return;
4790
4791 switch (w->type) {
4792 case BINDER_WORK_TRANSACTION: {
4793 struct binder_transaction *t;
4794
4795 t = container_of(w, struct binder_transaction, work);
4796
4797 binder_cleanup_transaction(t, "process died.",
4798 BR_DEAD_REPLY);
4799 } break;
4800 case BINDER_WORK_RETURN_ERROR: {
4801 struct binder_error *e = container_of(
4802 w, struct binder_error, work);
4803
4804 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4805 "undelivered TRANSACTION_ERROR: %u\n",
4806 e->cmd);
4807 } break;
4808 case BINDER_WORK_TRANSACTION_COMPLETE: {
4809 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4810 "undelivered TRANSACTION_COMPLETE\n");
4811 kfree(w);
4812 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4813 } break;
4814 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4815 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4816 struct binder_ref_death *death;
4817
4818 death = container_of(w, struct binder_ref_death, work);
4819 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4820 "undelivered death notification, %016llx\n",
4821 (u64)death->cookie);
4822 kfree(death);
4823 binder_stats_deleted(BINDER_STAT_DEATH);
4824 } break;
4825 default:
4826 pr_err("unexpected work type, %d, not freed\n",
4827 w->type);
4828 break;
4829 }
4830 }
4831
4832 }
4833
4834 static struct binder_thread *binder_get_thread_ilocked(
4835 struct binder_proc *proc, struct binder_thread *new_thread)
4836 {
4837 struct binder_thread *thread = NULL;
4838 struct rb_node *parent = NULL;
4839 struct rb_node **p = &proc->threads.rb_node;
4840
4841 while (*p) {
4842 parent = *p;
4843 thread = rb_entry(parent, struct binder_thread, rb_node);
4844
4845 if (current->pid < thread->pid)
4846 p = &(*p)->rb_left;
4847 else if (current->pid > thread->pid)
4848 p = &(*p)->rb_right;
4849 else
4850 return thread;
4851 }
4852 if (!new_thread)
4853 return NULL;
4854 thread = new_thread;
4855 binder_stats_created(BINDER_STAT_THREAD);
4856 thread->proc = proc;
4857 thread->pid = current->pid;
4858 get_task_struct(current);
4859 thread->task = current;
4860 atomic_set(&thread->tmp_ref, 0);
4861 init_waitqueue_head(&thread->wait);
4862 INIT_LIST_HEAD(&thread->todo);
4863 rb_link_node(&thread->rb_node, parent, p);
4864 rb_insert_color(&thread->rb_node, &proc->threads);
4865 thread->looper_need_return = true;
4866 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4867 thread->return_error.cmd = BR_OK;
4868 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4869 thread->reply_error.cmd = BR_OK;
4870 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4871 return thread;
4872 }
4873
4874 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4875 {
4876 struct binder_thread *thread;
4877 struct binder_thread *new_thread;
4878
4879 binder_inner_proc_lock(proc);
4880 thread = binder_get_thread_ilocked(proc, NULL);
4881 binder_inner_proc_unlock(proc);
4882 if (!thread) {
4883 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4884 if (new_thread == NULL)
4885 return NULL;
4886 binder_inner_proc_lock(proc);
4887 thread = binder_get_thread_ilocked(proc, new_thread);
4888 binder_inner_proc_unlock(proc);
4889 if (thread != new_thread)
4890 kfree(new_thread);
4891 }
4892 return thread;
4893 }
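/*
 * Note the allocate-then-retry pattern above: the thread struct is
 * allocated with GFP_KERNEL outside the inner lock (a spinlock that
 * cannot be held across a sleeping allocation), the lookup is repeated
 * under the lock, and the unused allocation is freed if another task
 * raced us and inserted a thread for this pid first.
 */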
4894
4895 static void binder_free_proc(struct binder_proc *proc)
4896 {
4897 BUG_ON(!list_empty(&proc->todo));
4898 BUG_ON(!list_empty(&proc->delivered_death));
4899 binder_alloc_deferred_release(&proc->alloc);
4900 put_task_struct(proc->tsk);
4901 binder_stats_deleted(BINDER_STAT_PROC);
4902 kfree(proc);
4903 }
4904
4905 static void binder_free_thread(struct binder_thread *thread)
4906 {
4907 BUG_ON(!list_empty(&thread->todo));
4908 binder_stats_deleted(BINDER_STAT_THREAD);
4909 binder_proc_dec_tmpref(thread->proc);
4910 put_task_struct(thread->task);
4911 kfree(thread);
4912 }
4913
4914 static int binder_thread_release(struct binder_proc *proc,
4915 struct binder_thread *thread)
4916 {
4917 struct binder_transaction *t;
4918 struct binder_transaction *send_reply = NULL;
4919 int active_transactions = 0;
4920 struct binder_transaction *last_t = NULL;
4921
4922 binder_inner_proc_lock(thread->proc);
4923 /*
4924 * take a ref on the proc so it survives
4925 * after we remove this thread from proc->threads.
4926 * The corresponding dec is when we actually
4927 * free the thread in binder_free_thread()
4928 */
4929 proc->tmp_ref++;
4930 /*
4931 * take a ref on this thread to ensure it
4932 * survives while we are releasing it
4933 */
4934 atomic_inc(&thread->tmp_ref);
4935 rb_erase(&thread->rb_node, &proc->threads);
4936 t = thread->transaction_stack;
4937 if (t) {
4938 spin_lock(&t->lock);
4939 if (t->to_thread == thread)
4940 send_reply = t;
4941 } else {
4942 __acquire(&t->lock);
4943 }
4944 thread->is_dead = true;
4945
4946 while (t) {
4947 last_t = t;
4948 active_transactions++;
4949 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4950 "release %d:%d transaction %d %s, still active\n",
4951 proc->pid, thread->pid,
4952 t->debug_id,
4953 (t->to_thread == thread) ? "in" : "out");
4954
4955 if (t->to_thread == thread) {
4956 t->to_proc = NULL;
4957 t->to_thread = NULL;
4958 if (t->buffer) {
4959 t->buffer->transaction = NULL;
4960 t->buffer = NULL;
4961 }
4962 t = t->to_parent;
4963 } else if (t->from == thread) {
4964 t->from = NULL;
4965 t = t->from_parent;
4966 } else
4967 BUG();
4968 spin_unlock(&last_t->lock);
4969 if (t)
4970 spin_lock(&t->lock);
4971 else
4972 __acquire(&t->lock);
4973 }
4974 /* annotation for sparse, lock not acquired in last iteration above */
4975 __release(&t->lock);
4976
4977 /*
4978 * If this thread used poll, make sure we remove the waitqueue
4979 * from any epoll data structures holding it with POLLFREE.
4980 * waitqueue_active() is safe to use here because we're holding
4981 * the inner lock.
4982 */
4983 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4984 waitqueue_active(&thread->wait)) {
4985 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4986 }
4987
4988 binder_inner_proc_unlock(thread->proc);
4989
4990 /*
4991 * This is needed to avoid races between wake_up_poll() above and
4992 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4993 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4994 * lock, so we can be sure it's done after calling synchronize_rcu().
4995 */
4996 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4997 synchronize_rcu();
4998
4999 if (send_reply)
5000 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5001 binder_release_work(proc, &thread->todo);
5002 binder_thread_dec_tmpref(thread);
5003 return active_transactions;
5004 }
5005
5006 static __poll_t binder_poll(struct file *filp,
5007 struct poll_table_struct *wait)
5008 {
5009 struct binder_proc *proc = filp->private_data;
5010 struct binder_thread *thread = NULL;
5011 bool wait_for_proc_work;
5012
5013 thread = binder_get_thread(proc);
5014 if (!thread)
5015 return POLLERR;
5016
5017 binder_inner_proc_lock(thread->proc);
5018 thread->looper |= BINDER_LOOPER_STATE_POLL;
5019 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5020
5021 binder_inner_proc_unlock(thread->proc);
5022
5023 poll_wait(filp, &thread->wait, wait);
5024
5025 if (binder_has_work(thread, wait_for_proc_work))
5026 return EPOLLIN;
5027
5028 return 0;
5029 }
5030
5031 static int binder_ioctl_write_read(struct file *filp,
5032 unsigned int cmd, unsigned long arg,
5033 struct binder_thread *thread)
5034 {
5035 int ret = 0;
5036 struct binder_proc *proc = filp->private_data;
5037 unsigned int size = _IOC_SIZE(cmd);
5038 void __user *ubuf = (void __user *)arg;
5039 struct binder_write_read bwr;
5040
5041 if (size != sizeof(struct binder_write_read)) {
5042 ret = -EINVAL;
5043 goto out;
5044 }
5045 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5046 ret = -EFAULT;
5047 goto out;
5048 }
5049 binder_debug(BINDER_DEBUG_READ_WRITE,
5050 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5051 proc->pid, thread->pid,
5052 (u64)bwr.write_size, (u64)bwr.write_buffer,
5053 (u64)bwr.read_size, (u64)bwr.read_buffer);
5054
5055 if (bwr.write_size > 0) {
5056 ret = binder_thread_write(proc, thread,
5057 bwr.write_buffer,
5058 bwr.write_size,
5059 &bwr.write_consumed);
5060 trace_binder_write_done(ret);
5061 if (ret < 0) {
5062 bwr.read_consumed = 0;
5063 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5064 ret = -EFAULT;
5065 goto out;
5066 }
5067 }
5068 if (bwr.read_size > 0) {
5069 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5070 bwr.read_size,
5071 &bwr.read_consumed,
5072 filp->f_flags & O_NONBLOCK);
5073 trace_binder_read_done(ret);
5074 binder_inner_proc_lock(proc);
5075 if (!binder_worklist_empty_ilocked(&proc->todo))
5076 binder_wakeup_proc_ilocked(proc);
5077 binder_inner_proc_unlock(proc);
5078 if (ret < 0) {
5079 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5080 ret = -EFAULT;
5081 goto out;
5082 }
5083 }
5084 binder_debug(BINDER_DEBUG_READ_WRITE,
5085 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5086 proc->pid, thread->pid,
5087 (u64)bwr.write_consumed, (u64)bwr.write_size,
5088 (u64)bwr.read_consumed, (u64)bwr.read_size);
5089 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5090 ret = -EFAULT;
5091 goto out;
5092 }
5093 out:
5094 return ret;
5095 }
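/*
 * Illustrative sketch (not driver code): a minimal user-space caller of
 * BINDER_WRITE_READ, assuming an already-opened binder fd ("binder_fd"
 * is a hypothetical name) and using the uapi struct binder_write_read
 * consumed by binder_ioctl_write_read() above:
 *
 *	struct binder_write_read bwr = {0};
 *	uint32_t cmds[] = { BC_ENTER_LOOPER };
 *	uint32_t rbuf[128];
 *
 *	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)cmds;
 *	bwr.write_size	 = sizeof(cmds);
 *	bwr.read_buffer	 = (binder_uintptr_t)(uintptr_t)rbuf;
 *	bwr.read_size	 = sizeof(rbuf);
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		err(1, "BINDER_WRITE_READ");
 *
 * On return, write_consumed/read_consumed report how much of each
 * buffer the write and read passes above actually processed.
 */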
5096
5097 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5098 struct flat_binder_object *fbo)
5099 {
5100 int ret = 0;
5101 struct binder_proc *proc = filp->private_data;
5102 struct binder_context *context = proc->context;
5103 struct binder_node *new_node;
5104 kuid_t curr_euid = current_euid();
5105
5106 mutex_lock(&context->context_mgr_node_lock);
5107 if (context->binder_context_mgr_node) {
5108 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5109 ret = -EBUSY;
5110 goto out;
5111 }
5112 ret = security_binder_set_context_mgr(proc->tsk);
5113 if (ret < 0)
5114 goto out;
5115 if (uid_valid(context->binder_context_mgr_uid)) {
5116 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5117 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5118 from_kuid(&init_user_ns, curr_euid),
5119 from_kuid(&init_user_ns,
5120 context->binder_context_mgr_uid));
5121 ret = -EPERM;
5122 goto out;
5123 }
5124 } else {
5125 context->binder_context_mgr_uid = curr_euid;
5126 }
5127 new_node = binder_new_node(proc, fbo);
5128 if (!new_node) {
5129 ret = -ENOMEM;
5130 goto out;
5131 }
5132 binder_node_lock(new_node);
5133 new_node->local_weak_refs++;
5134 new_node->local_strong_refs++;
5135 new_node->has_strong_ref = 1;
5136 new_node->has_weak_ref = 1;
5137 context->binder_context_mgr_node = new_node;
5138 binder_node_unlock(new_node);
5139 binder_put_node(new_node);
5140 out:
5141 mutex_unlock(&context->context_mgr_node_lock);
5142 return ret;
5143 }
5144
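/**
 * binder_ioctl_get_node_info_for_ref() - look up node info for a handle
 * @proc:	binder_proc of the caller (must be the context manager)
 * @info:	in/out structure; only info->handle may be set on entry
 *
 * Fills in the strong and weak reference counts of the node the given
 * handle refers to. Returns -EINVAL for malformed input or an unknown
 * handle and -EPERM if the caller is not the context manager.
 */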
5145 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5146 struct binder_node_info_for_ref *info)
5147 {
5148 struct binder_node *node;
5149 struct binder_context *context = proc->context;
5150 __u32 handle = info->handle;
5151
5152 if (info->strong_count || info->weak_count || info->reserved1 ||
5153 info->reserved2 || info->reserved3) {
5154 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5155 proc->pid);
5156 return -EINVAL;
5157 }
5158
5159 /* This ioctl may only be used by the context manager */
5160 mutex_lock(&context->context_mgr_node_lock);
5161 if (!context->binder_context_mgr_node ||
5162 context->binder_context_mgr_node->proc != proc) {
5163 mutex_unlock(&context->context_mgr_node_lock);
5164 return -EPERM;
5165 }
5166 mutex_unlock(&context->context_mgr_node_lock);
5167
5168 node = binder_get_node_from_ref(proc, handle, true, NULL);
5169 if (!node)
5170 return -EINVAL;
5171
5172 info->strong_count = node->local_strong_refs +
5173 node->internal_strong_refs;
5174 info->weak_count = node->local_weak_refs;
5175
5176 binder_put_node(node);
5177
5178 return 0;
5179 }
5180
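/**
 * binder_ioctl_get_node_debug_info() - get debug info on the next node
 * @proc:	binder_proc of the caller
 * @info:	in/out structure; info->ptr selects the starting point
 *
 * Reports ptr, cookie and ref flags of the first node of @proc whose
 * ptr is strictly greater than info->ptr, or leaves the output zeroed
 * if no such node exists, allowing userspace to iterate all nodes.
 */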
5181 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5182 struct binder_node_debug_info *info)
5183 {
5184 struct rb_node *n;
5185 binder_uintptr_t ptr = info->ptr;
5186
5187 memset(info, 0, sizeof(*info));
5188
5189 binder_inner_proc_lock(proc);
5190 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5191 struct binder_node *node = rb_entry(n, struct binder_node,
5192 rb_node);
5193 if (node->ptr > ptr) {
5194 info->ptr = node->ptr;
5195 info->cookie = node->cookie;
5196 info->has_strong_ref = node->has_strong_ref;
5197 info->has_weak_ref = node->has_weak_ref;
5198 break;
5199 }
5200 }
5201 binder_inner_proc_unlock(proc);
5202
5203 return 0;
5204 }
5205
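/**
 * binder_ioctl() - ioctl dispatcher for the binder device
 * @filp:	file pointer for the binder device
 * @cmd:	ioctl command
 * @arg:	command-specific argument, usually a userspace pointer
 *
 * Looks up (or creates) the binder_thread for the calling task and
 * dispatches to the per-command handlers above. Also installed as the
 * compat_ioctl handler in binder_fops.
 */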
5206 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5207 {
5208 int ret;
5209 struct binder_proc *proc = filp->private_data;
5210 struct binder_thread *thread;
5211 unsigned int size = _IOC_SIZE(cmd);
5212 void __user *ubuf = (void __user *)arg;
5213
5214 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5215 proc->pid, current->pid, cmd, arg);*/
5216
5217 binder_selftest_alloc(&proc->alloc);
5218
5219 trace_binder_ioctl(cmd, arg);
5220
5221 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5222 if (ret)
5223 goto err_unlocked;
5224
5225 thread = binder_get_thread(proc);
5226 if (thread == NULL) {
5227 ret = -ENOMEM;
5228 goto err;
5229 }
5230
5231 switch (cmd) {
5232 case BINDER_WRITE_READ:
5233 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5234 if (ret)
5235 goto err;
5236 break;
5237 case BINDER_SET_MAX_THREADS: {
5238 int max_threads;
5239
5240 if (copy_from_user(&max_threads, ubuf,
5241 sizeof(max_threads))) {
5242 ret = -EINVAL;
5243 goto err;
5244 }
5245 binder_inner_proc_lock(proc);
5246 proc->max_threads = max_threads;
5247 binder_inner_proc_unlock(proc);
5248 break;
5249 }
5250 case BINDER_SET_CONTEXT_MGR_EXT: {
5251 struct flat_binder_object fbo;
5252
5253 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5254 ret = -EINVAL;
5255 goto err;
5256 }
5257 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5258 if (ret)
5259 goto err;
5260 break;
5261 }
5262 case BINDER_SET_CONTEXT_MGR:
5263 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5264 if (ret)
5265 goto err;
5266 break;
5267 case BINDER_THREAD_EXIT:
5268 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5269 proc->pid, thread->pid);
5270 binder_thread_release(proc, thread);
5271 thread = NULL;
5272 break;
5273 case BINDER_VERSION: {
5274 struct binder_version __user *ver = ubuf;
5275
5276 if (size != sizeof(struct binder_version)) {
5277 ret = -EINVAL;
5278 goto err;
5279 }
5280 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5281 &ver->protocol_version)) {
5282 ret = -EINVAL;
5283 goto err;
5284 }
5285 break;
5286 }
5287 case BINDER_GET_NODE_INFO_FOR_REF: {
5288 struct binder_node_info_for_ref info;
5289
5290 if (copy_from_user(&info, ubuf, sizeof(info))) {
5291 ret = -EFAULT;
5292 goto err;
5293 }
5294
5295 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5296 if (ret < 0)
5297 goto err;
5298
5299 if (copy_to_user(ubuf, &info, sizeof(info))) {
5300 ret = -EFAULT;
5301 goto err;
5302 }
5303
5304 break;
5305 }
5306 case BINDER_GET_NODE_DEBUG_INFO: {
5307 struct binder_node_debug_info info;
5308
5309 if (copy_from_user(&info, ubuf, sizeof(info))) {
5310 ret = -EFAULT;
5311 goto err;
5312 }
5313
5314 ret = binder_ioctl_get_node_debug_info(proc, &info);
5315 if (ret < 0)
5316 goto err;
5317
5318 if (copy_to_user(ubuf, &info, sizeof(info))) {
5319 ret = -EFAULT;
5320 goto err;
5321 }
5322 break;
5323 }
5324 default:
5325 ret = -EINVAL;
5326 goto err;
5327 }
5328 ret = 0;
5329 err:
5330 if (thread)
5331 thread->looper_need_return = false;
5332 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5333 if (ret && ret != -ERESTARTSYS)
5334 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5335 err_unlocked:
5336 trace_binder_ioctl_done(ret);
5337 return ret;
5338 }
5339
5340 static void binder_vma_open(struct vm_area_struct *vma)
5341 {
5342 struct binder_proc *proc = vma->vm_private_data;
5343
5344 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5345 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5346 proc->pid, vma->vm_start, vma->vm_end,
5347 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5348 (unsigned long)pgprot_val(vma->vm_page_prot));
5349 }
5350
5351 static void binder_vma_close(struct vm_area_struct *vma)
5352 {
5353 struct binder_proc *proc = vma->vm_private_data;
5354
5355 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5356 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5357 proc->pid, vma->vm_start, vma->vm_end,
5358 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5359 (unsigned long)pgprot_val(vma->vm_page_prot));
5360 binder_alloc_vma_close(&proc->alloc);
5361 }
5362
5363 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5364 {
5365 return VM_FAULT_SIGBUS;
5366 }
5367
5368 static const struct vm_operations_struct binder_vm_ops = {
5369 .open = binder_vma_open,
5370 .close = binder_vma_close,
5371 .fault = binder_vm_fault,
5372 };
5373
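/**
 * binder_mmap() - map the binder buffer area into userspace
 * @filp:	file pointer for the binder device
 * @vma:	userspace vma describing the requested mapping
 *
 * The mapping must be created by the process that opened the device
 * and must not be writable: VM_WRITE is rejected and VM_MAYWRITE is
 * cleared. The vma is then handed to binder_alloc_mmap_handler(),
 * which manages the transaction buffer area behind it.
 */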
5374 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5375 {
5376 int ret;
5377 struct binder_proc *proc = filp->private_data;
5378 const char *failure_string;
5379
5380 if (proc->tsk != current->group_leader)
5381 return -EINVAL;
5382
5383 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5384 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5385 __func__, proc->pid, vma->vm_start, vma->vm_end,
5386 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5387 (unsigned long)pgprot_val(vma->vm_page_prot));
5388
5389 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5390 ret = -EPERM;
5391 failure_string = "bad vm_flags";
5392 goto err_bad_arg;
5393 }
5394 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5395 vma->vm_flags &= ~VM_MAYWRITE;
5396
5397 vma->vm_ops = &binder_vm_ops;
5398 vma->vm_private_data = proc;
5399
5400 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5401 if (ret)
5402 return ret;
5403 return 0;
5404
5405 err_bad_arg:
5406 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5407 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5408 return ret;
5409 }
5410
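/**
 * binder_open() - open handler for a binder device
 * @nodp:	inode of the device node (binderfs or misc device)
 * @filp:	file being opened
 *
 * Allocates and initializes a binder_proc for the opening process,
 * inherits the caller's scheduling policy as the default transaction
 * priority, adds the proc to the global binder_procs list and creates
 * the per-PID debugfs/binderfs entries on the first open for that PID.
 */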
5411 static int binder_open(struct inode *nodp, struct file *filp)
5412 {
5413 struct binder_proc *proc, *itr;
5414 struct binder_device *binder_dev;
5415 struct binderfs_info *info;
5416 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5417 bool existing_pid = false;
5418
5419 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5420 current->group_leader->pid, current->pid);
5421
5422 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5423 if (proc == NULL)
5424 return -ENOMEM;
5425 spin_lock_init(&proc->inner_lock);
5426 spin_lock_init(&proc->outer_lock);
5427 get_task_struct(current->group_leader);
5428 proc->tsk = current->group_leader;
5429 INIT_LIST_HEAD(&proc->todo);
5430 if (binder_supported_policy(current->policy)) {
5431 proc->default_priority.sched_policy = current->policy;
5432 proc->default_priority.prio = current->normal_prio;
5433 } else {
5434 proc->default_priority.sched_policy = SCHED_NORMAL;
5435 proc->default_priority.prio = NICE_TO_PRIO(0);
5436 }
5437
5438 /* binderfs stashes devices in i_private */
5439 if (is_binderfs_device(nodp)) {
5440 binder_dev = nodp->i_private;
5441 info = nodp->i_sb->s_fs_info;
5442 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5443 } else {
5444 binder_dev = container_of(filp->private_data,
5445 struct binder_device, miscdev);
5446 }
5447 proc->context = &binder_dev->context;
5448 binder_alloc_init(&proc->alloc);
5449
5450 binder_stats_created(BINDER_STAT_PROC);
5451 proc->pid = current->group_leader->pid;
5452 INIT_LIST_HEAD(&proc->delivered_death);
5453 INIT_LIST_HEAD(&proc->waiting_threads);
5454 filp->private_data = proc;
5455
5456 mutex_lock(&binder_procs_lock);
5457 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5458 if (itr->pid == proc->pid) {
5459 existing_pid = true;
5460 break;
5461 }
5462 }
5463 hlist_add_head(&proc->proc_node, &binder_procs);
5464 mutex_unlock(&binder_procs_lock);
5465
5466 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5467 char strbuf[11];
5468
5469 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5470 /*
5471 * proc debug entries are shared between contexts.
5472 * Only create for the first PID to avoid debugfs log spamming.
5473 * The printing code will anyway print all contexts for a given
5474 * PID so this is not a problem.
5475 */
5476 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5477 binder_debugfs_dir_entry_proc,
5478 (void *)(unsigned long)proc->pid,
5479 &proc_fops);
5480 }
5481
5482 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5483 char strbuf[11];
5484 struct dentry *binderfs_entry;
5485
5486 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5487 /*
5488 * Similar to debugfs, the process specific log file is shared
5489 * between contexts. Only create for the first PID.
5490 * This is ok since same as debugfs, the log file will contain
5491 * information on all contexts of a given PID.
5492 */
5493 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5494 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5495 if (!IS_ERR(binderfs_entry)) {
5496 proc->binderfs_entry = binderfs_entry;
5497 } else {
5498 int error;
5499
5500 error = PTR_ERR(binderfs_entry);
5501 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5502 strbuf, error);
5503 }
5504 }
5505
5506 return 0;
5507 }
5508
5509 static int binder_flush(struct file *filp, fl_owner_t id)
5510 {
5511 struct binder_proc *proc = filp->private_data;
5512
5513 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5514
5515 return 0;
5516 }
5517
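/*
 * Wake up every thread of @proc that is blocked in binder_thread_read()
 * so it notices looper_need_return and returns to userspace; this is
 * the deferred part of flush() on the binder fd.
 */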
5518 static void binder_deferred_flush(struct binder_proc *proc)
5519 {
5520 struct rb_node *n;
5521 int wake_count = 0;
5522
5523 binder_inner_proc_lock(proc);
5524 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5525 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5526
5527 thread->looper_need_return = true;
5528 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5529 wake_up_interruptible(&thread->wait);
5530 wake_count++;
5531 }
5532 }
5533 binder_inner_proc_unlock(proc);
5534
5535 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5536 "binder_flush: %d woke %d threads\n", proc->pid,
5537 wake_count);
5538 }
5539
5540 static int binder_release(struct inode *nodp, struct file *filp)
5541 {
5542 struct binder_proc *proc = filp->private_data;
5543
5544 debugfs_remove(proc->debugfs_entry);
5545
5546 if (proc->binderfs_entry) {
5547 binderfs_remove_file(proc->binderfs_entry);
5548 proc->binderfs_entry = NULL;
5549 }
5550
5551 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5552
5553 return 0;
5554 }
5555
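/**
 * binder_node_release() - clean up a node of a dying process
 * @node:	node to release; the caller holds a temporary ref on it
 * @refs:	running count of references seen so far by the caller
 *
 * If nothing else references the node it is freed immediately.
 * Otherwise the node is moved onto the global dead-nodes list and a
 * BINDER_WORK_DEAD_BINDER item is queued to every process that
 * requested a death notification for it. Returns the updated count.
 */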
5556 static int binder_node_release(struct binder_node *node, int refs)
5557 {
5558 struct binder_ref *ref;
5559 int death = 0;
5560 struct binder_proc *proc = node->proc;
5561
5562 binder_release_work(proc, &node->async_todo);
5563
5564 binder_node_lock(node);
5565 binder_inner_proc_lock(proc);
5566 binder_dequeue_work_ilocked(&node->work);
5567 /*
5568 * The caller must have taken a temporary ref on the node.
5569 */
5570 BUG_ON(!node->tmp_refs);
5571 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5572 binder_inner_proc_unlock(proc);
5573 binder_node_unlock(node);
5574 binder_free_node(node);
5575
5576 return refs;
5577 }
5578
5579 node->proc = NULL;
5580 node->local_strong_refs = 0;
5581 node->local_weak_refs = 0;
5582 binder_inner_proc_unlock(proc);
5583
5584 spin_lock(&binder_dead_nodes_lock);
5585 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5586 spin_unlock(&binder_dead_nodes_lock);
5587
5588 hlist_for_each_entry(ref, &node->refs, node_entry) {
5589 refs++;
5590 /*
5591 * Need the node lock to synchronize
5592 * with new notification requests and the
5593 * inner lock to synchronize with queued
5594 * death notifications.
5595 */
5596 binder_inner_proc_lock(ref->proc);
5597 if (!ref->death) {
5598 binder_inner_proc_unlock(ref->proc);
5599 continue;
5600 }
5601
5602 death++;
5603
5604 BUG_ON(!list_empty(&ref->death->work.entry));
5605 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5606 binder_enqueue_work_ilocked(&ref->death->work,
5607 &ref->proc->todo);
5608 binder_wakeup_proc_ilocked(ref->proc);
5609 binder_inner_proc_unlock(ref->proc);
5610 }
5611
5612 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5613 "node %d now dead, refs %d, death %d\n",
5614 node->debug_id, refs, death);
5615 binder_node_unlock(node);
5616 binder_put_node(node);
5617
5618 return refs;
5619 }
5620
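/**
 * binder_deferred_release() - tear down a binder_proc after release()
 * @proc:	process to tear down
 *
 * Marks the proc dead, releases every thread, node and ref it still
 * owns, flushes the remaining work lists and finally drops the
 * temporary proc reference taken here, which frees the proc once all
 * outstanding transactions are done with it.
 */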
5621 static void binder_deferred_release(struct binder_proc *proc)
5622 {
5623 struct binder_context *context = proc->context;
5624 struct rb_node *n;
5625 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5626
5627 mutex_lock(&binder_procs_lock);
5628 hlist_del(&proc->proc_node);
5629 mutex_unlock(&binder_procs_lock);
5630
5631 mutex_lock(&context->context_mgr_node_lock);
5632 if (context->binder_context_mgr_node &&
5633 context->binder_context_mgr_node->proc == proc) {
5634 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5635 "%s: %d context_mgr_node gone\n",
5636 __func__, proc->pid);
5637 context->binder_context_mgr_node = NULL;
5638 }
5639 mutex_unlock(&context->context_mgr_node_lock);
5640 binder_inner_proc_lock(proc);
5641 /*
5642 * Make sure proc stays alive after we
5643 * remove all the threads
5644 */
5645 proc->tmp_ref++;
5646
5647 proc->is_dead = true;
5648 threads = 0;
5649 active_transactions = 0;
5650 while ((n = rb_first(&proc->threads))) {
5651 struct binder_thread *thread;
5652
5653 thread = rb_entry(n, struct binder_thread, rb_node);
5654 binder_inner_proc_unlock(proc);
5655 threads++;
5656 active_transactions += binder_thread_release(proc, thread);
5657 binder_inner_proc_lock(proc);
5658 }
5659
5660 nodes = 0;
5661 incoming_refs = 0;
5662 while ((n = rb_first(&proc->nodes))) {
5663 struct binder_node *node;
5664
5665 node = rb_entry(n, struct binder_node, rb_node);
5666 nodes++;
5667 /*
5668 * take a temporary ref on the node before
5669 * calling binder_node_release() which will either
5670 * kfree() the node or call binder_put_node()
5671 */
5672 binder_inc_node_tmpref_ilocked(node);
5673 rb_erase(&node->rb_node, &proc->nodes);
5674 binder_inner_proc_unlock(proc);
5675 incoming_refs = binder_node_release(node, incoming_refs);
5676 binder_inner_proc_lock(proc);
5677 }
5678 binder_inner_proc_unlock(proc);
5679
5680 outgoing_refs = 0;
5681 binder_proc_lock(proc);
5682 while ((n = rb_first(&proc->refs_by_desc))) {
5683 struct binder_ref *ref;
5684
5685 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5686 outgoing_refs++;
5687 binder_cleanup_ref_olocked(ref);
5688 binder_proc_unlock(proc);
5689 binder_free_ref(ref);
5690 binder_proc_lock(proc);
5691 }
5692 binder_proc_unlock(proc);
5693
5694 binder_release_work(proc, &proc->todo);
5695 binder_release_work(proc, &proc->delivered_death);
5696
5697 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5698 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5699 __func__, proc->pid, threads, nodes, incoming_refs,
5700 outgoing_refs, active_transactions);
5701
5702 binder_proc_dec_tmpref(proc);
5703 }
5704
5705 static void binder_deferred_func(struct work_struct *work)
5706 {
5707 struct binder_proc *proc;
5708
5709 int defer;
5710
5711 do {
5712 mutex_lock(&binder_deferred_lock);
5713 if (!hlist_empty(&binder_deferred_list)) {
5714 proc = hlist_entry(binder_deferred_list.first,
5715 struct binder_proc, deferred_work_node);
5716 hlist_del_init(&proc->deferred_work_node);
5717 defer = proc->deferred_work;
5718 proc->deferred_work = 0;
5719 } else {
5720 proc = NULL;
5721 defer = 0;
5722 }
5723 mutex_unlock(&binder_deferred_lock);
5724
5725 if (defer & BINDER_DEFERRED_FLUSH)
5726 binder_deferred_flush(proc);
5727
5728 if (defer & BINDER_DEFERRED_RELEASE)
5729 binder_deferred_release(proc); /* frees proc */
5730 } while (proc);
5731 }
5732 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5733
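/*
 * Record the deferred work bits for @proc, queue it on
 * binder_deferred_list and schedule the shared work item unless the
 * proc is already queued.
 */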
5734 static void
5735 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5736 {
5737 mutex_lock(&binder_deferred_lock);
5738 proc->deferred_work |= defer;
5739 if (hlist_unhashed(&proc->deferred_work_node)) {
5740 hlist_add_head(&proc->deferred_work_node,
5741 &binder_deferred_list);
5742 schedule_work(&binder_deferred_work);
5743 }
5744 mutex_unlock(&binder_deferred_lock);
5745 }
5746
5747 static void print_binder_transaction_ilocked(struct seq_file *m,
5748 struct binder_proc *proc,
5749 const char *prefix,
5750 struct binder_transaction *t)
5751 {
5752 struct binder_proc *to_proc;
5753 struct binder_buffer *buffer = t->buffer;
5754
5755 spin_lock(&t->lock);
5756 to_proc = t->to_proc;
5757 seq_printf(m,
5758 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5759 prefix, t->debug_id, t,
5760 t->from ? t->from->proc->pid : 0,
5761 t->from ? t->from->pid : 0,
5762 to_proc ? to_proc->pid : 0,
5763 t->to_thread ? t->to_thread->pid : 0,
5764 t->code, t->flags, t->priority.sched_policy,
5765 t->priority.prio, t->need_reply);
5766 spin_unlock(&t->lock);
5767
5768 if (proc != to_proc) {
5769 /*
5770 * Can only safely deref buffer if we are holding the
5771 * correct proc inner lock for this node
5772 */
5773 seq_puts(m, "\n");
5774 return;
5775 }
5776
5777 if (buffer == NULL) {
5778 seq_puts(m, " buffer free\n");
5779 return;
5780 }
5781 if (buffer->target_node)
5782 seq_printf(m, " node %d", buffer->target_node->debug_id);
5783 seq_printf(m, " size %zd:%zd data %pK\n",
5784 buffer->data_size, buffer->offsets_size,
5785 buffer->user_data);
5786 }
5787
5788 static void print_binder_work_ilocked(struct seq_file *m,
5789 struct binder_proc *proc,
5790 const char *prefix,
5791 const char *transaction_prefix,
5792 struct binder_work *w)
5793 {
5794 struct binder_node *node;
5795 struct binder_transaction *t;
5796
5797 switch (w->type) {
5798 case BINDER_WORK_TRANSACTION:
5799 t = container_of(w, struct binder_transaction, work);
5800 print_binder_transaction_ilocked(
5801 m, proc, transaction_prefix, t);
5802 break;
5803 case BINDER_WORK_RETURN_ERROR: {
5804 struct binder_error *e = container_of(
5805 w, struct binder_error, work);
5806
5807 seq_printf(m, "%stransaction error: %u\n",
5808 prefix, e->cmd);
5809 } break;
5810 case BINDER_WORK_TRANSACTION_COMPLETE:
5811 seq_printf(m, "%stransaction complete\n", prefix);
5812 break;
5813 case BINDER_WORK_NODE:
5814 node = container_of(w, struct binder_node, work);
5815 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5816 prefix, node->debug_id,
5817 (u64)node->ptr, (u64)node->cookie);
5818 break;
5819 case BINDER_WORK_DEAD_BINDER:
5820 seq_printf(m, "%shas dead binder\n", prefix);
5821 break;
5822 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5823 seq_printf(m, "%shas cleared dead binder\n", prefix);
5824 break;
5825 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5826 seq_printf(m, "%shas cleared death notification\n", prefix);
5827 break;
5828 default:
5829 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5830 break;
5831 }
5832 }
5833
5834 static void print_binder_thread_ilocked(struct seq_file *m,
5835 struct binder_thread *thread,
5836 int print_always)
5837 {
5838 struct binder_transaction *t;
5839 struct binder_work *w;
5840 size_t start_pos = m->count;
5841 size_t header_pos;
5842
5843 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5844 thread->pid, thread->looper,
5845 thread->looper_need_return,
5846 atomic_read(&thread->tmp_ref));
5847 header_pos = m->count;
5848 t = thread->transaction_stack;
5849 while (t) {
5850 if (t->from == thread) {
5851 print_binder_transaction_ilocked(m, thread->proc,
5852 " outgoing transaction", t);
5853 t = t->from_parent;
5854 } else if (t->to_thread == thread) {
5855 print_binder_transaction_ilocked(m, thread->proc,
5856 " incoming transaction", t);
5857 t = t->to_parent;
5858 } else {
5859 print_binder_transaction_ilocked(m, thread->proc,
5860 " bad transaction", t);
5861 t = NULL;
5862 }
5863 }
5864 list_for_each_entry(w, &thread->todo, entry) {
5865 print_binder_work_ilocked(m, thread->proc, " ",
5866 " pending transaction", w);
5867 }
5868 if (!print_always && m->count == header_pos)
5869 m->count = start_pos;
5870 }
5871
5872 static void print_binder_node_nilocked(struct seq_file *m,
5873 struct binder_node *node)
5874 {
5875 struct binder_ref *ref;
5876 struct binder_work *w;
5877 int count;
5878
5879 count = 0;
5880 hlist_for_each_entry(ref, &node->refs, node_entry)
5881 count++;
5882
5883 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5884 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5885 node->sched_policy, node->min_priority,
5886 node->has_strong_ref, node->has_weak_ref,
5887 node->local_strong_refs, node->local_weak_refs,
5888 node->internal_strong_refs, count, node->tmp_refs);
5889 if (count) {
5890 seq_puts(m, " proc");
5891 hlist_for_each_entry(ref, &node->refs, node_entry)
5892 seq_printf(m, " %d", ref->proc->pid);
5893 }
5894 seq_puts(m, "\n");
5895 if (node->proc) {
5896 list_for_each_entry(w, &node->async_todo, entry)
5897 print_binder_work_ilocked(m, node->proc, " ",
5898 " pending async transaction", w);
5899 }
5900 }
5901
5902 static void print_binder_ref_olocked(struct seq_file *m,
5903 struct binder_ref *ref)
5904 {
5905 binder_node_lock(ref->node);
5906 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5907 ref->data.debug_id, ref->data.desc,
5908 ref->node->proc ? "" : "dead ",
5909 ref->node->debug_id, ref->data.strong,
5910 ref->data.weak, ref->death);
5911 binder_node_unlock(ref->node);
5912 }
5913
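/*
 * Dump one binder_proc: its threads, its nodes (all of them when
 * print_all, otherwise only nodes with pending async work), its refs
 * (print_all only), allocated buffers and pending work items.
 */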
5914 static void print_binder_proc(struct seq_file *m,
5915 struct binder_proc *proc, int print_all)
5916 {
5917 struct binder_work *w;
5918 struct rb_node *n;
5919 size_t start_pos = m->count;
5920 size_t header_pos;
5921 struct binder_node *last_node = NULL;
5922
5923 seq_printf(m, "proc %d\n", proc->pid);
5924 seq_printf(m, "context %s\n", proc->context->name);
5925 header_pos = m->count;
5926
5927 binder_inner_proc_lock(proc);
5928 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5929 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5930 rb_node), print_all);
5931
5932 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5933 struct binder_node *node = rb_entry(n, struct binder_node,
5934 rb_node);
5935 if (!print_all && !node->has_async_transaction)
5936 continue;
5937
5938 /*
5939 * take a temporary reference on the node so it
5940 * survives and isn't removed from the tree
5941 * while we print it.
5942 */
5943 binder_inc_node_tmpref_ilocked(node);
5944 /* Need to drop inner lock to take node lock */
5945 binder_inner_proc_unlock(proc);
5946 if (last_node)
5947 binder_put_node(last_node);
5948 binder_node_inner_lock(node);
5949 print_binder_node_nilocked(m, node);
5950 binder_node_inner_unlock(node);
5951 last_node = node;
5952 binder_inner_proc_lock(proc);
5953 }
5954 binder_inner_proc_unlock(proc);
5955 if (last_node)
5956 binder_put_node(last_node);
5957
5958 if (print_all) {
5959 binder_proc_lock(proc);
5960 for (n = rb_first(&proc->refs_by_desc);
5961 n != NULL;
5962 n = rb_next(n))
5963 print_binder_ref_olocked(m, rb_entry(n,
5964 struct binder_ref,
5965 rb_node_desc));
5966 binder_proc_unlock(proc);
5967 }
5968 binder_alloc_print_allocated(m, &proc->alloc);
5969 binder_inner_proc_lock(proc);
5970 list_for_each_entry(w, &proc->todo, entry)
5971 print_binder_work_ilocked(m, proc, " ",
5972 " pending transaction", w);
5973 list_for_each_entry(w, &proc->delivered_death, entry) {
5974 seq_puts(m, " has delivered dead binder\n");
5975 break;
5976 }
5977 binder_inner_proc_unlock(proc);
5978 if (!print_all && m->count == header_pos)
5979 m->count = start_pos;
5980 }
5981
5982 static const char * const binder_return_strings[] = {
5983 "BR_ERROR",
5984 "BR_OK",
5985 "BR_TRANSACTION",
5986 "BR_REPLY",
5987 "BR_ACQUIRE_RESULT",
5988 "BR_DEAD_REPLY",
5989 "BR_TRANSACTION_COMPLETE",
5990 "BR_INCREFS",
5991 "BR_ACQUIRE",
5992 "BR_RELEASE",
5993 "BR_DECREFS",
5994 "BR_ATTEMPT_ACQUIRE",
5995 "BR_NOOP",
5996 "BR_SPAWN_LOOPER",
5997 "BR_FINISHED",
5998 "BR_DEAD_BINDER",
5999 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6000 "BR_FAILED_REPLY"
6001 };
6002
6003 static const char * const binder_command_strings[] = {
6004 "BC_TRANSACTION",
6005 "BC_REPLY",
6006 "BC_ACQUIRE_RESULT",
6007 "BC_FREE_BUFFER",
6008 "BC_INCREFS",
6009 "BC_ACQUIRE",
6010 "BC_RELEASE",
6011 "BC_DECREFS",
6012 "BC_INCREFS_DONE",
6013 "BC_ACQUIRE_DONE",
6014 "BC_ATTEMPT_ACQUIRE",
6015 "BC_REGISTER_LOOPER",
6016 "BC_ENTER_LOOPER",
6017 "BC_EXIT_LOOPER",
6018 "BC_REQUEST_DEATH_NOTIFICATION",
6019 "BC_CLEAR_DEATH_NOTIFICATION",
6020 "BC_DEAD_BINDER_DONE",
6021 "BC_TRANSACTION_SG",
6022 "BC_REPLY_SG",
6023 };
6024
6025 static const char * const binder_objstat_strings[] = {
6026 "proc",
6027 "thread",
6028 "node",
6029 "ref",
6030 "death",
6031 "transaction",
6032 "transaction_complete"
6033 };
6034
6035 static void print_binder_stats(struct seq_file *m, const char *prefix,
6036 struct binder_stats *stats)
6037 {
6038 int i;
6039
6040 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6041 ARRAY_SIZE(binder_command_strings));
6042 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6043 int temp = atomic_read(&stats->bc[i]);
6044
6045 if (temp)
6046 seq_printf(m, "%s%s: %d\n", prefix,
6047 binder_command_strings[i], temp);
6048 }
6049
6050 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6051 ARRAY_SIZE(binder_return_strings));
6052 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6053 int temp = atomic_read(&stats->br[i]);
6054
6055 if (temp)
6056 seq_printf(m, "%s%s: %d\n", prefix,
6057 binder_return_strings[i], temp);
6058 }
6059
6060 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6061 ARRAY_SIZE(binder_objstat_strings));
6062 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6063 ARRAY_SIZE(stats->obj_deleted));
6064 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6065 int created = atomic_read(&stats->obj_created[i]);
6066 int deleted = atomic_read(&stats->obj_deleted[i]);
6067
6068 if (created || deleted)
6069 seq_printf(m, "%s%s: active %d total %d\n",
6070 prefix,
6071 binder_objstat_strings[i],
6072 created - deleted,
6073 created);
6074 }
6075 }
6076
6077 static void print_binder_proc_stats(struct seq_file *m,
6078 struct binder_proc *proc)
6079 {
6080 struct binder_work *w;
6081 struct binder_thread *thread;
6082 struct rb_node *n;
6083 int count, strong, weak, ready_threads;
6084 size_t free_async_space =
6085 binder_alloc_get_free_async_space(&proc->alloc);
6086
6087 seq_printf(m, "proc %d\n", proc->pid);
6088 seq_printf(m, "context %s\n", proc->context->name);
6089 count = 0;
6090 ready_threads = 0;
6091 binder_inner_proc_lock(proc);
6092 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6093 count++;
6094
6095 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6096 ready_threads++;
6097
6098 seq_printf(m, " threads: %d\n", count);
6099 seq_printf(m, " requested threads: %d+%d/%d\n"
6100 " ready threads %d\n"
6101 " free async space %zd\n", proc->requested_threads,
6102 proc->requested_threads_started, proc->max_threads,
6103 ready_threads,
6104 free_async_space);
6105 count = 0;
6106 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6107 count++;
6108 binder_inner_proc_unlock(proc);
6109 seq_printf(m, " nodes: %d\n", count);
6110 count = 0;
6111 strong = 0;
6112 weak = 0;
6113 binder_proc_lock(proc);
6114 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6115 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6116 rb_node_desc);
6117 count++;
6118 strong += ref->data.strong;
6119 weak += ref->data.weak;
6120 }
6121 binder_proc_unlock(proc);
6122 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6123
6124 count = binder_alloc_get_allocated_count(&proc->alloc);
6125 seq_printf(m, " buffers: %d\n", count);
6126
6127 binder_alloc_print_pages(m, &proc->alloc);
6128
6129 count = 0;
6130 binder_inner_proc_lock(proc);
6131 list_for_each_entry(w, &proc->todo, entry) {
6132 if (w->type == BINDER_WORK_TRANSACTION)
6133 count++;
6134 }
6135 binder_inner_proc_unlock(proc);
6136 seq_printf(m, " pending transactions: %d\n", count);
6137
6138 print_binder_stats(m, " ", &proc->stats);
6139 }
6140
6141
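/*
 * Back end of the binder "state" debugfs file: dump all dead nodes
 * followed by the full state of every binder_proc in the system.
 */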
6142 int binder_state_show(struct seq_file *m, void *unused)
6143 {
6144 struct binder_proc *proc;
6145 struct binder_node *node;
6146 struct binder_node *last_node = NULL;
6147
6148 seq_puts(m, "binder state:\n");
6149
6150 spin_lock(&binder_dead_nodes_lock);
6151 if (!hlist_empty(&binder_dead_nodes))
6152 seq_puts(m, "dead nodes:\n");
6153 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6154 /*
6155 * take a temporary reference on the node so it
6156 * survives and isn't removed from the list
6157 * while we print it.
6158 */
6159 node->tmp_refs++;
6160 spin_unlock(&binder_dead_nodes_lock);
6161 if (last_node)
6162 binder_put_node(last_node);
6163 binder_node_lock(node);
6164 print_binder_node_nilocked(m, node);
6165 binder_node_unlock(node);
6166 last_node = node;
6167 spin_lock(&binder_dead_nodes_lock);
6168 }
6169 spin_unlock(&binder_dead_nodes_lock);
6170 if (last_node)
6171 binder_put_node(last_node);
6172
6173 mutex_lock(&binder_procs_lock);
6174 hlist_for_each_entry(proc, &binder_procs, proc_node)
6175 print_binder_proc(m, proc, 1);
6176 mutex_unlock(&binder_procs_lock);
6177
6178 return 0;
6179 }
6180
6181 int binder_stats_show(struct seq_file *m, void *unused)
6182 {
6183 struct binder_proc *proc;
6184
6185 seq_puts(m, "binder stats:\n");
6186
6187 print_binder_stats(m, "", &binder_stats);
6188
6189 mutex_lock(&binder_procs_lock);
6190 hlist_for_each_entry(proc, &binder_procs, proc_node)
6191 print_binder_proc_stats(m, proc);
6192 mutex_unlock(&binder_procs_lock);
6193
6194 return 0;
6195 }
6196
6197 int binder_transactions_show(struct seq_file *m, void *unused)
6198 {
6199 struct binder_proc *proc;
6200
6201 seq_puts(m, "binder transactions:\n");
6202 mutex_lock(&binder_procs_lock);
6203 hlist_for_each_entry(proc, &binder_procs, proc_node)
6204 print_binder_proc(m, proc, 0);
6205 mutex_unlock(&binder_procs_lock);
6206
6207 return 0;
6208 }
6209
6210 static int proc_show(struct seq_file *m, void *unused)
6211 {
6212 struct binder_proc *itr;
6213 int pid = (unsigned long)m->private;
6214
6215 mutex_lock(&binder_procs_lock);
6216 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6217 if (itr->pid == pid) {
6218 seq_puts(m, "binder proc state:\n");
6219 print_binder_proc(m, itr, 1);
6220 }
6221 }
6222 mutex_unlock(&binder_procs_lock);
6223
6224 return 0;
6225 }
6226
6227 static void print_binder_transaction_log_entry(struct seq_file *m,
6228 struct binder_transaction_log_entry *e)
6229 {
6230 int debug_id = READ_ONCE(e->debug_id_done);
6231 /*
6232 * read barrier to guarantee debug_id_done read before
6233 * we print the log values
6234 */
6235 smp_rmb();
6236 seq_printf(m,
6237 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6238 e->debug_id, (e->call_type == 2) ? "reply" :
6239 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6240 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6241 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6242 e->return_error, e->return_error_param,
6243 e->return_error_line);
6244 /*
6245 * read-barrier to guarantee read of debug_id_done after
6246 * done printing the fields of the entry
6247 */
6248 smp_rmb();
6249 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6250 "\n" : " (incomplete)\n");
6251 }
6252
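/*
 * The transaction log is a fixed-size ring buffer indexed by log->cur.
 * Print at most ARRAY_SIZE(log->entry) entries, starting from the
 * oldest one once the buffer has wrapped (log->full).
 */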
6253 int binder_transaction_log_show(struct seq_file *m, void *unused)
6254 {
6255 struct binder_transaction_log *log = m->private;
6256 unsigned int log_cur = atomic_read(&log->cur);
6257 unsigned int count;
6258 unsigned int cur;
6259 int i;
6260
6261 count = log_cur + 1;
6262 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6263 0 : count % ARRAY_SIZE(log->entry);
6264 if (count > ARRAY_SIZE(log->entry) || log->full)
6265 count = ARRAY_SIZE(log->entry);
6266 for (i = 0; i < count; i++) {
6267 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6268
6269 print_binder_transaction_log_entry(m, &log->entry[index]);
6270 }
6271 return 0;
6272 }
6273
6274 const struct file_operations binder_fops = {
6275 .owner = THIS_MODULE,
6276 .poll = binder_poll,
6277 .unlocked_ioctl = binder_ioctl,
6278 .compat_ioctl = binder_ioctl,
6279 .mmap = binder_mmap,
6280 .open = binder_open,
6281 .flush = binder_flush,
6282 .release = binder_release,
6283 };
6284
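/*
 * Allocate and register one binder misc device with the given name and
 * add it to the global binder_devices list. Called from binder_init()
 * for each name in binder_devices_param when binderfs is not enabled.
 */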
6285 static int __init init_binder_device(const char *name)
6286 {
6287 int ret;
6288 struct binder_device *binder_device;
6289
6290 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6291 if (!binder_device)
6292 return -ENOMEM;
6293
6294 binder_device->miscdev.fops = &binder_fops;
6295 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6296 binder_device->miscdev.name = name;
6297
6298 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6299 binder_device->context.name = name;
6300 mutex_init(&binder_device->context.context_mgr_node_lock);
6301
6302 ret = misc_register(&binder_device->miscdev);
6303 if (ret < 0) {
6304 kfree(binder_device);
6305 return ret;
6306 }
6307
6308 hlist_add_head(&binder_device->hlist, &binder_devices);
6309
6310 return ret;
6311 }
6312
6313 static int __init binder_init(void)
6314 {
6315 int ret;
6316 char *device_name, *device_tmp;
6317 struct binder_device *device;
6318 struct hlist_node *tmp;
6319 char *device_names = NULL;
6320
6321 ret = binder_alloc_shrinker_init();
6322 if (ret)
6323 return ret;
6324
6325 atomic_set(&binder_transaction_log.cur, ~0U);
6326 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6327
6328 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6329 if (binder_debugfs_dir_entry_root)
6330 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6331 binder_debugfs_dir_entry_root);
6332
6333 if (binder_debugfs_dir_entry_root) {
6334 debugfs_create_file("state",
6335 0444,
6336 binder_debugfs_dir_entry_root,
6337 NULL,
6338 &binder_state_fops);
6339 debugfs_create_file("stats",
6340 0444,
6341 binder_debugfs_dir_entry_root,
6342 NULL,
6343 &binder_stats_fops);
6344 debugfs_create_file("transactions",
6345 0444,
6346 binder_debugfs_dir_entry_root,
6347 NULL,
6348 &binder_transactions_fops);
6349 debugfs_create_file("transaction_log",
6350 0444,
6351 binder_debugfs_dir_entry_root,
6352 &binder_transaction_log,
6353 &binder_transaction_log_fops);
6354 debugfs_create_file("failed_transaction_log",
6355 0444,
6356 binder_debugfs_dir_entry_root,
6357 &binder_transaction_log_failed,
6358 &binder_transaction_log_fops);
6359 }
6360
6361 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6362 strcmp(binder_devices_param, "") != 0) {
6363 /*
6364 * Copy the module_parameter string, because we don't want to
6365 * tokenize it in-place.
6366 */
6367 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6368 if (!device_names) {
6369 ret = -ENOMEM;
6370 goto err_alloc_device_names_failed;
6371 }
6372
6373 device_tmp = device_names;
6374 while ((device_name = strsep(&device_tmp, ","))) {
6375 ret = init_binder_device(device_name);
6376 if (ret)
6377 goto err_init_binder_device_failed;
6378 }
6379 }
6380
6381 ret = init_binderfs();
6382 if (ret)
6383 goto err_init_binder_device_failed;
6384
6385 return ret;
6386
6387 err_init_binder_device_failed:
6388 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6389 misc_deregister(&device->miscdev);
6390 hlist_del(&device->hlist);
6391 kfree(device);
6392 }
6393
6394 kfree(device_names);
6395
6396 err_alloc_device_names_failed:
6397 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6398
6399 return ret;
6400 }
6401
6402 device_initcall(binder_init);
6403
6404 #define CREATE_TRACE_POINTS
6405 #include "binder_trace.h"
6406
6407 MODULE_LICENSE("GPL v2");
6408