1 /* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 /*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
35 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock held on entry indicate which lock is
42 * held in the suffix of the function name:
43 *
44 * foo_olocked() : requires node->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
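/*
 * Illustrative sketch (not part of the driver): assuming a caller already
 * holds references to a binder_proc and one of its nodes, the lock helpers
 * defined later in this file nest in the documented 1) outer, 2) node,
 * 3) inner order, for example:
 *
 *	binder_proc_lock(proc);			(1) proc->outer_lock
 *	binder_node_lock(node);			(2) node->lock
 *	binder_inner_proc_lock(proc);		(3) proc->inner_lock
 *	... touch refs, node fields, todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */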
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched.h>
68 #include <linux/seq_file.h>
69 #include <linux/uaccess.h>
70 #include <linux/pid_namespace.h>
71 #include <linux/security.h>
72 #include <linux/spinlock.h>
73
74 #include "binder.h"
75 #include "binder_alloc.h"
76 #include "binder_trace.h"
77
78 static HLIST_HEAD(binder_deferred_list);
79 static DEFINE_MUTEX(binder_deferred_lock);
80
81 static HLIST_HEAD(binder_devices);
82 static HLIST_HEAD(binder_procs);
83 static DEFINE_MUTEX(binder_procs_lock);
84
85 static HLIST_HEAD(binder_dead_nodes);
86 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87
88 static struct dentry *binder_debugfs_dir_entry_root;
89 static struct dentry *binder_debugfs_dir_entry_proc;
90 static atomic_t binder_last_id;
91 static struct workqueue_struct *binder_deferred_workqueue;
92
93 #define BINDER_DEBUG_ENTRY(name) \
94 static int binder_##name##_open(struct inode *inode, struct file *file) \
95 { \
96 return single_open(file, binder_##name##_show, inode->i_private); \
97 } \
98 \
99 static const struct file_operations binder_##name##_fops = { \
100 .owner = THIS_MODULE, \
101 .open = binder_##name##_open, \
102 .read = seq_read, \
103 .llseek = seq_lseek, \
104 .release = single_release, \
105 }
106
107 static int binder_proc_show(struct seq_file *m, void *unused);
108 BINDER_DEBUG_ENTRY(proc);
109
110 /* This is only defined in include/asm-arm/sizes.h */
111 #ifndef SZ_1K
112 #define SZ_1K 0x400
113 #endif
114
115 #ifndef SZ_4M
116 #define SZ_4M 0x400000
117 #endif
118
119 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
120
121 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
122
123 enum {
124 BINDER_DEBUG_USER_ERROR = 1U << 0,
125 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
126 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
127 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
128 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
129 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
130 BINDER_DEBUG_READ_WRITE = 1U << 6,
131 BINDER_DEBUG_USER_REFS = 1U << 7,
132 BINDER_DEBUG_THREADS = 1U << 8,
133 BINDER_DEBUG_TRANSACTION = 1U << 9,
134 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
135 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
136 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
137 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
138 BINDER_DEBUG_SPINLOCKS = 1U << 14,
139 };
140 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
141 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
142 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
143
144 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
145 module_param_named(devices, binder_devices_param, charp, S_IRUGO);
146
147 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
148 static int binder_stop_on_user_error;
149
150 static int binder_set_stop_on_user_error(const char *val,
151 struct kernel_param *kp)
152 {
153 int ret;
154
155 ret = param_set_int(val, kp);
156 if (binder_stop_on_user_error < 2)
157 wake_up(&binder_user_error_wait);
158 return ret;
159 }
160 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
161 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
162
163 #define binder_debug(mask, x...) \
164 do { \
165 if (binder_debug_mask & mask) \
166 pr_info(x); \
167 } while (0)
168
169 #define binder_user_error(x...) \
170 do { \
171 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
172 pr_info(x); \
173 if (binder_stop_on_user_error) \
174 binder_stop_on_user_error = 2; \
175 } while (0)
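/*
 * Illustrative sketch (not part of the driver): hypothetical call sites for
 * the logging macros above. A message is emitted only when the matching
 * BINDER_DEBUG_* bit is set in the debug_mask module parameter; proc and
 * thread here are placeholders.
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d exit\n", proc->pid, thread->pid);
 *
 *	binder_user_error("%d:%d got transaction with invalid offset\n",
 *			  proc->pid, thread->pid);
 */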
176
177 #define to_flat_binder_object(hdr) \
178 container_of(hdr, struct flat_binder_object, hdr)
179
180 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
181
182 #define to_binder_buffer_object(hdr) \
183 container_of(hdr, struct binder_buffer_object, hdr)
184
185 #define to_binder_fd_array_object(hdr) \
186 container_of(hdr, struct binder_fd_array_object, hdr)
187
188 enum binder_stat_types {
189 BINDER_STAT_PROC,
190 BINDER_STAT_THREAD,
191 BINDER_STAT_NODE,
192 BINDER_STAT_REF,
193 BINDER_STAT_DEATH,
194 BINDER_STAT_TRANSACTION,
195 BINDER_STAT_TRANSACTION_COMPLETE,
196 BINDER_STAT_COUNT
197 };
198
199 struct binder_stats {
200 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
201 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
202 atomic_t obj_created[BINDER_STAT_COUNT];
203 atomic_t obj_deleted[BINDER_STAT_COUNT];
204 };
205
206 static struct binder_stats binder_stats;
207
208 static inline void binder_stats_deleted(enum binder_stat_types type)
209 {
210 atomic_inc(&binder_stats.obj_deleted[type]);
211 }
212
213 static inline void binder_stats_created(enum binder_stat_types type)
214 {
215 atomic_inc(&binder_stats.obj_created[type]);
216 }
217
218 struct binder_transaction_log_entry {
219 int debug_id;
220 int debug_id_done;
221 int call_type;
222 int from_proc;
223 int from_thread;
224 int target_handle;
225 int to_proc;
226 int to_thread;
227 int to_node;
228 int data_size;
229 int offsets_size;
230 int return_error_line;
231 uint32_t return_error;
232 uint32_t return_error_param;
233 const char *context_name;
234 };
235 struct binder_transaction_log {
236 atomic_t cur;
237 bool full;
238 struct binder_transaction_log_entry entry[32];
239 };
240 static struct binder_transaction_log binder_transaction_log;
241 static struct binder_transaction_log binder_transaction_log_failed;
242
243 static struct binder_transaction_log_entry *binder_transaction_log_add(
244 struct binder_transaction_log *log)
245 {
246 struct binder_transaction_log_entry *e;
247 unsigned int cur = atomic_inc_return(&log->cur);
248
249 if (cur >= ARRAY_SIZE(log->entry))
250 log->full = 1;
251 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
252 WRITE_ONCE(e->debug_id_done, 0);
253 /*
254 * write-barrier to synchronize access to e->debug_id_done.
255 * We make sure the initialized 0 value is seen before the
256 * other fields are zeroed by the memset() below.
257 */
258 smp_wmb();
259 memset(e, 0, sizeof(*e));
260 return e;
261 }
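/*
 * Illustrative sketch (not part of the driver): the intended caller pattern
 * for the transaction log above. An entry is reserved, filled in, and only
 * then marked complete by publishing its debug_id into debug_id_done behind
 * a write barrier; t_debug_id stands in for the transaction's debug_id.
 *
 *	struct binder_transaction_log_entry *e;
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->debug_id = t_debug_id;
 *	... fill in the remaining fields ...
 *	smp_wmb();
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 */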
262
263 struct binder_context {
264 struct binder_node *binder_context_mgr_node;
265 struct mutex context_mgr_node_lock;
266
267 kuid_t binder_context_mgr_uid;
268 const char *name;
269 };
270
271 struct binder_device {
272 struct hlist_node hlist;
273 struct miscdevice miscdev;
274 struct binder_context context;
275 };
276
277 /**
278 * struct binder_work - work enqueued on a worklist
279 * @entry: node enqueued on list
280 * @type: type of work to be performed
281 *
282 * There are separate work lists for proc, thread, and node (async).
283 */
284 struct binder_work {
285 struct list_head entry;
286
287 enum {
288 BINDER_WORK_TRANSACTION = 1,
289 BINDER_WORK_TRANSACTION_COMPLETE,
290 BINDER_WORK_RETURN_ERROR,
291 BINDER_WORK_NODE,
292 BINDER_WORK_DEAD_BINDER,
293 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
294 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
295 } type;
296 };
297
298 struct binder_error {
299 struct binder_work work;
300 uint32_t cmd;
301 };
302
303 /**
304 * struct binder_node - binder node bookkeeping
305 * @debug_id: unique ID for debugging
306 * (invariant after initialized)
307 * @lock: lock for node fields
308 * @work: worklist element for node work
309 * (protected by @proc->inner_lock)
310 * @rb_node: element for proc->nodes tree
311 * (protected by @proc->inner_lock)
312 * @dead_node: element for binder_dead_nodes list
313 * (protected by binder_dead_nodes_lock)
314 * @proc: binder_proc that owns this node
315 * (invariant after initialized)
316 * @refs: list of references on this node
317 * (protected by @lock)
318 * @internal_strong_refs: used to take strong references when
319 * initiating a transaction
320 * (protected by @proc->inner_lock if @proc
321 * and by @lock)
322 * @local_weak_refs: weak user refs from local process
323 * (protected by @proc->inner_lock if @proc
324 * and by @lock)
325 * @local_strong_refs: strong user refs from local process
326 * (protected by @proc->inner_lock if @proc
327 * and by @lock)
328 * @tmp_refs: temporary kernel refs
329 * (protected by @proc->inner_lock while @proc
330 * is valid, and by binder_dead_nodes_lock
331 * if @proc is NULL. During inc/dec and node release
332 * it is also protected by @lock to provide safety
333 * as the node dies and @proc becomes NULL)
334 * @ptr: userspace pointer for node
335 * (invariant, no lock needed)
336 * @cookie: userspace cookie for node
337 * (invariant, no lock needed)
338 * @has_strong_ref: userspace notified of strong ref
339 * (protected by @proc->inner_lock if @proc
340 * and by @lock)
341 * @pending_strong_ref: userspace has acked notification of strong ref
342 * (protected by @proc->inner_lock if @proc
343 * and by @lock)
344 * @has_weak_ref: userspace notified of weak ref
345 * (protected by @proc->inner_lock if @proc
346 * and by @lock)
347 * @pending_weak_ref: userspace has acked notification of weak ref
348 * (protected by @proc->inner_lock if @proc
349 * and by @lock)
350 * @has_async_transaction: async transaction to node in progress
351 * (protected by @lock)
352 * @sched_policy: minimum scheduling policy for node
353 * (invariant after initialized)
354 * @accept_fds: file descriptor operations supported for node
355 * (invariant after initialized)
356 * @min_priority: minimum scheduling priority
357 * (invariant after initialized)
358 * @inherit_rt: inherit RT scheduling policy from caller
359 * (invariant after initialized)
360 * @async_todo: list of async work items
361 * (protected by @proc->inner_lock)
362 *
363 * Bookkeeping structure for binder nodes.
364 */
365 struct binder_node {
366 int debug_id;
367 spinlock_t lock;
368 struct binder_work work;
369 union {
370 struct rb_node rb_node;
371 struct hlist_node dead_node;
372 };
373 struct binder_proc *proc;
374 struct hlist_head refs;
375 int internal_strong_refs;
376 int local_weak_refs;
377 int local_strong_refs;
378 int tmp_refs;
379 binder_uintptr_t ptr;
380 binder_uintptr_t cookie;
381 struct {
382 /*
383 * bitfield elements protected by
384 * proc inner_lock
385 */
386 u8 has_strong_ref:1;
387 u8 pending_strong_ref:1;
388 u8 has_weak_ref:1;
389 u8 pending_weak_ref:1;
390 };
391 struct {
392 /*
393 * invariant after initialization
394 */
395 u8 sched_policy:2;
396 u8 inherit_rt:1;
397 u8 accept_fds:1;
398 u8 min_priority;
399 };
400 bool has_async_transaction;
401 struct list_head async_todo;
402 };
403
404 struct binder_ref_death {
405 /**
406 * @work: worklist element for death notifications
407 * (protected by inner_lock of the proc that
408 * this ref belongs to)
409 */
410 struct binder_work work;
411 binder_uintptr_t cookie;
412 };
413
414 /**
415 * struct binder_ref_data - binder_ref counts and id
416 * @debug_id: unique ID for the ref
417 * @desc: unique userspace handle for ref
418 * @strong: strong ref count (debugging only if not locked)
419 * @weak: weak ref count (debugging only if not locked)
420 *
421 * Structure to hold ref count and ref id information. Since
422 * the actual ref can only be accessed with a lock, this structure
423 * is used to return information about the ref to callers of
424 * ref inc/dec functions.
425 */
426 struct binder_ref_data {
427 int debug_id;
428 uint32_t desc;
429 int strong;
430 int weak;
431 };
432
433 /**
434 * struct binder_ref - struct to track references on nodes
435 * @data: binder_ref_data containing id, handle, and current refcounts
436 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
437 * @rb_node_node: node for lookup by @node in proc's rb_tree
438 * @node_entry: list entry for node->refs list in target node
439 * (protected by @node->lock)
440 * @proc: binder_proc containing ref
441 * @node: binder_node of target node. When cleaning up a
442 * ref for deletion in binder_cleanup_ref, a non-NULL
443 * @node indicates the node must be freed
444 * @death: pointer to death notification (ref_death) if requested
445 * (protected by @node->lock)
446 *
447 * Structure to track references from procA to target node (on procB). This
448 * structure is unsafe to access without holding @proc->outer_lock.
449 */
450 struct binder_ref {
451 /* Lookups needed: */
452 /* node + proc => ref (transaction) */
453 /* desc + proc => ref (transaction, inc/dec ref) */
454 /* node => refs + procs (proc exit) */
455 struct binder_ref_data data;
456 struct rb_node rb_node_desc;
457 struct rb_node rb_node_node;
458 struct hlist_node node_entry;
459 struct binder_proc *proc;
460 struct binder_node *node;
461 struct binder_ref_death *death;
462 };
463
464 enum binder_deferred_state {
465 BINDER_DEFERRED_FLUSH = 0x01,
466 BINDER_DEFERRED_RELEASE = 0x02,
467 };
468
469 /**
470 * struct binder_priority - scheduler policy and priority
471 * @sched_policy: scheduler policy
472 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
473 *
474 * The binder driver supports inheriting the following scheduler policies:
475 * SCHED_NORMAL
476 * SCHED_BATCH
477 * SCHED_FIFO
478 * SCHED_RR
479 */
480 struct binder_priority {
481 unsigned int sched_policy;
482 int prio;
483 };
484
485 /**
486 * struct binder_proc - binder process bookkeeping
487 * @proc_node: element for binder_procs list
488 * @threads: rbtree of binder_threads in this proc
489 * (protected by @inner_lock)
490 * @nodes: rbtree of binder nodes associated with
491 * this proc ordered by node->ptr
492 * (protected by @inner_lock)
493 * @refs_by_desc: rbtree of refs ordered by ref->desc
494 * (protected by @outer_lock)
495 * @refs_by_node: rbtree of refs ordered by ref->node
496 * (protected by @outer_lock)
497 * @waiting_threads: threads currently waiting for proc work
498 * (protected by @inner_lock)
499 * @pid: PID of group_leader of process
500 * (invariant after initialized)
501 * @tsk: task_struct for group_leader of process
502 * (invariant after initialized)
503 * @deferred_work_node: element for binder_deferred_list
504 * (protected by binder_deferred_lock)
505 * @deferred_work: bitmap of deferred work to perform
506 * (protected by binder_deferred_lock)
507 * @is_dead: process is dead and awaiting free
508 * when outstanding transactions are cleaned up
509 * (protected by @inner_lock)
510 * @todo: list of work for this process
511 * (protected by @inner_lock)
512 * @stats: per-process binder statistics
513 * (atomics, no lock needed)
514 * @delivered_death: list of delivered death notifications
515 * (protected by @inner_lock)
516 * @max_threads: cap on number of binder threads
517 * (protected by @inner_lock)
518 * @requested_threads: number of binder threads requested but not
519 * yet started. In current implementation, can
520 * only be 0 or 1.
521 * (protected by @inner_lock)
522 * @requested_threads_started: number of binder threads started
523 * (protected by @inner_lock)
524 * @tmp_ref: temporary reference to indicate proc is in use
525 * (protected by @inner_lock)
526 * @default_priority: default scheduler priority
527 * (invariant after initialized)
528 * @debugfs_entry: debugfs node
529 * @alloc: binder allocator bookkeeping
530 * @context: binder_context for this proc
531 * (invariant after initialized)
532 * @inner_lock: can nest under outer_lock and/or node lock
533 * @outer_lock: no nesting under inner or node lock
534 * Lock order: 1) outer, 2) node, 3) inner
535 *
536 * Bookkeeping structure for binder processes
537 */
538 struct binder_proc {
539 struct hlist_node proc_node;
540 struct rb_root threads;
541 struct rb_root nodes;
542 struct rb_root refs_by_desc;
543 struct rb_root refs_by_node;
544 struct list_head waiting_threads;
545 int pid;
546 struct task_struct *tsk;
547 struct hlist_node deferred_work_node;
548 int deferred_work;
549 bool is_dead;
550
551 struct list_head todo;
552 struct binder_stats stats;
553 struct list_head delivered_death;
554 int max_threads;
555 int requested_threads;
556 int requested_threads_started;
557 int tmp_ref;
558 struct binder_priority default_priority;
559 struct dentry *debugfs_entry;
560 struct binder_alloc alloc;
561 struct binder_context *context;
562 spinlock_t inner_lock;
563 spinlock_t outer_lock;
564 };
565
566 enum {
567 BINDER_LOOPER_STATE_REGISTERED = 0x01,
568 BINDER_LOOPER_STATE_ENTERED = 0x02,
569 BINDER_LOOPER_STATE_EXITED = 0x04,
570 BINDER_LOOPER_STATE_INVALID = 0x08,
571 BINDER_LOOPER_STATE_WAITING = 0x10,
572 BINDER_LOOPER_STATE_POLL = 0x20,
573 };
574
575 /**
576 * struct binder_thread - binder thread bookkeeping
577 * @proc: binder process for this thread
578 * (invariant after initialization)
579 * @rb_node: element for proc->threads rbtree
580 * (protected by @proc->inner_lock)
581 * @waiting_thread_node: element for @proc->waiting_threads list
582 * (protected by @proc->inner_lock)
583 * @pid: PID for this thread
584 * (invariant after initialization)
585 * @looper: bitmap of looping state
586 * (only accessed by this thread)
587 * @looper_need_return: looping thread needs to exit driver
588 * (no lock needed)
589 * @transaction_stack: stack of in-progress transactions for this thread
590 * (protected by @proc->inner_lock)
591 * @todo: list of work to do for this thread
592 * (protected by @proc->inner_lock)
593 * @process_todo: whether work in @todo should be processed
594 * (protected by @proc->inner_lock)
595 * @return_error: transaction errors reported by this thread
596 * (only accessed by this thread)
597 * @reply_error: transaction errors reported by target thread
598 * (protected by @proc->inner_lock)
599 * @wait: wait queue for thread work
600 * @stats: per-thread statistics
601 * (atomics, no lock needed)
602 * @tmp_ref: temporary reference to indicate thread is in use
603 * (atomic since @proc->inner_lock cannot
604 * always be acquired)
605 * @is_dead: thread is dead and awaiting free
606 * when outstanding transactions are cleaned up
607 * (protected by @proc->inner_lock)
608 * @task: struct task_struct for this thread
609 *
610 * Bookkeeping structure for binder threads.
611 */
612 struct binder_thread {
613 struct binder_proc *proc;
614 struct rb_node rb_node;
615 struct list_head waiting_thread_node;
616 int pid;
617 int looper; /* only modified by this thread */
618 bool looper_need_return; /* can be written by other thread */
619 struct binder_transaction *transaction_stack;
620 struct list_head todo;
621 bool process_todo;
622 struct binder_error return_error;
623 struct binder_error reply_error;
624 wait_queue_head_t wait;
625 struct binder_stats stats;
626 atomic_t tmp_ref;
627 bool is_dead;
628 struct task_struct *task;
629 };
630
631 struct binder_transaction {
632 int debug_id;
633 struct binder_work work;
634 struct binder_thread *from;
635 struct binder_transaction *from_parent;
636 struct binder_proc *to_proc;
637 struct binder_thread *to_thread;
638 struct binder_transaction *to_parent;
639 unsigned need_reply:1;
640 /* unsigned is_dead:1; */ /* not used at the moment */
641
642 struct binder_buffer *buffer;
643 unsigned int code;
644 unsigned int flags;
645 struct binder_priority priority;
646 struct binder_priority saved_priority;
647 bool set_priority_called;
648 kuid_t sender_euid;
649 /**
650 * @lock: protects @from, @to_proc, and @to_thread
651 *
652 * @from, @to_proc, and @to_thread can be set to NULL
653 * during thread teardown
654 */
655 spinlock_t lock;
656 };
657
658 /**
659 * binder_proc_lock() - Acquire outer lock for given binder_proc
660 * @proc: struct binder_proc to acquire
661 *
662 * Acquires proc->outer_lock. Used to protect binder_ref
663 * structures associated with the given proc.
664 */
665 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
666 static void
667 _binder_proc_lock(struct binder_proc *proc, int line)
668 {
669 binder_debug(BINDER_DEBUG_SPINLOCKS,
670 "%s: line=%d\n", __func__, line);
671 spin_lock(&proc->outer_lock);
672 }
673
674 /**
675 * binder_proc_unlock() - Release spinlock for given binder_proc
676 * @proc: struct binder_proc to acquire
677 *
678 * Release lock acquired via binder_proc_lock()
679 */
680 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
681 static void
682 _binder_proc_unlock(struct binder_proc *proc, int line)
683 {
684 binder_debug(BINDER_DEBUG_SPINLOCKS,
685 "%s: line=%d\n", __func__, line);
686 spin_unlock(&proc->outer_lock);
687 }
688
689 /**
690 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
691 * @proc: struct binder_proc to acquire
692 *
693 * Acquires proc->inner_lock. Used to protect todo lists
694 */
695 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
696 static void
697 _binder_inner_proc_lock(struct binder_proc *proc, int line)
698 {
699 binder_debug(BINDER_DEBUG_SPINLOCKS,
700 "%s: line=%d\n", __func__, line);
701 spin_lock(&proc->inner_lock);
702 }
703
704 /**
705 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
706 * @proc: struct binder_proc to acquire
707 *
708 * Release lock acquired via binder_inner_proc_lock()
709 */
710 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
711 static void
712 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
713 {
714 binder_debug(BINDER_DEBUG_SPINLOCKS,
715 "%s: line=%d\n", __func__, line);
716 spin_unlock(&proc->inner_lock);
717 }
718
719 /**
720 * binder_node_lock() - Acquire spinlock for given binder_node
721 * @node: struct binder_node to acquire
722 *
723 * Acquires node->lock. Used to protect binder_node fields
724 */
725 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
726 static void
727 _binder_node_lock(struct binder_node *node, int line)
728 {
729 binder_debug(BINDER_DEBUG_SPINLOCKS,
730 "%s: line=%d\n", __func__, line);
731 spin_lock(&node->lock);
732 }
733
734 /**
735 * binder_node_unlock() - Release spinlock for given binder_node
736 * @node: struct binder_node to acquire
737 *
738 * Release lock acquired via binder_node_lock()
739 */
740 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
741 static void
742 _binder_node_unlock(struct binder_node *node, int line)
743 {
744 binder_debug(BINDER_DEBUG_SPINLOCKS,
745 "%s: line=%d\n", __func__, line);
746 spin_unlock(&node->lock);
747 }
748
749 /**
750 * binder_node_inner_lock() - Acquire node and inner locks
751 * @node: struct binder_node to acquire
752 *
753 * Acquires node->lock. If node->proc is non-NULL, also acquires
754 * proc->inner_lock. Used to protect binder_node fields
755 */
756 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
757 static void
758 _binder_node_inner_lock(struct binder_node *node, int line)
759 {
760 binder_debug(BINDER_DEBUG_SPINLOCKS,
761 "%s: line=%d\n", __func__, line);
762 spin_lock(&node->lock);
763 if (node->proc)
764 binder_inner_proc_lock(node->proc);
765 }
766
767 /**
768 * binder_node_inner_unlock() - Release node and inner locks
769 * @node: struct binder_node to acquire
770 *
771 * Release locks acquired via binder_node_inner_lock()
772 */
773 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
774 static void
775 _binder_node_inner_unlock(struct binder_node *node, int line)
776 {
777 struct binder_proc *proc = node->proc;
778
779 binder_debug(BINDER_DEBUG_SPINLOCKS,
780 "%s: line=%d\n", __func__, line);
781 if (proc)
782 binder_inner_proc_unlock(proc);
783 spin_unlock(&node->lock);
784 }
785
786 static bool binder_worklist_empty_ilocked(struct list_head *list)
787 {
788 return list_empty(list);
789 }
790
791 /**
792 * binder_worklist_empty() - Check if no items on the work list
793 * @proc: binder_proc associated with list
794 * @list: list to check
795 *
796 * Return: true if there are no items on list, else false
797 */
798 static bool binder_worklist_empty(struct binder_proc *proc,
799 struct list_head *list)
800 {
801 bool ret;
802
803 binder_inner_proc_lock(proc);
804 ret = binder_worklist_empty_ilocked(list);
805 binder_inner_proc_unlock(proc);
806 return ret;
807 }
808
809 /**
810 * binder_enqueue_work_ilocked() - Add an item to the work list
811 * @work: struct binder_work to add to list
812 * @target_list: list to add work to
813 *
814 * Adds the work to the specified list. Asserts that work
815 * is not already on a list.
816 *
817 * Requires the proc->inner_lock to be held.
818 */
819 static void
820 binder_enqueue_work_ilocked(struct binder_work *work,
821 struct list_head *target_list)
822 {
823 BUG_ON(target_list == NULL);
824 BUG_ON(work->entry.next && !list_empty(&work->entry));
825 list_add_tail(&work->entry, target_list);
826 }
827
828 /**
829 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
830 * @thread: thread to queue work to
831 * @work: struct binder_work to add to list
832 *
833 * Adds the work to the todo list of the thread. Doesn't set the process_todo
834 * flag, which means that (if it wasn't already set) the thread will go to
835 * sleep without handling this work when it calls read.
836 *
837 * Requires the proc->inner_lock to be held.
838 */
839 static void
840 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
841 struct binder_work *work)
842 {
843 binder_enqueue_work_ilocked(work, &thread->todo);
844 }
845
846 /**
847 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
848 * @thread: thread to queue work to
849 * @work: struct binder_work to add to list
850 *
851 * Adds the work to the todo list of the thread, and enables processing
852 * of the todo queue.
853 *
854 * Requires the proc->inner_lock to be held.
855 */
856 static void
857 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
858 struct binder_work *work)
859 {
860 binder_enqueue_work_ilocked(work, &thread->todo);
861 thread->process_todo = true;
862 }
863
864 /**
865 * binder_enqueue_thread_work() - Add an item to the thread work list
866 * @thread: thread to queue work to
867 * @work: struct binder_work to add to list
868 *
869 * Adds the work to the todo list of the thread, and enables processing
870 * of the todo queue.
871 */
872 static void
873 binder_enqueue_thread_work(struct binder_thread *thread,
874 struct binder_work *work)
875 {
876 binder_inner_proc_lock(thread->proc);
877 binder_enqueue_thread_work_ilocked(thread, work);
878 binder_inner_proc_unlock(thread->proc);
879 }
880
881 static void
882 binder_dequeue_work_ilocked(struct binder_work *work)
883 {
884 list_del_init(&work->entry);
885 }
886
887 /**
888 * binder_dequeue_work() - Removes an item from the work list
889 * @proc: binder_proc associated with list
890 * @work: struct binder_work to remove from list
891 *
892 * Removes the specified work item from whatever list it is on.
893 * Can safely be called if work is not on any list.
894 */
895 static void
896 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
897 {
898 binder_inner_proc_lock(proc);
899 binder_dequeue_work_ilocked(work);
900 binder_inner_proc_unlock(proc);
901 }
902
903 static struct binder_work *binder_dequeue_work_head_ilocked(
904 struct list_head *list)
905 {
906 struct binder_work *w;
907
908 w = list_first_entry_or_null(list, struct binder_work, entry);
909 if (w)
910 list_del_init(&w->entry);
911 return w;
912 }
913
914 /**
915 * binder_dequeue_work_head() - Dequeues the item at head of list
916 * @proc: binder_proc associated with list
917 * @list: list to dequeue head
918 *
919 * Removes the head of the list if there are items on the list
920 *
921 * Return: pointer to dequeued binder_work, NULL if list was empty
922 */
923 static struct binder_work *binder_dequeue_work_head(
924 struct binder_proc *proc,
925 struct list_head *list)
926 {
927 struct binder_work *w;
928
929 binder_inner_proc_lock(proc);
930 w = binder_dequeue_work_head_ilocked(list);
931 binder_inner_proc_unlock(proc);
932 return w;
933 }
934
935 static void
936 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
937 static void binder_free_thread(struct binder_thread *thread);
938 static void binder_free_proc(struct binder_proc *proc);
939 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
940
941 struct files_struct *binder_get_files_struct(struct binder_proc *proc)
942 {
943 return get_files_struct(proc->tsk);
944 }
945
946 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
947 {
948 struct files_struct *files;
949 unsigned long rlim_cur;
950 unsigned long irqs;
951 int ret;
952
953 files = binder_get_files_struct(proc);
954 if (files == NULL)
955 return -ESRCH;
956
957 if (!lock_task_sighand(proc->tsk, &irqs)) {
958 ret = -EMFILE;
959 goto err;
960 }
961
962 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
963 unlock_task_sighand(proc->tsk, &irqs);
964
965 ret = __alloc_fd(files, 0, rlim_cur, flags);
966 err:
967 put_files_struct(files);
968 return ret;
969 }
970
971 /*
972 * copied from fd_install
973 */
974 static void task_fd_install(
975 struct binder_proc *proc, unsigned int fd, struct file *file)
976 {
977 struct files_struct *files = binder_get_files_struct(proc);
978
979 if (files) {
980 __fd_install(files, fd, file);
981 put_files_struct(files);
982 }
983 }
984
985 /*
986 * copied from sys_close
987 */
988 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
989 {
990 struct files_struct *files = binder_get_files_struct(proc);
991 int retval;
992
993 if (files == NULL)
994 return -ESRCH;
995
996 retval = __close_fd(files, fd);
997 /* can't restart close syscall because file table entry was cleared */
998 if (unlikely(retval == -ERESTARTSYS ||
999 retval == -ERESTARTNOINTR ||
1000 retval == -ERESTARTNOHAND ||
1001 retval == -ERESTART_RESTARTBLOCK))
1002 retval = -EINTR;
1003 put_files_struct(files);
1004
1005 return retval;
1006 }
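/*
 * Illustrative sketch (not part of the driver): how the three fd helpers
 * above cooperate when an fd is translated into a target process; "file"
 * stands in for the sender's struct file and error handling is elided.
 *
 *	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *	if (target_fd >= 0)
 *		task_fd_install(target_proc, target_fd, file);
 *	...
 *	task_close_fd(target_proc, target_fd);	on cleanup/error paths
 */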
1007
1008 static bool binder_has_work_ilocked(struct binder_thread *thread,
1009 bool do_proc_work)
1010 {
1011 return thread->process_todo ||
1012 thread->looper_need_return ||
1013 (do_proc_work &&
1014 !binder_worklist_empty_ilocked(&thread->proc->todo));
1015 }
1016
1017 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
1018 {
1019 bool has_work;
1020
1021 binder_inner_proc_lock(thread->proc);
1022 has_work = binder_has_work_ilocked(thread, do_proc_work);
1023 binder_inner_proc_unlock(thread->proc);
1024
1025 return has_work;
1026 }
1027
1028 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1029 {
1030 return !thread->transaction_stack &&
1031 binder_worklist_empty_ilocked(&thread->todo) &&
1032 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1033 BINDER_LOOPER_STATE_REGISTERED));
1034 }
1035
1036 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1037 bool sync)
1038 {
1039 struct rb_node *n;
1040 struct binder_thread *thread;
1041
1042 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1043 thread = rb_entry(n, struct binder_thread, rb_node);
1044 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1045 binder_available_for_proc_work_ilocked(thread)) {
1046 if (sync)
1047 wake_up_interruptible_sync(&thread->wait);
1048 else
1049 wake_up_interruptible(&thread->wait);
1050 }
1051 }
1052 }
1053
1054 /**
1055 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1056 * @proc: process to select a thread from
1057 *
1058 * Note that calling this function moves the thread off the waiting_threads
1059 * list, so it can only be woken up by the caller of this function, or a
1060 * signal. Therefore, callers *should* always wake up the thread this function
1061 * returns.
1062 *
1063 * Return: If there's a thread currently waiting for process work,
1064 * returns that thread. Otherwise returns NULL.
1065 */
1066 static struct binder_thread *
1067 binder_select_thread_ilocked(struct binder_proc *proc)
1068 {
1069 struct binder_thread *thread;
1070
1071 assert_spin_locked(&proc->inner_lock);
1072 thread = list_first_entry_or_null(&proc->waiting_threads,
1073 struct binder_thread,
1074 waiting_thread_node);
1075
1076 if (thread)
1077 list_del_init(&thread->waiting_thread_node);
1078
1079 return thread;
1080 }
1081
1082 /**
1083 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1084 * @proc: process to wake up a thread in
1085 * @thread: specific thread to wake-up (may be NULL)
1086 * @sync: whether to do a synchronous wake-up
1087 *
1088 * This function wakes up a thread in the @proc process.
1089 * The caller may provide a specific thread to wake-up in
1090 * the @thread parameter. If @thread is NULL, this function
1091 * will wake up threads that have called poll().
1092 *
1093 * Note that for this function to work as expected, callers
1094 * should first call binder_select_thread() to find a thread
1095 * to handle the work (if they don't have a thread already),
1096 * and pass the result into the @thread parameter.
1097 */
1098 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1099 struct binder_thread *thread,
1100 bool sync)
1101 {
1102 assert_spin_locked(&proc->inner_lock);
1103
1104 if (thread) {
1105 if (sync)
1106 wake_up_interruptible_sync(&thread->wait);
1107 else
1108 wake_up_interruptible(&thread->wait);
1109 return;
1110 }
1111
1112 /* Didn't find a thread waiting for proc work; this can happen
1113 * in two scenarios:
1114 * 1. All threads are busy handling transactions
1115 * In that case, one of those threads should call back into
1116 * the kernel driver soon and pick up this work.
1117 * 2. Threads are using the (e)poll interface, in which case
1118 * they may be blocked on the waitqueue without having been
1119 * added to waiting_threads. For this case, we just iterate
1120 * over all threads not handling transaction work, and
1121 * wake them all up. We wake all because we don't know whether
1122 * a thread that called into (e)poll is handling non-binder
1123 * work currently.
1124 */
1125 binder_wakeup_poll_threads_ilocked(proc, sync);
1126 }
1127
1128 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1129 {
1130 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1131
1132 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1133 }
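/*
 * Illustrative sketch (not part of the driver): the select-then-wake pattern
 * that binder_wakeup_thread_ilocked() expects from callers that queue their
 * own work; "work" is a placeholder and everything runs under
 * proc->inner_lock.
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	if (thread)
 *		binder_enqueue_thread_work_ilocked(thread, work);
 *	else
 *		binder_enqueue_work_ilocked(work, &proc->todo);
 *	binder_wakeup_thread_ilocked(proc, thread, false);
 *	binder_inner_proc_unlock(proc);
 */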
1134
1135 static bool is_rt_policy(int policy)
1136 {
1137 return policy == SCHED_FIFO || policy == SCHED_RR;
1138 }
1139
1140 static bool is_fair_policy(int policy)
1141 {
1142 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1143 }
1144
1145 static bool binder_supported_policy(int policy)
1146 {
1147 return is_fair_policy(policy) || is_rt_policy(policy);
1148 }
1149
1150 static int to_userspace_prio(int policy, int kernel_priority)
1151 {
1152 if (is_fair_policy(policy))
1153 return PRIO_TO_NICE(kernel_priority);
1154 else
1155 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1156 }
1157
1158 static int to_kernel_prio(int policy, int user_priority)
1159 {
1160 if (is_fair_policy(policy))
1161 return NICE_TO_PRIO(user_priority);
1162 else
1163 return MAX_USER_RT_PRIO - 1 - user_priority;
1164 }
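/*
 * Worked example for the two mappings above, assuming the standard kernel
 * conventions NICE_TO_PRIO(n) == 120 + n, PRIO_TO_NICE(p) == p - 120 and
 * MAX_USER_RT_PRIO == 100:
 *
 *	to_userspace_prio(SCHED_NORMAL, 110) == -10	(nice -10)
 *	to_kernel_prio(SCHED_NORMAL, -10)    == 110
 *
 *	to_userspace_prio(SCHED_FIFO, 98)    == 1	(rt priority 1)
 *	to_kernel_prio(SCHED_FIFO, 1)        == 98
 *
 * so the two helpers are inverses of each other for a given policy.
 */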
1165
1166 static void binder_do_set_priority(struct task_struct *task,
1167 struct binder_priority desired,
1168 bool verify)
1169 {
1170 int priority; /* user-space prio value */
1171 bool has_cap_nice;
1172 unsigned int policy = desired.sched_policy;
1173
1174 if (task->policy == policy && task->normal_prio == desired.prio)
1175 return;
1176
1177 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1178
1179 priority = to_userspace_prio(policy, desired.prio);
1180
1181 if (verify && is_rt_policy(policy) && !has_cap_nice) {
1182 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1183
1184 if (max_rtprio == 0) {
1185 policy = SCHED_NORMAL;
1186 priority = MIN_NICE;
1187 } else if (priority > max_rtprio) {
1188 priority = max_rtprio;
1189 }
1190 }
1191
1192 if (verify && is_fair_policy(policy) && !has_cap_nice) {
1193 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
1194
1195 if (min_nice > MAX_NICE) {
1196 binder_user_error("%d RLIMIT_NICE not set\n",
1197 task->pid);
1198 return;
1199 } else if (priority < min_nice) {
1200 priority = min_nice;
1201 }
1202 }
1203
1204 if (policy != desired.sched_policy ||
1205 to_kernel_prio(policy, priority) != desired.prio)
1206 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1207 "%d: priority %d not allowed, using %d instead\n",
1208 task->pid, desired.prio,
1209 to_kernel_prio(policy, priority));
1210
1211 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1212 to_kernel_prio(policy, priority),
1213 desired.prio);
1214
1215 /* Set the actual priority */
1216 if (task->policy != policy || is_rt_policy(policy)) {
1217 struct sched_param params;
1218
1219 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1220
1221 sched_setscheduler_nocheck(task,
1222 policy | SCHED_RESET_ON_FORK,
1223 &params);
1224 }
1225 if (is_fair_policy(policy))
1226 set_user_nice(task, priority);
1227 }
1228
1229 static void binder_set_priority(struct task_struct *task,
1230 struct binder_priority desired)
1231 {
1232 binder_do_set_priority(task, desired, /* verify = */ true);
1233 }
1234
1235 static void binder_restore_priority(struct task_struct *task,
1236 struct binder_priority desired)
1237 {
1238 binder_do_set_priority(task, desired, /* verify = */ false);
1239 }
1240
1241 static void binder_transaction_priority(struct task_struct *task,
1242 struct binder_transaction *t,
1243 struct binder_priority node_prio,
1244 bool inherit_rt)
1245 {
1246 struct binder_priority desired_prio = t->priority;
1247
1248 if (t->set_priority_called)
1249 return;
1250
1251 t->set_priority_called = true;
1252 t->saved_priority.sched_policy = task->policy;
1253 t->saved_priority.prio = task->normal_prio;
1254
1255 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1256 desired_prio.prio = NICE_TO_PRIO(0);
1257 desired_prio.sched_policy = SCHED_NORMAL;
1258 }
1259
1260 if (node_prio.prio < t->priority.prio ||
1261 (node_prio.prio == t->priority.prio &&
1262 node_prio.sched_policy == SCHED_FIFO)) {
1263 /*
1264 * In case the minimum priority on the node is
1265 * higher (lower value), use that priority. If
1266 * the priority is the same, but the node uses
1267 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1268 * run unbounded, unlike SCHED_RR.
1269 */
1270 desired_prio = node_prio;
1271 }
1272
1273 binder_set_priority(task, desired_prio);
1274 }
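/*
 * Worked example for the clamp above: if the node was created with a
 * SCHED_NORMAL minimum of nice -10 (kernel prio 110) and the transaction
 * carries nice 0 (kernel prio 120), node_prio.prio is the smaller value, so
 * the target task is boosted to nice -10. Lower kernel prio values mean
 * higher priority throughout this file.
 */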
1275
1276 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1277 binder_uintptr_t ptr)
1278 {
1279 struct rb_node *n = proc->nodes.rb_node;
1280 struct binder_node *node;
1281
1282 assert_spin_locked(&proc->inner_lock);
1283
1284 while (n) {
1285 node = rb_entry(n, struct binder_node, rb_node);
1286
1287 if (ptr < node->ptr)
1288 n = n->rb_left;
1289 else if (ptr > node->ptr)
1290 n = n->rb_right;
1291 else {
1292 /*
1293 * take an implicit weak reference
1294 * to ensure node stays alive until
1295 * call to binder_put_node()
1296 */
1297 binder_inc_node_tmpref_ilocked(node);
1298 return node;
1299 }
1300 }
1301 return NULL;
1302 }
1303
1304 static struct binder_node *binder_get_node(struct binder_proc *proc,
1305 binder_uintptr_t ptr)
1306 {
1307 struct binder_node *node;
1308
1309 binder_inner_proc_lock(proc);
1310 node = binder_get_node_ilocked(proc, ptr);
1311 binder_inner_proc_unlock(proc);
1312 return node;
1313 }
1314
1315 static struct binder_node *binder_init_node_ilocked(
1316 struct binder_proc *proc,
1317 struct binder_node *new_node,
1318 struct flat_binder_object *fp)
1319 {
1320 struct rb_node **p = &proc->nodes.rb_node;
1321 struct rb_node *parent = NULL;
1322 struct binder_node *node;
1323 binder_uintptr_t ptr = fp ? fp->binder : 0;
1324 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1325 __u32 flags = fp ? fp->flags : 0;
1326 s8 priority;
1327
1328 assert_spin_locked(&proc->inner_lock);
1329
1330 while (*p) {
1331
1332 parent = *p;
1333 node = rb_entry(parent, struct binder_node, rb_node);
1334
1335 if (ptr < node->ptr)
1336 p = &(*p)->rb_left;
1337 else if (ptr > node->ptr)
1338 p = &(*p)->rb_right;
1339 else {
1340 /*
1341 * A matching node is already in
1342 * the rb tree. Abandon the init
1343 * and return it.
1344 */
1345 binder_inc_node_tmpref_ilocked(node);
1346 return node;
1347 }
1348 }
1349 node = new_node;
1350 binder_stats_created(BINDER_STAT_NODE);
1351 node->tmp_refs++;
1352 rb_link_node(&node->rb_node, parent, p);
1353 rb_insert_color(&node->rb_node, &proc->nodes);
1354 node->debug_id = atomic_inc_return(&binder_last_id);
1355 node->proc = proc;
1356 node->ptr = ptr;
1357 node->cookie = cookie;
1358 node->work.type = BINDER_WORK_NODE;
1359 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1360 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
1361 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1362 node->min_priority = to_kernel_prio(node->sched_policy, priority);
1363 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1364 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
1365 spin_lock_init(&node->lock);
1366 INIT_LIST_HEAD(&node->work.entry);
1367 INIT_LIST_HEAD(&node->async_todo);
1368 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1369 "%d:%d node %d u%016llx c%016llx created\n",
1370 proc->pid, current->pid, node->debug_id,
1371 (u64)node->ptr, (u64)node->cookie);
1372
1373 return node;
1374 }
1375
1376 static struct binder_node *binder_new_node(struct binder_proc *proc,
1377 struct flat_binder_object *fp)
1378 {
1379 struct binder_node *node;
1380 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1381
1382 if (!new_node)
1383 return NULL;
1384 binder_inner_proc_lock(proc);
1385 node = binder_init_node_ilocked(proc, new_node, fp);
1386 binder_inner_proc_unlock(proc);
1387 if (node != new_node)
1388 /*
1389 * The node was already added by another thread
1390 */
1391 kfree(new_node);
1392
1393 return node;
1394 }
1395
1396 static void binder_free_node(struct binder_node *node)
1397 {
1398 kfree(node);
1399 binder_stats_deleted(BINDER_STAT_NODE);
1400 }
1401
1402 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1403 int internal,
1404 struct list_head *target_list)
1405 {
1406 struct binder_proc *proc = node->proc;
1407
1408 assert_spin_locked(&node->lock);
1409 if (proc)
1410 assert_spin_locked(&proc->inner_lock);
1411 if (strong) {
1412 if (internal) {
1413 if (target_list == NULL &&
1414 node->internal_strong_refs == 0 &&
1415 !(node->proc &&
1416 node == node->proc->context->
1417 binder_context_mgr_node &&
1418 node->has_strong_ref)) {
1419 pr_err("invalid inc strong node for %d\n",
1420 node->debug_id);
1421 return -EINVAL;
1422 }
1423 node->internal_strong_refs++;
1424 } else
1425 node->local_strong_refs++;
1426 if (!node->has_strong_ref && target_list) {
1427 binder_dequeue_work_ilocked(&node->work);
1428 /*
1429 * Note: this function is the only place where we queue
1430 * directly to a thread->todo without using the
1431 * corresponding binder_enqueue_thread_work() helper
1432 * functions; in this case it's ok to not set the
1433 * process_todo flag, since we know this node work will
1434 * always be followed by other work that starts queue
1435 * processing: in case of synchronous transactions, a
1436 * BR_REPLY or BR_ERROR; in case of oneway
1437 * transactions, a BR_TRANSACTION_COMPLETE.
1438 */
1439 binder_enqueue_work_ilocked(&node->work, target_list);
1440 }
1441 } else {
1442 if (!internal)
1443 node->local_weak_refs++;
1444 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1445 if (target_list == NULL) {
1446 pr_err("invalid inc weak node for %d\n",
1447 node->debug_id);
1448 return -EINVAL;
1449 }
1450 /*
1451 * See comment above
1452 */
1453 binder_enqueue_work_ilocked(&node->work, target_list);
1454 }
1455 }
1456 return 0;
1457 }
1458
1459 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1460 struct list_head *target_list)
1461 {
1462 int ret;
1463
1464 binder_node_inner_lock(node);
1465 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1466 binder_node_inner_unlock(node);
1467
1468 return ret;
1469 }
1470
1471 static bool binder_dec_node_nilocked(struct binder_node *node,
1472 int strong, int internal)
1473 {
1474 struct binder_proc *proc = node->proc;
1475
1476 assert_spin_locked(&node->lock);
1477 if (proc)
1478 assert_spin_locked(&proc->inner_lock);
1479 if (strong) {
1480 if (internal)
1481 node->internal_strong_refs--;
1482 else
1483 node->local_strong_refs--;
1484 if (node->local_strong_refs || node->internal_strong_refs)
1485 return false;
1486 } else {
1487 if (!internal)
1488 node->local_weak_refs--;
1489 if (node->local_weak_refs || node->tmp_refs ||
1490 !hlist_empty(&node->refs))
1491 return false;
1492 }
1493
1494 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1495 if (list_empty(&node->work.entry)) {
1496 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1497 binder_wakeup_proc_ilocked(proc);
1498 }
1499 } else {
1500 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1501 !node->local_weak_refs && !node->tmp_refs) {
1502 if (proc) {
1503 binder_dequeue_work_ilocked(&node->work);
1504 rb_erase(&node->rb_node, &proc->nodes);
1505 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1506 "refless node %d deleted\n",
1507 node->debug_id);
1508 } else {
1509 BUG_ON(!list_empty(&node->work.entry));
1510 spin_lock(&binder_dead_nodes_lock);
1511 /*
1512 * tmp_refs could have changed so
1513 * check it again
1514 */
1515 if (node->tmp_refs) {
1516 spin_unlock(&binder_dead_nodes_lock);
1517 return false;
1518 }
1519 hlist_del(&node->dead_node);
1520 spin_unlock(&binder_dead_nodes_lock);
1521 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1522 "dead node %d deleted\n",
1523 node->debug_id);
1524 }
1525 return true;
1526 }
1527 }
1528 return false;
1529 }
1530
1531 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1532 {
1533 bool free_node;
1534
1535 binder_node_inner_lock(node);
1536 free_node = binder_dec_node_nilocked(node, strong, internal);
1537 binder_node_inner_unlock(node);
1538 if (free_node)
1539 binder_free_node(node);
1540 }
1541
1542 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1543 {
1544 /*
1545 * No call to binder_inc_node() is needed since we
1546 * don't need to inform userspace of any changes to
1547 * tmp_refs
1548 */
1549 node->tmp_refs++;
1550 }
1551
1552 /**
1553 * binder_inc_node_tmpref() - take a temporary reference on node
1554 * @node: node to reference
1555 *
1556 * Take reference on node to prevent the node from being freed
1557 * while referenced only by a local variable. The inner lock is
1558 * needed to serialize with the node work on the queue (which
1559 * isn't needed after the node is dead). If the node is dead
1560 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1561 * node->tmp_refs against dead-node-only cases where the node
1562 * lock cannot be acquired (eg traversing the dead node list to
1563 * print nodes)
1564 */
1565 static void binder_inc_node_tmpref(struct binder_node *node)
1566 {
1567 binder_node_lock(node);
1568 if (node->proc)
1569 binder_inner_proc_lock(node->proc);
1570 else
1571 spin_lock(&binder_dead_nodes_lock);
1572 binder_inc_node_tmpref_ilocked(node);
1573 if (node->proc)
1574 binder_inner_proc_unlock(node->proc);
1575 else
1576 spin_unlock(&binder_dead_nodes_lock);
1577 binder_node_unlock(node);
1578 }
1579
1580 /**
1581 * binder_dec_node_tmpref() - remove a temporary reference on node
1582 * @node: node to reference
1583 *
1584 * Release temporary reference on node taken via binder_inc_node_tmpref()
1585 */
1586 static void binder_dec_node_tmpref(struct binder_node *node)
1587 {
1588 bool free_node;
1589
1590 binder_node_inner_lock(node);
1591 if (!node->proc)
1592 spin_lock(&binder_dead_nodes_lock);
1593 node->tmp_refs--;
1594 BUG_ON(node->tmp_refs < 0);
1595 if (!node->proc)
1596 spin_unlock(&binder_dead_nodes_lock);
1597 /*
1598 * Call binder_dec_node() to check if all refcounts are 0
1599 * and cleanup is needed. Calling with strong=0 and internal=1
1600 * causes no actual reference to be released in binder_dec_node().
1601 * If that changes, a change is needed here too.
1602 */
1603 free_node = binder_dec_node_nilocked(node, 0, 1);
1604 binder_node_inner_unlock(node);
1605 if (free_node)
1606 binder_free_node(node);
1607 }
1608
1609 static void binder_put_node(struct binder_node *node)
1610 {
1611 binder_dec_node_tmpref(node);
1612 }
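/*
 * Illustrative sketch (not part of the driver): the temporary-reference
 * pattern supported by the helpers above. binder_get_node() takes a tmp_ref
 * on success, so a lookup is normally paired with binder_put_node():
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... use node without holding proc->inner_lock ...
 *		binder_put_node(node);
 *	}
 */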
1613
1614 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1615 u32 desc, bool need_strong_ref)
1616 {
1617 struct rb_node *n = proc->refs_by_desc.rb_node;
1618 struct binder_ref *ref;
1619
1620 while (n) {
1621 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1622
1623 if (desc < ref->data.desc) {
1624 n = n->rb_left;
1625 } else if (desc > ref->data.desc) {
1626 n = n->rb_right;
1627 } else if (need_strong_ref && !ref->data.strong) {
1628 binder_user_error("tried to use weak ref as strong ref\n");
1629 return NULL;
1630 } else {
1631 return ref;
1632 }
1633 }
1634 return NULL;
1635 }
1636
1637 /**
1638 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1639 * @proc: binder_proc that owns the ref
1640 * @node: binder_node of target
1641 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1642 *
1643 * Look up the ref for the given node and return it if it exists
1644 *
1645 * If it doesn't exist and the caller provides a newly allocated
1646 * ref, initialize the fields of the newly allocated ref and insert
1647 * into the given proc rb_trees and node refs list.
1648 *
1649 * Return: the ref for node. It is possible that another thread
1650 * allocated/initialized the ref first in which case the
1651 * returned ref would be different than the passed-in
1652 * new_ref. new_ref must be kfree'd by the caller in
1653 * this case.
1654 */
1655 static struct binder_ref *binder_get_ref_for_node_olocked(
1656 struct binder_proc *proc,
1657 struct binder_node *node,
1658 struct binder_ref *new_ref)
1659 {
1660 struct binder_context *context = proc->context;
1661 struct rb_node **p = &proc->refs_by_node.rb_node;
1662 struct rb_node *parent = NULL;
1663 struct binder_ref *ref;
1664 struct rb_node *n;
1665
1666 while (*p) {
1667 parent = *p;
1668 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1669
1670 if (node < ref->node)
1671 p = &(*p)->rb_left;
1672 else if (node > ref->node)
1673 p = &(*p)->rb_right;
1674 else
1675 return ref;
1676 }
1677 if (!new_ref)
1678 return NULL;
1679
1680 binder_stats_created(BINDER_STAT_REF);
1681 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1682 new_ref->proc = proc;
1683 new_ref->node = node;
1684 rb_link_node(&new_ref->rb_node_node, parent, p);
1685 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1686
1687 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1688 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1689 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1690 if (ref->data.desc > new_ref->data.desc)
1691 break;
1692 new_ref->data.desc = ref->data.desc + 1;
1693 }
1694
1695 p = &proc->refs_by_desc.rb_node;
1696 while (*p) {
1697 parent = *p;
1698 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1699
1700 if (new_ref->data.desc < ref->data.desc)
1701 p = &(*p)->rb_left;
1702 else if (new_ref->data.desc > ref->data.desc)
1703 p = &(*p)->rb_right;
1704 else
1705 BUG();
1706 }
1707 rb_link_node(&new_ref->rb_node_desc, parent, p);
1708 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1709
1710 binder_node_lock(node);
1711 hlist_add_head(&new_ref->node_entry, &node->refs);
1712
1713 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1714 "%d new ref %d desc %d for node %d\n",
1715 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1716 node->debug_id);
1717 binder_node_unlock(node);
1718 return new_ref;
1719 }
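
/*
 * Illustrative sketch (not part of the driver): the loop above assigns the
 * lowest unused descriptor, with desc 0 reserved for refs to the context
 * manager node. The same scan over a sorted array of in-use descriptors,
 * using hypothetical names, looks like this:
 */
#if 0
static u32 example_lowest_free_desc(const u32 *in_use_sorted, size_t count)
{
	u32 desc = 1;	/* 0 is reserved for the context manager */
	size_t i;

	for (i = 0; i < count; i++) {
		if (in_use_sorted[i] > desc)
			break;			/* found a gap at 'desc' */
		desc = in_use_sorted[i] + 1;
	}
	return desc;
}
#endif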
1720
1721 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1722 {
1723 bool delete_node = false;
1724
1725 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1726 "%d delete ref %d desc %d for node %d\n",
1727 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1728 ref->node->debug_id);
1729
1730 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1731 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1732
1733 binder_node_inner_lock(ref->node);
1734 if (ref->data.strong)
1735 binder_dec_node_nilocked(ref->node, 1, 1);
1736
1737 hlist_del(&ref->node_entry);
1738 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1739 binder_node_inner_unlock(ref->node);
1740 /*
1741 * Clear ref->node unless we want the caller to free the node
1742 */
1743 if (!delete_node) {
1744 /*
1745 * The caller uses ref->node to determine
1746 * whether the node needs to be freed. Clear
1747 * it since the node is still alive.
1748 */
1749 ref->node = NULL;
1750 }
1751
1752 if (ref->death) {
1753 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1754 "%d delete ref %d desc %d has death notification\n",
1755 ref->proc->pid, ref->data.debug_id,
1756 ref->data.desc);
1757 binder_dequeue_work(ref->proc, &ref->death->work);
1758 binder_stats_deleted(BINDER_STAT_DEATH);
1759 }
1760 binder_stats_deleted(BINDER_STAT_REF);
1761 }
1762
1763 /**
1764 * binder_inc_ref_olocked() - increment the ref for given handle
1765 * @ref: ref to be incremented
1766 * @strong: if true, strong increment, else weak
1767 * @target_list: list to queue node work on
1768 *
1769 * Increment the ref. @ref->proc->outer_lock must be held on entry
1770 *
1771 * Return: 0, if successful, else errno
1772 */
1773 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1774 struct list_head *target_list)
1775 {
1776 int ret;
1777
1778 if (strong) {
1779 if (ref->data.strong == 0) {
1780 ret = binder_inc_node(ref->node, 1, 1, target_list);
1781 if (ret)
1782 return ret;
1783 }
1784 ref->data.strong++;
1785 } else {
1786 if (ref->data.weak == 0) {
1787 ret = binder_inc_node(ref->node, 0, 1, target_list);
1788 if (ret)
1789 return ret;
1790 }
1791 ref->data.weak++;
1792 }
1793 return 0;
1794 }
1795
1796 /**
1797 * binder_dec_ref_olocked() - dec the ref for given handle
1798 * @ref: ref to be decremented
1799 * @strong: if true, strong decrement, else weak
1800 *
1801 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1802 *
1803 * Return: true if ref is cleaned up and ready to be freed
1804 */
1805 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1806 {
1807 if (strong) {
1808 if (ref->data.strong == 0) {
1809 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1810 ref->proc->pid, ref->data.debug_id,
1811 ref->data.desc, ref->data.strong,
1812 ref->data.weak);
1813 return false;
1814 }
1815 ref->data.strong--;
1816 if (ref->data.strong == 0)
1817 binder_dec_node(ref->node, strong, 1);
1818 } else {
1819 if (ref->data.weak == 0) {
1820 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1821 ref->proc->pid, ref->data.debug_id,
1822 ref->data.desc, ref->data.strong,
1823 ref->data.weak);
1824 return false;
1825 }
1826 ref->data.weak--;
1827 }
1828 if (ref->data.strong == 0 && ref->data.weak == 0) {
1829 binder_cleanup_ref_olocked(ref);
1830 return true;
1831 }
1832 return false;
1833 }
1834
1835 /**
1836 * binder_get_node_from_ref() - get the node from the given proc/desc
1837 * @proc: proc containing the ref
1838 * @desc: the handle associated with the ref
1839 * @need_strong_ref: if true, only return node if ref is strong
1840 * @rdata: the id/refcount data for the ref
1841 *
1842 * Given a proc and ref handle, return the associated binder_node
1843 *
1844 * Return: a binder_node, or NULL if the ref was not found or is only weak when a strong ref is required
1845 */
1846 static struct binder_node *binder_get_node_from_ref(
1847 struct binder_proc *proc,
1848 u32 desc, bool need_strong_ref,
1849 struct binder_ref_data *rdata)
1850 {
1851 struct binder_node *node;
1852 struct binder_ref *ref;
1853
1854 binder_proc_lock(proc);
1855 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1856 if (!ref)
1857 goto err_no_ref;
1858 node = ref->node;
1859 /*
1860 * Take an implicit reference on the node to ensure
1861 * it stays alive until the call to binder_put_node()
1862 */
1863 binder_inc_node_tmpref(node);
1864 if (rdata)
1865 *rdata = ref->data;
1866 binder_proc_unlock(proc);
1867
1868 return node;
1869
1870 err_no_ref:
1871 binder_proc_unlock(proc);
1872 return NULL;
1873 }
1874
1875 /**
1876 * binder_free_ref() - free the binder_ref
1877 * @ref: ref to free
1878 *
1879 * Free the binder_ref. Free the binder_node indicated by ref->node
1880 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1881 */
1882 static void binder_free_ref(struct binder_ref *ref)
1883 {
1884 if (ref->node)
1885 binder_free_node(ref->node);
1886 kfree(ref->death);
1887 kfree(ref);
1888 }
1889
1890 /**
1891 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1892 * @proc: proc containing the ref
1893 * @desc: the handle associated with the ref
1894 * @increment: true=inc reference, false=dec reference
1895 * @strong: true=strong reference, false=weak reference
1896 * @rdata: the id/refcount data for the ref
1897 *
1898 * Given a proc and ref handle, increment or decrement the ref
1899 * according to "increment" arg.
1900 *
1901 * Return: 0 if successful, else errno
1902 */
1903 static int binder_update_ref_for_handle(struct binder_proc *proc,
1904 uint32_t desc, bool increment, bool strong,
1905 struct binder_ref_data *rdata)
1906 {
1907 int ret = 0;
1908 struct binder_ref *ref;
1909 bool delete_ref = false;
1910
1911 binder_proc_lock(proc);
1912 ref = binder_get_ref_olocked(proc, desc, strong);
1913 if (!ref) {
1914 ret = -EINVAL;
1915 goto err_no_ref;
1916 }
1917 if (increment)
1918 ret = binder_inc_ref_olocked(ref, strong, NULL);
1919 else
1920 delete_ref = binder_dec_ref_olocked(ref, strong);
1921
1922 if (rdata)
1923 *rdata = ref->data;
1924 binder_proc_unlock(proc);
1925
1926 if (delete_ref)
1927 binder_free_ref(ref);
1928 return ret;
1929
1930 err_no_ref:
1931 binder_proc_unlock(proc);
1932 return ret;
1933 }
1934
1935 /**
1936 * binder_dec_ref_for_handle() - dec the ref for given handle
1937 * @proc: proc containing the ref
1938 * @desc: the handle associated with the ref
1939 * @strong: true=strong reference, false=weak reference
1940 * @rdata: the id/refcount data for the ref
1941 *
1942 * Just calls binder_update_ref_for_handle() to decrement the ref.
1943 *
1944 * Return: 0 if successful, else errno
1945 */
1946 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1947 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1948 {
1949 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1950 }
1951
1952
1953 /**
1954 * binder_inc_ref_for_node() - increment the ref for given proc/node
1955 * @proc: proc containing the ref
1956 * @node: target node
1957 * @strong: true=strong reference, false=weak reference
1958 * @target_list: worklist to use if node is incremented
1959 * @rdata: the id/refcount data for the ref
1960 *
1961 * Given a proc and node, increment the ref. Create the ref if it
1962 * doesn't already exist
1963 *
1964 * Return: 0 if successful, else errno
1965 */
1966 static int binder_inc_ref_for_node(struct binder_proc *proc,
1967 struct binder_node *node,
1968 bool strong,
1969 struct list_head *target_list,
1970 struct binder_ref_data *rdata)
1971 {
1972 struct binder_ref *ref;
1973 struct binder_ref *new_ref = NULL;
1974 int ret = 0;
1975
1976 binder_proc_lock(proc);
1977 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1978 if (!ref) {
1979 binder_proc_unlock(proc);
1980 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1981 if (!new_ref)
1982 return -ENOMEM;
1983 binder_proc_lock(proc);
1984 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1985 }
1986 ret = binder_inc_ref_olocked(ref, strong, target_list);
1987 *rdata = ref->data;
1988 binder_proc_unlock(proc);
1989 if (new_ref && ref != new_ref)
1990 /*
1991 * Another thread created the ref first so
1992 * free the one we allocated
1993 */
1994 kfree(new_ref);
1995 return ret;
1996 }
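
/*
 * Illustrative sketch (not part of the driver): binder_inc_ref_for_node()
 * above uses the common "allocate outside the lock, then re-look-up" pattern,
 * since kzalloc(GFP_KERNEL) may sleep while proc->outer_lock is a spinlock.
 * A generic version of the pattern, with hypothetical find_locked() and
 * insert_locked() helpers, is sketched below.
 */
#if 0
static struct example_obj *example_get_or_create(struct example_ctx *c, int key)
{
	struct example_obj *obj, *new = NULL;

	spin_lock(&c->lock);
	obj = find_locked(c, key);
	if (!obj) {
		spin_unlock(&c->lock);
		new = kzalloc(sizeof(*new), GFP_KERNEL);	/* may sleep */
		if (!new)
			return NULL;
		spin_lock(&c->lock);
		/* re-check: another thread may have inserted while unlocked */
		obj = find_locked(c, key);
		if (!obj) {
			insert_locked(c, key, new);
			obj = new;
		}
	}
	spin_unlock(&c->lock);
	if (new && obj != new)
		kfree(new);		/* lost the race; drop our copy */
	return obj;
}
#endif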
1997
1998 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1999 struct binder_transaction *t)
2000 {
2001 BUG_ON(!target_thread);
2002 assert_spin_locked(&target_thread->proc->inner_lock);
2003 BUG_ON(target_thread->transaction_stack != t);
2004 BUG_ON(target_thread->transaction_stack->from != target_thread);
2005 target_thread->transaction_stack =
2006 target_thread->transaction_stack->from_parent;
2007 t->from = NULL;
2008 }
2009
2010 /**
2011 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2012 * @thread: thread to decrement
2013 *
2014 * A thread needs to be kept alive while being used to create or
2015 * handle a transaction. binder_get_txn_from() is used to safely
2016 * extract t->from from a binder_transaction and keep the thread
2017 * indicated by t->from from being freed. When done with that
2018 * binder_thread, this function is called to decrement the
2019 * tmp_ref and free if appropriate (thread has been released
2020 * and no transaction is being processed by the driver).
2021 */
2022 static void binder_thread_dec_tmpref(struct binder_thread *thread)
2023 {
2024 /*
2025 * atomic is used to protect the counter value while
2026 * it cannot reach zero or thread->is_dead is false
2027 */
2028 binder_inner_proc_lock(thread->proc);
2029 atomic_dec(&thread->tmp_ref);
2030 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
2031 binder_inner_proc_unlock(thread->proc);
2032 binder_free_thread(thread);
2033 return;
2034 }
2035 binder_inner_proc_unlock(thread->proc);
2036 }
2037
2038 /**
2039 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2040 * @proc: proc to decrement
2041 *
2042 * A binder_proc needs to be kept alive while being used to create or
2043 * handle a transaction. proc->tmp_ref is incremented when
2044 * creating a new transaction or when the binder_proc is in use
2045 * by threads that are being released. When done with the binder_proc,
2046 * this function is called to decrement the counter and free the
2047 * proc if appropriate (proc has been released, all threads have
2048 * been released and it is not currently in use to process a transaction).
2049 */
2050 static void binder_proc_dec_tmpref(struct binder_proc *proc)
2051 {
2052 binder_inner_proc_lock(proc);
2053 proc->tmp_ref--;
2054 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2055 !proc->tmp_ref) {
2056 binder_inner_proc_unlock(proc);
2057 binder_free_proc(proc);
2058 return;
2059 }
2060 binder_inner_proc_unlock(proc);
2061 }
2062
2063 /**
2064 * binder_get_txn_from() - safely extract the "from" thread in transaction
2065 * @t: binder transaction for t->from
2066 *
2067 * Atomically return the "from" thread and increment the tmp_ref
2068 * count for the thread to ensure it stays alive until
2069 * binder_thread_dec_tmpref() is called.
2070 *
2071 * Return: the value of t->from
2072 */
2073 static struct binder_thread *binder_get_txn_from(
2074 struct binder_transaction *t)
2075 {
2076 struct binder_thread *from;
2077
2078 spin_lock(&t->lock);
2079 from = t->from;
2080 if (from)
2081 atomic_inc(&from->tmp_ref);
2082 spin_unlock(&t->lock);
2083 return from;
2084 }
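
/*
 * Illustrative sketch (not part of the driver): typical pairing of
 * binder_get_txn_from() with binder_thread_dec_tmpref(). The body of the
 * if-block may safely dereference 'from' because the tmp_ref keeps the
 * thread from being freed.
 */
#if 0
	struct binder_thread *from = binder_get_txn_from(t);

	if (from) {
		pr_info("transaction %d sent by %d:%d\n",
			t->debug_id, from->proc->pid, from->pid);
		binder_thread_dec_tmpref(from);
	}
#endif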
2085
2086 /**
2087 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2088 * @t: binder transaction for t->from
2089 *
2090 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2091 * to guarantee that the thread cannot be released while operating on it.
2092 * The caller must call binder_inner_proc_unlock() to release the inner lock
2093 * as well as call binder_thread_dec_tmpref() to release the reference.
2094 *
2095 * Return: the value of t->from
2096 */
2097 static struct binder_thread *binder_get_txn_from_and_acq_inner(
2098 struct binder_transaction *t)
2099 {
2100 struct binder_thread *from;
2101
2102 from = binder_get_txn_from(t);
2103 if (!from)
2104 return NULL;
2105 binder_inner_proc_lock(from->proc);
2106 if (t->from) {
2107 BUG_ON(from != t->from);
2108 return from;
2109 }
2110 binder_inner_proc_unlock(from->proc);
2111 binder_thread_dec_tmpref(from);
2112 return NULL;
2113 }
2114
2115 static void binder_free_transaction(struct binder_transaction *t)
2116 {
2117 if (t->buffer)
2118 t->buffer->transaction = NULL;
2119 kfree(t);
2120 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2121 }
2122
2123 static void binder_send_failed_reply(struct binder_transaction *t,
2124 uint32_t error_code)
2125 {
2126 struct binder_thread *target_thread;
2127 struct binder_transaction *next;
2128
2129 BUG_ON(t->flags & TF_ONE_WAY);
2130 while (1) {
2131 target_thread = binder_get_txn_from_and_acq_inner(t);
2132 if (target_thread) {
2133 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2134 "send failed reply for transaction %d to %d:%d\n",
2135 t->debug_id,
2136 target_thread->proc->pid,
2137 target_thread->pid);
2138
2139 binder_pop_transaction_ilocked(target_thread, t);
2140 if (target_thread->reply_error.cmd == BR_OK) {
2141 target_thread->reply_error.cmd = error_code;
2142 binder_enqueue_thread_work_ilocked(
2143 target_thread,
2144 &target_thread->reply_error.work);
2145 wake_up_interruptible(&target_thread->wait);
2146 } else {
2147 WARN(1, "Unexpected reply error: %u\n",
2148 target_thread->reply_error.cmd);
2149 }
2150 binder_inner_proc_unlock(target_thread->proc);
2151 binder_thread_dec_tmpref(target_thread);
2152 binder_free_transaction(t);
2153 return;
2154 }
2155 next = t->from_parent;
2156
2157 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2158 "send failed reply for transaction %d, target dead\n",
2159 t->debug_id);
2160
2161 binder_free_transaction(t);
2162 if (next == NULL) {
2163 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2164 "reply failed, no target thread at root\n");
2165 return;
2166 }
2167 t = next;
2168 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2169 "reply failed, no target thread -- retry %d\n",
2170 t->debug_id);
2171 }
2172 }
2173
2174 /**
2175 * binder_cleanup_transaction() - cleans up undelivered transaction
2176 * @t: transaction that needs to be cleaned up
2177 * @reason: reason the transaction wasn't delivered
2178 * @error_code: error to return to caller (if synchronous call)
2179 */
2180 static void binder_cleanup_transaction(struct binder_transaction *t,
2181 const char *reason,
2182 uint32_t error_code)
2183 {
2184 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2185 binder_send_failed_reply(t, error_code);
2186 } else {
2187 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2188 "undelivered transaction %d, %s\n",
2189 t->debug_id, reason);
2190 binder_free_transaction(t);
2191 }
2192 }
2193
2194 /**
2195 * binder_validate_object() - checks for a valid metadata object in a buffer.
2196 * @buffer: binder_buffer that we're parsing.
2197 * @offset: offset in the buffer at which to validate an object.
2198 *
2199 * Return: If there's a valid metadata object at @offset in @buffer, the
2200 * size of that object. Otherwise, it returns zero.
2201 */
2202 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2203 {
2204 /* Check if we can read a header first */
2205 struct binder_object_header *hdr;
2206 size_t object_size = 0;
2207
2208 if (offset > buffer->data_size - sizeof(*hdr) ||
2209 buffer->data_size < sizeof(*hdr) ||
2210 !IS_ALIGNED(offset, sizeof(u32)))
2211 return 0;
2212
2213 /* Ok, now see if we can read a complete object. */
2214 hdr = (struct binder_object_header *)(buffer->data + offset);
2215 switch (hdr->type) {
2216 case BINDER_TYPE_BINDER:
2217 case BINDER_TYPE_WEAK_BINDER:
2218 case BINDER_TYPE_HANDLE:
2219 case BINDER_TYPE_WEAK_HANDLE:
2220 object_size = sizeof(struct flat_binder_object);
2221 break;
2222 case BINDER_TYPE_FD:
2223 object_size = sizeof(struct binder_fd_object);
2224 break;
2225 case BINDER_TYPE_PTR:
2226 object_size = sizeof(struct binder_buffer_object);
2227 break;
2228 case BINDER_TYPE_FDA:
2229 object_size = sizeof(struct binder_fd_array_object);
2230 break;
2231 default:
2232 return 0;
2233 }
2234 if (offset <= buffer->data_size - object_size &&
2235 buffer->data_size >= object_size)
2236 return object_size;
2237 else
2238 return 0;
2239 }
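
/*
 * Illustrative sketch (not part of the driver): the paired checks in
 * binder_validate_object() are both needed because the sizes are unsigned;
 * "data_size - sizeof(*hdr)" wraps to a huge value when data_size is
 * smaller than the header. A minimal overflow-safe range check:
 */
#if 0
static bool example_object_fits(size_t buf_size, size_t offset, size_t obj_size)
{
	return buf_size >= obj_size && offset <= buf_size - obj_size;
}
#endif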
2240
2241 /**
2242 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2243 * @b: binder_buffer containing the object
2244 * @index: index in offset array at which the binder_buffer_object is
2245 * located
2246 * @start: points to the start of the offset array
2247 * @num_valid: the number of valid offsets in the offset array
2248 *
2249 * Return: If @index is within the valid range of the offset array
2250 * described by @start and @num_valid, and if there's a valid
2251 * binder_buffer_object at the offset found in index @index
2252 * of the offset array, that object is returned. Otherwise,
2253 * %NULL is returned.
2254 * Note that the offset found in index @index itself is not
2255 * verified; this function assumes that @num_valid elements
2256 * from @start were previously verified to have valid offsets.
2257 */
2258 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2259 binder_size_t index,
2260 binder_size_t *start,
2261 binder_size_t num_valid)
2262 {
2263 struct binder_buffer_object *buffer_obj;
2264 binder_size_t *offp;
2265
2266 if (index >= num_valid)
2267 return NULL;
2268
2269 offp = start + index;
2270 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2271 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2272 return NULL;
2273
2274 return buffer_obj;
2275 }
2276
2277 /**
2278 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2279 * @b: transaction buffer
2280 * @objects_start: start of objects buffer
2281 * @buffer: binder_buffer_object in which to fix up
2282 * @offset: start offset in @buffer to fix up
2283 * @last_obj: last binder_buffer_object that we fixed up in
2284 * @last_min_offset: minimum fixup offset in @last_obj
2285 *
2286 * Return: %true if a fixup in buffer @buffer at offset @offset is
2287 * allowed.
2288 *
2289 * For safety reasons, we only allow fixups inside a buffer to happen
2290 * at increasing offsets; additionally, we only allow fixup on the last
2291 * buffer object that was verified, or one of its parents.
2292 *
2293 * Example of what is allowed:
2294 *
2295 * A
2296 * B (parent = A, offset = 0)
2297 * C (parent = A, offset = 16)
2298 * D (parent = C, offset = 0)
2299 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2300 *
2301 * Examples of what is not allowed:
2302 *
2303 * Decreasing offsets within the same parent:
2304 * A
2305 * C (parent = A, offset = 16)
2306 * B (parent = A, offset = 0) // decreasing offset within A
2307 *
2308 * Referring to a parent that wasn't the last object or any of its parents:
2309 * A
2310 * B (parent = A, offset = 0)
2311 * C (parent = A, offset = 0)
2312 * C (parent = A, offset = 16)
2313 * D (parent = B, offset = 0) // B is not A or any of A's parents
2314 */
2315 static bool binder_validate_fixup(struct binder_buffer *b,
2316 binder_size_t *objects_start,
2317 struct binder_buffer_object *buffer,
2318 binder_size_t fixup_offset,
2319 struct binder_buffer_object *last_obj,
2320 binder_size_t last_min_offset)
2321 {
2322 if (!last_obj) {
2323 /* Nothing to fix up in */
2324 return false;
2325 }
2326
2327 while (last_obj != buffer) {
2328 /*
2329 * Safe to retrieve the parent of last_obj, since it
2330 * was already previously verified by the driver.
2331 */
2332 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2333 return false;
2334 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2335 last_obj = (struct binder_buffer_object *)
2336 (b->data + *(objects_start + last_obj->parent));
2337 }
2338 return (fixup_offset >= last_min_offset);
2339 }
2340
2341 static void binder_transaction_buffer_release(struct binder_proc *proc,
2342 struct binder_buffer *buffer,
2343 binder_size_t *failed_at)
2344 {
2345 binder_size_t *offp, *off_start, *off_end;
2346 int debug_id = buffer->debug_id;
2347
2348 binder_debug(BINDER_DEBUG_TRANSACTION,
2349 "%d buffer release %d, size %zd-%zd, failed at %p\n",
2350 proc->pid, buffer->debug_id,
2351 buffer->data_size, buffer->offsets_size, failed_at);
2352
2353 if (buffer->target_node)
2354 binder_dec_node(buffer->target_node, 1, 0);
2355
2356 off_start = (binder_size_t *)(buffer->data +
2357 ALIGN(buffer->data_size, sizeof(void *)));
2358 if (failed_at)
2359 off_end = failed_at;
2360 else
2361 off_end = (void *)off_start + buffer->offsets_size;
2362 for (offp = off_start; offp < off_end; offp++) {
2363 struct binder_object_header *hdr;
2364 size_t object_size = binder_validate_object(buffer, *offp);
2365
2366 if (object_size == 0) {
2367 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2368 debug_id, (u64)*offp, buffer->data_size);
2369 continue;
2370 }
2371 hdr = (struct binder_object_header *)(buffer->data + *offp);
2372 switch (hdr->type) {
2373 case BINDER_TYPE_BINDER:
2374 case BINDER_TYPE_WEAK_BINDER: {
2375 struct flat_binder_object *fp;
2376 struct binder_node *node;
2377
2378 fp = to_flat_binder_object(hdr);
2379 node = binder_get_node(proc, fp->binder);
2380 if (node == NULL) {
2381 pr_err("transaction release %d bad node %016llx\n",
2382 debug_id, (u64)fp->binder);
2383 break;
2384 }
2385 binder_debug(BINDER_DEBUG_TRANSACTION,
2386 " node %d u%016llx\n",
2387 node->debug_id, (u64)node->ptr);
2388 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2389 0);
2390 binder_put_node(node);
2391 } break;
2392 case BINDER_TYPE_HANDLE:
2393 case BINDER_TYPE_WEAK_HANDLE: {
2394 struct flat_binder_object *fp;
2395 struct binder_ref_data rdata;
2396 int ret;
2397
2398 fp = to_flat_binder_object(hdr);
2399 ret = binder_dec_ref_for_handle(proc, fp->handle,
2400 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2401
2402 if (ret) {
2403 pr_err("transaction release %d bad handle %d, ret = %d\n",
2404 debug_id, fp->handle, ret);
2405 break;
2406 }
2407 binder_debug(BINDER_DEBUG_TRANSACTION,
2408 " ref %d desc %d\n",
2409 rdata.debug_id, rdata.desc);
2410 } break;
2411
2412 case BINDER_TYPE_FD: {
2413 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2414
2415 binder_debug(BINDER_DEBUG_TRANSACTION,
2416 " fd %d\n", fp->fd);
2417 if (failed_at)
2418 task_close_fd(proc, fp->fd);
2419 } break;
2420 case BINDER_TYPE_PTR:
2421 /*
2422 * Nothing to do here, this will get cleaned up when the
2423 * transaction buffer gets freed
2424 */
2425 break;
2426 case BINDER_TYPE_FDA: {
2427 struct binder_fd_array_object *fda;
2428 struct binder_buffer_object *parent;
2429 uintptr_t parent_buffer;
2430 u32 *fd_array;
2431 size_t fd_index;
2432 binder_size_t fd_buf_size;
2433
2434 fda = to_binder_fd_array_object(hdr);
2435 parent = binder_validate_ptr(buffer, fda->parent,
2436 off_start,
2437 offp - off_start);
2438 if (!parent) {
2439 pr_err("transaction release %d bad parent offset",
2440 debug_id);
2441 continue;
2442 }
2443 /*
2444 * Since the parent was already fixed up, convert it
2445 * back to kernel address space to access it
2446 */
2447 parent_buffer = parent->buffer -
2448 binder_alloc_get_user_buffer_offset(
2449 &proc->alloc);
2450
2451 fd_buf_size = sizeof(u32) * fda->num_fds;
2452 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2453 pr_err("transaction release %d invalid number of fds (%lld)\n",
2454 debug_id, (u64)fda->num_fds);
2455 continue;
2456 }
2457 if (fd_buf_size > parent->length ||
2458 fda->parent_offset > parent->length - fd_buf_size) {
2459 /* No space for all file descriptors here. */
2460 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2461 debug_id, (u64)fda->num_fds);
2462 continue;
2463 }
2464 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2465 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2466 task_close_fd(proc, fd_array[fd_index]);
2467 } break;
2468 default:
2469 pr_err("transaction release %d bad object type %x\n",
2470 debug_id, hdr->type);
2471 break;
2472 }
2473 }
2474 }
2475
2476 static int binder_translate_binder(struct flat_binder_object *fp,
2477 struct binder_transaction *t,
2478 struct binder_thread *thread)
2479 {
2480 struct binder_node *node;
2481 struct binder_proc *proc = thread->proc;
2482 struct binder_proc *target_proc = t->to_proc;
2483 struct binder_ref_data rdata;
2484 int ret = 0;
2485
2486 node = binder_get_node(proc, fp->binder);
2487 if (!node) {
2488 node = binder_new_node(proc, fp);
2489 if (!node)
2490 return -ENOMEM;
2491 }
2492 if (fp->cookie != node->cookie) {
2493 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2494 proc->pid, thread->pid, (u64)fp->binder,
2495 node->debug_id, (u64)fp->cookie,
2496 (u64)node->cookie);
2497 ret = -EINVAL;
2498 goto done;
2499 }
2500 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2501 ret = -EPERM;
2502 goto done;
2503 }
2504
2505 ret = binder_inc_ref_for_node(target_proc, node,
2506 fp->hdr.type == BINDER_TYPE_BINDER,
2507 &thread->todo, &rdata);
2508 if (ret)
2509 goto done;
2510
2511 if (fp->hdr.type == BINDER_TYPE_BINDER)
2512 fp->hdr.type = BINDER_TYPE_HANDLE;
2513 else
2514 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2515 fp->binder = 0;
2516 fp->handle = rdata.desc;
2517 fp->cookie = 0;
2518
2519 trace_binder_transaction_node_to_ref(t, node, &rdata);
2520 binder_debug(BINDER_DEBUG_TRANSACTION,
2521 " node %d u%016llx -> ref %d desc %d\n",
2522 node->debug_id, (u64)node->ptr,
2523 rdata.debug_id, rdata.desc);
2524 done:
2525 binder_put_node(node);
2526 return ret;
2527 }
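
/*
 * Illustrative user-space sketch (not part of the driver): a sender that
 * shares one of its own objects embeds a BINDER_TYPE_BINDER object in the
 * transaction data; after binder_translate_binder() the receiver reads it
 * back as a BINDER_TYPE_HANDLE whose descriptor is valid in its own process.
 * 'local_object' and 'local_cookie' are hypothetical sender-side pointers.
 */
#if 0
	struct flat_binder_object obj = {
		.hdr.type = BINDER_TYPE_BINDER,
		.flags    = FLAT_BINDER_FLAG_ACCEPTS_FDS,
		.binder   = (binder_uintptr_t)local_object,
		.cookie   = (binder_uintptr_t)local_cookie,
	};
#endif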
2528
2529 static int binder_translate_handle(struct flat_binder_object *fp,
2530 struct binder_transaction *t,
2531 struct binder_thread *thread)
2532 {
2533 struct binder_proc *proc = thread->proc;
2534 struct binder_proc *target_proc = t->to_proc;
2535 struct binder_node *node;
2536 struct binder_ref_data src_rdata;
2537 int ret = 0;
2538
2539 node = binder_get_node_from_ref(proc, fp->handle,
2540 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2541 if (!node) {
2542 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2543 proc->pid, thread->pid, fp->handle);
2544 return -EINVAL;
2545 }
2546 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2547 ret = -EPERM;
2548 goto done;
2549 }
2550
2551 binder_node_lock(node);
2552 if (node->proc == target_proc) {
2553 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2554 fp->hdr.type = BINDER_TYPE_BINDER;
2555 else
2556 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2557 fp->binder = node->ptr;
2558 fp->cookie = node->cookie;
2559 if (node->proc)
2560 binder_inner_proc_lock(node->proc);
2561 binder_inc_node_nilocked(node,
2562 fp->hdr.type == BINDER_TYPE_BINDER,
2563 0, NULL);
2564 if (node->proc)
2565 binder_inner_proc_unlock(node->proc);
2566 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2567 binder_debug(BINDER_DEBUG_TRANSACTION,
2568 " ref %d desc %d -> node %d u%016llx\n",
2569 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2570 (u64)node->ptr);
2571 binder_node_unlock(node);
2572 } else {
2573 struct binder_ref_data dest_rdata;
2574
2575 binder_node_unlock(node);
2576 ret = binder_inc_ref_for_node(target_proc, node,
2577 fp->hdr.type == BINDER_TYPE_HANDLE,
2578 NULL, &dest_rdata);
2579 if (ret)
2580 goto done;
2581
2582 fp->binder = 0;
2583 fp->handle = dest_rdata.desc;
2584 fp->cookie = 0;
2585 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2586 &dest_rdata);
2587 binder_debug(BINDER_DEBUG_TRANSACTION,
2588 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2589 src_rdata.debug_id, src_rdata.desc,
2590 dest_rdata.debug_id, dest_rdata.desc,
2591 node->debug_id);
2592 }
2593 done:
2594 binder_put_node(node);
2595 return ret;
2596 }
2597
2598 static int binder_translate_fd(int fd,
2599 struct binder_transaction *t,
2600 struct binder_thread *thread,
2601 struct binder_transaction *in_reply_to)
2602 {
2603 struct binder_proc *proc = thread->proc;
2604 struct binder_proc *target_proc = t->to_proc;
2605 int target_fd;
2606 struct file *file;
2607 int ret;
2608 bool target_allows_fd;
2609
2610 if (in_reply_to)
2611 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2612 else
2613 target_allows_fd = t->buffer->target_node->accept_fds;
2614 if (!target_allows_fd) {
2615 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2616 proc->pid, thread->pid,
2617 in_reply_to ? "reply" : "transaction",
2618 fd);
2619 ret = -EPERM;
2620 goto err_fd_not_accepted;
2621 }
2622
2623 file = fget(fd);
2624 if (!file) {
2625 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2626 proc->pid, thread->pid, fd);
2627 ret = -EBADF;
2628 goto err_fget;
2629 }
2630 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2631 if (ret < 0) {
2632 ret = -EPERM;
2633 goto err_security;
2634 }
2635
2636 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2637 if (target_fd < 0) {
2638 ret = -ENOMEM;
2639 goto err_get_unused_fd;
2640 }
2641 task_fd_install(target_proc, target_fd, file);
2642 trace_binder_transaction_fd(t, fd, target_fd);
2643 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2644 fd, target_fd);
2645
2646 return target_fd;
2647
2648 err_get_unused_fd:
2649 err_security:
2650 fput(file);
2651 err_fget:
2652 err_fd_not_accepted:
2653 return ret;
2654 }
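
/*
 * Illustrative user-space sketch (not part of the driver): passing a file
 * descriptor uses a BINDER_TYPE_FD object; binder_translate_fd() above
 * installs the sender's file in the target process's fd table and returns
 * the target's descriptor, which the caller stores back into the object.
 * 'pipefd' is a hypothetical sender-side descriptor.
 */
#if 0
	struct binder_fd_object fdo = {
		.hdr.type = BINDER_TYPE_FD,
		.fd       = pipefd,
	};
#endif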
2655
2656 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2657 struct binder_buffer_object *parent,
2658 struct binder_transaction *t,
2659 struct binder_thread *thread,
2660 struct binder_transaction *in_reply_to)
2661 {
2662 binder_size_t fdi, fd_buf_size, num_installed_fds;
2663 int target_fd;
2664 uintptr_t parent_buffer;
2665 u32 *fd_array;
2666 struct binder_proc *proc = thread->proc;
2667 struct binder_proc *target_proc = t->to_proc;
2668
2669 fd_buf_size = sizeof(u32) * fda->num_fds;
2670 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2671 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2672 proc->pid, thread->pid, (u64)fda->num_fds);
2673 return -EINVAL;
2674 }
2675 if (fd_buf_size > parent->length ||
2676 fda->parent_offset > parent->length - fd_buf_size) {
2677 /* No space for all file descriptors here. */
2678 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2679 proc->pid, thread->pid, (u64)fda->num_fds);
2680 return -EINVAL;
2681 }
2682 /*
2683 * Since the parent was already fixed up, convert it
2684 * back to the kernel address space to access it
2685 */
2686 parent_buffer = parent->buffer -
2687 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2688 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2689 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2690 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2691 proc->pid, thread->pid);
2692 return -EINVAL;
2693 }
2694 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2695 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2696 in_reply_to);
2697 if (target_fd < 0)
2698 goto err_translate_fd_failed;
2699 fd_array[fdi] = target_fd;
2700 }
2701 return 0;
2702
2703 err_translate_fd_failed:
2704 /*
2705 * Failed to allocate fd or security error, free fds
2706 * installed so far.
2707 */
2708 num_installed_fds = fdi;
2709 for (fdi = 0; fdi < num_installed_fds; fdi++)
2710 task_close_fd(target_proc, fd_array[fdi]);
2711 return target_fd;
2712 }
2713
2714 static int binder_fixup_parent(struct binder_transaction *t,
2715 struct binder_thread *thread,
2716 struct binder_buffer_object *bp,
2717 binder_size_t *off_start,
2718 binder_size_t num_valid,
2719 struct binder_buffer_object *last_fixup_obj,
2720 binder_size_t last_fixup_min_off)
2721 {
2722 struct binder_buffer_object *parent;
2723 u8 *parent_buffer;
2724 struct binder_buffer *b = t->buffer;
2725 struct binder_proc *proc = thread->proc;
2726 struct binder_proc *target_proc = t->to_proc;
2727
2728 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2729 return 0;
2730
2731 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2732 if (!parent) {
2733 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2734 proc->pid, thread->pid);
2735 return -EINVAL;
2736 }
2737
2738 if (!binder_validate_fixup(b, off_start,
2739 parent, bp->parent_offset,
2740 last_fixup_obj,
2741 last_fixup_min_off)) {
2742 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2743 proc->pid, thread->pid);
2744 return -EINVAL;
2745 }
2746
2747 if (parent->length < sizeof(binder_uintptr_t) ||
2748 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2749 /* No space for a pointer here! */
2750 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2751 proc->pid, thread->pid);
2752 return -EINVAL;
2753 }
2754 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2755 binder_alloc_get_user_buffer_offset(
2756 &target_proc->alloc));
2757 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2758
2759 return 0;
2760 }
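
/*
 * Illustrative user-space sketch (not part of the driver): with
 * BC_TRANSACTION_SG a sender can pass a structure that itself contains a
 * pointer to a second buffer. Both buffers are sent as BINDER_TYPE_PTR
 * objects and the child names its parent, so binder_fixup_parent() can patch
 * the pointer inside the parent's copy to point at the child's copy in the
 * target address space. 'blob' and 'blob_len' are hypothetical sender-side
 * values.
 */
#if 0
	struct example_payload {		/* hypothetical user structure */
		__u32	len;
		void	*data;			/* points at 'blob' */
	} payload = { .len = blob_len, .data = blob };

	struct binder_buffer_object parent_obj = {
		.hdr.type = BINDER_TYPE_PTR,
		.buffer   = (binder_uintptr_t)&payload,
		.length   = sizeof(payload),
	};
	struct binder_buffer_object child_obj = {
		.hdr.type      = BINDER_TYPE_PTR,
		.flags         = BINDER_BUFFER_FLAG_HAS_PARENT,
		.buffer        = (binder_uintptr_t)blob,
		.length        = blob_len,
		.parent        = 0,	/* index of parent_obj in offsets[] */
		.parent_offset = offsetof(struct example_payload, data),
	};
#endif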
2761
2762 /**
2763 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2764 * @t: transaction to send
2765 * @proc: process to send the transaction to
2766 * @thread: thread in @proc to send the transaction to (may be NULL)
2767 *
2768 * This function queues a transaction to the specified process. It will try
2769 * to find a thread in the target process to handle the transaction and
2770 * wake it up. If no thread is found, the work is queued to the proc
2771 * waitqueue.
2772 *
2773 * If the @thread parameter is not NULL, the transaction is always queued
2774 * to the waitlist of that specific thread.
2775 *
2776 * Return: true if the transaction was successfully queued
2777 * false if the target process or thread is dead
2778 */
2779 static bool binder_proc_transaction(struct binder_transaction *t,
2780 struct binder_proc *proc,
2781 struct binder_thread *thread)
2782 {
2783 struct binder_node *node = t->buffer->target_node;
2784 struct binder_priority node_prio;
2785 bool oneway = !!(t->flags & TF_ONE_WAY);
2786 bool pending_async = false;
2787
2788 BUG_ON(!node);
2789 binder_node_lock(node);
2790 node_prio.prio = node->min_priority;
2791 node_prio.sched_policy = node->sched_policy;
2792
2793 if (oneway) {
2794 BUG_ON(thread);
2795 if (node->has_async_transaction) {
2796 pending_async = true;
2797 } else {
2798 node->has_async_transaction = 1;
2799 }
2800 }
2801
2802 binder_inner_proc_lock(proc);
2803
2804 if (proc->is_dead || (thread && thread->is_dead)) {
2805 binder_inner_proc_unlock(proc);
2806 binder_node_unlock(node);
2807 return false;
2808 }
2809
2810 if (!thread && !pending_async)
2811 thread = binder_select_thread_ilocked(proc);
2812
2813 if (thread) {
2814 binder_transaction_priority(thread->task, t, node_prio,
2815 node->inherit_rt);
2816 binder_enqueue_thread_work_ilocked(thread, &t->work);
2817 } else if (!pending_async) {
2818 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2819 } else {
2820 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2821 }
2822
2823 if (!pending_async)
2824 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2825
2826 binder_inner_proc_unlock(proc);
2827 binder_node_unlock(node);
2828
2829 return true;
2830 }
2831
2832 /**
2833 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2834 * @node: struct binder_node for which to get refs
2835 * @procp: returns @node->proc if valid
2836 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2837 *
2838 * User-space normally keeps the node alive when creating a transaction
2839 * since it has a reference to the target. The local strong ref keeps it
2840 * alive if the sending process dies before the target process processes
2841 * the transaction. If the source process is malicious or has a reference
2842 * counting bug, relying on the local strong ref can fail.
2843 *
2844 * Since user-space can cause the local strong ref to go away, we also take
2845 * a tmpref on the node to ensure it survives while we are constructing
2846 * the transaction. We also need a tmpref on the proc while we are
2847 * constructing the transaction, so we take that here as well.
2848 *
2849 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2850 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2851 * target proc has died, @error is set to BR_DEAD_REPLY.
2852 */
2853 static struct binder_node *binder_get_node_refs_for_txn(
2854 struct binder_node *node,
2855 struct binder_proc **procp,
2856 uint32_t *error)
2857 {
2858 struct binder_node *target_node = NULL;
2859
2860 binder_node_inner_lock(node);
2861 if (node->proc) {
2862 target_node = node;
2863 binder_inc_node_nilocked(node, 1, 0, NULL);
2864 binder_inc_node_tmpref_ilocked(node);
2865 node->proc->tmp_ref++;
2866 *procp = node->proc;
2867 } else
2868 *error = BR_DEAD_REPLY;
2869 binder_node_inner_unlock(node);
2870
2871 return target_node;
2872 }
2873
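/*
 * Illustrative user-space sketch (not part of the driver): a transaction
 * reaches binder_transaction() below via the BINDER_WRITE_READ ioctl with a
 * BC_TRANSACTION command in the write buffer. 'binder_fd', 'handle', 'args',
 * 'offsets' and 'nr_objects' are hypothetical caller-side values.
 */
#if 0
	struct {
		uint32_t			cmd;
		struct binder_transaction_data	txn;
	} __attribute__((packed)) writebuf = {
		.cmd = BC_TRANSACTION,
		.txn = {
			.target.handle	  = handle,	/* obtained earlier */
			.code		  = 1,		/* method code */
			.flags		  = 0,		/* synchronous call */
			.data_size	  = sizeof(args),
			.offsets_size	  = nr_objects * sizeof(binder_size_t),
			.data.ptr.buffer  = (binder_uintptr_t)&args,
			.data.ptr.offsets = (binder_uintptr_t)offsets,
		},
	};
	struct binder_write_read bwr = {
		.write_size   = sizeof(writebuf),
		.write_buffer = (binder_uintptr_t)&writebuf,
	};

	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
#endif
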
2874 static void binder_transaction(struct binder_proc *proc,
2875 struct binder_thread *thread,
2876 struct binder_transaction_data *tr, int reply,
2877 binder_size_t extra_buffers_size)
2878 {
2879 int ret;
2880 struct binder_transaction *t;
2881 struct binder_work *tcomplete;
2882 binder_size_t *offp, *off_end, *off_start;
2883 binder_size_t off_min;
2884 u8 *sg_bufp, *sg_buf_end;
2885 struct binder_proc *target_proc = NULL;
2886 struct binder_thread *target_thread = NULL;
2887 struct binder_node *target_node = NULL;
2888 struct binder_transaction *in_reply_to = NULL;
2889 struct binder_transaction_log_entry *e;
2890 uint32_t return_error = 0;
2891 uint32_t return_error_param = 0;
2892 uint32_t return_error_line = 0;
2893 struct binder_buffer_object *last_fixup_obj = NULL;
2894 binder_size_t last_fixup_min_off = 0;
2895 struct binder_context *context = proc->context;
2896 int t_debug_id = atomic_inc_return(&binder_last_id);
2897
2898 e = binder_transaction_log_add(&binder_transaction_log);
2899 e->debug_id = t_debug_id;
2900 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2901 e->from_proc = proc->pid;
2902 e->from_thread = thread->pid;
2903 e->target_handle = tr->target.handle;
2904 e->data_size = tr->data_size;
2905 e->offsets_size = tr->offsets_size;
2906 e->context_name = proc->context->name;
2907
2908 if (reply) {
2909 binder_inner_proc_lock(proc);
2910 in_reply_to = thread->transaction_stack;
2911 if (in_reply_to == NULL) {
2912 binder_inner_proc_unlock(proc);
2913 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2914 proc->pid, thread->pid);
2915 return_error = BR_FAILED_REPLY;
2916 return_error_param = -EPROTO;
2917 return_error_line = __LINE__;
2918 goto err_empty_call_stack;
2919 }
2920 if (in_reply_to->to_thread != thread) {
2921 spin_lock(&in_reply_to->lock);
2922 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2923 proc->pid, thread->pid, in_reply_to->debug_id,
2924 in_reply_to->to_proc ?
2925 in_reply_to->to_proc->pid : 0,
2926 in_reply_to->to_thread ?
2927 in_reply_to->to_thread->pid : 0);
2928 spin_unlock(&in_reply_to->lock);
2929 binder_inner_proc_unlock(proc);
2930 return_error = BR_FAILED_REPLY;
2931 return_error_param = -EPROTO;
2932 return_error_line = __LINE__;
2933 in_reply_to = NULL;
2934 goto err_bad_call_stack;
2935 }
2936 thread->transaction_stack = in_reply_to->to_parent;
2937 binder_inner_proc_unlock(proc);
2938 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2939 if (target_thread == NULL) {
2940 return_error = BR_DEAD_REPLY;
2941 return_error_line = __LINE__;
2942 goto err_dead_binder;
2943 }
2944 if (target_thread->transaction_stack != in_reply_to) {
2945 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2946 proc->pid, thread->pid,
2947 target_thread->transaction_stack ?
2948 target_thread->transaction_stack->debug_id : 0,
2949 in_reply_to->debug_id);
2950 binder_inner_proc_unlock(target_thread->proc);
2951 return_error = BR_FAILED_REPLY;
2952 return_error_param = -EPROTO;
2953 return_error_line = __LINE__;
2954 in_reply_to = NULL;
2955 target_thread = NULL;
2956 goto err_dead_binder;
2957 }
2958 target_proc = target_thread->proc;
2959 target_proc->tmp_ref++;
2960 binder_inner_proc_unlock(target_thread->proc);
2961 } else {
2962 if (tr->target.handle) {
2963 struct binder_ref *ref;
2964
2965 /*
2966 * There must already be a strong ref
2967 * on this node. If so, do a strong
2968 * increment on the node to ensure it
2969 * stays alive until the transaction is
2970 * done.
2971 */
2972 binder_proc_lock(proc);
2973 ref = binder_get_ref_olocked(proc, tr->target.handle,
2974 true);
2975 if (ref) {
2976 target_node = binder_get_node_refs_for_txn(
2977 ref->node, &target_proc,
2978 &return_error);
2979 } else {
2980 binder_user_error("%d:%d got transaction to invalid handle\n",
2981 proc->pid, thread->pid);
2982 return_error = BR_FAILED_REPLY;
2983 }
2984 binder_proc_unlock(proc);
2985 } else {
2986 mutex_lock(&context->context_mgr_node_lock);
2987 target_node = context->binder_context_mgr_node;
2988 if (target_node)
2989 target_node = binder_get_node_refs_for_txn(
2990 target_node, &target_proc,
2991 &return_error);
2992 else
2993 return_error = BR_DEAD_REPLY;
2994 mutex_unlock(&context->context_mgr_node_lock);
2995 }
2996 if (!target_node) {
2997 /*
2998 * return_error is set above
2999 */
3000 return_error_param = -EINVAL;
3001 return_error_line = __LINE__;
3002 goto err_dead_binder;
3003 }
3004 e->to_node = target_node->debug_id;
3005 if (security_binder_transaction(proc->tsk,
3006 target_proc->tsk) < 0) {
3007 return_error = BR_FAILED_REPLY;
3008 return_error_param = -EPERM;
3009 return_error_line = __LINE__;
3010 goto err_invalid_target_handle;
3011 }
3012 binder_inner_proc_lock(proc);
3013 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3014 struct binder_transaction *tmp;
3015
3016 tmp = thread->transaction_stack;
3017 if (tmp->to_thread != thread) {
3018 spin_lock(&tmp->lock);
3019 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3020 proc->pid, thread->pid, tmp->debug_id,
3021 tmp->to_proc ? tmp->to_proc->pid : 0,
3022 tmp->to_thread ?
3023 tmp->to_thread->pid : 0);
3024 spin_unlock(&tmp->lock);
3025 binder_inner_proc_unlock(proc);
3026 return_error = BR_FAILED_REPLY;
3027 return_error_param = -EPROTO;
3028 return_error_line = __LINE__;
3029 goto err_bad_call_stack;
3030 }
3031 while (tmp) {
3032 struct binder_thread *from;
3033
3034 spin_lock(&tmp->lock);
3035 from = tmp->from;
3036 if (from && from->proc == target_proc) {
3037 atomic_inc(&from->tmp_ref);
3038 target_thread = from;
3039 spin_unlock(&tmp->lock);
3040 break;
3041 }
3042 spin_unlock(&tmp->lock);
3043 tmp = tmp->from_parent;
3044 }
3045 }
3046 binder_inner_proc_unlock(proc);
3047 }
3048 if (target_thread)
3049 e->to_thread = target_thread->pid;
3050 e->to_proc = target_proc->pid;
3051
3052 /* TODO: reuse incoming transaction for reply */
3053 t = kzalloc(sizeof(*t), GFP_KERNEL);
3054 if (t == NULL) {
3055 return_error = BR_FAILED_REPLY;
3056 return_error_param = -ENOMEM;
3057 return_error_line = __LINE__;
3058 goto err_alloc_t_failed;
3059 }
3060 binder_stats_created(BINDER_STAT_TRANSACTION);
3061 spin_lock_init(&t->lock);
3062
3063 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3064 if (tcomplete == NULL) {
3065 return_error = BR_FAILED_REPLY;
3066 return_error_param = -ENOMEM;
3067 return_error_line = __LINE__;
3068 goto err_alloc_tcomplete_failed;
3069 }
3070 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3071
3072 t->debug_id = t_debug_id;
3073
3074 if (reply)
3075 binder_debug(BINDER_DEBUG_TRANSACTION,
3076 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3077 proc->pid, thread->pid, t->debug_id,
3078 target_proc->pid, target_thread->pid,
3079 (u64)tr->data.ptr.buffer,
3080 (u64)tr->data.ptr.offsets,
3081 (u64)tr->data_size, (u64)tr->offsets_size,
3082 (u64)extra_buffers_size);
3083 else
3084 binder_debug(BINDER_DEBUG_TRANSACTION,
3085 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3086 proc->pid, thread->pid, t->debug_id,
3087 target_proc->pid, target_node->debug_id,
3088 (u64)tr->data.ptr.buffer,
3089 (u64)tr->data.ptr.offsets,
3090 (u64)tr->data_size, (u64)tr->offsets_size,
3091 (u64)extra_buffers_size);
3092
3093 if (!reply && !(tr->flags & TF_ONE_WAY))
3094 t->from = thread;
3095 else
3096 t->from = NULL;
3097 t->sender_euid = task_euid(proc->tsk);
3098 t->to_proc = target_proc;
3099 t->to_thread = target_thread;
3100 t->code = tr->code;
3101 t->flags = tr->flags;
3102 if (!(t->flags & TF_ONE_WAY) &&
3103 binder_supported_policy(current->policy)) {
3104 /* Inherit supported policies for synchronous transactions */
3105 t->priority.sched_policy = current->policy;
3106 t->priority.prio = current->normal_prio;
3107 } else {
3108 /* Otherwise, fall back to the default priority */
3109 t->priority = target_proc->default_priority;
3110 }
3111
3112 trace_binder_transaction(reply, t, target_node);
3113
3114 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3115 tr->offsets_size, extra_buffers_size,
3116 !reply && (t->flags & TF_ONE_WAY));
3117 if (IS_ERR(t->buffer)) {
3118 /*
3119 * -ESRCH indicates VMA cleared. The target is dying.
3120 */
3121 return_error_param = PTR_ERR(t->buffer);
3122 return_error = return_error_param == -ESRCH ?
3123 BR_DEAD_REPLY : BR_FAILED_REPLY;
3124 return_error_line = __LINE__;
3125 t->buffer = NULL;
3126 goto err_binder_alloc_buf_failed;
3127 }
3128 t->buffer->allow_user_free = 0;
3129 t->buffer->debug_id = t->debug_id;
3130 t->buffer->transaction = t;
3131 t->buffer->target_node = target_node;
3132 trace_binder_transaction_alloc_buf(t->buffer);
3133 off_start = (binder_size_t *)(t->buffer->data +
3134 ALIGN(tr->data_size, sizeof(void *)));
3135 offp = off_start;
3136
3137 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3138 tr->data.ptr.buffer, tr->data_size)) {
3139 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3140 proc->pid, thread->pid);
3141 return_error = BR_FAILED_REPLY;
3142 return_error_param = -EFAULT;
3143 return_error_line = __LINE__;
3144 goto err_copy_data_failed;
3145 }
3146 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3147 tr->data.ptr.offsets, tr->offsets_size)) {
3148 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3149 proc->pid, thread->pid);
3150 return_error = BR_FAILED_REPLY;
3151 return_error_param = -EFAULT;
3152 return_error_line = __LINE__;
3153 goto err_copy_data_failed;
3154 }
3155 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3156 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3157 proc->pid, thread->pid, (u64)tr->offsets_size);
3158 return_error = BR_FAILED_REPLY;
3159 return_error_param = -EINVAL;
3160 return_error_line = __LINE__;
3161 goto err_bad_offset;
3162 }
3163 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3164 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3165 proc->pid, thread->pid,
3166 (u64)extra_buffers_size);
3167 return_error = BR_FAILED_REPLY;
3168 return_error_param = -EINVAL;
3169 return_error_line = __LINE__;
3170 goto err_bad_offset;
3171 }
3172 off_end = (void *)off_start + tr->offsets_size;
3173 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3174 sg_buf_end = sg_bufp + extra_buffers_size;
3175 off_min = 0;
3176 for (; offp < off_end; offp++) {
3177 struct binder_object_header *hdr;
3178 size_t object_size = binder_validate_object(t->buffer, *offp);
3179
3180 if (object_size == 0 || *offp < off_min) {
3181 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3182 proc->pid, thread->pid, (u64)*offp,
3183 (u64)off_min,
3184 (u64)t->buffer->data_size);
3185 return_error = BR_FAILED_REPLY;
3186 return_error_param = -EINVAL;
3187 return_error_line = __LINE__;
3188 goto err_bad_offset;
3189 }
3190
3191 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3192 off_min = *offp + object_size;
3193 switch (hdr->type) {
3194 case BINDER_TYPE_BINDER:
3195 case BINDER_TYPE_WEAK_BINDER: {
3196 struct flat_binder_object *fp;
3197
3198 fp = to_flat_binder_object(hdr);
3199 ret = binder_translate_binder(fp, t, thread);
3200 if (ret < 0) {
3201 return_error = BR_FAILED_REPLY;
3202 return_error_param = ret;
3203 return_error_line = __LINE__;
3204 goto err_translate_failed;
3205 }
3206 } break;
3207 case BINDER_TYPE_HANDLE:
3208 case BINDER_TYPE_WEAK_HANDLE: {
3209 struct flat_binder_object *fp;
3210
3211 fp = to_flat_binder_object(hdr);
3212 ret = binder_translate_handle(fp, t, thread);
3213 if (ret < 0) {
3214 return_error = BR_FAILED_REPLY;
3215 return_error_param = ret;
3216 return_error_line = __LINE__;
3217 goto err_translate_failed;
3218 }
3219 } break;
3220
3221 case BINDER_TYPE_FD: {
3222 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3223 int target_fd = binder_translate_fd(fp->fd, t, thread,
3224 in_reply_to);
3225
3226 if (target_fd < 0) {
3227 return_error = BR_FAILED_REPLY;
3228 return_error_param = target_fd;
3229 return_error_line = __LINE__;
3230 goto err_translate_failed;
3231 }
3232 fp->pad_binder = 0;
3233 fp->fd = target_fd;
3234 } break;
3235 case BINDER_TYPE_FDA: {
3236 struct binder_fd_array_object *fda =
3237 to_binder_fd_array_object(hdr);
3238 struct binder_buffer_object *parent =
3239 binder_validate_ptr(t->buffer, fda->parent,
3240 off_start,
3241 offp - off_start);
3242 if (!parent) {
3243 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3244 proc->pid, thread->pid);
3245 return_error = BR_FAILED_REPLY;
3246 return_error_param = -EINVAL;
3247 return_error_line = __LINE__;
3248 goto err_bad_parent;
3249 }
3250 if (!binder_validate_fixup(t->buffer, off_start,
3251 parent, fda->parent_offset,
3252 last_fixup_obj,
3253 last_fixup_min_off)) {
3254 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3255 proc->pid, thread->pid);
3256 return_error = BR_FAILED_REPLY;
3257 return_error_param = -EINVAL;
3258 return_error_line = __LINE__;
3259 goto err_bad_parent;
3260 }
3261 ret = binder_translate_fd_array(fda, parent, t, thread,
3262 in_reply_to);
3263 if (ret < 0) {
3264 return_error = BR_FAILED_REPLY;
3265 return_error_param = ret;
3266 return_error_line = __LINE__;
3267 goto err_translate_failed;
3268 }
3269 last_fixup_obj = parent;
3270 last_fixup_min_off =
3271 fda->parent_offset + sizeof(u32) * fda->num_fds;
3272 } break;
3273 case BINDER_TYPE_PTR: {
3274 struct binder_buffer_object *bp =
3275 to_binder_buffer_object(hdr);
3276 size_t buf_left = sg_buf_end - sg_bufp;
3277
3278 if (bp->length > buf_left) {
3279 binder_user_error("%d:%d got transaction with too large buffer\n",
3280 proc->pid, thread->pid);
3281 return_error = BR_FAILED_REPLY;
3282 return_error_param = -EINVAL;
3283 return_error_line = __LINE__;
3284 goto err_bad_offset;
3285 }
3286 if (copy_from_user(sg_bufp,
3287 (const void __user *)(uintptr_t)
3288 bp->buffer, bp->length)) {
3289 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3290 proc->pid, thread->pid);
3291 return_error_param = -EFAULT;
3292 return_error = BR_FAILED_REPLY;
3293 return_error_line = __LINE__;
3294 goto err_copy_data_failed;
3295 }
3296 /* Fixup buffer pointer to target proc address space */
3297 bp->buffer = (uintptr_t)sg_bufp +
3298 binder_alloc_get_user_buffer_offset(
3299 &target_proc->alloc);
3300 sg_bufp += ALIGN(bp->length, sizeof(u64));
3301
3302 ret = binder_fixup_parent(t, thread, bp, off_start,
3303 offp - off_start,
3304 last_fixup_obj,
3305 last_fixup_min_off);
3306 if (ret < 0) {
3307 return_error = BR_FAILED_REPLY;
3308 return_error_param = ret;
3309 return_error_line = __LINE__;
3310 goto err_translate_failed;
3311 }
3312 last_fixup_obj = bp;
3313 last_fixup_min_off = 0;
3314 } break;
3315 default:
3316 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3317 proc->pid, thread->pid, hdr->type);
3318 return_error = BR_FAILED_REPLY;
3319 return_error_param = -EINVAL;
3320 return_error_line = __LINE__;
3321 goto err_bad_object_type;
3322 }
3323 }
3324 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3325 t->work.type = BINDER_WORK_TRANSACTION;
3326
3327 if (reply) {
3328 binder_enqueue_thread_work(thread, tcomplete);
3329 binder_inner_proc_lock(target_proc);
3330 if (target_thread->is_dead) {
3331 binder_inner_proc_unlock(target_proc);
3332 goto err_dead_proc_or_thread;
3333 }
3334 BUG_ON(t->buffer->async_transaction != 0);
3335 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3336 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3337 binder_inner_proc_unlock(target_proc);
3338 wake_up_interruptible_sync(&target_thread->wait);
3339 binder_restore_priority(current, in_reply_to->saved_priority);
3340 binder_free_transaction(in_reply_to);
3341 } else if (!(t->flags & TF_ONE_WAY)) {
3342 BUG_ON(t->buffer->async_transaction != 0);
3343 binder_inner_proc_lock(proc);
3344 /*
3345 * Defer the TRANSACTION_COMPLETE, so we don't return to
3346 * userspace immediately; this allows the target process to
3347 * immediately start processing this transaction, reducing
3348 * latency. We will then return the TRANSACTION_COMPLETE when
3349 * the target replies (or there is an error).
3350 */
3351 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3352 t->need_reply = 1;
3353 t->from_parent = thread->transaction_stack;
3354 thread->transaction_stack = t;
3355 binder_inner_proc_unlock(proc);
3356 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3357 binder_inner_proc_lock(proc);
3358 binder_pop_transaction_ilocked(thread, t);
3359 binder_inner_proc_unlock(proc);
3360 goto err_dead_proc_or_thread;
3361 }
3362 } else {
3363 BUG_ON(target_node == NULL);
3364 BUG_ON(t->buffer->async_transaction != 1);
3365 binder_enqueue_thread_work(thread, tcomplete);
3366 if (!binder_proc_transaction(t, target_proc, NULL))
3367 goto err_dead_proc_or_thread;
3368 }
3369 if (target_thread)
3370 binder_thread_dec_tmpref(target_thread);
3371 binder_proc_dec_tmpref(target_proc);
3372 if (target_node)
3373 binder_dec_node_tmpref(target_node);
3374 /*
3375 * write barrier to synchronize with initialization
3376 * of log entry
3377 */
3378 smp_wmb();
3379 WRITE_ONCE(e->debug_id_done, t_debug_id);
3380 return;
3381
3382 err_dead_proc_or_thread:
3383 return_error = BR_DEAD_REPLY;
3384 return_error_line = __LINE__;
3385 binder_dequeue_work(proc, tcomplete);
3386 err_translate_failed:
3387 err_bad_object_type:
3388 err_bad_offset:
3389 err_bad_parent:
3390 err_copy_data_failed:
3391 trace_binder_transaction_failed_buffer_release(t->buffer);
3392 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3393 if (target_node)
3394 binder_dec_node_tmpref(target_node);
3395 target_node = NULL;
3396 t->buffer->transaction = NULL;
3397 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3398 err_binder_alloc_buf_failed:
3399 kfree(tcomplete);
3400 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3401 err_alloc_tcomplete_failed:
3402 kfree(t);
3403 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3404 err_alloc_t_failed:
3405 err_bad_call_stack:
3406 err_empty_call_stack:
3407 err_dead_binder:
3408 err_invalid_target_handle:
3409 if (target_thread)
3410 binder_thread_dec_tmpref(target_thread);
3411 if (target_proc)
3412 binder_proc_dec_tmpref(target_proc);
3413 if (target_node) {
3414 binder_dec_node(target_node, 1, 0);
3415 binder_dec_node_tmpref(target_node);
3416 }
3417
3418 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3419 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3420 proc->pid, thread->pid, return_error, return_error_param,
3421 (u64)tr->data_size, (u64)tr->offsets_size,
3422 return_error_line);
3423
3424 {
3425 struct binder_transaction_log_entry *fe;
3426
3427 e->return_error = return_error;
3428 e->return_error_param = return_error_param;
3429 e->return_error_line = return_error_line;
3430 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3431 *fe = *e;
3432 /*
3433 * write barrier to synchronize with initialization
3434 * of log entry
3435 */
3436 smp_wmb();
3437 WRITE_ONCE(e->debug_id_done, t_debug_id);
3438 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3439 }
3440
3441 BUG_ON(thread->return_error.cmd != BR_OK);
3442 if (in_reply_to) {
3443 binder_restore_priority(current, in_reply_to->saved_priority);
3444 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3445 binder_enqueue_thread_work(thread, &thread->return_error.work);
3446 binder_send_failed_reply(in_reply_to, return_error);
3447 } else {
3448 thread->return_error.cmd = return_error;
3449 binder_enqueue_thread_work(thread, &thread->return_error.work);
3450 }
3451 }
3452
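/*
 * binder_thread_write() - consume BC_* commands from the user write buffer.
 *
 * The write buffer is a packed stream: each entry is a 32-bit BC_* command
 * word followed by that command's payload, if any. An illustrative layout
 * (not a literal dump) might be:
 *
 *	u32 BC_ENTER_LOOPER
 *	u32 BC_TRANSACTION  + struct binder_transaction_data
 *	u32 BC_FREE_BUFFER  + binder_uintptr_t data_ptr
 *
 * *consumed is advanced past each fully processed command so that userspace
 * can resume where processing stopped.
 */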
3453 static int binder_thread_write(struct binder_proc *proc,
3454 struct binder_thread *thread,
3455 binder_uintptr_t binder_buffer, size_t size,
3456 binder_size_t *consumed)
3457 {
3458 uint32_t cmd;
3459 struct binder_context *context = proc->context;
3460 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3461 void __user *ptr = buffer + *consumed;
3462 void __user *end = buffer + size;
3463
3464 while (ptr < end && thread->return_error.cmd == BR_OK) {
3465 int ret;
3466
3467 if (get_user(cmd, (uint32_t __user *)ptr))
3468 return -EFAULT;
3469 ptr += sizeof(uint32_t);
3470 trace_binder_command(cmd);
3471 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3472 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3473 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3474 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3475 }
3476 switch (cmd) {
3477 case BC_INCREFS:
3478 case BC_ACQUIRE:
3479 case BC_RELEASE:
3480 case BC_DECREFS: {
3481 uint32_t target;
3482 const char *debug_string;
3483 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3484 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3485 struct binder_ref_data rdata;
3486
3487 if (get_user(target, (uint32_t __user *)ptr))
3488 return -EFAULT;
3489
3490 ptr += sizeof(uint32_t);
3491 ret = -1;
3492 if (increment && !target) {
3493 struct binder_node *ctx_mgr_node;
3494 mutex_lock(&context->context_mgr_node_lock);
3495 ctx_mgr_node = context->binder_context_mgr_node;
3496 if (ctx_mgr_node)
3497 ret = binder_inc_ref_for_node(
3498 proc, ctx_mgr_node,
3499 strong, NULL, &rdata);
3500 mutex_unlock(&context->context_mgr_node_lock);
3501 }
3502 if (ret)
3503 ret = binder_update_ref_for_handle(
3504 proc, target, increment, strong,
3505 &rdata);
3506 if (!ret && rdata.desc != target) {
3507 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3508 proc->pid, thread->pid,
3509 target, rdata.desc);
3510 }
3511 switch (cmd) {
3512 case BC_INCREFS:
3513 debug_string = "IncRefs";
3514 break;
3515 case BC_ACQUIRE:
3516 debug_string = "Acquire";
3517 break;
3518 case BC_RELEASE:
3519 debug_string = "Release";
3520 break;
3521 case BC_DECREFS:
3522 default:
3523 debug_string = "DecRefs";
3524 break;
3525 }
3526 if (ret) {
3527 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3528 proc->pid, thread->pid, debug_string,
3529 strong, target, ret);
3530 break;
3531 }
3532 binder_debug(BINDER_DEBUG_USER_REFS,
3533 "%d:%d %s ref %d desc %d s %d w %d\n",
3534 proc->pid, thread->pid, debug_string,
3535 rdata.debug_id, rdata.desc, rdata.strong,
3536 rdata.weak);
3537 break;
3538 }
3539 case BC_INCREFS_DONE:
3540 case BC_ACQUIRE_DONE: {
3541 binder_uintptr_t node_ptr;
3542 binder_uintptr_t cookie;
3543 struct binder_node *node;
3544 bool free_node;
3545
3546 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3547 return -EFAULT;
3548 ptr += sizeof(binder_uintptr_t);
3549 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3550 return -EFAULT;
3551 ptr += sizeof(binder_uintptr_t);
3552 node = binder_get_node(proc, node_ptr);
3553 if (node == NULL) {
3554 binder_user_error("%d:%d %s u%016llx no match\n",
3555 proc->pid, thread->pid,
3556 cmd == BC_INCREFS_DONE ?
3557 "BC_INCREFS_DONE" :
3558 "BC_ACQUIRE_DONE",
3559 (u64)node_ptr);
3560 break;
3561 }
3562 if (cookie != node->cookie) {
3563 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3564 proc->pid, thread->pid,
3565 cmd == BC_INCREFS_DONE ?
3566 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3567 (u64)node_ptr, node->debug_id,
3568 (u64)cookie, (u64)node->cookie);
3569 binder_put_node(node);
3570 break;
3571 }
3572 binder_node_inner_lock(node);
3573 if (cmd == BC_ACQUIRE_DONE) {
3574 if (node->pending_strong_ref == 0) {
3575 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3576 proc->pid, thread->pid,
3577 node->debug_id);
3578 binder_node_inner_unlock(node);
3579 binder_put_node(node);
3580 break;
3581 }
3582 node->pending_strong_ref = 0;
3583 } else {
3584 if (node->pending_weak_ref == 0) {
3585 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3586 proc->pid, thread->pid,
3587 node->debug_id);
3588 binder_node_inner_unlock(node);
3589 binder_put_node(node);
3590 break;
3591 }
3592 node->pending_weak_ref = 0;
3593 }
3594 free_node = binder_dec_node_nilocked(node,
3595 cmd == BC_ACQUIRE_DONE, 0);
3596 WARN_ON(free_node);
3597 binder_debug(BINDER_DEBUG_USER_REFS,
3598 "%d:%d %s node %d ls %d lw %d tr %d\n",
3599 proc->pid, thread->pid,
3600 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3601 node->debug_id, node->local_strong_refs,
3602 node->local_weak_refs, node->tmp_refs);
3603 binder_node_inner_unlock(node);
3604 binder_put_node(node);
3605 break;
3606 }
3607 case BC_ATTEMPT_ACQUIRE:
3608 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3609 return -EINVAL;
3610 case BC_ACQUIRE_RESULT:
3611 pr_err("BC_ACQUIRE_RESULT not supported\n");
3612 return -EINVAL;
3613
3614 case BC_FREE_BUFFER: {
3615 binder_uintptr_t data_ptr;
3616 struct binder_buffer *buffer;
3617
3618 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3619 return -EFAULT;
3620 ptr += sizeof(binder_uintptr_t);
3621
3622 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3623 data_ptr);
3624 if (buffer == NULL) {
3625 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3626 proc->pid, thread->pid, (u64)data_ptr);
3627 break;
3628 }
3629 if (!buffer->allow_user_free) {
3630 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3631 proc->pid, thread->pid, (u64)data_ptr);
3632 break;
3633 }
3634 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3635 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3636 proc->pid, thread->pid, (u64)data_ptr,
3637 buffer->debug_id,
3638 buffer->transaction ? "active" : "finished");
3639
3640 if (buffer->transaction) {
3641 buffer->transaction->buffer = NULL;
3642 buffer->transaction = NULL;
3643 }
3644 if (buffer->async_transaction && buffer->target_node) {
3645 struct binder_node *buf_node;
3646 struct binder_work *w;
3647
3648 buf_node = buffer->target_node;
3649 binder_node_inner_lock(buf_node);
3650 BUG_ON(!buf_node->has_async_transaction);
3651 BUG_ON(buf_node->proc != proc);
3652 w = binder_dequeue_work_head_ilocked(
3653 &buf_node->async_todo);
3654 if (!w) {
3655 buf_node->has_async_transaction = 0;
3656 } else {
3657 binder_enqueue_work_ilocked(
3658 w, &proc->todo);
3659 binder_wakeup_proc_ilocked(proc);
3660 }
3661 binder_node_inner_unlock(buf_node);
3662 }
3663 trace_binder_transaction_buffer_release(buffer);
3664 binder_transaction_buffer_release(proc, buffer, NULL);
3665 binder_alloc_free_buf(&proc->alloc, buffer);
3666 break;
3667 }
3668
3669 case BC_TRANSACTION_SG:
3670 case BC_REPLY_SG: {
3671 struct binder_transaction_data_sg tr;
3672
3673 if (copy_from_user(&tr, ptr, sizeof(tr)))
3674 return -EFAULT;
3675 ptr += sizeof(tr);
3676 binder_transaction(proc, thread, &tr.transaction_data,
3677 cmd == BC_REPLY_SG, tr.buffers_size);
3678 break;
3679 }
3680 case BC_TRANSACTION:
3681 case BC_REPLY: {
3682 struct binder_transaction_data tr;
3683
3684 if (copy_from_user(&tr, ptr, sizeof(tr)))
3685 return -EFAULT;
3686 ptr += sizeof(tr);
3687 binder_transaction(proc, thread, &tr,
3688 cmd == BC_REPLY, 0);
3689 break;
3690 }
3691
3692 case BC_REGISTER_LOOPER:
3693 binder_debug(BINDER_DEBUG_THREADS,
3694 "%d:%d BC_REGISTER_LOOPER\n",
3695 proc->pid, thread->pid);
3696 binder_inner_proc_lock(proc);
3697 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3698 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3699 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3700 proc->pid, thread->pid);
3701 } else if (proc->requested_threads == 0) {
3702 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3703 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3704 proc->pid, thread->pid);
3705 } else {
3706 proc->requested_threads--;
3707 proc->requested_threads_started++;
3708 }
3709 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3710 binder_inner_proc_unlock(proc);
3711 break;
3712 case BC_ENTER_LOOPER:
3713 binder_debug(BINDER_DEBUG_THREADS,
3714 "%d:%d BC_ENTER_LOOPER\n",
3715 proc->pid, thread->pid);
3716 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3717 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3718 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3719 proc->pid, thread->pid);
3720 }
3721 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3722 break;
3723 case BC_EXIT_LOOPER:
3724 binder_debug(BINDER_DEBUG_THREADS,
3725 "%d:%d BC_EXIT_LOOPER\n",
3726 proc->pid, thread->pid);
3727 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3728 break;
3729
3730 case BC_REQUEST_DEATH_NOTIFICATION:
3731 case BC_CLEAR_DEATH_NOTIFICATION: {
3732 uint32_t target;
3733 binder_uintptr_t cookie;
3734 struct binder_ref *ref;
3735 struct binder_ref_death *death = NULL;
3736
3737 if (get_user(target, (uint32_t __user *)ptr))
3738 return -EFAULT;
3739 ptr += sizeof(uint32_t);
3740 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3741 return -EFAULT;
3742 ptr += sizeof(binder_uintptr_t);
3743 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3744 /*
3745 * Allocate memory for death notification
3746 * before taking lock
3747 */
3748 death = kzalloc(sizeof(*death), GFP_KERNEL);
3749 if (death == NULL) {
3750 WARN_ON(thread->return_error.cmd !=
3751 BR_OK);
3752 thread->return_error.cmd = BR_ERROR;
3753 binder_enqueue_thread_work(
3754 thread,
3755 &thread->return_error.work);
3756 binder_debug(
3757 BINDER_DEBUG_FAILED_TRANSACTION,
3758 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3759 proc->pid, thread->pid);
3760 break;
3761 }
3762 }
3763 binder_proc_lock(proc);
3764 ref = binder_get_ref_olocked(proc, target, false);
3765 if (ref == NULL) {
3766 binder_user_error("%d:%d %s invalid ref %d\n",
3767 proc->pid, thread->pid,
3768 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3769 "BC_REQUEST_DEATH_NOTIFICATION" :
3770 "BC_CLEAR_DEATH_NOTIFICATION",
3771 target);
3772 binder_proc_unlock(proc);
3773 kfree(death);
3774 break;
3775 }
3776
3777 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3778 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3779 proc->pid, thread->pid,
3780 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3781 "BC_REQUEST_DEATH_NOTIFICATION" :
3782 "BC_CLEAR_DEATH_NOTIFICATION",
3783 (u64)cookie, ref->data.debug_id,
3784 ref->data.desc, ref->data.strong,
3785 ref->data.weak, ref->node->debug_id);
3786
3787 binder_node_lock(ref->node);
3788 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3789 if (ref->death) {
3790 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3791 proc->pid, thread->pid);
3792 binder_node_unlock(ref->node);
3793 binder_proc_unlock(proc);
3794 kfree(death);
3795 break;
3796 }
3797 binder_stats_created(BINDER_STAT_DEATH);
3798 INIT_LIST_HEAD(&death->work.entry);
3799 death->cookie = cookie;
3800 ref->death = death;
3801 if (ref->node->proc == NULL) {
3802 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3803
3804 binder_inner_proc_lock(proc);
3805 binder_enqueue_work_ilocked(
3806 &ref->death->work, &proc->todo);
3807 binder_wakeup_proc_ilocked(proc);
3808 binder_inner_proc_unlock(proc);
3809 }
3810 } else {
3811 if (ref->death == NULL) {
3812 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3813 proc->pid, thread->pid);
3814 binder_node_unlock(ref->node);
3815 binder_proc_unlock(proc);
3816 break;
3817 }
3818 death = ref->death;
3819 if (death->cookie != cookie) {
3820 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3821 proc->pid, thread->pid,
3822 (u64)death->cookie,
3823 (u64)cookie);
3824 binder_node_unlock(ref->node);
3825 binder_proc_unlock(proc);
3826 break;
3827 }
3828 ref->death = NULL;
3829 binder_inner_proc_lock(proc);
3830 if (list_empty(&death->work.entry)) {
3831 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3832 if (thread->looper &
3833 (BINDER_LOOPER_STATE_REGISTERED |
3834 BINDER_LOOPER_STATE_ENTERED))
3835 binder_enqueue_thread_work_ilocked(
3836 thread,
3837 &death->work);
3838 else {
3839 binder_enqueue_work_ilocked(
3840 &death->work,
3841 &proc->todo);
3842 binder_wakeup_proc_ilocked(
3843 proc);
3844 }
3845 } else {
3846 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3847 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3848 }
3849 binder_inner_proc_unlock(proc);
3850 }
3851 binder_node_unlock(ref->node);
3852 binder_proc_unlock(proc);
3853 } break;
3854 case BC_DEAD_BINDER_DONE: {
3855 struct binder_work *w;
3856 binder_uintptr_t cookie;
3857 struct binder_ref_death *death = NULL;
3858
3859 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3860 return -EFAULT;
3861
3862 ptr += sizeof(cookie);
3863 binder_inner_proc_lock(proc);
3864 list_for_each_entry(w, &proc->delivered_death,
3865 entry) {
3866 struct binder_ref_death *tmp_death =
3867 container_of(w,
3868 struct binder_ref_death,
3869 work);
3870
3871 if (tmp_death->cookie == cookie) {
3872 death = tmp_death;
3873 break;
3874 }
3875 }
3876 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3877 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3878 proc->pid, thread->pid, (u64)cookie,
3879 death);
3880 if (death == NULL) {
3881 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3882 proc->pid, thread->pid, (u64)cookie);
3883 binder_inner_proc_unlock(proc);
3884 break;
3885 }
3886 binder_dequeue_work_ilocked(&death->work);
3887 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3888 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3889 if (thread->looper &
3890 (BINDER_LOOPER_STATE_REGISTERED |
3891 BINDER_LOOPER_STATE_ENTERED))
3892 binder_enqueue_thread_work_ilocked(
3893 thread, &death->work);
3894 else {
3895 binder_enqueue_work_ilocked(
3896 &death->work,
3897 &proc->todo);
3898 binder_wakeup_proc_ilocked(proc);
3899 }
3900 }
3901 binder_inner_proc_unlock(proc);
3902 } break;
3903
3904 default:
3905 pr_err("%d:%d unknown command %d\n",
3906 proc->pid, thread->pid, cmd);
3907 return -EINVAL;
3908 }
3909 *consumed = ptr - buffer;
3910 }
3911 return 0;
3912 }
3913
3914 static void binder_stat_br(struct binder_proc *proc,
3915 struct binder_thread *thread, uint32_t cmd)
3916 {
3917 trace_binder_return(cmd);
3918 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3919 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3920 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3921 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3922 }
3923 }
3924
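/*
 * binder_put_node_cmd() - emit one node refcount command to userspace.
 *
 * Writes the BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS command word
 * followed by the node's ptr and cookie into the read buffer and advances
 * *ptrp past them.
 */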
3925 static int binder_put_node_cmd(struct binder_proc *proc,
3926 struct binder_thread *thread,
3927 void __user **ptrp,
3928 binder_uintptr_t node_ptr,
3929 binder_uintptr_t node_cookie,
3930 int node_debug_id,
3931 uint32_t cmd, const char *cmd_name)
3932 {
3933 void __user *ptr = *ptrp;
3934
3935 if (put_user(cmd, (uint32_t __user *)ptr))
3936 return -EFAULT;
3937 ptr += sizeof(uint32_t);
3938
3939 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3940 return -EFAULT;
3941 ptr += sizeof(binder_uintptr_t);
3942
3943 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3944 return -EFAULT;
3945 ptr += sizeof(binder_uintptr_t);
3946
3947 binder_stat_br(proc, thread, cmd);
3948 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3949 proc->pid, thread->pid, cmd_name, node_debug_id,
3950 (u64)node_ptr, (u64)node_cookie);
3951
3952 *ptrp = ptr;
3953 return 0;
3954 }
3955
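/*
 * binder_wait_for_work() - sleep until this thread has work to do.
 *
 * When @do_proc_work is true the thread is also parked on
 * proc->waiting_threads so it can be handed process-wide work. The wait is
 * not counted by the freezer, and a pending signal ends it with -ERESTARTSYS.
 */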
3956 static int binder_wait_for_work(struct binder_thread *thread,
3957 bool do_proc_work)
3958 {
3959 DEFINE_WAIT(wait);
3960 struct binder_proc *proc = thread->proc;
3961 int ret = 0;
3962
3963 freezer_do_not_count();
3964 binder_inner_proc_lock(proc);
3965 for (;;) {
3966 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3967 if (binder_has_work_ilocked(thread, do_proc_work))
3968 break;
3969 if (do_proc_work)
3970 list_add(&thread->waiting_thread_node,
3971 &proc->waiting_threads);
3972 binder_inner_proc_unlock(proc);
3973 schedule();
3974 binder_inner_proc_lock(proc);
3975 list_del_init(&thread->waiting_thread_node);
3976 if (signal_pending(current)) {
3977 ret = -ERESTARTSYS;
3978 break;
3979 }
3980 }
3981 finish_wait(&thread->wait, &wait);
3982 binder_inner_proc_unlock(proc);
3983 freezer_count();
3984
3985 return ret;
3986 }
3987
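/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 *
 * The produced stream mirrors the write side: a 32-bit BR_* command word
 * followed by its payload. An illustrative layout (not a literal dump):
 *
 *	u32 BR_NOOP
 *	u32 BR_TRANSACTION  + struct binder_transaction_data
 *
 * A BR_NOOP is emitted first whenever *consumed is zero; if another looper
 * thread should be spawned, BR_SPAWN_LOOPER is written at the start of the
 * buffer (normally replacing that leading BR_NOOP) just before returning.
 */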
3988 static int binder_thread_read(struct binder_proc *proc,
3989 struct binder_thread *thread,
3990 binder_uintptr_t binder_buffer, size_t size,
3991 binder_size_t *consumed, int non_block)
3992 {
3993 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3994 void __user *ptr = buffer + *consumed;
3995 void __user *end = buffer + size;
3996
3997 int ret = 0;
3998 int wait_for_proc_work;
3999
4000 if (*consumed == 0) {
4001 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4002 return -EFAULT;
4003 ptr += sizeof(uint32_t);
4004 }
4005
4006 retry:
4007 binder_inner_proc_lock(proc);
4008 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4009 binder_inner_proc_unlock(proc);
4010
4011 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4012
4013 trace_binder_wait_for_work(wait_for_proc_work,
4014 !!thread->transaction_stack,
4015 !binder_worklist_empty(proc, &thread->todo));
4016 if (wait_for_proc_work) {
4017 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4018 BINDER_LOOPER_STATE_ENTERED))) {
4019 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4020 proc->pid, thread->pid, thread->looper);
4021 wait_event_interruptible(binder_user_error_wait,
4022 binder_stop_on_user_error < 2);
4023 }
4024 binder_restore_priority(current, proc->default_priority);
4025 }
4026
4027 if (non_block) {
4028 if (!binder_has_work(thread, wait_for_proc_work))
4029 ret = -EAGAIN;
4030 } else {
4031 ret = binder_wait_for_work(thread, wait_for_proc_work);
4032 }
4033
4034 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4035
4036 if (ret)
4037 return ret;
4038
4039 while (1) {
4040 uint32_t cmd;
4041 struct binder_transaction_data tr;
4042 struct binder_work *w = NULL;
4043 struct list_head *list = NULL;
4044 struct binder_transaction *t = NULL;
4045 struct binder_thread *t_from;
4046
4047 binder_inner_proc_lock(proc);
4048 if (!binder_worklist_empty_ilocked(&thread->todo))
4049 list = &thread->todo;
4050 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4051 wait_for_proc_work)
4052 list = &proc->todo;
4053 else {
4054 binder_inner_proc_unlock(proc);
4055
4056 /* no data added */
4057 if (ptr - buffer == 4 && !thread->looper_need_return)
4058 goto retry;
4059 break;
4060 }
4061
4062 if (end - ptr < sizeof(tr) + 4) {
4063 binder_inner_proc_unlock(proc);
4064 break;
4065 }
4066 w = binder_dequeue_work_head_ilocked(list);
4067 if (binder_worklist_empty_ilocked(&thread->todo))
4068 thread->process_todo = false;
4069
4070 switch (w->type) {
4071 case BINDER_WORK_TRANSACTION: {
4072 binder_inner_proc_unlock(proc);
4073 t = container_of(w, struct binder_transaction, work);
4074 } break;
4075 case BINDER_WORK_RETURN_ERROR: {
4076 struct binder_error *e = container_of(
4077 w, struct binder_error, work);
4078
4079 WARN_ON(e->cmd == BR_OK);
4080 binder_inner_proc_unlock(proc);
4081 if (put_user(e->cmd, (uint32_t __user *)ptr))
4082 return -EFAULT;
cmd = e->cmd;
4083 e->cmd = BR_OK;
4084 ptr += sizeof(uint32_t);
4085
4086 binder_stat_br(proc, thread, cmd);
4087 } break;
4088 case BINDER_WORK_TRANSACTION_COMPLETE: {
4089 binder_inner_proc_unlock(proc);
4090 cmd = BR_TRANSACTION_COMPLETE;
4091 if (put_user(cmd, (uint32_t __user *)ptr))
4092 return -EFAULT;
4093 ptr += sizeof(uint32_t);
4094
4095 binder_stat_br(proc, thread, cmd);
4096 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4097 "%d:%d BR_TRANSACTION_COMPLETE\n",
4098 proc->pid, thread->pid);
4099 kfree(w);
4100 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4101 } break;
4102 case BINDER_WORK_NODE: {
4103 struct binder_node *node = container_of(w, struct binder_node, work);
4104 int strong, weak;
4105 binder_uintptr_t node_ptr = node->ptr;
4106 binder_uintptr_t node_cookie = node->cookie;
4107 int node_debug_id = node->debug_id;
4108 int has_weak_ref;
4109 int has_strong_ref;
4110 void __user *orig_ptr = ptr;
4111
4112 BUG_ON(proc != node->proc);
4113 strong = node->internal_strong_refs ||
4114 node->local_strong_refs;
4115 weak = !hlist_empty(&node->refs) ||
4116 node->local_weak_refs ||
4117 node->tmp_refs || strong;
4118 has_strong_ref = node->has_strong_ref;
4119 has_weak_ref = node->has_weak_ref;
4120
4121 if (weak && !has_weak_ref) {
4122 node->has_weak_ref = 1;
4123 node->pending_weak_ref = 1;
4124 node->local_weak_refs++;
4125 }
4126 if (strong && !has_strong_ref) {
4127 node->has_strong_ref = 1;
4128 node->pending_strong_ref = 1;
4129 node->local_strong_refs++;
4130 }
4131 if (!strong && has_strong_ref)
4132 node->has_strong_ref = 0;
4133 if (!weak && has_weak_ref)
4134 node->has_weak_ref = 0;
4135 if (!weak && !strong) {
4136 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4137 "%d:%d node %d u%016llx c%016llx deleted\n",
4138 proc->pid, thread->pid,
4139 node_debug_id,
4140 (u64)node_ptr,
4141 (u64)node_cookie);
4142 rb_erase(&node->rb_node, &proc->nodes);
4143 binder_inner_proc_unlock(proc);
4144 binder_node_lock(node);
4145 /*
4146 * Acquire the node lock before freeing the
4147 * node to serialize with other threads that
4148 * may have been holding the node lock while
4149 * decrementing this node (avoids race where
4150 * this thread frees while the other thread
4151 * is unlocking the node after the final
4152 * decrement)
4153 */
4154 binder_node_unlock(node);
4155 binder_free_node(node);
4156 } else
4157 binder_inner_proc_unlock(proc);
4158
4159 if (weak && !has_weak_ref)
4160 ret = binder_put_node_cmd(
4161 proc, thread, &ptr, node_ptr,
4162 node_cookie, node_debug_id,
4163 BR_INCREFS, "BR_INCREFS");
4164 if (!ret && strong && !has_strong_ref)
4165 ret = binder_put_node_cmd(
4166 proc, thread, &ptr, node_ptr,
4167 node_cookie, node_debug_id,
4168 BR_ACQUIRE, "BR_ACQUIRE");
4169 if (!ret && !strong && has_strong_ref)
4170 ret = binder_put_node_cmd(
4171 proc, thread, &ptr, node_ptr,
4172 node_cookie, node_debug_id,
4173 BR_RELEASE, "BR_RELEASE");
4174 if (!ret && !weak && has_weak_ref)
4175 ret = binder_put_node_cmd(
4176 proc, thread, &ptr, node_ptr,
4177 node_cookie, node_debug_id,
4178 BR_DECREFS, "BR_DECREFS");
4179 if (orig_ptr == ptr)
4180 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4181 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4182 proc->pid, thread->pid,
4183 node_debug_id,
4184 (u64)node_ptr,
4185 (u64)node_cookie);
4186 if (ret)
4187 return ret;
4188 } break;
4189 case BINDER_WORK_DEAD_BINDER:
4190 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4191 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4192 struct binder_ref_death *death;
4193 uint32_t cmd;
4194 binder_uintptr_t cookie;
4195
4196 death = container_of(w, struct binder_ref_death, work);
4197 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4198 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4199 else
4200 cmd = BR_DEAD_BINDER;
4201 cookie = death->cookie;
4202
4203 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4204 "%d:%d %s %016llx\n",
4205 proc->pid, thread->pid,
4206 cmd == BR_DEAD_BINDER ?
4207 "BR_DEAD_BINDER" :
4208 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4209 (u64)cookie);
4210 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4211 binder_inner_proc_unlock(proc);
4212 kfree(death);
4213 binder_stats_deleted(BINDER_STAT_DEATH);
4214 } else {
4215 binder_enqueue_work_ilocked(
4216 w, &proc->delivered_death);
4217 binder_inner_proc_unlock(proc);
4218 }
4219 if (put_user(cmd, (uint32_t __user *)ptr))
4220 return -EFAULT;
4221 ptr += sizeof(uint32_t);
4222 if (put_user(cookie,
4223 (binder_uintptr_t __user *)ptr))
4224 return -EFAULT;
4225 ptr += sizeof(binder_uintptr_t);
4226 binder_stat_br(proc, thread, cmd);
4227 if (cmd == BR_DEAD_BINDER)
4228 goto done; /* DEAD_BINDER notifications can cause transactions */
4229 } break;
4230 }
4231
4232 if (!t)
4233 continue;
4234
4235 BUG_ON(t->buffer == NULL);
4236 if (t->buffer->target_node) {
4237 struct binder_node *target_node = t->buffer->target_node;
4238 struct binder_priority node_prio;
4239
4240 tr.target.ptr = target_node->ptr;
4241 tr.cookie = target_node->cookie;
4242 node_prio.sched_policy = target_node->sched_policy;
4243 node_prio.prio = target_node->min_priority;
4244 binder_transaction_priority(current, t, node_prio,
4245 target_node->inherit_rt);
4246 cmd = BR_TRANSACTION;
4247 } else {
4248 tr.target.ptr = 0;
4249 tr.cookie = 0;
4250 cmd = BR_REPLY;
4251 }
4252 tr.code = t->code;
4253 tr.flags = t->flags;
4254 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4255
4256 t_from = binder_get_txn_from(t);
4257 if (t_from) {
4258 struct task_struct *sender = t_from->proc->tsk;
4259
4260 tr.sender_pid = task_tgid_nr_ns(sender,
4261 task_active_pid_ns(current));
4262 } else {
4263 tr.sender_pid = 0;
4264 }
4265
4266 tr.data_size = t->buffer->data_size;
4267 tr.offsets_size = t->buffer->offsets_size;
4268 tr.data.ptr.buffer = (binder_uintptr_t)
4269 ((uintptr_t)t->buffer->data +
4270 binder_alloc_get_user_buffer_offset(&proc->alloc));
4271 tr.data.ptr.offsets = tr.data.ptr.buffer +
4272 ALIGN(t->buffer->data_size,
4273 sizeof(void *));
4274
4275 if (put_user(cmd, (uint32_t __user *)ptr)) {
4276 if (t_from)
4277 binder_thread_dec_tmpref(t_from);
4278
4279 binder_cleanup_transaction(t, "put_user failed",
4280 BR_FAILED_REPLY);
4281
4282 return -EFAULT;
4283 }
4284 ptr += sizeof(uint32_t);
4285 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4286 if (t_from)
4287 binder_thread_dec_tmpref(t_from);
4288
4289 binder_cleanup_transaction(t, "copy_to_user failed",
4290 BR_FAILED_REPLY);
4291
4292 return -EFAULT;
4293 }
4294 ptr += sizeof(tr);
4295
4296 trace_binder_transaction_received(t);
4297 binder_stat_br(proc, thread, cmd);
4298 binder_debug(BINDER_DEBUG_TRANSACTION,
4299 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4300 proc->pid, thread->pid,
4301 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4302 "BR_REPLY",
4303 t->debug_id, t_from ? t_from->proc->pid : 0,
4304 t_from ? t_from->pid : 0, cmd,
4305 t->buffer->data_size, t->buffer->offsets_size,
4306 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4307
4308 if (t_from)
4309 binder_thread_dec_tmpref(t_from);
4310 t->buffer->allow_user_free = 1;
4311 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4312 binder_inner_proc_lock(thread->proc);
4313 t->to_parent = thread->transaction_stack;
4314 t->to_thread = thread;
4315 thread->transaction_stack = t;
4316 binder_inner_proc_unlock(thread->proc);
4317 } else {
4318 binder_free_transaction(t);
4319 }
4320 break;
4321 }
4322
4323 done:
4324
4325 *consumed = ptr - buffer;
4326 binder_inner_proc_lock(proc);
4327 if (proc->requested_threads == 0 &&
4328 list_empty(&thread->proc->waiting_threads) &&
4329 proc->requested_threads_started < proc->max_threads &&
4330 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4331 BINDER_LOOPER_STATE_ENTERED))
4332 /* the user-space code fails to spawn a new thread if we leave this out */) {
4333 proc->requested_threads++;
4334 binder_inner_proc_unlock(proc);
4335 binder_debug(BINDER_DEBUG_THREADS,
4336 "%d:%d BR_SPAWN_LOOPER\n",
4337 proc->pid, thread->pid);
4338 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4339 return -EFAULT;
4340 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4341 } else
4342 binder_inner_proc_unlock(proc);
4343 return 0;
4344 }
4345
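/*
 * binder_release_work() - drain @list and dispose of each undelivered item:
 * pending transactions are failed with BR_DEAD_REPLY, transaction-complete
 * and death-notification work is freed, return errors are only logged.
 */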
4346 static void binder_release_work(struct binder_proc *proc,
4347 struct list_head *list)
4348 {
4349 struct binder_work *w;
4350
4351 while (1) {
4352 w = binder_dequeue_work_head(proc, list);
4353 if (!w)
4354 return;
4355
4356 switch (w->type) {
4357 case BINDER_WORK_TRANSACTION: {
4358 struct binder_transaction *t;
4359
4360 t = container_of(w, struct binder_transaction, work);
4361
4362 binder_cleanup_transaction(t, "process died.",
4363 BR_DEAD_REPLY);
4364 } break;
4365 case BINDER_WORK_RETURN_ERROR: {
4366 struct binder_error *e = container_of(
4367 w, struct binder_error, work);
4368
4369 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4370 "undelivered TRANSACTION_ERROR: %u\n",
4371 e->cmd);
4372 } break;
4373 case BINDER_WORK_TRANSACTION_COMPLETE: {
4374 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4375 "undelivered TRANSACTION_COMPLETE\n");
4376 kfree(w);
4377 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4378 } break;
4379 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4380 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4381 struct binder_ref_death *death;
4382
4383 death = container_of(w, struct binder_ref_death, work);
4384 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4385 "undelivered death notification, %016llx\n",
4386 (u64)death->cookie);
4387 kfree(death);
4388 binder_stats_deleted(BINDER_STAT_DEATH);
4389 } break;
4390 default:
4391 pr_err("unexpected work type, %d, not freed\n",
4392 w->type);
4393 break;
4394 }
4395 }
4396
4397 }
4398
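/*
 * binder_get_thread_ilocked() - look up current's binder_thread in
 * proc->threads, an rbtree keyed by pid. If no entry exists and @new_thread
 * was supplied, it is initialized, inserted and returned instead.
 * Must be called with the proc inner lock held.
 */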
4399 static struct binder_thread *binder_get_thread_ilocked(
4400 struct binder_proc *proc, struct binder_thread *new_thread)
4401 {
4402 struct binder_thread *thread = NULL;
4403 struct rb_node *parent = NULL;
4404 struct rb_node **p = &proc->threads.rb_node;
4405
4406 while (*p) {
4407 parent = *p;
4408 thread = rb_entry(parent, struct binder_thread, rb_node);
4409
4410 if (current->pid < thread->pid)
4411 p = &(*p)->rb_left;
4412 else if (current->pid > thread->pid)
4413 p = &(*p)->rb_right;
4414 else
4415 return thread;
4416 }
4417 if (!new_thread)
4418 return NULL;
4419 thread = new_thread;
4420 binder_stats_created(BINDER_STAT_THREAD);
4421 thread->proc = proc;
4422 thread->pid = current->pid;
4423 get_task_struct(current);
4424 thread->task = current;
4425 atomic_set(&thread->tmp_ref, 0);
4426 init_waitqueue_head(&thread->wait);
4427 INIT_LIST_HEAD(&thread->todo);
4428 rb_link_node(&thread->rb_node, parent, p);
4429 rb_insert_color(&thread->rb_node, &proc->threads);
4430 thread->looper_need_return = true;
4431 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4432 thread->return_error.cmd = BR_OK;
4433 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4434 thread->reply_error.cmd = BR_OK;
4435 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4436 return thread;
4437 }
4438
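/*
 * binder_get_thread() - find or create the binder_thread for current.
 * The lookup is tried first; on a miss the thread struct is allocated with
 * GFP_KERNEL outside the inner lock and the lookup is retried, freeing the
 * allocation if it turned out not to be needed.
 */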
4439 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4440 {
4441 struct binder_thread *thread;
4442 struct binder_thread *new_thread;
4443
4444 binder_inner_proc_lock(proc);
4445 thread = binder_get_thread_ilocked(proc, NULL);
4446 binder_inner_proc_unlock(proc);
4447 if (!thread) {
4448 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4449 if (new_thread == NULL)
4450 return NULL;
4451 binder_inner_proc_lock(proc);
4452 thread = binder_get_thread_ilocked(proc, new_thread);
4453 binder_inner_proc_unlock(proc);
4454 if (thread != new_thread)
4455 kfree(new_thread);
4456 }
4457 return thread;
4458 }
4459
4460 static void binder_free_proc(struct binder_proc *proc)
4461 {
4462 BUG_ON(!list_empty(&proc->todo));
4463 BUG_ON(!list_empty(&proc->delivered_death));
4464 binder_alloc_deferred_release(&proc->alloc);
4465 put_task_struct(proc->tsk);
4466 binder_stats_deleted(BINDER_STAT_PROC);
4467 kfree(proc);
4468 }
4469
4470 static void binder_free_thread(struct binder_thread *thread)
4471 {
4472 BUG_ON(!list_empty(&thread->todo));
4473 binder_stats_deleted(BINDER_STAT_THREAD);
4474 binder_proc_dec_tmpref(thread->proc);
4475 put_task_struct(thread->task);
4476 kfree(thread);
4477 }
4478
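/*
 * binder_thread_release() - tear down a thread on BINDER_THREAD_EXIT or
 * process release: remove it from proc->threads, fail any reply it still
 * owes with BR_DEAD_REPLY, unwind its transaction stack, wake up pollers
 * with POLLFREE and release its pending work before dropping the temporary
 * references.
 */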
4479 static int binder_thread_release(struct binder_proc *proc,
4480 struct binder_thread *thread)
4481 {
4482 struct binder_transaction *t;
4483 struct binder_transaction *send_reply = NULL;
4484 int active_transactions = 0;
4485 struct binder_transaction *last_t = NULL;
4486
4487 binder_inner_proc_lock(thread->proc);
4488 /*
4489 * take a ref on the proc so it survives
4490 * after we remove this thread from proc->threads.
4491 * The corresponding dec is when we actually
4492 * free the thread in binder_free_thread()
4493 */
4494 proc->tmp_ref++;
4495 /*
4496 * take a ref on this thread to ensure it
4497 * survives while we are releasing it
4498 */
4499 atomic_inc(&thread->tmp_ref);
4500 rb_erase(&thread->rb_node, &proc->threads);
4501 t = thread->transaction_stack;
4502 if (t) {
4503 spin_lock(&t->lock);
4504 if (t->to_thread == thread)
4505 send_reply = t;
4506 }
4507 thread->is_dead = true;
4508
4509 while (t) {
4510 last_t = t;
4511 active_transactions++;
4512 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4513 "release %d:%d transaction %d %s, still active\n",
4514 proc->pid, thread->pid,
4515 t->debug_id,
4516 (t->to_thread == thread) ? "in" : "out");
4517
4518 if (t->to_thread == thread) {
4519 t->to_proc = NULL;
4520 t->to_thread = NULL;
4521 if (t->buffer) {
4522 t->buffer->transaction = NULL;
4523 t->buffer = NULL;
4524 }
4525 t = t->to_parent;
4526 } else if (t->from == thread) {
4527 t->from = NULL;
4528 t = t->from_parent;
4529 } else
4530 BUG();
4531 spin_unlock(&last_t->lock);
4532 if (t)
4533 spin_lock(&t->lock);
4534 }
4535
4536 /*
4537 * If this thread used poll, make sure we remove the waitqueue
4538 * from any epoll data structures holding it with POLLFREE.
4539 * waitqueue_active() is safe to use here because we're holding
4540 * the inner lock.
4541 */
4542 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4543 waitqueue_active(&thread->wait)) {
4544 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4545 }
4546
4547 binder_inner_proc_unlock(thread->proc);
4548
4549 if (send_reply)
4550 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4551 binder_release_work(proc, &thread->todo);
4552 binder_thread_dec_tmpref(thread);
4553 return active_transactions;
4554 }
4555
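/*
 * binder_poll() - poll support for a binder fd; reports POLLIN when the
 * calling thread (or, if it may handle process work, the process) has work
 * queued.
 */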
4556 static unsigned int binder_poll(struct file *filp,
4557 struct poll_table_struct *wait)
4558 {
4559 struct binder_proc *proc = filp->private_data;
4560 struct binder_thread *thread = NULL;
4561 bool wait_for_proc_work;
4562
4563 thread = binder_get_thread(proc);
if (!thread)
return POLLERR;
4564
4565 binder_inner_proc_lock(thread->proc);
4566 thread->looper |= BINDER_LOOPER_STATE_POLL;
4567 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4568
4569 binder_inner_proc_unlock(thread->proc);
4570
4571 poll_wait(filp, &thread->wait, wait);
4572
4573 if (binder_has_work(thread, wait_for_proc_work))
4574 return POLLIN;
4575
4576 return 0;
4577 }
4578
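/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ.
 *
 * The write buffer is consumed first; then, if read_size is non-zero, the
 * read buffer is filled, blocking unless the fd was opened O_NONBLOCK.
 * A rough sketch of the user-space side (illustrative only, buffer names
 * are placeholders):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size   = cmds_len,
 *		.read_buffer  = (binder_uintptr_t)replies,
 *		.read_size    = sizeof(replies),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * bwr.write_consumed and bwr.read_consumed report how much was processed.
 */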
4579 static int binder_ioctl_write_read(struct file *filp,
4580 unsigned int cmd, unsigned long arg,
4581 struct binder_thread *thread)
4582 {
4583 int ret = 0;
4584 struct binder_proc *proc = filp->private_data;
4585 unsigned int size = _IOC_SIZE(cmd);
4586 void __user *ubuf = (void __user *)arg;
4587 struct binder_write_read bwr;
4588
4589 if (size != sizeof(struct binder_write_read)) {
4590 ret = -EINVAL;
4591 goto out;
4592 }
4593 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4594 ret = -EFAULT;
4595 goto out;
4596 }
4597 binder_debug(BINDER_DEBUG_READ_WRITE,
4598 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4599 proc->pid, thread->pid,
4600 (u64)bwr.write_size, (u64)bwr.write_buffer,
4601 (u64)bwr.read_size, (u64)bwr.read_buffer);
4602
4603 if (bwr.write_size > 0) {
4604 ret = binder_thread_write(proc, thread,
4605 bwr.write_buffer,
4606 bwr.write_size,
4607 &bwr.write_consumed);
4608 trace_binder_write_done(ret);
4609 if (ret < 0) {
4610 bwr.read_consumed = 0;
4611 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4612 ret = -EFAULT;
4613 goto out;
4614 }
4615 }
4616 if (bwr.read_size > 0) {
4617 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4618 bwr.read_size,
4619 &bwr.read_consumed,
4620 filp->f_flags & O_NONBLOCK);
4621 trace_binder_read_done(ret);
4622 binder_inner_proc_lock(proc);
4623 if (!binder_worklist_empty_ilocked(&proc->todo))
4624 binder_wakeup_proc_ilocked(proc);
4625 binder_inner_proc_unlock(proc);
4626 if (ret < 0) {
4627 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4628 ret = -EFAULT;
4629 goto out;
4630 }
4631 }
4632 binder_debug(BINDER_DEBUG_READ_WRITE,
4633 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4634 proc->pid, thread->pid,
4635 (u64)bwr.write_consumed, (u64)bwr.write_size,
4636 (u64)bwr.read_consumed, (u64)bwr.read_size);
4637 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4638 ret = -EFAULT;
4639 goto out;
4640 }
4641 out:
4642 return ret;
4643 }
4644
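/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR. Only one
 * context manager may exist per binder context: the request fails with
 * -EBUSY if one is already registered and is checked against the security
 * hook and the registered uid; otherwise a new node is created and becomes
 * the node that handle 0 resolves to.
 */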
4645 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4646 {
4647 int ret = 0;
4648 struct binder_proc *proc = filp->private_data;
4649 struct binder_context *context = proc->context;
4650 struct binder_node *new_node;
4651 kuid_t curr_euid = current_euid();
4652
4653 mutex_lock(&context->context_mgr_node_lock);
4654 if (context->binder_context_mgr_node) {
4655 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4656 ret = -EBUSY;
4657 goto out;
4658 }
4659 ret = security_binder_set_context_mgr(proc->tsk);
4660 if (ret < 0)
4661 goto out;
4662 if (uid_valid(context->binder_context_mgr_uid)) {
4663 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4664 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4665 from_kuid(&init_user_ns, curr_euid),
4666 from_kuid(&init_user_ns,
4667 context->binder_context_mgr_uid));
4668 ret = -EPERM;
4669 goto out;
4670 }
4671 } else {
4672 context->binder_context_mgr_uid = curr_euid;
4673 }
4674 new_node = binder_new_node(proc, NULL);
4675 if (!new_node) {
4676 ret = -ENOMEM;
4677 goto out;
4678 }
4679 binder_node_lock(new_node);
4680 new_node->local_weak_refs++;
4681 new_node->local_strong_refs++;
4682 new_node->has_strong_ref = 1;
4683 new_node->has_weak_ref = 1;
4684 context->binder_context_mgr_node = new_node;
4685 binder_node_unlock(new_node);
4686 binder_put_node(new_node);
4687 out:
4688 mutex_unlock(&context->context_mgr_node_lock);
4689 return ret;
4690 }
4691
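/*
 * binder_ioctl_get_node_debug_info() - report the first node of this proc
 * whose ptr is strictly greater than info->ptr, allowing userspace to walk
 * all nodes by feeding the returned ptr back in.
 */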
4692 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4693 struct binder_node_debug_info *info)
{
4694 struct rb_node *n;
4695 binder_uintptr_t ptr = info->ptr;
4696
4697 memset(info, 0, sizeof(*info));
4698
4699 binder_inner_proc_lock(proc);
4700 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4701 struct binder_node *node = rb_entry(n, struct binder_node,
4702 rb_node);
4703 if (node->ptr > ptr) {
4704 info->ptr = node->ptr;
4705 info->cookie = node->cookie;
4706 info->has_strong_ref = node->has_strong_ref;
4707 info->has_weak_ref = node->has_weak_ref;
4708 break;
4709 }
4710 }
4711 binder_inner_proc_unlock(proc);
4712
4713 return 0;
4714 }
4715
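/*
 * binder_ioctl() - top-level ioctl dispatcher. Every command first resolves
 * (or creates) the calling binder_thread; looper_need_return is cleared on
 * the way out so the thread resumes normal looping.
 */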
4716 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4717 {
4718 int ret;
4719 struct binder_proc *proc = filp->private_data;
4720 struct binder_thread *thread;
4721 unsigned int size = _IOC_SIZE(cmd);
4722 void __user *ubuf = (void __user *)arg;
4723
4724 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4725 proc->pid, current->pid, cmd, arg);*/
4726
4727 binder_selftest_alloc(&proc->alloc);
4728
4729 trace_binder_ioctl(cmd, arg);
4730
4731 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4732 if (ret)
4733 goto err_unlocked;
4734
4735 thread = binder_get_thread(proc);
4736 if (thread == NULL) {
4737 ret = -ENOMEM;
4738 goto err;
4739 }
4740
4741 switch (cmd) {
4742 case BINDER_WRITE_READ:
4743 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4744 if (ret)
4745 goto err;
4746 break;
4747 case BINDER_SET_MAX_THREADS: {
4748 int max_threads;
4749
4750 if (copy_from_user(&max_threads, ubuf,
4751 sizeof(max_threads))) {
4752 ret = -EINVAL;
4753 goto err;
4754 }
4755 binder_inner_proc_lock(proc);
4756 proc->max_threads = max_threads;
4757 binder_inner_proc_unlock(proc);
4758 break;
4759 }
4760 case BINDER_SET_CONTEXT_MGR:
4761 ret = binder_ioctl_set_ctx_mgr(filp);
4762 if (ret)
4763 goto err;
4764 break;
4765 case BINDER_THREAD_EXIT:
4766 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4767 proc->pid, thread->pid);
4768 binder_thread_release(proc, thread);
4769 thread = NULL;
4770 break;
4771 case BINDER_VERSION: {
4772 struct binder_version __user *ver = ubuf;
4773
4774 if (size != sizeof(struct binder_version)) {
4775 ret = -EINVAL;
4776 goto err;
4777 }
4778 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4779 &ver->protocol_version)) {
4780 ret = -EINVAL;
4781 goto err;
4782 }
4783 break;
4784 }
4785 case BINDER_GET_NODE_DEBUG_INFO: {
4786 struct binder_node_debug_info info;
4787
4788 if (copy_from_user(&info, ubuf, sizeof(info))) {
4789 ret = -EFAULT;
4790 goto err;
4791 }
4792
4793 ret = binder_ioctl_get_node_debug_info(proc, &info);
4794 if (ret < 0)
4795 goto err;
4796
4797 if (copy_to_user(ubuf, &info, sizeof(info))) {
4798 ret = -EFAULT;
4799 goto err;
4800 }
4801 break;
4802 }
4803 default:
4804 ret = -EINVAL;
4805 goto err;
4806 }
4807 ret = 0;
4808 err:
4809 if (thread)
4810 thread->looper_need_return = false;
4811 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4812 if (ret && ret != -ERESTARTSYS)
4813 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4814 err_unlocked:
4815 trace_binder_ioctl_done(ret);
4816 return ret;
4817 }
4818
4819 static void binder_vma_open(struct vm_area_struct *vma)
4820 {
4821 struct binder_proc *proc = vma->vm_private_data;
4822
4823 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4824 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4825 proc->pid, vma->vm_start, vma->vm_end,
4826 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4827 (unsigned long)pgprot_val(vma->vm_page_prot));
4828 }
4829
4830 static void binder_vma_close(struct vm_area_struct *vma)
4831 {
4832 struct binder_proc *proc = vma->vm_private_data;
4833
4834 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4835 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4836 proc->pid, vma->vm_start, vma->vm_end,
4837 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4838 (unsigned long)pgprot_val(vma->vm_page_prot));
4839 binder_alloc_vma_close(&proc->alloc);
4840 }
4841
4842 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4843 {
4844 return VM_FAULT_SIGBUS;
4845 }
4846
4847 static struct vm_operations_struct binder_vm_ops = {
4848 .open = binder_vma_open,
4849 .close = binder_vma_close,
4850 .fault = binder_vm_fault,
4851 };
4852
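/*
 * binder_mmap() - map the binder buffer area into the calling process.
 *
 * The mapping is capped at 4MB, writable mappings are rejected, VM_DONTCOPY
 * is forced and VM_MAYWRITE cleared, and only the process that opened the
 * device may map it. A rough user-space sketch (illustrative only):
 *
 *	map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, binder_fd, 0);
 */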
4853 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4854 {
4855 int ret;
4856 struct binder_proc *proc = filp->private_data;
4857 const char *failure_string;
4858
4859 if (proc->tsk != current->group_leader)
4860 return -EINVAL;
4861
4862 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4863 vma->vm_end = vma->vm_start + SZ_4M;
4864
4865 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4866 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4867 __func__, proc->pid, vma->vm_start, vma->vm_end,
4868 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4869 (unsigned long)pgprot_val(vma->vm_page_prot));
4870
4871 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4872 ret = -EPERM;
4873 failure_string = "bad vm_flags";
4874 goto err_bad_arg;
4875 }
4876 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4877 vma->vm_ops = &binder_vm_ops;
4878 vma->vm_private_data = proc;
4879
4880 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4881
4882 return ret;
4883
4884 err_bad_arg:
4885 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4886 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4887 return ret;
4888 }
4889
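/*
 * binder_open() - set up the binder_proc for an opening process: inherit the
 * caller's scheduling policy as the default transaction priority when
 * supported, bind the proc to the device's context, add it to binder_procs
 * and expose a per-pid debugfs entry.
 */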
4890 static int binder_open(struct inode *nodp, struct file *filp)
4891 {
4892 struct binder_proc *proc;
4893 struct binder_device *binder_dev;
4894
4895 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4896 current->group_leader->pid, current->pid);
4897
4898 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4899 if (proc == NULL)
4900 return -ENOMEM;
4901 spin_lock_init(&proc->inner_lock);
4902 spin_lock_init(&proc->outer_lock);
4903 get_task_struct(current->group_leader);
4904 proc->tsk = current->group_leader;
4905 INIT_LIST_HEAD(&proc->todo);
4906 if (binder_supported_policy(current->policy)) {
4907 proc->default_priority.sched_policy = current->policy;
4908 proc->default_priority.prio = current->normal_prio;
4909 } else {
4910 proc->default_priority.sched_policy = SCHED_NORMAL;
4911 proc->default_priority.prio = NICE_TO_PRIO(0);
4912 }
4913
4914 binder_dev = container_of(filp->private_data, struct binder_device,
4915 miscdev);
4916 proc->context = &binder_dev->context;
4917 binder_alloc_init(&proc->alloc);
4918
4919 binder_stats_created(BINDER_STAT_PROC);
4920 proc->pid = current->group_leader->pid;
4921 INIT_LIST_HEAD(&proc->delivered_death);
4922 INIT_LIST_HEAD(&proc->waiting_threads);
4923 filp->private_data = proc;
4924
4925 mutex_lock(&binder_procs_lock);
4926 hlist_add_head(&proc->proc_node, &binder_procs);
4927 mutex_unlock(&binder_procs_lock);
4928
4929 if (binder_debugfs_dir_entry_proc) {
4930 char strbuf[11];
4931
4932 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4933 /*
4934 * proc debug entries are shared between contexts, so
4935 * this will fail if the process tries to open the driver
4936 * again with a different context. The printing code will
4937 * anyway print all contexts that a given PID has, so this
4938 * is not a problem.
4939 */
4940 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4941 binder_debugfs_dir_entry_proc,
4942 (void *)(unsigned long)proc->pid,
4943 &binder_proc_fops);
4944 }
4945
4946 return 0;
4947 }
4948
4949 static int binder_flush(struct file *filp, fl_owner_t id)
4950 {
4951 struct binder_proc *proc = filp->private_data;
4952
4953 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4954
4955 return 0;
4956 }
4957
4958 static void binder_deferred_flush(struct binder_proc *proc)
4959 {
4960 struct rb_node *n;
4961 int wake_count = 0;
4962
4963 binder_inner_proc_lock(proc);
4964 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4965 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4966
4967 thread->looper_need_return = true;
4968 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4969 wake_up_interruptible(&thread->wait);
4970 wake_count++;
4971 }
4972 }
4973 binder_inner_proc_unlock(proc);
4974
4975 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4976 "binder_flush: %d woke %d threads\n", proc->pid,
4977 wake_count);
4978 }
4979
4980 static int binder_release(struct inode *nodp, struct file *filp)
4981 {
4982 struct binder_proc *proc = filp->private_data;
4983
4984 debugfs_remove(proc->debugfs_entry);
4985 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4986
4987 return 0;
4988 }
4989
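/*
 * binder_node_release() - called during process teardown for each node the
 * dying process owned. The node is freed outright if nothing references it,
 * otherwise it is moved to binder_dead_nodes and every ref holder that asked
 * for a death notification gets BINDER_WORK_DEAD_BINDER queued. Returns the
 * updated incoming-ref count for the caller's statistics.
 */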
4990 static int binder_node_release(struct binder_node *node, int refs)
4991 {
4992 struct binder_ref *ref;
4993 int death = 0;
4994 struct binder_proc *proc = node->proc;
4995
4996 binder_release_work(proc, &node->async_todo);
4997
4998 binder_node_lock(node);
4999 binder_inner_proc_lock(proc);
5000 binder_dequeue_work_ilocked(&node->work);
5001 /*
5002 * The caller must have taken a temporary ref on the node.
5003 */
5004 BUG_ON(!node->tmp_refs);
5005 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5006 binder_inner_proc_unlock(proc);
5007 binder_node_unlock(node);
5008 binder_free_node(node);
5009
5010 return refs;
5011 }
5012
5013 node->proc = NULL;
5014 node->local_strong_refs = 0;
5015 node->local_weak_refs = 0;
5016 binder_inner_proc_unlock(proc);
5017
5018 spin_lock(&binder_dead_nodes_lock);
5019 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5020 spin_unlock(&binder_dead_nodes_lock);
5021
5022 hlist_for_each_entry(ref, &node->refs, node_entry) {
5023 refs++;
5024 /*
5025 * Need the node lock to synchronize
5026 * with new notification requests and the
5027 * inner lock to synchronize with queued
5028 * death notifications.
5029 */
5030 binder_inner_proc_lock(ref->proc);
5031 if (!ref->death) {
5032 binder_inner_proc_unlock(ref->proc);
5033 continue;
5034 }
5035
5036 death++;
5037
5038 BUG_ON(!list_empty(&ref->death->work.entry));
5039 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5040 binder_enqueue_work_ilocked(&ref->death->work,
5041 &ref->proc->todo);
5042 binder_wakeup_proc_ilocked(ref->proc);
5043 binder_inner_proc_unlock(ref->proc);
5044 }
5045
5046 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5047 "node %d now dead, refs %d, death %d\n",
5048 node->debug_id, refs, death);
5049 binder_node_unlock(node);
5050 binder_put_node(node);
5051
5052 return refs;
5053 }
5054
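/*
 * binder_deferred_release() - final teardown of a binder_proc: drop the
 * context manager node if it lived here, release every thread, node and
 * outgoing ref, drain the remaining work lists and drop the temporary proc
 * reference that kept the struct alive until now.
 */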
5055 static void binder_deferred_release(struct binder_proc *proc)
5056 {
5057 struct binder_context *context = proc->context;
5058 struct rb_node *n;
5059 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5060
5061 mutex_lock(&binder_procs_lock);
5062 hlist_del(&proc->proc_node);
5063 mutex_unlock(&binder_procs_lock);
5064
5065 mutex_lock(&context->context_mgr_node_lock);
5066 if (context->binder_context_mgr_node &&
5067 context->binder_context_mgr_node->proc == proc) {
5068 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5069 "%s: %d context_mgr_node gone\n",
5070 __func__, proc->pid);
5071 context->binder_context_mgr_node = NULL;
5072 }
5073 mutex_unlock(&context->context_mgr_node_lock);
5074 binder_inner_proc_lock(proc);
5075 /*
5076 * Make sure proc stays alive after we
5077 * remove all the threads
5078 */
5079 proc->tmp_ref++;
5080
5081 proc->is_dead = true;
5082 threads = 0;
5083 active_transactions = 0;
5084 while ((n = rb_first(&proc->threads))) {
5085 struct binder_thread *thread;
5086
5087 thread = rb_entry(n, struct binder_thread, rb_node);
5088 binder_inner_proc_unlock(proc);
5089 threads++;
5090 active_transactions += binder_thread_release(proc, thread);
5091 binder_inner_proc_lock(proc);
5092 }
5093
5094 nodes = 0;
5095 incoming_refs = 0;
5096 while ((n = rb_first(&proc->nodes))) {
5097 struct binder_node *node;
5098
5099 node = rb_entry(n, struct binder_node, rb_node);
5100 nodes++;
5101 /*
5102 * take a temporary ref on the node before
5103 * calling binder_node_release() which will either
5104 * kfree() the node or call binder_put_node()
5105 */
5106 binder_inc_node_tmpref_ilocked(node);
5107 rb_erase(&node->rb_node, &proc->nodes);
5108 binder_inner_proc_unlock(proc);
5109 incoming_refs = binder_node_release(node, incoming_refs);
5110 binder_inner_proc_lock(proc);
5111 }
5112 binder_inner_proc_unlock(proc);
5113
5114 outgoing_refs = 0;
5115 binder_proc_lock(proc);
5116 while ((n = rb_first(&proc->refs_by_desc))) {
5117 struct binder_ref *ref;
5118
5119 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5120 outgoing_refs++;
5121 binder_cleanup_ref_olocked(ref);
5122 binder_proc_unlock(proc);
5123 binder_free_ref(ref);
5124 binder_proc_lock(proc);
5125 }
5126 binder_proc_unlock(proc);
5127
5128 binder_release_work(proc, &proc->todo);
5129 binder_release_work(proc, &proc->delivered_death);
5130
5131 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5132 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5133 __func__, proc->pid, threads, nodes, incoming_refs,
5134 outgoing_refs, active_transactions);
5135
5136 binder_proc_dec_tmpref(proc);
5137 }
5138
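/*
 * Workqueue handler: drain binder_deferred_list one proc at a time,
 * consuming the pending flags under binder_deferred_lock and running
 * the flush/release work without that lock held.
 */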
5139 static void binder_deferred_func(struct work_struct *work)
5140 {
5141 struct binder_proc *proc;
5142 int defer;
5143
5144 do {
5145 mutex_lock(&binder_deferred_lock);
5146 if (!hlist_empty(&binder_deferred_list)) {
5147 proc = hlist_entry(binder_deferred_list.first,
5148 struct binder_proc, deferred_work_node);
5149 hlist_del_init(&proc->deferred_work_node);
5150 defer = proc->deferred_work;
5151 proc->deferred_work = 0;
5152 } else {
5153 proc = NULL;
5154 defer = 0;
5155 }
5156 mutex_unlock(&binder_deferred_lock);
5157
5158 if (defer & BINDER_DEFERRED_FLUSH)
5159 binder_deferred_flush(proc);
5160
5161 if (defer & BINDER_DEFERRED_RELEASE)
5162 binder_deferred_release(proc); /* frees proc */
5163 } while (proc);
5164 }
5165 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5166
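/*
 * Record deferred work bits for @proc and, if the proc is not already
 * on binder_deferred_list, queue it and kick the workqueue.
 */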
5167 static void
5168 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5169 {
5170 mutex_lock(&binder_deferred_lock);
5171 proc->deferred_work |= defer;
5172 if (hlist_unhashed(&proc->deferred_work_node)) {
5173 hlist_add_head(&proc->deferred_work_node,
5174 &binder_deferred_list);
5175 queue_work(binder_deferred_workqueue, &binder_deferred_work);
5176 }
5177 mutex_unlock(&binder_deferred_lock);
5178 }
5179
5180 static void print_binder_transaction_ilocked(struct seq_file *m,
5181 struct binder_proc *proc,
5182 const char *prefix,
5183 struct binder_transaction *t)
5184 {
5185 struct binder_proc *to_proc;
5186 struct binder_buffer *buffer = t->buffer;
5187
5188 spin_lock(&t->lock);
5189 to_proc = t->to_proc;
5190 seq_printf(m,
5191 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5192 prefix, t->debug_id, t,
5193 t->from ? t->from->proc->pid : 0,
5194 t->from ? t->from->pid : 0,
5195 to_proc ? to_proc->pid : 0,
5196 t->to_thread ? t->to_thread->pid : 0,
5197 t->code, t->flags, t->priority.sched_policy,
5198 t->priority.prio, t->need_reply);
5199 spin_unlock(&t->lock);
5200
5201 if (proc != to_proc) {
5202 /*
5203 * Can only safely deref buffer if we are holding the
5204 * correct proc inner lock for this node
5205 */
5206 seq_puts(m, "\n");
5207 return;
5208 }
5209
5210 if (buffer == NULL) {
5211 seq_puts(m, " buffer free\n");
5212 return;
5213 }
5214 if (buffer->target_node)
5215 seq_printf(m, " node %d", buffer->target_node->debug_id);
5216 seq_printf(m, " size %zd:%zd data %p\n",
5217 buffer->data_size, buffer->offsets_size,
5218 buffer->data);
5219 }
5220
5221 static void print_binder_work_ilocked(struct seq_file *m,
5222 struct binder_proc *proc,
5223 const char *prefix,
5224 const char *transaction_prefix,
5225 struct binder_work *w)
5226 {
5227 struct binder_node *node;
5228 struct binder_transaction *t;
5229
5230 switch (w->type) {
5231 case BINDER_WORK_TRANSACTION:
5232 t = container_of(w, struct binder_transaction, work);
5233 print_binder_transaction_ilocked(
5234 m, proc, transaction_prefix, t);
5235 break;
5236 case BINDER_WORK_RETURN_ERROR: {
5237 struct binder_error *e = container_of(
5238 w, struct binder_error, work);
5239
5240 seq_printf(m, "%stransaction error: %u\n",
5241 prefix, e->cmd);
5242 } break;
5243 case BINDER_WORK_TRANSACTION_COMPLETE:
5244 seq_printf(m, "%stransaction complete\n", prefix);
5245 break;
5246 case BINDER_WORK_NODE:
5247 node = container_of(w, struct binder_node, work);
5248 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5249 prefix, node->debug_id,
5250 (u64)node->ptr, (u64)node->cookie);
5251 break;
5252 case BINDER_WORK_DEAD_BINDER:
5253 seq_printf(m, "%shas dead binder\n", prefix);
5254 break;
5255 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5256 seq_printf(m, "%shas cleared dead binder\n", prefix);
5257 break;
5258 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5259 seq_printf(m, "%shas cleared death notification\n", prefix);
5260 break;
5261 default:
5262 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5263 break;
5264 }
5265 }
5266
5267 static void print_binder_thread_ilocked(struct seq_file *m,
5268 struct binder_thread *thread,
5269 int print_always)
5270 {
5271 struct binder_transaction *t;
5272 struct binder_work *w;
5273 size_t start_pos = m->count;
5274 size_t header_pos;
5275
5276 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5277 thread->pid, thread->looper,
5278 thread->looper_need_return,
5279 atomic_read(&thread->tmp_ref));
5280 header_pos = m->count;
5281 t = thread->transaction_stack;
5282 while (t) {
5283 if (t->from == thread) {
5284 print_binder_transaction_ilocked(m, thread->proc,
5285 " outgoing transaction", t);
5286 t = t->from_parent;
5287 } else if (t->to_thread == thread) {
5288 print_binder_transaction_ilocked(m, thread->proc,
5289 " incoming transaction", t);
5290 t = t->to_parent;
5291 } else {
5292 print_binder_transaction_ilocked(m, thread->proc,
5293 " bad transaction", t);
5294 t = NULL;
5295 }
5296 }
5297 list_for_each_entry(w, &thread->todo, entry) {
5298 print_binder_work_ilocked(m, thread->proc, " ",
5299 " pending transaction", w);
5300 }
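	/*
	 * If nothing beyond the header was printed and the caller did
	 * not ask for every thread, rewind the seq_file so idle threads
	 * are omitted from the output.
	 */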
5301 if (!print_always && m->count == header_pos)
5302 m->count = start_pos;
5303 }
5304
5305 static void print_binder_node_nilocked(struct seq_file *m,
5306 struct binder_node *node)
5307 {
5308 struct binder_ref *ref;
5309 struct binder_work *w;
5310 int count;
5311
5312 count = 0;
5313 hlist_for_each_entry(ref, &node->refs, node_entry)
5314 count++;
5315
5316 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5317 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5318 node->sched_policy, node->min_priority,
5319 node->has_strong_ref, node->has_weak_ref,
5320 node->local_strong_refs, node->local_weak_refs,
5321 node->internal_strong_refs, count, node->tmp_refs);
5322 if (count) {
5323 seq_puts(m, " proc");
5324 hlist_for_each_entry(ref, &node->refs, node_entry)
5325 seq_printf(m, " %d", ref->proc->pid);
5326 }
5327 seq_puts(m, "\n");
5328 if (node->proc) {
5329 list_for_each_entry(w, &node->async_todo, entry)
5330 print_binder_work_ilocked(m, node->proc, " ",
5331 " pending async transaction", w);
5332 }
5333 }
5334
5335 static void print_binder_ref_olocked(struct seq_file *m,
5336 struct binder_ref *ref)
5337 {
5338 binder_node_lock(ref->node);
5339 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5340 ref->data.debug_id, ref->data.desc,
5341 ref->node->proc ? "" : "dead ",
5342 ref->node->debug_id, ref->data.strong,
5343 ref->data.weak, ref->death);
5344 binder_node_unlock(ref->node);
5345 }
5346
5347 static void print_binder_proc(struct seq_file *m,
5348 struct binder_proc *proc, int print_all)
5349 {
5350 struct binder_work *w;
5351 struct rb_node *n;
5352 size_t start_pos = m->count;
5353 size_t header_pos;
5354 struct binder_node *last_node = NULL;
5355
5356 seq_printf(m, "proc %d\n", proc->pid);
5357 seq_printf(m, "context %s\n", proc->context->name);
5358 header_pos = m->count;
5359
5360 binder_inner_proc_lock(proc);
5361 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5362 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5363 rb_node), print_all);
5364
5365 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5366 struct binder_node *node = rb_entry(n, struct binder_node,
5367 rb_node);
5368 /*
5369 * take a temporary reference on the node so it
5370 * survives and isn't removed from the tree
5371 * while we print it.
5372 */
5373 binder_inc_node_tmpref_ilocked(node);
5374 /* Need to drop inner lock to take node lock */
5375 binder_inner_proc_unlock(proc);
5376 if (last_node)
5377 binder_put_node(last_node);
5378 binder_node_inner_lock(node);
5379 print_binder_node_nilocked(m, node);
5380 binder_node_inner_unlock(node);
5381 last_node = node;
5382 binder_inner_proc_lock(proc);
5383 }
5384 binder_inner_proc_unlock(proc);
5385 if (last_node)
5386 binder_put_node(last_node);
5387
5388 if (print_all) {
5389 binder_proc_lock(proc);
5390 for (n = rb_first(&proc->refs_by_desc);
5391 n != NULL;
5392 n = rb_next(n))
5393 print_binder_ref_olocked(m, rb_entry(n,
5394 struct binder_ref,
5395 rb_node_desc));
5396 binder_proc_unlock(proc);
5397 }
5398 binder_alloc_print_allocated(m, &proc->alloc);
5399 binder_inner_proc_lock(proc);
5400 list_for_each_entry(w, &proc->todo, entry)
5401 print_binder_work_ilocked(m, proc, " ",
5402 " pending transaction", w);
5403 list_for_each_entry(w, &proc->delivered_death, entry) {
5404 seq_puts(m, " has delivered dead binder\n");
5405 break;
5406 }
5407 binder_inner_proc_unlock(proc);
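	/*
	 * As in print_binder_thread_ilocked(): unless print_all was
	 * requested, rewind the output for procs with nothing to report.
	 */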
5408 if (!print_all && m->count == header_pos)
5409 m->count = start_pos;
5410 }
5411
5412 static const char * const binder_return_strings[] = {
5413 "BR_ERROR",
5414 "BR_OK",
5415 "BR_TRANSACTION",
5416 "BR_REPLY",
5417 "BR_ACQUIRE_RESULT",
5418 "BR_DEAD_REPLY",
5419 "BR_TRANSACTION_COMPLETE",
5420 "BR_INCREFS",
5421 "BR_ACQUIRE",
5422 "BR_RELEASE",
5423 "BR_DECREFS",
5424 "BR_ATTEMPT_ACQUIRE",
5425 "BR_NOOP",
5426 "BR_SPAWN_LOOPER",
5427 "BR_FINISHED",
5428 "BR_DEAD_BINDER",
5429 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5430 "BR_FAILED_REPLY"
5431 };
5432
5433 static const char * const binder_command_strings[] = {
5434 "BC_TRANSACTION",
5435 "BC_REPLY",
5436 "BC_ACQUIRE_RESULT",
5437 "BC_FREE_BUFFER",
5438 "BC_INCREFS",
5439 "BC_ACQUIRE",
5440 "BC_RELEASE",
5441 "BC_DECREFS",
5442 "BC_INCREFS_DONE",
5443 "BC_ACQUIRE_DONE",
5444 "BC_ATTEMPT_ACQUIRE",
5445 "BC_REGISTER_LOOPER",
5446 "BC_ENTER_LOOPER",
5447 "BC_EXIT_LOOPER",
5448 "BC_REQUEST_DEATH_NOTIFICATION",
5449 "BC_CLEAR_DEATH_NOTIFICATION",
5450 "BC_DEAD_BINDER_DONE",
5451 "BC_TRANSACTION_SG",
5452 "BC_REPLY_SG",
5453 };
5454
5455 static const char * const binder_objstat_strings[] = {
5456 "proc",
5457 "thread",
5458 "node",
5459 "ref",
5460 "death",
5461 "transaction",
5462 "transaction_complete"
5463 };
5464
5465 static void print_binder_stats(struct seq_file *m, const char *prefix,
5466 struct binder_stats *stats)
5467 {
5468 int i;
5469
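	/*
	 * The string tables above must stay in sync with the stats
	 * counter arrays; a size mismatch becomes a compile-time error.
	 */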
5470 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5471 ARRAY_SIZE(binder_command_strings));
5472 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5473 int temp = atomic_read(&stats->bc[i]);
5474
5475 if (temp)
5476 seq_printf(m, "%s%s: %d\n", prefix,
5477 binder_command_strings[i], temp);
5478 }
5479
5480 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5481 ARRAY_SIZE(binder_return_strings));
5482 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5483 int temp = atomic_read(&stats->br[i]);
5484
5485 if (temp)
5486 seq_printf(m, "%s%s: %d\n", prefix,
5487 binder_return_strings[i], temp);
5488 }
5489
5490 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5491 ARRAY_SIZE(binder_objstat_strings));
5492 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5493 ARRAY_SIZE(stats->obj_deleted));
5494 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5495 int created = atomic_read(&stats->obj_created[i]);
5496 int deleted = atomic_read(&stats->obj_deleted[i]);
5497
5498 if (created || deleted)
5499 seq_printf(m, "%s%s: active %d total %d\n",
5500 prefix,
5501 binder_objstat_strings[i],
5502 created - deleted,
5503 created);
5504 }
5505 }
5506
5507 static void print_binder_proc_stats(struct seq_file *m,
5508 struct binder_proc *proc)
5509 {
5510 struct binder_work *w;
5511 struct binder_thread *thread;
5512 struct rb_node *n;
5513 int count, strong, weak, ready_threads;
5514 size_t free_async_space =
5515 binder_alloc_get_free_async_space(&proc->alloc);
5516
5517 seq_printf(m, "proc %d\n", proc->pid);
5518 seq_printf(m, "context %s\n", proc->context->name);
5519 count = 0;
5520 ready_threads = 0;
5521 binder_inner_proc_lock(proc);
5522 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5523 count++;
5524
5525 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5526 ready_threads++;
5527
5528 seq_printf(m, " threads: %d\n", count);
5529 seq_printf(m, " requested threads: %d+%d/%d\n"
5530 " ready threads %d\n"
5531 " free async space %zd\n", proc->requested_threads,
5532 proc->requested_threads_started, proc->max_threads,
5533 ready_threads,
5534 free_async_space);
5535 count = 0;
5536 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5537 count++;
5538 binder_inner_proc_unlock(proc);
5539 seq_printf(m, " nodes: %d\n", count);
5540 count = 0;
5541 strong = 0;
5542 weak = 0;
5543 binder_proc_lock(proc);
5544 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5545 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5546 rb_node_desc);
5547 count++;
5548 strong += ref->data.strong;
5549 weak += ref->data.weak;
5550 }
5551 binder_proc_unlock(proc);
5552 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5553
5554 count = binder_alloc_get_allocated_count(&proc->alloc);
5555 seq_printf(m, " buffers: %d\n", count);
5556
5557 binder_alloc_print_pages(m, &proc->alloc);
5558
5559 count = 0;
5560 binder_inner_proc_lock(proc);
5561 list_for_each_entry(w, &proc->todo, entry) {
5562 if (w->type == BINDER_WORK_TRANSACTION)
5563 count++;
5564 }
5565 binder_inner_proc_unlock(proc);
5566 seq_printf(m, " pending transactions: %d\n", count);
5567
5568 print_binder_stats(m, " ", &proc->stats);
5569 }
5570
5571
5572 static int binder_state_show(struct seq_file *m, void *unused)
5573 {
5574 struct binder_proc *proc;
5575 struct binder_node *node;
5576 struct binder_node *last_node = NULL;
5577
5578 seq_puts(m, "binder state:\n");
5579
5580 spin_lock(&binder_dead_nodes_lock);
5581 if (!hlist_empty(&binder_dead_nodes))
5582 seq_puts(m, "dead nodes:\n");
5583 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5584 /*
5585 * take a temporary reference on the node so it
5586 * survives and isn't removed from the list
5587 * while we print it.
5588 */
5589 node->tmp_refs++;
5590 spin_unlock(&binder_dead_nodes_lock);
5591 if (last_node)
5592 binder_put_node(last_node);
5593 binder_node_lock(node);
5594 print_binder_node_nilocked(m, node);
5595 binder_node_unlock(node);
5596 last_node = node;
5597 spin_lock(&binder_dead_nodes_lock);
5598 }
5599 spin_unlock(&binder_dead_nodes_lock);
5600 if (last_node)
5601 binder_put_node(last_node);
5602
5603 mutex_lock(&binder_procs_lock);
5604 hlist_for_each_entry(proc, &binder_procs, proc_node)
5605 print_binder_proc(m, proc, 1);
5606 mutex_unlock(&binder_procs_lock);
5607
5608 return 0;
5609 }
5610
5611 static int binder_stats_show(struct seq_file *m, void *unused)
5612 {
5613 struct binder_proc *proc;
5614
5615 seq_puts(m, "binder stats:\n");
5616
5617 print_binder_stats(m, "", &binder_stats);
5618
5619 mutex_lock(&binder_procs_lock);
5620 hlist_for_each_entry(proc, &binder_procs, proc_node)
5621 print_binder_proc_stats(m, proc);
5622 mutex_unlock(&binder_procs_lock);
5623
5624 return 0;
5625 }
5626
5627 static int binder_transactions_show(struct seq_file *m, void *unused)
5628 {
5629 struct binder_proc *proc;
5630
5631 seq_puts(m, "binder transactions:\n");
5632 mutex_lock(&binder_procs_lock);
5633 hlist_for_each_entry(proc, &binder_procs, proc_node)
5634 print_binder_proc(m, proc, 0);
5635 mutex_unlock(&binder_procs_lock);
5636
5637 return 0;
5638 }
5639
5640 static int binder_proc_show(struct seq_file *m, void *unused)
5641 {
5642 struct binder_proc *itr;
5643 int pid = (unsigned long)m->private;
5644
5645 mutex_lock(&binder_procs_lock);
5646 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5647 if (itr->pid == pid) {
5648 seq_puts(m, "binder proc state:\n");
5649 print_binder_proc(m, itr, 1);
5650 }
5651 }
5652 mutex_unlock(&binder_procs_lock);
5653
5654 return 0;
5655 }
5656
5657 static void print_binder_transaction_log_entry(struct seq_file *m,
5658 struct binder_transaction_log_entry *e)
5659 {
5660 int debug_id = READ_ONCE(e->debug_id_done);
5661 /*
5662 * read barrier to guarantee debug_id_done read before
5663 * we print the log values
5664 */
5665 smp_rmb();
5666 seq_printf(m,
5667 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5668 e->debug_id, (e->call_type == 2) ? "reply" :
5669 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5670 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5671 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5672 e->return_error, e->return_error_param,
5673 e->return_error_line);
5674 /*
5675 * read-barrier to guarantee read of debug_id_done after
5676 * done printing the fields of the entry
5677 */
5678 smp_rmb();
5679 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5680 "\n" : " (incomplete)\n");
5681 }
5682
5683 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5684 {
5685 struct binder_transaction_log *log = m->private;
5686 unsigned int log_cur = atomic_read(&log->cur);
5687 unsigned int count;
5688 unsigned int cur;
5689 int i;
5690
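	/*
	 * The log is a ring buffer and cur points at the last entry
	 * written.  Until it wraps, start from slot 0 and print only the
	 * entries in use; once full, print all ARRAY_SIZE(log->entry)
	 * slots oldest-first.
	 */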
5691 count = log_cur + 1;
5692 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5693 0 : count % ARRAY_SIZE(log->entry);
5694 if (count > ARRAY_SIZE(log->entry) || log->full)
5695 count = ARRAY_SIZE(log->entry);
5696 for (i = 0; i < count; i++) {
5697 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5698
5699 print_binder_transaction_log_entry(m, &log->entry[index]);
5700 }
5701 return 0;
5702 }
5703
5704 static const struct file_operations binder_fops = {
5705 .owner = THIS_MODULE,
5706 .poll = binder_poll,
5707 .unlocked_ioctl = binder_ioctl,
5708 .compat_ioctl = binder_ioctl,
5709 .mmap = binder_mmap,
5710 .open = binder_open,
5711 .flush = binder_flush,
5712 .release = binder_release,
5713 };
5714
5715 BINDER_DEBUG_ENTRY(state);
5716 BINDER_DEBUG_ENTRY(stats);
5717 BINDER_DEBUG_ENTRY(transactions);
5718 BINDER_DEBUG_ENTRY(transaction_log);
5719
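/*
 * Allocate a binder_device for @name, register it as a dynamic misc
 * character device and add it to the global binder_devices list.
 */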
5720 static int __init init_binder_device(const char *name)
5721 {
5722 int ret;
5723 struct binder_device *binder_device;
5724
5725 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5726 if (!binder_device)
5727 return -ENOMEM;
5728
5729 binder_device->miscdev.fops = &binder_fops;
5730 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5731 binder_device->miscdev.name = name;
5732
5733 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5734 binder_device->context.name = name;
5735 mutex_init(&binder_device->context.context_mgr_node_lock);
5736
5737 ret = misc_register(&binder_device->miscdev);
5738 if (ret < 0) {
5739 kfree(binder_device);
5740 return ret;
5741 }
5742
5743 hlist_add_head(&binder_device->hlist, &binder_devices);
5744
5745 return ret;
5746 }
5747
5748 static int __init binder_init(void)
5749 {
5750 int ret;
5751 char *device_name, *device_names;
5752 struct binder_device *device;
5753 struct hlist_node *tmp;
5754
5755 binder_alloc_shrinker_init();
5756
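	/*
	 * Start the log cursors at ~0U: binder_transaction_log_show()
	 * treats cur + 1 as the number of entries in use, so an untouched
	 * log prints nothing.
	 */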
5757 atomic_set(&binder_transaction_log.cur, ~0U);
5758 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5759 binder_deferred_workqueue = create_singlethread_workqueue("binder");
5760 if (!binder_deferred_workqueue)
5761 return -ENOMEM;
5762
5763 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5764 if (binder_debugfs_dir_entry_root)
5765 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5766 binder_debugfs_dir_entry_root);
5767
5768 if (binder_debugfs_dir_entry_root) {
5769 debugfs_create_file("state",
5770 S_IRUGO,
5771 binder_debugfs_dir_entry_root,
5772 NULL,
5773 &binder_state_fops);
5774 debugfs_create_file("stats",
5775 S_IRUGO,
5776 binder_debugfs_dir_entry_root,
5777 NULL,
5778 &binder_stats_fops);
5779 debugfs_create_file("transactions",
5780 S_IRUGO,
5781 binder_debugfs_dir_entry_root,
5782 NULL,
5783 &binder_transactions_fops);
5784 debugfs_create_file("transaction_log",
5785 S_IRUGO,
5786 binder_debugfs_dir_entry_root,
5787 &binder_transaction_log,
5788 &binder_transaction_log_fops);
5789 debugfs_create_file("failed_transaction_log",
5790 S_IRUGO,
5791 binder_debugfs_dir_entry_root,
5792 &binder_transaction_log_failed,
5793 &binder_transaction_log_fops);
5794 }
5795
5796 /*
5797 * Copy the module_parameter string, because we don't want to
5798 * tokenize it in-place.
5799 */
5800 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5801 if (!device_names) {
5802 ret = -ENOMEM;
5803 goto err_alloc_device_names_failed;
5804 }
5805 strcpy(device_names, binder_devices_param);
5806
5807 while ((device_name = strsep(&device_names, ","))) {
5808 ret = init_binder_device(device_name);
5809 if (ret)
5810 goto err_init_binder_device_failed;
5811 }
5812
5813 return ret;
5814
5815 err_init_binder_device_failed:
5816 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5817 misc_deregister(&device->miscdev);
5818 hlist_del(&device->hlist);
5819 kfree(device);
5820 }
5821 err_alloc_device_names_failed:
5822 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5823
5824 destroy_workqueue(binder_deferred_workqueue);
5825
5826 return ret;
5827 }
5828
5829 device_initcall(binder_init);
5830
5831 #define CREATE_TRACE_POINTS
5832 #include "binder_trace.h"
5833
5834 MODULE_LICENSE("GPL v2");
5835