1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3 *
4 * Android IPC Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 */
8
9 /*
10 * Locking overview
11 *
12 * There are 3 main spinlocks which must be acquired in the
13 * order shown:
14 *
15 * 1) proc->outer_lock : protects binder_ref
16 * binder_proc_lock() and binder_proc_unlock() are
17 * used to acq/rel.
18 * 2) node->lock : protects most fields of binder_node.
19 * binder_node_lock() and binder_node_unlock() are
20 * used to acq/rel
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
23 * and all todo lists associated with the binder_proc
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
26 * binder_inner_proc_lock() and binder_inner_proc_unlock()
27 * are used to acq/rel
28 *
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
31 *
32 * Functions that require a lock held on entry indicate which lock
33 * in the suffix of the function name:
34 *
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
40 * ...
41 */
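
/*
 * Illustrative sketch only (not a real call site): when more than one
 * of these locks is needed, they are taken in the order documented
 * above and released in reverse, e.g.:
 *
 *	binder_proc_lock(proc);          <- 1) proc->outer_lock
 *	binder_node_lock(node);          <- 2) node->lock
 *	binder_inner_proc_lock(proc);    <- 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */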
42
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70
71 #include <uapi/linux/sched/types.h>
72 #include <uapi/linux/android/binder.h>
73
74 #include <linux/cacheflush.h>
75
76 #include "binder_internal.h"
77 #include "binder_trace.h"
78 #include <trace/hooks/binder.h>
79
80 static HLIST_HEAD(binder_deferred_list);
81 static DEFINE_MUTEX(binder_deferred_lock);
82
83 static HLIST_HEAD(binder_devices);
84 static HLIST_HEAD(binder_procs);
85 static DEFINE_MUTEX(binder_procs_lock);
86
87 static HLIST_HEAD(binder_dead_nodes);
88 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
89
90 static struct dentry *binder_debugfs_dir_entry_root;
91 static struct dentry *binder_debugfs_dir_entry_proc;
92 static atomic_t binder_last_id;
93
94 static int proc_show(struct seq_file *m, void *unused);
95 DEFINE_SHOW_ATTRIBUTE(proc);
96
97 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
98
99 enum {
100 BINDER_DEBUG_USER_ERROR = 1U << 0,
101 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
102 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
103 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
104 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
105 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
106 BINDER_DEBUG_READ_WRITE = 1U << 6,
107 BINDER_DEBUG_USER_REFS = 1U << 7,
108 BINDER_DEBUG_THREADS = 1U << 8,
109 BINDER_DEBUG_TRANSACTION = 1U << 9,
110 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
111 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
112 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
113 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
114 BINDER_DEBUG_SPINLOCKS = 1U << 14,
115 };
116 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
117 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
118 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
119
120 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
121 module_param_named(devices, binder_devices_param, charp, 0444);
122
123 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
124 static int binder_stop_on_user_error;
125
126 static int binder_set_stop_on_user_error(const char *val,
127 const struct kernel_param *kp)
128 {
129 int ret;
130
131 ret = param_set_int(val, kp);
132 if (binder_stop_on_user_error < 2)
133 wake_up(&binder_user_error_wait);
134 return ret;
135 }
136 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
137 param_get_int, &binder_stop_on_user_error, 0644);
138
139 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
140 {
141 struct va_format vaf;
142 va_list args;
143
144 if (binder_debug_mask & mask) {
145 va_start(args, format);
146 vaf.va = &args;
147 vaf.fmt = format;
148 pr_info_ratelimited("%pV", &vaf);
149 va_end(args);
150 }
151 }
152
153 #define binder_txn_error(x...) \
154 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
155
156 static __printf(1, 2) void binder_user_error(const char *format, ...)
157 {
158 struct va_format vaf;
159 va_list args;
160
161 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
162 va_start(args, format);
163 vaf.va = &args;
164 vaf.fmt = format;
165 pr_info_ratelimited("%pV", &vaf);
166 va_end(args);
167 }
168
169 if (binder_stop_on_user_error)
170 binder_stop_on_user_error = 2;
171 }
172
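/*
 * Record an extended error (originating id, the failed command and an
 * errno-style param) in a struct binder_extended_error, typically the
 * per-thread thread->ee that userspace can retrieve later.
 */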
173 #define binder_set_extended_error(ee, _id, _command, _param) \
174 do { \
175 (ee)->id = _id; \
176 (ee)->command = _command; \
177 (ee)->param = _param; \
178 } while (0)
179
180 #define to_flat_binder_object(hdr) \
181 container_of(hdr, struct flat_binder_object, hdr)
182
183 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
184
185 #define to_binder_buffer_object(hdr) \
186 container_of(hdr, struct binder_buffer_object, hdr)
187
188 #define to_binder_fd_array_object(hdr) \
189 container_of(hdr, struct binder_fd_array_object, hdr)
190
191 static struct binder_stats binder_stats;
192
193 static inline void binder_stats_deleted(enum binder_stat_types type)
194 {
195 atomic_inc(&binder_stats.obj_deleted[type]);
196 }
197
198 static inline void binder_stats_created(enum binder_stat_types type)
199 {
200 atomic_inc(&binder_stats.obj_created[type]);
201 }
202
203 struct binder_transaction_log_entry {
204 int debug_id;
205 int debug_id_done;
206 int call_type;
207 int from_proc;
208 int from_thread;
209 int target_handle;
210 int to_proc;
211 int to_thread;
212 int to_node;
213 int data_size;
214 int offsets_size;
215 int return_error_line;
216 uint32_t return_error;
217 uint32_t return_error_param;
218 char context_name[BINDERFS_MAX_NAME + 1];
219 };
220
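/*
 * Fixed-size ring buffer holding the most recent transaction log
 * entries. @cur increases monotonically; @full is set once the buffer
 * has wrapped at least once.
 */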
221 struct binder_transaction_log {
222 atomic_t cur;
223 bool full;
224 struct binder_transaction_log_entry entry[32];
225 };
226
227 static struct binder_transaction_log binder_transaction_log;
228 static struct binder_transaction_log binder_transaction_log_failed;
229
230 static struct binder_transaction_log_entry *binder_transaction_log_add(
231 struct binder_transaction_log *log)
232 {
233 struct binder_transaction_log_entry *e;
234 unsigned int cur = atomic_inc_return(&log->cur);
235
236 if (cur >= ARRAY_SIZE(log->entry))
237 log->full = true;
238 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
239 WRITE_ONCE(e->debug_id_done, 0);
240 /*
241 * write-barrier to synchronize access to e->debug_id_done.
242 * We make sure the initialized 0 value is seen before
243 * the other fields are zeroed by memset().
244 */
245 smp_wmb();
246 memset(e, 0, sizeof(*e));
247 return e;
248 }
249
250 enum binder_deferred_state {
251 BINDER_DEFERRED_FLUSH = 0x01,
252 BINDER_DEFERRED_RELEASE = 0x02,
253 };
254
255 enum {
256 BINDER_LOOPER_STATE_REGISTERED = 0x01,
257 BINDER_LOOPER_STATE_ENTERED = 0x02,
258 BINDER_LOOPER_STATE_EXITED = 0x04,
259 BINDER_LOOPER_STATE_INVALID = 0x08,
260 BINDER_LOOPER_STATE_WAITING = 0x10,
261 BINDER_LOOPER_STATE_POLL = 0x20,
262 };
263
264 /**
265 * binder_proc_lock() - Acquire outer lock for given binder_proc
266 * @proc: struct binder_proc to acquire
267 *
268 * Acquires proc->outer_lock. Used to protect binder_ref
269 * structures associated with the given proc.
270 */
271 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
272 static void
273 _binder_proc_lock(struct binder_proc *proc, int line)
274 __acquires(&proc->outer_lock)
275 {
276 binder_debug(BINDER_DEBUG_SPINLOCKS,
277 "%s: line=%d\n", __func__, line);
278 spin_lock(&proc->outer_lock);
279 }
280
281 /**
282 * binder_proc_unlock() - Release spinlock for given binder_proc
283 * @proc: struct binder_proc to acquire
284 *
285 * Release lock acquired via binder_proc_lock()
286 */
287 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
288 static void
289 _binder_proc_unlock(struct binder_proc *proc, int line)
290 __releases(&proc->outer_lock)
291 {
292 binder_debug(BINDER_DEBUG_SPINLOCKS,
293 "%s: line=%d\n", __func__, line);
294 spin_unlock(&proc->outer_lock);
295 }
296
297 /**
298 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
299 * @proc: struct binder_proc to acquire
300 *
301 * Acquires proc->inner_lock. Used to protect todo lists
302 */
303 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
304 static void
305 _binder_inner_proc_lock(struct binder_proc *proc, int line)
306 __acquires(&proc->inner_lock)
307 {
308 binder_debug(BINDER_DEBUG_SPINLOCKS,
309 "%s: line=%d\n", __func__, line);
310 spin_lock(&proc->inner_lock);
311 }
312
313 /**
314 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
315 * @proc: struct binder_proc to acquire
316 *
317 * Release lock acquired via binder_inner_proc_lock()
318 */
319 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
320 static void
321 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
322 __releases(&proc->inner_lock)
323 {
324 binder_debug(BINDER_DEBUG_SPINLOCKS,
325 "%s: line=%d\n", __func__, line);
326 spin_unlock(&proc->inner_lock);
327 }
328
329 /**
330 * binder_node_lock() - Acquire spinlock for given binder_node
331 * @node: struct binder_node to acquire
332 *
333 * Acquires node->lock. Used to protect binder_node fields
334 */
335 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
336 static void
337 _binder_node_lock(struct binder_node *node, int line)
338 __acquires(&node->lock)
339 {
340 binder_debug(BINDER_DEBUG_SPINLOCKS,
341 "%s: line=%d\n", __func__, line);
342 spin_lock(&node->lock);
343 }
344
345 /**
346 * binder_node_unlock() - Release spinlock for given binder_node
347 * @node: struct binder_node to release
348 *
349 * Release lock acquired via binder_node_lock()
350 */
351 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
352 static void
353 _binder_node_unlock(struct binder_node *node, int line)
354 __releases(&node->lock)
355 {
356 binder_debug(BINDER_DEBUG_SPINLOCKS,
357 "%s: line=%d\n", __func__, line);
358 spin_unlock(&node->lock);
359 }
360
361 /**
362 * binder_node_inner_lock() - Acquire node and inner locks
363 * @node: struct binder_node to acquire
364 *
365 * Acquires node->lock. If node->proc is non-NULL, also acquires
366 * proc->inner_lock. Used to protect binder_node fields.
367 */
368 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
369 static void
370 _binder_node_inner_lock(struct binder_node *node, int line)
371 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
372 {
373 binder_debug(BINDER_DEBUG_SPINLOCKS,
374 "%s: line=%d\n", __func__, line);
375 spin_lock(&node->lock);
376 if (node->proc)
377 binder_inner_proc_lock(node->proc);
378 else
379 /* annotation for sparse */
380 __acquire(&node->proc->inner_lock);
381 }
382
383 /**
384 * binder_node_inner_unlock() - Release node and inner locks
385 * @node: struct binder_node to release
386 *
387 * Release locks acquired via binder_node_inner_lock()
388 */
389 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
390 static void
391 _binder_node_inner_unlock(struct binder_node *node, int line)
392 __releases(&node->lock) __releases(&node->proc->inner_lock)
393 {
394 struct binder_proc *proc = node->proc;
395
396 binder_debug(BINDER_DEBUG_SPINLOCKS,
397 "%s: line=%d\n", __func__, line);
398 if (proc)
399 binder_inner_proc_unlock(proc);
400 else
401 /* annotation for sparse */
402 __release(&node->proc->inner_lock);
403 spin_unlock(&node->lock);
404 }
405
406 static bool binder_worklist_empty_ilocked(struct list_head *list)
407 {
408 return list_empty(list);
409 }
410
411 /**
412 * binder_worklist_empty() - Check if no items on the work list
413 * @proc: binder_proc associated with list
414 * @list: list to check
415 *
416 * Return: true if there are no items on list, else false
417 */
418 static bool binder_worklist_empty(struct binder_proc *proc,
419 struct list_head *list)
420 {
421 bool ret;
422
423 binder_inner_proc_lock(proc);
424 ret = binder_worklist_empty_ilocked(list);
425 binder_inner_proc_unlock(proc);
426 return ret;
427 }
428
429 /**
430 * binder_enqueue_work_ilocked() - Add an item to the work list
431 * @work: struct binder_work to add to list
432 * @target_list: list to add work to
433 *
434 * Adds the work to the specified list. Asserts that work
435 * is not already on a list.
436 *
437 * Requires the proc->inner_lock to be held.
438 */
439 static void
440 binder_enqueue_work_ilocked(struct binder_work *work,
441 struct list_head *target_list)
442 {
443 BUG_ON(target_list == NULL);
444 BUG_ON(work->entry.next && !list_empty(&work->entry));
445 trace_android_vh_binder_list_add_work(work, target_list);
446 list_add_tail(&work->entry, target_list);
447 }
448
449 /**
450 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
451 * @thread: thread to queue work to
452 * @work: struct binder_work to add to list
453 *
454 * Adds the work to the todo list of the thread. Doesn't set the process_todo
455 * flag, which means that (if it wasn't already set) the thread will go to
456 * sleep without handling this work when it calls read.
457 *
458 * Requires the proc->inner_lock to be held.
459 */
460 static void
461 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
462 struct binder_work *work)
463 {
464 WARN_ON(!list_empty(&thread->waiting_thread_node));
465 binder_enqueue_work_ilocked(work, &thread->todo);
466 }
467
468 /**
469 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
470 * @thread: thread to queue work to
471 * @work: struct binder_work to add to list
472 *
473 * Adds the work to the todo list of the thread, and enables processing
474 * of the todo queue.
475 *
476 * Requires the proc->inner_lock to be held.
477 */
478 static void
479 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
480 struct binder_work *work)
481 {
482 WARN_ON(!list_empty(&thread->waiting_thread_node));
483 binder_enqueue_work_ilocked(work, &thread->todo);
484
485 /* (e)poll-based threads require an explicit wakeup signal when
486 * queuing their own work; they rely on these events to consume
487 * messages without blocking on I/O. Without it, threads risk waiting
488 * indefinitely without handling the work.
489 */
490 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
491 thread->pid == current->pid && !thread->process_todo)
492 wake_up_interruptible_sync(&thread->wait);
493
494 thread->process_todo = true;
495 }
496
497 /**
498 * binder_enqueue_thread_work() - Add an item to the thread work list
499 * @thread: thread to queue work to
500 * @work: struct binder_work to add to list
501 *
502 * Adds the work to the todo list of the thread, and enables processing
503 * of the todo queue.
504 */
505 static void
506 binder_enqueue_thread_work(struct binder_thread *thread,
507 struct binder_work *work)
508 {
509 binder_inner_proc_lock(thread->proc);
510 binder_enqueue_thread_work_ilocked(thread, work);
511 binder_inner_proc_unlock(thread->proc);
512 }
513
514 static void
515 binder_dequeue_work_ilocked(struct binder_work *work)
516 {
517 list_del_init(&work->entry);
518 }
519
520 /**
521 * binder_dequeue_work() - Removes an item from the work list
522 * @proc: binder_proc associated with list
523 * @work: struct binder_work to remove from list
524 *
525 * Removes the specified work item from whatever list it is on.
526 * Can safely be called if work is not on any list.
527 */
528 static void
529 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
530 {
531 binder_inner_proc_lock(proc);
532 binder_dequeue_work_ilocked(work);
533 binder_inner_proc_unlock(proc);
534 }
535
536 static struct binder_work *binder_dequeue_work_head_ilocked(
537 struct list_head *list)
538 {
539 struct binder_work *w;
540
541 w = list_first_entry_or_null(list, struct binder_work, entry);
542 if (w)
543 list_del_init(&w->entry);
544 return w;
545 }
546
547 static void
548 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
549 static void binder_free_thread(struct binder_thread *thread);
550 static void binder_free_proc(struct binder_proc *proc);
551 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
552
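/*
 * Check whether @thread has work pending: its own todo must be
 * processed (process_todo), it must return to userspace
 * (looper_need_return), or, if @do_proc_work is true, the process-wide
 * proc->todo list is non-empty. Requires proc->inner_lock.
 */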
553 static bool binder_has_work_ilocked(struct binder_thread *thread,
554 bool do_proc_work)
555 {
556 bool has_work = thread->process_todo ||
557 thread->looper_need_return ||
558 (do_proc_work &&
559 !binder_worklist_empty_ilocked(&thread->proc->todo));
560 trace_android_vh_binder_has_special_work_ilocked(thread, do_proc_work, &has_work);
561 return has_work;
562 }
563
564 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
565 {
566 bool has_work;
567
568 binder_inner_proc_lock(thread->proc);
569 has_work = binder_has_work_ilocked(thread, do_proc_work);
570 binder_inner_proc_unlock(thread->proc);
571
572 return has_work;
573 }
574
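/*
 * A thread may pick up process-wide work only when it has no pending
 * transaction stack and its own todo list is empty.
 */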
575 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
576 {
577 return !thread->transaction_stack &&
578 binder_worklist_empty_ilocked(&thread->todo);
579 }
580
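/*
 * Wake every poll()-based looper thread that is currently available
 * for process work. Used when no thread is registered on the
 * waiting_threads list.
 */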
581 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
582 bool sync)
583 {
584 struct rb_node *n;
585 struct binder_thread *thread;
586
587 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
588 thread = rb_entry(n, struct binder_thread, rb_node);
589 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
590 binder_available_for_proc_work_ilocked(thread)) {
591 if (sync)
592 wake_up_interruptible_sync(&thread->wait);
593 else
594 wake_up_interruptible(&thread->wait);
595 }
596 }
597 }
598
599 /**
600 * binder_select_thread_ilocked() - selects a thread for doing proc work.
601 * @proc: process to select a thread from
602 *
603 * Note that calling this function moves the thread off the waiting_threads
604 * list, so it can only be woken up by the caller of this function, or a
605 * signal. Therefore, callers *should* always wake up the thread this function
606 * returns.
607 *
608 * Return: If there's a thread currently waiting for process work,
609 * returns that thread. Otherwise returns NULL.
610 */
611 static struct binder_thread *
612 binder_select_thread_ilocked(struct binder_proc *proc)
613 {
614 struct binder_thread *thread;
615
616 assert_spin_locked(&proc->inner_lock);
617 thread = list_first_entry_or_null(&proc->waiting_threads,
618 struct binder_thread,
619 waiting_thread_node);
620
621 if (thread)
622 list_del_init(&thread->waiting_thread_node);
623
624 return thread;
625 }
626
627 /**
628 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
629 * @proc: process to wake up a thread in
630 * @thread: specific thread to wake-up (may be NULL)
631 * @sync: whether to do a synchronous wake-up
632 *
633 * This function wakes up a thread in the @proc process.
634 * The caller may provide a specific thread to wake-up in
635 * the @thread parameter. If @thread is NULL, this function
636 * will wake up threads that have called poll().
637 *
638 * Note that for this function to work as expected, callers
639 * should first call binder_select_thread() to find a thread
640 * to handle the work (if they don't have a thread already),
641 * and pass the result into the @thread parameter.
642 */
643 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
644 struct binder_thread *thread,
645 bool sync)
646 {
647 assert_spin_locked(&proc->inner_lock);
648
649 if (thread) {
650 if (sync)
651 wake_up_interruptible_sync(&thread->wait);
652 else
653 wake_up_interruptible(&thread->wait);
654 return;
655 }
656
657 /* Didn't find a thread waiting for proc work; this can happen
658 * in two scenarios:
659 * 1. All threads are busy handling transactions
660 * In that case, one of those threads should call back into
661 * the kernel driver soon and pick up this work.
662 * 2. Threads are using the (e)poll interface, in which case
663 * they may be blocked on the waitqueue without having been
664 * added to waiting_threads. For this case, we just iterate
665 * over all threads not handling transaction work, and
666 * wake them all up. We wake all because we don't know whether
667 * a thread that called into (e)poll is handling non-binder
668 * work currently.
669 */
670 binder_wakeup_poll_threads_ilocked(proc, sync);
671 }
672
673 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
674 {
675 struct binder_thread *thread = binder_select_thread_ilocked(proc);
676
677 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
678 }
679
680 static bool is_rt_policy(int policy)
681 {
682 return policy == SCHED_FIFO || policy == SCHED_RR;
683 }
684
685 static bool is_fair_policy(int policy)
686 {
687 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
688 }
689
690 static bool binder_supported_policy(int policy)
691 {
692 return is_fair_policy(policy) || is_rt_policy(policy);
693 }
694
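/*
 * to_userspace_prio()/to_kernel_prio() convert between the kernel's
 * unified priority scale and the values visible to userspace: a nice
 * value for fair policies (SCHED_NORMAL/SCHED_BATCH) and an RT
 * priority for SCHED_FIFO/SCHED_RR, where a higher RT priority maps to
 * a numerically lower kernel prio (MAX_RT_PRIO - 1 - priority).
 */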
695 static int to_userspace_prio(int policy, int kernel_priority)
696 {
697 if (is_fair_policy(policy))
698 return PRIO_TO_NICE(kernel_priority);
699 else
700 return MAX_RT_PRIO - 1 - kernel_priority;
701 }
702
703 static int to_kernel_prio(int policy, int user_priority)
704 {
705 if (is_fair_policy(policy))
706 return NICE_TO_PRIO(user_priority);
707 else
708 return MAX_RT_PRIO - 1 - user_priority;
709 }
710
711 static void binder_do_set_priority(struct binder_thread *thread,
712 const struct binder_priority *desired,
713 bool verify)
714 {
715 struct task_struct *task = thread->task;
716 int priority; /* user-space prio value */
717 bool has_cap_nice;
718 unsigned int policy = desired->sched_policy;
719
720 if (task->policy == policy && task->normal_prio == desired->prio) {
721 spin_lock(&thread->prio_lock);
722 if (thread->prio_state == BINDER_PRIO_PENDING)
723 thread->prio_state = BINDER_PRIO_SET;
724 spin_unlock(&thread->prio_lock);
725 return;
726 }
727
728 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
729
730 priority = to_userspace_prio(policy, desired->prio);
731
732 if (verify && is_rt_policy(policy) && !has_cap_nice) {
733 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
734
735 if (max_rtprio == 0) {
736 policy = SCHED_NORMAL;
737 priority = MIN_NICE;
738 } else if (priority > max_rtprio) {
739 priority = max_rtprio;
740 }
741 }
742
743 if (verify && is_fair_policy(policy) && !has_cap_nice) {
744 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
745
746 if (min_nice > MAX_NICE) {
747 binder_user_error("%d RLIMIT_NICE not set\n",
748 task->pid);
749 return;
750 } else if (priority < min_nice) {
751 priority = min_nice;
752 }
753 }
754
755 if (policy != desired->sched_policy ||
756 to_kernel_prio(policy, priority) != desired->prio)
757 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
758 "%d: priority %d not allowed, using %d instead\n",
759 task->pid, desired->prio,
760 to_kernel_prio(policy, priority));
761
762 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
763 to_kernel_prio(policy, priority),
764 desired->prio);
765
766 spin_lock(&thread->prio_lock);
767 if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
768 /*
769 * A new priority has been set by an incoming nested
770 * transaction. Abort this priority restore and allow
771 * the transaction to run at the new desired priority.
772 */
773 spin_unlock(&thread->prio_lock);
774 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
775 "%d: %s: aborting priority restore\n",
776 thread->pid, __func__);
777 return;
778 }
779
780 /* Set the actual priority */
781 if (task->policy != policy || is_rt_policy(policy)) {
782 struct sched_param params;
783
784 params.sched_priority = is_rt_policy(policy) ? priority : 0;
785
786 sched_setscheduler_nocheck(task,
787 policy | SCHED_RESET_ON_FORK,
788 &params);
789 }
790 if (is_fair_policy(policy))
791 set_user_nice(task, priority);
792
793 thread->prio_state = BINDER_PRIO_SET;
794 spin_unlock(&thread->prio_lock);
795 }
796
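/*
 * binder_set_priority() applies @desired subject to RLIMIT_RTPRIO,
 * RLIMIT_NICE and CAP_SYS_NICE restrictions. binder_restore_priority()
 * skips those checks, since it only restores a priority the task
 * already held, and it honors a pending BINDER_PRIO_ABORT set by a
 * nested transaction.
 */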
797 static void binder_set_priority(struct binder_thread *thread,
798 const struct binder_priority *desired)
799 {
800 binder_do_set_priority(thread, desired, /* verify = */ true);
801 }
802
803 static void binder_restore_priority(struct binder_thread *thread,
804 const struct binder_priority *desired)
805 {
806 binder_do_set_priority(thread, desired, /* verify = */ false);
807 }
808
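/*
 * Select the priority for running transaction @t on @thread: start from
 * the transaction's priority, fall back to SCHED_NORMAL if the node
 * does not allow RT inheritance, and honor the node's minimum priority
 * if it is higher. The thread's current (or pending) priority is saved
 * in t->saved_priority so it can be restored later.
 */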
809 static void binder_transaction_priority(struct binder_thread *thread,
810 struct binder_transaction *t,
811 struct binder_node *node)
812 {
813 struct task_struct *task = thread->task;
814 struct binder_priority desired = t->priority;
815 const struct binder_priority node_prio = {
816 .sched_policy = node->sched_policy,
817 .prio = node->min_priority,
818 };
819
820 if (t->set_priority_called)
821 return;
822
823 t->set_priority_called = true;
824
825 if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
826 desired.prio = NICE_TO_PRIO(0);
827 desired.sched_policy = SCHED_NORMAL;
828 }
829
830 if (node_prio.prio < t->priority.prio ||
831 (node_prio.prio == t->priority.prio &&
832 node_prio.sched_policy == SCHED_FIFO)) {
833 /*
834 * In case the minimum priority on the node is
835 * higher (lower value), use that priority. If
836 * the priority is the same, but the node uses
837 * SCHED_FIFO, prefer SCHED_FIFO, since it can
838 * run unbounded, unlike SCHED_RR.
839 */
840 desired = node_prio;
841 }
842
843 spin_lock(&thread->prio_lock);
844 if (thread->prio_state == BINDER_PRIO_PENDING) {
845 /*
846 * The task is in the process of changing priorities, so
847 * saving its current values would be incorrect.
848 * Instead, save the pending priority and signal
849 * the task to abort the priority restore.
850 */
851 t->saved_priority = thread->prio_next;
852 thread->prio_state = BINDER_PRIO_ABORT;
853 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
854 "%d: saved pending priority %d\n",
855 current->pid, thread->prio_next.prio);
856 } else {
857 t->saved_priority.sched_policy = task->policy;
858 t->saved_priority.prio = task->normal_prio;
859 }
860 spin_unlock(&thread->prio_lock);
861
862 binder_set_priority(thread, &desired);
863 trace_android_vh_binder_set_priority(t, task);
864 }
865
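/*
 * Look up the node matching the userspace address @ptr in proc->nodes.
 * On success an implicit tmp reference is taken; the caller must drop
 * it with binder_put_node(). Requires proc->inner_lock.
 */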
866 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
867 binder_uintptr_t ptr)
868 {
869 struct rb_node *n = proc->nodes.rb_node;
870 struct binder_node *node;
871
872 assert_spin_locked(&proc->inner_lock);
873
874 while (n) {
875 node = rb_entry(n, struct binder_node, rb_node);
876
877 if (ptr < node->ptr)
878 n = n->rb_left;
879 else if (ptr > node->ptr)
880 n = n->rb_right;
881 else {
882 /*
883 * take an implicit weak reference
884 * to ensure node stays alive until
885 * call to binder_put_node()
886 */
887 binder_inc_node_tmpref_ilocked(node);
888 return node;
889 }
890 }
891 return NULL;
892 }
893
894 static struct binder_node *binder_get_node(struct binder_proc *proc,
895 binder_uintptr_t ptr)
896 {
897 struct binder_node *node;
898
899 binder_inner_proc_lock(proc);
900 node = binder_get_node_ilocked(proc, ptr);
901 binder_inner_proc_unlock(proc);
902 return node;
903 }
904
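/*
 * Insert @new_node into proc->nodes keyed by fp->binder and initialize
 * it from the flat_binder_object flags. If a node with the same ptr is
 * already in the tree, take a tmp reference on it and return it
 * instead; the caller then frees the unused @new_node.
 */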
905 static struct binder_node *binder_init_node_ilocked(
906 struct binder_proc *proc,
907 struct binder_node *new_node,
908 struct flat_binder_object *fp)
909 {
910 struct rb_node **p = &proc->nodes.rb_node;
911 struct rb_node *parent = NULL;
912 struct binder_node *node;
913 binder_uintptr_t ptr = fp ? fp->binder : 0;
914 binder_uintptr_t cookie = fp ? fp->cookie : 0;
915 __u32 flags = fp ? fp->flags : 0;
916 s8 priority;
917
918 assert_spin_locked(&proc->inner_lock);
919
920 while (*p) {
921
922 parent = *p;
923 node = rb_entry(parent, struct binder_node, rb_node);
924
925 if (ptr < node->ptr)
926 p = &(*p)->rb_left;
927 else if (ptr > node->ptr)
928 p = &(*p)->rb_right;
929 else {
930 /*
931 * A matching node is already in
932 * the rb tree. Abandon the init
933 * and return it.
934 */
935 binder_inc_node_tmpref_ilocked(node);
936 return node;
937 }
938 }
939 node = new_node;
940 binder_stats_created(BINDER_STAT_NODE);
941 node->tmp_refs++;
942 rb_link_node(&node->rb_node, parent, p);
943 rb_insert_color(&node->rb_node, &proc->nodes);
944 node->debug_id = atomic_inc_return(&binder_last_id);
945 node->proc = proc;
946 node->ptr = ptr;
947 node->cookie = cookie;
948 node->work.type = BINDER_WORK_NODE;
949 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
950 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
951 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
952 node->min_priority = to_kernel_prio(node->sched_policy, priority);
953 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
954 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
955 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
956 spin_lock_init(&node->lock);
957 INIT_LIST_HEAD(&node->work.entry);
958 INIT_LIST_HEAD(&node->async_todo);
959 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
960 "%d:%d node %d u%016llx c%016llx created\n",
961 proc->pid, current->pid, node->debug_id,
962 (u64)node->ptr, (u64)node->cookie);
963
964 return node;
965 }
966
967 static struct binder_node *binder_new_node(struct binder_proc *proc,
968 struct flat_binder_object *fp)
969 {
970 struct binder_node *node;
971 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
972
973 if (!new_node)
974 return NULL;
975 binder_inner_proc_lock(proc);
976 node = binder_init_node_ilocked(proc, new_node, fp);
977 binder_inner_proc_unlock(proc);
978 if (node != new_node)
979 /*
980 * The node was already added by another thread
981 */
982 kfree(new_node);
983
984 return node;
985 }
986
987 static void binder_free_node(struct binder_node *node)
988 {
989 kfree(node);
990 binder_stats_deleted(BINDER_STAT_NODE);
991 }
992
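/*
 * Take a strong or weak reference on @node. @internal distinguishes
 * driver-internal references from local (userspace-visible) ones. When
 * the owning process has not yet been told about the reference, the
 * node work is queued on @target_list so that it will be.
 */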
993 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
994 int internal,
995 struct list_head *target_list)
996 {
997 struct binder_proc *proc = node->proc;
998
999 assert_spin_locked(&node->lock);
1000 if (proc)
1001 assert_spin_locked(&proc->inner_lock);
1002 if (strong) {
1003 if (internal) {
1004 if (target_list == NULL &&
1005 node->internal_strong_refs == 0 &&
1006 !(node->proc &&
1007 node == node->proc->context->binder_context_mgr_node &&
1008 node->has_strong_ref)) {
1009 pr_err("invalid inc strong node for %d\n",
1010 node->debug_id);
1011 return -EINVAL;
1012 }
1013 node->internal_strong_refs++;
1014 } else
1015 node->local_strong_refs++;
1016 if (!node->has_strong_ref && target_list) {
1017 struct binder_thread *thread = container_of(target_list,
1018 struct binder_thread, todo);
1019 binder_dequeue_work_ilocked(&node->work);
1020 BUG_ON(&thread->todo != target_list);
1021 binder_enqueue_deferred_thread_work_ilocked(thread,
1022 &node->work);
1023 }
1024 } else {
1025 if (!internal)
1026 node->local_weak_refs++;
1027 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1028 if (target_list == NULL) {
1029 pr_err("invalid inc weak node for %d\n",
1030 node->debug_id);
1031 return -EINVAL;
1032 }
1033 /*
1034 * See comment above
1035 */
1036 binder_enqueue_work_ilocked(&node->work, target_list);
1037 }
1038 }
1039 return 0;
1040 }
1041
1042 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1043 struct list_head *target_list)
1044 {
1045 int ret;
1046
1047 binder_node_inner_lock(node);
1048 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1049 binder_node_inner_unlock(node);
1050
1051 return ret;
1052 }
1053
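/*
 * Drop a strong or weak reference on @node. Returns true when no
 * references remain and the caller must free the node with
 * binder_free_node(); it cannot be freed here because the node and
 * proc locks are still held.
 */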
1054 static bool binder_dec_node_nilocked(struct binder_node *node,
1055 int strong, int internal)
1056 {
1057 struct binder_proc *proc = node->proc;
1058
1059 assert_spin_locked(&node->lock);
1060 if (proc)
1061 assert_spin_locked(&proc->inner_lock);
1062 if (strong) {
1063 if (internal)
1064 node->internal_strong_refs--;
1065 else
1066 node->local_strong_refs--;
1067 if (node->local_strong_refs || node->internal_strong_refs)
1068 return false;
1069 } else {
1070 if (!internal)
1071 node->local_weak_refs--;
1072 if (node->local_weak_refs || node->tmp_refs ||
1073 !hlist_empty(&node->refs))
1074 return false;
1075 }
1076
1077 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1078 if (list_empty(&node->work.entry)) {
1079 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1080 binder_wakeup_proc_ilocked(proc);
1081 }
1082 } else {
1083 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1084 !node->local_weak_refs && !node->tmp_refs) {
1085 if (proc) {
1086 binder_dequeue_work_ilocked(&node->work);
1087 rb_erase(&node->rb_node, &proc->nodes);
1088 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1089 "refless node %d deleted\n",
1090 node->debug_id);
1091 } else {
1092 BUG_ON(!list_empty(&node->work.entry));
1093 spin_lock(&binder_dead_nodes_lock);
1094 /*
1095 * tmp_refs could have changed so
1096 * check it again
1097 */
1098 if (node->tmp_refs) {
1099 spin_unlock(&binder_dead_nodes_lock);
1100 return false;
1101 }
1102 hlist_del(&node->dead_node);
1103 spin_unlock(&binder_dead_nodes_lock);
1104 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1105 "dead node %d deleted\n",
1106 node->debug_id);
1107 }
1108 return true;
1109 }
1110 }
1111 return false;
1112 }
1113
1114 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1115 {
1116 bool free_node;
1117
1118 binder_node_inner_lock(node);
1119 free_node = binder_dec_node_nilocked(node, strong, internal);
1120 binder_node_inner_unlock(node);
1121 if (free_node)
1122 binder_free_node(node);
1123 }
1124
1125 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1126 {
1127 /*
1128 * No call to binder_inc_node() is needed since we
1129 * don't need to inform userspace of any changes to
1130 * tmp_refs
1131 */
1132 node->tmp_refs++;
1133 }
1134
1135 /**
1136 * binder_inc_node_tmpref() - take a temporary reference on node
1137 * @node: node to reference
1138 *
1139 * Take reference on node to prevent the node from being freed
1140 * while referenced only by a local variable. The inner lock is
1141 * needed to serialize with the node work on the queue (which
1142 * isn't needed after the node is dead). If the node is dead
1143 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1144 * node->tmp_refs against dead-node-only cases where the node
1145 * lock cannot be acquired (e.g. traversing the dead node list to
1146 * print nodes)
1147 */
1148 static void binder_inc_node_tmpref(struct binder_node *node)
1149 {
1150 binder_node_lock(node);
1151 if (node->proc)
1152 binder_inner_proc_lock(node->proc);
1153 else
1154 spin_lock(&binder_dead_nodes_lock);
1155 binder_inc_node_tmpref_ilocked(node);
1156 if (node->proc)
1157 binder_inner_proc_unlock(node->proc);
1158 else
1159 spin_unlock(&binder_dead_nodes_lock);
1160 binder_node_unlock(node);
1161 }
1162
1163 /**
1164 * binder_dec_node_tmpref() - remove a temporary reference on node
1165 * @node: node to reference
1166 *
1167 * Release temporary reference on node taken via binder_inc_node_tmpref()
1168 */
1169 static void binder_dec_node_tmpref(struct binder_node *node)
1170 {
1171 bool free_node;
1172
1173 binder_node_inner_lock(node);
1174 if (!node->proc)
1175 spin_lock(&binder_dead_nodes_lock);
1176 else
1177 __acquire(&binder_dead_nodes_lock);
1178 node->tmp_refs--;
1179 BUG_ON(node->tmp_refs < 0);
1180 if (!node->proc)
1181 spin_unlock(&binder_dead_nodes_lock);
1182 else
1183 __release(&binder_dead_nodes_lock);
1184 /*
1185 * Call binder_dec_node() to check if all refcounts are 0
1186 * and cleanup is needed. Calling with strong=0 and internal=1
1187 * causes no actual reference to be released in binder_dec_node().
1188 * If that changes, a change is needed here too.
1189 */
1190 free_node = binder_dec_node_nilocked(node, 0, 1);
1191 binder_node_inner_unlock(node);
1192 if (free_node)
1193 binder_free_node(node);
1194 }
1195
1196 static void binder_put_node(struct binder_node *node)
1197 {
1198 binder_dec_node_tmpref(node);
1199 }
1200
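/*
 * Look up the binder_ref with descriptor @desc in proc->refs_by_desc.
 * If @need_strong_ref is true, a ref holding only weak counts is
 * rejected. Requires proc->outer_lock.
 */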
1201 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1202 u32 desc, bool need_strong_ref)
1203 {
1204 struct rb_node *n = proc->refs_by_desc.rb_node;
1205 struct binder_ref *ref;
1206
1207 while (n) {
1208 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1209
1210 if (desc < ref->data.desc) {
1211 n = n->rb_left;
1212 } else if (desc > ref->data.desc) {
1213 n = n->rb_right;
1214 } else if (need_strong_ref && !ref->data.strong) {
1215 binder_user_error("tried to use weak ref as strong ref\n");
1216 return NULL;
1217 } else {
1218 return ref;
1219 }
1220 }
1221 return NULL;
1222 }
1223
1224 /* Find the smallest unused descriptor the "slow way" */
1225 static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
1226 {
1227 struct binder_ref *ref;
1228 struct rb_node *n;
1229 u32 desc;
1230
1231 desc = offset;
1232 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1233 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1234 if (ref->data.desc > desc)
1235 break;
1236 desc = ref->data.desc + 1;
1237 }
1238
1239 return desc;
1240 }
1241
1242 /*
1243 * Find an available reference descriptor ID. The proc->outer_lock might
1244 * be released in the process, in which case -EAGAIN is returned and the
1245 * @desc should be considered invalid.
1246 */
1247 static int get_ref_desc_olocked(struct binder_proc *proc,
1248 struct binder_node *node,
1249 u32 *desc)
1250 {
1251 struct dbitmap *dmap = &proc_wrapper(proc)->dmap;
1252 unsigned int nbits, offset;
1253 unsigned long *new, bit;
1254
1255 /* 0 is reserved for the context manager */
1256 offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
1257
1258 if (!dbitmap_enabled(dmap)) {
1259 *desc = slow_desc_lookup_olocked(proc, offset);
1260 return 0;
1261 }
1262
1263 if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
1264 *desc = bit;
1265 return 0;
1266 }
1267
1268 /*
1269 * The dbitmap is full and needs to grow. The proc->outer_lock
1270 * is briefly released to allocate the new bitmap safely.
1271 */
1272 nbits = dbitmap_grow_nbits(dmap);
1273 binder_proc_unlock(proc);
1274 new = bitmap_zalloc(nbits, GFP_KERNEL);
1275 binder_proc_lock(proc);
1276 dbitmap_grow(dmap, new, nbits);
1277
1278 return -EAGAIN;
1279 }
1280
1281 /**
1282 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1283 * @proc: binder_proc that owns the ref
1284 * @node: binder_node of target
1285 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1286 *
1287 * Look up the ref for the given node and return it if it exists
1288 *
1289 * If it doesn't exist and the caller provides a newly allocated
1290 * ref, initialize the fields of the newly allocated ref and insert
1291 * into the given proc rb_trees and node refs list.
1292 *
1293 * Return: the ref for node. It is possible that another thread
1294 * allocated/initialized the ref first in which case the
1295 * returned ref would be different than the passed-in
1296 * new_ref. new_ref must be kfree'd by the caller in
1297 * this case.
1298 */
1299 static struct binder_ref *binder_get_ref_for_node_olocked(
1300 struct binder_proc *proc,
1301 struct binder_node *node,
1302 struct binder_ref *new_ref)
1303 {
1304 struct binder_ref *ref;
1305 struct rb_node *parent;
1306 struct rb_node **p;
1307 u32 desc;
1308
1309 retry:
1310 p = &proc->refs_by_node.rb_node;
1311 parent = NULL;
1312 while (*p) {
1313 parent = *p;
1314 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1315
1316 if (node < ref->node)
1317 p = &(*p)->rb_left;
1318 else if (node > ref->node)
1319 p = &(*p)->rb_right;
1320 else
1321 return ref;
1322 }
1323 if (!new_ref)
1324 return NULL;
1325
1326 /* might release the proc->outer_lock */
1327 if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1328 goto retry;
1329
1330 binder_stats_created(BINDER_STAT_REF);
1331 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1332 new_ref->proc = proc;
1333 new_ref->node = node;
1334 rb_link_node(&new_ref->rb_node_node, parent, p);
1335 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1336
1337 new_ref->data.desc = desc;
1338 p = &proc->refs_by_desc.rb_node;
1339 while (*p) {
1340 parent = *p;
1341 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1342
1343 if (new_ref->data.desc < ref->data.desc)
1344 p = &(*p)->rb_left;
1345 else if (new_ref->data.desc > ref->data.desc)
1346 p = &(*p)->rb_right;
1347 else
1348 BUG();
1349 }
1350 rb_link_node(&new_ref->rb_node_desc, parent, p);
1351 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1352
1353 binder_node_lock(node);
1354 hlist_add_head(&new_ref->node_entry, &node->refs);
1355
1356 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1357 "%d new ref %d desc %d for node %d\n",
1358 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1359 node->debug_id);
1360 trace_android_vh_binder_new_ref(proc->tsk, new_ref->data.desc, new_ref->node->debug_id);
1361 binder_node_unlock(node);
1362 return new_ref;
1363 }
1364
1365 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1366 {
1367 struct dbitmap *dmap = &proc_wrapper(ref->proc)->dmap;
1368 bool delete_node = false;
1369
1370 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1371 "%d delete ref %d desc %d for node %d\n",
1372 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1373 ref->node->debug_id);
1374
1375 if (dbitmap_enabled(dmap))
1376 dbitmap_clear_bit(dmap, ref->data.desc);
1377 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1378 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1379
1380 binder_node_inner_lock(ref->node);
1381 if (ref->data.strong)
1382 binder_dec_node_nilocked(ref->node, 1, 1);
1383
1384 hlist_del(&ref->node_entry);
1385 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1386 binder_node_inner_unlock(ref->node);
1387 /*
1388 * Clear ref->node unless we want the caller to free the node
1389 */
1390 if (!delete_node) {
1391 /*
1392 * The caller uses ref->node to determine
1393 * whether the node needs to be freed. Clear
1394 * it since the node is still alive.
1395 */
1396 ref->node = NULL;
1397 }
1398
1399 if (ref->death) {
1400 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1401 "%d delete ref %d desc %d has death notification\n",
1402 ref->proc->pid, ref->data.debug_id,
1403 ref->data.desc);
1404 binder_dequeue_work(ref->proc, &ref->death->work);
1405 binder_stats_deleted(BINDER_STAT_DEATH);
1406 }
1407 binder_stats_deleted(BINDER_STAT_REF);
1408 }
1409
1410 /**
1411 * binder_inc_ref_olocked() - increment the ref for given handle
1412 * @ref: ref to be incremented
1413 * @strong: if true, strong increment, else weak
1414 * @target_list: list to queue node work on
1415 *
1416 * Increment the ref. @ref->proc->outer_lock must be held on entry
1417 *
1418 * Return: 0, if successful, else errno
1419 */
1420 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1421 struct list_head *target_list)
1422 {
1423 int ret;
1424
1425 if (strong) {
1426 if (ref->data.strong == 0) {
1427 ret = binder_inc_node(ref->node, 1, 1, target_list);
1428 if (ret)
1429 return ret;
1430 }
1431 ref->data.strong++;
1432 } else {
1433 if (ref->data.weak == 0) {
1434 ret = binder_inc_node(ref->node, 0, 1, target_list);
1435 if (ret)
1436 return ret;
1437 }
1438 ref->data.weak++;
1439 }
1440 return 0;
1441 }
1442
1443 /**
1444 * binder_dec_ref_olocked() - dec the ref for given handle
1445 * @ref: ref to be decremented
1446 * @strong: if true, strong decrement, else weak
1447 *
1448 * Decrement the ref.
1449 *
1450 * Return: %true if ref is cleaned up and ready to be freed.
1451 */
1452 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1453 {
1454 if (strong) {
1455 if (ref->data.strong == 0) {
1456 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1457 ref->proc->pid, ref->data.debug_id,
1458 ref->data.desc, ref->data.strong,
1459 ref->data.weak);
1460 return false;
1461 }
1462 ref->data.strong--;
1463 if (ref->data.strong == 0)
1464 binder_dec_node(ref->node, strong, 1);
1465 } else {
1466 if (ref->data.weak == 0) {
1467 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1468 ref->proc->pid, ref->data.debug_id,
1469 ref->data.desc, ref->data.strong,
1470 ref->data.weak);
1471 return false;
1472 }
1473 ref->data.weak--;
1474 }
1475 if (ref->data.strong == 0 && ref->data.weak == 0) {
1476 binder_cleanup_ref_olocked(ref);
1477 return true;
1478 }
1479 return false;
1480 }
1481
1482 /**
1483 * binder_get_node_from_ref() - get the node from the given proc/desc
1484 * @proc: proc containing the ref
1485 * @desc: the handle associated with the ref
1486 * @need_strong_ref: if true, only return node if ref is strong
1487 * @rdata: the id/refcount data for the ref
1488 *
1489 * Given a proc and ref handle, return the associated binder_node
1490 *
1491 * Return: a binder_node or NULL if not found or not strong when strong required
1492 */
1493 static struct binder_node *binder_get_node_from_ref(
1494 struct binder_proc *proc,
1495 u32 desc, bool need_strong_ref,
1496 struct binder_ref_data *rdata)
1497 {
1498 struct binder_node *node;
1499 struct binder_ref *ref;
1500
1501 binder_proc_lock(proc);
1502 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1503 if (!ref)
1504 goto err_no_ref;
1505 node = ref->node;
1506 /*
1507 * Take an implicit reference on the node to ensure
1508 * it stays alive until the call to binder_put_node()
1509 */
1510 binder_inc_node_tmpref(node);
1511 if (rdata)
1512 *rdata = ref->data;
1513 binder_proc_unlock(proc);
1514
1515 return node;
1516
1517 err_no_ref:
1518 binder_proc_unlock(proc);
1519 return NULL;
1520 }
1521
1522 /**
1523 * binder_free_ref() - free the binder_ref
1524 * @ref: ref to free
1525 *
1526 * Free the binder_ref. Free the binder_node indicated by ref->node
1527 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1528 */
1529 static void binder_free_ref(struct binder_ref *ref)
1530 {
1531 trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : NULL,
1532 ref->data.desc);
1533 if (ref->node)
1534 binder_free_node(ref->node);
1535 kfree(ref->death);
1536 kfree(ref);
1537 }
1538
1539 /* shrink descriptor bitmap if needed */
1540 static void try_shrink_dmap(struct binder_proc *proc)
1541 {
1542 struct dbitmap *dmap = &proc_wrapper(proc)->dmap;
1543 unsigned long *new;
1544 int nbits;
1545
1546 binder_proc_lock(proc);
1547 nbits = dbitmap_shrink_nbits(dmap);
1548 binder_proc_unlock(proc);
1549
1550 if (!nbits)
1551 return;
1552
1553 new = bitmap_zalloc(nbits, GFP_KERNEL);
1554 binder_proc_lock(proc);
1555 dbitmap_shrink(dmap, new, nbits);
1556 binder_proc_unlock(proc);
1557 }
1558
1559 /**
1560 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1561 * @proc: proc containing the ref
1562 * @desc: the handle associated with the ref
1563 * @increment: true=inc reference, false=dec reference
1564 * @strong: true=strong reference, false=weak reference
1565 * @rdata: the id/refcount data for the ref
1566 *
1567 * Given a proc and ref handle, increment or decrement the ref
1568 * according to "increment" arg.
1569 *
1570 * Return: 0 if successful, else errno
1571 */
1572 static int binder_update_ref_for_handle(struct binder_proc *proc,
1573 uint32_t desc, bool increment, bool strong,
1574 struct binder_ref_data *rdata)
1575 {
1576 int ret = 0;
1577 struct binder_ref *ref;
1578 bool delete_ref = false;
1579
1580 binder_proc_lock(proc);
1581 ref = binder_get_ref_olocked(proc, desc, strong);
1582 if (!ref) {
1583 ret = -EINVAL;
1584 goto err_no_ref;
1585 }
1586 if (increment)
1587 ret = binder_inc_ref_olocked(ref, strong, NULL);
1588 else
1589 delete_ref = binder_dec_ref_olocked(ref, strong);
1590
1591 if (rdata)
1592 *rdata = ref->data;
1593 binder_proc_unlock(proc);
1594
1595 if (delete_ref) {
1596 binder_free_ref(ref);
1597 try_shrink_dmap(proc);
1598 }
1599 return ret;
1600
1601 err_no_ref:
1602 binder_proc_unlock(proc);
1603 return ret;
1604 }
1605
1606 /**
1607 * binder_dec_ref_for_handle() - dec the ref for given handle
1608 * @proc: proc containing the ref
1609 * @desc: the handle associated with the ref
1610 * @strong: true=strong reference, false=weak reference
1611 * @rdata: the id/refcount data for the ref
1612 *
1613 * Just calls binder_update_ref_for_handle() to decrement the ref.
1614 *
1615 * Return: 0 if successful, else errno
1616 */
1617 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1618 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1619 {
1620 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1621 }
1622
1623
1624 /**
1625 * binder_inc_ref_for_node() - increment the ref for given proc/node
1626 * @proc: proc containing the ref
1627 * @node: target node
1628 * @strong: true=strong reference, false=weak reference
1629 * @target_list: worklist to use if node is incremented
1630 * @rdata: the id/refcount data for the ref
1631 *
1632 * Given a proc and node, increment the ref. Create the ref if it
1633 * doesn't already exist
1634 *
1635 * Return: 0 if successful, else errno
1636 */
1637 static int binder_inc_ref_for_node(struct binder_proc *proc,
1638 struct binder_node *node,
1639 bool strong,
1640 struct list_head *target_list,
1641 struct binder_ref_data *rdata)
1642 {
1643 struct binder_ref *ref;
1644 struct binder_ref *new_ref = NULL;
1645 int ret = 0;
1646
1647 binder_proc_lock(proc);
1648 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1649 if (!ref) {
1650 binder_proc_unlock(proc);
1651 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1652 if (!new_ref)
1653 return -ENOMEM;
1654 binder_proc_lock(proc);
1655 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1656 }
1657 ret = binder_inc_ref_olocked(ref, strong, target_list);
1658 *rdata = ref->data;
1659 if (ret && ref == new_ref) {
1660 /*
1661 * Cleanup the failed reference here as the target
1662 * could now be dead and have already released its
1663 * references by now. Calling on the new reference
1664 * with strong=0 and a tmp_refs will not decrement
1665 * the node. The new_ref gets kfree'd below.
1666 */
1667 binder_cleanup_ref_olocked(new_ref);
1668 ref = NULL;
1669 }
1670
1671 binder_proc_unlock(proc);
1672 if (new_ref && ref != new_ref)
1673 /*
1674 * Another thread created the ref first so
1675 * free the one we allocated
1676 */
1677 kfree(new_ref);
1678 return ret;
1679 }
1680
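/*
 * Pop transaction @t, which must be at the top of @target_thread's
 * transaction stack, and clear t->from.
 */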
1681 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1682 struct binder_transaction *t)
1683 {
1684 BUG_ON(!target_thread);
1685 assert_spin_locked(&target_thread->proc->inner_lock);
1686 BUG_ON(target_thread->transaction_stack != t);
1687 BUG_ON(target_thread->transaction_stack->from != target_thread);
1688 target_thread->transaction_stack =
1689 target_thread->transaction_stack->from_parent;
1690 t->from = NULL;
1691 }
1692
1693 /**
1694 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1695 * @thread: thread to decrement
1696 *
1697 * A thread needs to be kept alive while being used to create or
1698 * handle a transaction. binder_get_txn_from() is used to safely
1699 * extract t->from from a binder_transaction and keep the thread
1700 * indicated by t->from from being freed. When done with that
1701 * binder_thread, this function is called to decrement the
1702 * tmp_ref and free if appropriate (thread has been released
1703 * and no transaction being processed by the driver)
1704 */
1705 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1706 {
1707 /*
1708 * an atomic is used to protect the counter value while
1709 * it cannot reach zero or while thread->is_dead is false
1710 */
1711 binder_inner_proc_lock(thread->proc);
1712 atomic_dec(&thread->tmp_ref);
1713 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1714 binder_inner_proc_unlock(thread->proc);
1715 binder_free_thread(thread);
1716 return;
1717 }
1718 binder_inner_proc_unlock(thread->proc);
1719 }
1720
1721 /**
1722 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1723 * @proc: proc to decrement
1724 *
1725 * A binder_proc needs to be kept alive while being used to create or
1726 * handle a transaction. proc->tmp_ref is incremented when
1727 * creating a new transaction or the binder_proc is currently in-use
1728 * by threads that are being released. When done with the binder_proc,
1729 * this function is called to decrement the counter and free the
1730 * proc if appropriate (proc has been released, all threads have
1731 * been released, and it is not currently in use to process a transaction).
1732 */
1733 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1734 {
1735 binder_inner_proc_lock(proc);
1736 proc->tmp_ref--;
1737 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1738 !proc->tmp_ref) {
1739 binder_inner_proc_unlock(proc);
1740 binder_free_proc(proc);
1741 return;
1742 }
1743 binder_inner_proc_unlock(proc);
1744 }
1745
1746 /**
1747 * binder_get_txn_from() - safely extract the "from" thread in transaction
1748 * @t: binder transaction for t->from
1749 *
1750 * Atomically return the "from" thread and increment the tmp_ref
1751 * count for the thread to ensure it stays alive until
1752 * binder_thread_dec_tmpref() is called.
1753 *
1754 * Return: the value of t->from
1755 */
1756 static struct binder_thread *binder_get_txn_from(
1757 struct binder_transaction *t)
1758 {
1759 struct binder_thread *from;
1760
1761 spin_lock(&t->lock);
1762 from = t->from;
1763 if (from)
1764 atomic_inc(&from->tmp_ref);
1765 spin_unlock(&t->lock);
1766 return from;
1767 }
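/*
 * Illustrative sketch, not part of the driver: the tmp_ref pattern around
 * t->from. The reference only pins the binder_thread itself; touching its
 * transaction state still requires the proper locks (see the _and_acq_inner
 * variant below).
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		pr_info("txn %d sent by %d:%d\n", t->debug_id,
 *			from->proc->pid, from->pid);
 *		binder_thread_dec_tmpref(from);
 *	}
 */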
1768
1769 /**
1770 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1771 * @t: binder transaction for t->from
1772 *
1773 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1774 * to guarantee that the thread cannot be released while operating on it.
1775 * The caller must call binder_inner_proc_unlock() to release the inner lock
1776 * as well as call binder_thread_dec_tmpref() to release the reference.
1777 *
1778 * Return: the value of t->from
1779 */
1780 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1781 struct binder_transaction *t)
1782 __acquires(&t->from->proc->inner_lock)
1783 {
1784 struct binder_thread *from;
1785
1786 from = binder_get_txn_from(t);
1787 if (!from) {
1788 __acquire(&from->proc->inner_lock);
1789 return NULL;
1790 }
1791 binder_inner_proc_lock(from->proc);
1792 if (t->from) {
1793 BUG_ON(from != t->from);
1794 return from;
1795 }
1796 binder_inner_proc_unlock(from->proc);
1797 __acquire(&from->proc->inner_lock);
1798 binder_thread_dec_tmpref(from);
1799 return NULL;
1800 }
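/*
 * Illustrative sketch, not part of the driver: callers of the function above
 * must balance both the inner lock and the tmp_ref, and annotate the NULL
 * path for sparse (the lock is notionally held even when NULL is returned):
 *
 *	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
 *
 *	if (!from) {
 *		__release(&from->proc->inner_lock);
 *		return;
 *	}
 *	// ... use 'from' while holding from->proc->inner_lock ...
 *	binder_inner_proc_unlock(from->proc);
 *	binder_thread_dec_tmpref(from);
 */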
1801
1802 /**
1803 * binder_free_txn_fixups() - free unprocessed fd fixups
1804 * @t: binder transaction whose fd fixups should be freed
1805 *
1806 * If the transaction is being torn down prior to being
1807 * processed by the target process, free all of the
1808 * fd fixups and fput the file structs. It is safe to
1809 * call this function after the fixups have been
1810 * processed -- in that case, the list will be empty.
1811 */
1812 static void binder_free_txn_fixups(struct binder_transaction *t)
1813 {
1814 struct binder_txn_fd_fixup *fixup, *tmp;
1815
1816 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1817 fput(fixup->file);
1818 if (fixup->target_fd >= 0)
1819 put_unused_fd(fixup->target_fd);
1820 list_del(&fixup->fixup_entry);
1821 kfree(fixup);
1822 }
1823 }
1824
1825 static void binder_txn_latency_free(struct binder_transaction *t)
1826 {
1827 int from_proc, from_thread, to_proc, to_thread;
1828
1829 spin_lock(&t->lock);
1830 from_proc = t->from ? t->from->proc->pid : 0;
1831 from_thread = t->from ? t->from->pid : 0;
1832 to_proc = t->to_proc ? t->to_proc->pid : 0;
1833 to_thread = t->to_thread ? t->to_thread->pid : 0;
1834 spin_unlock(&t->lock);
1835
1836 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1837 }
1838
1839 static void binder_free_transaction(struct binder_transaction *t)
1840 {
1841 struct binder_proc *target_proc = t->to_proc;
1842
1843 trace_android_vh_free_oem_binder_struct(t);
1844 if (target_proc) {
1845 binder_inner_proc_lock(target_proc);
1846 target_proc->outstanding_txns--;
1847 if (target_proc->outstanding_txns < 0)
1848 pr_warn("%s: Unexpected outstanding_txns %d\n",
1849 __func__, target_proc->outstanding_txns);
1850 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1851 wake_up_interruptible_all(&target_proc->freeze_wait);
1852 if (t->buffer)
1853 t->buffer->transaction = NULL;
1854 binder_inner_proc_unlock(target_proc);
1855 }
1856 if (trace_binder_txn_latency_free_enabled())
1857 binder_txn_latency_free(t);
1858 /*
1859 * If the transaction has no target_proc, then
1860 * t->buffer->transaction has already been cleared.
1861 */
1862 binder_free_txn_fixups(t);
1863 kfree(t);
1864 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1865 }
1866
1867 static void binder_send_failed_reply(struct binder_transaction *t,
1868 uint32_t error_code)
1869 {
1870 struct binder_thread *target_thread;
1871 struct binder_transaction *next;
1872
1873 BUG_ON(t->flags & TF_ONE_WAY);
1874 while (1) {
1875 target_thread = binder_get_txn_from_and_acq_inner(t);
1876 if (target_thread) {
1877 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1878 "send failed reply for transaction %d to %d:%d\n",
1879 t->debug_id,
1880 target_thread->proc->pid,
1881 target_thread->pid);
1882
1883 binder_pop_transaction_ilocked(target_thread, t);
1884 if (target_thread->reply_error.cmd == BR_OK) {
1885 target_thread->reply_error.cmd = error_code;
1886 binder_enqueue_thread_work_ilocked(
1887 target_thread,
1888 &target_thread->reply_error.work);
1889 wake_up_interruptible(&target_thread->wait);
1890 } else {
1891 /*
1892 * Cannot get here for normal operation, but
1893 * we can if multiple synchronous transactions
1894 * are sent without blocking for responses.
1895 * Just ignore the 2nd error in this case.
1896 */
1897 pr_warn("Unexpected reply error: %u\n",
1898 target_thread->reply_error.cmd);
1899 }
1900 binder_inner_proc_unlock(target_thread->proc);
1901 binder_thread_dec_tmpref(target_thread);
1902 binder_free_transaction(t);
1903 return;
1904 }
1905 __release(&target_thread->proc->inner_lock);
1906 next = t->from_parent;
1907
1908 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1909 "send failed reply for transaction %d, target dead\n",
1910 t->debug_id);
1911
1912 binder_free_transaction(t);
1913 if (next == NULL) {
1914 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1915 "reply failed, no target thread at root\n");
1916 return;
1917 }
1918 t = next;
1919 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1920 "reply failed, no target thread -- retry %d\n",
1921 t->debug_id);
1922 }
1923 }
1924
1925 /**
1926 * binder_cleanup_transaction() - cleans up undelivered transaction
1927 * @t: transaction that needs to be cleaned up
1928 * @reason: reason the transaction wasn't delivered
1929 * @error_code: error to return to caller (if synchronous call)
1930 */
1931 static void binder_cleanup_transaction(struct binder_transaction *t,
1932 const char *reason,
1933 uint32_t error_code)
1934 {
1935 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1936 binder_send_failed_reply(t, error_code);
1937 } else {
1938 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1939 "undelivered transaction %d, %s\n",
1940 t->debug_id, reason);
1941 binder_free_transaction(t);
1942 }
1943 }
1944
1945 /**
1946 * binder_get_object() - gets object and checks for valid metadata
1947 * @proc: binder_proc owning the buffer
1948 * @u: sender's user pointer to base of buffer
1949 * @buffer: binder_buffer that we're parsing.
1950 * @offset: offset in the @buffer at which to validate an object.
1951 * @object: struct binder_object to read into
1952 *
1953 * Copy the binder object at the given offset into @object. If @u is
1954 * provided then the copy is from the sender's buffer. If not, then
1955 * it is copied from the target's @buffer.
1956 *
1957 * Return: If there's a valid metadata object at @offset, the
1958 * size of that object. Otherwise, it returns zero. The object
1959 * is read into the struct binder_object pointed to by @object.
1960 */
1961 static size_t binder_get_object(struct binder_proc *proc,
1962 const void __user *u,
1963 struct binder_buffer *buffer,
1964 unsigned long offset,
1965 struct binder_object *object)
1966 {
1967 size_t read_size;
1968 struct binder_object_header *hdr;
1969 size_t object_size = 0;
1970
1971 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1972 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1973 !IS_ALIGNED(offset, sizeof(u32)))
1974 return 0;
1975
1976 if (u) {
1977 if (copy_from_user(object, u + offset, read_size))
1978 return 0;
1979 } else {
1980 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1981 offset, read_size))
1982 return 0;
1983 }
1984
1985 /* Ok, now see if we read a complete object. */
1986 hdr = &object->hdr;
1987 switch (hdr->type) {
1988 case BINDER_TYPE_BINDER:
1989 case BINDER_TYPE_WEAK_BINDER:
1990 case BINDER_TYPE_HANDLE:
1991 case BINDER_TYPE_WEAK_HANDLE:
1992 object_size = sizeof(struct flat_binder_object);
1993 break;
1994 case BINDER_TYPE_FD:
1995 object_size = sizeof(struct binder_fd_object);
1996 break;
1997 case BINDER_TYPE_PTR:
1998 object_size = sizeof(struct binder_buffer_object);
1999 break;
2000 case BINDER_TYPE_FDA:
2001 object_size = sizeof(struct binder_fd_array_object);
2002 break;
2003 default:
2004 return 0;
2005 }
2006 if (offset <= buffer->data_size - object_size &&
2007 buffer->data_size >= object_size)
2008 return object_size;
2009 else
2010 return 0;
2011 }
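/*
 * Illustrative sketch, not part of the driver: how a caller typically checks
 * the result of binder_get_object(). A zero return covers both a bad offset
 * and a truncated or unknown object; the header type must still be checked
 * separately:
 *
 *	struct binder_object object;
 *	size_t sz;
 *
 *	sz = binder_get_object(proc, NULL, buffer, object_offset, &object);
 *	if (!sz || object.hdr.type != BINDER_TYPE_PTR)
 *		return NULL;	// invalid or unexpected object type
 *	// ... use object.bbo ...
 */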
2012
2013 /**
2014 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2015 * @proc: binder_proc owning the buffer
2016 * @b: binder_buffer containing the object
2017 * @object: struct binder_object to read into
2018 * @index: index in offset array at which the binder_buffer_object is
2019 * located
2020 * @start_offset: points to the start of the offset array
2021 * @object_offsetp: offset of @object read from @b
2022 * @num_valid: the number of valid offsets in the offset array
2023 *
2024 * Return: If @index is within the valid range of the offset array
2025 * described by @start_offset and @num_valid, and if there's a valid
2026 * binder_buffer_object at the offset found in index @index
2027 * of the offset array, that object is returned. Otherwise,
2028 * %NULL is returned.
2029 * Note that the offset found in index @index itself is not
2030 * verified; this function assumes that @num_valid elements
2031 * from @start_offset were previously verified to have valid offsets.
2032 * If @object_offsetp is non-NULL, then the offset within
2033 * @b is written to it.
2034 */
2035 static struct binder_buffer_object *binder_validate_ptr(
2036 struct binder_proc *proc,
2037 struct binder_buffer *b,
2038 struct binder_object *object,
2039 binder_size_t index,
2040 binder_size_t start_offset,
2041 binder_size_t *object_offsetp,
2042 binder_size_t num_valid)
2043 {
2044 size_t object_size;
2045 binder_size_t object_offset;
2046 unsigned long buffer_offset;
2047
2048 if (index >= num_valid)
2049 return NULL;
2050
2051 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2052 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2053 b, buffer_offset,
2054 sizeof(object_offset)))
2055 return NULL;
2056 object_size = binder_get_object(proc, NULL, b, object_offset, object);
2057 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2058 return NULL;
2059 if (object_offsetp)
2060 *object_offsetp = object_offset;
2061
2062 return &object->bbo;
2063 }
2064
2065 /**
2066 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2067 * @proc: binder_proc owning the buffer
2068 * @b: transaction buffer
2069 * @objects_start_offset: offset to start of objects buffer
2070 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2071 * @fixup_offset: start offset in @buffer to fix up
2072 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2073 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2074 *
2075 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
2076 * allowed.
2077 *
2078 * For safety reasons, we only allow fixups inside a buffer to happen
2079 * at increasing offsets; additionally, we only allow fixup on the last
2080 * buffer object that was verified, or one of its parents.
2081 *
2082 * Example of what is allowed:
2083 *
2084 * A
2085 * B (parent = A, offset = 0)
2086 * C (parent = A, offset = 16)
2087 * D (parent = C, offset = 0)
2088 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2089 *
2090 * Examples of what is not allowed:
2091 *
2092 * Decreasing offsets within the same parent:
2093 * A
2094 * C (parent = A, offset = 16)
2095 * B (parent = A, offset = 0) // decreasing offset within A
2096 *
2097 * Referring to a parent that wasn't the last object or any of its parents:
2098 * A
2099 * B (parent = A, offset = 0)
2100 * C (parent = A, offset = 0)
2101 * C (parent = A, offset = 16)
2102 * D (parent = B, offset = 0) // B is not A or any of A's parents
2103 */
2104 static bool binder_validate_fixup(struct binder_proc *proc,
2105 struct binder_buffer *b,
2106 binder_size_t objects_start_offset,
2107 binder_size_t buffer_obj_offset,
2108 binder_size_t fixup_offset,
2109 binder_size_t last_obj_offset,
2110 binder_size_t last_min_offset)
2111 {
2112 if (!last_obj_offset) {
2113 /* No previously verified object, so no fixup is allowed */
2114 return false;
2115 }
2116
2117 while (last_obj_offset != buffer_obj_offset) {
2118 unsigned long buffer_offset;
2119 struct binder_object last_object;
2120 struct binder_buffer_object *last_bbo;
2121 size_t object_size = binder_get_object(proc, NULL, b,
2122 last_obj_offset,
2123 &last_object);
2124 if (object_size != sizeof(*last_bbo))
2125 return false;
2126
2127 last_bbo = &last_object.bbo;
2128 /*
2129 * Safe to retrieve the parent of last_obj, since it
2130 * was already previously verified by the driver.
2131 */
2132 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2133 return false;
2134 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2135 buffer_offset = objects_start_offset +
2136 sizeof(binder_size_t) * last_bbo->parent;
2137 if (binder_alloc_copy_from_buffer(&proc->alloc,
2138 &last_obj_offset,
2139 b, buffer_offset,
2140 sizeof(last_obj_offset)))
2141 return false;
2142 }
2143 return (fixup_offset >= last_min_offset);
2144 }
2145
2146 /**
2147 * struct binder_task_work_cb - for deferred close
2148 *
2149 * @twork: callback_head for task work
2150 * @file: file to put once the task work runs
2151 *
2152 * Structure to pass task work to be handled after
2153 * returning from binder_ioctl() via task_work_add().
2154 */
2155 struct binder_task_work_cb {
2156 struct callback_head twork;
2157 struct file *file;
2158 };
2159
2160 /**
2161 * binder_do_fd_close() - complete a deferred file close
2162 * @twork: callback head for task work
2163 *
2164 * It is not safe to call ksys_close() during the binder_ioctl()
2165 * function if there is a chance that binder's own file descriptor
2166 * might be closed. This is to meet the requirements for using
2167 * fdget() (see comments for __fget_light()). Therefore use
2168 * task_work_add() to schedule the close operation once we have
2169 * returned from binder_ioctl(). This function is the callback
2170 * for that mechanism and performs the final fput() on the file
2171 * that was pinned by binder_deferred_fd_close().
2172 */
2173 static void binder_do_fd_close(struct callback_head *twork)
2174 {
2175 struct binder_task_work_cb *twcb = container_of(twork,
2176 struct binder_task_work_cb, twork);
2177
2178 fput(twcb->file);
2179 kfree(twcb);
2180 }
2181
2182 /**
2183 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2184 * @fd: file-descriptor to close
2185 *
2186 * See comments in binder_do_fd_close(). This function is used to schedule
2187 * a file-descriptor to be closed after returning from binder_ioctl().
2188 */
2189 static void binder_deferred_fd_close(int fd)
2190 {
2191 struct binder_task_work_cb *twcb;
2192
2193 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2194 if (!twcb)
2195 return;
2196 init_task_work(&twcb->twork, binder_do_fd_close);
2197 twcb->file = close_fd_get_file(fd);
2198 if (twcb->file) {
2199 // pin it until binder_do_fd_close(); see comments there
2200 get_file(twcb->file);
2201 filp_close(twcb->file, current->files);
2202 task_work_add(current, &twcb->twork, TWA_RESUME);
2203 } else {
2204 kfree(twcb);
2205 }
2206 }
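/*
 * Illustrative sketch, not part of the driver: the caller side of the
 * deferred-close pattern. The fd is detached from the table immediately,
 * but the final fput() only runs from task work after returning to user
 * space, so the looper must be asked to return (as in the FDA release
 * path below):
 *
 *	binder_deferred_fd_close(fd);
 *	if (thread)
 *		thread->looper_need_return = true;
 */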
2207
2208 static void binder_transaction_buffer_release(struct binder_proc *proc,
2209 struct binder_thread *thread,
2210 struct binder_buffer *buffer,
2211 binder_size_t off_end_offset,
2212 bool is_failure)
2213 {
2214 int debug_id = buffer->debug_id;
2215 binder_size_t off_start_offset, buffer_offset;
2216
2217 binder_debug(BINDER_DEBUG_TRANSACTION,
2218 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2219 proc->pid, buffer->debug_id,
2220 buffer->data_size, buffer->offsets_size,
2221 (unsigned long long)off_end_offset);
2222
2223 if (buffer->target_node)
2224 binder_dec_node(buffer->target_node, 1, 0);
2225
2226 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2227
2228 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2229 buffer_offset += sizeof(binder_size_t)) {
2230 struct binder_object_header *hdr;
2231 size_t object_size = 0;
2232 struct binder_object object;
2233 binder_size_t object_offset;
2234
2235 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2236 buffer, buffer_offset,
2237 sizeof(object_offset)))
2238 object_size = binder_get_object(proc, NULL, buffer,
2239 object_offset, &object);
2240 if (object_size == 0) {
2241 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2242 debug_id, (u64)object_offset, buffer->data_size);
2243 continue;
2244 }
2245 hdr = &object.hdr;
2246 switch (hdr->type) {
2247 case BINDER_TYPE_BINDER:
2248 case BINDER_TYPE_WEAK_BINDER: {
2249 struct flat_binder_object *fp;
2250 struct binder_node *node;
2251
2252 fp = to_flat_binder_object(hdr);
2253 node = binder_get_node(proc, fp->binder);
2254 if (node == NULL) {
2255 pr_err("transaction release %d bad node %016llx\n",
2256 debug_id, (u64)fp->binder);
2257 break;
2258 }
2259 binder_debug(BINDER_DEBUG_TRANSACTION,
2260 " node %d u%016llx\n",
2261 node->debug_id, (u64)node->ptr);
2262 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2263 0);
2264 binder_put_node(node);
2265 } break;
2266 case BINDER_TYPE_HANDLE:
2267 case BINDER_TYPE_WEAK_HANDLE: {
2268 struct flat_binder_object *fp;
2269 struct binder_ref_data rdata;
2270 int ret;
2271
2272 fp = to_flat_binder_object(hdr);
2273 ret = binder_dec_ref_for_handle(proc, fp->handle,
2274 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2275
2276 if (ret) {
2277 pr_err("transaction release %d bad handle %d, ret = %d\n",
2278 debug_id, fp->handle, ret);
2279 break;
2280 }
2281 binder_debug(BINDER_DEBUG_TRANSACTION,
2282 " ref %d desc %d\n",
2283 rdata.debug_id, rdata.desc);
2284 } break;
2285
2286 case BINDER_TYPE_FD: {
2287 /*
2288 * No need to close the file here since user-space
2289 * closes it for successfully delivered
2290 * transactions. For transactions that weren't
2291 * delivered, the new fd was never allocated so
2292 * there is no need to close and the fput on the
2293 * file is done when the transaction is torn
2294 * down.
2295 */
2296 } break;
2297 case BINDER_TYPE_PTR:
2298 /*
2299 * Nothing to do here, this will get cleaned up when the
2300 * transaction buffer gets freed
2301 */
2302 break;
2303 case BINDER_TYPE_FDA: {
2304 struct binder_fd_array_object *fda;
2305 struct binder_buffer_object *parent;
2306 struct binder_object ptr_object;
2307 binder_size_t fda_offset;
2308 size_t fd_index;
2309 binder_size_t fd_buf_size;
2310 binder_size_t num_valid;
2311
2312 if (is_failure) {
2313 /*
2314 * The fd fixups have not been applied so no
2315 * fds need to be closed.
2316 */
2317 continue;
2318 }
2319
2320 num_valid = (buffer_offset - off_start_offset) /
2321 sizeof(binder_size_t);
2322 fda = to_binder_fd_array_object(hdr);
2323 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2324 fda->parent,
2325 off_start_offset,
2326 NULL,
2327 num_valid);
2328 if (!parent) {
2329 pr_err("transaction release %d bad parent offset\n",
2330 debug_id);
2331 continue;
2332 }
2333 fd_buf_size = sizeof(u32) * fda->num_fds;
2334 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2335 pr_err("transaction release %d invalid number of fds (%lld)\n",
2336 debug_id, (u64)fda->num_fds);
2337 continue;
2338 }
2339 if (fd_buf_size > parent->length ||
2340 fda->parent_offset > parent->length - fd_buf_size) {
2341 /* No space for all file descriptors here. */
2342 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2343 debug_id, (u64)fda->num_fds);
2344 continue;
2345 }
2346 /*
2347 * the source data for binder_buffer_object is visible
2348 * to user-space and the @buffer element is the user
2349 * pointer to the buffer_object containing the fd_array.
2350 * Convert the address to an offset relative to
2351 * the base of the transaction buffer.
2352 */
2353 fda_offset = parent->buffer - buffer->user_data +
2354 fda->parent_offset;
2355 for (fd_index = 0; fd_index < fda->num_fds;
2356 fd_index++) {
2357 u32 fd;
2358 int err;
2359 binder_size_t offset = fda_offset +
2360 fd_index * sizeof(fd);
2361
2362 err = binder_alloc_copy_from_buffer(
2363 &proc->alloc, &fd, buffer,
2364 offset, sizeof(fd));
2365 WARN_ON(err);
2366 if (!err) {
2367 binder_deferred_fd_close(fd);
2368 /*
2369 * Need to make sure the thread goes
2370 * back to userspace to complete the
2371 * deferred close
2372 */
2373 if (thread)
2374 thread->looper_need_return = true;
2375 }
2376 }
2377 } break;
2378 default:
2379 pr_err("transaction release %d bad object type %x\n",
2380 debug_id, hdr->type);
2381 break;
2382 }
2383 }
2384 }
2385
2386 /* Clean up all the objects in the buffer */
2387 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2388 struct binder_thread *thread,
2389 struct binder_buffer *buffer,
2390 bool is_failure)
2391 {
2392 binder_size_t off_end_offset;
2393
2394 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2395 off_end_offset += buffer->offsets_size;
2396
2397 binder_transaction_buffer_release(proc, thread, buffer,
2398 off_end_offset, is_failure);
2399 }
2400
2401 static int binder_translate_binder(struct flat_binder_object *fp,
2402 struct binder_transaction *t,
2403 struct binder_thread *thread)
2404 {
2405 struct binder_node *node;
2406 struct binder_proc *proc = thread->proc;
2407 struct binder_proc *target_proc = t->to_proc;
2408 struct binder_ref_data rdata;
2409 int ret = 0;
2410
2411 node = binder_get_node(proc, fp->binder);
2412 if (!node) {
2413 node = binder_new_node(proc, fp);
2414 if (!node)
2415 return -ENOMEM;
2416 }
2417 if (fp->cookie != node->cookie) {
2418 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2419 proc->pid, thread->pid, (u64)fp->binder,
2420 node->debug_id, (u64)fp->cookie,
2421 (u64)node->cookie);
2422 ret = -EINVAL;
2423 goto done;
2424 }
2425 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2426 ret = -EPERM;
2427 goto done;
2428 }
2429
2430 ret = binder_inc_ref_for_node(target_proc, node,
2431 fp->hdr.type == BINDER_TYPE_BINDER,
2432 &thread->todo, &rdata);
2433 if (ret)
2434 goto done;
2435
2436 if (fp->hdr.type == BINDER_TYPE_BINDER)
2437 fp->hdr.type = BINDER_TYPE_HANDLE;
2438 else
2439 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2440 fp->binder = 0;
2441 fp->handle = rdata.desc;
2442 fp->cookie = 0;
2443
2444 trace_binder_transaction_node_to_ref(t, node, &rdata);
2445 binder_debug(BINDER_DEBUG_TRANSACTION,
2446 " node %d u%016llx -> ref %d desc %d\n",
2447 node->debug_id, (u64)node->ptr,
2448 rdata.debug_id, rdata.desc);
2449 done:
2450 binder_put_node(node);
2451 return ret;
2452 }
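/*
 * Illustrative sketch, not part of the driver: the effect of the translation
 * above on the flat_binder_object as it moves from sender to target
 * (placeholder values):
 *
 *	sender view:  .hdr.type = BINDER_TYPE_BINDER,
 *		      .binder   = local object pointer, .cookie = local cookie
 *	target view:  .hdr.type = BINDER_TYPE_HANDLE,
 *		      .handle   = rdata.desc, .binder = 0, .cookie = 0
 */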
2453
2454 static int binder_translate_handle(struct flat_binder_object *fp,
2455 struct binder_transaction *t,
2456 struct binder_thread *thread)
2457 {
2458 struct binder_proc *proc = thread->proc;
2459 struct binder_proc *target_proc = t->to_proc;
2460 struct binder_node *node;
2461 struct binder_ref_data src_rdata;
2462 int ret = 0;
2463
2464 node = binder_get_node_from_ref(proc, fp->handle,
2465 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2466 if (!node) {
2467 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2468 proc->pid, thread->pid, fp->handle);
2469 return -EINVAL;
2470 }
2471 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2472 ret = -EPERM;
2473 goto done;
2474 }
2475
2476 binder_node_lock(node);
2477 if (node->proc == target_proc) {
2478 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2479 fp->hdr.type = BINDER_TYPE_BINDER;
2480 else
2481 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2482 fp->binder = node->ptr;
2483 fp->cookie = node->cookie;
2484 if (node->proc)
2485 binder_inner_proc_lock(node->proc);
2486 else
2487 __acquire(&node->proc->inner_lock);
2488 binder_inc_node_nilocked(node,
2489 fp->hdr.type == BINDER_TYPE_BINDER,
2490 0, NULL);
2491 if (node->proc)
2492 binder_inner_proc_unlock(node->proc);
2493 else
2494 __release(&node->proc->inner_lock);
2495 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2496 binder_debug(BINDER_DEBUG_TRANSACTION,
2497 " ref %d desc %d -> node %d u%016llx\n",
2498 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2499 (u64)node->ptr);
2500 binder_node_unlock(node);
2501 } else {
2502 struct binder_ref_data dest_rdata;
2503
2504 binder_node_unlock(node);
2505 ret = binder_inc_ref_for_node(target_proc, node,
2506 fp->hdr.type == BINDER_TYPE_HANDLE,
2507 NULL, &dest_rdata);
2508 if (ret)
2509 goto done;
2510
2511 fp->binder = 0;
2512 fp->handle = dest_rdata.desc;
2513 fp->cookie = 0;
2514 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2515 &dest_rdata);
2516 binder_debug(BINDER_DEBUG_TRANSACTION,
2517 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2518 src_rdata.debug_id, src_rdata.desc,
2519 dest_rdata.debug_id, dest_rdata.desc,
2520 node->debug_id);
2521 }
2522 done:
2523 binder_put_node(node);
2524 return ret;
2525 }
2526
2527 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2528 struct binder_transaction *t,
2529 struct binder_thread *thread,
2530 struct binder_transaction *in_reply_to)
2531 {
2532 struct binder_proc *proc = thread->proc;
2533 struct binder_proc *target_proc = t->to_proc;
2534 struct binder_txn_fd_fixup *fixup;
2535 struct file *file;
2536 int ret = 0;
2537 bool target_allows_fd;
2538
2539 if (in_reply_to)
2540 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2541 else
2542 target_allows_fd = t->buffer->target_node->accept_fds;
2543 if (!target_allows_fd) {
2544 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2545 proc->pid, thread->pid,
2546 in_reply_to ? "reply" : "transaction",
2547 fd);
2548 ret = -EPERM;
2549 goto err_fd_not_accepted;
2550 }
2551
2552 file = fget(fd);
2553 if (!file) {
2554 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2555 proc->pid, thread->pid, fd);
2556 ret = -EBADF;
2557 goto err_fget;
2558 }
2559 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2560 if (ret < 0) {
2561 ret = -EPERM;
2562 goto err_security;
2563 }
2564
2565 /*
2566 * Add fixup record for this transaction. The allocation
2567 * of the fd in the target needs to be done from a
2568 * target thread.
2569 */
2570 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2571 if (!fixup) {
2572 ret = -ENOMEM;
2573 goto err_alloc;
2574 }
2575 fixup->file = file;
2576 fixup->offset = fd_offset;
2577 fixup->target_fd = -1;
2578 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2579 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2580
2581 return ret;
2582
2583 err_alloc:
2584 err_security:
2585 fput(file);
2586 err_fget:
2587 err_fd_not_accepted:
2588 return ret;
2589 }
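/*
 * Illustrative sketch, not part of the driver: lifecycle of the fd fixup
 * queued above. The struct file is pinned in the sender's context; the fd
 * number in the target is only allocated later, in target context, when the
 * transaction is received (code outside this excerpt):
 *
 *	send:     binder_translate_fd()    -> fget(fd), queue on t->fd_fixups
 *	receive:  fixup applied in target  -> get_unused_fd_flags() + fd_install()
 *	teardown: binder_free_txn_fixups() -> fput() and put_unused_fd()
 */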
2590
2591 /**
2592 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2593 * @offset: offset in target buffer to fixup
2594 * @skip_size: bytes to skip in copy (fixup will be written later)
2595 * @fixup_data: data to write at fixup offset
2596 * @node: list node
2597 *
2598 * This is used for the pointer fixup list (pf) which is created and consumed
2599 * during binder_transaction() and is only accessed locally. No
2600 * locking is necessary.
2601 *
2602 * The list is ordered by @offset.
2603 */
2604 struct binder_ptr_fixup {
2605 binder_size_t offset;
2606 size_t skip_size;
2607 binder_uintptr_t fixup_data;
2608 struct list_head node;
2609 };
2610
2611 /**
2612 * struct binder_sg_copy - scatter-gather data to be copied
2613 * @offset: offset in target buffer
2614 * @sender_uaddr: user address in source buffer
2615 * @length: bytes to copy
2616 * @node: list node
2617 *
2618 * This is used for the sg copy list (sgc) which is created and consumed
2619 * during binder_transaction() and is only accessed locally. No
2620 * locking is necessary.
2621 *
2622 * The list is ordered by @offset.
2623 */
2624 struct binder_sg_copy {
2625 binder_size_t offset;
2626 const void __user *sender_uaddr;
2627 size_t length;
2628 struct list_head node;
2629 };
2630
2631 /**
2632 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2633 * @alloc: binder_alloc associated with @buffer
2634 * @buffer: binder buffer in target process
2635 * @sgc_head: list_head of scatter-gather copy list
2636 * @pf_head: list_head of pointer fixup list
2637 *
2638 * Processes all elements of @sgc_head, applying fixups from @pf_head
2639 * and copying the scatter-gather data from the source process' user
2640 * buffer to the target's buffer. It is expected that the list creation
2641 * and processing all occurs during binder_transaction() so these lists
2642 * are only accessed in local context.
2643 *
2644 * Return: 0=success, else -errno
2645 */
2646 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2647 struct binder_buffer *buffer,
2648 struct list_head *sgc_head,
2649 struct list_head *pf_head)
2650 {
2651 int ret = 0;
2652 struct binder_sg_copy *sgc, *tmpsgc;
2653 struct binder_ptr_fixup *tmppf;
2654 struct binder_ptr_fixup *pf =
2655 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2656 node);
2657
2658 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2659 size_t bytes_copied = 0;
2660
2661 while (bytes_copied < sgc->length) {
2662 size_t copy_size;
2663 size_t bytes_left = sgc->length - bytes_copied;
2664 size_t offset = sgc->offset + bytes_copied;
2665
2666 /*
2667 * We copy up to the fixup (pointed to by pf)
2668 */
2669 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2670 : bytes_left;
2671 if (!ret && copy_size)
2672 ret = binder_alloc_copy_user_to_buffer(
2673 alloc, buffer,
2674 offset,
2675 sgc->sender_uaddr + bytes_copied,
2676 copy_size);
2677 bytes_copied += copy_size;
2678 if (copy_size != bytes_left) {
2679 BUG_ON(!pf);
2680 /* we stopped at a fixup offset */
2681 if (pf->skip_size) {
2682 /*
2683 * we are just skipping. This is for
2684 * BINDER_TYPE_FDA where the translated
2685 * fds will be fixed up when we get
2686 * to target context.
2687 */
2688 bytes_copied += pf->skip_size;
2689 } else {
2690 /* apply the fixup indicated by pf */
2691 if (!ret)
2692 ret = binder_alloc_copy_to_buffer(
2693 alloc, buffer,
2694 pf->offset,
2695 &pf->fixup_data,
2696 sizeof(pf->fixup_data));
2697 bytes_copied += sizeof(pf->fixup_data);
2698 }
2699 list_del(&pf->node);
2700 kfree(pf);
2701 pf = list_first_entry_or_null(pf_head,
2702 struct binder_ptr_fixup, node);
2703 }
2704 }
2705 list_del(&sgc->node);
2706 kfree(sgc);
2707 }
2708 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2709 BUG_ON(pf->skip_size == 0);
2710 list_del(&pf->node);
2711 kfree(pf);
2712 }
2713 BUG_ON(!list_empty(sgc_head));
2714
2715 return ret > 0 ? -EINVAL : ret;
2716 }
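/*
 * Illustrative worked example, not from the source: one scatter-gather block
 * of 32 bytes at target offset 64 with a single pointer fixup at offset 72
 * (skip_size == 0, 64-bit binder_uintptr_t assumed):
 *
 *	copy sender bytes [0, 8)    -> target [64, 72)
 *	write pf->fixup_data (8 B)  -> target [72, 80)   (sender bytes skipped)
 *	copy sender bytes [16, 32)  -> target [80, 96)
 *
 * A BINDER_TYPE_FDA fixup sets skip_size instead, so that range is left
 * untouched here and filled in later in target context.
 */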
2717
2718 /**
2719 * binder_cleanup_deferred_txn_lists() - free specified lists
2720 * @sgc_head: list_head of scatter-gather copy list
2721 * @pf_head: list_head of pointer fixup list
2722 *
2723 * Called to clean up @sgc_head and @pf_head if there is an
2724 * error.
2725 */
2726 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2727 struct list_head *pf_head)
2728 {
2729 struct binder_sg_copy *sgc, *tmpsgc;
2730 struct binder_ptr_fixup *pf, *tmppf;
2731
2732 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2733 list_del(&sgc->node);
2734 kfree(sgc);
2735 }
2736 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2737 list_del(&pf->node);
2738 kfree(pf);
2739 }
2740 }
2741
2742 /**
2743 * binder_defer_copy() - queue a scatter-gather buffer for copy
2744 * @sgc_head: list_head of scatter-gather copy list
2745 * @offset: binder buffer offset in target process
2746 * @sender_uaddr: user address in source process
2747 * @length: bytes to copy
2748 *
2749 * Specify a scatter-gather block to be copied. The actual copy must
2750 * be deferred until all the needed fixups are identified and queued.
2751 * Then the copy and fixups are done together so un-translated values
2752 * from the source are never visible in the target buffer.
2753 *
2754 * We are guaranteed that repeated calls to this function will have
2755 * monotonically increasing @offset values so the list will naturally
2756 * be ordered.
2757 *
2758 * Return: 0=success, else -errno
2759 */
2760 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2761 const void __user *sender_uaddr, size_t length)
2762 {
2763 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2764
2765 if (!bc)
2766 return -ENOMEM;
2767
2768 bc->offset = offset;
2769 bc->sender_uaddr = sender_uaddr;
2770 bc->length = length;
2771 INIT_LIST_HEAD(&bc->node);
2772
2773 /*
2774 * We are guaranteed that the deferred copies are in-order
2775 * so just add to the tail.
2776 */
2777 list_add_tail(&bc->node, sgc_head);
2778
2779 return 0;
2780 }
2781
2782 /**
2783 * binder_add_fixup() - queue a fixup to be applied to sg copy
2784 * @pf_head: list_head of binder ptr fixup list
2785 * @offset: binder buffer offset in target process
2786 * @fixup: bytes to be copied for fixup
2787 * @skip_size: bytes to skip when copying (fixup will be applied later)
2788 *
2789 * Add the specified fixup to a list ordered by @offset. When copying
2790 * the scatter-gather buffers, the fixup will be copied instead of
2791 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2792 * will be applied later (in target process context), so we just skip
2793 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2794 * value in @fixup.
2795 *
2796 * This function is called *mostly* in @offset order, but there are
2797 * exceptions. Since out-of-order inserts are relatively uncommon,
2798 * we insert the new element by searching backward from the tail of
2799 * the list.
2800 *
2801 * Return: 0=success, else -errno
2802 */
2803 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2804 binder_uintptr_t fixup, size_t skip_size)
2805 {
2806 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2807 struct binder_ptr_fixup *tmppf;
2808
2809 if (!pf)
2810 return -ENOMEM;
2811
2812 pf->offset = offset;
2813 pf->fixup_data = fixup;
2814 pf->skip_size = skip_size;
2815 INIT_LIST_HEAD(&pf->node);
2816
2817 /* Fixups are *mostly* added in-order, but there are some
2818 * exceptions. Look backwards through list for insertion point.
2819 */
2820 list_for_each_entry_reverse(tmppf, pf_head, node) {
2821 if (tmppf->offset < pf->offset) {
2822 list_add(&pf->node, &tmppf->node);
2823 return 0;
2824 }
2825 }
2826 /*
2827 * if we get here, then the new offset is the lowest so
2828 * insert at the head
2829 */
2830 list_add(&pf->node, pf_head);
2831 return 0;
2832 }
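/*
 * Illustrative sketch, not part of the driver: because insertion searches
 * backwards from the tail, a mostly in-order offset sequence such as
 * 16, 48, 32, 64 ends up as the sorted fixup list 16, 32, 48, 64, which is
 * the ordering binder_do_deferred_txn_copies() above depends on.
 */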
2833
2834 static int binder_translate_fd_array(struct list_head *pf_head,
2835 struct binder_fd_array_object *fda,
2836 const void __user *sender_ubuffer,
2837 struct binder_buffer_object *parent,
2838 struct binder_buffer_object *sender_uparent,
2839 struct binder_transaction *t,
2840 struct binder_thread *thread,
2841 struct binder_transaction *in_reply_to)
2842 {
2843 binder_size_t fdi, fd_buf_size;
2844 binder_size_t fda_offset;
2845 const void __user *sender_ufda_base;
2846 struct binder_proc *proc = thread->proc;
2847 int ret;
2848
2849 if (fda->num_fds == 0)
2850 return 0;
2851
2852 fd_buf_size = sizeof(u32) * fda->num_fds;
2853 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2854 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2855 proc->pid, thread->pid, (u64)fda->num_fds);
2856 return -EINVAL;
2857 }
2858 if (fd_buf_size > parent->length ||
2859 fda->parent_offset > parent->length - fd_buf_size) {
2860 /* No space for all file descriptors here. */
2861 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2862 proc->pid, thread->pid, (u64)fda->num_fds);
2863 return -EINVAL;
2864 }
2865 /*
2866 * the source data for binder_buffer_object is visible
2867 * to user-space and the @buffer element is the user
2868 * pointer to the buffer_object containing the fd_array.
2869 * Convert the address to an offset relative to
2870 * the base of the transaction buffer.
2871 */
2872 fda_offset = parent->buffer - t->buffer->user_data +
2873 fda->parent_offset;
2874 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2875 fda->parent_offset;
2876
2877 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2878 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2879 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2880 proc->pid, thread->pid);
2881 return -EINVAL;
2882 }
2883 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2884 if (ret)
2885 return ret;
2886
2887 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2888 u32 fd;
2889 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2890 binder_size_t sender_uoffset = fdi * sizeof(fd);
2891
2892 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2893 if (!ret)
2894 ret = binder_translate_fd(fd, offset, t, thread,
2895 in_reply_to);
2896 if (ret)
2897 return ret > 0 ? -EINVAL : ret;
2898 }
2899 return 0;
2900 }
2901
2902 static int binder_fixup_parent(struct list_head *pf_head,
2903 struct binder_transaction *t,
2904 struct binder_thread *thread,
2905 struct binder_buffer_object *bp,
2906 binder_size_t off_start_offset,
2907 binder_size_t num_valid,
2908 binder_size_t last_fixup_obj_off,
2909 binder_size_t last_fixup_min_off)
2910 {
2911 struct binder_buffer_object *parent;
2912 struct binder_buffer *b = t->buffer;
2913 struct binder_proc *proc = thread->proc;
2914 struct binder_proc *target_proc = t->to_proc;
2915 struct binder_object object;
2916 binder_size_t buffer_offset;
2917 binder_size_t parent_offset;
2918
2919 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2920 return 0;
2921
2922 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2923 off_start_offset, &parent_offset,
2924 num_valid);
2925 if (!parent) {
2926 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2927 proc->pid, thread->pid);
2928 return -EINVAL;
2929 }
2930
2931 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2932 parent_offset, bp->parent_offset,
2933 last_fixup_obj_off,
2934 last_fixup_min_off)) {
2935 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2936 proc->pid, thread->pid);
2937 return -EINVAL;
2938 }
2939
2940 if (parent->length < sizeof(binder_uintptr_t) ||
2941 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2942 /* No space for a pointer here! */
2943 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2944 proc->pid, thread->pid);
2945 return -EINVAL;
2946 }
2947
2948 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2949
2950 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2951 }
2952
2953 /**
2954 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2955 * @t1: the pending async txn in the frozen process
2956 * @t2: the new async txn to supersede the outdated pending one
2957 *
2958 * Return: true if t2 can supersede t1
2959 * false if t2 can not supersede t1
2960 */
2961 static bool binder_can_update_transaction(struct binder_transaction *t1,
2962 struct binder_transaction *t2)
2963 {
2964 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2965 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2966 return false;
2967 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2968 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2969 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2970 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2971 return true;
2972 return false;
2973 }
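/*
 * Illustrative sketch, not part of the driver: a user-space sender opts into
 * this replacement behaviour by marking repeated one-way updates, e.g.
 * (placeholder command code):
 *
 *	tr.flags = TF_ONE_WAY | TF_UPDATE_TXN;
 *	tr.code  = STATUS_UPDATE;
 *
 * While the target is frozen, a newer update to the same node with the same
 * code may supersede an older one still pending in async_todo.
 */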
2974
2975 /**
2976 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2977 * @t: new async transaction
2978 * @target_list: list to find outdated transaction
2979 *
2980 * Return: the outdated transaction if found
2981 * NULL if no outdated transaction can be found
2982 *
2983 * Requires the proc->inner_lock to be held.
2984 */
2985 static struct binder_transaction *
2986 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2987 struct list_head *target_list)
2988 {
2989 struct binder_work *w;
2990
2991 list_for_each_entry(w, target_list, entry) {
2992 struct binder_transaction *t_queued;
2993
2994 if (w->type != BINDER_WORK_TRANSACTION)
2995 continue;
2996 t_queued = container_of(w, struct binder_transaction, work);
2997 if (binder_can_update_transaction(t_queued, t))
2998 return t_queued;
2999 }
3000 return NULL;
3001 }
3002
3003 /**
3004 * binder_proc_transaction() - sends a transaction to a process and wakes it up
3005 * @t: transaction to send
3006 * @proc: process to send the transaction to
3007 * @thread: thread in @proc to send the transaction to (may be NULL)
3008 *
3009 * This function queues a transaction to the specified process. It will try
3010 * to find a thread in the target process to handle the transaction and
3011 * wake it up. If no thread is found, the work is queued to the proc
3012 * waitqueue.
3013 *
3014 * If the @thread parameter is not NULL, the transaction is always queued
3015 * to the waitlist of that specific thread.
3016 *
3017 * Return: 0 if the transaction was successfully queued
3018 * BR_DEAD_REPLY if the target process or thread is dead
3019 * BR_FROZEN_REPLY if the target process or thread is frozen and
3020 * the sync transaction was rejected
3021 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
3022 * and the async transaction was successfully queued
3023 */
3024 static int binder_proc_transaction(struct binder_transaction *t,
3025 struct binder_proc *proc,
3026 struct binder_thread *thread)
3027 {
3028 struct binder_node *node = t->buffer->target_node;
3029 bool oneway = !!(t->flags & TF_ONE_WAY);
3030 bool pending_async = false;
3031 struct binder_transaction *t_outdated = NULL;
3032 bool frozen = false;
3033 bool enqueue_task = true;
3034
3035 BUG_ON(!node);
3036 binder_node_lock(node);
3037
3038 if (oneway) {
3039 BUG_ON(thread);
3040 if (node->has_async_transaction)
3041 pending_async = true;
3042 else
3043 node->has_async_transaction = true;
3044 }
3045
3046 binder_inner_proc_lock(proc);
3047 if (proc->is_frozen) {
3048 frozen = true;
3049 proc->sync_recv |= !oneway;
3050 proc->async_recv |= oneway;
3051 }
3052
3053 if ((frozen && !oneway) || proc->is_dead ||
3054 (thread && thread->is_dead)) {
3055 binder_inner_proc_unlock(proc);
3056 binder_node_unlock(node);
3057 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
3058 }
3059
3060 if (!thread && !pending_async)
3061 thread = binder_select_thread_ilocked(proc);
3062
3063 trace_android_vh_binder_proc_transaction(current, proc->tsk,
3064 thread ? thread->task : NULL, node->debug_id, t,
3065 pending_async);
3066
3067 if (thread) {
3068 binder_transaction_priority(thread, t, node);
3069 binder_enqueue_thread_work_ilocked(thread, &t->work);
3070 } else if (!pending_async) {
3071 trace_android_vh_binder_special_task(t, proc, thread,
3072 &t->work, &proc->todo, !oneway, &enqueue_task);
3073 if (enqueue_task)
3074 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3075 } else {
3076 if ((t->flags & TF_UPDATE_TXN) && frozen) {
3077 t_outdated = binder_find_outdated_transaction_ilocked(t,
3078 &node->async_todo);
3079 if (t_outdated) {
3080 binder_debug(BINDER_DEBUG_TRANSACTION,
3081 "txn %d supersedes %d\n",
3082 t->debug_id, t_outdated->debug_id);
3083 list_del_init(&t_outdated->work.entry);
3084 proc->outstanding_txns--;
3085 }
3086 }
3087 trace_android_vh_binder_special_task(t, proc, thread,
3088 &t->work, &node->async_todo, !oneway, &enqueue_task);
3089 if (enqueue_task)
3090 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
3091 }
3092
3093 trace_android_vh_binder_proc_transaction_finish(proc, t,
3094 thread ? thread->task : NULL, pending_async, !oneway);
3095 if (!pending_async)
3096 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3097
3098 proc->outstanding_txns++;
3099 binder_inner_proc_unlock(proc);
3100 binder_node_unlock(node);
3101
3102 /*
3103 * To reduce potential contention, free the outdated transaction and
3104 * buffer after releasing the locks.
3105 */
3106 if (t_outdated) {
3107 struct binder_buffer *buffer = t_outdated->buffer;
3108
3109 t_outdated->buffer = NULL;
3110 buffer->transaction = NULL;
3111 trace_binder_transaction_update_buffer_release(buffer);
3112 binder_release_entire_buffer(proc, NULL, buffer, false);
3113 binder_alloc_free_buf(&proc->alloc, buffer);
3114 kfree(t_outdated);
3115 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3116 }
3117
3118 if (oneway && frozen)
3119 return BR_TRANSACTION_PENDING_FROZEN;
3120
3121 return 0;
3122 }
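/*
 * Illustrative sketch, not part of the driver: how a caller maps the return
 * values above onto error handling (simplified from binder_transaction()):
 *
 *	ret = binder_proc_transaction(t, target_proc, NULL);
 *	if (ret == BR_TRANSACTION_PENDING_FROZEN)
 *		...	async txn queued; sender learns the target is frozen
 *	else if (ret)
 *		...	BR_DEAD_REPLY / BR_FROZEN_REPLY: fail the transaction
 */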
3123
3124 /**
3125 * binder_get_node_refs_for_txn() - Get required refs on node for txn
3126 * @node: struct binder_node for which to get refs
3127 * @procp: returns @node->proc if valid
3128 * @error: if no @procp then returns BR_DEAD_REPLY
3129 *
3130 * User-space normally keeps the node alive when creating a transaction
3131 * since it has a reference to the target. The local strong ref keeps it
3132 * alive if the sending process dies before the target process processes
3133 * the transaction. If the source process is malicious or has a reference
3134 * counting bug, relying on the local strong ref can fail.
3135 *
3136 * Since user-space can cause the local strong ref to go away, we also take
3137 * a tmpref on the node to ensure it survives while we are constructing
3138 * the transaction. We also need a tmpref on the proc while we are
3139 * constructing the transaction, so we take that here as well.
3140 *
3141 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3142 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3143 * target proc has died, @error is set to BR_DEAD_REPLY instead.
3144 */
3145 static struct binder_node *binder_get_node_refs_for_txn(
3146 struct binder_node *node,
3147 struct binder_proc **procp,
3148 uint32_t *error)
3149 {
3150 struct binder_node *target_node = NULL;
3151
3152 binder_node_inner_lock(node);
3153 if (node->proc) {
3154 target_node = node;
3155 binder_inc_node_nilocked(node, 1, 0, NULL);
3156 binder_inc_node_tmpref_ilocked(node);
3157 node->proc->tmp_ref++;
3158 *procp = node->proc;
3159 } else
3160 *error = BR_DEAD_REPLY;
3161 binder_node_inner_unlock(node);
3162
3163 return target_node;
3164 }
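/*
 * Illustrative sketch, not part of the driver: the references taken above
 * must be balanced if the transaction cannot be delivered, roughly as the
 * failure paths of binder_transaction() do:
 *
 *	binder_dec_node(target_node, 1, 0);	// undo binder_inc_node_nilocked()
 *	binder_dec_node_tmpref(target_node);	// undo the node tmpref
 *	binder_proc_dec_tmpref(target_proc);	// undo proc->tmp_ref++
 */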
3165
3166 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
3167 uint32_t command, int32_t param)
3168 {
3169 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
3170
3171 if (!from) {
3172 /* annotation for sparse */
3173 __release(&from->proc->inner_lock);
3174 return;
3175 }
3176
3177 /* don't override existing errors */
3178 if (from->ee.command == BR_OK)
3179 binder_set_extended_error(&from->ee, id, command, param);
3180 binder_inner_proc_unlock(from->proc);
3181 binder_thread_dec_tmpref(from);
3182 }
3183
3184 static void binder_transaction(struct binder_proc *proc,
3185 struct binder_thread *thread,
3186 struct binder_transaction_data *tr, int reply,
3187 binder_size_t extra_buffers_size)
3188 {
3189 int ret;
3190 struct binder_transaction *t;
3191 struct binder_work *w;
3192 struct binder_work *tcomplete;
3193 binder_size_t buffer_offset = 0;
3194 binder_size_t off_start_offset, off_end_offset;
3195 binder_size_t off_min;
3196 binder_size_t sg_buf_offset, sg_buf_end_offset;
3197 binder_size_t user_offset = 0;
3198 struct binder_proc *target_proc = NULL;
3199 struct binder_thread *target_thread = NULL;
3200 struct binder_node *target_node = NULL;
3201 struct binder_transaction *in_reply_to = NULL;
3202 struct binder_transaction_log_entry *e;
3203 uint32_t return_error = 0;
3204 uint32_t return_error_param = 0;
3205 uint32_t return_error_line = 0;
3206 binder_size_t last_fixup_obj_off = 0;
3207 binder_size_t last_fixup_min_off = 0;
3208 struct binder_context *context = proc->context;
3209 int t_debug_id = atomic_inc_return(&binder_last_id);
3210 ktime_t t_start_time = ktime_get();
3211 char *secctx = NULL;
3212 u32 secctx_sz = 0;
3213 struct list_head sgc_head;
3214 struct list_head pf_head;
3215 const void __user *user_buffer = (const void __user *)
3216 (uintptr_t)tr->data.ptr.buffer;
3217 bool is_nested = false;
3218 INIT_LIST_HEAD(&sgc_head);
3219 INIT_LIST_HEAD(&pf_head);
3220
3221 e = binder_transaction_log_add(&binder_transaction_log);
3222 e->debug_id = t_debug_id;
3223 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3224 e->from_proc = proc->pid;
3225 e->from_thread = thread->pid;
3226 e->target_handle = tr->target.handle;
3227 e->data_size = tr->data_size;
3228 e->offsets_size = tr->offsets_size;
3229 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3230
3231 binder_inner_proc_lock(proc);
3232 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3233 binder_inner_proc_unlock(proc);
3234
3235 if (reply) {
3236 binder_inner_proc_lock(proc);
3237 in_reply_to = thread->transaction_stack;
3238 if (in_reply_to == NULL) {
3239 binder_inner_proc_unlock(proc);
3240 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3241 proc->pid, thread->pid);
3242 return_error = BR_FAILED_REPLY;
3243 return_error_param = -EPROTO;
3244 return_error_line = __LINE__;
3245 goto err_empty_call_stack;
3246 }
3247 if (in_reply_to->to_thread != thread) {
3248 spin_lock(&in_reply_to->lock);
3249 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3250 proc->pid, thread->pid, in_reply_to->debug_id,
3251 in_reply_to->to_proc ?
3252 in_reply_to->to_proc->pid : 0,
3253 in_reply_to->to_thread ?
3254 in_reply_to->to_thread->pid : 0);
3255 spin_unlock(&in_reply_to->lock);
3256 binder_inner_proc_unlock(proc);
3257 return_error = BR_FAILED_REPLY;
3258 return_error_param = -EPROTO;
3259 return_error_line = __LINE__;
3260 in_reply_to = NULL;
3261 goto err_bad_call_stack;
3262 }
3263 thread->transaction_stack = in_reply_to->to_parent;
3264 binder_inner_proc_unlock(proc);
3265 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3266 if (target_thread == NULL) {
3267 /* annotation for sparse */
3268 __release(&target_thread->proc->inner_lock);
3269 binder_txn_error("%d:%d reply target not found\n",
3270 thread->pid, proc->pid);
3271 return_error = BR_DEAD_REPLY;
3272 return_error_line = __LINE__;
3273 goto err_dead_binder;
3274 }
3275 if (target_thread->transaction_stack != in_reply_to) {
3276 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3277 proc->pid, thread->pid,
3278 target_thread->transaction_stack ?
3279 target_thread->transaction_stack->debug_id : 0,
3280 in_reply_to->debug_id);
3281 binder_inner_proc_unlock(target_thread->proc);
3282 return_error = BR_FAILED_REPLY;
3283 return_error_param = -EPROTO;
3284 return_error_line = __LINE__;
3285 in_reply_to = NULL;
3286 target_thread = NULL;
3287 goto err_dead_binder;
3288 }
3289 target_proc = target_thread->proc;
3290 target_proc->tmp_ref++;
3291 binder_inner_proc_unlock(target_thread->proc);
3292 trace_android_vh_binder_reply(target_proc, proc, thread, tr);
3293 } else {
3294 if (tr->target.handle) {
3295 struct binder_ref *ref;
3296
3297 /*
3298 * There must already be a strong ref
3299 * on this node. If so, do a strong
3300 * increment on the node to ensure it
3301 * stays alive until the transaction is
3302 * done.
3303 */
3304 binder_proc_lock(proc);
3305 ref = binder_get_ref_olocked(proc, tr->target.handle,
3306 true);
3307 if (ref) {
3308 target_node = binder_get_node_refs_for_txn(
3309 ref->node, &target_proc,
3310 &return_error);
3311 } else {
3312 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3313 proc->pid, thread->pid, tr->target.handle);
3314 return_error = BR_FAILED_REPLY;
3315 }
3316 binder_proc_unlock(proc);
3317 } else {
3318 mutex_lock(&context->context_mgr_node_lock);
3319 target_node = context->binder_context_mgr_node;
3320 if (target_node)
3321 target_node = binder_get_node_refs_for_txn(
3322 target_node, &target_proc,
3323 &return_error);
3324 else
3325 return_error = BR_DEAD_REPLY;
3326 mutex_unlock(&context->context_mgr_node_lock);
3327 if (target_node && target_proc->pid == proc->pid) {
3328 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3329 proc->pid, thread->pid);
3330 return_error = BR_FAILED_REPLY;
3331 return_error_param = -EINVAL;
3332 return_error_line = __LINE__;
3333 goto err_invalid_target_handle;
3334 }
3335 }
3336 if (!target_node) {
3337 binder_txn_error("%d:%d cannot find target node\n",
3338 thread->pid, proc->pid);
3339 /*
3340 * return_error is set above
3341 */
3342 return_error_param = -EINVAL;
3343 return_error_line = __LINE__;
3344 goto err_dead_binder;
3345 }
3346 e->to_node = target_node->debug_id;
3347 if (WARN_ON(proc == target_proc)) {
3348 binder_txn_error("%d:%d self transactions not allowed\n",
3349 thread->pid, proc->pid);
3350 return_error = BR_FAILED_REPLY;
3351 return_error_param = -EINVAL;
3352 return_error_line = __LINE__;
3353 goto err_invalid_target_handle;
3354 }
3355 trace_android_vh_binder_trans(target_proc, proc, thread, tr);
3356 if (security_binder_transaction(proc->cred,
3357 target_proc->cred) < 0) {
3358 binder_txn_error("%d:%d transaction credentials failed\n",
3359 thread->pid, proc->pid);
3360 return_error = BR_FAILED_REPLY;
3361 return_error_param = -EPERM;
3362 return_error_line = __LINE__;
3363 goto err_invalid_target_handle;
3364 }
3365 binder_inner_proc_lock(proc);
3366
3367 w = list_first_entry_or_null(&thread->todo,
3368 struct binder_work, entry);
3369 if (!(tr->flags & TF_ONE_WAY) && w &&
3370 w->type == BINDER_WORK_TRANSACTION) {
3371 /*
3372 * Do not allow new outgoing transaction from a
3373 * thread that has a transaction at the head of
3374 * its todo list. Only need to check the head
3375 * because binder_select_thread_ilocked picks a
3376 * thread from proc->waiting_threads to enqueue
3377 * the transaction, and nothing is queued to the
3378 * todo list while the thread is on waiting_threads.
3379 */
3380 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3381 proc->pid, thread->pid);
3382 binder_inner_proc_unlock(proc);
3383 return_error = BR_FAILED_REPLY;
3384 return_error_param = -EPROTO;
3385 return_error_line = __LINE__;
3386 goto err_bad_todo_list;
3387 }
3388
3389 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3390 struct binder_transaction *tmp;
3391
3392 tmp = thread->transaction_stack;
3393 if (tmp->to_thread != thread) {
3394 spin_lock(&tmp->lock);
3395 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3396 proc->pid, thread->pid, tmp->debug_id,
3397 tmp->to_proc ? tmp->to_proc->pid : 0,
3398 tmp->to_thread ?
3399 tmp->to_thread->pid : 0);
3400 spin_unlock(&tmp->lock);
3401 binder_inner_proc_unlock(proc);
3402 return_error = BR_FAILED_REPLY;
3403 return_error_param = -EPROTO;
3404 return_error_line = __LINE__;
3405 goto err_bad_call_stack;
3406 }
3407 while (tmp) {
3408 struct binder_thread *from;
3409
3410 spin_lock(&tmp->lock);
3411 from = tmp->from;
3412 if (from && from->proc == target_proc) {
3413 atomic_inc(&from->tmp_ref);
3414 target_thread = from;
3415 spin_unlock(&tmp->lock);
3416 is_nested = true;
3417 break;
3418 }
3419 spin_unlock(&tmp->lock);
3420 tmp = tmp->from_parent;
3421 }
3422 }
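/*
 * Illustrative example, hypothetical processes and threads: suppose T1
 * in proc P1 is blocked waiting for a reply from T2 in proc P2, and T2
 * now sends a new synchronous transaction back into P1. The walk above
 * follows the from_parent chain, finds tmp->from == T1 with
 * from->proc == target_proc, and selects T1 as target_thread. Because
 * T1 is already parked waiting for its reply, the nested call does not
 * consume an extra P1 thread; is_nested is recorded so the reply path
 * in this function can restore the saved priority correctly.
 */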
3423 binder_inner_proc_unlock(proc);
3424 }
3425 if (target_thread)
3426 e->to_thread = target_thread->pid;
3427 e->to_proc = target_proc->pid;
3428
3429 /* TODO: reuse incoming transaction for reply */
3430 t = kzalloc(sizeof(*t), GFP_KERNEL);
3431 if (t == NULL) {
3432 binder_txn_error("%d:%d cannot allocate transaction\n",
3433 thread->pid, proc->pid);
3434 return_error = BR_FAILED_REPLY;
3435 return_error_param = -ENOMEM;
3436 return_error_line = __LINE__;
3437 goto err_alloc_t_failed;
3438 }
3439 INIT_LIST_HEAD(&t->fd_fixups);
3440 binder_stats_created(BINDER_STAT_TRANSACTION);
3441 spin_lock_init(&t->lock);
3442 trace_android_vh_binder_transaction_init(t);
3443
3444 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3445 if (tcomplete == NULL) {
3446 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3447 thread->pid, proc->pid);
3448 return_error = BR_FAILED_REPLY;
3449 return_error_param = -ENOMEM;
3450 return_error_line = __LINE__;
3451 goto err_alloc_tcomplete_failed;
3452 }
3453 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3454
3455 t->debug_id = t_debug_id;
3456 t->start_time = t_start_time;
3457
3458 if (reply)
3459 binder_debug(BINDER_DEBUG_TRANSACTION,
3460 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3461 proc->pid, thread->pid, t->debug_id,
3462 target_proc->pid, target_thread->pid,
3463 (u64)tr->data.ptr.buffer,
3464 (u64)tr->data.ptr.offsets,
3465 (u64)tr->data_size, (u64)tr->offsets_size,
3466 (u64)extra_buffers_size);
3467 else
3468 binder_debug(BINDER_DEBUG_TRANSACTION,
3469 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3470 proc->pid, thread->pid, t->debug_id,
3471 target_proc->pid, target_node->debug_id,
3472 (u64)tr->data.ptr.buffer,
3473 (u64)tr->data.ptr.offsets,
3474 (u64)tr->data_size, (u64)tr->offsets_size,
3475 (u64)extra_buffers_size);
3476
3477 if (!reply && !(tr->flags & TF_ONE_WAY))
3478 t->from = thread;
3479 else
3480 t->from = NULL;
3481 t->from_pid = proc->pid;
3482 t->from_tid = thread->pid;
3483 t->sender_euid = task_euid(proc->tsk);
3484 t->to_proc = target_proc;
3485 t->to_thread = target_thread;
3486 t->code = tr->code;
3487 t->flags = tr->flags;
3488 t->is_nested = is_nested;
3489 if (!(t->flags & TF_ONE_WAY) &&
3490 binder_supported_policy(current->policy)) {
3491 /* Inherit supported policies for synchronous transactions */
3492 t->priority.sched_policy = current->policy;
3493 t->priority.prio = current->normal_prio;
3494 } else {
3495 /* Otherwise, fall back to the default priority */
3496 t->priority = target_proc->default_priority;
3497 }
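/*
 * Worked example with made-up values (not taken from the source): a
 * caller running SCHED_NORMAL at nice -10 has current->normal_prio ==
 * 110 (DEFAULT_PRIO 120 plus the nice value), so a synchronous
 * transaction carries {SCHED_NORMAL, 110} and the target thread can be
 * set to a matching priority while it services the call. A oneway
 * transaction from the same caller instead takes
 * target_proc->default_priority, since no sender is left waiting on
 * the reply.
 */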
3498
3499 if (target_node && target_node->txn_security_ctx) {
3500 u32 secid;
3501 size_t added_size;
3502
3503 security_cred_getsecid(proc->cred, &secid);
3504 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3505 if (ret) {
3506 binder_txn_error("%d:%d failed to get security context\n",
3507 thread->pid, proc->pid);
3508 return_error = BR_FAILED_REPLY;
3509 return_error_param = ret;
3510 return_error_line = __LINE__;
3511 goto err_get_secctx_failed;
3512 }
3513 added_size = ALIGN(secctx_sz, sizeof(u64));
3514 extra_buffers_size += added_size;
3515 if (extra_buffers_size < added_size) {
3516 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3517 thread->pid, proc->pid);
3518 return_error = BR_FAILED_REPLY;
3519 return_error_param = -EINVAL;
3520 return_error_line = __LINE__;
3521 goto err_bad_extra_size;
3522 }
3523 }
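/*
 * Note on the wrap-around check above, with a hypothetical worked
 * example: the sizes are unsigned, so an overflow wraps rather than
 * going negative. On a 64-bit kernel, extra_buffers_size == SIZE_MAX - 8
 * and added_size == 16 would wrap to 7, which is less than added_size,
 * so the "sum < added_size" comparison catches the overflow and the
 * transaction is rejected with -EINVAL instead of sizing a buffer from
 * a bogus value.
 */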
3524
3525 trace_binder_transaction(reply, t, target_node);
3526
3527 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3528 tr->offsets_size, extra_buffers_size,
3529 !reply && (t->flags & TF_ONE_WAY));
3530 if (IS_ERR(t->buffer)) {
3531 char *s;
3532
3533 ret = PTR_ERR(t->buffer);
3534 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3535 : (ret == -ENOSPC) ? ": no space left"
3536 : (ret == -ENOMEM) ? ": memory allocation failed"
3537 : "";
3538 binder_txn_error("cannot allocate buffer%s", s);
3539
3540 return_error_param = PTR_ERR(t->buffer);
3541 return_error = return_error_param == -ESRCH ?
3542 BR_DEAD_REPLY : BR_FAILED_REPLY;
3543 return_error_line = __LINE__;
3544 t->buffer = NULL;
3545 goto err_binder_alloc_buf_failed;
3546 }
3547 if (secctx) {
3548 int err;
3549 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3550 ALIGN(tr->offsets_size, sizeof(void *)) +
3551 ALIGN(extra_buffers_size, sizeof(void *)) -
3552 ALIGN(secctx_sz, sizeof(u64));
3553
3554 t->security_ctx = t->buffer->user_data + buf_offset;
3555 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3556 t->buffer, buf_offset,
3557 secctx, secctx_sz);
3558 if (err) {
3559 t->security_ctx = 0;
3560 WARN_ON(1);
3561 }
3562 security_release_secctx(secctx, secctx_sz);
3563 secctx = NULL;
3564 }
3565 t->buffer->debug_id = t->debug_id;
3566 t->buffer->transaction = t;
3567 t->buffer->target_node = target_node;
3568 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3569 trace_binder_transaction_alloc_buf(t->buffer);
3570 trace_android_vh_alloc_oem_binder_struct(tr, t, target_proc);
3571
3572 if (binder_alloc_copy_user_to_buffer(
3573 &target_proc->alloc,
3574 t->buffer,
3575 ALIGN(tr->data_size, sizeof(void *)),
3576 (const void __user *)
3577 (uintptr_t)tr->data.ptr.offsets,
3578 tr->offsets_size)) {
3579 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3580 proc->pid, thread->pid);
3581 return_error = BR_FAILED_REPLY;
3582 return_error_param = -EFAULT;
3583 return_error_line = __LINE__;
3584 goto err_copy_data_failed;
3585 }
3586 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3587 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3588 proc->pid, thread->pid, (u64)tr->offsets_size);
3589 return_error = BR_FAILED_REPLY;
3590 return_error_param = -EINVAL;
3591 return_error_line = __LINE__;
3592 goto err_bad_offset;
3593 }
3594 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3595 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3596 proc->pid, thread->pid,
3597 (u64)extra_buffers_size);
3598 return_error = BR_FAILED_REPLY;
3599 return_error_param = -EINVAL;
3600 return_error_line = __LINE__;
3601 goto err_bad_offset;
3602 }
3603 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3604 buffer_offset = off_start_offset;
3605 off_end_offset = off_start_offset + tr->offsets_size;
3606 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3607 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3608 ALIGN(secctx_sz, sizeof(u64));
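/*
 * Rough sketch of the target buffer layout implied by the offsets
 * computed above (illustrative, padding shown as ALIGN):
 *
 *	[ data: tr->data_size ][ ALIGN ][ offsets: tr->offsets_size ]
 *	[ ALIGN ][ sg buffers: extra_buffers_size - ALIGN(secctx_sz, 8) ]
 *	[ secctx: ALIGN(secctx_sz, 8), only if txn_security_ctx ]
 *
 * off_start_offset/off_end_offset bracket the offsets array,
 * sg_buf_offset starts right after it, and sg_buf_end_offset stops
 * short of the security context that was appended to extra_buffers_size
 * earlier in this function.
 */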
3609 off_min = 0;
3610 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3611 buffer_offset += sizeof(binder_size_t)) {
3612 struct binder_object_header *hdr;
3613 size_t object_size;
3614 struct binder_object object;
3615 binder_size_t object_offset;
3616 binder_size_t copy_size;
3617
3618 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3619 &object_offset,
3620 t->buffer,
3621 buffer_offset,
3622 sizeof(object_offset))) {
3623 binder_txn_error("%d:%d copy offset from buffer failed\n",
3624 thread->pid, proc->pid);
3625 return_error = BR_FAILED_REPLY;
3626 return_error_param = -EINVAL;
3627 return_error_line = __LINE__;
3628 goto err_bad_offset;
3629 }
3630
3631 /*
3632 * Copy the source user buffer up to the next object
3633 * that will be processed.
3634 */
3635 copy_size = object_offset - user_offset;
3636 if (copy_size && (user_offset > object_offset ||
3637 object_offset > tr->data_size ||
3638 binder_alloc_copy_user_to_buffer(
3639 &target_proc->alloc,
3640 t->buffer, user_offset,
3641 user_buffer + user_offset,
3642 copy_size))) {
3643 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3644 proc->pid, thread->pid);
3645 return_error = BR_FAILED_REPLY;
3646 return_error_param = -EFAULT;
3647 return_error_line = __LINE__;
3648 goto err_copy_data_failed;
3649 }
3650 object_size = binder_get_object(target_proc, user_buffer,
3651 t->buffer, object_offset, &object);
3652 if (object_size == 0 || object_offset < off_min) {
3653 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3654 proc->pid, thread->pid,
3655 (u64)object_offset,
3656 (u64)off_min,
3657 (u64)t->buffer->data_size);
3658 return_error = BR_FAILED_REPLY;
3659 return_error_param = -EINVAL;
3660 return_error_line = __LINE__;
3661 goto err_bad_offset;
3662 }
3663 /*
3664 * Set offset to the next buffer fragment to be
3665 * copied
3666 */
3667 user_offset = object_offset + object_size;
3668
3669 hdr = &object.hdr;
3670 off_min = object_offset + object_size;
3671 switch (hdr->type) {
3672 case BINDER_TYPE_BINDER:
3673 case BINDER_TYPE_WEAK_BINDER: {
3674 struct flat_binder_object *fp;
3675
3676 fp = to_flat_binder_object(hdr);
3677 ret = binder_translate_binder(fp, t, thread);
3678
3679 if (ret < 0 ||
3680 binder_alloc_copy_to_buffer(&target_proc->alloc,
3681 t->buffer,
3682 object_offset,
3683 fp, sizeof(*fp))) {
3684 binder_txn_error("%d:%d translate binder failed\n",
3685 thread->pid, proc->pid);
3686 return_error = BR_FAILED_REPLY;
3687 return_error_param = ret;
3688 return_error_line = __LINE__;
3689 goto err_translate_failed;
3690 }
3691 } break;
3692 case BINDER_TYPE_HANDLE:
3693 case BINDER_TYPE_WEAK_HANDLE: {
3694 struct flat_binder_object *fp;
3695
3696 fp = to_flat_binder_object(hdr);
3697 ret = binder_translate_handle(fp, t, thread);
3698 if (ret < 0 ||
3699 binder_alloc_copy_to_buffer(&target_proc->alloc,
3700 t->buffer,
3701 object_offset,
3702 fp, sizeof(*fp))) {
3703 binder_txn_error("%d:%d translate handle failed\n",
3704 thread->pid, proc->pid);
3705 return_error = BR_FAILED_REPLY;
3706 return_error_param = ret;
3707 return_error_line = __LINE__;
3708 goto err_translate_failed;
3709 }
3710 } break;
3711
3712 case BINDER_TYPE_FD: {
3713 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3714 binder_size_t fd_offset = object_offset +
3715 (uintptr_t)&fp->fd - (uintptr_t)fp;
3716 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3717 thread, in_reply_to);
3718
3719 fp->pad_binder = 0;
3720 if (ret < 0 ||
3721 binder_alloc_copy_to_buffer(&target_proc->alloc,
3722 t->buffer,
3723 object_offset,
3724 fp, sizeof(*fp))) {
3725 binder_txn_error("%d:%d translate fd failed\n",
3726 thread->pid, proc->pid);
3727 return_error = BR_FAILED_REPLY;
3728 return_error_param = ret;
3729 return_error_line = __LINE__;
3730 goto err_translate_failed;
3731 }
3732 } break;
3733 case BINDER_TYPE_FDA: {
3734 struct binder_object ptr_object;
3735 binder_size_t parent_offset;
3736 struct binder_object user_object;
3737 size_t user_parent_size;
3738 struct binder_fd_array_object *fda =
3739 to_binder_fd_array_object(hdr);
3740 size_t num_valid = (buffer_offset - off_start_offset) /
3741 sizeof(binder_size_t);
3742 struct binder_buffer_object *parent =
3743 binder_validate_ptr(target_proc, t->buffer,
3744 &ptr_object, fda->parent,
3745 off_start_offset,
3746 &parent_offset,
3747 num_valid);
3748 if (!parent) {
3749 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3750 proc->pid, thread->pid);
3751 return_error = BR_FAILED_REPLY;
3752 return_error_param = -EINVAL;
3753 return_error_line = __LINE__;
3754 goto err_bad_parent;
3755 }
3756 if (!binder_validate_fixup(target_proc, t->buffer,
3757 off_start_offset,
3758 parent_offset,
3759 fda->parent_offset,
3760 last_fixup_obj_off,
3761 last_fixup_min_off)) {
3762 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3763 proc->pid, thread->pid);
3764 return_error = BR_FAILED_REPLY;
3765 return_error_param = -EINVAL;
3766 return_error_line = __LINE__;
3767 goto err_bad_parent;
3768 }
3769 /*
3770 * We need to read the user version of the parent
3771 * object to get the original user offset
3772 */
3773 user_parent_size =
3774 binder_get_object(proc, user_buffer, t->buffer,
3775 parent_offset, &user_object);
3776 if (user_parent_size != sizeof(user_object.bbo)) {
3777 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3778 proc->pid, thread->pid,
3779 user_parent_size,
3780 sizeof(user_object.bbo));
3781 return_error = BR_FAILED_REPLY;
3782 return_error_param = -EINVAL;
3783 return_error_line = __LINE__;
3784 goto err_bad_parent;
3785 }
3786 ret = binder_translate_fd_array(&pf_head, fda,
3787 user_buffer, parent,
3788 &user_object.bbo, t,
3789 thread, in_reply_to);
3790 if (!ret)
3791 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3792 t->buffer,
3793 object_offset,
3794 fda, sizeof(*fda));
3795 if (ret) {
3796 binder_txn_error("%d:%d translate fd array failed\n",
3797 thread->pid, proc->pid);
3798 return_error = BR_FAILED_REPLY;
3799 return_error_param = ret > 0 ? -EINVAL : ret;
3800 return_error_line = __LINE__;
3801 goto err_translate_failed;
3802 }
3803 last_fixup_obj_off = parent_offset;
3804 last_fixup_min_off =
3805 fda->parent_offset + sizeof(u32) * fda->num_fds;
3806 } break;
3807 case BINDER_TYPE_PTR: {
3808 struct binder_buffer_object *bp =
3809 to_binder_buffer_object(hdr);
3810 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3811 size_t num_valid;
3812
3813 if (bp->length > buf_left) {
3814 binder_user_error("%d:%d got transaction with too large buffer\n",
3815 proc->pid, thread->pid);
3816 return_error = BR_FAILED_REPLY;
3817 return_error_param = -EINVAL;
3818 return_error_line = __LINE__;
3819 goto err_bad_offset;
3820 }
3821 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3822 (const void __user *)(uintptr_t)bp->buffer,
3823 bp->length);
3824 if (ret) {
3825 binder_txn_error("%d:%d deferred copy failed\n",
3826 thread->pid, proc->pid);
3827 return_error = BR_FAILED_REPLY;
3828 return_error_param = ret;
3829 return_error_line = __LINE__;
3830 goto err_translate_failed;
3831 }
3832 /* Fixup buffer pointer to target proc address space */
3833 bp->buffer = t->buffer->user_data + sg_buf_offset;
3834 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3835
3836 num_valid = (buffer_offset - off_start_offset) /
3837 sizeof(binder_size_t);
3838 ret = binder_fixup_parent(&pf_head, t,
3839 thread, bp,
3840 off_start_offset,
3841 num_valid,
3842 last_fixup_obj_off,
3843 last_fixup_min_off);
3844 if (ret < 0 ||
3845 binder_alloc_copy_to_buffer(&target_proc->alloc,
3846 t->buffer,
3847 object_offset,
3848 bp, sizeof(*bp))) {
3849 binder_txn_error("%d:%d failed to fixup parent\n",
3850 thread->pid, proc->pid);
3851 return_error = BR_FAILED_REPLY;
3852 return_error_param = ret;
3853 return_error_line = __LINE__;
3854 goto err_translate_failed;
3855 }
3856 last_fixup_obj_off = object_offset;
3857 last_fixup_min_off = 0;
3858 } break;
3859 default:
3860 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3861 proc->pid, thread->pid, hdr->type);
3862 return_error = BR_FAILED_REPLY;
3863 return_error_param = -EINVAL;
3864 return_error_line = __LINE__;
3865 goto err_bad_object_type;
3866 }
3867 }
3868 /* Done processing objects, copy the rest of the buffer */
3869 if (binder_alloc_copy_user_to_buffer(
3870 &target_proc->alloc,
3871 t->buffer, user_offset,
3872 user_buffer + user_offset,
3873 tr->data_size - user_offset)) {
3874 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3875 proc->pid, thread->pid);
3876 return_error = BR_FAILED_REPLY;
3877 return_error_param = -EFAULT;
3878 return_error_line = __LINE__;
3879 goto err_copy_data_failed;
3880 }
3881
3882 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3883 &sgc_head, &pf_head);
3884 if (ret) {
3885 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3886 proc->pid, thread->pid);
3887 return_error = BR_FAILED_REPLY;
3888 return_error_param = ret;
3889 return_error_line = __LINE__;
3890 goto err_copy_data_failed;
3891 }
3892 if (t->buffer->oneway_spam_suspect)
3893 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3894 else
3895 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3896 t->work.type = BINDER_WORK_TRANSACTION;
3897
3898 if (reply) {
3899 binder_enqueue_thread_work(thread, tcomplete);
3900 binder_inner_proc_lock(target_proc);
3901 if (target_thread->is_dead) {
3902 return_error = BR_DEAD_REPLY;
3903 binder_inner_proc_unlock(target_proc);
3904 goto err_dead_proc_or_thread;
3905 }
3906 BUG_ON(t->buffer->async_transaction != 0);
3907 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3908 binder_enqueue_thread_work_ilocked(target_thread, &t->work);

3909 target_proc->outstanding_txns++;
3910 binder_inner_proc_unlock(target_proc);
3911 if (in_reply_to->is_nested) {
3912 spin_lock(&thread->prio_lock);
3913 thread->prio_state = BINDER_PRIO_PENDING;
3914 thread->prio_next = in_reply_to->saved_priority;
3915 spin_unlock(&thread->prio_lock);
3916 }
3917 wake_up_interruptible_sync(&target_thread->wait);
3918 trace_android_vh_binder_restore_priority(in_reply_to, current);
3919 binder_restore_priority(thread, &in_reply_to->saved_priority);
3920 binder_free_transaction(in_reply_to);
3921 } else if (!(t->flags & TF_ONE_WAY)) {
3922 BUG_ON(t->buffer->async_transaction != 0);
3923 binder_inner_proc_lock(proc);
3924 /*
3925 * Defer the TRANSACTION_COMPLETE, so we don't return to
3926 * userspace immediately; this allows the target process to
3927 * immediately start processing this transaction, reducing
3928 * latency. We will then return the TRANSACTION_COMPLETE when
3929 * the target replies (or there is an error).
3930 */
3931 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3932 t->need_reply = 1;
3933 t->from_parent = thread->transaction_stack;
3934 thread->transaction_stack = t;
3935 binder_inner_proc_unlock(proc);
3936 return_error = binder_proc_transaction(t,
3937 target_proc, target_thread);
3938 if (return_error) {
3939 binder_inner_proc_lock(proc);
3940 binder_pop_transaction_ilocked(thread, t);
3941 binder_inner_proc_unlock(proc);
3942 goto err_dead_proc_or_thread;
3943 }
3944 } else {
3945 BUG_ON(target_node == NULL);
3946 BUG_ON(t->buffer->async_transaction != 1);
3947 return_error = binder_proc_transaction(t, target_proc, NULL);
3948 /*
3949 * Let the caller know when an async transaction reaches a frozen
3950 * process and is put in a pending queue, waiting for the target
3951 * process to be unfrozen.
3952 */
3953 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3954 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3955 binder_enqueue_thread_work(thread, tcomplete);
3956 if (return_error &&
3957 return_error != BR_TRANSACTION_PENDING_FROZEN)
3958 goto err_dead_proc_or_thread;
3959 }
3960 if (target_thread)
3961 binder_thread_dec_tmpref(target_thread);
3962 binder_proc_dec_tmpref(target_proc);
3963 if (target_node)
3964 binder_dec_node_tmpref(target_node);
3965 /*
3966 * write barrier to synchronize with initialization
3967 * of log entry
3968 */
3969 smp_wmb();
3970 WRITE_ONCE(e->debug_id_done, t_debug_id);
3971 return;
3972
3973 err_dead_proc_or_thread:
3974 binder_txn_error("%d:%d dead process or thread\n",
3975 thread->pid, proc->pid);
3976 return_error_line = __LINE__;
3977 binder_dequeue_work(proc, tcomplete);
3978 err_translate_failed:
3979 err_bad_object_type:
3980 err_bad_offset:
3981 err_bad_parent:
3982 err_copy_data_failed:
3983 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3984 binder_free_txn_fixups(t);
3985 trace_binder_transaction_failed_buffer_release(t->buffer);
3986 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3987 buffer_offset, true);
3988 if (target_node)
3989 binder_dec_node_tmpref(target_node);
3990 target_node = NULL;
3991 t->buffer->transaction = NULL;
3992 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3993 err_binder_alloc_buf_failed:
3994 err_bad_extra_size:
3995 if (secctx)
3996 security_release_secctx(secctx, secctx_sz);
3997 err_get_secctx_failed:
3998 kfree(tcomplete);
3999 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4000 err_alloc_tcomplete_failed:
4001 if (trace_binder_txn_latency_free_enabled())
4002 binder_txn_latency_free(t);
4003 kfree(t);
4004 binder_stats_deleted(BINDER_STAT_TRANSACTION);
4005 err_alloc_t_failed:
4006 err_bad_todo_list:
4007 err_bad_call_stack:
4008 err_empty_call_stack:
4009 err_dead_binder:
4010 err_invalid_target_handle:
4011 if (target_node) {
4012 binder_dec_node(target_node, 1, 0);
4013 binder_dec_node_tmpref(target_node);
4014 }
4015
4016 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4017 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
4018 proc->pid, thread->pid, reply ? "reply" :
4019 (tr->flags & TF_ONE_WAY ? "async" : "call"),
4020 target_proc ? target_proc->pid : 0,
4021 target_thread ? target_thread->pid : 0,
4022 t_debug_id, return_error, return_error_param,
4023 (u64)tr->data_size, (u64)tr->offsets_size,
4024 return_error_line);
4025
4026 if (target_thread)
4027 binder_thread_dec_tmpref(target_thread);
4028 if (target_proc)
4029 binder_proc_dec_tmpref(target_proc);
4030
4031 {
4032 struct binder_transaction_log_entry *fe;
4033
4034 e->return_error = return_error;
4035 e->return_error_param = return_error_param;
4036 e->return_error_line = return_error_line;
4037 fe = binder_transaction_log_add(&binder_transaction_log_failed);
4038 *fe = *e;
4039 /*
4040 * write barrier to synchronize with initialization
4041 * of log entry
4042 */
4043 smp_wmb();
4044 WRITE_ONCE(e->debug_id_done, t_debug_id);
4045 WRITE_ONCE(fe->debug_id_done, t_debug_id);
4046 }
4047
4048 BUG_ON(thread->return_error.cmd != BR_OK);
4049 if (in_reply_to) {
4050 trace_android_vh_binder_restore_priority(in_reply_to, current);
4051 binder_restore_priority(thread, &in_reply_to->saved_priority);
4052 binder_set_txn_from_error(in_reply_to, t_debug_id,
4053 return_error, return_error_param);
4054 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
4055 binder_enqueue_thread_work(thread, &thread->return_error.work);
4056 binder_send_failed_reply(in_reply_to, return_error);
4057 } else {
4058 binder_inner_proc_lock(proc);
4059 binder_set_extended_error(&thread->ee, t_debug_id,
4060 return_error, return_error_param);
4061 binder_inner_proc_unlock(proc);
4062 thread->return_error.cmd = return_error;
4063 binder_enqueue_thread_work(thread, &thread->return_error.work);
4064 }
4065 }
4066
4067 /**
4068 * binder_free_buf() - free the specified buffer
4069 * @proc: binder proc that owns buffer
4070 * @buffer: buffer to be freed
4071 * @is_failure: true if the transaction failed to send
4072 *
4073 * If the buffer is for an async transaction, enqueue the next async
4074 * transaction from the node.
4075 *
4076 * Cleanup buffer and free it.
4077 */
4078 static void
4079 binder_free_buf(struct binder_proc *proc,
4080 struct binder_thread *thread,
4081 struct binder_buffer *buffer, bool is_failure)
4082 {
4083 bool enqueue_task = true;
4084 bool has_transaction = false;
4085
4086 binder_inner_proc_lock(proc);
4087 if (buffer->transaction) {
4088 buffer->transaction->buffer = NULL;
4089 buffer->transaction = NULL;
4090 has_transaction = true;
4091 }
4092 binder_inner_proc_unlock(proc);
4093 if (buffer->async_transaction && buffer->target_node) {
4094 struct binder_node *buf_node;
4095 struct binder_work *w;
4096
4097 buf_node = buffer->target_node;
4098 binder_node_inner_lock(buf_node);
4099 BUG_ON(!buf_node->has_async_transaction);
4100 BUG_ON(buf_node->proc != proc);
4101 w = binder_dequeue_work_head_ilocked(
4102 &buf_node->async_todo);
4103 if (!w) {
4104 buf_node->has_async_transaction = false;
4105 } else {
4106 trace_android_vh_binder_special_task(NULL, proc, thread, w,
4107 &proc->todo, false, &enqueue_task);
4108 if (enqueue_task)
4109 binder_enqueue_work_ilocked(w, &proc->todo);
4110 binder_wakeup_proc_ilocked(proc);
4111 }
4112 binder_node_inner_unlock(buf_node);
4113 }
4114 trace_android_vh_binder_buffer_release(proc, thread, buffer,
4115 has_transaction);
4116 trace_binder_transaction_buffer_release(buffer);
4117 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4118 binder_alloc_free_buf(&proc->alloc, buffer);
4119 }
4120
4121 static int binder_thread_write(struct binder_proc *proc,
4122 struct binder_thread *thread,
4123 binder_uintptr_t binder_buffer, size_t size,
4124 binder_size_t *consumed)
4125 {
4126 uint32_t cmd;
4127 struct binder_context *context = proc->context;
4128 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4129 void __user *ptr = buffer + *consumed;
4130 void __user *end = buffer + size;
4131
4132 while (ptr < end && thread->return_error.cmd == BR_OK) {
4133 int ret;
4134
4135 if (get_user(cmd, (uint32_t __user *)ptr))
4136 return -EFAULT;
4137 ptr += sizeof(uint32_t);
4138 trace_binder_command(cmd);
4139 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4140 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4141 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4142 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4143 }
4144 switch (cmd) {
4145 case BC_INCREFS:
4146 case BC_ACQUIRE:
4147 case BC_RELEASE:
4148 case BC_DECREFS: {
4149 uint32_t target;
4150 const char *debug_string;
4151 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4152 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4153 struct binder_ref_data rdata;
4154
4155 if (get_user(target, (uint32_t __user *)ptr))
4156 return -EFAULT;
4157
4158 ptr += sizeof(uint32_t);
4159 ret = -1;
4160 if (increment && !target) {
4161 struct binder_node *ctx_mgr_node;
4162
4163 mutex_lock(&context->context_mgr_node_lock);
4164 ctx_mgr_node = context->binder_context_mgr_node;
4165 if (ctx_mgr_node) {
4166 if (ctx_mgr_node->proc == proc) {
4167 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4168 proc->pid, thread->pid);
4169 mutex_unlock(&context->context_mgr_node_lock);
4170 return -EINVAL;
4171 }
4172 ret = binder_inc_ref_for_node(
4173 proc, ctx_mgr_node,
4174 strong, NULL, &rdata);
4175 }
4176 mutex_unlock(&context->context_mgr_node_lock);
4177 }
4178 if (ret)
4179 ret = binder_update_ref_for_handle(
4180 proc, target, increment, strong,
4181 &rdata);
4182 if (!ret && rdata.desc != target) {
4183 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4184 proc->pid, thread->pid,
4185 target, rdata.desc);
4186 }
4187 switch (cmd) {
4188 case BC_INCREFS:
4189 debug_string = "IncRefs";
4190 break;
4191 case BC_ACQUIRE:
4192 debug_string = "Acquire";
4193 break;
4194 case BC_RELEASE:
4195 debug_string = "Release";
4196 break;
4197 case BC_DECREFS:
4198 default:
4199 debug_string = "DecRefs";
4200 break;
4201 }
4202 if (ret) {
4203 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4204 proc->pid, thread->pid, debug_string,
4205 strong, target, ret);
4206 break;
4207 }
4208 binder_debug(BINDER_DEBUG_USER_REFS,
4209 "%d:%d %s ref %d desc %d s %d w %d\n",
4210 proc->pid, thread->pid, debug_string,
4211 rdata.debug_id, rdata.desc, rdata.strong,
4212 rdata.weak);
4213 break;
4214 }
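/*
 * Hedged usage note (illustrative, not normative): each of these four
 * commands carries just a bare u32 descriptor in the write buffer, e.g.
 * the byte stream { BC_ACQUIRE, desc }. Descriptor 0 is special and
 * names the context manager, which is why the increment path above goes
 * through context->binder_context_mgr_node when target == 0 instead of
 * looking up a binder_ref.
 */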
4215 case BC_INCREFS_DONE:
4216 case BC_ACQUIRE_DONE: {
4217 binder_uintptr_t node_ptr;
4218 binder_uintptr_t cookie;
4219 struct binder_node *node;
4220 bool free_node;
4221
4222 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4223 return -EFAULT;
4224 ptr += sizeof(binder_uintptr_t);
4225 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4226 return -EFAULT;
4227 ptr += sizeof(binder_uintptr_t);
4228 node = binder_get_node(proc, node_ptr);
4229 if (node == NULL) {
4230 binder_user_error("%d:%d %s u%016llx no match\n",
4231 proc->pid, thread->pid,
4232 cmd == BC_INCREFS_DONE ?
4233 "BC_INCREFS_DONE" :
4234 "BC_ACQUIRE_DONE",
4235 (u64)node_ptr);
4236 break;
4237 }
4238 if (cookie != node->cookie) {
4239 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4240 proc->pid, thread->pid,
4241 cmd == BC_INCREFS_DONE ?
4242 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4243 (u64)node_ptr, node->debug_id,
4244 (u64)cookie, (u64)node->cookie);
4245 binder_put_node(node);
4246 break;
4247 }
4248 binder_node_inner_lock(node);
4249 if (cmd == BC_ACQUIRE_DONE) {
4250 if (node->pending_strong_ref == 0) {
4251 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4252 proc->pid, thread->pid,
4253 node->debug_id);
4254 binder_node_inner_unlock(node);
4255 binder_put_node(node);
4256 break;
4257 }
4258 node->pending_strong_ref = 0;
4259 } else {
4260 if (node->pending_weak_ref == 0) {
4261 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4262 proc->pid, thread->pid,
4263 node->debug_id);
4264 binder_node_inner_unlock(node);
4265 binder_put_node(node);
4266 break;
4267 }
4268 node->pending_weak_ref = 0;
4269 }
4270 free_node = binder_dec_node_nilocked(node,
4271 cmd == BC_ACQUIRE_DONE, 0);
4272 WARN_ON(free_node);
4273 binder_debug(BINDER_DEBUG_USER_REFS,
4274 "%d:%d %s node %d ls %d lw %d tr %d\n",
4275 proc->pid, thread->pid,
4276 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4277 node->debug_id, node->local_strong_refs,
4278 node->local_weak_refs, node->tmp_refs);
4279 binder_node_inner_unlock(node);
4280 binder_put_node(node);
4281 break;
4282 }
4283 case BC_ATTEMPT_ACQUIRE:
4284 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4285 return -EINVAL;
4286 case BC_ACQUIRE_RESULT:
4287 pr_err("BC_ACQUIRE_RESULT not supported\n");
4288 return -EINVAL;
4289
4290 case BC_FREE_BUFFER: {
4291 binder_uintptr_t data_ptr;
4292 struct binder_buffer *buffer;
4293
4294 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4295 return -EFAULT;
4296 ptr += sizeof(binder_uintptr_t);
4297
4298 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4299 data_ptr);
4300 if (IS_ERR_OR_NULL(buffer)) {
4301 if (PTR_ERR(buffer) == -EPERM) {
4302 binder_user_error(
4303 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4304 proc->pid, thread->pid,
4305 (u64)data_ptr);
4306 } else {
4307 binder_user_error(
4308 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4309 proc->pid, thread->pid,
4310 (u64)data_ptr);
4311 }
4312 break;
4313 }
4314 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4315 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4316 proc->pid, thread->pid, (u64)data_ptr,
4317 buffer->debug_id,
4318 buffer->transaction ? "active" : "finished");
4319 binder_free_buf(proc, thread, buffer, false);
4320 break;
4321 }
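/*
 * Hedged userspace sketch, not part of this driver: after consuming a
 * BR_TRANSACTION or BR_REPLY, the receiver normally returns the kernel
 * buffer with BC_FREE_BUFFER, passing back the same data.ptr.buffer it
 * was handed. Command and struct names below are from
 * <uapi/linux/android/binder.h>; the helper itself is hypothetical.
 *
 *	static void free_received_buffer(int binder_fd, binder_uintptr_t buf)
 *	{
 *		struct {
 *			uint32_t cmd;
 *			binder_uintptr_t data_ptr;
 *		} __attribute__((packed)) wr = {
 *			.cmd = BC_FREE_BUFFER,
 *			.data_ptr = buf,
 *		};
 *		struct binder_write_read bwr = {
 *			.write_size = sizeof(wr),
 *			.write_buffer = (binder_uintptr_t)(uintptr_t)&wr,
 *		};
 *
 *		ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	}
 *
 * The packed attribute matters: the parser above reads a u32 command
 * immediately followed by the u64 payload, with no padding in between.
 */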
4322
4323 case BC_TRANSACTION_SG:
4324 case BC_REPLY_SG: {
4325 struct binder_transaction_data_sg tr;
4326
4327 if (copy_from_user(&tr, ptr, sizeof(tr)))
4328 return -EFAULT;
4329 ptr += sizeof(tr);
4330 binder_transaction(proc, thread, &tr.transaction_data,
4331 cmd == BC_REPLY_SG, tr.buffers_size);
4332 break;
4333 }
4334 case BC_TRANSACTION:
4335 case BC_REPLY: {
4336 struct binder_transaction_data tr;
4337
4338 if (copy_from_user(&tr, ptr, sizeof(tr)))
4339 return -EFAULT;
4340 ptr += sizeof(tr);
4341 binder_transaction(proc, thread, &tr,
4342 cmd == BC_REPLY, 0);
4343 break;
4344 }
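/*
 * Hedged userspace sketch, illustrative only: the binder_transaction_data
 * consumed by the BC_TRANSACTION and BC_REPLY cases above is filled in by
 * the sender roughly as follows. Names come from
 * <uapi/linux/android/binder.h>; the handle, code and payload values are
 * made up for the example.
 *
 *	struct binder_transaction_data trd = {
 *		.target.handle	= handle,	-- reference to the remote node
 *		.code		= 1,		-- method code agreed with the peer
 *		.flags		= 0,		-- synchronous, expect BR_REPLY
 *		.data_size	= sizeof(payload),
 *		.offsets_size	= 0,		-- no embedded binder objects
 *		.data.ptr.buffer  = (binder_uintptr_t)(uintptr_t)payload,
 *		.data.ptr.offsets = 0,
 *	};
 *
 * The sender writes a u32 BC_TRANSACTION immediately followed by trd into
 * bwr.write_buffer and issues ioctl(fd, BINDER_WRITE_READ, &bwr); the
 * copy_from_user() above then picks the struct up and hands it to
 * binder_transaction().
 */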
4345
4346 case BC_REGISTER_LOOPER:
4347 binder_debug(BINDER_DEBUG_THREADS,
4348 "%d:%d BC_REGISTER_LOOPER\n",
4349 proc->pid, thread->pid);
4350 binder_inner_proc_lock(proc);
4351 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4352 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4353 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4354 proc->pid, thread->pid);
4355 } else if (proc->requested_threads == 0) {
4356 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4357 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4358 proc->pid, thread->pid);
4359 } else {
4360 proc->requested_threads--;
4361 proc->requested_threads_started++;
4362 }
4363 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4364 binder_inner_proc_unlock(proc);
4365 trace_android_vh_binder_looper_state_registered(thread, proc);
4366 break;
4367 case BC_ENTER_LOOPER:
4368 binder_debug(BINDER_DEBUG_THREADS,
4369 "%d:%d BC_ENTER_LOOPER\n",
4370 proc->pid, thread->pid);
4371 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4372 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4373 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4374 proc->pid, thread->pid);
4375 }
4376 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4377 break;
4378 case BC_EXIT_LOOPER:
4379 trace_android_vh_binder_looper_exited(thread, proc);
4380 binder_debug(BINDER_DEBUG_THREADS,
4381 "%d:%d BC_EXIT_LOOPER\n",
4382 proc->pid, thread->pid);
4383 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4384 break;
4385
4386 case BC_REQUEST_DEATH_NOTIFICATION:
4387 case BC_CLEAR_DEATH_NOTIFICATION: {
4388 uint32_t target;
4389 binder_uintptr_t cookie;
4390 struct binder_ref *ref;
4391 struct binder_ref_death *death = NULL;
4392
4393 if (get_user(target, (uint32_t __user *)ptr))
4394 return -EFAULT;
4395 ptr += sizeof(uint32_t);
4396 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4397 return -EFAULT;
4398 ptr += sizeof(binder_uintptr_t);
4399 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4400 /*
4401 * Allocate memory for death notification
4402 * before taking lock
4403 */
4404 death = kzalloc(sizeof(*death), GFP_KERNEL);
4405 if (death == NULL) {
4406 WARN_ON(thread->return_error.cmd !=
4407 BR_OK);
4408 thread->return_error.cmd = BR_ERROR;
4409 binder_enqueue_thread_work(
4410 thread,
4411 &thread->return_error.work);
4412 binder_debug(
4413 BINDER_DEBUG_FAILED_TRANSACTION,
4414 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4415 proc->pid, thread->pid);
4416 break;
4417 }
4418 }
4419 binder_proc_lock(proc);
4420 ref = binder_get_ref_olocked(proc, target, false);
4421 if (ref == NULL) {
4422 binder_user_error("%d:%d %s invalid ref %d\n",
4423 proc->pid, thread->pid,
4424 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4425 "BC_REQUEST_DEATH_NOTIFICATION" :
4426 "BC_CLEAR_DEATH_NOTIFICATION",
4427 target);
4428 binder_proc_unlock(proc);
4429 kfree(death);
4430 break;
4431 }
4432
4433 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4434 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4435 proc->pid, thread->pid,
4436 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4437 "BC_REQUEST_DEATH_NOTIFICATION" :
4438 "BC_CLEAR_DEATH_NOTIFICATION",
4439 (u64)cookie, ref->data.debug_id,
4440 ref->data.desc, ref->data.strong,
4441 ref->data.weak, ref->node->debug_id);
4442
4443 binder_node_lock(ref->node);
4444 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4445 if (ref->death) {
4446 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4447 proc->pid, thread->pid);
4448 binder_node_unlock(ref->node);
4449 binder_proc_unlock(proc);
4450 kfree(death);
4451 break;
4452 }
4453 binder_stats_created(BINDER_STAT_DEATH);
4454 INIT_LIST_HEAD(&death->work.entry);
4455 death->cookie = cookie;
4456 ref->death = death;
4457 if (ref->node->proc == NULL) {
4458 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4459
4460 binder_inner_proc_lock(proc);
4461 binder_enqueue_work_ilocked(
4462 &ref->death->work, &proc->todo);
4463 binder_wakeup_proc_ilocked(proc);
4464 binder_inner_proc_unlock(proc);
4465 }
4466 } else {
4467 if (ref->death == NULL) {
4468 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4469 proc->pid, thread->pid);
4470 binder_node_unlock(ref->node);
4471 binder_proc_unlock(proc);
4472 break;
4473 }
4474 death = ref->death;
4475 if (death->cookie != cookie) {
4476 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4477 proc->pid, thread->pid,
4478 (u64)death->cookie,
4479 (u64)cookie);
4480 binder_node_unlock(ref->node);
4481 binder_proc_unlock(proc);
4482 break;
4483 }
4484 ref->death = NULL;
4485 binder_inner_proc_lock(proc);
4486 if (list_empty(&death->work.entry)) {
4487 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4488 if (thread->looper &
4489 (BINDER_LOOPER_STATE_REGISTERED |
4490 BINDER_LOOPER_STATE_ENTERED))
4491 binder_enqueue_thread_work_ilocked(
4492 thread,
4493 &death->work);
4494 else {
4495 binder_enqueue_work_ilocked(
4496 &death->work,
4497 &proc->todo);
4498 binder_wakeup_proc_ilocked(
4499 proc);
4500 }
4501 } else {
4502 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4503 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4504 }
4505 binder_inner_proc_unlock(proc);
4506 }
4507 binder_node_unlock(ref->node);
4508 binder_proc_unlock(proc);
4509 } break;
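/*
 * Hedged protocol sketch (illustrative): a client interested in the
 * death of the node behind @target typically does, in order:
 *
 *	1. write BC_REQUEST_DEATH_NOTIFICATION followed by the u32 handle
 *	   and a u64 cookie of its choosing, as parsed above;
 *	2. later read BR_DEAD_BINDER followed by that cookie once the node
 *	   owner dies (delivered via BINDER_WORK_DEAD_BINDER in
 *	   binder_thread_read() below);
 *	3. write BC_DEAD_BINDER_DONE with the same cookie so the work item
 *	   can be removed from proc->delivered_death;
 *	4. optionally write BC_CLEAR_DEATH_NOTIFICATION to unregister,
 *	   acknowledged with BR_CLEAR_DEATH_NOTIFICATION_DONE.
 *
 * The cookie is opaque to the kernel; userspace commonly stores a
 * pointer to its own death-recipient object in it.
 */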
4510 case BC_DEAD_BINDER_DONE: {
4511 struct binder_work *w;
4512 binder_uintptr_t cookie;
4513 struct binder_ref_death *death = NULL;
4514
4515 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4516 return -EFAULT;
4517
4518 ptr += sizeof(cookie);
4519 binder_inner_proc_lock(proc);
4520 list_for_each_entry(w, &proc->delivered_death,
4521 entry) {
4522 struct binder_ref_death *tmp_death =
4523 container_of(w,
4524 struct binder_ref_death,
4525 work);
4526
4527 if (tmp_death->cookie == cookie) {
4528 death = tmp_death;
4529 break;
4530 }
4531 }
4532 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4533 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4534 proc->pid, thread->pid, (u64)cookie,
4535 death);
4536 if (death == NULL) {
4537 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4538 proc->pid, thread->pid, (u64)cookie);
4539 binder_inner_proc_unlock(proc);
4540 break;
4541 }
4542 binder_dequeue_work_ilocked(&death->work);
4543 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4544 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4545 if (thread->looper &
4546 (BINDER_LOOPER_STATE_REGISTERED |
4547 BINDER_LOOPER_STATE_ENTERED))
4548 binder_enqueue_thread_work_ilocked(
4549 thread, &death->work);
4550 else {
4551 binder_enqueue_work_ilocked(
4552 &death->work,
4553 &proc->todo);
4554 binder_wakeup_proc_ilocked(proc);
4555 }
4556 }
4557 binder_inner_proc_unlock(proc);
4558 } break;
4559
4560 default:
4561 pr_err("%d:%d unknown command %u\n",
4562 proc->pid, thread->pid, cmd);
4563 return -EINVAL;
4564 }
4565 *consumed = ptr - buffer;
4566 }
4567 return 0;
4568 }
4569
4570 static void binder_stat_br(struct binder_proc *proc,
4571 struct binder_thread *thread, uint32_t cmd)
4572 {
4573 trace_binder_return(cmd);
4574 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4575 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4576 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4577 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4578 }
4579 }
4580
4581 static int binder_put_node_cmd(struct binder_proc *proc,
4582 struct binder_thread *thread,
4583 void __user **ptrp,
4584 binder_uintptr_t node_ptr,
4585 binder_uintptr_t node_cookie,
4586 int node_debug_id,
4587 uint32_t cmd, const char *cmd_name)
4588 {
4589 void __user *ptr = *ptrp;
4590
4591 if (put_user(cmd, (uint32_t __user *)ptr))
4592 return -EFAULT;
4593 ptr += sizeof(uint32_t);
4594
4595 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4596 return -EFAULT;
4597 ptr += sizeof(binder_uintptr_t);
4598
4599 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4600 return -EFAULT;
4601 ptr += sizeof(binder_uintptr_t);
4602
4603 binder_stat_br(proc, thread, cmd);
4604 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4605 proc->pid, thread->pid, cmd_name, node_debug_id,
4606 (u64)node_ptr, (u64)node_cookie);
4607
4608 *ptrp = ptr;
4609 return 0;
4610 }
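/*
 * Hedged protocol note (illustrative): each command emitted here is a
 * u32 followed by the node's ptr and cookie, i.e. the wire format
 * { cmd, ptr, cookie }. For BR_INCREFS and BR_ACQUIRE the node's owner
 * is expected to answer with BC_INCREFS_DONE or BC_ACQUIRE_DONE carrying
 * the same two values, which clears pending_weak_ref/pending_strong_ref
 * in binder_thread_write() above.
 */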
4611
4612 static int binder_wait_for_work(struct binder_thread *thread,
4613 bool do_proc_work)
4614 {
4615 DEFINE_WAIT(wait);
4616 struct binder_proc *proc = thread->proc;
4617 int ret = 0;
4618
4619 binder_inner_proc_lock(proc);
4620 for (;;) {
4621 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4622 if (binder_has_work_ilocked(thread, do_proc_work))
4623 break;
4624 if (do_proc_work)
4625 list_add(&thread->waiting_thread_node,
4626 &proc->waiting_threads);
4627 trace_android_vh_binder_wait_for_work(do_proc_work, thread, proc);
4628 binder_inner_proc_unlock(proc);
4629 schedule();
4630 binder_inner_proc_lock(proc);
4631 list_del_init(&thread->waiting_thread_node);
4632 if (signal_pending(current)) {
4633 ret = -EINTR;
4634 break;
4635 }
4636 }
4637 finish_wait(&thread->wait, &wait);
4638 binder_inner_proc_unlock(proc);
4639
4640 return ret;
4641 }
4642
4643 /**
4644 * binder_apply_fd_fixups() - finish fd translation
4645 * @proc: binder_proc associated with @t->buffer
4646 * @t: binder transaction with list of fd fixups
4647 *
4648 * Now that we are in the context of the transaction target
4649 * process, we can allocate and install fds. Process the
4650 * list of fds to translate, fix up the buffer with the
4651 * new fds first, and only then install the files.
4652 *
4653 * If we fail to allocate an fd, skip the install and release
4654 * any fds that have already been allocated.
4655 */
4656 static int binder_apply_fd_fixups(struct binder_proc *proc,
4657 struct binder_transaction *t)
4658 {
4659 struct binder_txn_fd_fixup *fixup, *tmp;
4660 int ret = 0;
4661
4662 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4663 int fd = get_unused_fd_flags(O_CLOEXEC);
4664
4665 if (fd < 0) {
4666 binder_debug(BINDER_DEBUG_TRANSACTION,
4667 "failed fd fixup txn %d fd %d\n",
4668 t->debug_id, fd);
4669 ret = -ENOMEM;
4670 goto err;
4671 }
4672 binder_debug(BINDER_DEBUG_TRANSACTION,
4673 "fd fixup txn %d fd %d\n",
4674 t->debug_id, fd);
4675 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4676 fixup->target_fd = fd;
4677 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4678 fixup->offset, &fd,
4679 sizeof(u32))) {
4680 ret = -EINVAL;
4681 goto err;
4682 }
4683 }
4684 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4685 fd_install(fixup->target_fd, fixup->file);
4686 list_del(&fixup->fixup_entry);
4687 kfree(fixup);
4688 }
4689
4690 return ret;
4691
4692 err:
4693 binder_free_txn_fixups(t);
4694 return ret;
4695 }
4696
4697 static int binder_thread_read(struct binder_proc *proc,
4698 struct binder_thread *thread,
4699 binder_uintptr_t binder_buffer, size_t size,
4700 binder_size_t *consumed, int non_block)
4701 {
4702 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4703 void __user *ptr = buffer + *consumed;
4704 void __user *end = buffer + size;
4705
4706 int ret = 0;
4707 bool nothing_to_do = false;
4708 bool force_spawn = false;
4709 int wait_for_proc_work;
4710
4711 if (*consumed == 0) {
4712 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4713 return -EFAULT;
4714 ptr += sizeof(uint32_t);
4715 }
4716
4717 retry:
4718 binder_inner_proc_lock(proc);
4719 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4720 binder_inner_proc_unlock(proc);
4721
4722 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4723
4724 trace_binder_wait_for_work(wait_for_proc_work,
4725 !!thread->transaction_stack,
4726 !binder_worklist_empty(proc, &thread->todo));
4727 if (wait_for_proc_work) {
4728 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4729 BINDER_LOOPER_STATE_ENTERED))) {
4730 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4731 proc->pid, thread->pid, thread->looper);
4732 wait_event_interruptible(binder_user_error_wait,
4733 binder_stop_on_user_error < 2);
4734 }
4735 trace_android_vh_binder_restore_priority(NULL, current);
4736 binder_restore_priority(thread, &proc->default_priority);
4737 }
4738
4739 if (non_block) {
4740 if (!binder_has_work(thread, wait_for_proc_work))
4741 ret = -EAGAIN;
4742 } else {
4743 ret = binder_wait_for_work(thread, wait_for_proc_work);
4744 }
4745
4746 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4747
4748 if (ret)
4749 return ret;
4750
4751 while (1) {
4752 uint32_t cmd;
4753 struct binder_transaction_data_secctx tr;
4754 struct binder_transaction_data *trd = &tr.transaction_data;
4755 struct binder_work *w = NULL;
4756 struct list_head *list = NULL;
4757 struct binder_transaction *t = NULL;
4758 struct binder_thread *t_from;
4759 size_t trsize = sizeof(*trd);
4760
4761 binder_inner_proc_lock(proc);
4762 trace_android_vh_binder_select_special_worklist(&list, thread,
4763 proc, wait_for_proc_work, &nothing_to_do);
4764 if (list)
4765 goto skip;
4766 else if (nothing_to_do)
4767 goto no_work;
4768
4769 if (!binder_worklist_empty_ilocked(&thread->todo))
4770 list = &thread->todo;
4771 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4772 wait_for_proc_work)
4773 list = &proc->todo;
4774 else {
4775 no_work:
4776 binder_inner_proc_unlock(proc);
4777
4778 /* no data added */
4779 if (ptr - buffer == 4 && !thread->looper_need_return)
4780 goto retry;
4781 break;
4782 }
4783 skip:
4784 if (end - ptr < sizeof(tr) + 4) {
4785 binder_inner_proc_unlock(proc);
4786 break;
4787 }
4788 trace_android_vh_binder_thread_read(&list, proc, thread);
4789 w = binder_dequeue_work_head_ilocked(list);
4790 if (binder_worklist_empty_ilocked(&thread->todo))
4791 thread->process_todo = false;
4792
4793 switch (w->type) {
4794 case BINDER_WORK_TRANSACTION: {
4795 binder_inner_proc_unlock(proc);
4796 t = container_of(w, struct binder_transaction, work);
4797 } break;
4798 case BINDER_WORK_RETURN_ERROR: {
4799 struct binder_error *e = container_of(
4800 w, struct binder_error, work);
4801
4802 WARN_ON(e->cmd == BR_OK);
4803 binder_inner_proc_unlock(proc);
4804 if (put_user(e->cmd, (uint32_t __user *)ptr))
4805 return -EFAULT;
4806 cmd = e->cmd;
4807 e->cmd = BR_OK;
4808 ptr += sizeof(uint32_t);
4809
4810 binder_stat_br(proc, thread, cmd);
4811 } break;
4812 case BINDER_WORK_TRANSACTION_COMPLETE:
4813 case BINDER_WORK_TRANSACTION_PENDING:
4814 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4815 if (proc->oneway_spam_detection_enabled &&
4816 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4817 cmd = BR_ONEWAY_SPAM_SUSPECT;
4818 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4819 cmd = BR_TRANSACTION_PENDING_FROZEN;
4820 else
4821 cmd = BR_TRANSACTION_COMPLETE;
4822 binder_inner_proc_unlock(proc);
4823 kfree(w);
4824 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4825 if (put_user(cmd, (uint32_t __user *)ptr))
4826 return -EFAULT;
4827 ptr += sizeof(uint32_t);
4828
4829 binder_stat_br(proc, thread, cmd);
4830 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4831 "%d:%d BR_TRANSACTION_COMPLETE\n",
4832 proc->pid, thread->pid);
4833 } break;
4834 case BINDER_WORK_NODE: {
4835 struct binder_node *node = container_of(w, struct binder_node, work);
4836 int strong, weak;
4837 binder_uintptr_t node_ptr = node->ptr;
4838 binder_uintptr_t node_cookie = node->cookie;
4839 int node_debug_id = node->debug_id;
4840 int has_weak_ref;
4841 int has_strong_ref;
4842 void __user *orig_ptr = ptr;
4843
4844 BUG_ON(proc != node->proc);
4845 strong = node->internal_strong_refs ||
4846 node->local_strong_refs;
4847 weak = !hlist_empty(&node->refs) ||
4848 node->local_weak_refs ||
4849 node->tmp_refs || strong;
4850 has_strong_ref = node->has_strong_ref;
4851 has_weak_ref = node->has_weak_ref;
4852
4853 if (weak && !has_weak_ref) {
4854 node->has_weak_ref = 1;
4855 node->pending_weak_ref = 1;
4856 node->local_weak_refs++;
4857 }
4858 if (strong && !has_strong_ref) {
4859 node->has_strong_ref = 1;
4860 node->pending_strong_ref = 1;
4861 node->local_strong_refs++;
4862 }
4863 if (!strong && has_strong_ref)
4864 node->has_strong_ref = 0;
4865 if (!weak && has_weak_ref)
4866 node->has_weak_ref = 0;
4867 if (!weak && !strong) {
4868 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4869 "%d:%d node %d u%016llx c%016llx deleted\n",
4870 proc->pid, thread->pid,
4871 node_debug_id,
4872 (u64)node_ptr,
4873 (u64)node_cookie);
4874 rb_erase(&node->rb_node, &proc->nodes);
4875 binder_inner_proc_unlock(proc);
4876 binder_node_lock(node);
4877 /*
4878 * Acquire the node lock before freeing the
4879 * node to serialize with other threads that
4880 * may have been holding the node lock while
4881 * decrementing this node (avoids race where
4882 * this thread frees while the other thread
4883 * is unlocking the node after the final
4884 * decrement)
4885 */
4886 binder_node_unlock(node);
4887 binder_free_node(node);
4888 } else
4889 binder_inner_proc_unlock(proc);
4890
4891 if (weak && !has_weak_ref)
4892 ret = binder_put_node_cmd(
4893 proc, thread, &ptr, node_ptr,
4894 node_cookie, node_debug_id,
4895 BR_INCREFS, "BR_INCREFS");
4896 if (!ret && strong && !has_strong_ref)
4897 ret = binder_put_node_cmd(
4898 proc, thread, &ptr, node_ptr,
4899 node_cookie, node_debug_id,
4900 BR_ACQUIRE, "BR_ACQUIRE");
4901 if (!ret && !strong && has_strong_ref)
4902 ret = binder_put_node_cmd(
4903 proc, thread, &ptr, node_ptr,
4904 node_cookie, node_debug_id,
4905 BR_RELEASE, "BR_RELEASE");
4906 if (!ret && !weak && has_weak_ref)
4907 ret = binder_put_node_cmd(
4908 proc, thread, &ptr, node_ptr,
4909 node_cookie, node_debug_id,
4910 BR_DECREFS, "BR_DECREFS");
4911 if (orig_ptr == ptr)
4912 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4913 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4914 proc->pid, thread->pid,
4915 node_debug_id,
4916 (u64)node_ptr,
4917 (u64)node_cookie);
4918 if (ret)
4919 return ret;
4920 } break;
4921 case BINDER_WORK_DEAD_BINDER:
4922 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4923 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4924 struct binder_ref_death *death;
4925 uint32_t cmd;
4926 binder_uintptr_t cookie;
4927
4928 death = container_of(w, struct binder_ref_death, work);
4929 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4930 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4931 else
4932 cmd = BR_DEAD_BINDER;
4933 cookie = death->cookie;
4934
4935 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4936 "%d:%d %s %016llx\n",
4937 proc->pid, thread->pid,
4938 cmd == BR_DEAD_BINDER ?
4939 "BR_DEAD_BINDER" :
4940 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4941 (u64)cookie);
4942 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4943 binder_inner_proc_unlock(proc);
4944 kfree(death);
4945 binder_stats_deleted(BINDER_STAT_DEATH);
4946 } else {
4947 binder_enqueue_work_ilocked(
4948 w, &proc->delivered_death);
4949 binder_inner_proc_unlock(proc);
4950 }
4951 if (put_user(cmd, (uint32_t __user *)ptr))
4952 return -EFAULT;
4953 ptr += sizeof(uint32_t);
4954 if (put_user(cookie,
4955 (binder_uintptr_t __user *)ptr))
4956 return -EFAULT;
4957 ptr += sizeof(binder_uintptr_t);
4958 binder_stat_br(proc, thread, cmd);
4959 if (cmd == BR_DEAD_BINDER)
4960 goto done; /* DEAD_BINDER notifications can cause transactions */
4961 } break;
4962 default:
4963 binder_inner_proc_unlock(proc);
4964 pr_err("%d:%d: bad work type %d\n",
4965 proc->pid, thread->pid, w->type);
4966 break;
4967 }
4968
4969 if (!t)
4970 continue;
4971
4972 BUG_ON(t->buffer == NULL);
4973 if (t->buffer->target_node) {
4974 struct binder_node *target_node = t->buffer->target_node;
4975
4976 trd->target.ptr = target_node->ptr;
4977 trd->cookie = target_node->cookie;
4978 binder_transaction_priority(thread, t, target_node);
4979 cmd = BR_TRANSACTION;
4980 } else {
4981 trd->target.ptr = 0;
4982 trd->cookie = 0;
4983 cmd = BR_REPLY;
4984 }
4985 trd->code = t->code;
4986 trd->flags = t->flags;
4987 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4988
4989 t_from = binder_get_txn_from(t);
4990 if (t_from) {
4991 struct task_struct *sender = t_from->proc->tsk;
4992
4993 trd->sender_pid =
4994 task_tgid_nr_ns(sender,
4995 task_active_pid_ns(current));
4996 trace_android_vh_sync_txn_recvd(thread->task, t_from->task);
4997 } else {
4998 trd->sender_pid = 0;
4999 }
5000
5001 ret = binder_apply_fd_fixups(proc, t);
5002 if (ret) {
5003 struct binder_buffer *buffer = t->buffer;
5004 bool oneway = !!(t->flags & TF_ONE_WAY);
5005 int tid = t->debug_id;
5006
5007 if (t_from)
5008 binder_thread_dec_tmpref(t_from);
5009 buffer->transaction = NULL;
5010 binder_cleanup_transaction(t, "fd fixups failed",
5011 BR_FAILED_REPLY);
5012 binder_free_buf(proc, thread, buffer, true);
5013 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5014 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5015 proc->pid, thread->pid,
5016 oneway ? "async " :
5017 (cmd == BR_REPLY ? "reply " : ""),
5018 tid, BR_FAILED_REPLY, ret, __LINE__);
5019 if (cmd == BR_REPLY) {
5020 cmd = BR_FAILED_REPLY;
5021 if (put_user(cmd, (uint32_t __user *)ptr))
5022 return -EFAULT;
5023 ptr += sizeof(uint32_t);
5024 binder_stat_br(proc, thread, cmd);
5025 break;
5026 }
5027 continue;
5028 }
5029 trd->data_size = t->buffer->data_size;
5030 trd->offsets_size = t->buffer->offsets_size;
5031 trd->data.ptr.buffer = t->buffer->user_data;
5032 trd->data.ptr.offsets = trd->data.ptr.buffer +
5033 ALIGN(t->buffer->data_size,
5034 sizeof(void *));
5035
5036 tr.secctx = t->security_ctx;
5037 if (t->security_ctx) {
5038 cmd = BR_TRANSACTION_SEC_CTX;
5039 trsize = sizeof(tr);
5040 }
5041 if (put_user(cmd, (uint32_t __user *)ptr)) {
5042 if (t_from)
5043 binder_thread_dec_tmpref(t_from);
5044
5045 binder_cleanup_transaction(t, "put_user failed",
5046 BR_FAILED_REPLY);
5047
5048 return -EFAULT;
5049 }
5050 ptr += sizeof(uint32_t);
5051 if (copy_to_user(ptr, &tr, trsize)) {
5052 if (t_from)
5053 binder_thread_dec_tmpref(t_from);
5054
5055 binder_cleanup_transaction(t, "copy_to_user failed",
5056 BR_FAILED_REPLY);
5057
5058 return -EFAULT;
5059 }
5060 ptr += trsize;
5061
5062 trace_binder_transaction_received(t);
5063 trace_android_vh_binder_transaction_received(t, proc, thread, cmd);
5064 binder_stat_br(proc, thread, cmd);
5065 binder_debug(BINDER_DEBUG_TRANSACTION,
5066 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
5067 proc->pid, thread->pid,
5068 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5069 (cmd == BR_TRANSACTION_SEC_CTX) ?
5070 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5071 t->debug_id, t_from ? t_from->proc->pid : 0,
5072 t_from ? t_from->pid : 0, cmd,
5073 t->buffer->data_size, t->buffer->offsets_size,
5074 (u64)trd->data.ptr.buffer,
5075 (u64)trd->data.ptr.offsets);
5076
5077 if (t_from)
5078 binder_thread_dec_tmpref(t_from);
5079 t->buffer->allow_user_free = 1;
5080 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5081 binder_inner_proc_lock(thread->proc);
5082 t->to_parent = thread->transaction_stack;
5083 t->to_thread = thread;
5084 thread->transaction_stack = t;
5085 binder_inner_proc_unlock(thread->proc);
5086 } else {
5087 binder_free_transaction(t);
5088 }
5089 break;
5090 }
5091
5092 done:
5093
5094 *consumed = ptr - buffer;
5095 binder_inner_proc_lock(proc);
5096 trace_android_vh_binder_spawn_new_thread(thread, proc, &force_spawn);
5097
5098 if (force_spawn || (proc->requested_threads == 0 &&
5099 list_empty(&thread->proc->waiting_threads) &&
5100 proc->requested_threads_started < proc->max_threads &&
5101 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5102 BINDER_LOOPER_STATE_ENTERED))) /* the user-space code fails to */
5103 /* spawn a new thread if we leave this out */) {
5104 proc->requested_threads++;
5105 binder_inner_proc_unlock(proc);
5106 binder_debug(BINDER_DEBUG_THREADS,
5107 "%d:%d BR_SPAWN_LOOPER\n",
5108 proc->pid, thread->pid);
5109 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5110 return -EFAULT;
5111 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5112 } else
5113 binder_inner_proc_unlock(proc);
5114 return 0;
5115 }
5116
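/*
 * Drain @list of work items that will never be delivered to userspace,
 * releasing whatever each entry owns: transactions are aborted with
 * BR_DEAD_REPLY, transaction-complete entries and death notifications
 * are freed, and work embedded in other objects (errors, nodes) is
 * only dequeued.
 */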
5117 static void binder_release_work(struct binder_proc *proc,
5118 struct list_head *list)
5119 {
5120 struct binder_work *w;
5121 enum binder_work_type wtype;
5122
5123 while (1) {
5124 binder_inner_proc_lock(proc);
5125 w = binder_dequeue_work_head_ilocked(list);
5126 wtype = w ? w->type : 0;
5127 binder_inner_proc_unlock(proc);
5128 if (!w)
5129 return;
5130
5131 switch (wtype) {
5132 case BINDER_WORK_TRANSACTION: {
5133 struct binder_transaction *t;
5134
5135 t = container_of(w, struct binder_transaction, work);
5136
5137 binder_cleanup_transaction(t, "process died.",
5138 BR_DEAD_REPLY);
5139 } break;
5140 case BINDER_WORK_RETURN_ERROR: {
5141 struct binder_error *e = container_of(
5142 w, struct binder_error, work);
5143
5144 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5145 "undelivered TRANSACTION_ERROR: %u\n",
5146 e->cmd);
5147 } break;
5148 case BINDER_WORK_TRANSACTION_PENDING:
5149 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5150 case BINDER_WORK_TRANSACTION_COMPLETE: {
5151 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5152 "undelivered TRANSACTION_COMPLETE\n");
5153 kfree(w);
5154 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5155 } break;
5156 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5157 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5158 struct binder_ref_death *death;
5159
5160 death = container_of(w, struct binder_ref_death, work);
5161 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5162 "undelivered death notification, %016llx\n",
5163 (u64)death->cookie);
5164 kfree(death);
5165 binder_stats_deleted(BINDER_STAT_DEATH);
5166 } break;
5167 case BINDER_WORK_NODE:
5168 break;
5169 default:
5170 pr_err("unexpected work type, %d, not freed\n",
5171 wtype);
5172 break;
5173 }
5174 }
5175
5176 }
5177
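/*
 * Look up the binder_thread for current in @proc->threads (an rbtree
 * keyed by pid). If it is not found and @new_thread was supplied,
 * initialize @new_thread and insert it instead. Requires
 * proc->inner_lock to be held.
 */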
5178 static struct binder_thread *binder_get_thread_ilocked(
5179 struct binder_proc *proc, struct binder_thread *new_thread)
5180 {
5181 struct binder_thread *thread = NULL;
5182 struct rb_node *parent = NULL;
5183 struct rb_node **p = &proc->threads.rb_node;
5184
5185 while (*p) {
5186 parent = *p;
5187 thread = rb_entry(parent, struct binder_thread, rb_node);
5188
5189 if (current->pid < thread->pid)
5190 p = &(*p)->rb_left;
5191 else if (current->pid > thread->pid)
5192 p = &(*p)->rb_right;
5193 else
5194 return thread;
5195 }
5196 if (!new_thread)
5197 return NULL;
5198 thread = new_thread;
5199 binder_stats_created(BINDER_STAT_THREAD);
5200 thread->proc = proc;
5201 thread->pid = current->pid;
5202 get_task_struct(current);
5203 thread->task = current;
5204 atomic_set(&thread->tmp_ref, 0);
5205 init_waitqueue_head(&thread->wait);
5206 INIT_LIST_HEAD(&thread->todo);
5207 rb_link_node(&thread->rb_node, parent, p);
5208 rb_insert_color(&thread->rb_node, &proc->threads);
5209 thread->looper_need_return = true;
5210 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5211 thread->return_error.cmd = BR_OK;
5212 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5213 thread->reply_error.cmd = BR_OK;
5214 spin_lock_init(&thread->prio_lock);
5215 thread->prio_state = BINDER_PRIO_SET;
5216 thread->ee.command = BR_OK;
5217 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5218 return thread;
5219 }
5220
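/*
 * Return the binder_thread for current, creating it on first use. The
 * new thread is allocated outside the inner lock and the lookup is
 * retried; if another caller won the race, the unused allocation is
 * freed.
 */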
5221 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5222 {
5223 struct binder_thread *thread;
5224 struct binder_thread *new_thread;
5225
5226 binder_inner_proc_lock(proc);
5227 thread = binder_get_thread_ilocked(proc, NULL);
5228 binder_inner_proc_unlock(proc);
5229 if (!thread) {
5230 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5231 if (new_thread == NULL)
5232 return NULL;
5233 binder_inner_proc_lock(proc);
5234 thread = binder_get_thread_ilocked(proc, new_thread);
5235 binder_inner_proc_unlock(proc);
5236 if (thread != new_thread)
5237 kfree(new_thread);
5238 }
5239 return thread;
5240 }
5241
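/*
 * Final teardown of a binder_proc once the last temporary reference is
 * dropped: release the binder_device reference, tear down the buffer
 * allocator and drop the task, cred and proc-wrapper memory.
 */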
5242 static void binder_free_proc(struct binder_proc *proc)
5243 {
5244 struct binder_device *device;
5245
5246 BUG_ON(!list_empty(&proc->todo));
5247 BUG_ON(!list_empty(&proc->delivered_death));
5248 if (proc->outstanding_txns)
5249 pr_warn("%s: Unexpected outstanding_txns %d\n",
5250 __func__, proc->outstanding_txns);
5251 device = container_of(proc->context, struct binder_device, context);
5252 if (refcount_dec_and_test(&device->ref)) {
5253 kfree(proc->context->name);
5254 kfree(device);
5255 }
5256 binder_alloc_deferred_release(&proc->alloc);
5257 put_task_struct(proc->tsk);
5258 put_cred(proc->cred);
5259 binder_stats_deleted(BINDER_STAT_PROC);
5260 dbitmap_free(&proc_wrapper(proc)->dmap);
5261 trace_android_vh_binder_free_proc(proc);
5262 kfree(proc_wrapper(proc));
5263 }
5264
5265 static void binder_free_thread(struct binder_thread *thread)
5266 {
5267 BUG_ON(!list_empty(&thread->todo));
5268 binder_stats_deleted(BINDER_STAT_THREAD);
5269 binder_proc_dec_tmpref(thread->proc);
5270 put_task_struct(thread->task);
5271 kfree(thread);
5272 }
5273
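/*
 * Detach @thread from @proc and mark it dead. Transactions the thread
 * is part of are unwound leg by leg, pollers are kicked out via
 * wake_up_pollfree(), and a BR_DEAD_REPLY is sent if this thread still
 * owed a reply. Returns the number of transactions that were active.
 */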
5274 static int binder_thread_release(struct binder_proc *proc,
5275 struct binder_thread *thread)
5276 {
5277 struct binder_transaction *t;
5278 struct binder_transaction *send_reply = NULL;
5279 int active_transactions = 0;
5280 struct binder_transaction *last_t = NULL;
5281
5282 binder_inner_proc_lock(thread->proc);
5283 /*
5284 * take a ref on the proc so it survives
5285 * after we remove this thread from proc->threads.
5286 * The corresponding decrement happens when we actually
5287 * free the thread in binder_free_thread().
5288 */
5289 proc->tmp_ref++;
5290 /*
5291 * take a ref on this thread to ensure it
5292 * survives while we are releasing it
5293 */
5294 atomic_inc(&thread->tmp_ref);
5295 rb_erase(&thread->rb_node, &proc->threads);
5296 t = thread->transaction_stack;
5297 if (t) {
5298 spin_lock(&t->lock);
5299 if (t->to_thread == thread)
5300 send_reply = t;
5301 } else {
5302 __acquire(&t->lock);
5303 }
5304 thread->is_dead = true;
5305
5306 while (t) {
5307 last_t = t;
5308 active_transactions++;
5309 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5310 "release %d:%d transaction %d %s, still active\n",
5311 proc->pid, thread->pid,
5312 t->debug_id,
5313 (t->to_thread == thread) ? "in" : "out");
5314
5315 if (t->to_thread == thread) {
5316 thread->proc->outstanding_txns--;
5317 t->to_proc = NULL;
5318 t->to_thread = NULL;
5319 if (t->buffer) {
5320 t->buffer->transaction = NULL;
5321 t->buffer = NULL;
5322 }
5323 t = t->to_parent;
5324 } else if (t->from == thread) {
5325 t->from = NULL;
5326 t = t->from_parent;
5327 } else
5328 BUG();
5329 spin_unlock(&last_t->lock);
5330 if (t)
5331 spin_lock(&t->lock);
5332 else
5333 __acquire(&t->lock);
5334 }
5335 /* annotation for sparse, lock not acquired in last iteration above */
5336 __release(&t->lock);
5337
5338 /*
5339 * If this thread used poll, make sure we remove the waitqueue from any
5340 * poll data structures holding it.
5341 */
5342 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5343 wake_up_pollfree(&thread->wait);
5344
5345 binder_inner_proc_unlock(thread->proc);
5346
5347 /*
5348 * This is needed to avoid races between wake_up_pollfree() above and
5349 * someone else removing the last entry from the queue for other reasons
5350 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5351 * descriptor being closed). Such other users hold an RCU read lock, so
5352 * we can be sure they're done after we call synchronize_rcu().
5353 */
5354 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5355 synchronize_rcu();
5356
5357 if (send_reply)
5358 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5359 binder_release_work(proc, &thread->todo);
5360 trace_android_vh_binder_thread_release(proc, thread);
5361 binder_thread_dec_tmpref(thread);
5362 return active_transactions;
5363 }
5364
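/*
 * poll() support: flag the thread as a poll user and report EPOLLIN
 * when either thread-local or process-wide work is pending.
 */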
5365 static __poll_t binder_poll(struct file *filp,
5366 struct poll_table_struct *wait)
5367 {
5368 struct binder_proc *proc = filp->private_data;
5369 struct binder_thread *thread = NULL;
5370 bool wait_for_proc_work;
5371
5372 thread = binder_get_thread(proc);
5373 if (!thread)
5374 return EPOLLERR;
5375
5376 binder_inner_proc_lock(thread->proc);
5377 thread->looper |= BINDER_LOOPER_STATE_POLL;
5378 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5379
5380 binder_inner_proc_unlock(thread->proc);
5381
5382 poll_wait(filp, &thread->wait, wait);
5383
5384 if (binder_has_work(thread, wait_for_proc_work))
5385 return EPOLLIN;
5386
5387 return 0;
5388 }
5389
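/*
 * Handle BINDER_WRITE_READ: copy the binder_write_read block from
 * userspace, consume the write buffer first, then fill the read buffer
 * (blocking unless O_NONBLOCK), and copy the updated consumed counts
 * back to userspace.
 *
 * Illustrative userspace sketch only (not part of this driver); it
 * assumes an already-opened binder fd, <sys/ioctl.h> and the UAPI
 * header <linux/android/binder.h>:
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	char readbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *		.read_buffer = (binder_uintptr_t)(uintptr_t)readbuf,
 *		.read_size = sizeof(readbuf),
 *	};
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 */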
5390 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5391 struct binder_thread *thread)
5392 {
5393 int ret = 0;
5394 struct binder_proc *proc = filp->private_data;
5395 void __user *ubuf = (void __user *)arg;
5396 struct binder_write_read bwr;
5397 bool has_special_work = false;
5398
5399 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5400 ret = -EFAULT;
5401 goto out;
5402 }
5403 binder_debug(BINDER_DEBUG_READ_WRITE,
5404 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5405 proc->pid, thread->pid,
5406 (u64)bwr.write_size, (u64)bwr.write_buffer,
5407 (u64)bwr.read_size, (u64)bwr.read_buffer);
5408
5409 if (bwr.write_size > 0) {
5410 ret = binder_thread_write(proc, thread,
5411 bwr.write_buffer,
5412 bwr.write_size,
5413 &bwr.write_consumed);
5414 trace_binder_write_done(ret);
5415 if (ret < 0) {
5416 bwr.read_consumed = 0;
5417 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5418 ret = -EFAULT;
5419 goto out;
5420 }
5421 }
5422 if (bwr.read_size > 0) {
5423 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5424 bwr.read_size,
5425 &bwr.read_consumed,
5426 filp->f_flags & O_NONBLOCK);
5427 trace_binder_read_done(ret);
5428 binder_inner_proc_lock(proc);
5429 trace_android_vh_binder_has_proc_work_ilocked(
5430 thread, true, &has_special_work);
5431 if (!binder_worklist_empty_ilocked(&proc->todo) || has_special_work)
5432 binder_wakeup_proc_ilocked(proc);
5433 binder_inner_proc_unlock(proc);
5434 trace_android_vh_binder_read_done(proc, thread);
5435 if (ret < 0) {
5436 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5437 ret = -EFAULT;
5438 goto out;
5439 }
5440 }
5441 binder_debug(BINDER_DEBUG_READ_WRITE,
5442 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5443 proc->pid, thread->pid,
5444 (u64)bwr.write_consumed, (u64)bwr.write_size,
5445 (u64)bwr.read_consumed, (u64)bwr.read_size);
5446 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5447 ret = -EFAULT;
5448 goto out;
5449 }
5450 out:
5451 return ret;
5452 }
5453
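/*
 * Install the caller as context manager (handle 0) for this binder
 * context. Fails if a manager is already registered, if the LSM
 * denies it, or if a different uid previously claimed the role.
 */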
5454 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5455 struct flat_binder_object *fbo)
5456 {
5457 int ret = 0;
5458 struct binder_proc *proc = filp->private_data;
5459 struct binder_context *context = proc->context;
5460 struct binder_node *new_node;
5461 kuid_t curr_euid = current_euid();
5462
5463 mutex_lock(&context->context_mgr_node_lock);
5464 if (context->binder_context_mgr_node) {
5465 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5466 ret = -EBUSY;
5467 goto out;
5468 }
5469 ret = security_binder_set_context_mgr(proc->cred);
5470 if (ret < 0)
5471 goto out;
5472 if (uid_valid(context->binder_context_mgr_uid)) {
5473 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5474 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5475 from_kuid(&init_user_ns, curr_euid),
5476 from_kuid(&init_user_ns,
5477 context->binder_context_mgr_uid));
5478 ret = -EPERM;
5479 goto out;
5480 }
5481 } else {
5482 context->binder_context_mgr_uid = curr_euid;
5483 }
5484 new_node = binder_new_node(proc, fbo);
5485 if (!new_node) {
5486 ret = -ENOMEM;
5487 goto out;
5488 }
5489 binder_node_lock(new_node);
5490 new_node->local_weak_refs++;
5491 new_node->local_strong_refs++;
5492 new_node->has_strong_ref = 1;
5493 new_node->has_weak_ref = 1;
5494 context->binder_context_mgr_node = new_node;
5495 binder_node_unlock(new_node);
5496 binder_put_node(new_node);
5497 out:
5498 mutex_unlock(&context->context_mgr_node_lock);
5499 return ret;
5500 }
5501
5502 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5503 struct binder_node_info_for_ref *info)
5504 {
5505 struct binder_node *node;
5506 struct binder_context *context = proc->context;
5507 __u32 handle = info->handle;
5508
5509 if (info->strong_count || info->weak_count || info->reserved1 ||
5510 info->reserved2 || info->reserved3) {
5511 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5512 proc->pid);
5513 return -EINVAL;
5514 }
5515
5516 /* This ioctl may only be used by the context manager */
5517 mutex_lock(&context->context_mgr_node_lock);
5518 if (!context->binder_context_mgr_node ||
5519 context->binder_context_mgr_node->proc != proc) {
5520 mutex_unlock(&context->context_mgr_node_lock);
5521 return -EPERM;
5522 }
5523 mutex_unlock(&context->context_mgr_node_lock);
5524
5525 node = binder_get_node_from_ref(proc, handle, true, NULL);
5526 if (!node)
5527 return -EINVAL;
5528
5529 info->strong_count = node->local_strong_refs +
5530 node->internal_strong_refs;
5531 info->weak_count = node->local_weak_refs;
5532
5533 binder_put_node(node);
5534
5535 return 0;
5536 }
5537
5538 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5539 struct binder_node_debug_info *info)
5540 {
5541 struct rb_node *n;
5542 binder_uintptr_t ptr = info->ptr;
5543
5544 memset(info, 0, sizeof(*info));
5545
5546 binder_inner_proc_lock(proc);
5547 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5548 struct binder_node *node = rb_entry(n, struct binder_node,
5549 rb_node);
5550 if (node->ptr > ptr) {
5551 info->ptr = node->ptr;
5552 info->cookie = node->cookie;
5553 info->has_strong_ref = node->has_strong_ref;
5554 info->has_weak_ref = node->has_weak_ref;
5555 break;
5556 }
5557 }
5558 binder_inner_proc_unlock(proc);
5559
5560 return 0;
5561 }
5562
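/*
 * Return true if @proc still has outstanding transactions or any of
 * its threads has a non-empty transaction stack. Requires
 * proc->inner_lock to be held.
 */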
5563 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5564 {
5565 struct rb_node *n;
5566 struct binder_thread *thread;
5567
5568 if (proc->outstanding_txns > 0)
5569 return true;
5570
5571 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5572 thread = rb_entry(n, struct binder_thread, rb_node);
5573 if (thread->transaction_stack)
5574 return true;
5575 }
5576 return false;
5577 }
5578
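/*
 * Freeze or unfreeze @target_proc for BINDER_FREEZE. Freezing marks
 * the process so new transactions are rejected; with a timeout we
 * additionally wait for outstanding transactions to drain. If work is
 * still pending, the freeze is reverted and -EAGAIN is returned.
 */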
5579 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5580 struct binder_proc *target_proc)
5581 {
5582 int ret = 0;
5583
5584 if (!info->enable) {
5585 binder_inner_proc_lock(target_proc);
5586 target_proc->sync_recv = false;
5587 target_proc->async_recv = false;
5588 target_proc->is_frozen = false;
5589 binder_inner_proc_unlock(target_proc);
5590 return 0;
5591 }
5592
5593 /*
5594 * Freezing the target. Prevent new transactions by
5595 * setting the frozen state. If a timeout is specified, wait
5596 * for transactions to drain.
5597 */
5598 binder_inner_proc_lock(target_proc);
5599 target_proc->sync_recv = false;
5600 target_proc->async_recv = false;
5601 target_proc->is_frozen = true;
5602 binder_inner_proc_unlock(target_proc);
5603
5604 if (info->timeout_ms > 0)
5605 ret = wait_event_interruptible_timeout(
5606 target_proc->freeze_wait,
5607 (!target_proc->outstanding_txns),
5608 msecs_to_jiffies(info->timeout_ms));
5609
5610 /* Check pending transactions that wait for reply */
5611 if (ret >= 0) {
5612 binder_inner_proc_lock(target_proc);
5613 if (binder_txns_pending_ilocked(target_proc))
5614 ret = -EAGAIN;
5615 binder_inner_proc_unlock(target_proc);
5616 }
5617
5618 if (ret < 0) {
5619 binder_inner_proc_lock(target_proc);
5620 target_proc->is_frozen = false;
5621 binder_inner_proc_unlock(target_proc);
5622 }
5623
5624 return ret;
5625 }
5626
5627 static int binder_ioctl_get_freezer_info(
5628 struct binder_frozen_status_info *info)
5629 {
5630 struct binder_proc *target_proc;
5631 bool found = false;
5632 __u32 txns_pending;
5633
5634 info->sync_recv = 0;
5635 info->async_recv = 0;
5636
5637 mutex_lock(&binder_procs_lock);
5638 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5639 if (target_proc->pid == info->pid) {
5640 found = true;
5641 binder_inner_proc_lock(target_proc);
5642 txns_pending = binder_txns_pending_ilocked(target_proc);
5643 info->sync_recv |= target_proc->sync_recv |
5644 (txns_pending << 1);
5645 info->async_recv |= target_proc->async_recv;
5646 binder_inner_proc_unlock(target_proc);
5647 }
5648 }
5649 mutex_unlock(&binder_procs_lock);
5650
5651 if (!found)
5652 return -EINVAL;
5653
5654 return 0;
5655 }
5656
5657 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5658 void __user *ubuf)
5659 {
5660 struct binder_extended_error ee;
5661
5662 binder_inner_proc_lock(thread->proc);
5663 ee = thread->ee;
5664 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5665 binder_inner_proc_unlock(thread->proc);
5666
5667 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5668 return -EFAULT;
5669
5670 return 0;
5671 }
5672
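/*
 * Main ioctl entry point. BINDER_WRITE_READ is the transaction path;
 * the remaining commands configure the process (max threads, context
 * manager, freeze state, ...) or query driver state.
 *
 * Illustrative userspace sketch only (not part of this driver),
 * using the UAPI constants from <linux/android/binder.h>:
 *
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	ioctl(fd, BINDER_VERSION, &vers);
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */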
5673 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5674 {
5675 int ret;
5676 struct binder_proc *proc = filp->private_data;
5677 struct binder_thread *thread;
5678 void __user *ubuf = (void __user *)arg;
5679
5680 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5681 proc->pid, current->pid, cmd, arg);*/
5682
5683 binder_selftest_alloc(&proc->alloc);
5684
5685 trace_binder_ioctl(cmd, arg);
5686
5687 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5688 if (ret)
5689 goto err_unlocked;
5690
5691 thread = binder_get_thread(proc);
5692 if (thread == NULL) {
5693 ret = -ENOMEM;
5694 goto err;
5695 }
5696
5697 switch (cmd) {
5698 case BINDER_WRITE_READ:
5699 ret = binder_ioctl_write_read(filp, arg, thread);
5700 if (ret)
5701 goto err;
5702 break;
5703 case BINDER_SET_MAX_THREADS: {
5704 u32 max_threads;
5705
5706 if (copy_from_user(&max_threads, ubuf,
5707 sizeof(max_threads))) {
5708 ret = -EINVAL;
5709 goto err;
5710 }
5711 binder_inner_proc_lock(proc);
5712 proc->max_threads = max_threads;
5713 binder_inner_proc_unlock(proc);
5714 break;
5715 }
5716 case BINDER_SET_CONTEXT_MGR_EXT: {
5717 struct flat_binder_object fbo;
5718
5719 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5720 ret = -EINVAL;
5721 goto err;
5722 }
5723 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5724 if (ret)
5725 goto err;
5726 break;
5727 }
5728 case BINDER_SET_CONTEXT_MGR:
5729 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5730 if (ret)
5731 goto err;
5732 break;
5733 case BINDER_THREAD_EXIT:
5734 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5735 proc->pid, thread->pid);
5736 binder_thread_release(proc, thread);
5737 thread = NULL;
5738 break;
5739 case BINDER_VERSION: {
5740 struct binder_version __user *ver = ubuf;
5741
5742 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5743 &ver->protocol_version)) {
5744 ret = -EINVAL;
5745 goto err;
5746 }
5747 break;
5748 }
5749 case BINDER_GET_NODE_INFO_FOR_REF: {
5750 struct binder_node_info_for_ref info;
5751
5752 if (copy_from_user(&info, ubuf, sizeof(info))) {
5753 ret = -EFAULT;
5754 goto err;
5755 }
5756
5757 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5758 if (ret < 0)
5759 goto err;
5760
5761 if (copy_to_user(ubuf, &info, sizeof(info))) {
5762 ret = -EFAULT;
5763 goto err;
5764 }
5765
5766 break;
5767 }
5768 case BINDER_GET_NODE_DEBUG_INFO: {
5769 struct binder_node_debug_info info;
5770
5771 if (copy_from_user(&info, ubuf, sizeof(info))) {
5772 ret = -EFAULT;
5773 goto err;
5774 }
5775
5776 ret = binder_ioctl_get_node_debug_info(proc, &info);
5777 if (ret < 0)
5778 goto err;
5779
5780 if (copy_to_user(ubuf, &info, sizeof(info))) {
5781 ret = -EFAULT;
5782 goto err;
5783 }
5784 break;
5785 }
5786 case BINDER_FREEZE: {
5787 struct binder_freeze_info info;
5788 struct binder_proc **target_procs = NULL, *target_proc;
5789 int target_procs_count = 0, i = 0;
5790
5791 ret = 0;
5792
5793 if (copy_from_user(&info, ubuf, sizeof(info))) {
5794 ret = -EFAULT;
5795 goto err;
5796 }
5797
5798 mutex_lock(&binder_procs_lock);
5799 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5800 if (target_proc->pid == info.pid)
5801 target_procs_count++;
5802 }
5803
5804 if (target_procs_count == 0) {
5805 mutex_unlock(&binder_procs_lock);
5806 ret = -EINVAL;
5807 goto err;
5808 }
5809
5810 target_procs = kcalloc(target_procs_count,
5811 sizeof(struct binder_proc *),
5812 GFP_KERNEL);
5813
5814 if (!target_procs) {
5815 mutex_unlock(&binder_procs_lock);
5816 ret = -ENOMEM;
5817 goto err;
5818 }
5819
5820 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5821 if (target_proc->pid != info.pid)
5822 continue;
5823
5824 binder_inner_proc_lock(target_proc);
5825 target_proc->tmp_ref++;
5826 binder_inner_proc_unlock(target_proc);
5827
5828 target_procs[i++] = target_proc;
5829 }
5830 mutex_unlock(&binder_procs_lock);
5831
5832 for (i = 0; i < target_procs_count; i++) {
5833 if (ret >= 0)
5834 ret = binder_ioctl_freeze(&info,
5835 target_procs[i]);
5836
5837 binder_proc_dec_tmpref(target_procs[i]);
5838 }
5839
5840 kfree(target_procs);
5841
5842 if (ret < 0)
5843 goto err;
5844 break;
5845 }
5846 case BINDER_GET_FROZEN_INFO: {
5847 struct binder_frozen_status_info info;
5848
5849 if (copy_from_user(&info, ubuf, sizeof(info))) {
5850 ret = -EFAULT;
5851 goto err;
5852 }
5853
5854 ret = binder_ioctl_get_freezer_info(&info);
5855 if (ret < 0)
5856 goto err;
5857
5858 if (copy_to_user(ubuf, &info, sizeof(info))) {
5859 ret = -EFAULT;
5860 goto err;
5861 }
5862 break;
5863 }
5864 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5865 uint32_t enable;
5866
5867 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5868 ret = -EFAULT;
5869 goto err;
5870 }
5871 binder_inner_proc_lock(proc);
5872 proc->oneway_spam_detection_enabled = (bool)enable;
5873 binder_inner_proc_unlock(proc);
5874 break;
5875 }
5876 case BINDER_GET_EXTENDED_ERROR:
5877 ret = binder_ioctl_get_extended_error(thread, ubuf);
5878 if (ret < 0)
5879 goto err;
5880 break;
5881 default:
5882 ret = -EINVAL;
5883 goto err;
5884 }
5885 ret = 0;
5886 trace_android_vh_binder_ioctl_end(current, cmd, arg, thread, proc, &ret);
5887 err:
5888 if (thread)
5889 thread->looper_need_return = false;
5890 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5891 if (ret && ret != -EINTR)
5892 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5893 err_unlocked:
5894 trace_binder_ioctl_done(ret);
5895 return ret;
5896 }
5897
5898 static void binder_vma_open(struct vm_area_struct *vma)
5899 {
5900 struct binder_proc *proc = vma->vm_private_data;
5901
5902 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5903 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5904 proc->pid, vma->vm_start, vma->vm_end,
5905 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5906 (unsigned long)pgprot_val(vma->vm_page_prot));
5907 }
5908
5909 static void binder_vma_close(struct vm_area_struct *vma)
5910 {
5911 struct binder_proc *proc = vma->vm_private_data;
5912
5913 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5914 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5915 proc->pid, vma->vm_start, vma->vm_end,
5916 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5917 (unsigned long)pgprot_val(vma->vm_page_prot));
5918 binder_alloc_vma_close(&proc->alloc);
5919 }
5920
5921 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5922 {
5923 return VM_FAULT_SIGBUS;
5924 }
5925
5926 static const struct vm_operations_struct binder_vm_ops = {
5927 .open = binder_vma_open,
5928 .close = binder_vma_close,
5929 .fault = binder_vm_fault,
5930 };
5931
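/*
 * Set up the read-only mapping userspace uses to receive transaction
 * buffers. Writable or copy-on-fork mappings are refused; the actual
 * page handling lives in binder_alloc_mmap_handler().
 */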
5932 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5933 {
5934 struct binder_proc *proc = filp->private_data;
5935
5936 if (proc->tsk != current->group_leader)
5937 return -EINVAL;
5938
5939 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5940 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5941 __func__, proc->pid, vma->vm_start, vma->vm_end,
5942 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5943 (unsigned long)pgprot_val(vma->vm_page_prot));
5944
5945 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5946 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5947 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5948 return -EPERM;
5949 }
5950 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5951
5952 vma->vm_ops = &binder_vm_ops;
5953 vma->vm_private_data = proc;
5954
5955 return binder_alloc_mmap_handler(&proc->alloc, vma);
5956 }
5957
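/*
 * open() handler: allocate and initialize a binder_proc for the
 * opening process, tie it to the backing binder/binderfs device and
 * create the per-PID debugfs and binderfs log entries on first open.
 */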
5958 static int binder_open(struct inode *nodp, struct file *filp)
5959 {
5960 struct binder_proc_wrap *proc_wrap;
5961 struct binder_proc *proc, *itr;
5962 struct binder_device *binder_dev;
5963 struct binderfs_info *info;
5964 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5965 bool existing_pid = false;
5966
5967 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5968 current->group_leader->pid, current->pid);
5969
5970 proc_wrap = kzalloc(sizeof(*proc_wrap), GFP_KERNEL);
5971 if (proc_wrap == NULL)
5972 return -ENOMEM;
5973 proc = &proc_wrap->proc;
5974
5975 dbitmap_init(&proc_wrapper(proc)->dmap);
5976 spin_lock_init(&proc->inner_lock);
5977 spin_lock_init(&proc->outer_lock);
5978 get_task_struct(current->group_leader);
5979 proc->tsk = current->group_leader;
5980 proc->cred = get_cred(filp->f_cred);
5981 INIT_LIST_HEAD(&proc->todo);
5982 init_waitqueue_head(&proc->freeze_wait);
5983 if (binder_supported_policy(current->policy)) {
5984 proc->default_priority.sched_policy = current->policy;
5985 proc->default_priority.prio = current->normal_prio;
5986 } else {
5987 proc->default_priority.sched_policy = SCHED_NORMAL;
5988 proc->default_priority.prio = NICE_TO_PRIO(0);
5989 }
5990
5991 /* binderfs stashes devices in i_private */
5992 if (is_binderfs_device(nodp)) {
5993 binder_dev = nodp->i_private;
5994 info = nodp->i_sb->s_fs_info;
5995 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5996 } else {
5997 binder_dev = container_of(filp->private_data,
5998 struct binder_device, miscdev);
5999 }
6000 refcount_inc(&binder_dev->ref);
6001 proc->context = &binder_dev->context;
6002 binder_alloc_init(&proc->alloc);
6003
6004 binder_stats_created(BINDER_STAT_PROC);
6005 proc->pid = current->group_leader->pid;
6006 INIT_LIST_HEAD(&proc->delivered_death);
6007 INIT_LIST_HEAD(&proc->waiting_threads);
6008 filp->private_data = proc;
6009
6010 mutex_lock(&binder_procs_lock);
6011 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6012 if (itr->pid == proc->pid) {
6013 existing_pid = true;
6014 break;
6015 }
6016 }
6017 hlist_add_head(&proc->proc_node, &binder_procs);
6018 mutex_unlock(&binder_procs_lock);
6019 trace_android_vh_binder_preset(&binder_procs, &binder_procs_lock, proc);
6020 if (binder_debugfs_dir_entry_proc && !existing_pid) {
6021 char strbuf[11];
6022
6023 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6024 /*
6025 * proc debug entries are shared between contexts.
6026 * Only create an entry for the first PID to avoid debugfs log spamming.
6027 * The printing code will print all contexts for a given PID
6028 * anyway, so this is not a problem.
6029 */
6030 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6031 binder_debugfs_dir_entry_proc,
6032 (void *)(unsigned long)proc->pid,
6033 &proc_fops);
6034 }
6035
6036 if (binder_binderfs_dir_entry_proc && !existing_pid) {
6037 char strbuf[11];
6038 struct dentry *binderfs_entry;
6039
6040 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6041 /*
6042 * Similar to debugfs, the process specific log file is shared
6043 * between contexts. Only create for the first PID.
6044 * This is OK since, as with debugfs, the log file will contain
6045 * information on all contexts of a given PID.
6046 */
6047 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6048 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6049 if (!IS_ERR(binderfs_entry)) {
6050 proc->binderfs_entry = binderfs_entry;
6051 } else {
6052 int error;
6053
6054 error = PTR_ERR(binderfs_entry);
6055 pr_warn("Unable to create file %s in binderfs (error %d)\n",
6056 strbuf, error);
6057 }
6058 }
6059
6060 return 0;
6061 }
6062
6063 static int binder_flush(struct file *filp, fl_owner_t id)
6064 {
6065 struct binder_proc *proc = filp->private_data;
6066
6067 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6068
6069 return 0;
6070 }
6071
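/*
 * Deferred flush work: force every thread of @proc out of a pending
 * read by setting looper_need_return and waking any waiters.
 */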
6072 static void binder_deferred_flush(struct binder_proc *proc)
6073 {
6074 struct rb_node *n;
6075 int wake_count = 0;
6076
6077 binder_inner_proc_lock(proc);
6078 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6079 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6080
6081 thread->looper_need_return = true;
6082 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6083 wake_up_interruptible(&thread->wait);
6084 wake_count++;
6085 }
6086 }
6087 binder_inner_proc_unlock(proc);
6088
6089 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6090 "binder_flush: %d woke %d threads\n", proc->pid,
6091 wake_count);
6092 }
6093
6094 static int binder_release(struct inode *nodp, struct file *filp)
6095 {
6096 struct binder_proc *proc = filp->private_data;
6097
6098 debugfs_remove(proc->debugfs_entry);
6099
6100 if (proc->binderfs_entry) {
6101 binderfs_remove_file(proc->binderfs_entry);
6102 proc->binderfs_entry = NULL;
6103 }
6104
6105 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6106
6107 return 0;
6108 }
6109
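/*
 * Called on process teardown for each node owned by the dying proc:
 * flush pending async work, detach the node from the proc, move it to
 * the global dead-nodes list and queue BR_DEAD_BINDER work for every
 * reference that registered a death notification. Returns the updated
 * incoming-reference count.
 */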
6110 static int binder_node_release(struct binder_node *node, int refs)
6111 {
6112 struct binder_ref *ref;
6113 int death = 0;
6114 struct binder_proc *proc = node->proc;
6115
6116 binder_release_work(proc, &node->async_todo);
6117
6118 binder_node_lock(node);
6119 binder_inner_proc_lock(proc);
6120 binder_dequeue_work_ilocked(&node->work);
6121 /*
6122 * The caller must have taken a temporary ref on the node.
6123 */
6124 BUG_ON(!node->tmp_refs);
6125 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6126 binder_inner_proc_unlock(proc);
6127 binder_node_unlock(node);
6128 binder_free_node(node);
6129
6130 return refs;
6131 }
6132
6133 node->proc = NULL;
6134 node->local_strong_refs = 0;
6135 node->local_weak_refs = 0;
6136 binder_inner_proc_unlock(proc);
6137
6138 spin_lock(&binder_dead_nodes_lock);
6139 hlist_add_head(&node->dead_node, &binder_dead_nodes);
6140 spin_unlock(&binder_dead_nodes_lock);
6141
6142 hlist_for_each_entry(ref, &node->refs, node_entry) {
6143 refs++;
6144 /*
6145 * Need the node lock to synchronize
6146 * with new notification requests and the
6147 * inner lock to synchronize with queued
6148 * death notifications.
6149 */
6150 binder_inner_proc_lock(ref->proc);
6151 if (!ref->death) {
6152 binder_inner_proc_unlock(ref->proc);
6153 continue;
6154 }
6155
6156 death++;
6157
6158 BUG_ON(!list_empty(&ref->death->work.entry));
6159 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6160 binder_enqueue_work_ilocked(&ref->death->work,
6161 &ref->proc->todo);
6162 binder_wakeup_proc_ilocked(ref->proc);
6163 binder_inner_proc_unlock(ref->proc);
6164 }
6165
6166 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6167 "node %d now dead, refs %d, death %d\n",
6168 node->debug_id, refs, death);
6169 binder_node_unlock(node);
6170 binder_put_node(node);
6171
6172 return refs;
6173 }
6174
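/*
 * Deferred release work queued from binder_release(): mark the proc
 * dead, release all threads, nodes and references, drain the remaining
 * work lists and finally drop the temporary proc reference taken here.
 */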
6175 static void binder_deferred_release(struct binder_proc *proc)
6176 {
6177 struct binder_context *context = proc->context;
6178 struct rb_node *n;
6179 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6180 struct list_head *special_list = NULL;
6181
6182 mutex_lock(&binder_procs_lock);
6183 hlist_del(&proc->proc_node);
6184 mutex_unlock(&binder_procs_lock);
6185
6186 mutex_lock(&context->context_mgr_node_lock);
6187 if (context->binder_context_mgr_node &&
6188 context->binder_context_mgr_node->proc == proc) {
6189 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6190 "%s: %d context_mgr_node gone\n",
6191 __func__, proc->pid);
6192 context->binder_context_mgr_node = NULL;
6193 }
6194 mutex_unlock(&context->context_mgr_node_lock);
6195 binder_inner_proc_lock(proc);
6196 /*
6197 * Make sure proc stays alive after we
6198 * remove all the threads
6199 */
6200 proc->tmp_ref++;
6201
6202 proc->is_dead = true;
6203 proc->is_frozen = false;
6204 proc->sync_recv = false;
6205 proc->async_recv = false;
6206 threads = 0;
6207 active_transactions = 0;
6208 while ((n = rb_first(&proc->threads))) {
6209 struct binder_thread *thread;
6210
6211 thread = rb_entry(n, struct binder_thread, rb_node);
6212 binder_inner_proc_unlock(proc);
6213 threads++;
6214 active_transactions += binder_thread_release(proc, thread);
6215 binder_inner_proc_lock(proc);
6216 }
6217
6218 nodes = 0;
6219 incoming_refs = 0;
6220 while ((n = rb_first(&proc->nodes))) {
6221 struct binder_node *node;
6222
6223 node = rb_entry(n, struct binder_node, rb_node);
6224 nodes++;
6225 /*
6226 * take a temporary ref on the node before
6227 * calling binder_node_release() which will either
6228 * kfree() the node or call binder_put_node()
6229 */
6230 binder_inc_node_tmpref_ilocked(node);
6231 rb_erase(&node->rb_node, &proc->nodes);
6232 binder_inner_proc_unlock(proc);
6233 incoming_refs = binder_node_release(node, incoming_refs);
6234 binder_inner_proc_lock(proc);
6235 }
6236 binder_inner_proc_unlock(proc);
6237
6238 outgoing_refs = 0;
6239 binder_proc_lock(proc);
6240 while ((n = rb_first(&proc->refs_by_desc))) {
6241 struct binder_ref *ref;
6242
6243 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6244 outgoing_refs++;
6245 binder_cleanup_ref_olocked(ref);
6246 binder_proc_unlock(proc);
6247 binder_free_ref(ref);
6248 binder_proc_lock(proc);
6249 }
6250 binder_proc_unlock(proc);
6251
6252 binder_release_work(proc, &proc->todo);
6253 trace_android_vh_binder_release_special_work(proc, &special_list);
6254 if (special_list)
6255 binder_release_work(proc, special_list);
6256 binder_release_work(proc, &proc->delivered_death);
6257
6258 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6259 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6260 __func__, proc->pid, threads, nodes, incoming_refs,
6261 outgoing_refs, active_transactions);
6262
6263 binder_proc_dec_tmpref(proc);
6264 }
6265
6266 static void binder_deferred_func(struct work_struct *work)
6267 {
6268 struct binder_proc *proc;
6269
6270 int defer;
6271
6272 do {
6273 mutex_lock(&binder_deferred_lock);
6274 if (!hlist_empty(&binder_deferred_list)) {
6275 proc = hlist_entry(binder_deferred_list.first,
6276 struct binder_proc, deferred_work_node);
6277 hlist_del_init(&proc->deferred_work_node);
6278 defer = proc->deferred_work;
6279 proc->deferred_work = 0;
6280 } else {
6281 proc = NULL;
6282 defer = 0;
6283 }
6284 mutex_unlock(&binder_deferred_lock);
6285
6286 if (defer & BINDER_DEFERRED_FLUSH)
6287 binder_deferred_flush(proc);
6288
6289 if (defer & BINDER_DEFERRED_RELEASE)
6290 binder_deferred_release(proc); /* frees proc */
6291 } while (proc);
6292 }
6293 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6294
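/*
 * Record deferred work bits for @proc and, if the proc is not already
 * queued, put it on the global deferred list and kick the workqueue.
 */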
6295 static void
6296 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6297 {
6298 mutex_lock(&binder_deferred_lock);
6299 proc->deferred_work |= defer;
6300 if (hlist_unhashed(&proc->deferred_work_node)) {
6301 hlist_add_head(&proc->deferred_work_node,
6302 &binder_deferred_list);
6303 schedule_work(&binder_deferred_work);
6304 }
6305 mutex_unlock(&binder_deferred_lock);
6306 }
6307
6308 static void print_binder_transaction_ilocked(struct seq_file *m,
6309 struct binder_proc *proc,
6310 const char *prefix,
6311 struct binder_transaction *t)
6312 {
6313 struct binder_proc *to_proc;
6314 struct binder_buffer *buffer = t->buffer;
6315 ktime_t current_time = ktime_get();
6316
6317 spin_lock(&t->lock);
6318 to_proc = t->to_proc;
6319 seq_printf(m,
6320 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d elapsed %lldms",
6321 prefix, t->debug_id, t,
6322 t->from_pid,
6323 t->from_tid,
6324 to_proc ? to_proc->pid : 0,
6325 t->to_thread ? t->to_thread->pid : 0,
6326 t->code, t->flags, t->priority.sched_policy,
6327 t->priority.prio, t->need_reply,
6328 ktime_ms_delta(current_time, t->start_time));
6329 spin_unlock(&t->lock);
6330
6331 if (proc != to_proc) {
6332 /*
6333 * Can only safely deref buffer if we are holding the
6334 * correct proc inner lock for this node
6335 */
6336 seq_puts(m, "\n");
6337 return;
6338 }
6339
6340 if (buffer == NULL) {
6341 seq_puts(m, " buffer free\n");
6342 return;
6343 }
6344 if (buffer->target_node)
6345 seq_printf(m, " node %d", buffer->target_node->debug_id);
6346 seq_printf(m, " size %zd:%zd offset %lx\n",
6347 buffer->data_size, buffer->offsets_size,
6348 proc->alloc.buffer - buffer->user_data);
6349 }
6350
6351 static void print_binder_work_ilocked(struct seq_file *m,
6352 struct binder_proc *proc,
6353 const char *prefix,
6354 const char *transaction_prefix,
6355 struct binder_work *w)
6356 {
6357 struct binder_node *node;
6358 struct binder_transaction *t;
6359
6360 switch (w->type) {
6361 case BINDER_WORK_TRANSACTION:
6362 t = container_of(w, struct binder_transaction, work);
6363 print_binder_transaction_ilocked(
6364 m, proc, transaction_prefix, t);
6365 break;
6366 case BINDER_WORK_RETURN_ERROR: {
6367 struct binder_error *e = container_of(
6368 w, struct binder_error, work);
6369
6370 seq_printf(m, "%stransaction error: %u\n",
6371 prefix, e->cmd);
6372 } break;
6373 case BINDER_WORK_TRANSACTION_COMPLETE:
6374 seq_printf(m, "%stransaction complete\n", prefix);
6375 break;
6376 case BINDER_WORK_NODE:
6377 node = container_of(w, struct binder_node, work);
6378 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6379 prefix, node->debug_id,
6380 (u64)node->ptr, (u64)node->cookie);
6381 break;
6382 case BINDER_WORK_DEAD_BINDER:
6383 seq_printf(m, "%shas dead binder\n", prefix);
6384 break;
6385 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6386 seq_printf(m, "%shas cleared dead binder\n", prefix);
6387 break;
6388 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6389 seq_printf(m, "%shas cleared death notification\n", prefix);
6390 break;
6391 default:
6392 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6393 break;
6394 }
6395 }
6396
6397 static void print_binder_thread_ilocked(struct seq_file *m,
6398 struct binder_thread *thread,
6399 int print_always)
6400 {
6401 struct binder_transaction *t;
6402 struct binder_work *w;
6403 size_t start_pos = m->count;
6404 size_t header_pos;
6405
6406 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6407 thread->pid, thread->looper,
6408 thread->looper_need_return,
6409 atomic_read(&thread->tmp_ref));
6410 header_pos = m->count;
6411 t = thread->transaction_stack;
6412 while (t) {
6413 if (t->from == thread) {
6414 print_binder_transaction_ilocked(m, thread->proc,
6415 " outgoing transaction", t);
6416 t = t->from_parent;
6417 } else if (t->to_thread == thread) {
6418 print_binder_transaction_ilocked(m, thread->proc,
6419 " incoming transaction", t);
6420 t = t->to_parent;
6421 } else {
6422 print_binder_transaction_ilocked(m, thread->proc,
6423 " bad transaction", t);
6424 t = NULL;
6425 }
6426 }
6427 list_for_each_entry(w, &thread->todo, entry) {
6428 print_binder_work_ilocked(m, thread->proc, " ",
6429 " pending transaction", w);
6430 }
6431 if (!print_always && m->count == header_pos)
6432 m->count = start_pos;
6433 }
6434
6435 static void print_binder_node_nilocked(struct seq_file *m,
6436 struct binder_node *node)
6437 {
6438 struct binder_ref *ref;
6439 struct binder_work *w;
6440 int count;
6441
6442 count = 0;
6443 hlist_for_each_entry(ref, &node->refs, node_entry)
6444 count++;
6445
6446 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6447 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6448 node->sched_policy, node->min_priority,
6449 node->has_strong_ref, node->has_weak_ref,
6450 node->local_strong_refs, node->local_weak_refs,
6451 node->internal_strong_refs, count, node->tmp_refs);
6452 if (count) {
6453 seq_puts(m, " proc");
6454 hlist_for_each_entry(ref, &node->refs, node_entry)
6455 seq_printf(m, " %d", ref->proc->pid);
6456 }
6457 seq_puts(m, "\n");
6458 if (node->proc) {
6459 list_for_each_entry(w, &node->async_todo, entry)
6460 print_binder_work_ilocked(m, node->proc, " ",
6461 " pending async transaction", w);
6462 }
6463 }
6464
6465 static void print_binder_ref_olocked(struct seq_file *m,
6466 struct binder_ref *ref)
6467 {
6468 binder_node_lock(ref->node);
6469 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6470 ref->data.debug_id, ref->data.desc,
6471 ref->node->proc ? "" : "dead ",
6472 ref->node->debug_id, ref->data.strong,
6473 ref->data.weak, ref->death);
6474 binder_node_unlock(ref->node);
6475 }
6476
6477 static void print_binder_proc(struct seq_file *m,
6478 struct binder_proc *proc, int print_all)
6479 {
6480 struct binder_work *w;
6481 struct rb_node *n;
6482 size_t start_pos = m->count;
6483 size_t header_pos;
6484 struct binder_node *last_node = NULL;
6485
6486 seq_printf(m, "proc %d\n", proc->pid);
6487 seq_printf(m, "context %s\n", proc->context->name);
6488 header_pos = m->count;
6489
6490 binder_inner_proc_lock(proc);
6491 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6492 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6493 rb_node), print_all);
6494
6495 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6496 struct binder_node *node = rb_entry(n, struct binder_node,
6497 rb_node);
6498 if (!print_all && !node->has_async_transaction)
6499 continue;
6500
6501 /*
6502 * take a temporary reference on the node so it
6503 * survives and isn't removed from the tree
6504 * while we print it.
6505 */
6506 binder_inc_node_tmpref_ilocked(node);
6507 /* Need to drop inner lock to take node lock */
6508 binder_inner_proc_unlock(proc);
6509 if (last_node)
6510 binder_put_node(last_node);
6511 binder_node_inner_lock(node);
6512 print_binder_node_nilocked(m, node);
6513 binder_node_inner_unlock(node);
6514 last_node = node;
6515 binder_inner_proc_lock(proc);
6516 }
6517 binder_inner_proc_unlock(proc);
6518 if (last_node)
6519 binder_put_node(last_node);
6520
6521 if (print_all) {
6522 binder_proc_lock(proc);
6523 for (n = rb_first(&proc->refs_by_desc);
6524 n != NULL;
6525 n = rb_next(n))
6526 print_binder_ref_olocked(m, rb_entry(n,
6527 struct binder_ref,
6528 rb_node_desc));
6529 binder_proc_unlock(proc);
6530 }
6531 binder_alloc_print_allocated(m, &proc->alloc);
6532 binder_inner_proc_lock(proc);
6533 list_for_each_entry(w, &proc->todo, entry)
6534 print_binder_work_ilocked(m, proc, " ",
6535 " pending transaction", w);
6536 list_for_each_entry(w, &proc->delivered_death, entry) {
6537 seq_puts(m, " has delivered dead binder\n");
6538 break;
6539 }
6540 binder_inner_proc_unlock(proc);
6541 if (!print_all && m->count == header_pos)
6542 m->count = start_pos;
6543 }
6544
6545 static const char * const binder_return_strings[] = {
6546 "BR_ERROR",
6547 "BR_OK",
6548 "BR_TRANSACTION",
6549 "BR_REPLY",
6550 "BR_ACQUIRE_RESULT",
6551 "BR_DEAD_REPLY",
6552 "BR_TRANSACTION_COMPLETE",
6553 "BR_INCREFS",
6554 "BR_ACQUIRE",
6555 "BR_RELEASE",
6556 "BR_DECREFS",
6557 "BR_ATTEMPT_ACQUIRE",
6558 "BR_NOOP",
6559 "BR_SPAWN_LOOPER",
6560 "BR_FINISHED",
6561 "BR_DEAD_BINDER",
6562 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6563 "BR_FAILED_REPLY",
6564 "BR_FROZEN_REPLY",
6565 "BR_ONEWAY_SPAM_SUSPECT",
6566 "BR_TRANSACTION_PENDING_FROZEN"
6567 };
6568
6569 static const char * const binder_command_strings[] = {
6570 "BC_TRANSACTION",
6571 "BC_REPLY",
6572 "BC_ACQUIRE_RESULT",
6573 "BC_FREE_BUFFER",
6574 "BC_INCREFS",
6575 "BC_ACQUIRE",
6576 "BC_RELEASE",
6577 "BC_DECREFS",
6578 "BC_INCREFS_DONE",
6579 "BC_ACQUIRE_DONE",
6580 "BC_ATTEMPT_ACQUIRE",
6581 "BC_REGISTER_LOOPER",
6582 "BC_ENTER_LOOPER",
6583 "BC_EXIT_LOOPER",
6584 "BC_REQUEST_DEATH_NOTIFICATION",
6585 "BC_CLEAR_DEATH_NOTIFICATION",
6586 "BC_DEAD_BINDER_DONE",
6587 "BC_TRANSACTION_SG",
6588 "BC_REPLY_SG",
6589 };
6590
6591 static const char * const binder_objstat_strings[] = {
6592 "proc",
6593 "thread",
6594 "node",
6595 "ref",
6596 "death",
6597 "transaction",
6598 "transaction_complete"
6599 };
6600
6601 static void print_binder_stats(struct seq_file *m, const char *prefix,
6602 struct binder_stats *stats)
6603 {
6604 int i;
6605
6606 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6607 ARRAY_SIZE(binder_command_strings));
6608 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6609 int temp = atomic_read(&stats->bc[i]);
6610
6611 if (temp)
6612 seq_printf(m, "%s%s: %d\n", prefix,
6613 binder_command_strings[i], temp);
6614 }
6615
6616 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6617 ARRAY_SIZE(binder_return_strings));
6618 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6619 int temp = atomic_read(&stats->br[i]);
6620
6621 if (temp)
6622 seq_printf(m, "%s%s: %d\n", prefix,
6623 binder_return_strings[i], temp);
6624 }
6625
6626 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6627 ARRAY_SIZE(binder_objstat_strings));
6628 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6629 ARRAY_SIZE(stats->obj_deleted));
6630 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6631 int created = atomic_read(&stats->obj_created[i]);
6632 int deleted = atomic_read(&stats->obj_deleted[i]);
6633
6634 if (created || deleted)
6635 seq_printf(m, "%s%s: active %d total %d\n",
6636 prefix,
6637 binder_objstat_strings[i],
6638 created - deleted,
6639 created);
6640 }
6641 }
6642
6643 static void print_binder_proc_stats(struct seq_file *m,
6644 struct binder_proc *proc)
6645 {
6646 struct binder_work *w;
6647 struct binder_thread *thread;
6648 struct rb_node *n;
6649 int count, strong, weak, ready_threads;
6650 size_t free_async_space =
6651 binder_alloc_get_free_async_space(&proc->alloc);
6652
6653 seq_printf(m, "proc %d\n", proc->pid);
6654 seq_printf(m, "context %s\n", proc->context->name);
6655 count = 0;
6656 ready_threads = 0;
6657 binder_inner_proc_lock(proc);
6658 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6659 count++;
6660
6661 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6662 ready_threads++;
6663
6664 seq_printf(m, " threads: %d\n", count);
6665 seq_printf(m, " requested threads: %d+%d/%d\n"
6666 " ready threads %d\n"
6667 " free async space %zd\n", proc->requested_threads,
6668 proc->requested_threads_started, proc->max_threads,
6669 ready_threads,
6670 free_async_space);
6671 count = 0;
6672 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6673 count++;
6674 binder_inner_proc_unlock(proc);
6675 seq_printf(m, " nodes: %d\n", count);
6676 count = 0;
6677 strong = 0;
6678 weak = 0;
6679 binder_proc_lock(proc);
6680 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6681 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6682 rb_node_desc);
6683 count++;
6684 strong += ref->data.strong;
6685 weak += ref->data.weak;
6686 }
6687 binder_proc_unlock(proc);
6688 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6689
6690 count = binder_alloc_get_allocated_count(&proc->alloc);
6691 seq_printf(m, " buffers: %d\n", count);
6692
6693 binder_alloc_print_pages(m, &proc->alloc);
6694
6695 count = 0;
6696 binder_inner_proc_lock(proc);
6697 list_for_each_entry(w, &proc->todo, entry) {
6698 if (w->type == BINDER_WORK_TRANSACTION)
6699 count++;
6700 }
6701 binder_inner_proc_unlock(proc);
6702 seq_printf(m, " pending transactions: %d\n", count);
6703
6704 print_binder_stats(m, " ", &proc->stats);
6705 }
6706
6707 static int state_show(struct seq_file *m, void *unused)
6708 {
6709 struct binder_proc *proc;
6710 struct binder_node *node;
6711 struct binder_node *last_node = NULL;
6712
6713 seq_puts(m, "binder state:\n");
6714
6715 spin_lock(&binder_dead_nodes_lock);
6716 if (!hlist_empty(&binder_dead_nodes))
6717 seq_puts(m, "dead nodes:\n");
6718 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6719 /*
6720 * take a temporary reference on the node so it
6721 * survives and isn't removed from the list
6722 * while we print it.
6723 */
6724 node->tmp_refs++;
6725 spin_unlock(&binder_dead_nodes_lock);
6726 if (last_node)
6727 binder_put_node(last_node);
6728 binder_node_lock(node);
6729 print_binder_node_nilocked(m, node);
6730 binder_node_unlock(node);
6731 last_node = node;
6732 spin_lock(&binder_dead_nodes_lock);
6733 }
6734 spin_unlock(&binder_dead_nodes_lock);
6735 if (last_node)
6736 binder_put_node(last_node);
6737
6738 mutex_lock(&binder_procs_lock);
6739 hlist_for_each_entry(proc, &binder_procs, proc_node)
6740 print_binder_proc(m, proc, 1);
6741 mutex_unlock(&binder_procs_lock);
6742
6743 return 0;
6744 }
6745
6746 static int stats_show(struct seq_file *m, void *unused)
6747 {
6748 struct binder_proc *proc;
6749
6750 seq_puts(m, "binder stats:\n");
6751
6752 print_binder_stats(m, "", &binder_stats);
6753
6754 mutex_lock(&binder_procs_lock);
6755 hlist_for_each_entry(proc, &binder_procs, proc_node)
6756 print_binder_proc_stats(m, proc);
6757 mutex_unlock(&binder_procs_lock);
6758
6759 return 0;
6760 }
6761
6762 static int transactions_show(struct seq_file *m, void *unused)
6763 {
6764 struct binder_proc *proc;
6765
6766 seq_puts(m, "binder transactions:\n");
6767 mutex_lock(&binder_procs_lock);
6768 hlist_for_each_entry(proc, &binder_procs, proc_node)
6769 print_binder_proc(m, proc, 0);
6770 mutex_unlock(&binder_procs_lock);
6771
6772 return 0;
6773 }
6774
6775 static int proc_show(struct seq_file *m, void *unused)
6776 {
6777 struct binder_proc *itr;
6778 int pid = (unsigned long)m->private;
6779
6780 mutex_lock(&binder_procs_lock);
6781 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6782 if (itr->pid == pid) {
6783 seq_puts(m, "binder proc state:\n");
6784 print_binder_proc(m, itr, 1);
6785 }
6786 }
6787 mutex_unlock(&binder_procs_lock);
6788
6789 return 0;
6790 }
6791
6792 static void print_binder_transaction_log_entry(struct seq_file *m,
6793 struct binder_transaction_log_entry *e)
6794 {
6795 int debug_id = READ_ONCE(e->debug_id_done);
6796 /*
6797 * read barrier to guarantee debug_id_done read before
6798 * we print the log values
6799 */
6800 smp_rmb();
6801 seq_printf(m,
6802 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6803 e->debug_id, (e->call_type == 2) ? "reply" :
6804 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6805 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6806 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6807 e->return_error, e->return_error_param,
6808 e->return_error_line);
6809 /*
6810 * read-barrier to guarantee read of debug_id_done after
6811 * done printing the fields of the entry
6812 */
6813 smp_rmb();
6814 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6815 "\n" : " (incomplete)\n");
6816 }
6817
6818 static int transaction_log_show(struct seq_file *m, void *unused)
6819 {
6820 struct binder_transaction_log *log = m->private;
6821 unsigned int log_cur = atomic_read(&log->cur);
6822 unsigned int count;
6823 unsigned int cur;
6824 int i;
6825
6826 count = log_cur + 1;
6827 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6828 0 : count % ARRAY_SIZE(log->entry);
6829 if (count > ARRAY_SIZE(log->entry) || log->full)
6830 count = ARRAY_SIZE(log->entry);
6831 for (i = 0; i < count; i++) {
6832 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6833
6834 print_binder_transaction_log_entry(m, &log->entry[index]);
6835 }
6836 return 0;
6837 }
6838
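/*
 * File operations for the binder character devices.  binder_fops is not
 * static so that, when CONFIG_ANDROID_BINDERFS is enabled, binderfs can
 * reuse it for the device nodes created inside binderfs mounts.
 */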
6839 const struct file_operations binder_fops = {
6840 .owner = THIS_MODULE,
6841 .poll = binder_poll,
6842 .unlocked_ioctl = binder_ioctl,
6843 .compat_ioctl = compat_ptr_ioctl,
6844 .mmap = binder_mmap,
6845 .open = binder_open,
6846 .flush = binder_flush,
6847 .release = binder_release,
6848 };
6849
6850 DEFINE_SHOW_ATTRIBUTE(state);
6851 DEFINE_SHOW_ATTRIBUTE(stats);
6852 DEFINE_SHOW_ATTRIBUTE(transactions);
6853 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6854
6855 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6856 {
6857 .name = "state",
6858 .mode = 0444,
6859 .fops = &state_fops,
6860 .data = NULL,
6861 },
6862 {
6863 .name = "stats",
6864 .mode = 0444,
6865 .fops = &stats_fops,
6866 .data = NULL,
6867 },
6868 {
6869 .name = "transactions",
6870 .mode = 0444,
6871 .fops = &transactions_fops,
6872 .data = NULL,
6873 },
6874 {
6875 .name = "transaction_log",
6876 .mode = 0444,
6877 .fops = &transaction_log_fops,
6878 .data = &binder_transaction_log,
6879 },
6880 {
6881 .name = "failed_transaction_log",
6882 .mode = 0444,
6883 .fops = &transaction_log_fops,
6884 .data = &binder_transaction_log_failed,
6885 },
6886 {} /* terminator */
6887 };
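/*
 * These entries appear under the "binder" debugfs directory created in
 * binder_init(), e.g. (assuming debugfs is mounted at its usual
 * location):
 *
 *   cat /sys/kernel/debug/binder/stats
 *   cat /sys/kernel/debug/binder/failed_transaction_log
 */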
6888
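/*
 * init_binder_device() - register one binder char device.
 * Allocates a binder_device, registers it as a dynamic-minor misc
 * device under @name and links it into the global binder_devices list.
 * The allocation is freed here if misc_register() fails.
 */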
6889 static int __init init_binder_device(const char *name)
6890 {
6891 int ret;
6892 struct binder_device *binder_device;
6893
6894 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6895 if (!binder_device)
6896 return -ENOMEM;
6897
6898 binder_device->miscdev.fops = &binder_fops;
6899 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6900 binder_device->miscdev.name = name;
6901
6902 refcount_set(&binder_device->ref, 1);
6903 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6904 binder_device->context.name = name;
6905 mutex_init(&binder_device->context.context_mgr_node_lock);
6906
6907 ret = misc_register(&binder_device->miscdev);
6908 if (ret < 0) {
6909 kfree(binder_device);
6910 return ret;
6911 }
6912
6913 hlist_add_head(&binder_device->hlist, &binder_devices);
6914
6915 return ret;
6916 }
6917
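/*
 * binder_init() - driver initialization.
 * Sets up the allocator shrinker, the transaction logs and the debugfs
 * hierarchy, registers one misc device per name listed in
 * binder_devices_param when binderfs is not enabled, and finally
 * initializes binderfs.  On failure, every device registered so far is
 * deregistered and freed again.
 */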
6918 static int __init binder_init(void)
6919 {
6920 int ret;
6921 char *device_name, *device_tmp;
6922 struct binder_device *device;
6923 struct hlist_node *tmp;
6924 char *device_names = NULL;
6925 const struct binder_debugfs_entry *db_entry;
6926
6927 ret = binder_alloc_shrinker_init();
6928 if (ret)
6929 return ret;
6930
6931 atomic_set(&binder_transaction_log.cur, ~0U);
6932 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6933
6934 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6935
6936 binder_for_each_debugfs_entry(db_entry)
6937 debugfs_create_file(db_entry->name,
6938 db_entry->mode,
6939 binder_debugfs_dir_entry_root,
6940 db_entry->data,
6941 db_entry->fops);
6942
6943 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6944 binder_debugfs_dir_entry_root);
6945
6946 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6947 strcmp(binder_devices_param, "") != 0) {
6948 /*
6949 * Copy the module_parameter string, because we don't want to
6950 * tokenize it in-place.
6951 */
6952 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6953 if (!device_names) {
6954 ret = -ENOMEM;
6955 goto err_alloc_device_names_failed;
6956 }
6957
6958 device_tmp = device_names;
6959 while ((device_name = strsep(&device_tmp, ","))) {
6960 ret = init_binder_device(device_name);
6961 if (ret)
6962 goto err_init_binder_device_failed;
6963 }
6964 }
6965
6966 ret = init_binderfs();
6967 if (ret)
6968 goto err_init_binder_device_failed;
6969
6970 return ret;
6971
6972 err_init_binder_device_failed:
6973 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6974 misc_deregister(&device->miscdev);
6975 hlist_del(&device->hlist);
6976 kfree(device);
6977 }
6978
6979 kfree(device_names);
6980
6981 err_alloc_device_names_failed:
6982 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6983 binder_alloc_shrinker_exit();
6984
6985 return ret;
6986 }
6987
6988 device_initcall(binder_init);
6989
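/*
 * Defining CREATE_TRACE_POINTS before re-including binder_trace.h turns
 * the tracepoint declarations into definitions; the transaction-received
 * tracepoint is additionally exported for GPL modules.
 */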
6990 #define CREATE_TRACE_POINTS
6991 #include "binder_trace.h"
6992 EXPORT_TRACEPOINT_SYMBOL_GPL(binder_transaction_received);
6993
6994 MODULE_LICENSE("GPL v2");
6995