// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
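
/*
 * Illustrative sketch only (not driver code): one legal nesting under
 * the ordering rules above, using the lock helpers defined later in
 * this file. The elided body is hypothetical.
 *
 *	binder_node_lock(node);
 *	if (node->proc)
 *		binder_inner_proc_lock(node->proc);
 *	...touch node state and the proc's todo lists...
 *	if (node->proc)
 *		binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 */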

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>
#include <linux/android_vendor.h>

#include <uapi/linux/sched/types.h>
#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

#include "../../kernel/sched/sched.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
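
/*
 * Illustrative usage of the macros above (hypothetical call site): the
 * message below is compiled in but only printed when the
 * BINDER_DEBUG_OPEN_CLOSE bit is set in the debug_mask module
 * parameter, e.g. via /sys/module/binder/parameters/debug_mask.
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 *		     "%d: open\n", current->pid);
 */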

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
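
/*
 * A reader of the log is expected to pair with the smp_wmb() above,
 * roughly as in this sketch (the real consumer is the debugfs dump
 * code elsewhere in the driver): a snapshot is consistent only if
 * debug_id_done has caught up with debug_id.
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();	// pairs with the smp_wmb() above
 *	...read the other fields...
 *	if (debug_id && debug_id == READ_ONCE(e->debug_id_done))
 *		...the snapshot is complete and consistent...
 */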

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields.
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such a
	 * thread risks waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
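
/*
 * Illustrative producer pattern (a sketch, not a verbatim call site):
 * queue a transaction's work item for a chosen thread, then wake it
 * while still holding the inner lock. binder_wakeup_thread_ilocked()
 * is defined further down in this file.
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 */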

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
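
/*
 * Illustrative consumer loop (a sketch, not a verbatim call site):
 * drain a worklist under the inner lock, dropping the lock while each
 * item is handled so the handler may sleep or take other locks.
 *
 *	binder_inner_proc_lock(proc);
 *	while ((w = binder_dequeue_work_head_ilocked(list))) {
 *		binder_inner_proc_unlock(proc);
 *		...handle w...
 *		binder_inner_proc_lock(proc);
 *	}
 *	binder_inner_proc_unlock(proc);
 */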

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	int ret = 0;

	trace_android_vh_binder_has_work_ilocked(thread, do_proc_work, &ret);
	if (ret)
		return true;
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

#ifdef CONFIG_UCLAMP_TASK
static void set_binder_prio_uclamp(struct binder_priority *prio, struct task_struct *task)
{
	if (!uclamp_is_used())
		return;

	if (task) {
		prio->uclamp[UCLAMP_MIN] = task->uclamp_req[UCLAMP_MIN].value;
		prio->uclamp[UCLAMP_MAX] = task->uclamp_req[UCLAMP_MAX].value;
	} else {
		prio->uclamp[UCLAMP_MIN] = 0;
		prio->uclamp[UCLAMP_MAX] = SCHED_CAPACITY_SCALE;
	}
}

static void set_inherited_uclamp(struct binder_transaction *t)
{
	if (!uclamp_is_used())
		return;

	t->priority.uclamp[UCLAMP_MIN] = uclamp_eff_value(current, UCLAMP_MIN);
	t->priority.uclamp[UCLAMP_MAX] = uclamp_eff_value(current, UCLAMP_MAX);
}

static bool is_uclamp_equal(struct task_struct *task, const struct binder_priority *desired)
{
	if (!uclamp_is_used())
		return true;

	return task->uclamp_req[UCLAMP_MIN].value == desired->uclamp[UCLAMP_MIN]
		&& task->uclamp_req[UCLAMP_MAX].value == desired->uclamp[UCLAMP_MAX];
}

#else
static void set_binder_prio_uclamp(struct binder_priority *prio, struct task_struct *task) { }
static void set_inherited_uclamp(struct binder_transaction *t) { }
static bool is_uclamp_equal(struct task_struct *task, const struct binder_priority *desired)
{
	return true;
}
#endif

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_RT_PRIO - 1 - user_priority;
}
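
/*
 * Worked examples of the two mappings, assuming the usual scheduler
 * constants (MAX_RT_PRIO == 100, nice 0 == kernel prio 120):
 *
 *	to_userspace_prio(SCHED_NORMAL, 110) == -10	(nice -10)
 *	to_kernel_prio(SCHED_NORMAL, 10)     == 130	(nice 10)
 *	to_userspace_prio(SCHED_FIFO, 98)    == 1	(rtprio 1)
 *	to_kernel_prio(SCHED_FIFO, 99)       == 0	(highest RT)
 */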

static void binder_do_set_priority(struct binder_thread *thread,
				   const struct binder_priority *desired,
				   bool verify)
{
	struct task_struct *task = thread->task;
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired->sched_policy;
	struct sched_attr attrs = {
		.sched_flags = SCHED_FLAG_RESET_ON_FORK
	};

	if (uclamp_is_used()) {
		attrs.sched_flags |= SCHED_FLAG_UTIL_CLAMP;
		attrs.sched_util_min = desired->uclamp[UCLAMP_MIN];
		attrs.sched_util_max = desired->uclamp[UCLAMP_MAX];
	}

	if (task->policy == policy && task->normal_prio == desired->prio
	    && is_uclamp_equal(task, desired)) {
		spin_lock(&thread->prio_lock);
		if (thread->prio_state == BINDER_PRIO_PENDING)
			thread->prio_state = BINDER_PRIO_SET;
		spin_unlock(&thread->prio_lock);
		return;
	}

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired->prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired->sched_policy ||
	    to_kernel_prio(policy, priority) != desired->prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			     task->pid, desired->prio,
			     to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired->prio);

	spin_lock(&thread->prio_lock);
	if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
		/*
		 * A new priority has been set by an incoming nested
		 * transaction. Abort this priority restore and allow
		 * the transaction to run at the new desired priority.
		 */
		spin_unlock(&thread->prio_lock);
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: %s: aborting priority restore\n",
			     thread->pid, __func__);
		return;
	}

	/* Set the actual priority and uclamp */
	if (task->policy != policy || is_rt_policy(policy)) {
		attrs.sched_policy = policy;
		attrs.sched_priority = is_rt_policy(policy) ? priority : 0;
		attrs.sched_nice = PRIO_TO_NICE(task->static_prio);
	} else {
		attrs.sched_flags |= SCHED_FLAG_KEEP_ALL;
	}

	sched_setattr_nocheck(task, &attrs);

	if (is_fair_policy(policy))
		set_user_nice(task, priority);

	thread->prio_state = BINDER_PRIO_SET;
	spin_unlock(&thread->prio_lock);
}

static void binder_set_priority(struct binder_thread *thread,
				const struct binder_priority *desired)
{
	binder_do_set_priority(thread, desired, /* verify = */ true);
}

static void binder_restore_priority(struct binder_thread *thread,
				    const struct binder_priority *desired)
{
	binder_do_set_priority(thread, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct binder_thread *thread,
					struct binder_transaction *t,
					struct binder_node *node)
{
	struct task_struct *task = thread->task;
	struct binder_priority desired = t->priority;
	const struct binder_priority node_prio = {
		.sched_policy = node->sched_policy,
		.prio = node->min_priority,
	};
	bool skip = false;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;

	trace_android_vh_binder_priority_skip(task, &skip);
	if (skip)
		return;

	if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
		desired.prio = NICE_TO_PRIO(0);
		desired.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired.prio = node_prio.prio;
		desired.sched_policy = node_prio.sched_policy;
	}

	spin_lock(&thread->prio_lock);
	if (thread->prio_state == BINDER_PRIO_PENDING) {
		/*
		 * The task is in the middle of a priority change;
		 * saving its current values would be incorrect.
		 * Instead, save the pending priority and signal
		 * the task to abort the priority restore.
		 */
		t->saved_priority = thread->prio_next;
		thread->prio_state = BINDER_PRIO_ABORT;
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: saved pending priority %d\n",
			     current->pid, thread->prio_next.prio);
	} else {
		t->saved_priority.sched_policy = task->policy;
		t->saved_priority.prio = task->normal_prio;
		set_binder_prio_uclamp(&t->saved_priority, task);
	}
	spin_unlock(&thread->prio_lock);

	binder_set_priority(thread, &desired);
	trace_android_vh_binder_set_priority(t, task);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
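
/*
 * Illustrative lookup pattern (hypothetical caller): the returned node
 * carries a tmp_ref taken in binder_get_node_ilocked(), which the
 * caller must drop with binder_put_node() (defined below) once done.
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);
 *	}
 */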

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes).
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
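
/*
 * Illustrative caller contract for the lookup above (see
 * binder_inc_ref_for_node() later in this file for the real user):
 * a pre-allocated new_ref that loses the race must be freed.
 *
 *	ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	...
 *	if (new_ref && ref != new_ref)
 *		kfree(new_ref);
 */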

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	proc containing the ref
 * @node:	target node
 * @strong:	true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Cleaning up the new reference,
		 * which has strong == 0 while a tmp_ref on the node
		 * is held, will not decrement the node. The new_ref
		 * gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * The atomic protects the counter value; the thread can
	 * only be freed once tmp_ref reaches zero while
	 * thread->is_dead is true.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

1679 /**
1680 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1681 * @proc: proc to decrement
1682 *
1683 * A binder_proc needs to be kept alive while being used to create or
1684 * handle a transaction. proc->tmp_ref is incremented when
1685 * creating a new transaction or the binder_proc is currently in-use
1686 * by threads that are being released. When done with the binder_proc,
1687 * this function is called to decrement the counter and free the
1688 * proc if appropriate (proc has been released, all threads have
1689 * been released and not currently in use to process a transaction).
1690 */
1691 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1692 {
1693 binder_inner_proc_lock(proc);
1694 proc->tmp_ref--;
1695 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1696 !proc->tmp_ref) {
1697 binder_inner_proc_unlock(proc);
1698 binder_free_proc(proc);
1699 return;
1700 }
1701 binder_inner_proc_unlock(proc);
1702 }
1703
1704 /**
1705 * binder_get_txn_from() - safely extract the "from" thread in transaction
1706 * @t: binder transaction for t->from
1707 *
1708 * Atomically return the "from" thread and increment the tmp_ref
1709 * count for the thread to ensure it stays alive until
1710 * binder_thread_dec_tmpref() is called.
1711 *
1712 * Return: the value of t->from
1713 */
1714 static struct binder_thread *binder_get_txn_from(
1715 struct binder_transaction *t)
1716 {
1717 struct binder_thread *from;
1718
1719 spin_lock(&t->lock);
1720 from = t->from;
1721 if (from)
1722 atomic_inc(&from->tmp_ref);
1723 spin_unlock(&t->lock);
1724 return from;
1725 }
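/*
 * Illustrative pairing (a sketch, not driver code): a successful
 * binder_get_txn_from() must be balanced by binder_thread_dec_tmpref()
 * once the caller no longer needs the thread:
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		(from cannot be freed in this window)
 *		binder_thread_dec_tmpref(from);
 *	}
 */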
1726
1727 /**
1728 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1729 * @t: binder transaction for t->from
1730 *
1731 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1732 * to guarantee that the thread cannot be released while operating on it.
1733 * The caller must call binder_inner_proc_unlock() to release the inner lock
1734 * as well as call binder_dec_thread_txn() to release the reference.
1735 *
1736 * Return: the value of t->from
1737 */
1738 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1739 struct binder_transaction *t)
1740 __acquires(&t->from->proc->inner_lock)
1741 {
1742 struct binder_thread *from;
1743
1744 from = binder_get_txn_from(t);
1745 if (!from) {
1746 __acquire(&from->proc->inner_lock);
1747 return NULL;
1748 }
1749 binder_inner_proc_lock(from->proc);
1750 if (t->from) {
1751 BUG_ON(from != t->from);
1752 return from;
1753 }
1754 binder_inner_proc_unlock(from->proc);
1755 __acquire(&from->proc->inner_lock);
1756 binder_thread_dec_tmpref(from);
1757 return NULL;
1758 }
1759
1760 /**
1761 * binder_free_txn_fixups() - free unprocessed fd fixups
1762 * @t: binder transaction whose fd fixups are to be freed
1763 *
1764 * If the transaction is being torn down prior to being
1765 * processed by the target process, free all of the
1766 * fd fixups and fput the file structs. It is safe to
1767 * call this function after the fixups have been
1768 * processed -- in that case, the list will be empty.
1769 */
1770 static void binder_free_txn_fixups(struct binder_transaction *t)
1771 {
1772 struct binder_txn_fd_fixup *fixup, *tmp;
1773
1774 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1775 fput(fixup->file);
1776 list_del(&fixup->fixup_entry);
1777 kfree(fixup);
1778 }
1779 }
1780
1781 static void binder_txn_latency_free(struct binder_transaction *t)
1782 {
1783 int from_proc, from_thread, to_proc, to_thread;
1784
1785 spin_lock(&t->lock);
1786 from_proc = t->from ? t->from->proc->pid : 0;
1787 from_thread = t->from ? t->from->pid : 0;
1788 to_proc = t->to_proc ? t->to_proc->pid : 0;
1789 to_thread = t->to_thread ? t->to_thread->pid : 0;
1790 spin_unlock(&t->lock);
1791
1792 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1793 }
1794
1795 static void binder_free_transaction(struct binder_transaction *t)
1796 {
1797 struct binder_proc *target_proc = t->to_proc;
1798
1799 trace_android_vh_free_oem_binder_struct(t);
1800 if (target_proc) {
1801 binder_inner_proc_lock(target_proc);
1802 target_proc->outstanding_txns--;
1803 if (target_proc->outstanding_txns < 0)
1804 pr_warn("%s: Unexpected outstanding_txns %d\n",
1805 __func__, target_proc->outstanding_txns);
1806 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1807 wake_up_interruptible_all(&target_proc->freeze_wait);
1808 if (t->buffer)
1809 t->buffer->transaction = NULL;
1810 binder_inner_proc_unlock(target_proc);
1811 }
1812 if (trace_binder_txn_latency_free_enabled())
1813 binder_txn_latency_free(t);
1814 /*
1815 * If the transaction has no target_proc, then
1816 * t->buffer->transaction has already been cleared.
1817 */
1818 binder_free_txn_fixups(t);
1819 kfree(t);
1820 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1821 }
1822
1823 static void binder_send_failed_reply(struct binder_transaction *t,
1824 uint32_t error_code)
1825 {
1826 struct binder_thread *target_thread;
1827 struct binder_transaction *next;
1828
1829 BUG_ON(t->flags & TF_ONE_WAY);
1830 while (1) {
1831 target_thread = binder_get_txn_from_and_acq_inner(t);
1832 if (target_thread) {
1833 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1834 "send failed reply for transaction %d to %d:%d\n",
1835 t->debug_id,
1836 target_thread->proc->pid,
1837 target_thread->pid);
1838
1839 binder_pop_transaction_ilocked(target_thread, t);
1840 if (target_thread->reply_error.cmd == BR_OK) {
1841 target_thread->reply_error.cmd = error_code;
1842 binder_enqueue_thread_work_ilocked(
1843 target_thread,
1844 &target_thread->reply_error.work);
1845 wake_up_interruptible(&target_thread->wait);
1846 } else {
1847 /*
1848 * Cannot get here for normal operation, but
1849 * we can if multiple synchronous transactions
1850 * are sent without blocking for responses.
1851 * Just ignore the 2nd error in this case.
1852 */
1853 pr_warn("Unexpected reply error: %u\n",
1854 target_thread->reply_error.cmd);
1855 }
1856 binder_inner_proc_unlock(target_thread->proc);
1857 binder_thread_dec_tmpref(target_thread);
1858 binder_free_transaction(t);
1859 return;
1860 }
1861 __release(&target_thread->proc->inner_lock);
1862 next = t->from_parent;
1863
1864 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1865 "send failed reply for transaction %d, target dead\n",
1866 t->debug_id);
1867
1868 binder_free_transaction(t);
1869 if (next == NULL) {
1870 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1871 "reply failed, no target thread at root\n");
1872 return;
1873 }
1874 t = next;
1875 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1876 "reply failed, no target thread -- retry %d\n",
1877 t->debug_id);
1878 }
1879 }
1880
1881 /**
1882 * binder_cleanup_transaction() - cleans up undelivered transaction
1883 * @t: transaction that needs to be cleaned up
1884 * @reason: reason the transaction wasn't delivered
1885 * @error_code: error to return to caller (if synchronous call)
1886 */
1887 static void binder_cleanup_transaction(struct binder_transaction *t,
1888 const char *reason,
1889 uint32_t error_code)
1890 {
1891 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1892 binder_send_failed_reply(t, error_code);
1893 } else {
1894 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1895 "undelivered transaction %d, %s\n",
1896 t->debug_id, reason);
1897 binder_free_transaction(t);
1898 }
1899 }
1900
1901 /**
1902 * binder_get_object() - gets object and checks for valid metadata
1903 * @proc: binder_proc owning the buffer
1904 * @u: sender's user pointer to base of buffer
1905 * @buffer: binder_buffer that we're parsing.
1906 * @offset: offset in the @buffer at which to validate an object.
1907 * @object: struct binder_object to read into
1908 *
1909 * Copy the binder object at the given offset into @object. If @u is
1910 * provided then the copy is from the sender's buffer. If not, then
1911 * it is copied from the target's @buffer.
1912 *
1913 * Return: If there's a valid metadata object at @offset, the
1914 * size of that object. Otherwise, it returns zero. The object
1915 * is read into the struct binder_object pointed to by @object.
1916 */
1917 static size_t binder_get_object(struct binder_proc *proc,
1918 const void __user *u,
1919 struct binder_buffer *buffer,
1920 unsigned long offset,
1921 struct binder_object *object)
1922 {
1923 size_t read_size;
1924 struct binder_object_header *hdr;
1925 size_t object_size = 0;
1926
1927 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1928 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1929 !IS_ALIGNED(offset, sizeof(u32)))
1930 return 0;
1931
1932 if (u) {
1933 if (copy_from_user(object, u + offset, read_size))
1934 return 0;
1935 } else {
1936 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1937 offset, read_size))
1938 return 0;
1939 }
1940
1941 /* Ok, now see if we read a complete object. */
1942 hdr = &object->hdr;
1943 switch (hdr->type) {
1944 case BINDER_TYPE_BINDER:
1945 case BINDER_TYPE_WEAK_BINDER:
1946 case BINDER_TYPE_HANDLE:
1947 case BINDER_TYPE_WEAK_HANDLE:
1948 object_size = sizeof(struct flat_binder_object);
1949 break;
1950 case BINDER_TYPE_FD:
1951 object_size = sizeof(struct binder_fd_object);
1952 break;
1953 case BINDER_TYPE_PTR:
1954 object_size = sizeof(struct binder_buffer_object);
1955 break;
1956 case BINDER_TYPE_FDA:
1957 object_size = sizeof(struct binder_fd_array_object);
1958 break;
1959 default:
1960 return 0;
1961 }
1962 if (offset <= buffer->data_size - object_size &&
1963 buffer->data_size >= object_size)
1964 return object_size;
1965 else
1966 return 0;
1967 }
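/*
 * Buffer layout assumed by the bounds checks above (an informal sketch;
 * proportions are examples only):
 *
 *	|<-------- data area -------->|<- pad ->|<--- offsets array --->|
 *	0                    data_size          ALIGN(data_size, void *)
 *
 * Each offsets-array entry is a binder_size_t locating one object in the
 * data area; binder_get_object() verifies that the object is u32-aligned
 * and that offset + object_size stays within data_size.
 */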
1968
1969 /**
1970 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1971 * @proc: binder_proc owning the buffer
1972 * @b: binder_buffer containing the object
1973 * @object: struct binder_object to read into
1974 * @index: index in offset array at which the binder_buffer_object is
1975 * located
1976 * @start_offset: points to the start of the offset array
1977 * @object_offsetp: offset of @object read from @b
1978 * @num_valid: the number of valid offsets in the offset array
1979 *
1980 * Return: If @index is within the valid range of the offset array
1981 * described by @start and @num_valid, and if there's a valid
1982 * binder_buffer_object at the offset found in index @index
1983 * of the offset array, that object is returned. Otherwise,
1984 * %NULL is returned.
1985 * Note that the offset found in index @index itself is not
1986 * verified; this function assumes that @num_valid elements
1987 * from @start were previously verified to have valid offsets.
1988 * If @object_offsetp is non-NULL, then the offset within
1989 * @b is written to it.
1990 */
1991 static struct binder_buffer_object *binder_validate_ptr(
1992 struct binder_proc *proc,
1993 struct binder_buffer *b,
1994 struct binder_object *object,
1995 binder_size_t index,
1996 binder_size_t start_offset,
1997 binder_size_t *object_offsetp,
1998 binder_size_t num_valid)
1999 {
2000 size_t object_size;
2001 binder_size_t object_offset;
2002 unsigned long buffer_offset;
2003
2004 if (index >= num_valid)
2005 return NULL;
2006
2007 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2008 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2009 b, buffer_offset,
2010 sizeof(object_offset)))
2011 return NULL;
2012 object_size = binder_get_object(proc, NULL, b, object_offset, object);
2013 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2014 return NULL;
2015 if (object_offsetp)
2016 *object_offsetp = object_offset;
2017
2018 return &object->bbo;
2019 }
2020
2021 /**
2022 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2023 * @proc: binder_proc owning the buffer
2024 * @b: transaction buffer
2025 * @objects_start_offset: offset to start of objects buffer
2026 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2027 * @fixup_offset: start offset in @buffer to fix up
2028 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2029 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2030 *
2031 * Return: %true if a fixup at @fixup_offset in buffer @b is
2032 * allowed.
2033 *
2034 * For safety reasons, we only allow fixups inside a buffer to happen
2035 * at increasing offsets; additionally, we only allow fixup on the last
2036 * buffer object that was verified, or one of its parents.
2037 *
2038 * Example of what is allowed:
2039 *
2040 * A
2041 * B (parent = A, offset = 0)
2042 * C (parent = A, offset = 16)
2043 * D (parent = C, offset = 0)
2044 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2045 *
2046 * Examples of what is not allowed:
2047 *
2048 * Decreasing offsets within the same parent:
2049 * A
2050 * C (parent = A, offset = 16)
2051 * B (parent = A, offset = 0) // decreasing offset within A
2052 *
2053 * Referring to a parent that wasn't the last object or any of its parents:
2054 * A
2055 * B (parent = A, offset = 0)
2056 * C (parent = A, offset = 0)
2057 * C (parent = A, offset = 16)
2058 * D (parent = B, offset = 0) // B is not A or any of A's parents
2059 */
2060 static bool binder_validate_fixup(struct binder_proc *proc,
2061 struct binder_buffer *b,
2062 binder_size_t objects_start_offset,
2063 binder_size_t buffer_obj_offset,
2064 binder_size_t fixup_offset,
2065 binder_size_t last_obj_offset,
2066 binder_size_t last_min_offset)
2067 {
2068 if (!last_obj_offset) {
2069 /* no object has been verified yet, so no fixup target exists */
2070 return false;
2071 }
2072
2073 while (last_obj_offset != buffer_obj_offset) {
2074 unsigned long buffer_offset;
2075 struct binder_object last_object;
2076 struct binder_buffer_object *last_bbo;
2077 size_t object_size = binder_get_object(proc, NULL, b,
2078 last_obj_offset,
2079 &last_object);
2080 if (object_size != sizeof(*last_bbo))
2081 return false;
2082
2083 last_bbo = &last_object.bbo;
2084 /*
2085 * Safe to retrieve the parent of last_obj, since it
2086 * was already previously verified by the driver.
2087 */
2088 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2089 return false;
2090 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2091 buffer_offset = objects_start_offset +
2092 sizeof(binder_size_t) * last_bbo->parent;
2093 if (binder_alloc_copy_from_buffer(&proc->alloc,
2094 &last_obj_offset,
2095 b, buffer_offset,
2096 sizeof(last_obj_offset)))
2097 return false;
2098 }
2099 return (fixup_offset >= last_min_offset);
2100 }
2101
2102 /**
2103 * struct binder_task_work_cb - for deferred close
2104 *
2105 * @twork: callback_head for task work
2106 * @file: file to close
2107 *
2108 * Structure to pass task work to be handled after
2109 * returning from binder_ioctl() via task_work_add().
2110 */
2111 struct binder_task_work_cb {
2112 struct callback_head twork;
2113 struct file *file;
2114 };
2115
2116 /**
2117 * binder_do_fd_close() - close list of file descriptors
2118 * @twork: callback head for task work
2119 *
2120 * It is not safe to call ksys_close() during the binder_ioctl()
2121 * function if there is a chance that binder's own file descriptor
2122 * might be closed. This is to meet the requirements for using
2123 * fdget() (see comments for __fget_light()). Therefore use
2124 * task_work_add() to schedule the close operation once we have
2125 * returned from binder_ioctl(). This function is the callback
2126 * for that mechanism and drops the final reference (fput()) on
2127 * the file detached from the given file descriptor.
2128 */
2129 static void binder_do_fd_close(struct callback_head *twork)
2130 {
2131 struct binder_task_work_cb *twcb = container_of(twork,
2132 struct binder_task_work_cb, twork);
2133
2134 fput(twcb->file);
2135 kfree(twcb);
2136 }
2137
2138 /**
2139 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2140 * @fd: file-descriptor to close
2141 *
2142 * See comments in binder_do_fd_close(). This function is used to schedule
2143 * a file-descriptor to be closed after returning from binder_ioctl().
2144 */
2145 static void binder_deferred_fd_close(int fd)
2146 {
2147 struct binder_task_work_cb *twcb;
2148
2149 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2150 if (!twcb)
2151 return;
2152 init_task_work(&twcb->twork, binder_do_fd_close);
2153 close_fd_get_file(fd, &twcb->file);
2154 if (twcb->file) {
2155 filp_close(twcb->file, current->files);
2156 task_work_add(current, &twcb->twork, TWA_RESUME);
2157 } else {
2158 kfree(twcb);
2159 }
2160 }
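/*
 * Usage sketch (illustrative): code that closes fds from ioctl context
 * queues the close and then forces the thread back to user space so the
 * queued task work actually runs, e.g.:
 *
 *	binder_deferred_fd_close(fd);
 *	if (thread)
 *		thread->looper_need_return = true;
 */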
2161
2162 static void binder_transaction_buffer_release(struct binder_proc *proc,
2163 struct binder_thread *thread,
2164 struct binder_buffer *buffer,
2165 binder_size_t off_end_offset,
2166 bool is_failure)
2167 {
2168 int debug_id = buffer->debug_id;
2169 binder_size_t off_start_offset, buffer_offset;
2170
2171 binder_debug(BINDER_DEBUG_TRANSACTION,
2172 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2173 proc->pid, buffer->debug_id,
2174 buffer->data_size, buffer->offsets_size,
2175 (unsigned long long)off_end_offset);
2176
2177 if (buffer->target_node)
2178 binder_dec_node(buffer->target_node, 1, 0);
2179
2180 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2181
2182 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2183 buffer_offset += sizeof(binder_size_t)) {
2184 struct binder_object_header *hdr;
2185 size_t object_size = 0;
2186 struct binder_object object;
2187 binder_size_t object_offset;
2188
2189 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2190 buffer, buffer_offset,
2191 sizeof(object_offset)))
2192 object_size = binder_get_object(proc, NULL, buffer,
2193 object_offset, &object);
2194 if (object_size == 0) {
2195 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2196 debug_id, (u64)object_offset, buffer->data_size);
2197 continue;
2198 }
2199 hdr = &object.hdr;
2200 switch (hdr->type) {
2201 case BINDER_TYPE_BINDER:
2202 case BINDER_TYPE_WEAK_BINDER: {
2203 struct flat_binder_object *fp;
2204 struct binder_node *node;
2205
2206 fp = to_flat_binder_object(hdr);
2207 node = binder_get_node(proc, fp->binder);
2208 if (node == NULL) {
2209 pr_err("transaction release %d bad node %016llx\n",
2210 debug_id, (u64)fp->binder);
2211 break;
2212 }
2213 binder_debug(BINDER_DEBUG_TRANSACTION,
2214 " node %d u%016llx\n",
2215 node->debug_id, (u64)node->ptr);
2216 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2217 0);
2218 binder_put_node(node);
2219 } break;
2220 case BINDER_TYPE_HANDLE:
2221 case BINDER_TYPE_WEAK_HANDLE: {
2222 struct flat_binder_object *fp;
2223 struct binder_ref_data rdata;
2224 int ret;
2225
2226 fp = to_flat_binder_object(hdr);
2227 ret = binder_dec_ref_for_handle(proc, fp->handle,
2228 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2229
2230 if (ret) {
2231 pr_err("transaction release %d bad handle %d, ret = %d\n",
2232 debug_id, fp->handle, ret);
2233 break;
2234 }
2235 binder_debug(BINDER_DEBUG_TRANSACTION,
2236 " ref %d desc %d\n",
2237 rdata.debug_id, rdata.desc);
2238 } break;
2239
2240 case BINDER_TYPE_FD: {
2241 /*
2242 * No need to close the file here since user-space
2243 * closes it for successfully delivered
2244 * transactions. For transactions that weren't
2245 * delivered, the new fd was never allocated so
2246 * there is no need to close and the fput on the
2247 * file is done when the transaction is torn
2248 * down.
2249 */
2250 } break;
2251 case BINDER_TYPE_PTR:
2252 /*
2253 * Nothing to do here, this will get cleaned up when the
2254 * transaction buffer gets freed
2255 */
2256 break;
2257 case BINDER_TYPE_FDA: {
2258 struct binder_fd_array_object *fda;
2259 struct binder_buffer_object *parent;
2260 struct binder_object ptr_object;
2261 binder_size_t fda_offset;
2262 size_t fd_index;
2263 binder_size_t fd_buf_size;
2264 binder_size_t num_valid;
2265
2266 if (is_failure) {
2267 /*
2268 * The fd fixups have not been applied so no
2269 * fds need to be closed.
2270 */
2271 continue;
2272 }
2273
2274 num_valid = (buffer_offset - off_start_offset) /
2275 sizeof(binder_size_t);
2276 fda = to_binder_fd_array_object(hdr);
2277 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2278 fda->parent,
2279 off_start_offset,
2280 NULL,
2281 num_valid);
2282 if (!parent) {
2283 pr_err("transaction release %d bad parent offset\n",
2284 debug_id);
2285 continue;
2286 }
2287 fd_buf_size = sizeof(u32) * fda->num_fds;
2288 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2289 pr_err("transaction release %d invalid number of fds (%lld)\n",
2290 debug_id, (u64)fda->num_fds);
2291 continue;
2292 }
2293 if (fd_buf_size > parent->length ||
2294 fda->parent_offset > parent->length - fd_buf_size) {
2295 /* No space for all file descriptors here. */
2296 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2297 debug_id, (u64)fda->num_fds);
2298 continue;
2299 }
2300 /*
2301 * the source data for binder_buffer_object is visible
2302 * to user-space and the @buffer element is the user
2303 * pointer to the buffer_object containing the fd_array.
2304 * Convert the address to an offset relative to
2305 * the base of the transaction buffer.
2306 */
2307 fda_offset =
2308 (parent->buffer - (uintptr_t)buffer->user_data) +
2309 fda->parent_offset;
2310 for (fd_index = 0; fd_index < fda->num_fds;
2311 fd_index++) {
2312 u32 fd;
2313 int err;
2314 binder_size_t offset = fda_offset +
2315 fd_index * sizeof(fd);
2316
2317 err = binder_alloc_copy_from_buffer(
2318 &proc->alloc, &fd, buffer,
2319 offset, sizeof(fd));
2320 WARN_ON(err);
2321 if (!err) {
2322 binder_deferred_fd_close(fd);
2323 /*
2324 * Need to make sure the thread goes
2325 * back to userspace to complete the
2326 * deferred close
2327 */
2328 if (thread)
2329 thread->looper_need_return = true;
2330 }
2331 }
2332 } break;
2333 default:
2334 pr_err("transaction release %d bad object type %x\n",
2335 debug_id, hdr->type);
2336 break;
2337 }
2338 }
2339 }
2340
2341 /* Clean up all the objects in the buffer */
2342 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2343 struct binder_thread *thread,
2344 struct binder_buffer *buffer,
2345 bool is_failure)
2346 {
2347 binder_size_t off_end_offset;
2348
2349 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2350 off_end_offset += buffer->offsets_size;
2351
2352 binder_transaction_buffer_release(proc, thread, buffer,
2353 off_end_offset, is_failure);
2354 }
2355
2356 static int binder_translate_binder(struct flat_binder_object *fp,
2357 struct binder_transaction *t,
2358 struct binder_thread *thread)
2359 {
2360 struct binder_node *node;
2361 struct binder_proc *proc = thread->proc;
2362 struct binder_proc *target_proc = t->to_proc;
2363 struct binder_ref_data rdata;
2364 int ret = 0;
2365
2366 node = binder_get_node(proc, fp->binder);
2367 if (!node) {
2368 node = binder_new_node(proc, fp);
2369 if (!node)
2370 return -ENOMEM;
2371 }
2372 if (fp->cookie != node->cookie) {
2373 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2374 proc->pid, thread->pid, (u64)fp->binder,
2375 node->debug_id, (u64)fp->cookie,
2376 (u64)node->cookie);
2377 ret = -EINVAL;
2378 goto done;
2379 }
2380 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2381 ret = -EPERM;
2382 goto done;
2383 }
2384
2385 ret = binder_inc_ref_for_node(target_proc, node,
2386 fp->hdr.type == BINDER_TYPE_BINDER,
2387 &thread->todo, &rdata);
2388 if (ret)
2389 goto done;
2390
2391 if (fp->hdr.type == BINDER_TYPE_BINDER)
2392 fp->hdr.type = BINDER_TYPE_HANDLE;
2393 else
2394 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2395 fp->binder = 0;
2396 fp->handle = rdata.desc;
2397 fp->cookie = 0;
2398
2399 trace_binder_transaction_node_to_ref(t, node, &rdata);
2400 binder_debug(BINDER_DEBUG_TRANSACTION,
2401 " node %d u%016llx -> ref %d desc %d\n",
2402 node->debug_id, (u64)node->ptr,
2403 rdata.debug_id, rdata.desc);
2404 done:
2405 binder_put_node(node);
2406 return ret;
2407 }
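/*
 * Before/after sketch of a successful translation (illustrative only,
 * values hypothetical):
 *
 *	sender's object: { hdr.type = BINDER_TYPE_BINDER,
 *			   binder = 0xb0000000, cookie = 0xc0000000 }
 *	target's copy:	 { hdr.type = BINDER_TYPE_HANDLE,
 *			   handle = rdata.desc, binder = 0, cookie = 0 }
 *
 * The node's pointer and cookie never reach the target process; only
 * the per-process handle does.
 */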
2408
2409 static int binder_translate_handle(struct flat_binder_object *fp,
2410 struct binder_transaction *t,
2411 struct binder_thread *thread)
2412 {
2413 struct binder_proc *proc = thread->proc;
2414 struct binder_proc *target_proc = t->to_proc;
2415 struct binder_node *node;
2416 struct binder_ref_data src_rdata;
2417 int ret = 0;
2418
2419 node = binder_get_node_from_ref(proc, fp->handle,
2420 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2421 if (!node) {
2422 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2423 proc->pid, thread->pid, fp->handle);
2424 return -EINVAL;
2425 }
2426 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2427 ret = -EPERM;
2428 goto done;
2429 }
2430
2431 binder_node_lock(node);
2432 if (node->proc == target_proc) {
2433 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2434 fp->hdr.type = BINDER_TYPE_BINDER;
2435 else
2436 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2437 fp->binder = node->ptr;
2438 fp->cookie = node->cookie;
2439 if (node->proc)
2440 binder_inner_proc_lock(node->proc);
2441 else
2442 __acquire(&node->proc->inner_lock);
2443 binder_inc_node_nilocked(node,
2444 fp->hdr.type == BINDER_TYPE_BINDER,
2445 0, NULL);
2446 if (node->proc)
2447 binder_inner_proc_unlock(node->proc);
2448 else
2449 __release(&node->proc->inner_lock);
2450 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2451 binder_debug(BINDER_DEBUG_TRANSACTION,
2452 " ref %d desc %d -> node %d u%016llx\n",
2453 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2454 (u64)node->ptr);
2455 binder_node_unlock(node);
2456 } else {
2457 struct binder_ref_data dest_rdata;
2458
2459 binder_node_unlock(node);
2460 ret = binder_inc_ref_for_node(target_proc, node,
2461 fp->hdr.type == BINDER_TYPE_HANDLE,
2462 NULL, &dest_rdata);
2463 if (ret)
2464 goto done;
2465
2466 fp->binder = 0;
2467 fp->handle = dest_rdata.desc;
2468 fp->cookie = 0;
2469 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2470 &dest_rdata);
2471 binder_debug(BINDER_DEBUG_TRANSACTION,
2472 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2473 src_rdata.debug_id, src_rdata.desc,
2474 dest_rdata.debug_id, dest_rdata.desc,
2475 node->debug_id);
2476 }
2477 done:
2478 binder_put_node(node);
2479 return ret;
2480 }
2481
2482 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2483 struct binder_transaction *t,
2484 struct binder_thread *thread,
2485 struct binder_transaction *in_reply_to)
2486 {
2487 struct binder_proc *proc = thread->proc;
2488 struct binder_proc *target_proc = t->to_proc;
2489 struct binder_txn_fd_fixup *fixup;
2490 struct file *file;
2491 int ret = 0;
2492 bool target_allows_fd;
2493
2494 if (in_reply_to)
2495 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2496 else
2497 target_allows_fd = t->buffer->target_node->accept_fds;
2498 if (!target_allows_fd) {
2499 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2500 proc->pid, thread->pid,
2501 in_reply_to ? "reply" : "transaction",
2502 fd);
2503 ret = -EPERM;
2504 goto err_fd_not_accepted;
2505 }
2506
2507 file = fget(fd);
2508 if (!file) {
2509 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2510 proc->pid, thread->pid, fd);
2511 ret = -EBADF;
2512 goto err_fget;
2513 }
2514 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2515 if (ret < 0) {
2516 ret = -EPERM;
2517 goto err_security;
2518 }
2519
2520 /*
2521 * Add fixup record for this transaction. The allocation
2522 * of the fd in the target needs to be done from a
2523 * target thread.
2524 */
2525 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2526 if (!fixup) {
2527 ret = -ENOMEM;
2528 goto err_alloc;
2529 }
2530 fixup->file = file;
2531 fixup->offset = fd_offset;
2532 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2533 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2534
2535 return ret;
2536
2537 err_alloc:
2538 err_security:
2539 fput(file);
2540 err_fget:
2541 err_fd_not_accepted:
2542 return ret;
2543 }
2544
2545 /**
2546 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2547 * @offset: offset in target buffer to fix up
2548 * @skip_size: bytes to skip in copy (fixup will be written later)
2549 * @fixup_data: data to write at fixup offset
2550 * @node: list node
2551 *
2552 * This is used for the pointer fixup list (pf) which is created and consumed
2553 * during binder_transaction() and is only accessed locally. No
2554 * locking is necessary.
2555 *
2556 * The list is ordered by @offset.
2557 */
2558 struct binder_ptr_fixup {
2559 binder_size_t offset;
2560 size_t skip_size;
2561 binder_uintptr_t fixup_data;
2562 struct list_head node;
2563 };
2564
2565 /**
2566 * struct binder_sg_copy - scatter-gather data to be copied
2567 * @offset: offset in target buffer
2568 * @sender_uaddr: user address in source buffer
2569 * @length: bytes to copy
2570 * @node: list node
2571 *
2572 * This is used for the sg copy list (sgc) which is created and consumed
2573 * during binder_transaction() and is only accessed locally. No
2574 * locking is necessary.
2575 *
2576 * The list is ordered by @offset.
2577 */
2578 struct binder_sg_copy {
2579 binder_size_t offset;
2580 const void __user *sender_uaddr;
2581 size_t length;
2582 struct list_head node;
2583 };
2584
2585 /**
2586 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2587 * @alloc: binder_alloc associated with @buffer
2588 * @buffer: binder buffer in target process
2589 * @sgc_head: list_head of scatter-gather copy list
2590 * @pf_head: list_head of pointer fixup list
2591 *
2592 * Processes all elements of @sgc_head, applying fixups from @pf_head
2593 * and copying the scatter-gather data from the source process' user
2594 * buffer to the target's buffer. It is expected that the list creation
2595 * and processing all occurs during binder_transaction() so these lists
2596 * are only accessed in local context.
2597 *
2598 * Return: 0=success, else -errno
2599 */
2600 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2601 struct binder_buffer *buffer,
2602 struct list_head *sgc_head,
2603 struct list_head *pf_head)
2604 {
2605 int ret = 0;
2606 struct binder_sg_copy *sgc, *tmpsgc;
2607 struct binder_ptr_fixup *tmppf;
2608 struct binder_ptr_fixup *pf =
2609 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2610 node);
2611
2612 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2613 size_t bytes_copied = 0;
2614
2615 while (bytes_copied < sgc->length) {
2616 size_t copy_size;
2617 size_t bytes_left = sgc->length - bytes_copied;
2618 size_t offset = sgc->offset + bytes_copied;
2619
2620 /*
2621 * We copy up to the fixup (pointed to by pf)
2622 */
2623 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2624 : bytes_left;
2625 if (!ret && copy_size)
2626 ret = binder_alloc_copy_user_to_buffer(
2627 alloc, buffer,
2628 offset,
2629 sgc->sender_uaddr + bytes_copied,
2630 copy_size);
2631 bytes_copied += copy_size;
2632 if (copy_size != bytes_left) {
2633 BUG_ON(!pf);
2634 /* we stopped at a fixup offset */
2635 if (pf->skip_size) {
2636 /*
2637 * we are just skipping. This is for
2638 * BINDER_TYPE_FDA where the translated
2639 * fds will be fixed up when we get
2640 * to target context.
2641 */
2642 bytes_copied += pf->skip_size;
2643 } else {
2644 /* apply the fixup indicated by pf */
2645 if (!ret)
2646 ret = binder_alloc_copy_to_buffer(
2647 alloc, buffer,
2648 pf->offset,
2649 &pf->fixup_data,
2650 sizeof(pf->fixup_data));
2651 bytes_copied += sizeof(pf->fixup_data);
2652 }
2653 list_del(&pf->node);
2654 kfree(pf);
2655 pf = list_first_entry_or_null(pf_head,
2656 struct binder_ptr_fixup, node);
2657 }
2658 }
2659 list_del(&sgc->node);
2660 kfree(sgc);
2661 }
2662 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2663 BUG_ON(pf->skip_size == 0);
2664 list_del(&pf->node);
2665 kfree(pf);
2666 }
2667 BUG_ON(!list_empty(sgc_head));
2668
2669 return ret > 0 ? -EINVAL : ret;
2670 }
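/*
 * Worked example (made-up numbers, assuming 8-byte pointers): one sg
 * block of 32 bytes at buffer offset 64 with a single pointer fixup at
 * offset 72 is processed as:
 *
 *	copy  bytes 64..71 from the sender
 *	write bytes 72..79 from pf->fixup_data (the translated pointer)
 *	copy  bytes 80..95 from the sender
 *
 * so the sender's untranslated pointer value is never visible in the
 * target buffer.
 */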
2671
2672 /**
2673 * binder_cleanup_deferred_txn_lists() - free specified lists
2674 * @sgc_head: list_head of scatter-gather copy list
2675 * @pf_head: list_head of pointer fixup list
2676 *
2677 * Called to clean up @sgc_head and @pf_head if there is an
2678 * error.
2679 */
2680 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2681 struct list_head *pf_head)
2682 {
2683 struct binder_sg_copy *sgc, *tmpsgc;
2684 struct binder_ptr_fixup *pf, *tmppf;
2685
2686 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2687 list_del(&sgc->node);
2688 kfree(sgc);
2689 }
2690 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2691 list_del(&pf->node);
2692 kfree(pf);
2693 }
2694 }
2695
2696 /**
2697 * binder_defer_copy() - queue a scatter-gather buffer for copy
2698 * @sgc_head: list_head of scatter-gather copy list
2699 * @offset: binder buffer offset in target process
2700 * @sender_uaddr: user address in source process
2701 * @length: bytes to copy
2702 *
2703 * Specify a scatter-gather block to be copied. The actual copy must
2704 * be deferred until all the needed fixups are identified and queued.
2705 * Then the copy and fixups are done together so un-translated values
2706 * from the source are never visible in the target buffer.
2707 *
2708 * We are guaranteed that repeated calls to this function will have
2709 * monotonically increasing @offset values so the list will naturally
2710 * be ordered.
2711 *
2712 * Return: 0=success, else -errno
2713 */
2714 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2715 const void __user *sender_uaddr, size_t length)
2716 {
2717 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2718
2719 if (!bc)
2720 return -ENOMEM;
2721
2722 bc->offset = offset;
2723 bc->sender_uaddr = sender_uaddr;
2724 bc->length = length;
2725 INIT_LIST_HEAD(&bc->node);
2726
2727 /*
2728 * We are guaranteed that the deferred copies are in-order
2729 * so just add to the tail.
2730 */
2731 list_add_tail(&bc->node, sgc_head);
2732
2733 return 0;
2734 }
2735
2736 /**
2737 * binder_add_fixup() - queue a fixup to be applied to sg copy
2738 * @pf_head: list_head of binder ptr fixup list
2739 * @offset: binder buffer offset in target process
2740 * @fixup: bytes to be copied for fixup
2741 * @skip_size: bytes to skip when copying (fixup will be applied later)
2742 *
2743 * Add the specified fixup to a list ordered by @offset. When copying
2744 * the scatter-gather buffers, the fixup will be copied instead of
2745 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2746 * will be applied later (in target process context), so we just skip
2747 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2748 * value in @fixup.
2749 *
2750 * This function is called *mostly* in @offset order, but there are
2751 * exceptions. Since out-of-order inserts are relatively uncommon,
2752 * we insert the new element by searching backward from the tail of
2753 * the list.
2754 *
2755 * Return: 0=success, else -errno
2756 */
2757 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2758 binder_uintptr_t fixup, size_t skip_size)
2759 {
2760 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2761 struct binder_ptr_fixup *tmppf;
2762
2763 if (!pf)
2764 return -ENOMEM;
2765
2766 pf->offset = offset;
2767 pf->fixup_data = fixup;
2768 pf->skip_size = skip_size;
2769 INIT_LIST_HEAD(&pf->node);
2770
2771 /* Fixups are *mostly* added in-order, but there are some
2772 * exceptions. Look backwards through list for insertion point.
2773 */
2774 list_for_each_entry_reverse(tmppf, pf_head, node) {
2775 if (tmppf->offset < pf->offset) {
2776 list_add(&pf->node, &tmppf->node);
2777 return 0;
2778 }
2779 }
2780 /*
2781 * if we get here, then the new offset is the lowest so
2782 * insert at the head
2783 */
2784 list_add(&pf->node, pf_head);
2785 return 0;
2786 }
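/*
 * Ordering example (illustrative): fixups arriving at offsets 16, 48, 32
 * yield the list 16 -> 32 -> 48. The common in-order case appends after
 * one comparison at the tail; only the rare out-of-order insert walks
 * further backward.
 */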
2787
2788 static int binder_translate_fd_array(struct list_head *pf_head,
2789 struct binder_fd_array_object *fda,
2790 const void __user *sender_ubuffer,
2791 struct binder_buffer_object *parent,
2792 struct binder_buffer_object *sender_uparent,
2793 struct binder_transaction *t,
2794 struct binder_thread *thread,
2795 struct binder_transaction *in_reply_to)
2796 {
2797 binder_size_t fdi, fd_buf_size;
2798 binder_size_t fda_offset;
2799 const void __user *sender_ufda_base;
2800 struct binder_proc *proc = thread->proc;
2801 int ret;
2802
2803 if (fda->num_fds == 0)
2804 return 0;
2805
2806 fd_buf_size = sizeof(u32) * fda->num_fds;
2807 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2808 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2809 proc->pid, thread->pid, (u64)fda->num_fds);
2810 return -EINVAL;
2811 }
2812 if (fd_buf_size > parent->length ||
2813 fda->parent_offset > parent->length - fd_buf_size) {
2814 /* No space for all file descriptors here. */
2815 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2816 proc->pid, thread->pid, (u64)fda->num_fds);
2817 return -EINVAL;
2818 }
2819 /*
2820 * the source data for binder_buffer_object is visible
2821 * to user-space and the @buffer element is the user
2822 * pointer to the buffer_object containing the fd_array.
2823 * Convert the address to an offset relative to
2824 * the base of the transaction buffer.
2825 */
2826 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2827 fda->parent_offset;
2828 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2829 fda->parent_offset;
2830
2831 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2832 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2833 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2834 proc->pid, thread->pid);
2835 return -EINVAL;
2836 }
2837 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2838 if (ret)
2839 return ret;
2840
2841 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2842 u32 fd;
2843 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2844 binder_size_t sender_uoffset = fdi * sizeof(fd);
2845
2846 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2847 if (!ret)
2848 ret = binder_translate_fd(fd, offset, t, thread,
2849 in_reply_to);
2850 if (ret)
2851 return ret > 0 ? -EINVAL : ret;
2852 }
2853 return 0;
2854 }
2855
2856 static int binder_fixup_parent(struct list_head *pf_head,
2857 struct binder_transaction *t,
2858 struct binder_thread *thread,
2859 struct binder_buffer_object *bp,
2860 binder_size_t off_start_offset,
2861 binder_size_t num_valid,
2862 binder_size_t last_fixup_obj_off,
2863 binder_size_t last_fixup_min_off)
2864 {
2865 struct binder_buffer_object *parent;
2866 struct binder_buffer *b = t->buffer;
2867 struct binder_proc *proc = thread->proc;
2868 struct binder_proc *target_proc = t->to_proc;
2869 struct binder_object object;
2870 binder_size_t buffer_offset;
2871 binder_size_t parent_offset;
2872
2873 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2874 return 0;
2875
2876 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2877 off_start_offset, &parent_offset,
2878 num_valid);
2879 if (!parent) {
2880 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2881 proc->pid, thread->pid);
2882 return -EINVAL;
2883 }
2884
2885 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2886 parent_offset, bp->parent_offset,
2887 last_fixup_obj_off,
2888 last_fixup_min_off)) {
2889 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2890 proc->pid, thread->pid);
2891 return -EINVAL;
2892 }
2893
2894 if (parent->length < sizeof(binder_uintptr_t) ||
2895 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2896 /* No space for a pointer here! */
2897 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2898 proc->pid, thread->pid);
2899 return -EINVAL;
2900 }
2901 buffer_offset = bp->parent_offset +
2902 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2903 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2904 }
2905
2906 /**
2907 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2908 * @t1: the pending async txn in the frozen process
2909 * @t2: the new async txn to supersede the outdated pending one
2910 *
2911 * Return: true if t2 can supersede t1
2912 * false if t2 cannot supersede t1
2913 */
2914 static bool binder_can_update_transaction(struct binder_transaction *t1,
2915 struct binder_transaction *t2)
2916 {
2917 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2918 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2919 return false;
2920 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2921 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2922 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2923 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2924 return true;
2925 return false;
2926 }
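/*
 * Illustrative scenario (not driver code; the code value is
 * hypothetical): two oneway transactions carrying TF_UPDATE_TXN, with
 * the same code and the same target node, sent to a frozen process:
 *
 *	t1: flags = TF_ONE_WAY | TF_UPDATE_TXN, code = SET_STATE (queued)
 *	t2: flags = TF_ONE_WAY | TF_UPDATE_TXN, code = SET_STATE
 *
 * binder_can_update_transaction(t1, t2) returns true, so t2 may replace
 * the still-undelivered t1 and only the freshest update is delivered
 * when the process thaws.
 */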
2927
2928 /**
2929 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2930 * @t: new async transaction
2931 * @target_list: list to find outdated transaction
2932 *
2933 * Return: the outdated transaction if found
2934 * NULL if no outdated transaction can be found
2935 *
2936 * Requires the proc->inner_lock to be held.
2937 */
2938 static struct binder_transaction *
2939 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2940 struct list_head *target_list)
2941 {
2942 struct binder_work *w;
2943
2944 list_for_each_entry(w, target_list, entry) {
2945 struct binder_transaction *t_queued;
2946
2947 if (w->type != BINDER_WORK_TRANSACTION)
2948 continue;
2949 t_queued = container_of(w, struct binder_transaction, work);
2950 if (binder_can_update_transaction(t_queued, t))
2951 return t_queued;
2952 }
2953 return NULL;
2954 }
2955
2956 /**
2957 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2958 * @t: transaction to send
2959 * @proc: process to send the transaction to
2960 * @thread: thread in @proc to send the transaction to (may be NULL)
2961 *
2962 * This function queues a transaction to the specified process. It will try
2963 * to find a thread in the target process to handle the transaction and
2964 * wake it up. If no thread is found, the work is queued to the proc
2965 * waitqueue.
2966 *
2967 * If the @thread parameter is not NULL, the transaction is always queued
2968 * to the waitlist of that specific thread.
2969 *
2970 * Return: 0 if the transaction was successfully queued
2971 * BR_DEAD_REPLY if the target process or thread is dead
2972 * BR_FROZEN_REPLY if the target process or thread is frozen and
2973 * the sync transaction was rejected
2974 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2975 * and the async transaction was successfully queued
2976 */
2977 static int binder_proc_transaction(struct binder_transaction *t,
2978 struct binder_proc *proc,
2979 struct binder_thread *thread)
2980 {
2981 struct binder_node *node = t->buffer->target_node;
2982 bool oneway = !!(t->flags & TF_ONE_WAY);
2983 bool pending_async = false;
2984 struct binder_transaction *t_outdated = NULL;
2985 bool frozen = false;
2986 bool skip = false;
2987 bool enqueue_task = true;
2988
2989 BUG_ON(!node);
2990 binder_node_lock(node);
2991
2992 if (oneway) {
2993 BUG_ON(thread);
2994 if (node->has_async_transaction)
2995 pending_async = true;
2996 else
2997 node->has_async_transaction = true;
2998 }
2999
3000 binder_inner_proc_lock(proc);
3001 if (proc->is_frozen) {
3002 frozen = true;
3003 proc->sync_recv |= !oneway;
3004 proc->async_recv |= oneway;
3005 }
3006
3007 if ((frozen && !oneway) || proc->is_dead ||
3008 (thread && thread->is_dead)) {
3009 binder_inner_proc_unlock(proc);
3010 binder_node_unlock(node);
3011 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
3012 }
3013
3014 trace_android_vh_binder_proc_transaction_entry(proc, t,
3015 &thread, node->debug_id, pending_async, !oneway, &skip);
3016
3017 if (!thread && !pending_async && !skip)
3018 thread = binder_select_thread_ilocked(proc);
3019
3020 if (thread) {
3021 binder_transaction_priority(thread, t, node);
3022 binder_enqueue_thread_work_ilocked(thread, &t->work);
3023 } else if (!pending_async) {
3024 trace_android_vh_binder_special_task(t, proc, thread,
3025 &t->work, &proc->todo, !oneway, &enqueue_task);
3026 if (enqueue_task)
3027 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3028 } else {
3029 if ((t->flags & TF_UPDATE_TXN) && frozen) {
3030 t_outdated = binder_find_outdated_transaction_ilocked(t,
3031 &node->async_todo);
3032 if (t_outdated) {
3033 binder_debug(BINDER_DEBUG_TRANSACTION,
3034 "txn %d supersedes %d\n",
3035 t->debug_id, t_outdated->debug_id);
3036 list_del_init(&t_outdated->work.entry);
3037 proc->outstanding_txns--;
3038 }
3039 }
3040 trace_android_vh_binder_special_task(t, proc, thread,
3041 &t->work, &node->async_todo, !oneway, &enqueue_task);
3042 if (enqueue_task)
3043 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
3044 }
3045
3046 trace_android_vh_binder_proc_transaction_finish(proc, t,
3047 thread ? thread->task : NULL, pending_async, !oneway);
3048 if (!pending_async)
3049 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3050
3051 proc->outstanding_txns++;
3052 binder_inner_proc_unlock(proc);
3053 binder_node_unlock(node);
3054
3055 /*
3056 * To reduce potential contention, free the outdated transaction and
3057 * buffer after releasing the locks.
3058 */
3059 if (t_outdated) {
3060 struct binder_buffer *buffer = t_outdated->buffer;
3061
3062 t_outdated->buffer = NULL;
3063 buffer->transaction = NULL;
3064 trace_binder_transaction_update_buffer_release(buffer);
3065 binder_release_entire_buffer(proc, NULL, buffer, false);
3066 binder_alloc_free_buf(&proc->alloc, buffer);
3067 kfree(t_outdated);
3068 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3069 }
3070
3071 if (oneway && frozen)
3072 return BR_TRANSACTION_PENDING_FROZEN;
3073
3074 return 0;
3075 }
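/*
 * Caller-side sketch (illustrative): the return value feeds the BR_*
 * error reported back to the sender, roughly:
 *
 *	ret = binder_proc_transaction(t, target_proc, NULL);
 *	if (ret == BR_TRANSACTION_PENDING_FROZEN)
 *		(oneway txn queued to a frozen proc; not an error)
 *	else if (ret)
 *		return_error = ret;	(BR_DEAD_REPLY or BR_FROZEN_REPLY)
 */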
3076
3077 /**
3078 * binder_get_node_refs_for_txn() - Get required refs on node for txn
3079 * @node: struct binder_node for which to get refs
3080 * @procp: returns @node->proc if valid
3081 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
3082 *
3083 * User-space normally keeps the node alive when creating a transaction
3084 * since it has a reference to the target. The local strong ref keeps it
3085 * alive if the sending process dies before the target process processes
3086 * the transaction. If the source process is malicious or has a reference
3087 * counting bug, relying on the local strong ref can fail.
3088 *
3089 * Since user-space can cause the local strong ref to go away, we also take
3090 * a tmpref on the node to ensure it survives while we are constructing
3091 * the transaction. We also need a tmpref on the proc while we are
3092 * constructing the transaction, so we take that here as well.
3093 *
3094 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3095 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3096 * target proc has died, @error is set to BR_DEAD_REPLY.
3097 */
3098 static struct binder_node *binder_get_node_refs_for_txn(
3099 struct binder_node *node,
3100 struct binder_proc **procp,
3101 uint32_t *error)
3102 {
3103 struct binder_node *target_node = NULL;
3104
3105 binder_node_inner_lock(node);
3106 if (node->proc) {
3107 target_node = node;
3108 binder_inc_node_nilocked(node, 1, 0, NULL);
3109 binder_inc_node_tmpref_ilocked(node);
3110 node->proc->tmp_ref++;
3111 *procp = node->proc;
3112 } else
3113 *error = BR_DEAD_REPLY;
3114 binder_node_inner_unlock(node);
3115
3116 return target_node;
3117 }
3118
3119 static void binder_transaction(struct binder_proc *proc,
3120 struct binder_thread *thread,
3121 struct binder_transaction_data *tr, int reply,
3122 binder_size_t extra_buffers_size)
3123 {
3124 int ret;
3125 struct binder_transaction *t;
3126 struct binder_work *w;
3127 struct binder_work *tcomplete;
3128 binder_size_t buffer_offset = 0;
3129 binder_size_t off_start_offset, off_end_offset;
3130 binder_size_t off_min;
3131 binder_size_t sg_buf_offset, sg_buf_end_offset;
3132 binder_size_t user_offset = 0;
3133 struct binder_proc *target_proc = NULL;
3134 struct binder_thread *target_thread = NULL;
3135 struct binder_node *target_node = NULL;
3136 struct binder_transaction *in_reply_to = NULL;
3137 struct binder_transaction_log_entry *e;
3138 uint32_t return_error = 0;
3139 uint32_t return_error_param = 0;
3140 uint32_t return_error_line = 0;
3141 binder_size_t last_fixup_obj_off = 0;
3142 binder_size_t last_fixup_min_off = 0;
3143 struct binder_context *context = proc->context;
3144 int t_debug_id = atomic_inc_return(&binder_last_id);
3145 ktime_t t_start_time = ktime_get();
3146 char *secctx = NULL;
3147 u32 secctx_sz = 0;
3148 bool is_nested = false;
3149 struct list_head sgc_head;
3150 struct list_head pf_head;
3151 const void __user *user_buffer = (const void __user *)
3152 (uintptr_t)tr->data.ptr.buffer;
3153 INIT_LIST_HEAD(&sgc_head);
3154 INIT_LIST_HEAD(&pf_head);
3155
3156 e = binder_transaction_log_add(&binder_transaction_log);
3157 e->debug_id = t_debug_id;
3158 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3159 e->from_proc = proc->pid;
3160 e->from_thread = thread->pid;
3161 e->target_handle = tr->target.handle;
3162 e->data_size = tr->data_size;
3163 e->offsets_size = tr->offsets_size;
3164 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3165
3166 if (reply) {
3167 binder_inner_proc_lock(proc);
3168 in_reply_to = thread->transaction_stack;
3169 if (in_reply_to == NULL) {
3170 binder_inner_proc_unlock(proc);
3171 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3172 proc->pid, thread->pid);
3173 return_error = BR_FAILED_REPLY;
3174 return_error_param = -EPROTO;
3175 return_error_line = __LINE__;
3176 goto err_empty_call_stack;
3177 }
3178 if (in_reply_to->to_thread != thread) {
3179 spin_lock(&in_reply_to->lock);
3180 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3181 proc->pid, thread->pid, in_reply_to->debug_id,
3182 in_reply_to->to_proc ?
3183 in_reply_to->to_proc->pid : 0,
3184 in_reply_to->to_thread ?
3185 in_reply_to->to_thread->pid : 0);
3186 spin_unlock(&in_reply_to->lock);
3187 binder_inner_proc_unlock(proc);
3188 return_error = BR_FAILED_REPLY;
3189 return_error_param = -EPROTO;
3190 return_error_line = __LINE__;
3191 in_reply_to = NULL;
3192 goto err_bad_call_stack;
3193 }
3194 thread->transaction_stack = in_reply_to->to_parent;
3195 binder_inner_proc_unlock(proc);
3196 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3197 if (target_thread == NULL) {
3198 /* annotation for sparse */
3199 __release(&target_thread->proc->inner_lock);
3200 return_error = BR_DEAD_REPLY;
3201 return_error_line = __LINE__;
3202 goto err_dead_binder;
3203 }
3204 if (target_thread->transaction_stack != in_reply_to) {
3205 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3206 proc->pid, thread->pid,
3207 target_thread->transaction_stack ?
3208 target_thread->transaction_stack->debug_id : 0,
3209 in_reply_to->debug_id);
3210 binder_inner_proc_unlock(target_thread->proc);
3211 return_error = BR_FAILED_REPLY;
3212 return_error_param = -EPROTO;
3213 return_error_line = __LINE__;
3214 in_reply_to = NULL;
3215 target_thread = NULL;
3216 goto err_dead_binder;
3217 }
3218 target_proc = target_thread->proc;
3219 target_proc->tmp_ref++;
3220 binder_inner_proc_unlock(target_thread->proc);
3221 trace_android_vh_binder_reply(target_proc, proc, thread, tr);
3222 } else {
3223 if (tr->target.handle) {
3224 struct binder_ref *ref;
3225
3226 /*
3227 * There must already be a strong ref
3228 * on this node. If so, do a strong
3229 * increment on the node to ensure it
3230 * stays alive until the transaction is
3231 * done.
3232 */
3233 binder_proc_lock(proc);
3234 ref = binder_get_ref_olocked(proc, tr->target.handle,
3235 true);
3236 if (ref) {
3237 target_node = binder_get_node_refs_for_txn(
3238 ref->node, &target_proc,
3239 &return_error);
3240 } else {
3241 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3242 proc->pid, thread->pid, tr->target.handle);
3243 return_error = BR_FAILED_REPLY;
3244 }
3245 binder_proc_unlock(proc);
3246 } else {
3247 mutex_lock(&context->context_mgr_node_lock);
3248 target_node = context->binder_context_mgr_node;
3249 if (target_node)
3250 target_node = binder_get_node_refs_for_txn(
3251 target_node, &target_proc,
3252 &return_error);
3253 else
3254 return_error = BR_DEAD_REPLY;
3255 mutex_unlock(&context->context_mgr_node_lock);
3256 if (target_node && target_proc->pid == proc->pid) {
3257 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3258 proc->pid, thread->pid);
3259 return_error = BR_FAILED_REPLY;
3260 return_error_param = -EINVAL;
3261 return_error_line = __LINE__;
3262 goto err_invalid_target_handle;
3263 }
3264 }
3265 if (!target_node) {
3266 /*
3267 * return_error is set above
3268 */
3269 return_error_param = -EINVAL;
3270 return_error_line = __LINE__;
3271 goto err_dead_binder;
3272 }
3273 e->to_node = target_node->debug_id;
3274 if (WARN_ON(proc == target_proc)) {
3275 return_error = BR_FAILED_REPLY;
3276 return_error_param = -EINVAL;
3277 return_error_line = __LINE__;
3278 goto err_invalid_target_handle;
3279 }
3280 trace_android_vh_binder_trans(target_proc, proc, thread, tr);
3281 if (security_binder_transaction(proc->cred,
3282 target_proc->cred) < 0) {
3283 return_error = BR_FAILED_REPLY;
3284 return_error_param = -EPERM;
3285 return_error_line = __LINE__;
3286 goto err_invalid_target_handle;
3287 }
3288 binder_inner_proc_lock(proc);
3289
3290 w = list_first_entry_or_null(&thread->todo,
3291 struct binder_work, entry);
3292 if (!(tr->flags & TF_ONE_WAY) && w &&
3293 w->type == BINDER_WORK_TRANSACTION) {
3294 /*
3295 * Do not allow new outgoing transaction from a
3296 * thread that has a transaction at the head of
3297 * its todo list. Only need to check the head
3298 * because binder_select_thread_ilocked picks a
3299 * thread from proc->waiting_threads to enqueue
3300 * the transaction, and nothing is queued to the
3301 * todo list while the thread is on waiting_threads.
3302 */
3303 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3304 proc->pid, thread->pid);
3305 binder_inner_proc_unlock(proc);
3306 return_error = BR_FAILED_REPLY;
3307 return_error_param = -EPROTO;
3308 return_error_line = __LINE__;
3309 goto err_bad_todo_list;
3310 }
3311
3312 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3313 struct binder_transaction *tmp;
3314
3315 tmp = thread->transaction_stack;
3316 if (tmp->to_thread != thread) {
3317 spin_lock(&tmp->lock);
3318 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3319 proc->pid, thread->pid, tmp->debug_id,
3320 tmp->to_proc ? tmp->to_proc->pid : 0,
3321 tmp->to_thread ?
3322 tmp->to_thread->pid : 0);
3323 spin_unlock(&tmp->lock);
3324 binder_inner_proc_unlock(proc);
3325 return_error = BR_FAILED_REPLY;
3326 return_error_param = -EPROTO;
3327 return_error_line = __LINE__;
3328 goto err_bad_call_stack;
3329 }
3330 while (tmp) {
3331 struct binder_thread *from;
3332
3333 spin_lock(&tmp->lock);
3334 from = tmp->from;
3335 if (from && from->proc == target_proc) {
3336 atomic_inc(&from->tmp_ref);
3337 target_thread = from;
3338 spin_unlock(&tmp->lock);
3339 is_nested = true;
3340 break;
3341 }
3342 spin_unlock(&tmp->lock);
3343 tmp = tmp->from_parent;
3344 }
3345 }
3346 binder_inner_proc_unlock(proc);
3347 }
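/*
 * The from_parent walk above makes dispatch call-chain aware: when the
 * call is nested (procA -> procB -> procA), the thread in the target
 * process that originated the chain is reused instead of waking an
 * extra thread. Illustrative sequence (not driver code):
 *
 *	A:t1  BC_TRANSACTION -> B          (t1 blocks for the reply)
 *	B:t2  BC_TRANSACTION -> A          (nested call back into A)
 *	A:t1  receives it, is_nested = true
 */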
3348 if (target_thread)
3349 e->to_thread = target_thread->pid;
3350 e->to_proc = target_proc->pid;
3351 trace_android_rvh_binder_transaction(target_proc, proc, thread, tr);
3352
3353 /* TODO: reuse incoming transaction for reply */
3354 t = kzalloc(sizeof(*t), GFP_KERNEL);
3355 if (t == NULL) {
3356 return_error = BR_FAILED_REPLY;
3357 return_error_param = -ENOMEM;
3358 return_error_line = __LINE__;
3359 goto err_alloc_t_failed;
3360 }
3361 INIT_LIST_HEAD(&t->fd_fixups);
3362 binder_stats_created(BINDER_STAT_TRANSACTION);
3363 spin_lock_init(&t->lock);
3364 trace_android_vh_binder_transaction_init(t);
3365
3366 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3367 if (tcomplete == NULL) {
3368 return_error = BR_FAILED_REPLY;
3369 return_error_param = -ENOMEM;
3370 return_error_line = __LINE__;
3371 goto err_alloc_tcomplete_failed;
3372 }
3373 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3374
3375 t->debug_id = t_debug_id;
3376 t->start_time = t_start_time;
3377
3378 if (reply)
3379 binder_debug(BINDER_DEBUG_TRANSACTION,
3380 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3381 proc->pid, thread->pid, t->debug_id,
3382 target_proc->pid, target_thread->pid,
3383 (u64)tr->data.ptr.buffer,
3384 (u64)tr->data.ptr.offsets,
3385 (u64)tr->data_size, (u64)tr->offsets_size,
3386 (u64)extra_buffers_size);
3387 else
3388 binder_debug(BINDER_DEBUG_TRANSACTION,
3389 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3390 proc->pid, thread->pid, t->debug_id,
3391 target_proc->pid, target_node->debug_id,
3392 (u64)tr->data.ptr.buffer,
3393 (u64)tr->data.ptr.offsets,
3394 (u64)tr->data_size, (u64)tr->offsets_size,
3395 (u64)extra_buffers_size);
3396
3397 if (!reply && !(tr->flags & TF_ONE_WAY))
3398 t->from = thread;
3399 else
3400 t->from = NULL;
3401 t->from_pid = proc->pid;
3402 t->from_tid = thread->pid;
3403 t->sender_euid = task_euid(proc->tsk);
3404 t->to_proc = target_proc;
3405 t->to_thread = target_thread;
3406 t->code = tr->code;
3407 t->flags = tr->flags;
3408 t->is_nested = is_nested;
3409 if (!(t->flags & TF_ONE_WAY) &&
3410 binder_supported_policy(current->policy)) {
3411 /* Inherit supported policies for synchronous transactions */
3412 t->priority.sched_policy = current->policy;
3413 t->priority.prio = current->normal_prio;
3414 } else {
3415 /* Otherwise, fall back to the default priority */
3416 t->priority = target_proc->default_priority;
3417 }
3418
3419 if (!(t->flags & TF_ONE_WAY))
3420 set_inherited_uclamp(t);
3421
3422 if (target_node && target_node->txn_security_ctx) {
3423 u32 secid;
3424 size_t added_size;
3425
3426 security_cred_getsecid(proc->cred, &secid);
3427 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3428 if (ret) {
3429 return_error = BR_FAILED_REPLY;
3430 return_error_param = ret;
3431 return_error_line = __LINE__;
3432 goto err_get_secctx_failed;
3433 }
3434 added_size = ALIGN(secctx_sz, sizeof(u64));
3435 extra_buffers_size += added_size;
3436 if (extra_buffers_size < added_size) {
3437 /* integer overflow of extra_buffers_size */
3438 return_error = BR_FAILED_REPLY;
3439 return_error_param = -EINVAL;
3440 return_error_line = __LINE__;
3441 goto err_bad_extra_size;
3442 }
3443 }
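/*
 * The wraparound test above is the standard unsigned-overflow idiom:
 * after the addition, the sum is smaller than one of the addends iff
 * the addition wrapped. A minimal self-contained sketch of the same
 * pattern (illustrative only, not part of the driver):
 *
 *	static bool add_wraps(size_t total, size_t added)
 *	{
 *		total += added;
 *		return total < added;	// wrapped past SIZE_MAX
 *	}
 */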
3444
3445 trace_binder_transaction(reply, t, target_node);
3446
3447 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3448 tr->offsets_size, extra_buffers_size,
3449 !reply && (t->flags & TF_ONE_WAY));
3450 if (IS_ERR(t->buffer)) {
3451 /*
3452 * -ESRCH indicates VMA cleared. The target is dying.
3453 */
3454 return_error_param = PTR_ERR(t->buffer);
3455 return_error = return_error_param == -ESRCH ?
3456 BR_DEAD_REPLY : BR_FAILED_REPLY;
3457 return_error_line = __LINE__;
3458 t->buffer = NULL;
3459 goto err_binder_alloc_buf_failed;
3460 }
3461 if (secctx) {
3462 int err;
3463 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3464 ALIGN(tr->offsets_size, sizeof(void *)) +
3465 ALIGN(extra_buffers_size, sizeof(void *)) -
3466 ALIGN(secctx_sz, sizeof(u64));
3467
3468 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3469 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3470 t->buffer, buf_offset,
3471 secctx, secctx_sz);
3472 if (err) {
3473 t->security_ctx = 0;
3474 WARN_ON(1);
3475 }
3476 security_release_secctx(secctx, secctx_sz);
3477 secctx = NULL;
3478 }
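/*
 * Layout the offset arithmetic above assumes for the transaction
 * buffer (sizes illustrative): the security context occupies the tail
 * of the extra buffers area.
 *
 *	[ data | offsets | sg buffers | secctx ]
 *
 * The offsets array starts at ALIGN(data_size, sizeof(void *)) and
 * secctx sits at buf_offset, the very end. For example, on a 64-bit
 * kernel, data_size = 100 puts the offsets array at byte 104, since
 * ALIGN(100, 8) == 104.
 */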
3479 t->buffer->debug_id = t->debug_id;
3480 t->buffer->transaction = t;
3481 t->buffer->target_node = target_node;
3482 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3483 trace_binder_transaction_alloc_buf(t->buffer);
3484 trace_android_vh_alloc_oem_binder_struct(tr, t, target_proc);
3485
3486 if (binder_alloc_copy_user_to_buffer(
3487 &target_proc->alloc,
3488 t->buffer,
3489 ALIGN(tr->data_size, sizeof(void *)),
3490 (const void __user *)
3491 (uintptr_t)tr->data.ptr.offsets,
3492 tr->offsets_size)) {
3493 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3494 proc->pid, thread->pid);
3495 return_error = BR_FAILED_REPLY;
3496 return_error_param = -EFAULT;
3497 return_error_line = __LINE__;
3498 goto err_copy_data_failed;
3499 }
3500 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3501 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3502 proc->pid, thread->pid, (u64)tr->offsets_size);
3503 return_error = BR_FAILED_REPLY;
3504 return_error_param = -EINVAL;
3505 return_error_line = __LINE__;
3506 goto err_bad_offset;
3507 }
3508 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3509 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3510 proc->pid, thread->pid,
3511 (u64)extra_buffers_size);
3512 return_error = BR_FAILED_REPLY;
3513 return_error_param = -EINVAL;
3514 return_error_line = __LINE__;
3515 goto err_bad_offset;
3516 }
3517 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3518 buffer_offset = off_start_offset;
3519 off_end_offset = off_start_offset + tr->offsets_size;
3520 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3521 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3522 ALIGN(secctx_sz, sizeof(u64));
3523 off_min = 0;
3524 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3525 buffer_offset += sizeof(binder_size_t)) {
3526 struct binder_object_header *hdr;
3527 size_t object_size;
3528 struct binder_object object;
3529 binder_size_t object_offset;
3530 binder_size_t copy_size;
3531
3532 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3533 &object_offset,
3534 t->buffer,
3535 buffer_offset,
3536 sizeof(object_offset))) {
3537 return_error = BR_FAILED_REPLY;
3538 return_error_param = -EINVAL;
3539 return_error_line = __LINE__;
3540 goto err_bad_offset;
3541 }
3542
3543 /*
3544 * Copy the source user buffer up to the next object
3545 * that will be processed.
3546 */
3547 copy_size = object_offset - user_offset;
3548 if (copy_size && (user_offset > object_offset ||
3549 binder_alloc_copy_user_to_buffer(
3550 &target_proc->alloc,
3551 t->buffer, user_offset,
3552 user_buffer + user_offset,
3553 copy_size))) {
3554 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3555 proc->pid, thread->pid);
3556 return_error = BR_FAILED_REPLY;
3557 return_error_param = -EFAULT;
3558 return_error_line = __LINE__;
3559 goto err_copy_data_failed;
3560 }
3561 object_size = binder_get_object(target_proc, user_buffer,
3562 t->buffer, object_offset, &object);
3563 if (object_size == 0 || object_offset < off_min) {
3564 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3565 proc->pid, thread->pid,
3566 (u64)object_offset,
3567 (u64)off_min,
3568 (u64)t->buffer->data_size);
3569 return_error = BR_FAILED_REPLY;
3570 return_error_param = -EINVAL;
3571 return_error_line = __LINE__;
3572 goto err_bad_offset;
3573 }
3574 /*
3575 * Set offset to the next buffer fragment to be
3576 * copied
3577 */
3578 user_offset = object_offset + object_size;
3579
3580 hdr = &object.hdr;
3581 off_min = object_offset + object_size;
3582 switch (hdr->type) {
3583 case BINDER_TYPE_BINDER:
3584 case BINDER_TYPE_WEAK_BINDER: {
3585 struct flat_binder_object *fp;
3586
3587 fp = to_flat_binder_object(hdr);
3588 ret = binder_translate_binder(fp, t, thread);
3589
3590 if (ret < 0 ||
3591 binder_alloc_copy_to_buffer(&target_proc->alloc,
3592 t->buffer,
3593 object_offset,
3594 fp, sizeof(*fp))) {
3595 return_error = BR_FAILED_REPLY;
3596 return_error_param = ret;
3597 return_error_line = __LINE__;
3598 goto err_translate_failed;
3599 }
3600 } break;
3601 case BINDER_TYPE_HANDLE:
3602 case BINDER_TYPE_WEAK_HANDLE: {
3603 struct flat_binder_object *fp;
3604
3605 fp = to_flat_binder_object(hdr);
3606 ret = binder_translate_handle(fp, t, thread);
3607 if (ret < 0 ||
3608 binder_alloc_copy_to_buffer(&target_proc->alloc,
3609 t->buffer,
3610 object_offset,
3611 fp, sizeof(*fp))) {
3612 return_error = BR_FAILED_REPLY;
3613 return_error_param = ret;
3614 return_error_line = __LINE__;
3615 goto err_translate_failed;
3616 }
3617 } break;
3618
3619 case BINDER_TYPE_FD: {
3620 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3621 binder_size_t fd_offset = object_offset +
3622 (uintptr_t)&fp->fd - (uintptr_t)fp;
3623 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3624 thread, in_reply_to);
3625
3626 fp->pad_binder = 0;
3627 if (ret < 0 ||
3628 binder_alloc_copy_to_buffer(&target_proc->alloc,
3629 t->buffer,
3630 object_offset,
3631 fp, sizeof(*fp))) {
3632 return_error = BR_FAILED_REPLY;
3633 return_error_param = ret;
3634 return_error_line = __LINE__;
3635 goto err_translate_failed;
3636 }
3637 } break;
3638 case BINDER_TYPE_FDA: {
3639 struct binder_object ptr_object;
3640 binder_size_t parent_offset;
3641 struct binder_object user_object;
3642 size_t user_parent_size;
3643 struct binder_fd_array_object *fda =
3644 to_binder_fd_array_object(hdr);
3645 size_t num_valid = (buffer_offset - off_start_offset) /
3646 sizeof(binder_size_t);
3647 struct binder_buffer_object *parent =
3648 binder_validate_ptr(target_proc, t->buffer,
3649 &ptr_object, fda->parent,
3650 off_start_offset,
3651 &parent_offset,
3652 num_valid);
3653 if (!parent) {
3654 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3655 proc->pid, thread->pid);
3656 return_error = BR_FAILED_REPLY;
3657 return_error_param = -EINVAL;
3658 return_error_line = __LINE__;
3659 goto err_bad_parent;
3660 }
3661 if (!binder_validate_fixup(target_proc, t->buffer,
3662 off_start_offset,
3663 parent_offset,
3664 fda->parent_offset,
3665 last_fixup_obj_off,
3666 last_fixup_min_off)) {
3667 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3668 proc->pid, thread->pid);
3669 return_error = BR_FAILED_REPLY;
3670 return_error_param = -EINVAL;
3671 return_error_line = __LINE__;
3672 goto err_bad_parent;
3673 }
3674 /*
3675 * We need to read the user version of the parent
3676 * object to get the original user offset
3677 */
3678 user_parent_size =
3679 binder_get_object(proc, user_buffer, t->buffer,
3680 parent_offset, &user_object);
3681 if (user_parent_size != sizeof(user_object.bbo)) {
3682 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3683 proc->pid, thread->pid,
3684 user_parent_size,
3685 sizeof(user_object.bbo));
3686 return_error = BR_FAILED_REPLY;
3687 return_error_param = -EINVAL;
3688 return_error_line = __LINE__;
3689 goto err_bad_parent;
3690 }
3691 ret = binder_translate_fd_array(&pf_head, fda,
3692 user_buffer, parent,
3693 &user_object.bbo, t,
3694 thread, in_reply_to);
3695 if (!ret)
3696 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3697 t->buffer,
3698 object_offset,
3699 fda, sizeof(*fda));
3700 if (ret) {
3701 return_error = BR_FAILED_REPLY;
3702 return_error_param = ret > 0 ? -EINVAL : ret;
3703 return_error_line = __LINE__;
3704 goto err_translate_failed;
3705 }
3706 last_fixup_obj_off = parent_offset;
3707 last_fixup_min_off =
3708 fda->parent_offset + sizeof(u32) * fda->num_fds;
3709 } break;
3710 case BINDER_TYPE_PTR: {
3711 struct binder_buffer_object *bp =
3712 to_binder_buffer_object(hdr);
3713 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3714 size_t num_valid;
3715
3716 if (bp->length > buf_left) {
3717 binder_user_error("%d:%d got transaction with too large buffer\n",
3718 proc->pid, thread->pid);
3719 return_error = BR_FAILED_REPLY;
3720 return_error_param = -EINVAL;
3721 return_error_line = __LINE__;
3722 goto err_bad_offset;
3723 }
3724 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3725 (const void __user *)(uintptr_t)bp->buffer,
3726 bp->length);
3727 if (ret) {
3728 return_error = BR_FAILED_REPLY;
3729 return_error_param = ret;
3730 return_error_line = __LINE__;
3731 goto err_translate_failed;
3732 }
3733 /* Fixup buffer pointer to target proc address space */
3734 bp->buffer = (uintptr_t)
3735 t->buffer->user_data + sg_buf_offset;
3736 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3737
3738 num_valid = (buffer_offset - off_start_offset) /
3739 sizeof(binder_size_t);
3740 ret = binder_fixup_parent(&pf_head, t,
3741 thread, bp,
3742 off_start_offset,
3743 num_valid,
3744 last_fixup_obj_off,
3745 last_fixup_min_off);
3746 if (ret < 0 ||
3747 binder_alloc_copy_to_buffer(&target_proc->alloc,
3748 t->buffer,
3749 object_offset,
3750 bp, sizeof(*bp))) {
3751 return_error = BR_FAILED_REPLY;
3752 return_error_param = ret;
3753 return_error_line = __LINE__;
3754 goto err_translate_failed;
3755 }
3756 last_fixup_obj_off = object_offset;
3757 last_fixup_min_off = 0;
3758 } break;
3759 default:
3760 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3761 proc->pid, thread->pid, hdr->type);
3762 return_error = BR_FAILED_REPLY;
3763 return_error_param = -EINVAL;
3764 return_error_line = __LINE__;
3765 goto err_bad_object_type;
3766 }
3767 }
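/*
 * What the loop above consumes, seen from the sender's side: the
 * offsets array lists the byte offset of every object header embedded
 * in the data buffer. Hypothetical user-space construction (names,
 * sizes and offsets illustrative):
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder = (binder_uintptr_t)local_obj,	// assumed local ptr
 *	};
 *	char data[64] = {};
 *	binder_size_t offs[1] = { 16 };		// obj placed at data + 16
 *	memcpy(data + offs[0], &obj, sizeof(obj));
 */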
3768 /* Done processing objects, copy the rest of the buffer */
3769 if (binder_alloc_copy_user_to_buffer(
3770 &target_proc->alloc,
3771 t->buffer, user_offset,
3772 user_buffer + user_offset,
3773 tr->data_size - user_offset)) {
3774 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3775 proc->pid, thread->pid);
3776 return_error = BR_FAILED_REPLY;
3777 return_error_param = -EFAULT;
3778 return_error_line = __LINE__;
3779 goto err_copy_data_failed;
3780 }
3781
3782 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3783 &sgc_head, &pf_head);
3784 if (ret) {
3785 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3786 proc->pid, thread->pid);
3787 return_error = BR_FAILED_REPLY;
3788 return_error_param = ret;
3789 return_error_line = __LINE__;
3790 goto err_copy_data_failed;
3791 }
3792 if (t->buffer->oneway_spam_suspect)
3793 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3794 else
3795 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3796 t->work.type = BINDER_WORK_TRANSACTION;
3797
3798 if (reply) {
3799 binder_enqueue_thread_work(thread, tcomplete);
3800 binder_inner_proc_lock(target_proc);
3801 if (target_thread->is_dead) {
3802 return_error = BR_DEAD_REPLY;
3803 binder_inner_proc_unlock(target_proc);
3804 goto err_dead_proc_or_thread;
3805 }
3806 BUG_ON(t->buffer->async_transaction != 0);
3807 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3808 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3809 target_proc->outstanding_txns++;
3810 binder_inner_proc_unlock(target_proc);
3811 if (in_reply_to->is_nested) {
3812 spin_lock(&thread->prio_lock);
3813 thread->prio_state = BINDER_PRIO_PENDING;
3814 thread->prio_next = in_reply_to->saved_priority;
3815 spin_unlock(&thread->prio_lock);
3816 }
3817 wake_up_interruptible_sync(&target_thread->wait);
3818 trace_android_vh_binder_restore_priority(in_reply_to, current);
3819 binder_restore_priority(thread, &in_reply_to->saved_priority);
3820 binder_free_transaction(in_reply_to);
3821 } else if (!(t->flags & TF_ONE_WAY)) {
3822 BUG_ON(t->buffer->async_transaction != 0);
3823 binder_inner_proc_lock(proc);
3824 /*
3825 * Defer the TRANSACTION_COMPLETE, so we don't return to
3826 * userspace immediately; this allows the target process to
3827 * immediately start processing this transaction, reducing
3828 * latency. We will then return the TRANSACTION_COMPLETE when
3829 * the target replies (or there is an error).
3830 */
3831 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3832 t->need_reply = 1;
3833 t->from_parent = thread->transaction_stack;
3834 thread->transaction_stack = t;
3835 binder_inner_proc_unlock(proc);
3836 return_error = binder_proc_transaction(t,
3837 target_proc, target_thread);
3838 if (return_error) {
3839 binder_inner_proc_lock(proc);
3840 binder_pop_transaction_ilocked(thread, t);
3841 binder_inner_proc_unlock(proc);
3842 goto err_dead_proc_or_thread;
3843 }
3844 } else {
3845 BUG_ON(target_node == NULL);
3846 BUG_ON(t->buffer->async_transaction != 1);
3847 return_error = binder_proc_transaction(t, target_proc, NULL);
3848 /*
3849 * Let the caller know when async transaction reaches a frozen
3850 * process and is put in a pending queue, waiting for the target
3851 * process to be unfrozen.
3852 */
3853 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3854 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3855 binder_enqueue_thread_work(thread, tcomplete);
3856 if (return_error &&
3857 return_error != BR_TRANSACTION_PENDING_FROZEN)
3858 goto err_dead_proc_or_thread;
3859 }
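/*
 * With the deferred tcomplete above, a synchronous caller typically
 * observes this ordering (assuming no errors; exact delivery timing
 * can vary):
 *
 *	write BC_TRANSACTION
 *	... target runs and writes BC_REPLY ...
 *	read  BR_TRANSACTION_COMPLETE	(deferred work, delivered late)
 *	read  BR_REPLY
 *
 * while a one-way caller gets BR_TRANSACTION_COMPLETE (or
 * BR_TRANSACTION_PENDING_FROZEN) without waiting on the target.
 */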
3860 if (target_thread)
3861 binder_thread_dec_tmpref(target_thread);
3862 binder_proc_dec_tmpref(target_proc);
3863 if (target_node)
3864 binder_dec_node_tmpref(target_node);
3865 /*
3866 * write barrier to synchronize with initialization
3867 * of log entry
3868 */
3869 smp_wmb();
3870 WRITE_ONCE(e->debug_id_done, t_debug_id);
3871 return;
3872
3873 err_dead_proc_or_thread:
3874 return_error_line = __LINE__;
3875 binder_dequeue_work(proc, tcomplete);
3876 err_translate_failed:
3877 err_bad_object_type:
3878 err_bad_offset:
3879 err_bad_parent:
3880 err_copy_data_failed:
3881 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3882 binder_free_txn_fixups(t);
3883 trace_binder_transaction_failed_buffer_release(t->buffer);
3884 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3885 buffer_offset, true);
3886 if (target_node)
3887 binder_dec_node_tmpref(target_node);
3888 target_node = NULL;
3889 t->buffer->transaction = NULL;
3890 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3891 err_binder_alloc_buf_failed:
3892 err_bad_extra_size:
3893 if (secctx)
3894 security_release_secctx(secctx, secctx_sz);
3895 err_get_secctx_failed:
3896 kfree(tcomplete);
3897 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3898 err_alloc_tcomplete_failed:
3899 if (trace_binder_txn_latency_free_enabled())
3900 binder_txn_latency_free(t);
3901 kfree(t);
3902 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3903 err_alloc_t_failed:
3904 err_bad_todo_list:
3905 err_bad_call_stack:
3906 err_empty_call_stack:
3907 err_dead_binder:
3908 err_invalid_target_handle:
3909 if (target_thread)
3910 binder_thread_dec_tmpref(target_thread);
3911 if (target_proc)
3912 binder_proc_dec_tmpref(target_proc);
3913 if (target_node) {
3914 binder_dec_node(target_node, 1, 0);
3915 binder_dec_node_tmpref(target_node);
3916 }
3917
3918 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3919 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3920 proc->pid, thread->pid, return_error, return_error_param,
3921 (u64)tr->data_size, (u64)tr->offsets_size,
3922 return_error_line);
3923
3924 {
3925 struct binder_transaction_log_entry *fe;
3926
3927 e->return_error = return_error;
3928 e->return_error_param = return_error_param;
3929 e->return_error_line = return_error_line;
3930 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3931 *fe = *e;
3932 /*
3933 * write barrier to synchronize with initialization
3934 * of log entry
3935 */
3936 smp_wmb();
3937 WRITE_ONCE(e->debug_id_done, t_debug_id);
3938 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3939 }
3940
3941 BUG_ON(thread->return_error.cmd != BR_OK);
3942 if (in_reply_to) {
3943 trace_android_vh_binder_restore_priority(in_reply_to, current);
3944 binder_restore_priority(thread, &in_reply_to->saved_priority);
3945 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3946 binder_enqueue_thread_work(thread, &thread->return_error.work);
3947 binder_send_failed_reply(in_reply_to, return_error);
3948 } else {
3949 thread->return_error.cmd = return_error;
3950 binder_enqueue_thread_work(thread, &thread->return_error.work);
3951 }
3952 }
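/*
 * For orientation, a pared-down user-space sketch of driving the
 * function above via the BINDER_WRITE_READ ioctl. The fd, handle and
 * payload are hypothetical; error handling is elided:
 *
 *	char payload[32] = "hello";
 *	struct {
 *		uint32_t cmd;
 *		struct binder_transaction_data tr;
 *	} __attribute__((packed)) w = {
 *		.cmd = BC_TRANSACTION,
 *		.tr = {
 *			.target.handle = 1,	// assumed remote handle
 *			.code = 42,
 *			.data_size = sizeof(payload),
 *			.data.ptr.buffer = (binder_uintptr_t)payload,
 *		},
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(w),
 *		.write_buffer = (binder_uintptr_t)&w,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */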
3953
3954 /**
3955 * binder_free_buf() - free the specified buffer
3956 * @proc: binder proc that owns buffer
3957 * @buffer: buffer to be freed
3958 * @is_failure: true if the transaction failed to send
3959 *
3960 * If the buffer belongs to an async transaction, enqueue the next
3961 * async transaction from the node.
3962 *
3963 * Clean up the buffer and free it.
3964 */
3965 static void
3966 binder_free_buf(struct binder_proc *proc,
3967 struct binder_thread *thread,
3968 struct binder_buffer *buffer, bool is_failure)
3969 {
3970 bool enqueue_task = true;
3971
3972 trace_android_vh_binder_free_buf(proc, thread, buffer);
3973 binder_inner_proc_lock(proc);
3974 if (buffer->transaction) {
3975 buffer->transaction->buffer = NULL;
3976 buffer->transaction = NULL;
3977 }
3978 binder_inner_proc_unlock(proc);
3979 if (buffer->async_transaction && buffer->target_node) {
3980 struct binder_node *buf_node;
3981 struct binder_work *w;
3982
3983 buf_node = buffer->target_node;
3984 binder_node_inner_lock(buf_node);
3985 BUG_ON(!buf_node->has_async_transaction);
3986 BUG_ON(buf_node->proc != proc);
3987 w = binder_dequeue_work_head_ilocked(
3988 &buf_node->async_todo);
3989 if (!w) {
3990 buf_node->has_async_transaction = false;
3991 } else {
3992 trace_android_vh_binder_special_task(NULL, proc, thread, w,
3993 &proc->todo, false, &enqueue_task);
3994 if (enqueue_task)
3995 binder_enqueue_work_ilocked(w, &proc->todo);
3996 binder_wakeup_proc_ilocked(proc);
3997 }
3998 binder_node_inner_unlock(buf_node);
3999 }
4000 trace_binder_transaction_buffer_release(buffer);
4001 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4002 binder_alloc_free_buf(&proc->alloc, buffer);
4003 }
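/*
 * User space returns a consumed transaction buffer with BC_FREE_BUFFER,
 * which reaches binder_free_buf() above via binder_thread_write()
 * below. Hedged sketch; the pointer is the tr.data.ptr.buffer from the
 * received BR_TRANSACTION:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buf;
 *	} __attribute__((packed)) w = {
 *		.cmd = BC_FREE_BUFFER,
 *		.buf = tr.data.ptr.buffer,
 *	};
 *	// submitted through struct binder_write_read as usual
 */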
4004
4005 static int binder_thread_write(struct binder_proc *proc,
4006 struct binder_thread *thread,
4007 binder_uintptr_t binder_buffer, size_t size,
4008 binder_size_t *consumed)
4009 {
4010 uint32_t cmd;
4011 struct binder_context *context = proc->context;
4012 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4013 void __user *ptr = buffer + *consumed;
4014 void __user *end = buffer + size;
4015
4016 while (ptr < end && thread->return_error.cmd == BR_OK) {
4017 int ret;
4018
4019 if (get_user(cmd, (uint32_t __user *)ptr))
4020 return -EFAULT;
4021 ptr += sizeof(uint32_t);
4022 trace_binder_command(cmd);
4023 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4024 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4025 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4026 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4027 }
4028 switch (cmd) {
4029 case BC_INCREFS:
4030 case BC_ACQUIRE:
4031 case BC_RELEASE:
4032 case BC_DECREFS: {
4033 uint32_t target;
4034 const char *debug_string;
4035 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4036 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4037 struct binder_ref_data rdata;
4038
4039 if (get_user(target, (uint32_t __user *)ptr))
4040 return -EFAULT;
4041
4042 ptr += sizeof(uint32_t);
4043 ret = -1;
4044 if (increment && !target) {
4045 struct binder_node *ctx_mgr_node;
4046
4047 mutex_lock(&context->context_mgr_node_lock);
4048 ctx_mgr_node = context->binder_context_mgr_node;
4049 if (ctx_mgr_node) {
4050 if (ctx_mgr_node->proc == proc) {
4051 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4052 proc->pid, thread->pid);
4053 mutex_unlock(&context->context_mgr_node_lock);
4054 return -EINVAL;
4055 }
4056 ret = binder_inc_ref_for_node(
4057 proc, ctx_mgr_node,
4058 strong, NULL, &rdata);
4059 }
4060 mutex_unlock(&context->context_mgr_node_lock);
4061 }
4062 if (ret)
4063 ret = binder_update_ref_for_handle(
4064 proc, target, increment, strong,
4065 &rdata);
4066 if (!ret && rdata.desc != target) {
4067 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4068 proc->pid, thread->pid,
4069 target, rdata.desc);
4070 }
4071 switch (cmd) {
4072 case BC_INCREFS:
4073 debug_string = "IncRefs";
4074 break;
4075 case BC_ACQUIRE:
4076 debug_string = "Acquire";
4077 break;
4078 case BC_RELEASE:
4079 debug_string = "Release";
4080 break;
4081 case BC_DECREFS:
4082 default:
4083 debug_string = "DecRefs";
4084 break;
4085 }
4086 if (ret) {
4087 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4088 proc->pid, thread->pid, debug_string,
4089 strong, target, ret);
4090 break;
4091 }
4092 binder_debug(BINDER_DEBUG_USER_REFS,
4093 "%d:%d %s ref %d desc %d s %d w %d\n",
4094 proc->pid, thread->pid, debug_string,
4095 rdata.debug_id, rdata.desc, rdata.strong,
4096 rdata.weak);
4097 break;
4098 }
4099 case BC_INCREFS_DONE:
4100 case BC_ACQUIRE_DONE: {
4101 binder_uintptr_t node_ptr;
4102 binder_uintptr_t cookie;
4103 struct binder_node *node;
4104 bool free_node;
4105
4106 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4107 return -EFAULT;
4108 ptr += sizeof(binder_uintptr_t);
4109 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4110 return -EFAULT;
4111 ptr += sizeof(binder_uintptr_t);
4112 node = binder_get_node(proc, node_ptr);
4113 if (node == NULL) {
4114 binder_user_error("%d:%d %s u%016llx no match\n",
4115 proc->pid, thread->pid,
4116 cmd == BC_INCREFS_DONE ?
4117 "BC_INCREFS_DONE" :
4118 "BC_ACQUIRE_DONE",
4119 (u64)node_ptr);
4120 break;
4121 }
4122 if (cookie != node->cookie) {
4123 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4124 proc->pid, thread->pid,
4125 cmd == BC_INCREFS_DONE ?
4126 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4127 (u64)node_ptr, node->debug_id,
4128 (u64)cookie, (u64)node->cookie);
4129 binder_put_node(node);
4130 break;
4131 }
4132 binder_node_inner_lock(node);
4133 if (cmd == BC_ACQUIRE_DONE) {
4134 if (node->pending_strong_ref == 0) {
4135 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4136 proc->pid, thread->pid,
4137 node->debug_id);
4138 binder_node_inner_unlock(node);
4139 binder_put_node(node);
4140 break;
4141 }
4142 node->pending_strong_ref = 0;
4143 } else {
4144 if (node->pending_weak_ref == 0) {
4145 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4146 proc->pid, thread->pid,
4147 node->debug_id);
4148 binder_node_inner_unlock(node);
4149 binder_put_node(node);
4150 break;
4151 }
4152 node->pending_weak_ref = 0;
4153 }
4154 free_node = binder_dec_node_nilocked(node,
4155 cmd == BC_ACQUIRE_DONE, 0);
4156 WARN_ON(free_node);
4157 binder_debug(BINDER_DEBUG_USER_REFS,
4158 "%d:%d %s node %d ls %d lw %d tr %d\n",
4159 proc->pid, thread->pid,
4160 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4161 node->debug_id, node->local_strong_refs,
4162 node->local_weak_refs, node->tmp_refs);
4163 binder_node_inner_unlock(node);
4164 binder_put_node(node);
4165 break;
4166 }
4167 case BC_ATTEMPT_ACQUIRE:
4168 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4169 return -EINVAL;
4170 case BC_ACQUIRE_RESULT:
4171 pr_err("BC_ACQUIRE_RESULT not supported\n");
4172 return -EINVAL;
4173
4174 case BC_FREE_BUFFER: {
4175 binder_uintptr_t data_ptr;
4176 struct binder_buffer *buffer;
4177
4178 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4179 return -EFAULT;
4180 ptr += sizeof(binder_uintptr_t);
4181
4182 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4183 data_ptr);
4184 if (IS_ERR_OR_NULL(buffer)) {
4185 if (PTR_ERR(buffer) == -EPERM) {
4186 binder_user_error(
4187 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4188 proc->pid, thread->pid,
4189 (u64)data_ptr);
4190 } else {
4191 binder_user_error(
4192 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4193 proc->pid, thread->pid,
4194 (u64)data_ptr);
4195 }
4196 break;
4197 }
4198 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4199 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4200 proc->pid, thread->pid, (u64)data_ptr,
4201 buffer->debug_id,
4202 buffer->transaction ? "active" : "finished");
4203 binder_free_buf(proc, thread, buffer, false);
4204 break;
4205 }
4206
4207 case BC_TRANSACTION_SG:
4208 case BC_REPLY_SG: {
4209 struct binder_transaction_data_sg tr;
4210
4211 if (copy_from_user(&tr, ptr, sizeof(tr)))
4212 return -EFAULT;
4213 ptr += sizeof(tr);
4214 binder_transaction(proc, thread, &tr.transaction_data,
4215 cmd == BC_REPLY_SG, tr.buffers_size);
4216 break;
4217 }
4218 case BC_TRANSACTION:
4219 case BC_REPLY: {
4220 struct binder_transaction_data tr;
4221
4222 if (copy_from_user(&tr, ptr, sizeof(tr)))
4223 return -EFAULT;
4224 ptr += sizeof(tr);
4225 binder_transaction(proc, thread, &tr,
4226 cmd == BC_REPLY, 0);
4227 break;
4228 }
4229
4230 case BC_REGISTER_LOOPER:
4231 binder_debug(BINDER_DEBUG_THREADS,
4232 "%d:%d BC_REGISTER_LOOPER\n",
4233 proc->pid, thread->pid);
4234 binder_inner_proc_lock(proc);
4235 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4236 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4237 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4238 proc->pid, thread->pid);
4239 } else if (proc->requested_threads == 0) {
4240 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4241 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4242 proc->pid, thread->pid);
4243 } else {
4244 proc->requested_threads--;
4245 proc->requested_threads_started++;
4246 }
4247 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4248 binder_inner_proc_unlock(proc);
4249 trace_android_vh_binder_looper_state_registered(thread, proc);
4250 break;
4251 case BC_ENTER_LOOPER:
4252 binder_debug(BINDER_DEBUG_THREADS,
4253 "%d:%d BC_ENTER_LOOPER\n",
4254 proc->pid, thread->pid);
4255 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4256 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4257 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4258 proc->pid, thread->pid);
4259 }
4260 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4261 break;
4262 case BC_EXIT_LOOPER:
4263 binder_debug(BINDER_DEBUG_THREADS,
4264 "%d:%d BC_EXIT_LOOPER\n",
4265 proc->pid, thread->pid);
4266 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4267 break;
4268
4269 case BC_REQUEST_DEATH_NOTIFICATION:
4270 case BC_CLEAR_DEATH_NOTIFICATION: {
4271 uint32_t target;
4272 binder_uintptr_t cookie;
4273 struct binder_ref *ref;
4274 struct binder_ref_death *death = NULL;
4275
4276 if (get_user(target, (uint32_t __user *)ptr))
4277 return -EFAULT;
4278 ptr += sizeof(uint32_t);
4279 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4280 return -EFAULT;
4281 ptr += sizeof(binder_uintptr_t);
4282 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4283 /*
4284 * Allocate memory for death notification
4285 * before taking lock
4286 */
4287 death = kzalloc(sizeof(*death), GFP_KERNEL);
4288 if (death == NULL) {
4289 WARN_ON(thread->return_error.cmd !=
4290 BR_OK);
4291 thread->return_error.cmd = BR_ERROR;
4292 binder_enqueue_thread_work(
4293 thread,
4294 &thread->return_error.work);
4295 binder_debug(
4296 BINDER_DEBUG_FAILED_TRANSACTION,
4297 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4298 proc->pid, thread->pid);
4299 break;
4300 }
4301 }
4302 binder_proc_lock(proc);
4303 ref = binder_get_ref_olocked(proc, target, false);
4304 if (ref == NULL) {
4305 binder_user_error("%d:%d %s invalid ref %d\n",
4306 proc->pid, thread->pid,
4307 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4308 "BC_REQUEST_DEATH_NOTIFICATION" :
4309 "BC_CLEAR_DEATH_NOTIFICATION",
4310 target);
4311 binder_proc_unlock(proc);
4312 kfree(death);
4313 break;
4314 }
4315
4316 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4317 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4318 proc->pid, thread->pid,
4319 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4320 "BC_REQUEST_DEATH_NOTIFICATION" :
4321 "BC_CLEAR_DEATH_NOTIFICATION",
4322 (u64)cookie, ref->data.debug_id,
4323 ref->data.desc, ref->data.strong,
4324 ref->data.weak, ref->node->debug_id);
4325
4326 binder_node_lock(ref->node);
4327 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4328 if (ref->death) {
4329 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4330 proc->pid, thread->pid);
4331 binder_node_unlock(ref->node);
4332 binder_proc_unlock(proc);
4333 kfree(death);
4334 break;
4335 }
4336 binder_stats_created(BINDER_STAT_DEATH);
4337 INIT_LIST_HEAD(&death->work.entry);
4338 death->cookie = cookie;
4339 ref->death = death;
4340 if (ref->node->proc == NULL) {
4341 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4342
4343 binder_inner_proc_lock(proc);
4344 binder_enqueue_work_ilocked(
4345 &ref->death->work, &proc->todo);
4346 binder_wakeup_proc_ilocked(proc);
4347 binder_inner_proc_unlock(proc);
4348 }
4349 } else {
4350 if (ref->death == NULL) {
4351 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4352 proc->pid, thread->pid);
4353 binder_node_unlock(ref->node);
4354 binder_proc_unlock(proc);
4355 break;
4356 }
4357 death = ref->death;
4358 if (death->cookie != cookie) {
4359 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4360 proc->pid, thread->pid,
4361 (u64)death->cookie,
4362 (u64)cookie);
4363 binder_node_unlock(ref->node);
4364 binder_proc_unlock(proc);
4365 break;
4366 }
4367 ref->death = NULL;
4368 binder_inner_proc_lock(proc);
4369 if (list_empty(&death->work.entry)) {
4370 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4371 if (thread->looper &
4372 (BINDER_LOOPER_STATE_REGISTERED |
4373 BINDER_LOOPER_STATE_ENTERED))
4374 binder_enqueue_thread_work_ilocked(
4375 thread,
4376 &death->work);
4377 else {
4378 binder_enqueue_work_ilocked(
4379 &death->work,
4380 &proc->todo);
4381 binder_wakeup_proc_ilocked(
4382 proc);
4383 }
4384 } else {
4385 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4386 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4387 }
4388 binder_inner_proc_unlock(proc);
4389 }
4390 binder_node_unlock(ref->node);
4391 binder_proc_unlock(proc);
4392 } break;
4393 case BC_DEAD_BINDER_DONE: {
4394 struct binder_work *w;
4395 binder_uintptr_t cookie;
4396 struct binder_ref_death *death = NULL;
4397
4398 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4399 return -EFAULT;
4400
4401 ptr += sizeof(cookie);
4402 binder_inner_proc_lock(proc);
4403 list_for_each_entry(w, &proc->delivered_death,
4404 entry) {
4405 struct binder_ref_death *tmp_death =
4406 container_of(w,
4407 struct binder_ref_death,
4408 work);
4409
4410 if (tmp_death->cookie == cookie) {
4411 death = tmp_death;
4412 break;
4413 }
4414 }
4415 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4416 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4417 proc->pid, thread->pid, (u64)cookie,
4418 death);
4419 if (death == NULL) {
4420 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4421 proc->pid, thread->pid, (u64)cookie);
4422 binder_inner_proc_unlock(proc);
4423 break;
4424 }
4425 binder_dequeue_work_ilocked(&death->work);
4426 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4427 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4428 if (thread->looper &
4429 (BINDER_LOOPER_STATE_REGISTERED |
4430 BINDER_LOOPER_STATE_ENTERED))
4431 binder_enqueue_thread_work_ilocked(
4432 thread, &death->work);
4433 else {
4434 binder_enqueue_work_ilocked(
4435 &death->work,
4436 &proc->todo);
4437 binder_wakeup_proc_ilocked(proc);
4438 }
4439 }
4440 binder_inner_proc_unlock(proc);
4441 } break;
4442
4443 default:
4444 pr_err("%d:%d unknown command %d\n",
4445 proc->pid, thread->pid, cmd);
4446 return -EINVAL;
4447 }
4448 *consumed = ptr - buffer;
4449 }
4450 return 0;
4451 }
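/*
 * The function above parses a simple tagged stream: a 32-bit BC_* code
 * followed by that command's fixed-size payload, repeated until
 * *consumed reaches size. A user-space writer batches commands the
 * same way (illustrative; buf_ptr is an assumed buffer address):
 *
 *	uint8_t out[64];
 *	size_t n = 0;
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	memcpy(out + n, &cmd, sizeof(cmd)); n += sizeof(cmd);
 *	cmd = BC_FREE_BUFFER;
 *	memcpy(out + n, &cmd, sizeof(cmd)); n += sizeof(cmd);
 *	memcpy(out + n, &buf_ptr, sizeof(buf_ptr)); n += sizeof(buf_ptr);
 *	// submit with write_size = n, write_buffer = (binder_uintptr_t)out
 */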
4452
4453 static void binder_stat_br(struct binder_proc *proc,
4454 struct binder_thread *thread, uint32_t cmd)
4455 {
4456 trace_binder_return(cmd);
4457 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4458 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4459 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4460 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4461 }
4462 }
4463
4464 static int binder_put_node_cmd(struct binder_proc *proc,
4465 struct binder_thread *thread,
4466 void __user **ptrp,
4467 binder_uintptr_t node_ptr,
4468 binder_uintptr_t node_cookie,
4469 int node_debug_id,
4470 uint32_t cmd, const char *cmd_name)
4471 {
4472 void __user *ptr = *ptrp;
4473
4474 if (put_user(cmd, (uint32_t __user *)ptr))
4475 return -EFAULT;
4476 ptr += sizeof(uint32_t);
4477
4478 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4479 return -EFAULT;
4480 ptr += sizeof(binder_uintptr_t);
4481
4482 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4483 return -EFAULT;
4484 ptr += sizeof(binder_uintptr_t);
4485
4486 binder_stat_br(proc, thread, cmd);
4487 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4488 proc->pid, thread->pid, cmd_name, node_debug_id,
4489 (u64)node_ptr, (u64)node_cookie);
4490
4491 *ptrp = ptr;
4492 return 0;
4493 }
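/*
 * binder_put_node_cmd() emits BR_INCREFS/BR_ACQUIRE/BR_RELEASE/
 * BR_DECREFS, each followed by the node's (ptr, cookie) pair. User
 * space acknowledges the first two by echoing the pair back (sketch;
 * received_pc is assumed to have been read from the stream as a
 * struct binder_ptr_cookie):
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_ptr_cookie pc;
 *	} __attribute__((packed)) ack = {
 *		.cmd = BC_ACQUIRE_DONE,	// or BC_INCREFS_DONE for BR_INCREFS
 *		.pc = received_pc,
 *	};
 */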
4494
4495 static int binder_wait_for_work(struct binder_thread *thread,
4496 bool do_proc_work)
4497 {
4498 DEFINE_WAIT(wait);
4499 struct binder_proc *proc = thread->proc;
4500 int ret = 0;
4501
4502 freezer_do_not_count();
4503 binder_inner_proc_lock(proc);
4504 for (;;) {
4505 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4506 if (binder_has_work_ilocked(thread, do_proc_work))
4507 break;
4508 if (do_proc_work)
4509 list_add(&thread->waiting_thread_node,
4510 &proc->waiting_threads);
4511 trace_android_vh_binder_wait_for_work(do_proc_work, thread, proc);
4512 binder_inner_proc_unlock(proc);
4513 schedule();
4514 binder_inner_proc_lock(proc);
4515 list_del_init(&thread->waiting_thread_node);
4516 if (signal_pending(current)) {
4517 ret = -EINTR;
4518 break;
4519 }
4520 }
4521 finish_wait(&thread->wait, &wait);
4522 binder_inner_proc_unlock(proc);
4523 freezer_count();
4524
4525 return ret;
4526 }
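/*
 * binder_wait_for_work() above is the standard prepare_to_wait() loop:
 * test the condition under the lock, publish the waiter, drop the
 * lock, schedule(), retake the lock and re-test. Stripped-down sketch
 * of the idiom with generic names (illustrative, not driver code):
 *
 *	DEFINE_WAIT(wait);
 *	spin_lock(&lock);
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		spin_unlock(&lock);
 *		schedule();
 *		spin_lock(&lock);
 *		if (signal_pending(current))
 *			break;		// caller returns -EINTR
 *	}
 *	finish_wait(&wq, &wait);
 *	spin_unlock(&lock);
 */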
4527
4528 /**
4529 * binder_apply_fd_fixups() - finish fd translation
4530 * @proc: binder_proc associated with @t->buffer
4531 * @t: binder transaction with list of fd fixups
4532 *
4533 * Now that we are in the context of the transaction target
4534 * process, we can allocate and install fds. Process the
4535 * list of fds to translate and fixup the buffer with the
4536 * new fds.
4537 *
4538 * If we fail to allocate an fd, then free the resources by
4539 * fput'ing files that have not been processed and ksys_close'ing
4540 * any fds that have already been allocated.
4541 */
4542 static int binder_apply_fd_fixups(struct binder_proc *proc,
4543 struct binder_transaction *t)
4544 {
4545 struct binder_txn_fd_fixup *fixup, *tmp;
4546 int ret = 0;
4547
4548 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4549 int fd = get_unused_fd_flags(O_CLOEXEC);
4550
4551 if (fd < 0) {
4552 binder_debug(BINDER_DEBUG_TRANSACTION,
4553 "failed fd fixup txn %d fd %d\n",
4554 t->debug_id, fd);
4555 ret = -ENOMEM;
4556 break;
4557 }
4558 binder_debug(BINDER_DEBUG_TRANSACTION,
4559 "fd fixup txn %d fd %d\n",
4560 t->debug_id, fd);
4561 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4562 fd_install(fd, fixup->file);
4563 fixup->file = NULL;
4564 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4565 fixup->offset, &fd,
4566 sizeof(u32))) {
4567 ret = -EINVAL;
4568 break;
4569 }
4570 }
4571 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4572 if (fixup->file) {
4573 fput(fixup->file);
4574 } else if (ret) {
4575 u32 fd;
4576 int err;
4577
4578 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4579 t->buffer,
4580 fixup->offset,
4581 sizeof(fd));
4582 WARN_ON(err);
4583 if (!err)
4584 binder_deferred_fd_close(fd);
4585 }
4586 list_del(&fixup->fixup_entry);
4587 kfree(fixup);
4588 }
4589
4590 return ret;
4591 }
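/*
 * Once the fixups above have run, every BINDER_TYPE_FD object in the
 * delivered buffer carries an fd valid in the receiver's table. Hedged
 * user-space sketch of consuming one (obj_off comes from the received
 * offsets array; use_fd() is hypothetical):
 *
 *	struct binder_fd_object fdo;
 *	memcpy(&fdo, (void *)(uintptr_t)(tr.data.ptr.buffer + obj_off),
 *	       sizeof(fdo));
 *	if (fdo.hdr.type == BINDER_TYPE_FD)
 *		use_fd(fdo.fd);
 */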
4592
4593 static int binder_thread_read(struct binder_proc *proc,
4594 struct binder_thread *thread,
4595 binder_uintptr_t binder_buffer, size_t size,
4596 binder_size_t *consumed, int non_block)
4597 {
4598 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4599 void __user *ptr = buffer + *consumed;
4600 void __user *end = buffer + size;
4601
4602 int ret = 0;
4603 int wait_for_proc_work;
4604
4605 if (*consumed == 0) {
4606 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4607 return -EFAULT;
4608 ptr += sizeof(uint32_t);
4609 }
4610
4611 retry:
4612 binder_inner_proc_lock(proc);
4613 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4614 binder_inner_proc_unlock(proc);
4615
4616 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4617
4618 trace_binder_wait_for_work(wait_for_proc_work,
4619 !!thread->transaction_stack,
4620 !binder_worklist_empty(proc, &thread->todo));
4621 if (wait_for_proc_work) {
4622 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4623 BINDER_LOOPER_STATE_ENTERED))) {
4624 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4625 proc->pid, thread->pid, thread->looper);
4626 wait_event_interruptible(binder_user_error_wait,
4627 binder_stop_on_user_error < 2);
4628 }
4629 trace_android_vh_binder_restore_priority(NULL, current);
4630 binder_restore_priority(thread, &proc->default_priority);
4631 }
4632
4633 if (non_block) {
4634 if (!binder_has_work(thread, wait_for_proc_work))
4635 ret = -EAGAIN;
4636 } else {
4637 ret = binder_wait_for_work(thread, wait_for_proc_work);
4638 }
4639
4640 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4641
4642 if (ret)
4643 return ret;
4644
4645 while (1) {
4646 uint32_t cmd;
4647 struct binder_transaction_data_secctx tr;
4648 struct binder_transaction_data *trd = &tr.transaction_data;
4649 struct binder_work *w = NULL;
4650 struct list_head *list = NULL;
4651 struct binder_transaction *t = NULL;
4652 struct binder_thread *t_from;
4653 size_t trsize = sizeof(*trd);
4654
4655 binder_inner_proc_lock(proc);
4656 trace_android_vh_binder_select_worklist_ilocked(&list, thread,
4657 proc, wait_for_proc_work);
4658 if (list)
4659 goto skip;
4660 if (!binder_worklist_empty_ilocked(&thread->todo))
4661 list = &thread->todo;
4662 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4663 wait_for_proc_work)
4664 list = &proc->todo;
4665 else {
4666 binder_inner_proc_unlock(proc);
4667
4668 /* no data added */
4669 if (ptr - buffer == 4 && !thread->looper_need_return)
4670 goto retry;
4671 break;
4672 }
4673 skip:
4674 if (end - ptr < sizeof(tr) + 4) {
4675 binder_inner_proc_unlock(proc);
4676 break;
4677 }
4678 trace_android_vh_binder_thread_read(&list, proc, thread);
4679 w = binder_dequeue_work_head_ilocked(list);
4680 if (binder_worklist_empty_ilocked(&thread->todo))
4681 thread->process_todo = false;
4682
4683 switch (w->type) {
4684 case BINDER_WORK_TRANSACTION: {
4685 binder_inner_proc_unlock(proc);
4686 t = container_of(w, struct binder_transaction, work);
4687 } break;
4688 case BINDER_WORK_RETURN_ERROR: {
4689 struct binder_error *e = container_of(
4690 w, struct binder_error, work);
4691
4692 WARN_ON(e->cmd == BR_OK);
4693 binder_inner_proc_unlock(proc);
4694 if (put_user(e->cmd, (uint32_t __user *)ptr))
4695 return -EFAULT;
4696 cmd = e->cmd;
4697 e->cmd = BR_OK;
4698 ptr += sizeof(uint32_t);
4699
4700 binder_stat_br(proc, thread, cmd);
4701 } break;
4702 case BINDER_WORK_TRANSACTION_COMPLETE:
4703 case BINDER_WORK_TRANSACTION_PENDING:
4704 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4705 if (proc->oneway_spam_detection_enabled &&
4706 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4707 cmd = BR_ONEWAY_SPAM_SUSPECT;
4708 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4709 cmd = BR_TRANSACTION_PENDING_FROZEN;
4710 else
4711 cmd = BR_TRANSACTION_COMPLETE;
4712 binder_inner_proc_unlock(proc);
4713 kfree(w);
4714 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4715 if (put_user(cmd, (uint32_t __user *)ptr))
4716 return -EFAULT;
4717 ptr += sizeof(uint32_t);
4718
4719 binder_stat_br(proc, thread, cmd);
4720 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4721 "%d:%d BR_TRANSACTION_COMPLETE\n",
4722 proc->pid, thread->pid);
4723 } break;
4724 case BINDER_WORK_NODE: {
4725 struct binder_node *node = container_of(w, struct binder_node, work);
4726 int strong, weak;
4727 binder_uintptr_t node_ptr = node->ptr;
4728 binder_uintptr_t node_cookie = node->cookie;
4729 int node_debug_id = node->debug_id;
4730 int has_weak_ref;
4731 int has_strong_ref;
4732 void __user *orig_ptr = ptr;
4733
4734 BUG_ON(proc != node->proc);
4735 strong = node->internal_strong_refs ||
4736 node->local_strong_refs;
4737 weak = !hlist_empty(&node->refs) ||
4738 node->local_weak_refs ||
4739 node->tmp_refs || strong;
4740 has_strong_ref = node->has_strong_ref;
4741 has_weak_ref = node->has_weak_ref;
4742
4743 if (weak && !has_weak_ref) {
4744 node->has_weak_ref = 1;
4745 node->pending_weak_ref = 1;
4746 node->local_weak_refs++;
4747 }
4748 if (strong && !has_strong_ref) {
4749 node->has_strong_ref = 1;
4750 node->pending_strong_ref = 1;
4751 node->local_strong_refs++;
4752 }
4753 if (!strong && has_strong_ref)
4754 node->has_strong_ref = 0;
4755 if (!weak && has_weak_ref)
4756 node->has_weak_ref = 0;
4757 if (!weak && !strong) {
4758 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4759 "%d:%d node %d u%016llx c%016llx deleted\n",
4760 proc->pid, thread->pid,
4761 node_debug_id,
4762 (u64)node_ptr,
4763 (u64)node_cookie);
4764 rb_erase(&node->rb_node, &proc->nodes);
4765 binder_inner_proc_unlock(proc);
4766 binder_node_lock(node);
4767 /*
4768 * Acquire the node lock before freeing the
4769 * node to serialize with other threads that
4770 * may have been holding the node lock while
4771 * decrementing this node (avoids race where
4772 * this thread frees while the other thread
4773 * is unlocking the node after the final
4774 * decrement)
4775 */
4776 binder_node_unlock(node);
4777 binder_free_node(node);
4778 } else
4779 binder_inner_proc_unlock(proc);
4780
4781 if (weak && !has_weak_ref)
4782 ret = binder_put_node_cmd(
4783 proc, thread, &ptr, node_ptr,
4784 node_cookie, node_debug_id,
4785 BR_INCREFS, "BR_INCREFS");
4786 if (!ret && strong && !has_strong_ref)
4787 ret = binder_put_node_cmd(
4788 proc, thread, &ptr, node_ptr,
4789 node_cookie, node_debug_id,
4790 BR_ACQUIRE, "BR_ACQUIRE");
4791 if (!ret && !strong && has_strong_ref)
4792 ret = binder_put_node_cmd(
4793 proc, thread, &ptr, node_ptr,
4794 node_cookie, node_debug_id,
4795 BR_RELEASE, "BR_RELEASE");
4796 if (!ret && !weak && has_weak_ref)
4797 ret = binder_put_node_cmd(
4798 proc, thread, &ptr, node_ptr,
4799 node_cookie, node_debug_id,
4800 BR_DECREFS, "BR_DECREFS");
4801 if (orig_ptr == ptr)
4802 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4803 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4804 proc->pid, thread->pid,
4805 node_debug_id,
4806 (u64)node_ptr,
4807 (u64)node_cookie);
4808 if (ret)
4809 return ret;
4810 } break;
4811 case BINDER_WORK_DEAD_BINDER:
4812 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4813 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4814 struct binder_ref_death *death;
4815 uint32_t cmd;
4816 binder_uintptr_t cookie;
4817
4818 death = container_of(w, struct binder_ref_death, work);
4819 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4820 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4821 else
4822 cmd = BR_DEAD_BINDER;
4823 cookie = death->cookie;
4824
4825 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4826 "%d:%d %s %016llx\n",
4827 proc->pid, thread->pid,
4828 cmd == BR_DEAD_BINDER ?
4829 "BR_DEAD_BINDER" :
4830 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4831 (u64)cookie);
4832 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4833 binder_inner_proc_unlock(proc);
4834 kfree(death);
4835 binder_stats_deleted(BINDER_STAT_DEATH);
4836 } else {
4837 binder_enqueue_work_ilocked(
4838 w, &proc->delivered_death);
4839 binder_inner_proc_unlock(proc);
4840 }
4841 if (put_user(cmd, (uint32_t __user *)ptr))
4842 return -EFAULT;
4843 ptr += sizeof(uint32_t);
4844 if (put_user(cookie,
4845 (binder_uintptr_t __user *)ptr))
4846 return -EFAULT;
4847 ptr += sizeof(binder_uintptr_t);
4848 binder_stat_br(proc, thread, cmd);
4849 if (cmd == BR_DEAD_BINDER)
4850 goto done; /* DEAD_BINDER notifications can cause transactions */
4851 } break;
4852 default:
4853 binder_inner_proc_unlock(proc);
4854 pr_err("%d:%d: bad work type %d\n",
4855 proc->pid, thread->pid, w->type);
4856 break;
4857 }
4858
4859 if (!t)
4860 continue;
4861
4862 BUG_ON(t->buffer == NULL);
4863 if (t->buffer->target_node) {
4864 struct binder_node *target_node = t->buffer->target_node;
4865
4866 trd->target.ptr = target_node->ptr;
4867 trd->cookie = target_node->cookie;
4868 binder_transaction_priority(thread, t, target_node);
4869 cmd = BR_TRANSACTION;
4870 } else {
4871 trd->target.ptr = 0;
4872 trd->cookie = 0;
4873 cmd = BR_REPLY;
4874 }
4875 trd->code = t->code;
4876 trd->flags = t->flags;
4877 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4878
4879 t_from = binder_get_txn_from(t);
4880 if (t_from) {
4881 struct task_struct *sender = t_from->proc->tsk;
4882
4883 trd->sender_pid =
4884 task_tgid_nr_ns(sender,
4885 task_active_pid_ns(current));
4886 trace_android_vh_sync_txn_recvd(thread->task, t_from->task);
4887 } else {
4888 trd->sender_pid = 0;
4889 }
4890
4891 ret = binder_apply_fd_fixups(proc, t);
4892 if (ret) {
4893 struct binder_buffer *buffer = t->buffer;
4894 bool oneway = !!(t->flags & TF_ONE_WAY);
4895 int tid = t->debug_id;
4896
4897 if (t_from)
4898 binder_thread_dec_tmpref(t_from);
4899 buffer->transaction = NULL;
4900 binder_cleanup_transaction(t, "fd fixups failed",
4901 BR_FAILED_REPLY);
4902 binder_free_buf(proc, thread, buffer, true);
4903 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4904 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4905 proc->pid, thread->pid,
4906 oneway ? "async " :
4907 (cmd == BR_REPLY ? "reply " : ""),
4908 tid, BR_FAILED_REPLY, ret, __LINE__);
4909 if (cmd == BR_REPLY) {
4910 cmd = BR_FAILED_REPLY;
4911 if (put_user(cmd, (uint32_t __user *)ptr))
4912 return -EFAULT;
4913 ptr += sizeof(uint32_t);
4914 binder_stat_br(proc, thread, cmd);
4915 break;
4916 }
4917 continue;
4918 }
4919 trd->data_size = t->buffer->data_size;
4920 trd->offsets_size = t->buffer->offsets_size;
4921 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4922 trd->data.ptr.offsets = trd->data.ptr.buffer +
4923 ALIGN(t->buffer->data_size,
4924 sizeof(void *));
4925
4926 tr.secctx = t->security_ctx;
4927 if (t->security_ctx) {
4928 cmd = BR_TRANSACTION_SEC_CTX;
4929 trsize = sizeof(tr);
4930 }
4931 if (put_user(cmd, (uint32_t __user *)ptr)) {
4932 if (t_from)
4933 binder_thread_dec_tmpref(t_from);
4934
4935 binder_cleanup_transaction(t, "put_user failed",
4936 BR_FAILED_REPLY);
4937
4938 return -EFAULT;
4939 }
4940 ptr += sizeof(uint32_t);
4941 if (copy_to_user(ptr, &tr, trsize)) {
4942 if (t_from)
4943 binder_thread_dec_tmpref(t_from);
4944
4945 binder_cleanup_transaction(t, "copy_to_user failed",
4946 BR_FAILED_REPLY);
4947
4948 return -EFAULT;
4949 }
4950 ptr += trsize;
4951
4952 trace_binder_transaction_received(t);
4953 trace_android_vh_binder_transaction_received(t, proc, thread, cmd);
4954 binder_stat_br(proc, thread, cmd);
4955 binder_debug(BINDER_DEBUG_TRANSACTION,
4956 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4957 proc->pid, thread->pid,
4958 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4959 (cmd == BR_TRANSACTION_SEC_CTX) ?
4960 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4961 t->debug_id, t_from ? t_from->proc->pid : 0,
4962 t_from ? t_from->pid : 0, cmd,
4963 t->buffer->data_size, t->buffer->offsets_size,
4964 (u64)trd->data.ptr.buffer,
4965 (u64)trd->data.ptr.offsets);
4966
4967 if (t_from)
4968 binder_thread_dec_tmpref(t_from);
4969 t->buffer->allow_user_free = 1;
4970 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4971 binder_inner_proc_lock(thread->proc);
4972 t->to_parent = thread->transaction_stack;
4973 t->to_thread = thread;
4974 thread->transaction_stack = t;
4975 binder_inner_proc_unlock(thread->proc);
4976 } else {
4977 binder_free_transaction(t);
4978 }
4979 break;
4980 }
4981
4982 done:
4983
4984 *consumed = ptr - buffer;
4985 binder_inner_proc_lock(proc);
4986 if (proc->requested_threads == 0 &&
4987 list_empty(&thread->proc->waiting_threads) &&
4988 proc->requested_threads_started < proc->max_threads &&
4989 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4990 BINDER_LOOPER_STATE_ENTERED))
4991 /* the user-space code fails to spawn a new thread if we leave this out */) {
4992 proc->requested_threads++;
4993 binder_inner_proc_unlock(proc);
4994 binder_debug(BINDER_DEBUG_THREADS,
4995 "%d:%d BR_SPAWN_LOOPER\n",
4996 proc->pid, thread->pid);
4997 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4998 return -EFAULT;
4999 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5000 } else
5001 binder_inner_proc_unlock(proc);
5002 return 0;
5003 }
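/*
 * A matching user-space read loop for the stream produced above might
 * look like this (illustrative; a real looper also reacts to
 * BR_SPAWN_LOOPER by starting a thread that sends BC_REGISTER_LOOPER):
 *
 *	uint8_t rbuf[256];
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	for (uint8_t *p = rbuf; p < rbuf + bwr.read_consumed; ) {
 *		uint32_t cmd;
 *		memcpy(&cmd, p, sizeof(cmd));
 *		p += sizeof(cmd);
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION:	// dispatch, then BC_FREE_BUFFER
 *			p += sizeof(struct binder_transaction_data);
 *			break;
 *		default:		// skip/handle remaining BR_* payloads
 *			break;
 *		}
 *	}
 */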
5004
5005 static void binder_release_work(struct binder_proc *proc,
5006 struct list_head *list)
5007 {
5008 struct binder_work *w;
5009 enum binder_work_type wtype;
5010
5011 while (1) {
5012 binder_inner_proc_lock(proc);
5013 w = binder_dequeue_work_head_ilocked(list);
5014 wtype = w ? w->type : 0;
5015 binder_inner_proc_unlock(proc);
5016 if (!w)
5017 return;
5018
5019 switch (wtype) {
5020 case BINDER_WORK_TRANSACTION: {
5021 struct binder_transaction *t;
5022
5023 t = container_of(w, struct binder_transaction, work);
5024
5025 binder_cleanup_transaction(t, "process died.",
5026 BR_DEAD_REPLY);
5027 } break;
5028 case BINDER_WORK_RETURN_ERROR: {
5029 struct binder_error *e = container_of(
5030 w, struct binder_error, work);
5031
5032 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5033 "undelivered TRANSACTION_ERROR: %u\n",
5034 e->cmd);
5035 } break;
5036 case BINDER_WORK_TRANSACTION_PENDING:
5037 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5038 case BINDER_WORK_TRANSACTION_COMPLETE: {
5039 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5040 "undelivered TRANSACTION_COMPLETE\n");
5041 kfree(w);
5042 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5043 } break;
5044 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5045 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5046 struct binder_ref_death *death;
5047
5048 death = container_of(w, struct binder_ref_death, work);
5049 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5050 "undelivered death notification, %016llx\n",
5051 (u64)death->cookie);
5052 kfree(death);
5053 binder_stats_deleted(BINDER_STAT_DEATH);
5054 } break;
5055 case BINDER_WORK_NODE:
5056 break;
5057 default:
5058 pr_err("unexpected work type, %d, not freed\n",
5059 wtype);
5060 break;
5061 }
5062 }
5063
5064 }
5065
5066 static struct binder_thread *binder_get_thread_ilocked(
5067 struct binder_proc *proc, struct binder_thread *new_thread)
5068 {
5069 struct binder_thread *thread = NULL;
5070 struct rb_node *parent = NULL;
5071 struct rb_node **p = &proc->threads.rb_node;
5072
5073 while (*p) {
5074 parent = *p;
5075 thread = rb_entry(parent, struct binder_thread, rb_node);
5076
5077 if (current->pid < thread->pid)
5078 p = &(*p)->rb_left;
5079 else if (current->pid > thread->pid)
5080 p = &(*p)->rb_right;
5081 else
5082 return thread;
5083 }
5084 if (!new_thread)
5085 return NULL;
5086 thread = new_thread;
5087 binder_stats_created(BINDER_STAT_THREAD);
5088 thread->proc = proc;
5089 thread->pid = current->pid;
5090 get_task_struct(current);
5091 thread->task = current;
5092 atomic_set(&thread->tmp_ref, 0);
5093 init_waitqueue_head(&thread->wait);
5094 INIT_LIST_HEAD(&thread->todo);
5095 rb_link_node(&thread->rb_node, parent, p);
5096 rb_insert_color(&thread->rb_node, &proc->threads);
5097 thread->looper_need_return = true;
5098 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5099 thread->return_error.cmd = BR_OK;
5100 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5101 thread->reply_error.cmd = BR_OK;
5102 spin_lock_init(&thread->prio_lock);
5103 thread->prio_state = BINDER_PRIO_SET;
5104 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5105 return thread;
5106 }
5107
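/*
 * Thread lookup first tries without allocating; only on a miss do we
 * drop the lock, kzalloc() a candidate, and retry the insert under the
 * lock, freeing the candidate if another thread won the race. A
 * minimal sketch of the pattern (illustrative only, not driver API):
 *
 *	obj = lookup_ilocked(key, NULL);
 *	if (!obj) {
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);
 *		obj = lookup_ilocked(key, new);	// inserts new on miss
 *		if (obj != new)
 *			kfree(new);		// lost the race
 *	}
 */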
5108 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5109 {
5110 struct binder_thread *thread;
5111 struct binder_thread *new_thread;
5112
5113 binder_inner_proc_lock(proc);
5114 thread = binder_get_thread_ilocked(proc, NULL);
5115 binder_inner_proc_unlock(proc);
5116 if (!thread) {
5117 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5118 if (new_thread == NULL)
5119 return NULL;
5120 binder_inner_proc_lock(proc);
5121 thread = binder_get_thread_ilocked(proc, new_thread);
5122 binder_inner_proc_unlock(proc);
5123 if (thread != new_thread)
5124 kfree(new_thread);
5125 }
5126 return thread;
5127 }
5128
5129 static void binder_free_proc(struct binder_proc *proc)
5130 {
5131 struct binder_proc_wrap *proc_wrap;
5132 struct binder_device *device;
5133
5134 BUG_ON(!list_empty(&proc->todo));
5135 BUG_ON(!list_empty(&proc->delivered_death));
5136 if (proc->outstanding_txns)
5137 pr_warn("%s: Unexpected outstanding_txns %d\n",
5138 __func__, proc->outstanding_txns);
5139 device = container_of(proc->context, struct binder_device, context);
5140 if (refcount_dec_and_test(&device->ref)) {
5141 kfree(proc->context->name);
5142 kfree(device);
5143 }
5144 binder_alloc_deferred_release(&proc->alloc);
5145 put_task_struct(proc->tsk);
5146 put_cred(proc->cred);
5147 binder_stats_deleted(BINDER_STAT_PROC);
5148 trace_android_vh_binder_free_proc(proc);
5149 proc_wrap = binder_proc_wrap_entry(proc);
5150 kfree(proc_wrap);
5151 }
5152
5153 static void binder_free_thread(struct binder_thread *thread)
5154 {
5155 BUG_ON(!list_empty(&thread->todo));
5156 binder_stats_deleted(BINDER_STAT_THREAD);
5157 binder_proc_dec_tmpref(thread->proc);
5158 put_task_struct(thread->task);
5159 kfree(thread);
5160 }
5161
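/*
 * Tear down a thread: unhook it from proc->threads, then walk its
 * transaction stack severing every link that points at the dying
 * thread. A transaction still expecting a reply from this thread
 * (send_reply) is failed with BR_DEAD_REPLY once the locks are
 * dropped.
 */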
5162 static int binder_thread_release(struct binder_proc *proc,
5163 struct binder_thread *thread)
5164 {
5165 struct binder_transaction *t;
5166 struct binder_transaction *send_reply = NULL;
5167 int active_transactions = 0;
5168 struct binder_transaction *last_t = NULL;
5169
5170 binder_inner_proc_lock(thread->proc);
5171 /*
5172 * take a ref on the proc so it survives
5173 * after we remove this thread from proc->threads.
5174 * The corresponding dec is when we actually
5175 * free the thread in binder_free_thread()
5176 */
5177 proc->tmp_ref++;
5178 /*
5179 * take a ref on this thread to ensure it
5180 * survives while we are releasing it
5181 */
5182 atomic_inc(&thread->tmp_ref);
5183 rb_erase(&thread->rb_node, &proc->threads);
5184 t = thread->transaction_stack;
5185 if (t) {
5186 spin_lock(&t->lock);
5187 if (t->to_thread == thread)
5188 send_reply = t;
5189 } else {
5190 __acquire(&t->lock);
5191 }
5192 thread->is_dead = true;
5193
5194 while (t) {
5195 last_t = t;
5196 active_transactions++;
5197 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5198 "release %d:%d transaction %d %s, still active\n",
5199 proc->pid, thread->pid,
5200 t->debug_id,
5201 (t->to_thread == thread) ? "in" : "out");
5202
5203 if (t->to_thread == thread) {
5204 thread->proc->outstanding_txns--;
5205 t->to_proc = NULL;
5206 t->to_thread = NULL;
5207 if (t->buffer) {
5208 t->buffer->transaction = NULL;
5209 t->buffer = NULL;
5210 }
5211 t = t->to_parent;
5212 } else if (t->from == thread) {
5213 t->from = NULL;
5214 t = t->from_parent;
5215 } else
5216 BUG();
5217 spin_unlock(&last_t->lock);
5218 if (t)
5219 spin_lock(&t->lock);
5220 else
5221 __acquire(&t->lock);
5222 }
5223 /* annotation for sparse, lock not acquired in last iteration above */
5224 __release(&t->lock);
5225
5226 /*
5227 * If this thread used poll, make sure we remove the waitqueue from any
5228 * poll data structures holding it.
5229 */
5230 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5231 wake_up_pollfree(&thread->wait);
5232
5233 binder_inner_proc_unlock(thread->proc);
5234
5235 /*
5236 * This is needed to avoid races between wake_up_pollfree() above and
5237 * someone else removing the last entry from the queue for other reasons
5238 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5239 * descriptor being closed). Such other users hold an RCU read lock, so
5240 * we can be sure they're done after we call synchronize_rcu().
5241 */
5242 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5243 synchronize_rcu();
5244
5245 if (send_reply)
5246 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5247 binder_release_work(proc, &thread->todo);
5248 trace_android_vh_binder_thread_release(proc, thread);
5249 binder_thread_dec_tmpref(thread);
5250 return active_transactions;
5251 }
5252
5253 static __poll_t binder_poll(struct file *filp,
5254 struct poll_table_struct *wait)
5255 {
5256 struct binder_proc *proc = filp->private_data;
5257 struct binder_thread *thread = NULL;
5258 bool wait_for_proc_work;
5259
5260 thread = binder_get_thread(proc);
5261 if (!thread)
5262 return EPOLLERR;
5263
5264 binder_inner_proc_lock(thread->proc);
5265 thread->looper |= BINDER_LOOPER_STATE_POLL;
5266 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5267
5268 binder_inner_proc_unlock(thread->proc);
5269
5270 poll_wait(filp, &thread->wait, wait);
5271
5272 if (binder_has_work(thread, wait_for_proc_work))
5273 return EPOLLIN;
5274
5275 return 0;
5276 }
5277
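/*
 * BINDER_WRITE_READ is the main data path: userspace passes a
 * struct binder_write_read describing an outgoing command buffer and
 * an incoming return buffer, either of which may be empty. A hedged
 * sketch of the userspace side (error handling omitted; the buffers
 * carry protocol commands such as BC_TRANSACTION/BR_REPLY):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_buf,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed / bwr.read_consumed report progress
 */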
5278 static int binder_ioctl_write_read(struct file *filp,
5279 unsigned int cmd, unsigned long arg,
5280 struct binder_thread *thread)
5281 {
5282 int ret = 0;
5283 struct binder_proc *proc = filp->private_data;
5284 unsigned int size = _IOC_SIZE(cmd);
5285 void __user *ubuf = (void __user *)arg;
5286 struct binder_write_read bwr;
5287
5288 if (size != sizeof(struct binder_write_read)) {
5289 ret = -EINVAL;
5290 goto out;
5291 }
5292 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5293 ret = -EFAULT;
5294 goto out;
5295 }
5296 binder_debug(BINDER_DEBUG_READ_WRITE,
5297 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5298 proc->pid, thread->pid,
5299 (u64)bwr.write_size, (u64)bwr.write_buffer,
5300 (u64)bwr.read_size, (u64)bwr.read_buffer);
5301
5302 if (bwr.write_size > 0) {
5303 ret = binder_thread_write(proc, thread,
5304 bwr.write_buffer,
5305 bwr.write_size,
5306 &bwr.write_consumed);
5307 trace_binder_write_done(ret);
5308 if (ret < 0) {
5309 bwr.read_consumed = 0;
5310 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5311 ret = -EFAULT;
5312 goto out;
5313 }
5314 }
5315 if (bwr.read_size > 0) {
5316 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5317 bwr.read_size,
5318 &bwr.read_consumed,
5319 filp->f_flags & O_NONBLOCK);
5320 trace_binder_read_done(ret);
5321 binder_inner_proc_lock(proc);
5322 if (!binder_worklist_empty_ilocked(&proc->todo))
5323 binder_wakeup_proc_ilocked(proc);
5324 binder_inner_proc_unlock(proc);
5325 trace_android_vh_binder_read_done(proc, thread);
5326 if (ret < 0) {
5327 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5328 ret = -EFAULT;
5329 goto out;
5330 }
5331 }
5332 binder_debug(BINDER_DEBUG_READ_WRITE,
5333 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5334 proc->pid, thread->pid,
5335 (u64)bwr.write_consumed, (u64)bwr.write_size,
5336 (u64)bwr.read_consumed, (u64)bwr.read_size);
5337 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5338 ret = -EFAULT;
5339 goto out;
5340 }
5341 out:
5342 return ret;
5343 }
5344
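/*
 * The context manager (typically servicemanager on Android) is the
 * one process other processes can reach through the well-known
 * handle 0. Only a single node per context may claim the role, and
 * the euid check below pins it to the first uid that registered.
 */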
5345 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5346 struct flat_binder_object *fbo)
5347 {
5348 int ret = 0;
5349 struct binder_proc *proc = filp->private_data;
5350 struct binder_context *context = proc->context;
5351 struct binder_node *new_node;
5352 kuid_t curr_euid = current_euid();
5353
5354 mutex_lock(&context->context_mgr_node_lock);
5355 if (context->binder_context_mgr_node) {
5356 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5357 ret = -EBUSY;
5358 goto out;
5359 }
5360 ret = security_binder_set_context_mgr(proc->cred);
5361 if (ret < 0)
5362 goto out;
5363 if (uid_valid(context->binder_context_mgr_uid)) {
5364 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5365 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5366 from_kuid(&init_user_ns, curr_euid),
5367 from_kuid(&init_user_ns,
5368 context->binder_context_mgr_uid));
5369 ret = -EPERM;
5370 goto out;
5371 }
5372 } else {
5373 context->binder_context_mgr_uid = curr_euid;
5374 }
5375 new_node = binder_new_node(proc, fbo);
5376 if (!new_node) {
5377 ret = -ENOMEM;
5378 goto out;
5379 }
5380 binder_node_lock(new_node);
5381 new_node->local_weak_refs++;
5382 new_node->local_strong_refs++;
5383 new_node->has_strong_ref = 1;
5384 new_node->has_weak_ref = 1;
5385 context->binder_context_mgr_node = new_node;
5386 binder_node_unlock(new_node);
5387 binder_put_node(new_node);
5388 out:
5389 mutex_unlock(&context->context_mgr_node_lock);
5390 return ret;
5391 }
5392
5393 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5394 struct binder_node_info_for_ref *info)
5395 {
5396 struct binder_node *node;
5397 struct binder_context *context = proc->context;
5398 __u32 handle = info->handle;
5399
5400 if (info->strong_count || info->weak_count || info->reserved1 ||
5401 info->reserved2 || info->reserved3) {
5402 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5403 proc->pid);
5404 return -EINVAL;
5405 }
5406
5407 /* This ioctl may only be used by the context manager */
5408 mutex_lock(&context->context_mgr_node_lock);
5409 if (!context->binder_context_mgr_node ||
5410 context->binder_context_mgr_node->proc != proc) {
5411 mutex_unlock(&context->context_mgr_node_lock);
5412 return -EPERM;
5413 }
5414 mutex_unlock(&context->context_mgr_node_lock);
5415
5416 node = binder_get_node_from_ref(proc, handle, true, NULL);
5417 if (!node)
5418 return -EINVAL;
5419
5420 info->strong_count = node->local_strong_refs +
5421 node->internal_strong_refs;
5422 info->weak_count = node->local_weak_refs;
5423
5424 binder_put_node(node);
5425
5426 return 0;
5427 }
5428
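/*
 * Cursor-style enumeration: the caller passes the ptr of the last
 * node it saw (0 to start) and gets back the next node in ptr order;
 * a returned ptr of 0 means the walk is complete. Illustrative
 * userspace loop:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *	do {
 *		ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info);
 *	} while (info.ptr != 0);
 */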
5429 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5430 struct binder_node_debug_info *info)
5431 {
5432 struct rb_node *n;
5433 binder_uintptr_t ptr = info->ptr;
5434
5435 memset(info, 0, sizeof(*info));
5436
5437 binder_inner_proc_lock(proc);
5438 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5439 struct binder_node *node = rb_entry(n, struct binder_node,
5440 rb_node);
5441 if (node->ptr > ptr) {
5442 info->ptr = node->ptr;
5443 info->cookie = node->cookie;
5444 info->has_strong_ref = node->has_strong_ref;
5445 info->has_weak_ref = node->has_weak_ref;
5446 break;
5447 }
5448 }
5449 binder_inner_proc_unlock(proc);
5450
5451 return 0;
5452 }
5453
5454 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5455 {
5456 struct rb_node *n;
5457 struct binder_thread *thread;
5458
5459 if (proc->outstanding_txns > 0)
5460 return true;
5461
5462 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5463 thread = rb_entry(n, struct binder_thread, rb_node);
5464 if (thread->transaction_stack)
5465 return true;
5466 }
5467 return false;
5468 }
5469
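/*
 * Freeze handshake: mark the process frozen first so new transactions
 * are refused, then optionally wait up to timeout_ms for outstanding
 * transactions to drain. If some are still pending, the freeze is
 * rolled back and -EAGAIN returned so the caller can retry.
 * Illustrative userspace call:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid, .enable = 1, .timeout_ms = 100,
 *	};
 *	ioctl(binder_fd, BINDER_FREEZE, &info);
 */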
5470 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5471 struct binder_proc *target_proc)
5472 {
5473 int ret = 0;
5474
5475 if (!info->enable) {
5476 binder_inner_proc_lock(target_proc);
5477 target_proc->sync_recv = false;
5478 target_proc->async_recv = false;
5479 target_proc->is_frozen = false;
5480 binder_inner_proc_unlock(target_proc);
5481 return 0;
5482 }
5483
5484 /*
5485 * Freezing the target. Prevent new transactions by
5486 * setting frozen state. If timeout specified, wait
5487 * for transactions to drain.
5488 */
5489 binder_inner_proc_lock(target_proc);
5490 target_proc->sync_recv = false;
5491 target_proc->async_recv = false;
5492 target_proc->is_frozen = true;
5493 binder_inner_proc_unlock(target_proc);
5494
5495 if (info->timeout_ms > 0)
5496 ret = wait_event_interruptible_timeout(
5497 target_proc->freeze_wait,
5498 (!target_proc->outstanding_txns),
5499 msecs_to_jiffies(info->timeout_ms));
5500
5501 /* Check pending transactions that wait for reply */
5502 if (ret >= 0) {
5503 binder_inner_proc_lock(target_proc);
5504 if (binder_txns_pending_ilocked(target_proc))
5505 ret = -EAGAIN;
5506 binder_inner_proc_unlock(target_proc);
5507 }
5508
5509 if (ret < 0) {
5510 binder_inner_proc_lock(target_proc);
5511 target_proc->is_frozen = false;
5512 binder_inner_proc_unlock(target_proc);
5513 }
5514
5515 return ret;
5516 }
5517
5518 static int binder_ioctl_get_freezer_info(
5519 struct binder_frozen_status_info *info)
5520 {
5521 struct binder_proc *target_proc;
5522 bool found = false;
5523 __u32 txns_pending;
5524
5525 info->sync_recv = 0;
5526 info->async_recv = 0;
5527
5528 mutex_lock(&binder_procs_lock);
5529 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5530 if (target_proc->pid == info->pid) {
5531 found = true;
5532 binder_inner_proc_lock(target_proc);
5533 txns_pending = binder_txns_pending_ilocked(target_proc);
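/*
 * sync_recv is reported as a small bitmask: bit 0 says a sync
 * transaction was received while frozen, bit 1 (set here) that
 * transactions are still pending a reply.
 */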
5534 info->sync_recv |= target_proc->sync_recv |
5535 (txns_pending << 1);
5536 info->async_recv |= target_proc->async_recv;
5537 binder_inner_proc_unlock(target_proc);
5538 }
5539 }
5540 mutex_unlock(&binder_procs_lock);
5541
5542 if (!found)
5543 return -EINVAL;
5544
5545 return 0;
5546 }
5547
5548 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5549 {
5550 int ret;
5551 struct binder_proc *proc = filp->private_data;
5552 struct binder_thread *thread;
5553 unsigned int size = _IOC_SIZE(cmd);
5554 void __user *ubuf = (void __user *)arg;
5555
5556 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5557 proc->pid, current->pid, cmd, arg);*/
5558
5559 binder_selftest_alloc(&proc->alloc);
5560
5561 trace_binder_ioctl(cmd, arg);
5562
5563 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5564 if (ret)
5565 goto err_unlocked;
5566
5567 thread = binder_get_thread(proc);
5568 if (thread == NULL) {
5569 ret = -ENOMEM;
5570 goto err;
5571 }
5572
5573 switch (cmd) {
5574 case BINDER_WRITE_READ:
5575 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5576 if (ret)
5577 goto err;
5578 break;
5579 case BINDER_SET_MAX_THREADS: {
5580 int max_threads;
5581
5582 if (copy_from_user(&max_threads, ubuf,
5583 sizeof(max_threads))) {
5584 ret = -EINVAL;
5585 goto err;
5586 }
5587 binder_inner_proc_lock(proc);
5588 proc->max_threads = max_threads;
5589 binder_inner_proc_unlock(proc);
5590 break;
5591 }
5592 case BINDER_SET_CONTEXT_MGR_EXT: {
5593 struct flat_binder_object fbo;
5594
5595 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5596 ret = -EINVAL;
5597 goto err;
5598 }
5599 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5600 if (ret)
5601 goto err;
5602 break;
5603 }
5604 case BINDER_SET_CONTEXT_MGR:
5605 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5606 if (ret)
5607 goto err;
5608 break;
5609 case BINDER_THREAD_EXIT:
5610 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5611 proc->pid, thread->pid);
5612 binder_thread_release(proc, thread);
5613 thread = NULL;
5614 break;
5615 case BINDER_VERSION: {
5616 struct binder_version __user *ver = ubuf;
5617
5618 if (size != sizeof(struct binder_version)) {
5619 ret = -EINVAL;
5620 goto err;
5621 }
5622 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5623 &ver->protocol_version)) {
5624 ret = -EINVAL;
5625 goto err;
5626 }
5627 break;
5628 }
5629 case BINDER_GET_NODE_INFO_FOR_REF: {
5630 struct binder_node_info_for_ref info;
5631
5632 if (copy_from_user(&info, ubuf, sizeof(info))) {
5633 ret = -EFAULT;
5634 goto err;
5635 }
5636
5637 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5638 if (ret < 0)
5639 goto err;
5640
5641 if (copy_to_user(ubuf, &info, sizeof(info))) {
5642 ret = -EFAULT;
5643 goto err;
5644 }
5645
5646 break;
5647 }
5648 case BINDER_GET_NODE_DEBUG_INFO: {
5649 struct binder_node_debug_info info;
5650
5651 if (copy_from_user(&info, ubuf, sizeof(info))) {
5652 ret = -EFAULT;
5653 goto err;
5654 }
5655
5656 ret = binder_ioctl_get_node_debug_info(proc, &info);
5657 if (ret < 0)
5658 goto err;
5659
5660 if (copy_to_user(ubuf, &info, sizeof(info))) {
5661 ret = -EFAULT;
5662 goto err;
5663 }
5664 break;
5665 }
5666 case BINDER_FREEZE: {
5667 struct binder_freeze_info info;
5668 struct binder_proc **target_procs = NULL, *target_proc;
5669 int target_procs_count = 0, i = 0;
5670
5671 ret = 0;
5672
5673 if (copy_from_user(&info, ubuf, sizeof(info))) {
5674 ret = -EFAULT;
5675 goto err;
5676 }
5677
5678 mutex_lock(&binder_procs_lock);
5679 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5680 if (target_proc->pid == info.pid)
5681 target_procs_count++;
5682 }
5683
5684 if (target_procs_count == 0) {
5685 mutex_unlock(&binder_procs_lock);
5686 ret = -EINVAL;
5687 goto err;
5688 }
5689
5690 target_procs = kcalloc(target_procs_count,
5691 sizeof(struct binder_proc *),
5692 GFP_KERNEL);
5693
5694 if (!target_procs) {
5695 mutex_unlock(&binder_procs_lock);
5696 ret = -ENOMEM;
5697 goto err;
5698 }
5699
5700 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5701 if (target_proc->pid != info.pid)
5702 continue;
5703
5704 binder_inner_proc_lock(target_proc);
5705 target_proc->tmp_ref++;
5706 binder_inner_proc_unlock(target_proc);
5707
5708 target_procs[i++] = target_proc;
5709 }
5710 mutex_unlock(&binder_procs_lock);
5711
5712 for (i = 0; i < target_procs_count; i++) {
5713 if (ret >= 0)
5714 ret = binder_ioctl_freeze(&info,
5715 target_procs[i]);
5716
5717 binder_proc_dec_tmpref(target_procs[i]);
5718 }
5719
5720 kfree(target_procs);
5721
5722 if (ret < 0)
5723 goto err;
5724 break;
5725 }
5726 case BINDER_GET_FROZEN_INFO: {
5727 struct binder_frozen_status_info info;
5728
5729 if (copy_from_user(&info, ubuf, sizeof(info))) {
5730 ret = -EFAULT;
5731 goto err;
5732 }
5733
5734 ret = binder_ioctl_get_freezer_info(&info);
5735 if (ret < 0)
5736 goto err;
5737
5738 if (copy_to_user(ubuf, &info, sizeof(info))) {
5739 ret = -EFAULT;
5740 goto err;
5741 }
5742 break;
5743 }
5744 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5745 uint32_t enable;
5746
5747 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5748 ret = -EFAULT;
5749 goto err;
5750 }
5751 binder_inner_proc_lock(proc);
5752 proc->oneway_spam_detection_enabled = (bool)enable;
5753 binder_inner_proc_unlock(proc);
5754 break;
5755 }
5756 default:
5757 ret = -EINVAL;
5758 goto err;
5759 }
5760 ret = 0;
5761 err:
5762 if (thread)
5763 thread->looper_need_return = false;
5764 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5765 if (ret && ret != -EINTR)
5766 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5767 err_unlocked:
5768 trace_binder_ioctl_done(ret);
5769 return ret;
5770 }
5771
5772 static void binder_vma_open(struct vm_area_struct *vma)
5773 {
5774 struct binder_proc *proc = vma->vm_private_data;
5775
5776 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5777 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5778 proc->pid, vma->vm_start, vma->vm_end,
5779 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5780 (unsigned long)pgprot_val(vma->vm_page_prot));
5781 }
5782
5783 static void binder_vma_close(struct vm_area_struct *vma)
5784 {
5785 struct binder_proc *proc = vma->vm_private_data;
5786
5787 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5788 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5789 proc->pid, vma->vm_start, vma->vm_end,
5790 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5791 (unsigned long)pgprot_val(vma->vm_page_prot));
5792 binder_alloc_vma_close(&proc->alloc);
5793 }
5794
5795 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5796 {
5797 return VM_FAULT_SIGBUS;
5798 }
5799
5800 static const struct vm_operations_struct binder_vm_ops = {
5801 .open = binder_vma_open,
5802 .close = binder_vma_close,
5803 .fault = binder_vm_fault,
5804 };
5805
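/*
 * The binder buffer is mapped read-only into userspace; the driver
 * fills it with transaction payloads from the kernel side. A typical
 * (illustrative) userspace mapping:
 *
 *	void *map = mmap(NULL, vm_size, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 *
 * where vm_size is the caller's choice (libbinder uses roughly 1MB
 * minus two pages).
 */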
5806 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5807 {
5808 struct binder_proc *proc = filp->private_data;
5809
5810 if (proc->tsk != current->group_leader)
5811 return -EINVAL;
5812
5813 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5814 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5815 __func__, proc->pid, vma->vm_start, vma->vm_end,
5816 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5817 (unsigned long)pgprot_val(vma->vm_page_prot));
5818
5819 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5820 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5821 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5822 return -EPERM;
5823 }
5824 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5825 vma->vm_flags &= ~VM_MAYWRITE;
5826
5827 vma->vm_ops = &binder_vm_ops;
5828 vma->vm_private_data = proc;
5829
5830 return binder_alloc_mmap_handler(&proc->alloc, vma);
5831 }
5832
5833 static int binder_open(struct inode *nodp, struct file *filp)
5834 {
5835 struct binder_proc_wrap *proc_wrap;
5836 struct binder_proc *proc, *itr;
5837 struct binder_device *binder_dev;
5838 struct binderfs_info *info;
5839 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5840 bool existing_pid = false;
5841
5842 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5843 current->group_leader->pid, current->pid);
5844
5845 proc_wrap = kzalloc(sizeof(*proc_wrap), GFP_KERNEL);
5846 if (proc_wrap == NULL)
5847 return -ENOMEM;
5848 proc = &proc_wrap->proc;
5849
5850 spin_lock_init(&proc->inner_lock);
5851 spin_lock_init(&proc->outer_lock);
5852 get_task_struct(current->group_leader);
5853 proc->tsk = current->group_leader;
5854 proc->cred = get_cred(filp->f_cred);
5855 INIT_LIST_HEAD(&proc->todo);
5856 init_waitqueue_head(&proc->freeze_wait);
5857 if (binder_supported_policy(current->policy)) {
5858 proc->default_priority.sched_policy = current->policy;
5859 proc->default_priority.prio = current->normal_prio;
5860 } else {
5861 proc->default_priority.sched_policy = SCHED_NORMAL;
5862 proc->default_priority.prio = NICE_TO_PRIO(0);
5863 }
5864
5865 set_binder_prio_uclamp(&proc->default_priority, NULL);
5866
5867 /* binderfs stashes devices in i_private */
5868 if (is_binderfs_device(nodp)) {
5869 binder_dev = nodp->i_private;
5870 info = nodp->i_sb->s_fs_info;
5871 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5872 } else {
5873 binder_dev = container_of(filp->private_data,
5874 struct binder_device, miscdev);
5875 }
5876 refcount_inc(&binder_dev->ref);
5877 proc->context = &binder_dev->context;
5878 binder_alloc_init(&proc->alloc);
5879
5880 binder_stats_created(BINDER_STAT_PROC);
5881 proc->pid = current->group_leader->pid;
5882 INIT_LIST_HEAD(&proc->delivered_death);
5883 INIT_LIST_HEAD(&proc->waiting_threads);
5884 filp->private_data = proc;
5885
5886 mutex_lock(&binder_procs_lock);
5887 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5888 if (itr->pid == proc->pid) {
5889 existing_pid = true;
5890 break;
5891 }
5892 }
5893 hlist_add_head(&proc->proc_node, &binder_procs);
5894 mutex_unlock(&binder_procs_lock);
5895 trace_android_vh_binder_preset(&binder_procs, &binder_procs_lock);
5896 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5897 char strbuf[11];
5898
5899 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5900 /*
5901 * proc debug entries are shared between contexts.
5902 * Only create for the first PID to avoid debugfs log spamming.
5903 * The printing code will anyway print all contexts for a given
5904 * PID, so this is not a problem.
5905 */
5906 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5907 binder_debugfs_dir_entry_proc,
5908 (void *)(unsigned long)proc->pid,
5909 &proc_fops);
5910 }
5911
5912 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5913 char strbuf[11];
5914 struct dentry *binderfs_entry;
5915
5916 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5917 /*
5918 * Similar to debugfs, the process specific log file is shared
5919 * between contexts. Only create for the first PID.
5920 * This is ok since same as debugfs, the log file will contain
5921 * information on all contexts of a given PID.
5922 */
5923 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5924 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5925 if (!IS_ERR(binderfs_entry)) {
5926 proc->binderfs_entry = binderfs_entry;
5927 } else {
5928 int error;
5929
5930 error = PTR_ERR(binderfs_entry);
5931 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5932 strbuf, error);
5933 }
5934 }
5935
5936 return 0;
5937 }
5938
5939 static int binder_flush(struct file *filp, fl_owner_t id)
5940 {
5941 struct binder_proc *proc = filp->private_data;
5942
5943 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5944
5945 return 0;
5946 }
5947
5948 static void binder_deferred_flush(struct binder_proc *proc)
5949 {
5950 struct rb_node *n;
5951 int wake_count = 0;
5952
5953 binder_inner_proc_lock(proc);
5954 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5955 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5956
5957 thread->looper_need_return = true;
5958 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5959 wake_up_interruptible(&thread->wait);
5960 wake_count++;
5961 }
5962 }
5963 binder_inner_proc_unlock(proc);
5964
5965 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5966 "binder_flush: %d woke %d threads\n", proc->pid,
5967 wake_count);
5968 }
5969
5970 static int binder_release(struct inode *nodp, struct file *filp)
5971 {
5972 struct binder_proc *proc = filp->private_data;
5973
5974 debugfs_remove(proc->debugfs_entry);
5975
5976 if (proc->binderfs_entry) {
5977 binderfs_remove_file(proc->binderfs_entry);
5978 proc->binderfs_entry = NULL;
5979 }
5980
5981 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5982
5983 return 0;
5984 }
5985
5986 static int binder_node_release(struct binder_node *node, int refs)
5987 {
5988 struct binder_ref *ref;
5989 int death = 0;
5990 struct binder_proc *proc = node->proc;
5991
5992 binder_release_work(proc, &node->async_todo);
5993
5994 binder_node_lock(node);
5995 binder_inner_proc_lock(proc);
5996 binder_dequeue_work_ilocked(&node->work);
5997 /*
5998 * The caller must have taken a temporary ref on the node.
5999 */
6000 BUG_ON(!node->tmp_refs);
6001 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6002 binder_inner_proc_unlock(proc);
6003 binder_node_unlock(node);
6004 binder_free_node(node);
6005
6006 return refs;
6007 }
6008
6009 node->proc = NULL;
6010 node->local_strong_refs = 0;
6011 node->local_weak_refs = 0;
6012 binder_inner_proc_unlock(proc);
6013
6014 spin_lock(&binder_dead_nodes_lock);
6015 hlist_add_head(&node->dead_node, &binder_dead_nodes);
6016 spin_unlock(&binder_dead_nodes_lock);
6017
6018 hlist_for_each_entry(ref, &node->refs, node_entry) {
6019 refs++;
6020 /*
6021 * Need the node lock to synchronize
6022 * with new notification requests and the
6023 * inner lock to synchronize with queued
6024 * death notifications.
6025 */
6026 binder_inner_proc_lock(ref->proc);
6027 if (!ref->death) {
6028 binder_inner_proc_unlock(ref->proc);
6029 continue;
6030 }
6031
6032 death++;
6033
6034 BUG_ON(!list_empty(&ref->death->work.entry));
6035 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6036 binder_enqueue_work_ilocked(&ref->death->work,
6037 &ref->proc->todo);
6038 binder_wakeup_proc_ilocked(ref->proc);
6039 binder_inner_proc_unlock(ref->proc);
6040 }
6041
6042 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6043 "node %d now dead, refs %d, death %d\n",
6044 node->debug_id, refs, death);
6045 binder_node_unlock(node);
6046 binder_put_node(node);
6047
6048 return refs;
6049 }
6050
6051 static void binder_deferred_release(struct binder_proc *proc)
6052 {
6053 struct binder_context *context = proc->context;
6054 struct rb_node *n;
6055 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6056
6057 mutex_lock(&binder_procs_lock);
6058 hlist_del(&proc->proc_node);
6059 mutex_unlock(&binder_procs_lock);
6060
6061 mutex_lock(&context->context_mgr_node_lock);
6062 if (context->binder_context_mgr_node &&
6063 context->binder_context_mgr_node->proc == proc) {
6064 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6065 "%s: %d context_mgr_node gone\n",
6066 __func__, proc->pid);
6067 context->binder_context_mgr_node = NULL;
6068 }
6069 mutex_unlock(&context->context_mgr_node_lock);
6070 binder_inner_proc_lock(proc);
6071 /*
6072 * Make sure proc stays alive after we
6073 * remove all the threads
6074 */
6075 proc->tmp_ref++;
6076
6077 proc->is_dead = true;
6078 proc->is_frozen = false;
6079 proc->sync_recv = false;
6080 proc->async_recv = false;
6081 threads = 0;
6082 active_transactions = 0;
6083 while ((n = rb_first(&proc->threads))) {
6084 struct binder_thread *thread;
6085
6086 thread = rb_entry(n, struct binder_thread, rb_node);
6087 binder_inner_proc_unlock(proc);
6088 threads++;
6089 active_transactions += binder_thread_release(proc, thread);
6090 binder_inner_proc_lock(proc);
6091 }
6092
6093 nodes = 0;
6094 incoming_refs = 0;
6095 while ((n = rb_first(&proc->nodes))) {
6096 struct binder_node *node;
6097
6098 node = rb_entry(n, struct binder_node, rb_node);
6099 nodes++;
6100 /*
6101 * take a temporary ref on the node before
6102 * calling binder_node_release() which will either
6103 * kfree() the node or call binder_put_node()
6104 */
6105 binder_inc_node_tmpref_ilocked(node);
6106 rb_erase(&node->rb_node, &proc->nodes);
6107 binder_inner_proc_unlock(proc);
6108 incoming_refs = binder_node_release(node, incoming_refs);
6109 binder_inner_proc_lock(proc);
6110 }
6111 binder_inner_proc_unlock(proc);
6112
6113 outgoing_refs = 0;
6114 binder_proc_lock(proc);
6115 while ((n = rb_first(&proc->refs_by_desc))) {
6116 struct binder_ref *ref;
6117
6118 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6119 outgoing_refs++;
6120 binder_cleanup_ref_olocked(ref);
6121 binder_proc_unlock(proc);
6122 binder_free_ref(ref);
6123 binder_proc_lock(proc);
6124 }
6125 binder_proc_unlock(proc);
6126
6127 binder_release_work(proc, &proc->todo);
6128 binder_release_work(proc, &proc->delivered_death);
6129
6130 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6131 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6132 __func__, proc->pid, threads, nodes, incoming_refs,
6133 outgoing_refs, active_transactions);
6134
6135 binder_proc_dec_tmpref(proc);
6136 }
6137
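/*
 * flush/release are funneled through one workqueue item: each
 * binder_proc carries a bitmask of pending deferred actions, and the
 * worker below pops one proc at a time until the list drains.
 */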
6138 static void binder_deferred_func(struct work_struct *work)
6139 {
6140 struct binder_proc *proc;
6141
6142 int defer;
6143
6144 do {
6145 mutex_lock(&binder_deferred_lock);
6146 if (!hlist_empty(&binder_deferred_list)) {
6147 proc = hlist_entry(binder_deferred_list.first,
6148 struct binder_proc, deferred_work_node);
6149 hlist_del_init(&proc->deferred_work_node);
6150 defer = proc->deferred_work;
6151 proc->deferred_work = 0;
6152 } else {
6153 proc = NULL;
6154 defer = 0;
6155 }
6156 mutex_unlock(&binder_deferred_lock);
6157
6158 if (defer & BINDER_DEFERRED_FLUSH)
6159 binder_deferred_flush(proc);
6160
6161 if (defer & BINDER_DEFERRED_RELEASE)
6162 binder_deferred_release(proc); /* frees proc */
6163 } while (proc);
6164 }
6165 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6166
6167 static void
6168 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6169 {
6170 mutex_lock(&binder_deferred_lock);
6171 proc->deferred_work |= defer;
6172 if (hlist_unhashed(&proc->deferred_work_node)) {
6173 hlist_add_head(&proc->deferred_work_node,
6174 &binder_deferred_list);
6175 schedule_work(&binder_deferred_work);
6176 }
6177 mutex_unlock(&binder_deferred_lock);
6178 }
6179
6180 static void print_binder_transaction_ilocked(struct seq_file *m,
6181 struct binder_proc *proc,
6182 const char *prefix,
6183 struct binder_transaction *t)
6184 {
6185 struct binder_proc *to_proc;
6186 struct binder_buffer *buffer = t->buffer;
6187 ktime_t current_time = ktime_get();
6188
6189 spin_lock(&t->lock);
6190 trace_android_vh_binder_print_transaction_info(m, proc, prefix, t);
6191 to_proc = t->to_proc;
6192 seq_printf(m,
6193 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d elapsed %lldms",
6194 prefix, t->debug_id, t,
6195 t->from_pid,
6196 t->from_tid,
6197 to_proc ? to_proc->pid : 0,
6198 t->to_thread ? t->to_thread->pid : 0,
6199 t->code, t->flags, t->priority.sched_policy,
6200 t->priority.prio, t->need_reply,
6201 ktime_ms_delta(current_time, t->start_time));
6202 spin_unlock(&t->lock);
6203
6204 if (proc != to_proc) {
6205 /*
6206 * Can only safely deref buffer if we are holding the
6207 * correct proc inner lock for this node
6208 */
6209 seq_puts(m, "\n");
6210 return;
6211 }
6212
6213 if (buffer == NULL) {
6214 seq_puts(m, " buffer free\n");
6215 return;
6216 }
6217 if (buffer->target_node)
6218 seq_printf(m, " node %d", buffer->target_node->debug_id);
6219 seq_printf(m, " size %zd:%zd offset %lx\n",
6220 buffer->data_size, buffer->offsets_size,
6221 proc->alloc.buffer - buffer->user_data);
6222 }
6223
6224 static void print_binder_work_ilocked(struct seq_file *m,
6225 struct binder_proc *proc,
6226 const char *prefix,
6227 const char *transaction_prefix,
6228 struct binder_work *w)
6229 {
6230 struct binder_node *node;
6231 struct binder_transaction *t;
6232
6233 switch (w->type) {
6234 case BINDER_WORK_TRANSACTION:
6235 t = container_of(w, struct binder_transaction, work);
6236 print_binder_transaction_ilocked(
6237 m, proc, transaction_prefix, t);
6238 break;
6239 case BINDER_WORK_RETURN_ERROR: {
6240 struct binder_error *e = container_of(
6241 w, struct binder_error, work);
6242
6243 seq_printf(m, "%stransaction error: %u\n",
6244 prefix, e->cmd);
6245 } break;
6246 case BINDER_WORK_TRANSACTION_COMPLETE:
6247 seq_printf(m, "%stransaction complete\n", prefix);
6248 break;
6249 case BINDER_WORK_NODE:
6250 node = container_of(w, struct binder_node, work);
6251 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6252 prefix, node->debug_id,
6253 (u64)node->ptr, (u64)node->cookie);
6254 break;
6255 case BINDER_WORK_DEAD_BINDER:
6256 seq_printf(m, "%shas dead binder\n", prefix);
6257 break;
6258 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6259 seq_printf(m, "%shas cleared dead binder\n", prefix);
6260 break;
6261 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6262 seq_printf(m, "%shas cleared death notification\n", prefix);
6263 break;
6264 default:
6265 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6266 break;
6267 }
6268 }
6269
6270 static void print_binder_thread_ilocked(struct seq_file *m,
6271 struct binder_thread *thread,
6272 int print_always)
6273 {
6274 struct binder_transaction *t;
6275 struct binder_work *w;
6276 size_t start_pos = m->count;
6277 size_t header_pos;
6278
6279 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6280 thread->pid, thread->looper,
6281 thread->looper_need_return,
6282 atomic_read(&thread->tmp_ref));
6283 header_pos = m->count;
6284 t = thread->transaction_stack;
6285 while (t) {
6286 if (t->from == thread) {
6287 print_binder_transaction_ilocked(m, thread->proc,
6288 " outgoing transaction", t);
6289 t = t->from_parent;
6290 } else if (t->to_thread == thread) {
6291 print_binder_transaction_ilocked(m, thread->proc,
6292 " incoming transaction", t);
6293 t = t->to_parent;
6294 } else {
6295 print_binder_transaction_ilocked(m, thread->proc,
6296 " bad transaction", t);
6297 t = NULL;
6298 }
6299 }
6300 list_for_each_entry(w, &thread->todo, entry) {
6301 print_binder_work_ilocked(m, thread->proc, " ",
6302 " pending transaction", w);
6303 }
6304 if (!print_always && m->count == header_pos)
6305 m->count = start_pos;
6306 }
6307
6308 static void print_binder_node_nilocked(struct seq_file *m,
6309 struct binder_node *node)
6310 {
6311 struct binder_ref *ref;
6312 struct binder_work *w;
6313 int count;
6314
6315 count = 0;
6316 hlist_for_each_entry(ref, &node->refs, node_entry)
6317 count++;
6318
6319 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6320 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6321 node->sched_policy, node->min_priority,
6322 node->has_strong_ref, node->has_weak_ref,
6323 node->local_strong_refs, node->local_weak_refs,
6324 node->internal_strong_refs, count, node->tmp_refs);
6325 if (count) {
6326 seq_puts(m, " proc");
6327 hlist_for_each_entry(ref, &node->refs, node_entry)
6328 seq_printf(m, " %d", ref->proc->pid);
6329 }
6330 seq_puts(m, "\n");
6331 if (node->proc) {
6332 list_for_each_entry(w, &node->async_todo, entry)
6333 print_binder_work_ilocked(m, node->proc, " ",
6334 " pending async transaction", w);
6335 }
6336 }
6337
6338 static void print_binder_ref_olocked(struct seq_file *m,
6339 struct binder_ref *ref)
6340 {
6341 binder_node_lock(ref->node);
6342 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6343 ref->data.debug_id, ref->data.desc,
6344 ref->node->proc ? "" : "dead ",
6345 ref->node->debug_id, ref->data.strong,
6346 ref->data.weak, ref->death);
6347 binder_node_unlock(ref->node);
6348 }
6349
6350 static void print_binder_proc(struct seq_file *m,
6351 struct binder_proc *proc, int print_all)
6352 {
6353 struct binder_work *w;
6354 struct rb_node *n;
6355 size_t start_pos = m->count;
6356 size_t header_pos;
6357 struct binder_node *last_node = NULL;
6358
6359 seq_printf(m, "proc %d\n", proc->pid);
6360 seq_printf(m, "context %s\n", proc->context->name);
6361 header_pos = m->count;
6362
6363 binder_inner_proc_lock(proc);
6364 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6365 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6366 rb_node), print_all);
6367
6368 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6369 struct binder_node *node = rb_entry(n, struct binder_node,
6370 rb_node);
6371 if (!print_all && !node->has_async_transaction)
6372 continue;
6373
6374 /*
6375 * take a temporary reference on the node so it
6376 * survives and isn't removed from the tree
6377 * while we print it.
6378 */
6379 binder_inc_node_tmpref_ilocked(node);
6380 /* Need to drop inner lock to take node lock */
6381 binder_inner_proc_unlock(proc);
6382 if (last_node)
6383 binder_put_node(last_node);
6384 binder_node_inner_lock(node);
6385 print_binder_node_nilocked(m, node);
6386 binder_node_inner_unlock(node);
6387 last_node = node;
6388 binder_inner_proc_lock(proc);
6389 }
6390 binder_inner_proc_unlock(proc);
6391 if (last_node)
6392 binder_put_node(last_node);
6393
6394 if (print_all) {
6395 binder_proc_lock(proc);
6396 for (n = rb_first(&proc->refs_by_desc);
6397 n != NULL;
6398 n = rb_next(n))
6399 print_binder_ref_olocked(m, rb_entry(n,
6400 struct binder_ref,
6401 rb_node_desc));
6402 binder_proc_unlock(proc);
6403 }
6404 binder_alloc_print_allocated(m, &proc->alloc);
6405 binder_inner_proc_lock(proc);
6406 list_for_each_entry(w, &proc->todo, entry)
6407 print_binder_work_ilocked(m, proc, " ",
6408 " pending transaction", w);
6409 list_for_each_entry(w, &proc->delivered_death, entry) {
6410 seq_puts(m, " has delivered dead binder\n");
6411 break;
6412 }
6413 binder_inner_proc_unlock(proc);
6414 if (!print_all && m->count == header_pos)
6415 m->count = start_pos;
6416 }
6417
6418 static const char * const binder_return_strings[] = {
6419 "BR_ERROR",
6420 "BR_OK",
6421 "BR_TRANSACTION",
6422 "BR_REPLY",
6423 "BR_ACQUIRE_RESULT",
6424 "BR_DEAD_REPLY",
6425 "BR_TRANSACTION_COMPLETE",
6426 "BR_INCREFS",
6427 "BR_ACQUIRE",
6428 "BR_RELEASE",
6429 "BR_DECREFS",
6430 "BR_ATTEMPT_ACQUIRE",
6431 "BR_NOOP",
6432 "BR_SPAWN_LOOPER",
6433 "BR_FINISHED",
6434 "BR_DEAD_BINDER",
6435 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6436 "BR_FAILED_REPLY",
6437 "BR_FROZEN_REPLY",
6438 "BR_ONEWAY_SPAM_SUSPECT",
6439 "BR_TRANSACTION_PENDING_FROZEN"
6440 };
6441
6442 static const char * const binder_command_strings[] = {
6443 "BC_TRANSACTION",
6444 "BC_REPLY",
6445 "BC_ACQUIRE_RESULT",
6446 "BC_FREE_BUFFER",
6447 "BC_INCREFS",
6448 "BC_ACQUIRE",
6449 "BC_RELEASE",
6450 "BC_DECREFS",
6451 "BC_INCREFS_DONE",
6452 "BC_ACQUIRE_DONE",
6453 "BC_ATTEMPT_ACQUIRE",
6454 "BC_REGISTER_LOOPER",
6455 "BC_ENTER_LOOPER",
6456 "BC_EXIT_LOOPER",
6457 "BC_REQUEST_DEATH_NOTIFICATION",
6458 "BC_CLEAR_DEATH_NOTIFICATION",
6459 "BC_DEAD_BINDER_DONE",
6460 "BC_TRANSACTION_SG",
6461 "BC_REPLY_SG",
6462 };
6463
6464 static const char * const binder_objstat_strings[] = {
6465 "proc",
6466 "thread",
6467 "node",
6468 "ref",
6469 "death",
6470 "transaction",
6471 "transaction_complete"
6472 };
6473
6474 static void print_binder_stats(struct seq_file *m, const char *prefix,
6475 struct binder_stats *stats)
6476 {
6477 int i;
6478
6479 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6480 ARRAY_SIZE(binder_command_strings));
6481 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6482 int temp = atomic_read(&stats->bc[i]);
6483
6484 if (temp)
6485 seq_printf(m, "%s%s: %d\n", prefix,
6486 binder_command_strings[i], temp);
6487 }
6488
6489 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6490 ARRAY_SIZE(binder_return_strings));
6491 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6492 int temp = atomic_read(&stats->br[i]);
6493
6494 if (temp)
6495 seq_printf(m, "%s%s: %d\n", prefix,
6496 binder_return_strings[i], temp);
6497 }
6498
6499 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6500 ARRAY_SIZE(binder_objstat_strings));
6501 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6502 ARRAY_SIZE(stats->obj_deleted));
6503 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6504 int created = atomic_read(&stats->obj_created[i]);
6505 int deleted = atomic_read(&stats->obj_deleted[i]);
6506
6507 if (created || deleted)
6508 seq_printf(m, "%s%s: active %d total %d\n",
6509 prefix,
6510 binder_objstat_strings[i],
6511 created - deleted,
6512 created);
6513 }
6514 }
6515
6516 static void print_binder_proc_stats(struct seq_file *m,
6517 struct binder_proc *proc)
6518 {
6519 struct binder_work *w;
6520 struct binder_thread *thread;
6521 struct rb_node *n;
6522 int count, strong, weak, ready_threads;
6523 size_t free_async_space =
6524 binder_alloc_get_free_async_space(&proc->alloc);
6525
6526 seq_printf(m, "proc %d\n", proc->pid);
6527 seq_printf(m, "context %s\n", proc->context->name);
6528 count = 0;
6529 ready_threads = 0;
6530 binder_inner_proc_lock(proc);
6531 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6532 count++;
6533
6534 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6535 ready_threads++;
6536
6537 seq_printf(m, " threads: %d\n", count);
6538 seq_printf(m, " requested threads: %d+%d/%d\n"
6539 " ready threads %d\n"
6540 " free async space %zd\n", proc->requested_threads,
6541 proc->requested_threads_started, proc->max_threads,
6542 ready_threads,
6543 free_async_space);
6544 count = 0;
6545 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6546 count++;
6547 binder_inner_proc_unlock(proc);
6548 seq_printf(m, " nodes: %d\n", count);
6549 count = 0;
6550 strong = 0;
6551 weak = 0;
6552 binder_proc_lock(proc);
6553 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6554 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6555 rb_node_desc);
6556 count++;
6557 strong += ref->data.strong;
6558 weak += ref->data.weak;
6559 }
6560 binder_proc_unlock(proc);
6561 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6562
6563 count = binder_alloc_get_allocated_count(&proc->alloc);
6564 seq_printf(m, " buffers: %d\n", count);
6565
6566 binder_alloc_print_pages(m, &proc->alloc);
6567
6568 count = 0;
6569 binder_inner_proc_lock(proc);
6570 list_for_each_entry(w, &proc->todo, entry) {
6571 if (w->type == BINDER_WORK_TRANSACTION)
6572 count++;
6573 }
6574 binder_inner_proc_unlock(proc);
6575 seq_printf(m, " pending transactions: %d\n", count);
6576
6577 print_binder_stats(m, " ", &proc->stats);
6578 }
6579
6580 static int state_show(struct seq_file *m, void *unused)
6581 {
6582 struct binder_proc *proc;
6583 struct binder_node *node;
6584 struct binder_node *last_node = NULL;
6585
6586 seq_puts(m, "binder state:\n");
6587
6588 spin_lock(&binder_dead_nodes_lock);
6589 if (!hlist_empty(&binder_dead_nodes))
6590 seq_puts(m, "dead nodes:\n");
6591 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6592 /*
6593 * take a temporary reference on the node so it
6594 * survives and isn't removed from the list
6595 * while we print it.
6596 */
6597 node->tmp_refs++;
6598 spin_unlock(&binder_dead_nodes_lock);
6599 if (last_node)
6600 binder_put_node(last_node);
6601 binder_node_lock(node);
6602 print_binder_node_nilocked(m, node);
6603 binder_node_unlock(node);
6604 last_node = node;
6605 spin_lock(&binder_dead_nodes_lock);
6606 }
6607 spin_unlock(&binder_dead_nodes_lock);
6608 if (last_node)
6609 binder_put_node(last_node);
6610
6611 mutex_lock(&binder_procs_lock);
6612 hlist_for_each_entry(proc, &binder_procs, proc_node)
6613 print_binder_proc(m, proc, 1);
6614 mutex_unlock(&binder_procs_lock);
6615
6616 return 0;
6617 }
6618
6619 static int stats_show(struct seq_file *m, void *unused)
6620 {
6621 struct binder_proc *proc;
6622
6623 seq_puts(m, "binder stats:\n");
6624
6625 print_binder_stats(m, "", &binder_stats);
6626
6627 mutex_lock(&binder_procs_lock);
6628 hlist_for_each_entry(proc, &binder_procs, proc_node)
6629 print_binder_proc_stats(m, proc);
6630 mutex_unlock(&binder_procs_lock);
6631
6632 return 0;
6633 }
6634
6635 static int transactions_show(struct seq_file *m, void *unused)
6636 {
6637 struct binder_proc *proc;
6638
6639 seq_puts(m, "binder transactions:\n");
6640 mutex_lock(&binder_procs_lock);
6641 hlist_for_each_entry(proc, &binder_procs, proc_node)
6642 print_binder_proc(m, proc, 0);
6643 mutex_unlock(&binder_procs_lock);
6644
6645 return 0;
6646 }
6647
6648 static int proc_show(struct seq_file *m, void *unused)
6649 {
6650 struct binder_proc *itr;
6651 int pid = (unsigned long)m->private;
6652
6653 mutex_lock(&binder_procs_lock);
6654 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6655 if (itr->pid == pid) {
6656 seq_puts(m, "binder proc state:\n");
6657 print_binder_proc(m, itr, 1);
6658 }
6659 }
6660 mutex_unlock(&binder_procs_lock);
6661
6662 return 0;
6663 }
6664
6665 static void print_binder_transaction_log_entry(struct seq_file *m,
6666 struct binder_transaction_log_entry *e)
6667 {
6668 int debug_id = READ_ONCE(e->debug_id_done);
6669 /*
6670 * read barrier to guarantee debug_id_done read before
6671 * we print the log values
6672 */
6673 smp_rmb();
6674 seq_printf(m,
6675 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6676 e->debug_id, (e->call_type == 2) ? "reply" :
6677 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6678 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6679 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6680 e->return_error, e->return_error_param,
6681 e->return_error_line);
6682 /*
6683 * read-barrier to guarantee read of debug_id_done after
6684 * done printing the fields of the entry
6685 */
6686 smp_rmb();
6687 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6688 "\n" : " (incomplete)\n");
6689 }
6690
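/*
 * The transaction log is a fixed-size ring indexed by an atomic
 * cursor. Once the ring has wrapped (log->full), all
 * ARRAY_SIZE(log->entry) slots are printed starting just past the
 * cursor; before that, slots 0..cur are printed. For example, with a
 * 32-entry ring and cur == 34, printing starts at 35 % 32 == 3 and
 * walks all 32 entries in write order.
 */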
6691 static int transaction_log_show(struct seq_file *m, void *unused)
6692 {
6693 struct binder_transaction_log *log = m->private;
6694 unsigned int log_cur = atomic_read(&log->cur);
6695 unsigned int count;
6696 unsigned int cur;
6697 int i;
6698
6699 count = log_cur + 1;
6700 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6701 0 : count % ARRAY_SIZE(log->entry);
6702 if (count > ARRAY_SIZE(log->entry) || log->full)
6703 count = ARRAY_SIZE(log->entry);
6704 for (i = 0; i < count; i++) {
6705 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6706
6707 print_binder_transaction_log_entry(m, &log->entry[index]);
6708 }
6709 return 0;
6710 }
6711
6712 const struct file_operations binder_fops = {
6713 .owner = THIS_MODULE,
6714 .poll = binder_poll,
6715 .unlocked_ioctl = binder_ioctl,
6716 .compat_ioctl = compat_ptr_ioctl,
6717 .mmap = binder_mmap,
6718 .open = binder_open,
6719 .flush = binder_flush,
6720 .release = binder_release,
6721 };
6722
6723 DEFINE_SHOW_ATTRIBUTE(state);
6724 DEFINE_SHOW_ATTRIBUTE(stats);
6725 DEFINE_SHOW_ATTRIBUTE(transactions);
6726 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6727
6728 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6729 {
6730 .name = "state",
6731 .mode = 0444,
6732 .fops = &state_fops,
6733 .data = NULL,
6734 },
6735 {
6736 .name = "stats",
6737 .mode = 0444,
6738 .fops = &stats_fops,
6739 .data = NULL,
6740 },
6741 {
6742 .name = "transactions",
6743 .mode = 0444,
6744 .fops = &transactions_fops,
6745 .data = NULL,
6746 },
6747 {
6748 .name = "transaction_log",
6749 .mode = 0444,
6750 .fops = &transaction_log_fops,
6751 .data = &binder_transaction_log,
6752 },
6753 {
6754 .name = "failed_transaction_log",
6755 .mode = 0444,
6756 .fops = &transaction_log_fops,
6757 .data = &binder_transaction_log_failed,
6758 },
6759 {} /* terminator */
6760 };
6761
6762 static int __init init_binder_device(const char *name)
6763 {
6764 int ret;
6765 struct binder_device *binder_device;
6766
6767 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6768 if (!binder_device)
6769 return -ENOMEM;
6770
6771 binder_device->miscdev.fops = &binder_fops;
6772 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6773 binder_device->miscdev.name = name;
6774
6775 refcount_set(&binder_device->ref, 1);
6776 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6777 binder_device->context.name = name;
6778 mutex_init(&binder_device->context.context_mgr_node_lock);
6779
6780 ret = misc_register(&binder_device->miscdev);
6781 if (ret < 0) {
6782 kfree(binder_device);
6783 return ret;
6784 }
6785
6786 hlist_add_head(&binder_device->hlist, &binder_devices);
6787
6788 return ret;
6789 }
6790
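/*
 * When binderfs is not enabled, devices come from binder_devices_param,
 * a comma-separated list of device names, e.g.
 * "binder,hwbinder,vndbinder" (the usual Android configuration via
 * CONFIG_ANDROID_BINDER_DEVICES).
 */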
6791 static int __init binder_init(void)
6792 {
6793 int ret;
6794 char *device_name, *device_tmp;
6795 struct binder_device *device;
6796 struct hlist_node *tmp;
6797 char *device_names = NULL;
6798
6799 ret = binder_alloc_shrinker_init();
6800 if (ret)
6801 return ret;
6802
6803 atomic_set(&binder_transaction_log.cur, ~0U);
6804 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6805
6806 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6807 if (binder_debugfs_dir_entry_root) {
6808 const struct binder_debugfs_entry *db_entry;
6809
6810 binder_for_each_debugfs_entry(db_entry)
6811 debugfs_create_file(db_entry->name,
6812 db_entry->mode,
6813 binder_debugfs_dir_entry_root,
6814 db_entry->data,
6815 db_entry->fops);
6816
6817 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6818 binder_debugfs_dir_entry_root);
6819 }
6820
6821 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6822 strcmp(binder_devices_param, "") != 0) {
6823 /*
6824 * Copy the module_parameter string, because we don't want to
6825 * tokenize it in-place.
6826 */
6827 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6828 if (!device_names) {
6829 ret = -ENOMEM;
6830 goto err_alloc_device_names_failed;
6831 }
6832
6833 device_tmp = device_names;
6834 while ((device_name = strsep(&device_tmp, ","))) {
6835 ret = init_binder_device(device_name);
6836 if (ret)
6837 goto err_init_binder_device_failed;
6838 }
6839 }
6840
6841 ret = init_binderfs();
6842 if (ret)
6843 goto err_init_binder_device_failed;
6844
6845 return ret;
6846
6847 err_init_binder_device_failed:
6848 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6849 misc_deregister(&device->miscdev);
6850 hlist_del(&device->hlist);
6851 kfree(device);
6852 }
6853
6854 kfree(device_names);
6855
6856 err_alloc_device_names_failed:
6857 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6858 binder_alloc_shrinker_exit();
6859
6860 return ret;
6861 }
6862
6863 device_initcall(binder_init);
6864
6865 #define CREATE_TRACE_POINTS
6866 #include "binder_trace.h"
6867 EXPORT_TRACEPOINT_SYMBOL_GPL(binder_transaction_received);
6868 EXPORT_TRACEPOINT_SYMBOL_GPL(binder_txn_latency_free);
6869
6870 MODULE_LICENSE("GPL v2");
6871