// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
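/*
 * Illustrative nesting only (not lifted from a specific call site): to
 * update a node together with its owning proc's lists, the node lock is
 * taken first and then the proc's inner lock, e.g. via
 *
 *	binder_node_inner_lock(node);
 *	... update node state and proc->todo ...
 *	binder_node_inner_unlock(node);
 */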

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>
#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
#include <linux/trace_clock.h>
#include <linux/proc_fs.h>
#endif

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
static int binder_transaction_proc_show(struct seq_file *m, void *unused);
DEFINE_PROC_SHOW_ATTRIBUTE(binder_transaction_proc);
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#ifdef CONFIG_ACCESS_TOKENID
#define ENABLE_ACCESS_TOKENID 1
#else
#define ENABLE_ACCESS_TOKENID 0
#endif /* CONFIG_ACCESS_TOKENID */

#ifdef CONFIG_BINDER_SENDER_INFO
#define ENABLE_BINDER_SENDER_INFO 1
#else
#define ENABLE_BINDER_SENDER_INFO 0
#endif /* CONFIG_BINDER_SENDER_INFO */

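/*
 * Feature bits reported to userspace via BINDER_CURRENT_FEATURE_SET:
 * bit 0 advertises access token id support and bit 2 advertises sender
 * info support. Bit 1 appears to be reserved for a feature not present
 * in this build (an assumption based on the gap between the two shifts).
 */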
#define ACCESS_TOKENID_FEATURE_VALUE (ENABLE_ACCESS_TOKENID << 0)
#define BINDER_SENDER_INFO_FEATURE_VALUE (ENABLE_BINDER_SENDER_INFO << 2)

#define BINDER_CURRENT_FEATURE_SET (ACCESS_TOKENID_FEATURE_VALUE | BINDER_SENDER_INFO_FEATURE_VALUE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

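/**
 * binder_transaction_log_add() - reserve the next slot in a transaction log
 * @log: ring buffer of recent (or recent failed) transactions
 *
 * Advances the cursor, marks the log as full once it has wrapped, and
 * returns a zeroed entry for the caller to fill in.
 */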
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

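/**
 * binder_worklist_empty_ilocked() - Check if no items on the work list
 * @list: list to check
 *
 * Requires the proc->inner_lock to be held.
 *
 * Return: true if there are no items on list, else false
 */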
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the signal, threads
	 * risk waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
	struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
static inline u64 binder_clock(void)
{
#ifdef CONFIG_TRACE_CLOCK
	return trace_clock_local();
#endif
	return 0;
}
#endif

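/**
 * binder_has_work_ilocked() - Check if a thread has work available
 * @thread: thread to check
 * @do_proc_work: also consider work queued on the thread's proc
 *
 * Requires the proc->inner_lock to be held.
 *
 * Return: true if the thread has pending work to handle
 */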
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

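/**
 * binder_wakeup_poll_threads_ilocked() - wake up all polling threads
 * @proc: process whose threads should be woken
 * @sync: whether to do a synchronous wake-up
 *
 * Wakes every thread that registered via (e)poll and is currently
 * available for process work. Requires the proc->inner_lock to be held.
 */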
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

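/**
 * binder_set_nice() - set the nice value of the current task
 * @nice: requested nice value
 *
 * Applies @nice when the caller is allowed to use it; otherwise falls back
 * to the RLIMIT_NICE cap and reports a user error if no limit is set.
 */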
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

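/**
 * binder_get_node_ilocked() - look up a node by its userspace address
 * @proc: proc that owns the node tree
 * @ptr: userspace address identifying the node
 *
 * Takes an implicit weak (tmp) reference on the node found so it stays
 * alive until binder_put_node() is called.
 * Requires the proc->inner_lock to be held.
 *
 * Return: the matching binder_node or NULL if none exists
 */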
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
					struct binder_proc *proc,
					struct binder_node *new_node,
					struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

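/**
 * binder_new_node() - create (or find) the node described by @fp
 * @proc: proc that will own the node
 * @fp: flat_binder_object carried in the transaction, or NULL
 *
 * Allocates a new node and inserts it under proc->inner_lock. If another
 * thread raced and inserted a matching node first, the allocation is freed
 * and the existing node is returned instead.
 *
 * Return: the node with a temporary reference held, or NULL on OOM
 */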
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

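/**
 * binder_inc_node_nilocked() - increment a node's reference counts
 * @node: node to reference
 * @strong: if true take a strong reference, else a weak one
 * @internal: distinguishes driver-internal refs from local (userspace) refs
 * @target_list: work list to queue node work on when userspace must be told
 *
 * Requires node->lock and, if the node is alive, proc->inner_lock to be held.
 *
 * Return: 0 on success, -EINVAL if the requested increment is not allowed
 */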
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
					struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

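/**
 * binder_dec_node_nilocked() - decrement a node's reference counts
 * @node: node to dereference
 * @strong: if true drop a strong reference, else a weak one
 * @internal: distinguishes driver-internal refs from local (userspace) refs
 *
 * Queues node work for the owning proc when userspace still needs to be
 * notified, or tears the node out of the proc/dead-node bookkeeping when
 * the last reference goes away.
 *
 * Return: true if the caller must free the node via binder_free_node()
 */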
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

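/**
 * binder_get_ref_olocked() - look up a ref by its handle (descriptor)
 * @proc: proc that owns the ref
 * @desc: handle to look up
 * @need_strong_ref: reject the ref if it only holds weak counts
 *
 * Requires proc->outer_lock to be held.
 *
 * Return: the matching binder_ref, or NULL if not found (or only weak
 * when a strong ref was required)
 */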
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

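/**
 * binder_pop_transaction_ilocked() - pop transaction from thread's stack
 * @target_thread: thread whose transaction stack is being unwound
 * @t: transaction expected at the top of the stack
 *
 * Removes @t from @target_thread's transaction stack and clears t->from.
 * Requires the target thread's proc->inner_lock to be held.
 */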
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: transaction whose fd fixups should be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

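/**
 * binder_free_transaction() - free a transaction and update its target proc
 * @t: transaction to free
 *
 * Drops the target proc's outstanding transaction count (waking frozen
 * waiters when it reaches zero), detaches the transaction from its buffer,
 * releases any unprocessed fd fixups and frees @t itself.
 */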
binder_free_transaction(struct binder_transaction * t)1613 static void binder_free_transaction(struct binder_transaction *t)
1614 {
1615 struct binder_proc *target_proc = t->to_proc;
1616
1617 if (target_proc) {
1618 binder_inner_proc_lock(target_proc);
1619 target_proc->outstanding_txns--;
1620 if (target_proc->outstanding_txns < 0)
1621 pr_warn("%s: Unexpected outstanding_txns %d\n",
1622 __func__, target_proc->outstanding_txns);
1623 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1624 wake_up_interruptible_all(&target_proc->freeze_wait);
1625 if (t->buffer)
1626 t->buffer->transaction = NULL;
1627 binder_inner_proc_unlock(target_proc);
1628 }
1629 if (trace_binder_txn_latency_free_enabled())
1630 binder_txn_latency_free(t);
1631 /*
1632 * If the transaction has no target_proc, then
1633 * t->buffer->transaction has already been cleared.
1634 */
1635 binder_free_txn_fixups(t);
1636 kfree(t);
1637 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1638 }
1639
binder_send_failed_reply(struct binder_transaction * t,uint32_t error_code)1640 static void binder_send_failed_reply(struct binder_transaction *t,
1641 uint32_t error_code)
1642 {
1643 struct binder_thread *target_thread;
1644 struct binder_transaction *next;
1645
1646 BUG_ON(t->flags & TF_ONE_WAY);
1647 while (1) {
1648 target_thread = binder_get_txn_from_and_acq_inner(t);
1649 if (target_thread) {
1650 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1651 "send failed reply for transaction %d to %d:%d\n",
1652 t->debug_id,
1653 target_thread->proc->pid,
1654 target_thread->pid);
1655
1656 binder_pop_transaction_ilocked(target_thread, t);
1657 if (target_thread->reply_error.cmd == BR_OK) {
1658 target_thread->reply_error.cmd = error_code;
1659 binder_enqueue_thread_work_ilocked(
1660 target_thread,
1661 &target_thread->reply_error.work);
1662 wake_up_interruptible(&target_thread->wait);
1663 } else {
1664 /*
1665 * Cannot get here for normal operation, but
1666 * we can if multiple synchronous transactions
1667 * are sent without blocking for responses.
1668 * Just ignore the 2nd error in this case.
1669 */
1670 pr_warn("Unexpected reply error: %u\n",
1671 target_thread->reply_error.cmd);
1672 }
1673 binder_inner_proc_unlock(target_thread->proc);
1674 binder_thread_dec_tmpref(target_thread);
1675 binder_free_transaction(t);
1676 return;
1677 }
1678 __release(&target_thread->proc->inner_lock);
1679 next = t->from_parent;
1680
1681 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1682 "send failed reply for transaction %d, target dead\n",
1683 t->debug_id);
1684
1685 binder_free_transaction(t);
1686 if (next == NULL) {
1687 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1688 "reply failed, no target thread at root\n");
1689 return;
1690 }
1691 t = next;
1692 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1693 "reply failed, no target thread -- retry %d\n",
1694 t->debug_id);
1695 }
1696 }
1697
1698 /**
1699 * binder_cleanup_transaction() - cleans up undelivered transaction
1700 * @t: transaction that needs to be cleaned up
1701 * @reason: reason the transaction wasn't delivered
1702 * @error_code: error to return to caller (if synchronous call)
1703 */
1704 static void binder_cleanup_transaction(struct binder_transaction *t,
1705 const char *reason,
1706 uint32_t error_code)
1707 {
1708 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1709 binder_send_failed_reply(t, error_code);
1710 } else {
1711 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1712 "undelivered transaction %d, %s\n",
1713 t->debug_id, reason);
1714 binder_free_transaction(t);
1715 }
1716 }
1717
1718 /**
1719 * binder_get_object() - gets object and checks for valid metadata
1720 * @proc: binder_proc owning the buffer
1721 * @u: sender's user pointer to base of buffer
1722 * @buffer: binder_buffer that we're parsing.
1723 * @offset: offset in the @buffer at which to validate an object.
1724 * @object: struct binder_object to read into
1725 *
1726 * Copy the binder object at the given offset into @object. If @u is
1727 * provided then the copy is from the sender's buffer. If not, then
1728 * it is copied from the target's @buffer.
1729 *
1730 * Return: If there's a valid metadata object at @offset, the
1731 * size of that object. Otherwise, it returns zero. The object
1732 * is read into the struct binder_object pointed to by @object.
1733 */
1734 static size_t binder_get_object(struct binder_proc *proc,
1735 const void __user *u,
1736 struct binder_buffer *buffer,
1737 unsigned long offset,
1738 struct binder_object *object)
1739 {
1740 size_t read_size;
1741 struct binder_object_header *hdr;
1742 size_t object_size = 0;
1743
1744 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1745 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1746 !IS_ALIGNED(offset, sizeof(u32)))
1747 return 0;
1748
1749 if (u) {
1750 if (copy_from_user(object, u + offset, read_size))
1751 return 0;
1752 } else {
1753 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1754 offset, read_size))
1755 return 0;
1756 }
1757
1758 /* Ok, now see if we read a complete object. */
1759 hdr = &object->hdr;
1760 switch (hdr->type) {
1761 case BINDER_TYPE_BINDER:
1762 case BINDER_TYPE_WEAK_BINDER:
1763 case BINDER_TYPE_HANDLE:
1764 case BINDER_TYPE_WEAK_HANDLE:
1765 object_size = sizeof(struct flat_binder_object);
1766 break;
1767 case BINDER_TYPE_FD:
1768 object_size = sizeof(struct binder_fd_object);
1769 break;
1770 case BINDER_TYPE_PTR:
1771 object_size = sizeof(struct binder_buffer_object);
1772 break;
1773 case BINDER_TYPE_FDA:
1774 object_size = sizeof(struct binder_fd_array_object);
1775 break;
1776 default:
1777 return 0;
1778 }
1779 if (offset <= buffer->data_size - object_size &&
1780 buffer->data_size >= object_size)
1781 return object_size;
1782 else
1783 return 0;
1784 }
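
/*
 * Usage sketch (illustrative only): callers in this file pass an offset
 * taken from the offsets array and then dispatch on the returned header
 * type, e.g.:
 *
 *	struct binder_object object;
 *	size_t object_size;
 *
 *	object_size = binder_get_object(proc, NULL, buffer, offset, &object);
 *	if (!object_size)
 *		return -EINVAL;		// truncated, misaligned or unknown type
 *	switch (object.hdr.type) {	// selects the valid union member
 *	case BINDER_TYPE_PTR:
 *		handle_ptr(&object.bbo);	// hypothetical helper
 *		break;
 *	}
 */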
1785
1786 /**
1787 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1788 * @proc: binder_proc owning the buffer
1789 * @b: binder_buffer containing the object
1790 * @object: struct binder_object to read into
1791 * @index: index in offset array at which the binder_buffer_object is
1792 * located
1793 * @start_offset: points to the start of the offset array
1794 * @object_offsetp: offset of @object read from @b
1795 * @num_valid: the number of valid offsets in the offset array
1796 *
1797 * Return: If @index is within the valid range of the offset array
1798 * described by @start_offset and @num_valid, and if there's a valid
1799 * binder_buffer_object at the offset found in index @index
1800 * of the offset array, that object is returned. Otherwise,
1801 * %NULL is returned.
1802 * Note that the offset found in index @index itself is not
1803 * verified; this function assumes that @num_valid elements
1804 * from @start_offset were previously verified to have valid offsets.
1805 * If @object_offsetp is non-NULL, then the offset within
1806 * @b is written to it.
1807 */
1808 static struct binder_buffer_object *binder_validate_ptr(
1809 struct binder_proc *proc,
1810 struct binder_buffer *b,
1811 struct binder_object *object,
1812 binder_size_t index,
1813 binder_size_t start_offset,
1814 binder_size_t *object_offsetp,
1815 binder_size_t num_valid)
1816 {
1817 size_t object_size;
1818 binder_size_t object_offset;
1819 unsigned long buffer_offset;
1820
1821 if (index >= num_valid)
1822 return NULL;
1823
1824 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1825 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1826 b, buffer_offset,
1827 sizeof(object_offset)))
1828 return NULL;
1829 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1830 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1831 return NULL;
1832 if (object_offsetp)
1833 *object_offsetp = object_offset;
1834
1835 return &object->bbo;
1836 }
1837
1838 /**
1839 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1840 * @proc: binder_proc owning the buffer
1841 * @b: transaction buffer
1842 * @objects_start_offset: offset to start of objects buffer
1843 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1844 * @fixup_offset: start offset in @buffer to fix up
1845 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1846 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1847 *
1848 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1849 * allowed.
1850 *
1851 * For safety reasons, we only allow fixups inside a buffer to happen
1852 * at increasing offsets; additionally, we only allow fixup on the last
1853 * buffer object that was verified, or one of its parents.
1854 *
1855 * Example of what is allowed:
1856 *
1857 * A
1858 * B (parent = A, offset = 0)
1859 * C (parent = A, offset = 16)
1860 * D (parent = C, offset = 0)
1861 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1862 *
1863 * Examples of what is not allowed:
1864 *
1865 * Decreasing offsets within the same parent:
1866 * A
1867 * C (parent = A, offset = 16)
1868 * B (parent = A, offset = 0) // decreasing offset within A
1869 *
1870 * Referring to a parent that wasn't the last object or any of its parents:
1871 * A
1872 * B (parent = A, offset = 0)
1873 * C (parent = A, offset = 0)
1874 * C (parent = A, offset = 16)
1875 * D (parent = B, offset = 0) // B is not A or any of A's parents
1876 */
1877 static bool binder_validate_fixup(struct binder_proc *proc,
1878 struct binder_buffer *b,
1879 binder_size_t objects_start_offset,
1880 binder_size_t buffer_obj_offset,
1881 binder_size_t fixup_offset,
1882 binder_size_t last_obj_offset,
1883 binder_size_t last_min_offset)
1884 {
1885 if (!last_obj_offset) {
1886 /* No last object, so there is nothing to fix up against */
1887 return false;
1888 }
1889
1890 while (last_obj_offset != buffer_obj_offset) {
1891 unsigned long buffer_offset;
1892 struct binder_object last_object;
1893 struct binder_buffer_object *last_bbo;
1894 size_t object_size = binder_get_object(proc, NULL, b,
1895 last_obj_offset,
1896 &last_object);
1897 if (object_size != sizeof(*last_bbo))
1898 return false;
1899
1900 last_bbo = &last_object.bbo;
1901 /*
1902 * Safe to retrieve the parent of last_obj, since it
1903 * was already previously verified by the driver.
1904 */
1905 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1906 return false;
1907 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1908 buffer_offset = objects_start_offset +
1909 sizeof(binder_size_t) * last_bbo->parent;
1910 if (binder_alloc_copy_from_buffer(&proc->alloc,
1911 &last_obj_offset,
1912 b, buffer_offset,
1913 sizeof(last_obj_offset)))
1914 return false;
1915 }
1916 return (fixup_offset >= last_min_offset);
1917 }
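
/*
 * Worked example for the "allowed" layout documented above (hypothetical
 * offsets): when E (parent = A, offset = 32) is validated, the last fixed
 * object is D (parent = C). The loop above walks D -> C and stops once
 * the parent chain reaches A; last_min_offset ends up as
 * C.parent_offset + sizeof(uintptr_t) = 16 + 8 on a 64-bit kernel, so
 * E's fixup_offset of 32 satisfies the final
 * (fixup_offset >= last_min_offset) check.
 */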
1918
1919 /**
1920 * struct binder_task_work_cb - for deferred close
1921 *
1922 * @twork: callback_head for task work
1923 * @file: file to fput() from task work
1924 *
1925 * Structure to pass task work to be handled after
1926 * returning from binder_ioctl() via task_work_add().
1927 */
1928 struct binder_task_work_cb {
1929 struct callback_head twork;
1930 struct file *file;
1931 };
1932
1933 /**
1934 * binder_do_fd_close() - close a file scheduled for deferred close
1935 * @twork: callback head for task work
1936 *
1937 * It is not safe to call ksys_close() during the binder_ioctl()
1938 * function if there is a chance that binder's own file descriptor
1939 * might be closed. This is to meet the requirements for using
1940 * fdget() (see comments for __fget_light()). Therefore use
1941 * task_work_add() to schedule the close operation once we have
1942 * returned from binder_ioctl(). This function is a callback
1943 * for that mechanism and does the final fput() on the file
1944 * that was pinned for the deferred close.
1945 */
1946 static void binder_do_fd_close(struct callback_head *twork)
1947 {
1948 struct binder_task_work_cb *twcb = container_of(twork,
1949 struct binder_task_work_cb, twork);
1950
1951 fput(twcb->file);
1952 kfree(twcb);
1953 }
1954
1955 /**
1956 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1957 * @fd: file-descriptor to close
1958 *
1959 * See comments in binder_do_fd_close(). This function is used to schedule
1960 * a file-descriptor to be closed after returning from binder_ioctl().
1961 */
1962 static void binder_deferred_fd_close(int fd)
1963 {
1964 struct binder_task_work_cb *twcb;
1965
1966 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1967 if (!twcb)
1968 return;
1969 init_task_work(&twcb->twork, binder_do_fd_close);
1970 twcb->file = close_fd_get_file(fd);
1971 if (twcb->file) {
1972 // pin it until binder_do_fd_close(); see comments there
1973 get_file(twcb->file);
1974 filp_close(twcb->file, current->files);
1975 task_work_add(current, &twcb->twork, TWA_RESUME);
1976 } else {
1977 kfree(twcb);
1978 }
1979 }
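
/*
 * Caller pattern (illustrative; see the BINDER_TYPE_FDA handling below):
 * an fd owned by the target is never closed inline from binder_ioctl();
 * instead the close is queued and the thread is forced back to user space
 * so the task work can run:
 *
 *	binder_deferred_fd_close(fd);
 *	if (thread)
 *		thread->looper_need_return = true;
 *
 * The final fput() then happens in binder_do_fd_close() after the ioctl
 * returns.
 */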
1980
1981 static void binder_transaction_buffer_release(struct binder_proc *proc,
1982 struct binder_thread *thread,
1983 struct binder_buffer *buffer,
1984 binder_size_t off_end_offset,
1985 bool is_failure)
1986 {
1987 int debug_id = buffer->debug_id;
1988 binder_size_t off_start_offset, buffer_offset;
1989
1990 binder_debug(BINDER_DEBUG_TRANSACTION,
1991 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1992 proc->pid, buffer->debug_id,
1993 buffer->data_size, buffer->offsets_size,
1994 (unsigned long long)off_end_offset);
1995
1996 if (buffer->target_node)
1997 binder_dec_node(buffer->target_node, 1, 0);
1998
1999 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2000
2001 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2002 buffer_offset += sizeof(binder_size_t)) {
2003 struct binder_object_header *hdr;
2004 size_t object_size = 0;
2005 struct binder_object object;
2006 binder_size_t object_offset;
2007
2008 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2009 buffer, buffer_offset,
2010 sizeof(object_offset)))
2011 object_size = binder_get_object(proc, NULL, buffer,
2012 object_offset, &object);
2013 if (object_size == 0) {
2014 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2015 debug_id, (u64)object_offset, buffer->data_size);
2016 continue;
2017 }
2018 hdr = &object.hdr;
2019 switch (hdr->type) {
2020 case BINDER_TYPE_BINDER:
2021 case BINDER_TYPE_WEAK_BINDER: {
2022 struct flat_binder_object *fp;
2023 struct binder_node *node;
2024
2025 fp = to_flat_binder_object(hdr);
2026 node = binder_get_node(proc, fp->binder);
2027 if (node == NULL) {
2028 pr_err("transaction release %d bad node %016llx\n",
2029 debug_id, (u64)fp->binder);
2030 break;
2031 }
2032 binder_debug(BINDER_DEBUG_TRANSACTION,
2033 " node %d u%016llx\n",
2034 node->debug_id, (u64)node->ptr);
2035 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2036 0);
2037 binder_put_node(node);
2038 } break;
2039 case BINDER_TYPE_HANDLE:
2040 case BINDER_TYPE_WEAK_HANDLE: {
2041 struct flat_binder_object *fp;
2042 struct binder_ref_data rdata;
2043 int ret;
2044
2045 fp = to_flat_binder_object(hdr);
2046 ret = binder_dec_ref_for_handle(proc, fp->handle,
2047 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2048
2049 if (ret) {
2050 pr_err("transaction release %d bad handle %d, ret = %d\n",
2051 debug_id, fp->handle, ret);
2052 break;
2053 }
2054 binder_debug(BINDER_DEBUG_TRANSACTION,
2055 " ref %d desc %d\n",
2056 rdata.debug_id, rdata.desc);
2057 } break;
2058
2059 case BINDER_TYPE_FD: {
2060 /*
2061 * No need to close the file here since user-space
2062 * closes it for successfully delivered
2063 * transactions. For transactions that weren't
2064 * delivered, the new fd was never allocated so
2065 * there is no need to close and the fput on the
2066 * file is done when the transaction is torn
2067 * down.
2068 */
2069 } break;
2070 case BINDER_TYPE_PTR:
2071 /*
2072 * Nothing to do here, this will get cleaned up when the
2073 * transaction buffer gets freed
2074 */
2075 break;
2076 case BINDER_TYPE_FDA: {
2077 struct binder_fd_array_object *fda;
2078 struct binder_buffer_object *parent;
2079 struct binder_object ptr_object;
2080 binder_size_t fda_offset;
2081 size_t fd_index;
2082 binder_size_t fd_buf_size;
2083 binder_size_t num_valid;
2084
2085 if (is_failure) {
2086 /*
2087 * The fd fixups have not been applied so no
2088 * fds need to be closed.
2089 */
2090 continue;
2091 }
2092
2093 num_valid = (buffer_offset - off_start_offset) /
2094 sizeof(binder_size_t);
2095 fda = to_binder_fd_array_object(hdr);
2096 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2097 fda->parent,
2098 off_start_offset,
2099 NULL,
2100 num_valid);
2101 if (!parent) {
2102 pr_err("transaction release %d bad parent offset\n",
2103 debug_id);
2104 continue;
2105 }
2106 fd_buf_size = sizeof(u32) * fda->num_fds;
2107 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2108 pr_err("transaction release %d invalid number of fds (%lld)\n",
2109 debug_id, (u64)fda->num_fds);
2110 continue;
2111 }
2112 if (fd_buf_size > parent->length ||
2113 fda->parent_offset > parent->length - fd_buf_size) {
2114 /* No space for all file descriptors here. */
2115 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2116 debug_id, (u64)fda->num_fds);
2117 continue;
2118 }
2119 /*
2120 * the source data for binder_buffer_object is visible
2121 * to user-space and the @buffer element is the user
2122 * pointer to the buffer_object containing the fd_array.
2123 * Convert the address to an offset relative to
2124 * the base of the transaction buffer.
2125 */
2126 fda_offset =
2127 (parent->buffer - (uintptr_t)buffer->user_data) +
2128 fda->parent_offset;
2129 for (fd_index = 0; fd_index < fda->num_fds;
2130 fd_index++) {
2131 u32 fd;
2132 int err;
2133 binder_size_t offset = fda_offset +
2134 fd_index * sizeof(fd);
2135
2136 err = binder_alloc_copy_from_buffer(
2137 &proc->alloc, &fd, buffer,
2138 offset, sizeof(fd));
2139 WARN_ON(err);
2140 if (!err) {
2141 binder_deferred_fd_close(fd);
2142 /*
2143 * Need to make sure the thread goes
2144 * back to userspace to complete the
2145 * deferred close
2146 */
2147 if (thread)
2148 thread->looper_need_return = true;
2149 }
2150 }
2151 } break;
2152 default:
2153 pr_err("transaction release %d bad object type %x\n",
2154 debug_id, hdr->type);
2155 break;
2156 }
2157 }
2158 }
2159
2160 /* Clean up all the objects in the buffer */
2161 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2162 struct binder_thread *thread,
2163 struct binder_buffer *buffer,
2164 bool is_failure)
2165 {
2166 binder_size_t off_end_offset;
2167
2168 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2169 off_end_offset += buffer->offsets_size;
2170
2171 binder_transaction_buffer_release(proc, thread, buffer,
2172 off_end_offset, is_failure);
2173 }
2174
2175 static int binder_translate_binder(struct flat_binder_object *fp,
2176 struct binder_transaction *t,
2177 struct binder_thread *thread)
2178 {
2179 struct binder_node *node;
2180 struct binder_proc *proc = thread->proc;
2181 struct binder_proc *target_proc = t->to_proc;
2182 struct binder_ref_data rdata;
2183 int ret = 0;
2184
2185 node = binder_get_node(proc, fp->binder);
2186 if (!node) {
2187 node = binder_new_node(proc, fp);
2188 if (!node)
2189 return -ENOMEM;
2190 }
2191 if (fp->cookie != node->cookie) {
2192 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2193 proc->pid, thread->pid, (u64)fp->binder,
2194 node->debug_id, (u64)fp->cookie,
2195 (u64)node->cookie);
2196 ret = -EINVAL;
2197 goto done;
2198 }
2199 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2200 ret = -EPERM;
2201 goto done;
2202 }
2203
2204 ret = binder_inc_ref_for_node(target_proc, node,
2205 fp->hdr.type == BINDER_TYPE_BINDER,
2206 &thread->todo, &rdata);
2207 if (ret)
2208 goto done;
2209
2210 if (fp->hdr.type == BINDER_TYPE_BINDER)
2211 fp->hdr.type = BINDER_TYPE_HANDLE;
2212 else
2213 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2214 fp->binder = 0;
2215 fp->handle = rdata.desc;
2216 fp->cookie = 0;
2217
2218 trace_binder_transaction_node_to_ref(t, node, &rdata);
2219 binder_debug(BINDER_DEBUG_TRANSACTION,
2220 " node %d u%016llx -> ref %d desc %d\n",
2221 node->debug_id, (u64)node->ptr,
2222 rdata.debug_id, rdata.desc);
2223 done:
2224 binder_put_node(node);
2225 return ret;
2226 }
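
/*
 * Net effect (illustrative): a BINDER_TYPE_BINDER object sent by the
 * node's owner leaves this function rewritten as a BINDER_TYPE_HANDLE
 * whose fp->handle is a descriptor valid in the target process, with
 * fp->binder and fp->cookie zeroed so the owner's raw pointers never
 * cross the process boundary.
 */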
2227
2228 static int binder_translate_handle(struct flat_binder_object *fp,
2229 struct binder_transaction *t,
2230 struct binder_thread *thread)
2231 {
2232 struct binder_proc *proc = thread->proc;
2233 struct binder_proc *target_proc = t->to_proc;
2234 struct binder_node *node;
2235 struct binder_ref_data src_rdata;
2236 int ret = 0;
2237
2238 node = binder_get_node_from_ref(proc, fp->handle,
2239 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2240 if (!node) {
2241 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2242 proc->pid, thread->pid, fp->handle);
2243 return -EINVAL;
2244 }
2245 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2246 ret = -EPERM;
2247 goto done;
2248 }
2249
2250 binder_node_lock(node);
2251 if (node->proc == target_proc) {
2252 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2253 fp->hdr.type = BINDER_TYPE_BINDER;
2254 else
2255 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2256 fp->binder = node->ptr;
2257 fp->cookie = node->cookie;
2258 if (node->proc)
2259 binder_inner_proc_lock(node->proc);
2260 else
2261 __acquire(&node->proc->inner_lock);
2262 binder_inc_node_nilocked(node,
2263 fp->hdr.type == BINDER_TYPE_BINDER,
2264 0, NULL);
2265 if (node->proc)
2266 binder_inner_proc_unlock(node->proc);
2267 else
2268 __release(&node->proc->inner_lock);
2269 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2270 binder_debug(BINDER_DEBUG_TRANSACTION,
2271 " ref %d desc %d -> node %d u%016llx\n",
2272 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2273 (u64)node->ptr);
2274 binder_node_unlock(node);
2275 } else {
2276 struct binder_ref_data dest_rdata;
2277
2278 binder_node_unlock(node);
2279 ret = binder_inc_ref_for_node(target_proc, node,
2280 fp->hdr.type == BINDER_TYPE_HANDLE,
2281 NULL, &dest_rdata);
2282 if (ret)
2283 goto done;
2284
2285 fp->binder = 0;
2286 fp->handle = dest_rdata.desc;
2287 fp->cookie = 0;
2288 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2289 &dest_rdata);
2290 binder_debug(BINDER_DEBUG_TRANSACTION,
2291 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2292 src_rdata.debug_id, src_rdata.desc,
2293 dest_rdata.debug_id, dest_rdata.desc,
2294 node->debug_id);
2295 }
2296 done:
2297 binder_put_node(node);
2298 return ret;
2299 }
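
/*
 * Two cases above (illustrative): if the referenced node lives in the
 * target process, the handle collapses back into a BINDER_TYPE_BINDER
 * carrying the owner's original ptr/cookie; otherwise a ref is taken in
 * the target and fp->handle is rewritten to a target-local descriptor.
 */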
2300
2301 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2302 struct binder_transaction *t,
2303 struct binder_thread *thread,
2304 struct binder_transaction *in_reply_to)
2305 {
2306 struct binder_proc *proc = thread->proc;
2307 struct binder_proc *target_proc = t->to_proc;
2308 struct binder_txn_fd_fixup *fixup;
2309 struct file *file;
2310 int ret = 0;
2311 bool target_allows_fd;
2312
2313 if (in_reply_to)
2314 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2315 else
2316 target_allows_fd = t->buffer->target_node->accept_fds;
2317 if (!target_allows_fd) {
2318 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2319 proc->pid, thread->pid,
2320 in_reply_to ? "reply" : "transaction",
2321 fd);
2322 ret = -EPERM;
2323 goto err_fd_not_accepted;
2324 }
2325
2326 file = fget(fd);
2327 if (!file) {
2328 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2329 proc->pid, thread->pid, fd);
2330 ret = -EBADF;
2331 goto err_fget;
2332 }
2333 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2334 if (ret < 0) {
2335 ret = -EPERM;
2336 goto err_security;
2337 }
2338
2339 /*
2340 * Add fixup record for this transaction. The allocation
2341 * of the fd in the target needs to be done from a
2342 * target thread.
2343 */
2344 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2345 if (!fixup) {
2346 ret = -ENOMEM;
2347 goto err_alloc;
2348 }
2349 fixup->file = file;
2350 fixup->offset = fd_offset;
2351 fixup->target_fd = -1;
2352 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2353 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2354
2355 return ret;
2356
2357 err_alloc:
2358 err_security:
2359 fput(file);
2360 err_fget:
2361 err_fd_not_accepted:
2362 return ret;
2363 }
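
/*
 * Fixup lifecycle (illustrative): only the struct file is recorded here
 * and fixup->target_fd stays -1. The actual fd is allocated later, from
 * a thread of the target process, when the entries queued on
 * t->fd_fixups are applied, and the resulting fd value is written into
 * the target buffer at fixup->offset.
 */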
2364
2365 /**
2366 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2367 * @offset: offset in target buffer to fixup
2368 * @skip_size: bytes to skip in copy (fixup will be written later)
2369 * @fixup_data: data to write at fixup offset
2370 * @node: list node
2371 *
2372 * This is used for the pointer fixup list (pf) which is created and consumed
2373 * during binder_transaction() and is only accessed locally. No
2374 * locking is necessary.
2375 *
2376 * The list is ordered by @offset.
2377 */
2378 struct binder_ptr_fixup {
2379 binder_size_t offset;
2380 size_t skip_size;
2381 binder_uintptr_t fixup_data;
2382 struct list_head node;
2383 };
2384
2385 /**
2386 * struct binder_sg_copy - scatter-gather data to be copied
2387 * @offset: offset in target buffer
2388 * @sender_uaddr: user address in source buffer
2389 * @length: bytes to copy
2390 * @node: list node
2391 *
2392 * This is used for the sg copy list (sgc) which is created and consumed
2393 * during binder_transaction() and is only accessed locally. No
2394 * locking is necessary.
2395 *
2396 * The list is ordered by @offset.
2397 */
2398 struct binder_sg_copy {
2399 binder_size_t offset;
2400 const void __user *sender_uaddr;
2401 size_t length;
2402 struct list_head node;
2403 };
2404
2405 /**
2406 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2407 * @alloc: binder_alloc associated with @buffer
2408 * @buffer: binder buffer in target process
2409 * @sgc_head: list_head of scatter-gather copy list
2410 * @pf_head: list_head of pointer fixup list
2411 *
2412 * Processes all elements of @sgc_head, applying fixups from @pf_head
2413 * and copying the scatter-gather data from the source process' user
2414 * buffer to the target's buffer. It is expected that the list creation
2415 * and processing all occurs during binder_transaction() so these lists
2416 * are only accessed in local context.
2417 *
2418 * Return: 0=success, else -errno
2419 */
2420 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2421 struct binder_buffer *buffer,
2422 struct list_head *sgc_head,
2423 struct list_head *pf_head)
2424 {
2425 int ret = 0;
2426 struct binder_sg_copy *sgc, *tmpsgc;
2427 struct binder_ptr_fixup *tmppf;
2428 struct binder_ptr_fixup *pf =
2429 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2430 node);
2431
2432 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2433 size_t bytes_copied = 0;
2434
2435 while (bytes_copied < sgc->length) {
2436 size_t copy_size;
2437 size_t bytes_left = sgc->length - bytes_copied;
2438 size_t offset = sgc->offset + bytes_copied;
2439
2440 /*
2441 * We copy up to the fixup (pointed to by pf)
2442 */
2443 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2444 : bytes_left;
2445 if (!ret && copy_size)
2446 ret = binder_alloc_copy_user_to_buffer(
2447 alloc, buffer,
2448 offset,
2449 sgc->sender_uaddr + bytes_copied,
2450 copy_size);
2451 bytes_copied += copy_size;
2452 if (copy_size != bytes_left) {
2453 BUG_ON(!pf);
2454 /* we stopped at a fixup offset */
2455 if (pf->skip_size) {
2456 /*
2457 * we are just skipping. This is for
2458 * BINDER_TYPE_FDA where the translated
2459 * fds will be fixed up when we get
2460 * to target context.
2461 */
2462 bytes_copied += pf->skip_size;
2463 } else {
2464 /* apply the fixup indicated by pf */
2465 if (!ret)
2466 ret = binder_alloc_copy_to_buffer(
2467 alloc, buffer,
2468 pf->offset,
2469 &pf->fixup_data,
2470 sizeof(pf->fixup_data));
2471 bytes_copied += sizeof(pf->fixup_data);
2472 }
2473 list_del(&pf->node);
2474 kfree(pf);
2475 pf = list_first_entry_or_null(pf_head,
2476 struct binder_ptr_fixup, node);
2477 }
2478 }
2479 list_del(&sgc->node);
2480 kfree(sgc);
2481 }
2482 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2483 BUG_ON(pf->skip_size == 0);
2484 list_del(&pf->node);
2485 kfree(pf);
2486 }
2487 BUG_ON(!list_empty(sgc_head));
2488
2489 return ret > 0 ? -EINVAL : ret;
2490 }
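
/*
 * Worked example (hypothetical values): a single sg entry covering bytes
 * [0, 64) of the target buffer with one pointer fixup at offset 16:
 *
 *	sgc: offset = 0,  length = 64
 *	pf:  offset = 16, skip_size = 0, fixup_data = <translated pointer>
 *
 * The loop above copies bytes [0, 16) from the sender, writes the
 * sizeof(binder_uintptr_t) bytes of fixup_data at offset 16, then resumes
 * the user copy for the remainder of the region. With skip_size != 0
 * (BINDER_TYPE_FDA) the region is skipped instead, to be filled with
 * translated fds later in the target's context.
 */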
2491
2492 /**
2493 * binder_cleanup_deferred_txn_lists() - free specified lists
2494 * @sgc_head: list_head of scatter-gather copy list
2495 * @pf_head: list_head of pointer fixup list
2496 *
2497 * Called to clean up @sgc_head and @pf_head if there is an
2498 * error.
2499 */
2500 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2501 struct list_head *pf_head)
2502 {
2503 struct binder_sg_copy *sgc, *tmpsgc;
2504 struct binder_ptr_fixup *pf, *tmppf;
2505
2506 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2507 list_del(&sgc->node);
2508 kfree(sgc);
2509 }
2510 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2511 list_del(&pf->node);
2512 kfree(pf);
2513 }
2514 }
2515
2516 /**
2517 * binder_defer_copy() - queue a scatter-gather buffer for copy
2518 * @sgc_head: list_head of scatter-gather copy list
2519 * @offset: binder buffer offset in target process
2520 * @sender_uaddr: user address in source process
2521 * @length: bytes to copy
2522 *
2523 * Specify a scatter-gather block to be copied. The actual copy must
2524 * be deferred until all the needed fixups are identified and queued.
2525 * Then the copy and fixups are done together so un-translated values
2526 * from the source are never visible in the target buffer.
2527 *
2528 * We are guaranteed that repeated calls to this function will have
2529 * monotonically increasing @offset values so the list will naturally
2530 * be ordered.
2531 *
2532 * Return: 0=success, else -errno
2533 */
2534 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2535 const void __user *sender_uaddr, size_t length)
2536 {
2537 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2538
2539 if (!bc)
2540 return -ENOMEM;
2541
2542 bc->offset = offset;
2543 bc->sender_uaddr = sender_uaddr;
2544 bc->length = length;
2545 INIT_LIST_HEAD(&bc->node);
2546
2547 /*
2548 * We are guaranteed that the deferred copies are in-order
2549 * so just add to the tail.
2550 */
2551 list_add_tail(&bc->node, sgc_head);
2552
2553 return 0;
2554 }
2555
2556 /**
2557 * binder_add_fixup() - queue a fixup to be applied to sg copy
2558 * @pf_head: list_head of binder ptr fixup list
2559 * @offset: binder buffer offset in target process
2560 * @fixup: bytes to be copied for fixup
2561 * @skip_size: bytes to skip when copying (fixup will be applied later)
2562 *
2563 * Add the specified fixup to a list ordered by @offset. When copying
2564 * the scatter-gather buffers, the fixup will be copied instead of
2565 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2566 * will be applied later (in target process context), so we just skip
2567 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2568 * value in @fixup.
2569 *
2570 * This function is called *mostly* in @offset order, but there are
2571 * exceptions. Since out-of-order inserts are relatively uncommon,
2572 * we insert the new element by searching backward from the tail of
2573 * the list.
2574 *
2575 * Return: 0=success, else -errno
2576 */
2577 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2578 binder_uintptr_t fixup, size_t skip_size)
2579 {
2580 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2581 struct binder_ptr_fixup *tmppf;
2582
2583 if (!pf)
2584 return -ENOMEM;
2585
2586 pf->offset = offset;
2587 pf->fixup_data = fixup;
2588 pf->skip_size = skip_size;
2589 INIT_LIST_HEAD(&pf->node);
2590
2591 /* Fixups are *mostly* added in-order, but there are some
2592 * exceptions. Look backwards through list for insertion point.
2593 */
2594 list_for_each_entry_reverse(tmppf, pf_head, node) {
2595 if (tmppf->offset < pf->offset) {
2596 list_add(&pf->node, &tmppf->node);
2597 return 0;
2598 }
2599 }
2600 /*
2601 * if we get here, then the new offset is the lowest so
2602 * insert at the head
2603 */
2604 list_add(&pf->node, pf_head);
2605 return 0;
2606 }
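
/*
 * Ordering example (hypothetical offsets): queueing fixups at offsets 8,
 * 40 and then 24 leaves pf_head ordered 8 -> 24 -> 40; the out-of-order
 * insert walks backwards from 40 and lands after the entry at 8. A
 * BINDER_TYPE_FDA fixup is queued with fixup = 0 and
 * skip_size = num_fds * sizeof(u32), so that region is skipped during
 * the sg copy.
 */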
2607
2608 static int binder_translate_fd_array(struct list_head *pf_head,
2609 struct binder_fd_array_object *fda,
2610 const void __user *sender_ubuffer,
2611 struct binder_buffer_object *parent,
2612 struct binder_buffer_object *sender_uparent,
2613 struct binder_transaction *t,
2614 struct binder_thread *thread,
2615 struct binder_transaction *in_reply_to)
2616 {
2617 binder_size_t fdi, fd_buf_size;
2618 binder_size_t fda_offset;
2619 const void __user *sender_ufda_base;
2620 struct binder_proc *proc = thread->proc;
2621 int ret;
2622
2623 if (fda->num_fds == 0)
2624 return 0;
2625
2626 fd_buf_size = sizeof(u32) * fda->num_fds;
2627 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2628 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2629 proc->pid, thread->pid, (u64)fda->num_fds);
2630 return -EINVAL;
2631 }
2632 if (fd_buf_size > parent->length ||
2633 fda->parent_offset > parent->length - fd_buf_size) {
2634 /* No space for all file descriptors here. */
2635 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2636 proc->pid, thread->pid, (u64)fda->num_fds);
2637 return -EINVAL;
2638 }
2639 /*
2640 * the source data for binder_buffer_object is visible
2641 * to user-space and the @buffer element is the user
2642 * pointer to the buffer_object containing the fd_array.
2643 * Convert the address to an offset relative to
2644 * the base of the transaction buffer.
2645 */
2646 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2647 fda->parent_offset;
2648 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2649 fda->parent_offset;
2650
2651 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2652 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2653 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2654 proc->pid, thread->pid);
2655 return -EINVAL;
2656 }
2657 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2658 if (ret)
2659 return ret;
2660
2661 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2662 u32 fd;
2663 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2664 binder_size_t sender_uoffset = fdi * sizeof(fd);
2665
2666 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2667 if (!ret)
2668 ret = binder_translate_fd(fd, offset, t, thread,
2669 in_reply_to);
2670 if (ret)
2671 return ret > 0 ? -EINVAL : ret;
2672 }
2673 return 0;
2674 }
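
/*
 * Offset arithmetic above, with hypothetical numbers: if the target
 * buffer starts at user address 0x7000, parent->buffer (a user pointer
 * into that same buffer) is 0x7100 and fda->parent_offset is 0x20, then
 *
 *	fda_offset = (0x7100 - 0x7000) + 0x20 = 0x120
 *
 * i.e. the fd array sits 0x120 bytes into the target buffer, while
 * sender_ufda_base points at the same array in the sender's address
 * space.
 */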
2675
2676 static int binder_fixup_parent(struct list_head *pf_head,
2677 struct binder_transaction *t,
2678 struct binder_thread *thread,
2679 struct binder_buffer_object *bp,
2680 binder_size_t off_start_offset,
2681 binder_size_t num_valid,
2682 binder_size_t last_fixup_obj_off,
2683 binder_size_t last_fixup_min_off)
2684 {
2685 struct binder_buffer_object *parent;
2686 struct binder_buffer *b = t->buffer;
2687 struct binder_proc *proc = thread->proc;
2688 struct binder_proc *target_proc = t->to_proc;
2689 struct binder_object object;
2690 binder_size_t buffer_offset;
2691 binder_size_t parent_offset;
2692
2693 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2694 return 0;
2695
2696 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2697 off_start_offset, &parent_offset,
2698 num_valid);
2699 if (!parent) {
2700 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2701 proc->pid, thread->pid);
2702 return -EINVAL;
2703 }
2704
2705 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2706 parent_offset, bp->parent_offset,
2707 last_fixup_obj_off,
2708 last_fixup_min_off)) {
2709 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2710 proc->pid, thread->pid);
2711 return -EINVAL;
2712 }
2713
2714 if (parent->length < sizeof(binder_uintptr_t) ||
2715 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2716 /* No space for a pointer here! */
2717 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2718 proc->pid, thread->pid);
2719 return -EINVAL;
2720 }
2721 buffer_offset = bp->parent_offset +
2722 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2723 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2724 }
2725
2726 /**
2727 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2728 * @t1: the pending async txn in the frozen process
2729 * @t2: the new async txn to supersede the outdated pending one
2730 *
2731 * Return: true if t2 can supersede t1
2732 * false if t2 cannot supersede t1
2733 */
2734 static bool binder_can_update_transaction(struct binder_transaction *t1,
2735 struct binder_transaction *t2)
2736 {
2737 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2738 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2739 return false;
2740 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2741 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2742 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2743 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2744 return true;
2745 return false;
2746 }
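
/*
 * In practice (illustrative summary of the checks above): both
 * transactions must be one-way and carry TF_UPDATE_TXN, come from the
 * same sending process, target the same node in the same task and use
 * identical code and flags; only then may the still-queued t1 be
 * superseded by t2 while the target is frozen.
 */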
2747
2748 /**
2749 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2750 * @t: new async transaction
2751 * @target_list: list to find outdated transaction
2752 *
2753 * Return: the outdated transaction if found
2754 * NULL if no outdated transaction can be found
2755 *
2756 * Requires the proc->inner_lock to be held.
2757 */
2758 static struct binder_transaction *
2759 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2760 struct list_head *target_list)
2761 {
2762 struct binder_work *w;
2763
2764 list_for_each_entry(w, target_list, entry) {
2765 struct binder_transaction *t_queued;
2766
2767 if (w->type != BINDER_WORK_TRANSACTION)
2768 continue;
2769 t_queued = container_of(w, struct binder_transaction, work);
2770 if (binder_can_update_transaction(t_queued, t))
2771 return t_queued;
2772 }
2773 return NULL;
2774 }
2775
2776 /**
2777 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2778 * @t: transaction to send
2779 * @proc: process to send the transaction to
2780 * @thread: thread in @proc to send the transaction to (may be NULL)
2781 *
2782 * This function queues a transaction to the specified process. It will try
2783 * to find a thread in the target process to handle the transaction and
2784 * wake it up. If no thread is found, the work is queued to the proc
2785 * waitqueue.
2786 *
2787 * If the @thread parameter is not NULL, the transaction is always queued
2788 * to the waitlist of that specific thread.
2789 *
2790 * Return: 0 if the transaction was successfully queued
2791 * BR_DEAD_REPLY if the target process or thread is dead
2792 * BR_FROZEN_REPLY if the target process or thread is frozen and
2793 * the sync transaction was rejected
2794 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2795 * and the async transaction was successfully queued
2796 */
2797 static int binder_proc_transaction(struct binder_transaction *t,
2798 struct binder_proc *proc,
2799 struct binder_thread *thread)
2800 {
2801 struct binder_node *node = t->buffer->target_node;
2802 bool oneway = !!(t->flags & TF_ONE_WAY);
2803 bool pending_async = false;
2804 struct binder_transaction *t_outdated = NULL;
2805 bool frozen = false;
2806
2807 BUG_ON(!node);
2808 binder_node_lock(node);
2809 if (oneway) {
2810 BUG_ON(thread);
2811 if (node->has_async_transaction)
2812 pending_async = true;
2813 else
2814 node->has_async_transaction = true;
2815 }
2816
2817 binder_inner_proc_lock(proc);
2818 if (proc->is_frozen) {
2819 frozen = true;
2820 proc->sync_recv |= !oneway;
2821 proc->async_recv |= oneway;
2822 }
2823
2824 if ((frozen && !oneway) || proc->is_dead ||
2825 (thread && thread->is_dead)) {
2826 binder_inner_proc_unlock(proc);
2827 binder_node_unlock(node);
2828 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2829 }
2830
2831 if (!thread && !pending_async)
2832 thread = binder_select_thread_ilocked(proc);
2833
2834 if (thread) {
2835 binder_enqueue_thread_work_ilocked(thread, &t->work);
2836 } else if (!pending_async) {
2837 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2838 } else {
2839 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2840 t_outdated = binder_find_outdated_transaction_ilocked(t,
2841 &node->async_todo);
2842 if (t_outdated) {
2843 binder_debug(BINDER_DEBUG_TRANSACTION,
2844 "txn %d supersedes %d\n",
2845 t->debug_id, t_outdated->debug_id);
2846 list_del_init(&t_outdated->work.entry);
2847 proc->outstanding_txns--;
2848 }
2849 }
2850 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2851 }
2852
2853 if (!pending_async)
2854 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2855
2856 proc->outstanding_txns++;
2857 binder_inner_proc_unlock(proc);
2858 binder_node_unlock(node);
2859
2860 /*
2861 * To reduce potential contention, free the outdated transaction and
2862 * buffer after releasing the locks.
2863 */
2864 if (t_outdated) {
2865 struct binder_buffer *buffer = t_outdated->buffer;
2866
2867 t_outdated->buffer = NULL;
2868 buffer->transaction = NULL;
2869 trace_binder_transaction_update_buffer_release(buffer);
2870 binder_release_entire_buffer(proc, NULL, buffer, false);
2871 binder_alloc_free_buf(&proc->alloc, buffer);
2872 kfree(t_outdated);
2873 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2874 }
2875
2876 if (oneway && frozen)
2877 return BR_TRANSACTION_PENDING_FROZEN;
2878
2879 return 0;
2880 }
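
/*
 * Sketch of how the return value is typically consumed (illustrative,
 * not the exact code used later in this file):
 *
 *	ret = binder_proc_transaction(t, target_proc, NULL);
 *	switch (ret) {
 *	case 0:					// queued, a thread was woken if needed
 *	case BR_TRANSACTION_PENDING_FROZEN:	// async txn queued to a frozen proc
 *		break;				// nothing to undo
 *	case BR_FROZEN_REPLY:			// sync txn rejected, target frozen
 *	case BR_DEAD_REPLY:			// target proc or thread is dead
 *	default:
 *		// fail the transaction back to the sender
 *		break;
 *	}
 */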
2881
2882 /**
2883 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2884 * @node: struct binder_node for which to get refs
2885 * @procp: returns @node->proc if valid
2886 * @error: if no @procp then returns BR_DEAD_REPLY
2887 *
2888 * User-space normally keeps the node alive when creating a transaction
2889 * since it has a reference to the target. The local strong ref keeps it
2890 * alive if the sending process dies before the target process processes
2891 * the transaction. If the source process is malicious or has a reference
2892 * counting bug, relying on the local strong ref can fail.
2893 *
2894 * Since user-space can cause the local strong ref to go away, we also take
2895 * a tmpref on the node to ensure it survives while we are constructing
2896 * the transaction. We also need a tmpref on the proc while we are
2897 * constructing the transaction, so we take that here as well.
2898 *
2899 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2900 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2901 * target proc has died, @error is set to BR_DEAD_REPLY.
2902 */
2903 static struct binder_node *binder_get_node_refs_for_txn(
2904 struct binder_node *node,
2905 struct binder_proc **procp,
2906 uint32_t *error)
2907 {
2908 struct binder_node *target_node = NULL;
2909
2910 binder_node_inner_lock(node);
2911 if (node->proc) {
2912 target_node = node;
2913 binder_inc_node_nilocked(node, 1, 0, NULL);
2914 binder_inc_node_tmpref_ilocked(node);
2915 node->proc->tmp_ref++;
2916 *procp = node->proc;
2917 } else
2918 *error = BR_DEAD_REPLY;
2919 binder_node_inner_unlock(node);
2920
2921 return target_node;
2922 }
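
/*
 * Typical use (see the handle lookup in binder_transaction() below): the
 * caller passes ref->node and bails out with return_error == BR_DEAD_REPLY
 * when NULL comes back. The strong ref, node tmpref and proc tmpref taken
 * here are dropped again once the transaction has been queued or has
 * failed.
 */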
2923
2924 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2925 uint32_t command, int32_t param)
2926 {
2927 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2928
2929 if (!from) {
2930 /* annotation for sparse */
2931 __release(&from->proc->inner_lock);
2932 return;
2933 }
2934
2935 /* don't override existing errors */
2936 if (from->ee.command == BR_OK)
2937 binder_set_extended_error(&from->ee, id, command, param);
2938 binder_inner_proc_unlock(from->proc);
2939 binder_thread_dec_tmpref(from);
2940 }
2941
2942 static void binder_transaction(struct binder_proc *proc,
2943 struct binder_thread *thread,
2944 struct binder_transaction_data *tr, int reply,
2945 binder_size_t extra_buffers_size)
2946 {
2947 int ret;
2948 struct binder_transaction *t;
2949 struct binder_work *w;
2950 struct binder_work *tcomplete;
2951 binder_size_t buffer_offset = 0;
2952 binder_size_t off_start_offset, off_end_offset;
2953 binder_size_t off_min;
2954 binder_size_t sg_buf_offset, sg_buf_end_offset;
2955 binder_size_t user_offset = 0;
2956 struct binder_proc *target_proc = NULL;
2957 struct binder_thread *target_thread = NULL;
2958 struct binder_node *target_node = NULL;
2959 struct binder_transaction *in_reply_to = NULL;
2960 struct binder_transaction_log_entry *e;
2961 uint32_t return_error = 0;
2962 uint32_t return_error_param = 0;
2963 uint32_t return_error_line = 0;
2964 binder_size_t last_fixup_obj_off = 0;
2965 binder_size_t last_fixup_min_off = 0;
2966 struct binder_context *context = proc->context;
2967 int t_debug_id = atomic_inc_return(&binder_last_id);
2968 ktime_t t_start_time = ktime_get();
2969 char *secctx = NULL;
2970 u32 secctx_sz = 0;
2971 struct list_head sgc_head;
2972 struct list_head pf_head;
2973 const void __user *user_buffer = (const void __user *)
2974 (uintptr_t)tr->data.ptr.buffer;
2975 INIT_LIST_HEAD(&sgc_head);
2976 INIT_LIST_HEAD(&pf_head);
2977
2978 e = binder_transaction_log_add(&binder_transaction_log);
2979 e->debug_id = t_debug_id;
2980 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2981 e->from_proc = proc->pid;
2982 e->from_thread = thread->pid;
2983 e->target_handle = tr->target.handle;
2984 e->data_size = tr->data_size;
2985 e->offsets_size = tr->offsets_size;
2986 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2987
2988 binder_inner_proc_lock(proc);
2989 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2990 binder_inner_proc_unlock(proc);
2991
2992 if (reply) {
2993 binder_inner_proc_lock(proc);
2994 in_reply_to = thread->transaction_stack;
2995 if (in_reply_to == NULL) {
2996 binder_inner_proc_unlock(proc);
2997 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2998 proc->pid, thread->pid);
2999 return_error = BR_FAILED_REPLY;
3000 return_error_param = -EPROTO;
3001 return_error_line = __LINE__;
3002 goto err_empty_call_stack;
3003 }
3004 if (in_reply_to->to_thread != thread) {
3005 spin_lock(&in_reply_to->lock);
3006 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3007 proc->pid, thread->pid, in_reply_to->debug_id,
3008 in_reply_to->to_proc ?
3009 in_reply_to->to_proc->pid : 0,
3010 in_reply_to->to_thread ?
3011 in_reply_to->to_thread->pid : 0);
3012 spin_unlock(&in_reply_to->lock);
3013 binder_inner_proc_unlock(proc);
3014 return_error = BR_FAILED_REPLY;
3015 return_error_param = -EPROTO;
3016 return_error_line = __LINE__;
3017 in_reply_to = NULL;
3018 goto err_bad_call_stack;
3019 }
3020 thread->transaction_stack = in_reply_to->to_parent;
3021 binder_inner_proc_unlock(proc);
3022 binder_set_nice(in_reply_to->saved_priority);
3023 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3024 if (target_thread == NULL) {
3025 /* annotation for sparse */
3026 __release(&target_thread->proc->inner_lock);
3027 binder_txn_error("%d:%d reply target not found\n",
3028 thread->pid, proc->pid);
3029 return_error = BR_DEAD_REPLY;
3030 return_error_line = __LINE__;
3031 goto err_dead_binder;
3032 }
3033 if (target_thread->transaction_stack != in_reply_to) {
3034 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3035 proc->pid, thread->pid,
3036 target_thread->transaction_stack ?
3037 target_thread->transaction_stack->debug_id : 0,
3038 in_reply_to->debug_id);
3039 binder_inner_proc_unlock(target_thread->proc);
3040 return_error = BR_FAILED_REPLY;
3041 return_error_param = -EPROTO;
3042 return_error_line = __LINE__;
3043 in_reply_to = NULL;
3044 target_thread = NULL;
3045 goto err_dead_binder;
3046 }
3047 target_proc = target_thread->proc;
3048 target_proc->tmp_ref++;
3049 binder_inner_proc_unlock(target_thread->proc);
3050 } else {
3051 if (tr->target.handle) {
3052 struct binder_ref *ref;
3053
3054 /*
3055 * There must already be a strong ref
3056 * on this node. If so, do a strong
3057 * increment on the node to ensure it
3058 * stays alive until the transaction is
3059 * done.
3060 */
3061 binder_proc_lock(proc);
3062 ref = binder_get_ref_olocked(proc, tr->target.handle,
3063 true);
3064 if (ref) {
3065 target_node = binder_get_node_refs_for_txn(
3066 ref->node, &target_proc,
3067 &return_error);
3068 } else {
3069 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3070 proc->pid, thread->pid, tr->target.handle);
3071 return_error = BR_FAILED_REPLY;
3072 }
3073 binder_proc_unlock(proc);
3074 } else {
3075 mutex_lock(&context->context_mgr_node_lock);
3076 target_node = context->binder_context_mgr_node;
3077 if (target_node)
3078 target_node = binder_get_node_refs_for_txn(
3079 target_node, &target_proc,
3080 &return_error);
3081 else
3082 return_error = BR_DEAD_REPLY;
3083 mutex_unlock(&context->context_mgr_node_lock);
3084 if (target_node && target_proc->pid == proc->pid) {
3085 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3086 proc->pid, thread->pid);
3087 return_error = BR_FAILED_REPLY;
3088 return_error_param = -EINVAL;
3089 return_error_line = __LINE__;
3090 goto err_invalid_target_handle;
3091 }
3092 }
3093 if (!target_node) {
3094 binder_txn_error("%d:%d cannot find target node\n",
3095 thread->pid, proc->pid);
3096 /*
3097 * return_error is set above
3098 */
3099 return_error_param = -EINVAL;
3100 return_error_line = __LINE__;
3101 goto err_dead_binder;
3102 }
3103 e->to_node = target_node->debug_id;
3104 if (WARN_ON(proc == target_proc)) {
3105 binder_txn_error("%d:%d self transactions not allowed\n",
3106 thread->pid, proc->pid);
3107 return_error = BR_FAILED_REPLY;
3108 return_error_param = -EINVAL;
3109 return_error_line = __LINE__;
3110 goto err_invalid_target_handle;
3111 }
3112 if (security_binder_transaction(proc->cred,
3113 target_proc->cred) < 0) {
3114 binder_txn_error("%d:%d transaction credentials failed\n",
3115 thread->pid, proc->pid);
3116 return_error = BR_FAILED_REPLY;
3117 return_error_param = -EPERM;
3118 return_error_line = __LINE__;
3119 goto err_invalid_target_handle;
3120 }
3121 binder_inner_proc_lock(proc);
3122
3123 w = list_first_entry_or_null(&thread->todo,
3124 struct binder_work, entry);
3125 if (!(tr->flags & TF_ONE_WAY) && w &&
3126 w->type == BINDER_WORK_TRANSACTION) {
3127 /*
3128 * Do not allow new outgoing transaction from a
3129 * thread that has a transaction at the head of
3130 * its todo list. Only need to check the head
3131 * because binder_select_thread_ilocked picks a
3132 * thread from proc->waiting_threads to enqueue
3133 * the transaction, and nothing is queued to the
3134 * todo list while the thread is on waiting_threads.
3135 */
3136 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3137 proc->pid, thread->pid);
3138 binder_inner_proc_unlock(proc);
3139 return_error = BR_FAILED_REPLY;
3140 return_error_param = -EPROTO;
3141 return_error_line = __LINE__;
3142 goto err_bad_todo_list;
3143 }
3144
3145 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3146 struct binder_transaction *tmp;
3147
3148 tmp = thread->transaction_stack;
3149 if (tmp->to_thread != thread) {
3150 spin_lock(&tmp->lock);
3151 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3152 proc->pid, thread->pid, tmp->debug_id,
3153 tmp->to_proc ? tmp->to_proc->pid : 0,
3154 tmp->to_thread ?
3155 tmp->to_thread->pid : 0);
3156 spin_unlock(&tmp->lock);
3157 binder_inner_proc_unlock(proc);
3158 return_error = BR_FAILED_REPLY;
3159 return_error_param = -EPROTO;
3160 return_error_line = __LINE__;
3161 goto err_bad_call_stack;
3162 }
3163 while (tmp) {
3164 struct binder_thread *from;
3165
3166 spin_lock(&tmp->lock);
3167 from = tmp->from;
3168 if (from && from->proc == target_proc) {
3169 atomic_inc(&from->tmp_ref);
3170 target_thread = from;
3171 spin_unlock(&tmp->lock);
3172 break;
3173 }
3174 spin_unlock(&tmp->lock);
3175 tmp = tmp->from_parent;
3176 }
3177 }
3178 binder_inner_proc_unlock(proc);
3179 }
3180 if (target_thread)
3181 e->to_thread = target_thread->pid;
3182 e->to_proc = target_proc->pid;
3183
3184 /* TODO: reuse incoming transaction for reply */
3185 t = kzalloc(sizeof(*t), GFP_KERNEL);
3186 if (t == NULL) {
3187 binder_txn_error("%d:%d cannot allocate transaction\n",
3188 thread->pid, proc->pid);
3189 return_error = BR_FAILED_REPLY;
3190 return_error_param = -ENOMEM;
3191 return_error_line = __LINE__;
3192 goto err_alloc_t_failed;
3193 }
3194 INIT_LIST_HEAD(&t->fd_fixups);
3195 binder_stats_created(BINDER_STAT_TRANSACTION);
3196 spin_lock_init(&t->lock);
3197
3198 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3199 if (tcomplete == NULL) {
3200 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3201 thread->pid, proc->pid);
3202 return_error = BR_FAILED_REPLY;
3203 return_error_param = -ENOMEM;
3204 return_error_line = __LINE__;
3205 goto err_alloc_tcomplete_failed;
3206 }
3207 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3208
3209 t->debug_id = t_debug_id;
3210 t->start_time = t_start_time;
3211
3212 if (reply)
3213 binder_debug(BINDER_DEBUG_TRANSACTION,
3214 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3215 proc->pid, thread->pid, t->debug_id,
3216 target_proc->pid, target_thread->pid,
3217 (u64)tr->data.ptr.buffer,
3218 (u64)tr->data.ptr.offsets,
3219 (u64)tr->data_size, (u64)tr->offsets_size,
3220 (u64)extra_buffers_size);
3221 else
3222 binder_debug(BINDER_DEBUG_TRANSACTION,
3223 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3224 proc->pid, thread->pid, t->debug_id,
3225 target_proc->pid, target_node->debug_id,
3226 (u64)tr->data.ptr.buffer,
3227 (u64)tr->data.ptr.offsets,
3228 (u64)tr->data_size, (u64)tr->offsets_size,
3229 (u64)extra_buffers_size);
3230
3231 if (!reply && !(tr->flags & TF_ONE_WAY)) {
3232 t->from = thread;
3233 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3234 t->from_pid = -1;
3235 t->from_tid = -1;
3236 #endif
3237 } else {
3238 t->from = NULL;
3239 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3240 t->from_pid = thread->proc->pid;
3241 t->from_tid = thread->pid;
3242 #endif
3243 }
3244
3245 t->sender_euid = task_euid(proc->tsk);
3246 #ifdef CONFIG_ACCESS_TOKENID
3247 t->sender_tokenid = current->token;
3248 t->first_tokenid = current->ftoken;
3249 #endif /* CONFIG_ACCESS_TOKENID */
3250 t->to_proc = target_proc;
3251 t->to_thread = target_thread;
3252 t->code = tr->code;
3253 t->flags = tr->flags;
3254 t->priority = task_nice(current);
3255
3256 if (target_node && target_node->txn_security_ctx) {
3257 u32 secid;
3258 size_t added_size;
3259
3260 security_cred_getsecid(proc->cred, &secid);
3261 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3262 if (ret) {
3263 binder_txn_error("%d:%d failed to get security context\n",
3264 thread->pid, proc->pid);
3265 return_error = BR_FAILED_REPLY;
3266 return_error_param = ret;
3267 return_error_line = __LINE__;
3268 goto err_get_secctx_failed;
3269 }
3270 added_size = ALIGN(secctx_sz, sizeof(u64));
3271 extra_buffers_size += added_size;
3272 if (extra_buffers_size < added_size) {
3273 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3274 thread->pid, proc->pid);
3275 return_error = BR_FAILED_REPLY;
3276 return_error_param = -EINVAL;
3277 return_error_line = __LINE__;
3278 goto err_bad_extra_size;
3279 }
3280 }
3281
3282 trace_binder_transaction(reply, t, target_node);
3283
3284 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3285 tr->offsets_size, extra_buffers_size,
3286 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3287 if (IS_ERR(t->buffer)) {
3288 char *s;
3289
3290 ret = PTR_ERR(t->buffer);
3291 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3292 : (ret == -ENOSPC) ? ": no space left"
3293 : (ret == -ENOMEM) ? ": memory allocation failed"
3294 : "";
3295 binder_txn_error("cannot allocate buffer%s", s);
3296
3297 return_error_param = PTR_ERR(t->buffer);
3298 return_error = return_error_param == -ESRCH ?
3299 BR_DEAD_REPLY : BR_FAILED_REPLY;
3300 return_error_line = __LINE__;
3301 t->buffer = NULL;
3302 goto err_binder_alloc_buf_failed;
3303 }
3304 if (secctx) {
3305 int err;
3306 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3307 ALIGN(tr->offsets_size, sizeof(void *)) +
3308 ALIGN(extra_buffers_size, sizeof(void *)) -
3309 ALIGN(secctx_sz, sizeof(u64));
3310
3311 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3312 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3313 t->buffer, buf_offset,
3314 secctx, secctx_sz);
3315 if (err) {
3316 t->security_ctx = 0;
3317 WARN_ON(1);
3318 }
3319 security_release_secctx(secctx, secctx_sz);
3320 secctx = NULL;
3321 }
3322 t->buffer->debug_id = t->debug_id;
3323 t->buffer->transaction = t;
3324 t->buffer->target_node = target_node;
3325 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3326 trace_binder_transaction_alloc_buf(t->buffer);
3327
3328 if (binder_alloc_copy_user_to_buffer(
3329 &target_proc->alloc,
3330 t->buffer,
3331 ALIGN(tr->data_size, sizeof(void *)),
3332 (const void __user *)
3333 (uintptr_t)tr->data.ptr.offsets,
3334 tr->offsets_size)) {
3335 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3336 proc->pid, thread->pid);
3337 return_error = BR_FAILED_REPLY;
3338 return_error_param = -EFAULT;
3339 return_error_line = __LINE__;
3340 goto err_copy_data_failed;
3341 }
3342 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3343 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3344 proc->pid, thread->pid, (u64)tr->offsets_size);
3345 return_error = BR_FAILED_REPLY;
3346 return_error_param = -EINVAL;
3347 return_error_line = __LINE__;
3348 goto err_bad_offset;
3349 }
3350 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3351 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3352 proc->pid, thread->pid,
3353 (u64)extra_buffers_size);
3354 return_error = BR_FAILED_REPLY;
3355 return_error_param = -EINVAL;
3356 return_error_line = __LINE__;
3357 goto err_bad_offset;
3358 }
3359 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3360 buffer_offset = off_start_offset;
3361 off_end_offset = off_start_offset + tr->offsets_size;
3362 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3363 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3364 ALIGN(secctx_sz, sizeof(u64));
3365 off_min = 0;
3366 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3367 buffer_offset += sizeof(binder_size_t)) {
3368 struct binder_object_header *hdr;
3369 size_t object_size;
3370 struct binder_object object;
3371 binder_size_t object_offset;
3372 binder_size_t copy_size;
3373
3374 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3375 &object_offset,
3376 t->buffer,
3377 buffer_offset,
3378 sizeof(object_offset))) {
3379 binder_txn_error("%d:%d copy offset from buffer failed\n",
3380 thread->pid, proc->pid);
3381 return_error = BR_FAILED_REPLY;
3382 return_error_param = -EINVAL;
3383 return_error_line = __LINE__;
3384 goto err_bad_offset;
3385 }
3386
3387 /*
3388 * Copy the source user buffer up to the next object
3389 * that will be processed.
3390 */
3391 copy_size = object_offset - user_offset;
3392 if (copy_size && (user_offset > object_offset ||
3393 object_offset > tr->data_size ||
3394 binder_alloc_copy_user_to_buffer(
3395 &target_proc->alloc,
3396 t->buffer, user_offset,
3397 user_buffer + user_offset,
3398 copy_size))) {
3399 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3400 proc->pid, thread->pid);
3401 return_error = BR_FAILED_REPLY;
3402 return_error_param = -EFAULT;
3403 return_error_line = __LINE__;
3404 goto err_copy_data_failed;
3405 }
3406 object_size = binder_get_object(target_proc, user_buffer,
3407 t->buffer, object_offset, &object);
3408 if (object_size == 0 || object_offset < off_min) {
3409 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3410 proc->pid, thread->pid,
3411 (u64)object_offset,
3412 (u64)off_min,
3413 (u64)t->buffer->data_size);
3414 return_error = BR_FAILED_REPLY;
3415 return_error_param = -EINVAL;
3416 return_error_line = __LINE__;
3417 goto err_bad_offset;
3418 }
3419 /*
3420 * Set offset to the next buffer fragment to be
3421 * copied
3422 */
3423 user_offset = object_offset + object_size;
3424
3425 hdr = &object.hdr;
3426 off_min = object_offset + object_size;
3427 switch (hdr->type) {
3428 case BINDER_TYPE_BINDER:
3429 case BINDER_TYPE_WEAK_BINDER: {
3430 struct flat_binder_object *fp;
3431
3432 fp = to_flat_binder_object(hdr);
3433 ret = binder_translate_binder(fp, t, thread);
3434
3435 if (ret < 0 ||
3436 binder_alloc_copy_to_buffer(&target_proc->alloc,
3437 t->buffer,
3438 object_offset,
3439 fp, sizeof(*fp))) {
3440 binder_txn_error("%d:%d translate binder failed\n",
3441 thread->pid, proc->pid);
3442 return_error = BR_FAILED_REPLY;
3443 return_error_param = ret;
3444 return_error_line = __LINE__;
3445 goto err_translate_failed;
3446 }
3447 } break;
3448 case BINDER_TYPE_HANDLE:
3449 case BINDER_TYPE_WEAK_HANDLE: {
3450 struct flat_binder_object *fp;
3451
3452 fp = to_flat_binder_object(hdr);
3453 ret = binder_translate_handle(fp, t, thread);
3454 if (ret < 0 ||
3455 binder_alloc_copy_to_buffer(&target_proc->alloc,
3456 t->buffer,
3457 object_offset,
3458 fp, sizeof(*fp))) {
3459 binder_txn_error("%d:%d translate handle failed\n",
3460 thread->pid, proc->pid);
3461 return_error = BR_FAILED_REPLY;
3462 return_error_param = ret;
3463 return_error_line = __LINE__;
3464 goto err_translate_failed;
3465 }
3466 } break;
3467
3468 case BINDER_TYPE_FD: {
3469 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3470 binder_size_t fd_offset = object_offset +
3471 (uintptr_t)&fp->fd - (uintptr_t)fp;
3472 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3473 thread, in_reply_to);
3474
3475 fp->pad_binder = 0;
3476 if (ret < 0 ||
3477 binder_alloc_copy_to_buffer(&target_proc->alloc,
3478 t->buffer,
3479 object_offset,
3480 fp, sizeof(*fp))) {
3481 binder_txn_error("%d:%d translate fd failed\n",
3482 thread->pid, proc->pid);
3483 return_error = BR_FAILED_REPLY;
3484 return_error_param = ret;
3485 return_error_line = __LINE__;
3486 goto err_translate_failed;
3487 }
3488 } break;
3489 case BINDER_TYPE_FDA: {
3490 struct binder_object ptr_object;
3491 binder_size_t parent_offset;
3492 struct binder_object user_object;
3493 size_t user_parent_size;
3494 struct binder_fd_array_object *fda =
3495 to_binder_fd_array_object(hdr);
3496 size_t num_valid = (buffer_offset - off_start_offset) /
3497 sizeof(binder_size_t);
3498 struct binder_buffer_object *parent =
3499 binder_validate_ptr(target_proc, t->buffer,
3500 &ptr_object, fda->parent,
3501 off_start_offset,
3502 &parent_offset,
3503 num_valid);
3504 if (!parent) {
3505 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3506 proc->pid, thread->pid);
3507 return_error = BR_FAILED_REPLY;
3508 return_error_param = -EINVAL;
3509 return_error_line = __LINE__;
3510 goto err_bad_parent;
3511 }
3512 if (!binder_validate_fixup(target_proc, t->buffer,
3513 off_start_offset,
3514 parent_offset,
3515 fda->parent_offset,
3516 last_fixup_obj_off,
3517 last_fixup_min_off)) {
3518 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3519 proc->pid, thread->pid);
3520 return_error = BR_FAILED_REPLY;
3521 return_error_param = -EINVAL;
3522 return_error_line = __LINE__;
3523 goto err_bad_parent;
3524 }
3525 /*
3526 * We need to read the user version of the parent
3527 * object to get the original user offset
3528 */
3529 user_parent_size =
3530 binder_get_object(proc, user_buffer, t->buffer,
3531 parent_offset, &user_object);
3532 if (user_parent_size != sizeof(user_object.bbo)) {
3533 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3534 proc->pid, thread->pid,
3535 user_parent_size,
3536 sizeof(user_object.bbo));
3537 return_error = BR_FAILED_REPLY;
3538 return_error_param = -EINVAL;
3539 return_error_line = __LINE__;
3540 goto err_bad_parent;
3541 }
3542 ret = binder_translate_fd_array(&pf_head, fda,
3543 user_buffer, parent,
3544 &user_object.bbo, t,
3545 thread, in_reply_to);
3546 if (!ret)
3547 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3548 t->buffer,
3549 object_offset,
3550 fda, sizeof(*fda));
3551 if (ret) {
3552 binder_txn_error("%d:%d translate fd array failed\n",
3553 thread->pid, proc->pid);
3554 return_error = BR_FAILED_REPLY;
3555 return_error_param = ret > 0 ? -EINVAL : ret;
3556 return_error_line = __LINE__;
3557 goto err_translate_failed;
3558 }
3559 last_fixup_obj_off = parent_offset;
3560 last_fixup_min_off =
3561 fda->parent_offset + sizeof(u32) * fda->num_fds;
3562 } break;
3563 case BINDER_TYPE_PTR: {
3564 struct binder_buffer_object *bp =
3565 to_binder_buffer_object(hdr);
3566 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3567 size_t num_valid;
3568
3569 if (bp->length > buf_left) {
3570 binder_user_error("%d:%d got transaction with too large buffer\n",
3571 proc->pid, thread->pid);
3572 return_error = BR_FAILED_REPLY;
3573 return_error_param = -EINVAL;
3574 return_error_line = __LINE__;
3575 goto err_bad_offset;
3576 }
3577 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3578 (const void __user *)(uintptr_t)bp->buffer,
3579 bp->length);
3580 if (ret) {
3581 binder_txn_error("%d:%d deferred copy failed\n",
3582 thread->pid, proc->pid);
3583 return_error = BR_FAILED_REPLY;
3584 return_error_param = ret;
3585 return_error_line = __LINE__;
3586 goto err_translate_failed;
3587 }
3588 /* Fixup buffer pointer to target proc address space */
3589 bp->buffer = (uintptr_t)
3590 t->buffer->user_data + sg_buf_offset;
3591 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3592
3593 num_valid = (buffer_offset - off_start_offset) /
3594 sizeof(binder_size_t);
3595 ret = binder_fixup_parent(&pf_head, t,
3596 thread, bp,
3597 off_start_offset,
3598 num_valid,
3599 last_fixup_obj_off,
3600 last_fixup_min_off);
3601 if (ret < 0 ||
3602 binder_alloc_copy_to_buffer(&target_proc->alloc,
3603 t->buffer,
3604 object_offset,
3605 bp, sizeof(*bp))) {
3606 binder_txn_error("%d:%d failed to fixup parent\n",
3607 thread->pid, proc->pid);
3608 return_error = BR_FAILED_REPLY;
3609 return_error_param = ret;
3610 return_error_line = __LINE__;
3611 goto err_translate_failed;
3612 }
3613 last_fixup_obj_off = object_offset;
3614 last_fixup_min_off = 0;
3615 } break;
3616 default:
3617 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3618 proc->pid, thread->pid, hdr->type);
3619 return_error = BR_FAILED_REPLY;
3620 return_error_param = -EINVAL;
3621 return_error_line = __LINE__;
3622 goto err_bad_object_type;
3623 }
3624 }
3625 /* Done processing objects, copy the rest of the buffer */
3626 if (binder_alloc_copy_user_to_buffer(
3627 &target_proc->alloc,
3628 t->buffer, user_offset,
3629 user_buffer + user_offset,
3630 tr->data_size - user_offset)) {
3631 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3632 proc->pid, thread->pid);
3633 return_error = BR_FAILED_REPLY;
3634 return_error_param = -EFAULT;
3635 return_error_line = __LINE__;
3636 goto err_copy_data_failed;
3637 }
3638
3639 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3640 &sgc_head, &pf_head);
3641 if (ret) {
3642 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3643 proc->pid, thread->pid);
3644 return_error = BR_FAILED_REPLY;
3645 return_error_param = ret;
3646 return_error_line = __LINE__;
3647 goto err_copy_data_failed;
3648 }
3649 if (t->buffer->oneway_spam_suspect)
3650 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3651 else
3652 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3653 t->work.type = BINDER_WORK_TRANSACTION;
3654
3655 if (reply) {
3656 binder_enqueue_thread_work(thread, tcomplete);
3657 binder_inner_proc_lock(target_proc);
3658 if (target_thread->is_dead) {
3659 return_error = BR_DEAD_REPLY;
3660 binder_inner_proc_unlock(target_proc);
3661 goto err_dead_proc_or_thread;
3662 }
3663 BUG_ON(t->buffer->async_transaction != 0);
3664 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3665 t->timestamp = in_reply_to->timestamp;
3666 #endif
3667 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3668 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3669 target_proc->outstanding_txns++;
3670 binder_inner_proc_unlock(target_proc);
3671 wake_up_interruptible_sync(&target_thread->wait);
3672 binder_free_transaction(in_reply_to);
3673 } else if (!(t->flags & TF_ONE_WAY)) {
3674 BUG_ON(t->buffer->async_transaction != 0);
3675 binder_inner_proc_lock(proc);
3676 /*
3677 * Defer the TRANSACTION_COMPLETE, so we don't return to
3678 * userspace immediately; this allows the target process to
3679 * immediately start processing this transaction, reducing
3680 * latency. We will then return the TRANSACTION_COMPLETE when
3681 * the target replies (or there is an error).
3682 */
3683 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3684 t->need_reply = 1;
3685 t->from_parent = thread->transaction_stack;
3686 thread->transaction_stack = t;
3687 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3688 t->timestamp = binder_clock();
3689 #endif
3690 binder_inner_proc_unlock(proc);
3691 return_error = binder_proc_transaction(t,
3692 target_proc, target_thread);
3693 if (return_error) {
3694 binder_inner_proc_lock(proc);
3695 binder_pop_transaction_ilocked(thread, t);
3696 binder_inner_proc_unlock(proc);
3697 goto err_dead_proc_or_thread;
3698 }
3699 } else {
3700 BUG_ON(target_node == NULL);
3701 BUG_ON(t->buffer->async_transaction != 1);
3702 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3703 t->timestamp = binder_clock();
3704 #endif
3705 return_error = binder_proc_transaction(t, target_proc, NULL);
3706 /*
3707 * Let the caller know when an async transaction reaches a frozen
3708 * process and is put in a pending queue, waiting for the target
3709 * process to be unfrozen.
3710 */
3711 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3712 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3713 binder_enqueue_thread_work(thread, tcomplete);
3714 if (return_error &&
3715 return_error != BR_TRANSACTION_PENDING_FROZEN)
3716 goto err_dead_proc_or_thread;
3717 }
3718 if (target_thread)
3719 binder_thread_dec_tmpref(target_thread);
3720 binder_proc_dec_tmpref(target_proc);
3721 if (target_node)
3722 binder_dec_node_tmpref(target_node);
3723 /*
3724 * write barrier to synchronize with initialization
3725 * of log entry
3726 */
3727 smp_wmb();
3728 WRITE_ONCE(e->debug_id_done, t_debug_id);
3729 return;
3730
3731 err_dead_proc_or_thread:
3732 binder_txn_error("%d:%d dead process or thread\n",
3733 thread->pid, proc->pid);
3734 return_error_line = __LINE__;
3735 binder_dequeue_work(proc, tcomplete);
3736 err_translate_failed:
3737 err_bad_object_type:
3738 err_bad_offset:
3739 err_bad_parent:
3740 err_copy_data_failed:
3741 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3742 binder_free_txn_fixups(t);
3743 trace_binder_transaction_failed_buffer_release(t->buffer);
3744 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3745 buffer_offset, true);
3746 if (target_node)
3747 binder_dec_node_tmpref(target_node);
3748 target_node = NULL;
3749 t->buffer->transaction = NULL;
3750 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3751 err_binder_alloc_buf_failed:
3752 err_bad_extra_size:
3753 if (secctx)
3754 security_release_secctx(secctx, secctx_sz);
3755 err_get_secctx_failed:
3756 kfree(tcomplete);
3757 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3758 err_alloc_tcomplete_failed:
3759 if (trace_binder_txn_latency_free_enabled())
3760 binder_txn_latency_free(t);
3761 kfree(t);
3762 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3763 err_alloc_t_failed:
3764 err_bad_todo_list:
3765 err_bad_call_stack:
3766 err_empty_call_stack:
3767 err_dead_binder:
3768 err_invalid_target_handle:
3769 if (target_node) {
3770 binder_dec_node(target_node, 1, 0);
3771 binder_dec_node_tmpref(target_node);
3772 }
3773
3774 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3775 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3776 proc->pid, thread->pid, reply ? "reply" :
3777 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3778 target_proc ? target_proc->pid : 0,
3779 target_thread ? target_thread->pid : 0,
3780 t_debug_id, return_error, return_error_param,
3781 (u64)tr->data_size, (u64)tr->offsets_size,
3782 return_error_line);
3783
3784 if (target_thread)
3785 binder_thread_dec_tmpref(target_thread);
3786 if (target_proc)
3787 binder_proc_dec_tmpref(target_proc);
3788
3789 {
3790 struct binder_transaction_log_entry *fe;
3791
3792 e->return_error = return_error;
3793 e->return_error_param = return_error_param;
3794 e->return_error_line = return_error_line;
3795 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3796 *fe = *e;
3797 /*
3798 * write barrier to synchronize with initialization
3799 * of log entry
3800 */
3801 smp_wmb();
3802 WRITE_ONCE(e->debug_id_done, t_debug_id);
3803 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3804 }
3805
3806 BUG_ON(thread->return_error.cmd != BR_OK);
3807 if (in_reply_to) {
3808 binder_set_txn_from_error(in_reply_to, t_debug_id,
3809 return_error, return_error_param);
3810 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3811 binder_enqueue_thread_work(thread, &thread->return_error.work);
3812 binder_send_failed_reply(in_reply_to, return_error);
3813 } else {
3814 binder_inner_proc_lock(proc);
3815 binder_set_extended_error(&thread->ee, t_debug_id,
3816 return_error, return_error_param);
3817 binder_inner_proc_unlock(proc);
3818 thread->return_error.cmd = return_error;
3819 binder_enqueue_thread_work(thread, &thread->return_error.work);
3820 }
3821 }
3822
3823 /**
3824 * binder_free_buf() - free the specified buffer
3825 * @proc: binder proc that owns buffer
3826 * @buffer: buffer to be freed
3827 * @is_failure: failed to send transaction
3828 *
3829 * If the buffer is for an async transaction, enqueue the next async
3830 * transaction from the node.
3831 *
3832 * Cleanup buffer and free it.
3833 */
3834 static void
3835 binder_free_buf(struct binder_proc *proc,
3836 struct binder_thread *thread,
3837 struct binder_buffer *buffer, bool is_failure)
3838 {
3839 binder_inner_proc_lock(proc);
3840 if (buffer->transaction) {
3841 buffer->transaction->buffer = NULL;
3842 buffer->transaction = NULL;
3843 }
3844 binder_inner_proc_unlock(proc);
3845 if (buffer->async_transaction && buffer->target_node) {
3846 struct binder_node *buf_node;
3847 struct binder_work *w;
3848
3849 buf_node = buffer->target_node;
3850 binder_node_inner_lock(buf_node);
3851 BUG_ON(!buf_node->has_async_transaction);
3852 BUG_ON(buf_node->proc != proc);
3853 w = binder_dequeue_work_head_ilocked(
3854 &buf_node->async_todo);
3855 if (!w) {
3856 buf_node->has_async_transaction = false;
3857 } else {
3858 binder_enqueue_work_ilocked(
3859 w, &proc->todo);
3860 binder_wakeup_proc_ilocked(proc);
3861 }
3862 binder_node_inner_unlock(buf_node);
3863 }
3864 trace_binder_transaction_buffer_release(buffer);
3865 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3866 binder_alloc_free_buf(&proc->alloc, buffer);
3867 }
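
/*
 * Illustrative sketch (not part of the driver): binder_free_buf() runs when
 * a userspace receiver hands a transaction buffer back with BC_FREE_BUFFER
 * after it has finished processing the received data. "binder_fd" and the
 * received struct binder_transaction_data "tr" are assumptions of the
 * example, not driver state.
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} __attribute__((packed)) free_cmd = {
 *		.cmd	= BC_FREE_BUFFER,
 *		.buffer	= tr.data.ptr.buffer,
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size	= sizeof(free_cmd),
 *		.write_buffer	= (binder_uintptr_t)&free_cmd,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */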
3868
3869 static int binder_thread_write(struct binder_proc *proc,
3870 struct binder_thread *thread,
3871 binder_uintptr_t binder_buffer, size_t size,
3872 binder_size_t *consumed)
3873 {
3874 uint32_t cmd;
3875 struct binder_context *context = proc->context;
3876 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3877 void __user *ptr = buffer + *consumed;
3878 void __user *end = buffer + size;
3879
3880 while (ptr < end && thread->return_error.cmd == BR_OK) {
3881 int ret;
3882
3883 if (get_user(cmd, (uint32_t __user *)ptr))
3884 return -EFAULT;
3885 ptr += sizeof(uint32_t);
3886 trace_binder_command(cmd);
3887 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3888 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3889 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3890 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3891 }
3892 switch (cmd) {
3893 case BC_INCREFS:
3894 case BC_ACQUIRE:
3895 case BC_RELEASE:
3896 case BC_DECREFS: {
3897 uint32_t target;
3898 const char *debug_string;
3899 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3900 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3901 struct binder_ref_data rdata;
3902
3903 if (get_user(target, (uint32_t __user *)ptr))
3904 return -EFAULT;
3905
3906 ptr += sizeof(uint32_t);
3907 ret = -1;
3908 if (increment && !target) {
3909 struct binder_node *ctx_mgr_node;
3910
3911 mutex_lock(&context->context_mgr_node_lock);
3912 ctx_mgr_node = context->binder_context_mgr_node;
3913 if (ctx_mgr_node) {
3914 if (ctx_mgr_node->proc == proc) {
3915 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3916 proc->pid, thread->pid);
3917 mutex_unlock(&context->context_mgr_node_lock);
3918 return -EINVAL;
3919 }
3920 ret = binder_inc_ref_for_node(
3921 proc, ctx_mgr_node,
3922 strong, NULL, &rdata);
3923 }
3924 mutex_unlock(&context->context_mgr_node_lock);
3925 }
3926 if (ret)
3927 ret = binder_update_ref_for_handle(
3928 proc, target, increment, strong,
3929 &rdata);
3930 if (!ret && rdata.desc != target) {
3931 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3932 proc->pid, thread->pid,
3933 target, rdata.desc);
3934 }
3935 switch (cmd) {
3936 case BC_INCREFS:
3937 debug_string = "IncRefs";
3938 break;
3939 case BC_ACQUIRE:
3940 debug_string = "Acquire";
3941 break;
3942 case BC_RELEASE:
3943 debug_string = "Release";
3944 break;
3945 case BC_DECREFS:
3946 default:
3947 debug_string = "DecRefs";
3948 break;
3949 }
3950 if (ret) {
3951 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3952 proc->pid, thread->pid, debug_string,
3953 strong, target, ret);
3954 break;
3955 }
3956 binder_debug(BINDER_DEBUG_USER_REFS,
3957 "%d:%d %s ref %d desc %d s %d w %d\n",
3958 proc->pid, thread->pid, debug_string,
3959 rdata.debug_id, rdata.desc, rdata.strong,
3960 rdata.weak);
3961 break;
3962 }
3963 case BC_INCREFS_DONE:
3964 case BC_ACQUIRE_DONE: {
3965 binder_uintptr_t node_ptr;
3966 binder_uintptr_t cookie;
3967 struct binder_node *node;
3968 bool free_node;
3969
3970 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3971 return -EFAULT;
3972 ptr += sizeof(binder_uintptr_t);
3973 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3974 return -EFAULT;
3975 ptr += sizeof(binder_uintptr_t);
3976 node = binder_get_node(proc, node_ptr);
3977 if (node == NULL) {
3978 binder_user_error("%d:%d %s u%016llx no match\n",
3979 proc->pid, thread->pid,
3980 cmd == BC_INCREFS_DONE ?
3981 "BC_INCREFS_DONE" :
3982 "BC_ACQUIRE_DONE",
3983 (u64)node_ptr);
3984 break;
3985 }
3986 if (cookie != node->cookie) {
3987 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3988 proc->pid, thread->pid,
3989 cmd == BC_INCREFS_DONE ?
3990 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3991 (u64)node_ptr, node->debug_id,
3992 (u64)cookie, (u64)node->cookie);
3993 binder_put_node(node);
3994 break;
3995 }
3996 binder_node_inner_lock(node);
3997 if (cmd == BC_ACQUIRE_DONE) {
3998 if (node->pending_strong_ref == 0) {
3999 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4000 proc->pid, thread->pid,
4001 node->debug_id);
4002 binder_node_inner_unlock(node);
4003 binder_put_node(node);
4004 break;
4005 }
4006 node->pending_strong_ref = 0;
4007 } else {
4008 if (node->pending_weak_ref == 0) {
4009 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4010 proc->pid, thread->pid,
4011 node->debug_id);
4012 binder_node_inner_unlock(node);
4013 binder_put_node(node);
4014 break;
4015 }
4016 node->pending_weak_ref = 0;
4017 }
4018 free_node = binder_dec_node_nilocked(node,
4019 cmd == BC_ACQUIRE_DONE, 0);
4020 WARN_ON(free_node);
4021 binder_debug(BINDER_DEBUG_USER_REFS,
4022 "%d:%d %s node %d ls %d lw %d tr %d\n",
4023 proc->pid, thread->pid,
4024 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4025 node->debug_id, node->local_strong_refs,
4026 node->local_weak_refs, node->tmp_refs);
4027 binder_node_inner_unlock(node);
4028 binder_put_node(node);
4029 break;
4030 }
4031 case BC_ATTEMPT_ACQUIRE:
4032 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4033 return -EINVAL;
4034 case BC_ACQUIRE_RESULT:
4035 pr_err("BC_ACQUIRE_RESULT not supported\n");
4036 return -EINVAL;
4037
4038 case BC_FREE_BUFFER: {
4039 binder_uintptr_t data_ptr;
4040 struct binder_buffer *buffer;
4041
4042 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4043 return -EFAULT;
4044 ptr += sizeof(binder_uintptr_t);
4045
4046 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4047 data_ptr);
4048 if (IS_ERR_OR_NULL(buffer)) {
4049 if (PTR_ERR(buffer) == -EPERM) {
4050 binder_user_error(
4051 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4052 proc->pid, thread->pid,
4053 (u64)data_ptr);
4054 } else {
4055 binder_user_error(
4056 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4057 proc->pid, thread->pid,
4058 (u64)data_ptr);
4059 }
4060 break;
4061 }
4062 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4063 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4064 proc->pid, thread->pid, (u64)data_ptr,
4065 buffer->debug_id,
4066 buffer->transaction ? "active" : "finished");
4067 binder_free_buf(proc, thread, buffer, false);
4068 break;
4069 }
4070
4071 case BC_TRANSACTION_SG:
4072 case BC_REPLY_SG: {
4073 struct binder_transaction_data_sg tr;
4074
4075 if (copy_from_user(&tr, ptr, sizeof(tr)))
4076 return -EFAULT;
4077 ptr += sizeof(tr);
4078 binder_transaction(proc, thread, &tr.transaction_data,
4079 cmd == BC_REPLY_SG, tr.buffers_size);
4080 break;
4081 }
4082 case BC_TRANSACTION:
4083 case BC_REPLY: {
4084 struct binder_transaction_data tr;
4085
4086 if (copy_from_user(&tr, ptr, sizeof(tr)))
4087 return -EFAULT;
4088 ptr += sizeof(tr);
4089 binder_transaction(proc, thread, &tr,
4090 cmd == BC_REPLY, 0);
4091 break;
4092 }
4093
4094 case BC_REGISTER_LOOPER:
4095 binder_debug(BINDER_DEBUG_THREADS,
4096 "%d:%d BC_REGISTER_LOOPER\n",
4097 proc->pid, thread->pid);
4098 binder_inner_proc_lock(proc);
4099 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4100 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4101 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4102 proc->pid, thread->pid);
4103 } else if (proc->requested_threads == 0) {
4104 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4105 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4106 proc->pid, thread->pid);
4107 } else {
4108 proc->requested_threads--;
4109 proc->requested_threads_started++;
4110 }
4111 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4112 binder_inner_proc_unlock(proc);
4113 break;
4114 case BC_ENTER_LOOPER:
4115 binder_debug(BINDER_DEBUG_THREADS,
4116 "%d:%d BC_ENTER_LOOPER\n",
4117 proc->pid, thread->pid);
4118 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4119 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4120 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4121 proc->pid, thread->pid);
4122 }
4123 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4124 break;
4125 case BC_EXIT_LOOPER:
4126 binder_debug(BINDER_DEBUG_THREADS,
4127 "%d:%d BC_EXIT_LOOPER\n",
4128 proc->pid, thread->pid);
4129 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4130 break;
4131
4132 case BC_REQUEST_DEATH_NOTIFICATION:
4133 case BC_CLEAR_DEATH_NOTIFICATION: {
4134 uint32_t target;
4135 binder_uintptr_t cookie;
4136 struct binder_ref *ref;
4137 struct binder_ref_death *death = NULL;
4138
4139 if (get_user(target, (uint32_t __user *)ptr))
4140 return -EFAULT;
4141 ptr += sizeof(uint32_t);
4142 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4143 return -EFAULT;
4144 ptr += sizeof(binder_uintptr_t);
4145 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4146 /*
4147 * Allocate memory for death notification
4148 * before taking lock
4149 */
4150 death = kzalloc(sizeof(*death), GFP_KERNEL);
4151 if (death == NULL) {
4152 WARN_ON(thread->return_error.cmd !=
4153 BR_OK);
4154 thread->return_error.cmd = BR_ERROR;
4155 binder_enqueue_thread_work(
4156 thread,
4157 &thread->return_error.work);
4158 binder_debug(
4159 BINDER_DEBUG_FAILED_TRANSACTION,
4160 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4161 proc->pid, thread->pid);
4162 break;
4163 }
4164 }
4165 binder_proc_lock(proc);
4166 ref = binder_get_ref_olocked(proc, target, false);
4167 if (ref == NULL) {
4168 binder_user_error("%d:%d %s invalid ref %d\n",
4169 proc->pid, thread->pid,
4170 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4171 "BC_REQUEST_DEATH_NOTIFICATION" :
4172 "BC_CLEAR_DEATH_NOTIFICATION",
4173 target);
4174 binder_proc_unlock(proc);
4175 kfree(death);
4176 break;
4177 }
4178
4179 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4180 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4181 proc->pid, thread->pid,
4182 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4183 "BC_REQUEST_DEATH_NOTIFICATION" :
4184 "BC_CLEAR_DEATH_NOTIFICATION",
4185 (u64)cookie, ref->data.debug_id,
4186 ref->data.desc, ref->data.strong,
4187 ref->data.weak, ref->node->debug_id);
4188
4189 binder_node_lock(ref->node);
4190 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4191 if (ref->death) {
4192 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4193 proc->pid, thread->pid);
4194 binder_node_unlock(ref->node);
4195 binder_proc_unlock(proc);
4196 kfree(death);
4197 break;
4198 }
4199 binder_stats_created(BINDER_STAT_DEATH);
4200 INIT_LIST_HEAD(&death->work.entry);
4201 death->cookie = cookie;
4202 ref->death = death;
4203 if (ref->node->proc == NULL) {
4204 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4205
4206 binder_inner_proc_lock(proc);
4207 binder_enqueue_work_ilocked(
4208 &ref->death->work, &proc->todo);
4209 binder_wakeup_proc_ilocked(proc);
4210 binder_inner_proc_unlock(proc);
4211 }
4212 } else {
4213 if (ref->death == NULL) {
4214 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4215 proc->pid, thread->pid);
4216 binder_node_unlock(ref->node);
4217 binder_proc_unlock(proc);
4218 break;
4219 }
4220 death = ref->death;
4221 if (death->cookie != cookie) {
4222 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4223 proc->pid, thread->pid,
4224 (u64)death->cookie,
4225 (u64)cookie);
4226 binder_node_unlock(ref->node);
4227 binder_proc_unlock(proc);
4228 break;
4229 }
4230 ref->death = NULL;
4231 binder_inner_proc_lock(proc);
4232 if (list_empty(&death->work.entry)) {
4233 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4234 if (thread->looper &
4235 (BINDER_LOOPER_STATE_REGISTERED |
4236 BINDER_LOOPER_STATE_ENTERED))
4237 binder_enqueue_thread_work_ilocked(
4238 thread,
4239 &death->work);
4240 else {
4241 binder_enqueue_work_ilocked(
4242 &death->work,
4243 &proc->todo);
4244 binder_wakeup_proc_ilocked(
4245 proc);
4246 }
4247 } else {
4248 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4249 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4250 }
4251 binder_inner_proc_unlock(proc);
4252 }
4253 binder_node_unlock(ref->node);
4254 binder_proc_unlock(proc);
4255 } break;
4256 case BC_DEAD_BINDER_DONE: {
4257 struct binder_work *w;
4258 binder_uintptr_t cookie;
4259 struct binder_ref_death *death = NULL;
4260
4261 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4262 return -EFAULT;
4263
4264 ptr += sizeof(cookie);
4265 binder_inner_proc_lock(proc);
4266 list_for_each_entry(w, &proc->delivered_death,
4267 entry) {
4268 struct binder_ref_death *tmp_death =
4269 container_of(w,
4270 struct binder_ref_death,
4271 work);
4272
4273 if (tmp_death->cookie == cookie) {
4274 death = tmp_death;
4275 break;
4276 }
4277 }
4278 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4279 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4280 proc->pid, thread->pid, (u64)cookie,
4281 death);
4282 if (death == NULL) {
4283 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4284 proc->pid, thread->pid, (u64)cookie);
4285 binder_inner_proc_unlock(proc);
4286 break;
4287 }
4288 binder_dequeue_work_ilocked(&death->work);
4289 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4290 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4291 if (thread->looper &
4292 (BINDER_LOOPER_STATE_REGISTERED |
4293 BINDER_LOOPER_STATE_ENTERED))
4294 binder_enqueue_thread_work_ilocked(
4295 thread, &death->work);
4296 else {
4297 binder_enqueue_work_ilocked(
4298 &death->work,
4299 &proc->todo);
4300 binder_wakeup_proc_ilocked(proc);
4301 }
4302 }
4303 binder_inner_proc_unlock(proc);
4304 } break;
4305
4306 default:
4307 pr_err("%d:%d unknown command %u\n",
4308 proc->pid, thread->pid, cmd);
4309 return -EINVAL;
4310 }
4311 *consumed = ptr - buffer;
4312 }
4313 return 0;
4314 }
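
/*
 * Illustrative sketch (not part of the driver): binder_thread_write()
 * consumes the write buffer as a stream, so userspace may batch several
 * BC_* commands in one BINDER_WRITE_READ call. A minimal sketch that takes
 * and then acquires a reference on a handle in a single write; "binder_fd"
 * and "handle" are assumptions of the example:
 *
 *	struct {
 *		uint32_t incref_cmd;  uint32_t incref_handle;
 *		uint32_t acquire_cmd; uint32_t acquire_handle;
 *	} __attribute__((packed)) cmds = {
 *		.incref_cmd	= BC_INCREFS,  .incref_handle	= handle,
 *		.acquire_cmd	= BC_ACQUIRE,  .acquire_handle	= handle,
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size	= sizeof(cmds),
 *		.write_buffer	= (binder_uintptr_t)&cmds,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * The loop above keeps parsing until *consumed reaches the write size or a
 * command fails.
 */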
4315
4316 static void binder_stat_br(struct binder_proc *proc,
4317 struct binder_thread *thread, uint32_t cmd)
4318 {
4319 trace_binder_return(cmd);
4320 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4321 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4322 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4323 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4324 }
4325 }
4326
4327 static int binder_put_node_cmd(struct binder_proc *proc,
4328 struct binder_thread *thread,
4329 void __user **ptrp,
4330 binder_uintptr_t node_ptr,
4331 binder_uintptr_t node_cookie,
4332 int node_debug_id,
4333 uint32_t cmd, const char *cmd_name)
4334 {
4335 void __user *ptr = *ptrp;
4336
4337 if (put_user(cmd, (uint32_t __user *)ptr))
4338 return -EFAULT;
4339 ptr += sizeof(uint32_t);
4340
4341 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4342 return -EFAULT;
4343 ptr += sizeof(binder_uintptr_t);
4344
4345 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4346 return -EFAULT;
4347 ptr += sizeof(binder_uintptr_t);
4348
4349 binder_stat_br(proc, thread, cmd);
4350 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4351 proc->pid, thread->pid, cmd_name, node_debug_id,
4352 (u64)node_ptr, (u64)node_cookie);
4353
4354 *ptrp = ptr;
4355 return 0;
4356 }
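
/*
 * Illustrative sketch (not part of the driver): binder_put_node_cmd()
 * emits a fixed triple into the read buffer -- a u32 command followed by
 * the node's ptr and cookie. After acting on BR_INCREFS or BR_ACQUIRE,
 * userspace is expected to answer with BC_INCREFS_DONE or BC_ACQUIRE_DONE
 * carrying the same ptr/cookie pair (see their handling in
 * binder_thread_write()). A sketch of the reply, assuming "binder_fd" and
 * the node_ptr/node_cookie values parsed from the read buffer:
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_ptr_cookie pc;
 *	} __attribute__((packed)) done = {
 *		.cmd	= BC_ACQUIRE_DONE,
 *		.pc	= { .ptr = node_ptr, .cookie = node_cookie },
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size	= sizeof(done),
 *		.write_buffer	= (binder_uintptr_t)&done,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */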
4357
4358 static int binder_wait_for_work(struct binder_thread *thread,
4359 bool do_proc_work)
4360 {
4361 DEFINE_WAIT(wait);
4362 struct binder_proc *proc = thread->proc;
4363 int ret = 0;
4364
4365 binder_inner_proc_lock(proc);
4366 for (;;) {
4367 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4368 if (binder_has_work_ilocked(thread, do_proc_work))
4369 break;
4370 if (do_proc_work)
4371 list_add(&thread->waiting_thread_node,
4372 &proc->waiting_threads);
4373 binder_inner_proc_unlock(proc);
4374 schedule();
4375 binder_inner_proc_lock(proc);
4376 list_del_init(&thread->waiting_thread_node);
4377 if (signal_pending(current)) {
4378 ret = -EINTR;
4379 break;
4380 }
4381 }
4382 finish_wait(&thread->wait, &wait);
4383 binder_inner_proc_unlock(proc);
4384
4385 return ret;
4386 }
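
/*
 * Design note (illustrative sketch, not driver code): binder_wait_for_work()
 * open-codes its wait loop instead of using wait_event_interruptible()
 * because proc->inner_lock must be dropped across schedule() and the
 * waiting_threads list has to be maintained under that lock. The shape is
 * the usual prepare_to_wait() pattern; "wq", "lock" and "cond" below are
 * hypothetical names for the sketch:
 *
 *	spin_lock(&lock);
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		spin_unlock(&lock);
 *		schedule();
 *		spin_lock(&lock);
 *		if (signal_pending(current))
 *			break;
 *	}
 *	finish_wait(&wq, &wait);
 *	spin_unlock(&lock);
 */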
4387
4388 /**
4389 * binder_apply_fd_fixups() - finish fd translation
4390 * @proc: binder_proc associated with @t->buffer
4391 * @t: binder transaction with list of fd fixups
4392 *
4393 * Now that we are in the context of the transaction target
4394 * process, we can allocate and install fds. Process the
4395 * list of fds to translate and fix up the buffer with the
4396 * new fds first, and only then install the files.
4397 *
4398 * If we fail to allocate an fd, skip the install and release
4399 * any fds that have already been allocated.
4400 */
4401 static int binder_apply_fd_fixups(struct binder_proc *proc,
4402 struct binder_transaction *t)
4403 {
4404 struct binder_txn_fd_fixup *fixup, *tmp;
4405 int ret = 0;
4406
4407 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4408 int fd = get_unused_fd_flags(O_CLOEXEC);
4409
4410 if (fd < 0) {
4411 binder_debug(BINDER_DEBUG_TRANSACTION,
4412 "failed fd fixup txn %d fd %d\n",
4413 t->debug_id, fd);
4414 ret = -ENOMEM;
4415 goto err;
4416 }
4417 binder_debug(BINDER_DEBUG_TRANSACTION,
4418 "fd fixup txn %d fd %d\n",
4419 t->debug_id, fd);
4420 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4421 fixup->target_fd = fd;
4422 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4423 fixup->offset, &fd,
4424 sizeof(u32))) {
4425 ret = -EINVAL;
4426 goto err;
4427 }
4428 }
4429 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4430 fd_install(fixup->target_fd, fixup->file);
4431 list_del(&fixup->fixup_entry);
4432 kfree(fixup);
4433 }
4434
4435 return ret;
4436
4437 err:
4438 binder_free_txn_fixups(t);
4439 return ret;
4440 }
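
/*
 * Illustrative sketch (not part of the driver): after
 * binder_apply_fd_fixups() has run, each BINDER_TYPE_FD object in the
 * delivered buffer carries a descriptor that is valid in the receiving
 * process. A receiver could walk the offsets array and use the fd
 * directly; "tr" is the received binder_transaction_data and
 * use_received_fd() is a hypothetical helper:
 *
 *	binder_size_t *offs = (binder_size_t *)(uintptr_t)tr.data.ptr.offsets;
 *	size_t n = tr.offsets_size / sizeof(binder_size_t);
 *
 *	for (size_t i = 0; i < n; i++) {
 *		struct binder_object_header *hdr = (void *)
 *			((uintptr_t)tr.data.ptr.buffer + offs[i]);
 *
 *		if (hdr->type == BINDER_TYPE_FD) {
 *			struct binder_fd_object *fdo = (void *)hdr;
 *
 *			use_received_fd(fdo->fd);
 *		}
 *	}
 */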
4441
4442 static int binder_thread_read(struct binder_proc *proc,
4443 struct binder_thread *thread,
4444 binder_uintptr_t binder_buffer, size_t size,
4445 binder_size_t *consumed, int non_block)
4446 {
4447 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4448 void __user *ptr = buffer + *consumed;
4449 void __user *end = buffer + size;
4450
4451 int ret = 0;
4452 int wait_for_proc_work;
4453
4454 if (*consumed == 0) {
4455 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4456 return -EFAULT;
4457 ptr += sizeof(uint32_t);
4458 }
4459
4460 retry:
4461 binder_inner_proc_lock(proc);
4462 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4463 binder_inner_proc_unlock(proc);
4464
4465 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4466
4467 trace_binder_wait_for_work(wait_for_proc_work,
4468 !!thread->transaction_stack,
4469 !binder_worklist_empty(proc, &thread->todo));
4470 if (wait_for_proc_work) {
4471 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4472 BINDER_LOOPER_STATE_ENTERED))) {
4473 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4474 proc->pid, thread->pid, thread->looper);
4475 wait_event_interruptible(binder_user_error_wait,
4476 binder_stop_on_user_error < 2);
4477 }
4478 binder_set_nice(proc->default_priority);
4479 }
4480
4481 if (non_block) {
4482 if (!binder_has_work(thread, wait_for_proc_work))
4483 ret = -EAGAIN;
4484 } else {
4485 ret = binder_wait_for_work(thread, wait_for_proc_work);
4486 }
4487
4488 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4489
4490 if (ret)
4491 return ret;
4492
4493 while (1) {
4494 uint32_t cmd;
4495 struct binder_transaction_data_secctx tr;
4496 struct binder_transaction_data *trd = &tr.transaction_data;
4497 struct binder_work *w = NULL;
4498 struct list_head *list = NULL;
4499 struct binder_transaction *t = NULL;
4500 struct binder_thread *t_from;
4501 size_t trsize = sizeof(*trd);
4502
4503 binder_inner_proc_lock(proc);
4504 if (!binder_worklist_empty_ilocked(&thread->todo))
4505 list = &thread->todo;
4506 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4507 wait_for_proc_work)
4508 list = &proc->todo;
4509 else {
4510 binder_inner_proc_unlock(proc);
4511
4512 /* no data added */
4513 if (ptr - buffer == 4 && !thread->looper_need_return)
4514 goto retry;
4515 break;
4516 }
4517
4518 if (end - ptr < sizeof(tr) + 4) {
4519 binder_inner_proc_unlock(proc);
4520 break;
4521 }
4522 w = binder_dequeue_work_head_ilocked(list);
4523 if (binder_worklist_empty_ilocked(&thread->todo))
4524 thread->process_todo = false;
4525
4526 switch (w->type) {
4527 case BINDER_WORK_TRANSACTION: {
4528 binder_inner_proc_unlock(proc);
4529 t = container_of(w, struct binder_transaction, work);
4530 } break;
4531 case BINDER_WORK_RETURN_ERROR: {
4532 struct binder_error *e = container_of(
4533 w, struct binder_error, work);
4534
4535 WARN_ON(e->cmd == BR_OK);
4536 binder_inner_proc_unlock(proc);
4537 if (put_user(e->cmd, (uint32_t __user *)ptr))
4538 return -EFAULT;
4539 cmd = e->cmd;
4540 e->cmd = BR_OK;
4541 ptr += sizeof(uint32_t);
4542
4543 binder_stat_br(proc, thread, cmd);
4544 } break;
4545 case BINDER_WORK_TRANSACTION_COMPLETE:
4546 case BINDER_WORK_TRANSACTION_PENDING:
4547 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4548 if (proc->oneway_spam_detection_enabled &&
4549 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4550 cmd = BR_ONEWAY_SPAM_SUSPECT;
4551 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4552 cmd = BR_TRANSACTION_PENDING_FROZEN;
4553 else
4554 cmd = BR_TRANSACTION_COMPLETE;
4555 binder_inner_proc_unlock(proc);
4556 kfree(w);
4557 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4558 if (put_user(cmd, (uint32_t __user *)ptr))
4559 return -EFAULT;
4560 ptr += sizeof(uint32_t);
4561
4562 binder_stat_br(proc, thread, cmd);
4563 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4564 "%d:%d BR_TRANSACTION_COMPLETE\n",
4565 proc->pid, thread->pid);
4566 } break;
4567 case BINDER_WORK_NODE: {
4568 struct binder_node *node = container_of(w, struct binder_node, work);
4569 int strong, weak;
4570 binder_uintptr_t node_ptr = node->ptr;
4571 binder_uintptr_t node_cookie = node->cookie;
4572 int node_debug_id = node->debug_id;
4573 int has_weak_ref;
4574 int has_strong_ref;
4575 void __user *orig_ptr = ptr;
4576
4577 BUG_ON(proc != node->proc);
4578 strong = node->internal_strong_refs ||
4579 node->local_strong_refs;
4580 weak = !hlist_empty(&node->refs) ||
4581 node->local_weak_refs ||
4582 node->tmp_refs || strong;
4583 has_strong_ref = node->has_strong_ref;
4584 has_weak_ref = node->has_weak_ref;
4585
4586 if (weak && !has_weak_ref) {
4587 node->has_weak_ref = 1;
4588 node->pending_weak_ref = 1;
4589 node->local_weak_refs++;
4590 }
4591 if (strong && !has_strong_ref) {
4592 node->has_strong_ref = 1;
4593 node->pending_strong_ref = 1;
4594 node->local_strong_refs++;
4595 }
4596 if (!strong && has_strong_ref)
4597 node->has_strong_ref = 0;
4598 if (!weak && has_weak_ref)
4599 node->has_weak_ref = 0;
4600 if (!weak && !strong) {
4601 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4602 "%d:%d node %d u%016llx c%016llx deleted\n",
4603 proc->pid, thread->pid,
4604 node_debug_id,
4605 (u64)node_ptr,
4606 (u64)node_cookie);
4607 rb_erase(&node->rb_node, &proc->nodes);
4608 binder_inner_proc_unlock(proc);
4609 binder_node_lock(node);
4610 /*
4611 * Acquire the node lock before freeing the
4612 * node to serialize with other threads that
4613 * may have been holding the node lock while
4614 * decrementing this node (avoids race where
4615 * this thread frees while the other thread
4616 * is unlocking the node after the final
4617 * decrement)
4618 */
4619 binder_node_unlock(node);
4620 binder_free_node(node);
4621 } else
4622 binder_inner_proc_unlock(proc);
4623
4624 if (weak && !has_weak_ref)
4625 ret = binder_put_node_cmd(
4626 proc, thread, &ptr, node_ptr,
4627 node_cookie, node_debug_id,
4628 BR_INCREFS, "BR_INCREFS");
4629 if (!ret && strong && !has_strong_ref)
4630 ret = binder_put_node_cmd(
4631 proc, thread, &ptr, node_ptr,
4632 node_cookie, node_debug_id,
4633 BR_ACQUIRE, "BR_ACQUIRE");
4634 if (!ret && !strong && has_strong_ref)
4635 ret = binder_put_node_cmd(
4636 proc, thread, &ptr, node_ptr,
4637 node_cookie, node_debug_id,
4638 BR_RELEASE, "BR_RELEASE");
4639 if (!ret && !weak && has_weak_ref)
4640 ret = binder_put_node_cmd(
4641 proc, thread, &ptr, node_ptr,
4642 node_cookie, node_debug_id,
4643 BR_DECREFS, "BR_DECREFS");
4644 if (orig_ptr == ptr)
4645 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4646 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4647 proc->pid, thread->pid,
4648 node_debug_id,
4649 (u64)node_ptr,
4650 (u64)node_cookie);
4651 if (ret)
4652 return ret;
4653 } break;
4654 case BINDER_WORK_DEAD_BINDER:
4655 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4656 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4657 struct binder_ref_death *death;
4658 uint32_t cmd;
4659 binder_uintptr_t cookie;
4660
4661 death = container_of(w, struct binder_ref_death, work);
4662 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4663 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4664 else
4665 cmd = BR_DEAD_BINDER;
4666 cookie = death->cookie;
4667
4668 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4669 "%d:%d %s %016llx\n",
4670 proc->pid, thread->pid,
4671 cmd == BR_DEAD_BINDER ?
4672 "BR_DEAD_BINDER" :
4673 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4674 (u64)cookie);
4675 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4676 binder_inner_proc_unlock(proc);
4677 kfree(death);
4678 binder_stats_deleted(BINDER_STAT_DEATH);
4679 } else {
4680 binder_enqueue_work_ilocked(
4681 w, &proc->delivered_death);
4682 binder_inner_proc_unlock(proc);
4683 }
4684 if (put_user(cmd, (uint32_t __user *)ptr))
4685 return -EFAULT;
4686 ptr += sizeof(uint32_t);
4687 if (put_user(cookie,
4688 (binder_uintptr_t __user *)ptr))
4689 return -EFAULT;
4690 ptr += sizeof(binder_uintptr_t);
4691 binder_stat_br(proc, thread, cmd);
4692 if (cmd == BR_DEAD_BINDER)
4693 goto done; /* DEAD_BINDER notifications can cause transactions */
4694 } break;
4695 default:
4696 binder_inner_proc_unlock(proc);
4697 pr_err("%d:%d: bad work type %d\n",
4698 proc->pid, thread->pid, w->type);
4699 break;
4700 }
4701
4702 if (!t)
4703 continue;
4704
4705 BUG_ON(t->buffer == NULL);
4706 if (t->buffer->target_node) {
4707 struct binder_node *target_node = t->buffer->target_node;
4708
4709 trd->target.ptr = target_node->ptr;
4710 trd->cookie = target_node->cookie;
4711 t->saved_priority = task_nice(current);
4712 if (t->priority < target_node->min_priority &&
4713 !(t->flags & TF_ONE_WAY))
4714 binder_set_nice(t->priority);
4715 else if (!(t->flags & TF_ONE_WAY) ||
4716 t->saved_priority > target_node->min_priority)
4717 binder_set_nice(target_node->min_priority);
4718 cmd = BR_TRANSACTION;
4719 } else {
4720 trd->target.ptr = 0;
4721 trd->cookie = 0;
4722 cmd = BR_REPLY;
4723 }
4724 trd->code = t->code;
4725 trd->flags = t->flags;
4726 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4727
4728 t_from = binder_get_txn_from(t);
4729 if (t_from) {
4730 struct task_struct *sender = t_from->proc->tsk;
4731
4732 trd->sender_pid =
4733 task_tgid_nr_ns(sender,
4734 task_active_pid_ns(current));
4735 #ifdef CONFIG_BINDER_SENDER_INFO
4736 binder_inner_proc_lock(thread->proc);
4737 thread->sender_pid_nr = task_tgid_nr(sender);
4738 binder_inner_proc_unlock(thread->proc);
4739 #endif
4740 } else {
4741 trd->sender_pid = 0;
4742 #ifdef CONFIG_BINDER_SENDER_INFO
4743 binder_inner_proc_lock(thread->proc);
4744 thread->sender_pid_nr = 0;
4745 binder_inner_proc_unlock(thread->proc);
4746 #endif
4747 }
4748
4749 ret = binder_apply_fd_fixups(proc, t);
4750 if (ret) {
4751 struct binder_buffer *buffer = t->buffer;
4752 bool oneway = !!(t->flags & TF_ONE_WAY);
4753 int tid = t->debug_id;
4754
4755 if (t_from)
4756 binder_thread_dec_tmpref(t_from);
4757 buffer->transaction = NULL;
4758 binder_cleanup_transaction(t, "fd fixups failed",
4759 BR_FAILED_REPLY);
4760 binder_free_buf(proc, thread, buffer, true);
4761 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4762 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4763 proc->pid, thread->pid,
4764 oneway ? "async " :
4765 (cmd == BR_REPLY ? "reply " : ""),
4766 tid, BR_FAILED_REPLY, ret, __LINE__);
4767 if (cmd == BR_REPLY) {
4768 cmd = BR_FAILED_REPLY;
4769 if (put_user(cmd, (uint32_t __user *)ptr))
4770 return -EFAULT;
4771 ptr += sizeof(uint32_t);
4772 binder_stat_br(proc, thread, cmd);
4773 break;
4774 }
4775 continue;
4776 }
4777 trd->data_size = t->buffer->data_size;
4778 trd->offsets_size = t->buffer->offsets_size;
4779 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4780 trd->data.ptr.offsets = trd->data.ptr.buffer +
4781 ALIGN(t->buffer->data_size,
4782 sizeof(void *));
4783
4784 tr.secctx = t->security_ctx;
4785 if (t->security_ctx) {
4786 cmd = BR_TRANSACTION_SEC_CTX;
4787 trsize = sizeof(tr);
4788 }
4789 if (put_user(cmd, (uint32_t __user *)ptr)) {
4790 if (t_from)
4791 binder_thread_dec_tmpref(t_from);
4792
4793 binder_cleanup_transaction(t, "put_user failed",
4794 BR_FAILED_REPLY);
4795
4796 return -EFAULT;
4797 }
4798 ptr += sizeof(uint32_t);
4799 if (copy_to_user(ptr, &tr, trsize)) {
4800 if (t_from)
4801 binder_thread_dec_tmpref(t_from);
4802
4803 binder_cleanup_transaction(t, "copy_to_user failed",
4804 BR_FAILED_REPLY);
4805
4806 return -EFAULT;
4807 }
4808 ptr += trsize;
4809
4810 trace_binder_transaction_received(t);
4811 binder_stat_br(proc, thread, cmd);
4812 binder_debug(BINDER_DEBUG_TRANSACTION,
4813 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4814 proc->pid, thread->pid,
4815 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4816 (cmd == BR_TRANSACTION_SEC_CTX) ?
4817 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4818 t->debug_id, t_from ? t_from->proc->pid : 0,
4819 t_from ? t_from->pid : 0, cmd,
4820 t->buffer->data_size, t->buffer->offsets_size,
4821 (u64)trd->data.ptr.buffer,
4822 (u64)trd->data.ptr.offsets);
4823
4824 if (t_from)
4825 binder_thread_dec_tmpref(t_from);
4826 t->buffer->allow_user_free = 1;
4827 #ifdef CONFIG_ACCESS_TOKENID
4828 binder_inner_proc_lock(thread->proc);
4829 thread->tokens.sender_tokenid = t->sender_tokenid;
4830 thread->tokens.first_tokenid = t->first_tokenid;
4831 binder_inner_proc_unlock(thread->proc);
4832 #endif /* CONFIG_ACCESS_TOKENID */
4833 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4834 binder_inner_proc_lock(thread->proc);
4835 t->to_parent = thread->transaction_stack;
4836 t->to_thread = thread;
4837 thread->transaction_stack = t;
4838 binder_inner_proc_unlock(thread->proc);
4839 } else {
4840 binder_free_transaction(t);
4841 }
4842 break;
4843 }
4844
4845 done:
4846
4847 *consumed = ptr - buffer;
4848 binder_inner_proc_lock(proc);
4849 if (proc->requested_threads == 0 &&
4850 list_empty(&thread->proc->waiting_threads) &&
4851 proc->requested_threads_started < proc->max_threads &&
4852 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4853 BINDER_LOOPER_STATE_ENTERED))
4854 /* the user-space code fails to spawn a new thread if we leave this out */) {
4855 proc->requested_threads++;
4856 binder_inner_proc_unlock(proc);
4857 binder_debug(BINDER_DEBUG_THREADS,
4858 "%d:%d BR_SPAWN_LOOPER\n",
4859 proc->pid, thread->pid);
4860 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4861 return -EFAULT;
4862 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4863 } else
4864 binder_inner_proc_unlock(proc);
4865 return 0;
4866 }
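
/*
 * Illustrative sketch (not part of the driver): the BR_SPAWN_LOOPER hint
 * emitted above is only useful if the client honours it. A typical read
 * loop announces itself with BC_ENTER_LOOPER, blocks in BINDER_WRITE_READ,
 * and starts a new pool thread (whose first command is BC_REGISTER_LOOPER)
 * whenever BR_SPAWN_LOOPER is returned. The BR_* payload length is encoded
 * in the command value, so _IOC_SIZE(cmd) can be used to skip payloads.
 * "binder_fd" and spawn_pool_thread() are assumptions of the example:
 *
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	uint8_t rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_size	= sizeof(enter),
 *		.write_buffer	= (binder_uintptr_t)&enter,
 *		.read_size	= sizeof(rbuf),
 *		.read_buffer	= (binder_uintptr_t)rbuf,
 *	};
 *
 *	for (;;) {
 *		bwr.write_consumed = 0;
 *		bwr.read_consumed = 0;
 *		if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *			break;
 *		bwr.write_size = 0;
 *		for (binder_size_t off = 0; off < bwr.read_consumed; ) {
 *			uint32_t cmd;
 *
 *			memcpy(&cmd, rbuf + off, sizeof(cmd));
 *			off += sizeof(cmd) + _IOC_SIZE(cmd);
 *			if (cmd == BR_SPAWN_LOOPER)
 *				spawn_pool_thread();
 *		}
 *	}
 */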
4867
4868 static void binder_release_work(struct binder_proc *proc,
4869 struct list_head *list)
4870 {
4871 struct binder_work *w;
4872 enum binder_work_type wtype;
4873
4874 while (1) {
4875 binder_inner_proc_lock(proc);
4876 w = binder_dequeue_work_head_ilocked(list);
4877 wtype = w ? w->type : 0;
4878 binder_inner_proc_unlock(proc);
4879 if (!w)
4880 return;
4881
4882 switch (wtype) {
4883 case BINDER_WORK_TRANSACTION: {
4884 struct binder_transaction *t;
4885
4886 t = container_of(w, struct binder_transaction, work);
4887
4888 binder_cleanup_transaction(t, "process died.",
4889 BR_DEAD_REPLY);
4890 } break;
4891 case BINDER_WORK_RETURN_ERROR: {
4892 struct binder_error *e = container_of(
4893 w, struct binder_error, work);
4894
4895 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4896 "undelivered TRANSACTION_ERROR: %u\n",
4897 e->cmd);
4898 } break;
4899 case BINDER_WORK_TRANSACTION_PENDING:
4900 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4901 case BINDER_WORK_TRANSACTION_COMPLETE: {
4902 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4903 "undelivered TRANSACTION_COMPLETE\n");
4904 kfree(w);
4905 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4906 } break;
4907 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4908 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4909 struct binder_ref_death *death;
4910
4911 death = container_of(w, struct binder_ref_death, work);
4912 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4913 "undelivered death notification, %016llx\n",
4914 (u64)death->cookie);
4915 kfree(death);
4916 binder_stats_deleted(BINDER_STAT_DEATH);
4917 } break;
4918 case BINDER_WORK_NODE:
4919 break;
4920 default:
4921 pr_err("unexpected work type, %d, not freed\n",
4922 wtype);
4923 break;
4924 }
4925 }
4926
4927 }
4928
4929 static struct binder_thread *binder_get_thread_ilocked(
4930 struct binder_proc *proc, struct binder_thread *new_thread)
4931 {
4932 struct binder_thread *thread = NULL;
4933 struct rb_node *parent = NULL;
4934 struct rb_node **p = &proc->threads.rb_node;
4935
4936 while (*p) {
4937 parent = *p;
4938 thread = rb_entry(parent, struct binder_thread, rb_node);
4939
4940 if (current->pid < thread->pid)
4941 p = &(*p)->rb_left;
4942 else if (current->pid > thread->pid)
4943 p = &(*p)->rb_right;
4944 else
4945 return thread;
4946 }
4947 if (!new_thread)
4948 return NULL;
4949 thread = new_thread;
4950 binder_stats_created(BINDER_STAT_THREAD);
4951 thread->proc = proc;
4952 thread->pid = current->pid;
4953 atomic_set(&thread->tmp_ref, 0);
4954 init_waitqueue_head(&thread->wait);
4955 INIT_LIST_HEAD(&thread->todo);
4956 rb_link_node(&thread->rb_node, parent, p);
4957 rb_insert_color(&thread->rb_node, &proc->threads);
4958 thread->looper_need_return = true;
4959 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4960 thread->return_error.cmd = BR_OK;
4961 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4962 thread->reply_error.cmd = BR_OK;
4963 thread->ee.command = BR_OK;
4964 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4965 return thread;
4966 }
4967
4968 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4969 {
4970 struct binder_thread *thread;
4971 struct binder_thread *new_thread;
4972
4973 binder_inner_proc_lock(proc);
4974 thread = binder_get_thread_ilocked(proc, NULL);
4975 binder_inner_proc_unlock(proc);
4976 if (!thread) {
4977 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4978 if (new_thread == NULL)
4979 return NULL;
4980 binder_inner_proc_lock(proc);
4981 thread = binder_get_thread_ilocked(proc, new_thread);
4982 binder_inner_proc_unlock(proc);
4983 if (thread != new_thread)
4984 kfree(new_thread);
4985 }
4986 return thread;
4987 }
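
/*
 * Design note (illustrative sketch, not driver code): binder_get_thread()
 * allocates the new thread struct with GFP_KERNEL before taking
 * proc->inner_lock because that lock is a spinlock and the allocation may
 * sleep. The lookup is then repeated under the lock, and the speculative
 * allocation is freed if another thread inserted the entry first. The
 * general shape, with hypothetical lookup_locked()/insert_locked() helpers:
 *
 *	new = kzalloc(sizeof(*new), GFP_KERNEL);
 *	if (!new)
 *		return NULL;
 *	spin_lock(&lock);
 *	found = lookup_locked(key);
 *	if (!found) {
 *		insert_locked(key, new);
 *		found = new;
 *	}
 *	spin_unlock(&lock);
 *	if (found != new)
 *		kfree(new);
 *	return found;
 */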
4988
4989 static void binder_free_proc(struct binder_proc *proc)
4990 {
4991 struct binder_device *device;
4992
4993 BUG_ON(!list_empty(&proc->todo));
4994 BUG_ON(!list_empty(&proc->delivered_death));
4995 if (proc->outstanding_txns)
4996 pr_warn("%s: Unexpected outstanding_txns %d\n",
4997 __func__, proc->outstanding_txns);
4998 device = container_of(proc->context, struct binder_device, context);
4999 if (refcount_dec_and_test(&device->ref)) {
5000 kfree(proc->context->name);
5001 kfree(device);
5002 }
5003 binder_alloc_deferred_release(&proc->alloc);
5004 put_task_struct(proc->tsk);
5005 put_cred(proc->cred);
5006 binder_stats_deleted(BINDER_STAT_PROC);
5007 kfree(proc);
5008 }
5009
5010 static void binder_free_thread(struct binder_thread *thread)
5011 {
5012 BUG_ON(!list_empty(&thread->todo));
5013 binder_stats_deleted(BINDER_STAT_THREAD);
5014 binder_proc_dec_tmpref(thread->proc);
5015 kfree(thread);
5016 }
5017
5018 static int binder_thread_release(struct binder_proc *proc,
5019 struct binder_thread *thread)
5020 {
5021 struct binder_transaction *t;
5022 struct binder_transaction *send_reply = NULL;
5023 int active_transactions = 0;
5024 struct binder_transaction *last_t = NULL;
5025
5026 binder_inner_proc_lock(thread->proc);
5027 /*
5028 * take a ref on the proc so it survives
5029 * after we remove this thread from proc->threads.
5030 * The corresponding dec is when we actually
5031 * free the thread in binder_free_thread()
5032 */
5033 proc->tmp_ref++;
5034 /*
5035 * take a ref on this thread to ensure it
5036 * survives while we are releasing it
5037 */
5038 atomic_inc(&thread->tmp_ref);
5039 rb_erase(&thread->rb_node, &proc->threads);
5040 t = thread->transaction_stack;
5041 if (t) {
5042 spin_lock(&t->lock);
5043 if (t->to_thread == thread)
5044 send_reply = t;
5045 } else {
5046 __acquire(&t->lock);
5047 }
5048 thread->is_dead = true;
5049
5050 while (t) {
5051 last_t = t;
5052 active_transactions++;
5053 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5054 "release %d:%d transaction %d %s, still active\n",
5055 proc->pid, thread->pid,
5056 t->debug_id,
5057 (t->to_thread == thread) ? "in" : "out");
5058
5059 if (t->to_thread == thread) {
5060 thread->proc->outstanding_txns--;
5061 t->to_proc = NULL;
5062 t->to_thread = NULL;
5063 if (t->buffer) {
5064 t->buffer->transaction = NULL;
5065 t->buffer = NULL;
5066 }
5067 t = t->to_parent;
5068 } else if (t->from == thread) {
5069 t->from = NULL;
5070 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
5071 t->from_pid = -1;
5072 t->from_tid = -1;
5073 #endif
5074 t = t->from_parent;
5075 } else
5076 BUG();
5077 spin_unlock(&last_t->lock);
5078 if (t)
5079 spin_lock(&t->lock);
5080 else
5081 __acquire(&t->lock);
5082 }
5083 /* annotation for sparse, lock not acquired in last iteration above */
5084 __release(&t->lock);
5085
5086 /*
5087 * If this thread used poll, make sure we remove the waitqueue from any
5088 * poll data structures holding it.
5089 */
5090 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5091 wake_up_pollfree(&thread->wait);
5092
5093 binder_inner_proc_unlock(thread->proc);
5094
5095 /*
5096 * This is needed to avoid races between wake_up_pollfree() above and
5097 * someone else removing the last entry from the queue for other reasons
5098 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5099 * descriptor being closed). Such other users hold an RCU read lock, so
5100 * we can be sure they're done after we call synchronize_rcu().
5101 */
5102 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5103 synchronize_rcu();
5104
5105 if (send_reply)
5106 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5107 binder_release_work(proc, &thread->todo);
5108 binder_thread_dec_tmpref(thread);
5109 return active_transactions;
5110 }
5111
5112 static __poll_t binder_poll(struct file *filp,
5113 struct poll_table_struct *wait)
5114 {
5115 struct binder_proc *proc = filp->private_data;
5116 struct binder_thread *thread = NULL;
5117 bool wait_for_proc_work;
5118
5119 thread = binder_get_thread(proc);
5120 if (!thread)
5121 return EPOLLERR;
5122
5123 binder_inner_proc_lock(thread->proc);
5124 thread->looper |= BINDER_LOOPER_STATE_POLL;
5125 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5126
5127 binder_inner_proc_unlock(thread->proc);
5128
5129 poll_wait(filp, &thread->wait, wait);
5130
5131 if (binder_has_work(thread, wait_for_proc_work))
5132 return EPOLLIN;
5133
5134 return 0;
5135 }
5136
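/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ
 *
 * Copies a struct binder_write_read from userspace, consumes the write
 * buffer first via binder_thread_write(), then fills the read buffer
 * via binder_thread_read(), and copies the consumed counts back to
 * userspace even when one of the two phases fails.
 *
 * Illustrative userspace sketch (not part of the driver; buffer and fd
 * names are hypothetical, the struct layout comes from
 * uapi/linux/android/binder.h):
 *
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)cmd_buf;
 *	bwr.write_size = cmd_len;
 *	bwr.read_buffer = (binder_uintptr_t)reply_buf;
 *	bwr.read_size = sizeof(reply_buf);
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */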
5137 static int binder_ioctl_write_read(struct file *filp,
5138 unsigned int cmd, unsigned long arg,
5139 struct binder_thread *thread)
5140 {
5141 int ret = 0;
5142 struct binder_proc *proc = filp->private_data;
5143 unsigned int size = _IOC_SIZE(cmd);
5144 void __user *ubuf = (void __user *)arg;
5145 struct binder_write_read bwr;
5146
5147 if (size != sizeof(struct binder_write_read)) {
5148 ret = -EINVAL;
5149 goto out;
5150 }
5151
5152 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5153 ret = -EFAULT;
5154 goto out;
5155 }
5156 binder_debug(BINDER_DEBUG_READ_WRITE,
5157 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5158 proc->pid, thread->pid,
5159 (u64)bwr.write_size, (u64)bwr.write_buffer,
5160 (u64)bwr.read_size, (u64)bwr.read_buffer);
5161
5162 if (bwr.write_size > 0) {
5163 ret = binder_thread_write(proc, thread,
5164 bwr.write_buffer,
5165 bwr.write_size,
5166 &bwr.write_consumed);
5167 trace_binder_write_done(ret);
5168 if (ret < 0) {
5169 bwr.read_consumed = 0;
5170 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5171 ret = -EFAULT;
5172 goto out;
5173 }
5174 }
5175 if (bwr.read_size > 0) {
5176 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5177 bwr.read_size,
5178 &bwr.read_consumed,
5179 filp->f_flags & O_NONBLOCK);
5180 trace_binder_read_done(ret);
5181 binder_inner_proc_lock(proc);
5182 if (!binder_worklist_empty_ilocked(&proc->todo))
5183 binder_wakeup_proc_ilocked(proc);
5184 binder_inner_proc_unlock(proc);
5185 if (ret < 0) {
5186 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5187 ret = -EFAULT;
5188 goto out;
5189 }
5190 }
5191 binder_debug(BINDER_DEBUG_READ_WRITE,
5192 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5193 proc->pid, thread->pid,
5194 (u64)bwr.write_consumed, (u64)bwr.write_size,
5195 (u64)bwr.read_consumed, (u64)bwr.read_size);
5196 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5197 ret = -EFAULT;
5198 goto out;
5199 }
5200 out:
5201 return ret;
5202 }
5203
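/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR(_EXT)
 *
 * Registers the calling process as the context manager of its binder
 * context. Fails with -EBUSY if a manager node already exists, runs the
 * security_binder_set_context_mgr() hook, and either checks the caller
 * against the previously recorded manager uid or records the caller's
 * euid as that uid. On success a new node built from @fbo (NULL for the
 * legacy BINDER_SET_CONTEXT_MGR command) becomes
 * context->binder_context_mgr_node with initial strong and weak refs.
 */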
5204 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5205 struct flat_binder_object *fbo)
5206 {
5207 int ret = 0;
5208 struct binder_proc *proc = filp->private_data;
5209 struct binder_context *context = proc->context;
5210 struct binder_node *new_node;
5211 kuid_t curr_euid = current_euid();
5212
5213 mutex_lock(&context->context_mgr_node_lock);
5214 if (context->binder_context_mgr_node) {
5215 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5216 ret = -EBUSY;
5217 goto out;
5218 }
5219 ret = security_binder_set_context_mgr(proc->cred);
5220 if (ret < 0)
5221 goto out;
5222 if (uid_valid(context->binder_context_mgr_uid)) {
5223 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5224 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5225 from_kuid(&init_user_ns, curr_euid),
5226 from_kuid(&init_user_ns,
5227 context->binder_context_mgr_uid));
5228 ret = -EPERM;
5229 goto out;
5230 }
5231 } else {
5232 context->binder_context_mgr_uid = curr_euid;
5233 }
5234 new_node = binder_new_node(proc, fbo);
5235 if (!new_node) {
5236 ret = -ENOMEM;
5237 goto out;
5238 }
5239 binder_node_lock(new_node);
5240 new_node->local_weak_refs++;
5241 new_node->local_strong_refs++;
5242 new_node->has_strong_ref = 1;
5243 new_node->has_weak_ref = 1;
5244 context->binder_context_mgr_node = new_node;
5245 binder_node_unlock(new_node);
5246 binder_put_node(new_node);
5247 out:
5248 mutex_unlock(&context->context_mgr_node_lock);
5249 return ret;
5250 }
5251
5252 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5253 struct binder_node_info_for_ref *info)
5254 {
5255 struct binder_node *node;
5256 struct binder_context *context = proc->context;
5257 __u32 handle = info->handle;
5258
5259 if (info->strong_count || info->weak_count || info->reserved1 ||
5260 info->reserved2 || info->reserved3) {
5261 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5262 proc->pid);
5263 return -EINVAL;
5264 }
5265
5266 /* This ioctl may only be used by the context manager */
5267 mutex_lock(&context->context_mgr_node_lock);
5268 if (!context->binder_context_mgr_node ||
5269 context->binder_context_mgr_node->proc != proc) {
5270 mutex_unlock(&context->context_mgr_node_lock);
5271 return -EPERM;
5272 }
5273 mutex_unlock(&context->context_mgr_node_lock);
5274
5275 node = binder_get_node_from_ref(proc, handle, true, NULL);
5276 if (!node)
5277 return -EINVAL;
5278
5279 info->strong_count = node->local_strong_refs +
5280 node->internal_strong_refs;
5281 info->weak_count = node->local_weak_refs;
5282
5283 binder_put_node(node);
5284
5285 return 0;
5286 }
5287
5288 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5289 struct binder_node_debug_info *info)
5290 {
5291 struct rb_node *n;
5292 binder_uintptr_t ptr = info->ptr;
5293
5294 memset(info, 0, sizeof(*info));
5295
5296 binder_inner_proc_lock(proc);
5297 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5298 struct binder_node *node = rb_entry(n, struct binder_node,
5299 rb_node);
5300 if (node->ptr > ptr) {
5301 info->ptr = node->ptr;
5302 info->cookie = node->cookie;
5303 info->has_strong_ref = node->has_strong_ref;
5304 info->has_weak_ref = node->has_weak_ref;
5305 break;
5306 }
5307 }
5308 binder_inner_proc_unlock(proc);
5309
5310 return 0;
5311 }
5312
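/*
 * binder_txns_pending_ilocked() - check for in-flight transactions
 *
 * Returns true if the proc has outstanding transactions or any of its
 * threads still has a transaction stack. Used by the freeze paths to
 * decide whether a freeze attempt can complete. Requires the inner
 * proc lock.
 */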
5313 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5314 {
5315 struct rb_node *n;
5316 struct binder_thread *thread;
5317
5318 if (proc->outstanding_txns > 0)
5319 return true;
5320
5321 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5322 thread = rb_entry(n, struct binder_thread, rb_node);
5323 if (thread->transaction_stack)
5324 return true;
5325 }
5326 return false;
5327 }
5328
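/*
 * binder_ioctl_freeze() - freeze or unfreeze one binder_proc
 *
 * Unfreezing simply clears the frozen and recv flags. Freezing sets
 * is_frozen under the inner lock, optionally waits up to
 * info->timeout_ms for outstanding transactions to drain, and fails
 * with -EAGAIN if transactions are still pending; on any failure the
 * frozen state is rolled back.
 */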
5329 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5330 struct binder_proc *target_proc)
5331 {
5332 int ret = 0;
5333
5334 if (!info->enable) {
5335 binder_inner_proc_lock(target_proc);
5336 target_proc->sync_recv = false;
5337 target_proc->async_recv = false;
5338 target_proc->is_frozen = false;
5339 binder_inner_proc_unlock(target_proc);
5340 return 0;
5341 }
5342
5343 /*
5344 * Freezing the target. Prevent new transactions by
5345 * setting frozen state. If timeout specified, wait
5346 * for transactions to drain.
5347 */
5348 binder_inner_proc_lock(target_proc);
5349 target_proc->sync_recv = false;
5350 target_proc->async_recv = false;
5351 target_proc->is_frozen = true;
5352 binder_inner_proc_unlock(target_proc);
5353
5354 if (info->timeout_ms > 0)
5355 ret = wait_event_interruptible_timeout(
5356 target_proc->freeze_wait,
5357 (!target_proc->outstanding_txns),
5358 msecs_to_jiffies(info->timeout_ms));
5359
5360 /* Check pending transactions that wait for reply */
5361 if (ret >= 0) {
5362 binder_inner_proc_lock(target_proc);
5363 if (binder_txns_pending_ilocked(target_proc))
5364 ret = -EAGAIN;
5365 binder_inner_proc_unlock(target_proc);
5366 }
5367
5368 if (ret < 0) {
5369 binder_inner_proc_lock(target_proc);
5370 target_proc->is_frozen = false;
5371 binder_inner_proc_unlock(target_proc);
5372 }
5373
5374 return ret;
5375 }
5376
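/*
 * binder_ioctl_get_freezer_info() - handle BINDER_GET_FROZEN_INFO
 *
 * Aggregates the freeze status of every binder_proc whose pid matches
 * info->pid (one per binder context). In info->sync_recv, bit 0 is set
 * if a sync transaction targeted the proc while frozen and bit 1 if
 * transactions are still pending; info->async_recv is set if an async
 * transaction targeted the proc while frozen.
 */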
5377 static int binder_ioctl_get_freezer_info(
5378 struct binder_frozen_status_info *info)
5379 {
5380 struct binder_proc *target_proc;
5381 bool found = false;
5382 __u32 txns_pending;
5383
5384 info->sync_recv = 0;
5385 info->async_recv = 0;
5386
5387 mutex_lock(&binder_procs_lock);
5388 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5389 if (target_proc->pid == info->pid) {
5390 found = true;
5391 binder_inner_proc_lock(target_proc);
5392 txns_pending = binder_txns_pending_ilocked(target_proc);
5393 info->sync_recv |= target_proc->sync_recv |
5394 (txns_pending << 1);
5395 info->async_recv |= target_proc->async_recv;
5396 binder_inner_proc_unlock(target_proc);
5397 }
5398 }
5399 mutex_unlock(&binder_procs_lock);
5400
5401 if (!found)
5402 return -EINVAL;
5403
5404 return 0;
5405 }
5406
5407 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5408 void __user *ubuf)
5409 {
5410 struct binder_extended_error ee;
5411
5412 binder_inner_proc_lock(thread->proc);
5413 ee = thread->ee;
5414 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5415 binder_inner_proc_unlock(thread->proc);
5416
5417 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5418 return -EFAULT;
5419
5420 return 0;
5421 }
5422
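/*
 * binder_ioctl() - top-level ioctl dispatcher
 *
 * Ensures the calling task has a binder_thread, dispatches on the
 * command, and clears thread->looper_need_return before returning.
 */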
5423 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5424 {
5425 int ret;
5426 struct binder_proc *proc = filp->private_data;
5427 struct binder_thread *thread;
5428 unsigned int size = _IOC_SIZE(cmd);
5429 void __user *ubuf = (void __user *)arg;
5430
5431 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5432 proc->pid, current->pid, cmd, arg);*/
5433
5434 binder_selftest_alloc(&proc->alloc);
5435
5436 trace_binder_ioctl(cmd, arg);
5437
5438 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5439 if (ret)
5440 goto err_unlocked;
5441
5442 thread = binder_get_thread(proc);
5443 if (thread == NULL) {
5444 ret = -ENOMEM;
5445 goto err;
5446 }
5447
5448 switch (cmd) {
5449 case BINDER_WRITE_READ:
5450 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5451 if (ret)
5452 goto err;
5453 break;
5454 case BINDER_SET_MAX_THREADS: {
5455 u32 max_threads;
5456
5457 if (copy_from_user(&max_threads, ubuf,
5458 sizeof(max_threads))) {
5459 ret = -EINVAL;
5460 goto err;
5461 }
5462 binder_inner_proc_lock(proc);
5463 proc->max_threads = max_threads;
5464 binder_inner_proc_unlock(proc);
5465 break;
5466 }
5467 case BINDER_SET_CONTEXT_MGR_EXT: {
5468 struct flat_binder_object fbo;
5469
5470 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5471 ret = -EINVAL;
5472 goto err;
5473 }
5474 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5475 if (ret)
5476 goto err;
5477 break;
5478 }
5479 case BINDER_SET_CONTEXT_MGR:
5480 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5481 if (ret)
5482 goto err;
5483 break;
5484 case BINDER_THREAD_EXIT:
5485 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5486 proc->pid, thread->pid);
5487 binder_thread_release(proc, thread);
5488 thread = NULL;
5489 break;
5490 case BINDER_VERSION: {
5491 struct binder_version __user *ver = ubuf;
5492
5493 if (size != sizeof(struct binder_version)) {
5494 ret = -EINVAL;
5495 goto err;
5496 }
5497 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5498 &ver->protocol_version)) {
5499 ret = -EINVAL;
5500 goto err;
5501 }
5502 break;
5503 }
5504 case BINDER_GET_NODE_INFO_FOR_REF: {
5505 struct binder_node_info_for_ref info;
5506
5507 if (copy_from_user(&info, ubuf, sizeof(info))) {
5508 ret = -EFAULT;
5509 goto err;
5510 }
5511
5512 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5513 if (ret < 0)
5514 goto err;
5515
5516 if (copy_to_user(ubuf, &info, sizeof(info))) {
5517 ret = -EFAULT;
5518 goto err;
5519 }
5520
5521 break;
5522 }
5523 case BINDER_GET_NODE_DEBUG_INFO: {
5524 struct binder_node_debug_info info;
5525
5526 if (copy_from_user(&info, ubuf, sizeof(info))) {
5527 ret = -EFAULT;
5528 goto err;
5529 }
5530
5531 ret = binder_ioctl_get_node_debug_info(proc, &info);
5532 if (ret < 0)
5533 goto err;
5534
5535 if (copy_to_user(ubuf, &info, sizeof(info))) {
5536 ret = -EFAULT;
5537 goto err;
5538 }
5539 break;
5540 }
5541 case BINDER_FREEZE: {
5542 struct binder_freeze_info info;
5543 struct binder_proc **target_procs = NULL, *target_proc;
5544 int target_procs_count = 0, i = 0;
5545
5546 ret = 0;
5547
5548 if (copy_from_user(&info, ubuf, sizeof(info))) {
5549 ret = -EFAULT;
5550 goto err;
5551 }
5552
5553 mutex_lock(&binder_procs_lock);
5554 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5555 if (target_proc->pid == info.pid)
5556 target_procs_count++;
5557 }
5558
5559 if (target_procs_count == 0) {
5560 mutex_unlock(&binder_procs_lock);
5561 ret = -EINVAL;
5562 goto err;
5563 }
5564
5565 target_procs = kcalloc(target_procs_count,
5566 sizeof(struct binder_proc *),
5567 GFP_KERNEL);
5568
5569 if (!target_procs) {
5570 mutex_unlock(&binder_procs_lock);
5571 ret = -ENOMEM;
5572 goto err;
5573 }
5574
5575 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5576 if (target_proc->pid != info.pid)
5577 continue;
5578
5579 binder_inner_proc_lock(target_proc);
5580 target_proc->tmp_ref++;
5581 binder_inner_proc_unlock(target_proc);
5582
5583 target_procs[i++] = target_proc;
5584 }
5585 mutex_unlock(&binder_procs_lock);
5586
5587 for (i = 0; i < target_procs_count; i++) {
5588 if (ret >= 0)
5589 ret = binder_ioctl_freeze(&info,
5590 target_procs[i]);
5591
5592 binder_proc_dec_tmpref(target_procs[i]);
5593 }
5594
5595 kfree(target_procs);
5596
5597 if (ret < 0)
5598 goto err;
5599 break;
5600 }
5601 case BINDER_GET_FROZEN_INFO: {
5602 struct binder_frozen_status_info info;
5603
5604 if (copy_from_user(&info, ubuf, sizeof(info))) {
5605 ret = -EFAULT;
5606 goto err;
5607 }
5608
5609 ret = binder_ioctl_get_freezer_info(&info);
5610 if (ret < 0)
5611 goto err;
5612
5613 if (copy_to_user(ubuf, &info, sizeof(info))) {
5614 ret = -EFAULT;
5615 goto err;
5616 }
5617 break;
5618 }
5619 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5620 uint32_t enable;
5621
5622 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5623 ret = -EFAULT;
5624 goto err;
5625 }
5626 binder_inner_proc_lock(proc);
5627 proc->oneway_spam_detection_enabled = (bool)enable;
5628 binder_inner_proc_unlock(proc);
5629 break;
5630 }
5631 case BINDER_GET_EXTENDED_ERROR:
5632 ret = binder_ioctl_get_extended_error(thread, ubuf);
5633 if (ret < 0)
5634 goto err;
5635 break;
5636 case BINDER_FEATURE_SET: {
5637 struct binder_feature_set __user *features = ubuf;
5638
5639 if (size != sizeof(struct binder_feature_set)) {
5640 ret = -EINVAL;
5641 goto err;
5642 }
5643 if (put_user(BINDER_CURRENT_FEATURE_SET, &features->feature_set)) {
5644 ret = -EINVAL;
5645 goto err;
5646 }
5647 break;
5648 }
5649 #ifdef CONFIG_ACCESS_TOKENID
5650 case BINDER_GET_ACCESS_TOKEN: {
5651 struct access_token __user *tokens = ubuf;
5652 u64 token, ftoken;
5653
5654 if (size != sizeof(struct access_token)) {
5655 ret = -EINVAL;
5656 goto err;
5657 }
5658 binder_inner_proc_lock(proc);
5659 token = thread->tokens.sender_tokenid;
5660 ftoken = thread->tokens.first_tokenid;
5661 binder_inner_proc_unlock(proc);
5662 if (put_user(token, &tokens->sender_tokenid)) {
5663 ret = -EINVAL;
5664 goto err;
5665 }
5666 if (put_user(ftoken, &tokens->first_tokenid)) {
5667 ret = -EINVAL;
5668 goto err;
5669 }
5670 break;
5671 }
5672 #endif /* CONFIG_ACCESS_TOKENID */
5673
5674 #ifdef CONFIG_BINDER_SENDER_INFO
5675 case BINDER_GET_SENDER_INFO: {
5676 struct binder_sender_info __user *sender = ubuf;
5677 u64 token, ftoken, sender_pid_nr;
5678 if (size != sizeof(struct binder_sender_info)) {
5679 ret = -EINVAL;
5680 goto err;
5681 }
5682 binder_inner_proc_lock(proc);
5683 #ifdef CONFIG_ACCESS_TOKENID
5684 token = thread->tokens.sender_tokenid;
5685 ftoken = thread->tokens.first_tokenid;
5686 #endif /*CONFIG_ACCESS_TOKENID*/
5687 sender_pid_nr = thread->sender_pid_nr;
5688 binder_inner_proc_unlock(proc);
5689 #ifdef CONFIG_ACCESS_TOKENID
5690 if (put_user(token, &sender->tokens.sender_tokenid)) {
5691 ret = -EFAULT;
5692 goto err;
5693 }
5694 if (put_user(ftoken, &sender->tokens.first_tokenid)) {
5695 ret = -EFAULT;
5696 goto err;
5697 }
5698 #endif /*CONFIG_ACCESS_TOKENID*/
5699 if (put_user(sender_pid_nr, &sender->sender_pid_nr)) {
5700 ret = -EFAULT;
5701 goto err;
5702 }
5703 break;
5704 }
5705 #endif /* CONFIG_BINDER_SENDER_INFO */
5706 default:
5707 ret = -EINVAL;
5708 goto err;
5709 }
5710 ret = 0;
5711 err:
5712 if (thread)
5713 thread->looper_need_return = false;
5714 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5715 if (ret && ret != -EINTR)
5716 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5717 err_unlocked:
5718 trace_binder_ioctl_done(ret);
5719 return ret;
5720 }
5721
5722 static void binder_vma_open(struct vm_area_struct *vma)
5723 {
5724 struct binder_proc *proc = vma->vm_private_data;
5725
5726 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5727 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5728 proc->pid, vma->vm_start, vma->vm_end,
5729 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5730 (unsigned long)pgprot_val(vma->vm_page_prot));
5731 }
5732
5733 static void binder_vma_close(struct vm_area_struct *vma)
5734 {
5735 struct binder_proc *proc = vma->vm_private_data;
5736
5737 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5738 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5739 proc->pid, vma->vm_start, vma->vm_end,
5740 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5741 (unsigned long)pgprot_val(vma->vm_page_prot));
5742 binder_alloc_vma_close(&proc->alloc);
5743 }
5744
5745 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5746 {
5747 return VM_FAULT_SIGBUS;
5748 }
5749
5750 static const struct vm_operations_struct binder_vm_ops = {
5751 .open = binder_vma_open,
5752 .close = binder_vma_close,
5753 .fault = binder_vm_fault,
5754 };
5755
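/*
 * binder_mmap() - map the binder buffer area into userspace
 *
 * Only threads of the process that opened the device may mmap
 * (proc->tsk must match current->group_leader), and writable mappings
 * are rejected per FORBIDDEN_MMAP_FLAGS. The vma is marked
 * VM_DONTCOPY | VM_MIXEDMAP with VM_MAYWRITE cleared and then handed
 * to binder_alloc_mmap_handler() to set up the buffer space.
 */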
5756 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5757 {
5758 struct binder_proc *proc = filp->private_data;
5759
5760 if (proc->tsk != current->group_leader)
5761 return -EINVAL;
5762
5763 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5764 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5765 __func__, proc->pid, vma->vm_start, vma->vm_end,
5766 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5767 (unsigned long)pgprot_val(vma->vm_page_prot));
5768
5769 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5770 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5771 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5772 return -EPERM;
5773 }
5774 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5775
5776 vma->vm_ops = &binder_vm_ops;
5777 vma->vm_private_data = proc;
5778
5779 return binder_alloc_mmap_handler(&proc->alloc, vma);
5780 }
5781
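/*
 * binder_open() - allocate per-process state for a new fd
 *
 * Creates a binder_proc tied to the opener's thread group leader,
 * resolves the binder_device either from binderfs (stashed in
 * i_private) or from the miscdevice, and creates the per-PID debugfs
 * and binderfs log entries on the first open for that PID.
 */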
5782 static int binder_open(struct inode *nodp, struct file *filp)
5783 {
5784 struct binder_proc *proc, *itr;
5785 struct binder_device *binder_dev;
5786 struct binderfs_info *info;
5787 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5788 bool existing_pid = false;
5789
5790 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5791 current->group_leader->pid, current->pid);
5792
5793 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5794 if (proc == NULL)
5795 return -ENOMEM;
5796 spin_lock_init(&proc->inner_lock);
5797 spin_lock_init(&proc->outer_lock);
5798 get_task_struct(current->group_leader);
5799 proc->tsk = current->group_leader;
5800 proc->cred = get_cred(filp->f_cred);
5801 INIT_LIST_HEAD(&proc->todo);
5802 init_waitqueue_head(&proc->freeze_wait);
5803 proc->default_priority = task_nice(current);
5804 /* binderfs stashes devices in i_private */
5805 if (is_binderfs_device(nodp)) {
5806 binder_dev = nodp->i_private;
5807 info = nodp->i_sb->s_fs_info;
5808 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5809 } else {
5810 binder_dev = container_of(filp->private_data,
5811 struct binder_device, miscdev);
5812 }
5813 refcount_inc(&binder_dev->ref);
5814 proc->context = &binder_dev->context;
5815 binder_alloc_init(&proc->alloc);
5816
5817 binder_stats_created(BINDER_STAT_PROC);
5818 proc->pid = current->group_leader->pid;
5819 INIT_LIST_HEAD(&proc->delivered_death);
5820 INIT_LIST_HEAD(&proc->waiting_threads);
5821 filp->private_data = proc;
5822
5823 mutex_lock(&binder_procs_lock);
5824 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5825 if (itr->pid == proc->pid) {
5826 existing_pid = true;
5827 break;
5828 }
5829 }
5830 hlist_add_head(&proc->proc_node, &binder_procs);
5831 mutex_unlock(&binder_procs_lock);
5832
5833 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5834 char strbuf[11];
5835
5836 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5837 /*
5838 * proc debug entries are shared between contexts.
5839 * Only create for the first PID to avoid debugfs log spamming.
5840 * The printing code will print all contexts for a given
5841 * PID anyway, so this is not a problem.
5842 */
5843 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5844 binder_debugfs_dir_entry_proc,
5845 (void *)(unsigned long)proc->pid,
5846 &proc_fops);
5847 }
5848
5849 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5850 char strbuf[11];
5851 struct dentry *binderfs_entry;
5852
5853 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5854 /*
5855 * Similar to debugfs, the process specific log file is shared
5856 * between contexts. Only create for the first PID.
5857 * This is fine since, as with debugfs, the log file will contain
5858 * information on all contexts of a given PID.
5859 */
5860 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5861 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5862 if (!IS_ERR(binderfs_entry)) {
5863 proc->binderfs_entry = binderfs_entry;
5864 } else {
5865 int error;
5866
5867 error = PTR_ERR(binderfs_entry);
5868 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5869 strbuf, error);
5870 }
5871 }
5872
5873 return 0;
5874 }
5875
5876 static int binder_flush(struct file *filp, fl_owner_t id)
5877 {
5878 struct binder_proc *proc = filp->private_data;
5879
5880 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5881
5882 return 0;
5883 }
5884
5885 static void binder_deferred_flush(struct binder_proc *proc)
5886 {
5887 struct rb_node *n;
5888 int wake_count = 0;
5889
5890 binder_inner_proc_lock(proc);
5891 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5892 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5893
5894 thread->looper_need_return = true;
5895 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5896 wake_up_interruptible(&thread->wait);
5897 wake_count++;
5898 }
5899 }
5900 binder_inner_proc_unlock(proc);
5901
5902 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5903 "binder_flush: %d woke %d threads\n", proc->pid,
5904 wake_count);
5905 }
5906
5907 static int binder_release(struct inode *nodp, struct file *filp)
5908 {
5909 struct binder_proc *proc = filp->private_data;
5910
5911 debugfs_remove(proc->debugfs_entry);
5912
5913 if (proc->binderfs_entry) {
5914 binderfs_remove_file(proc->binderfs_entry);
5915 proc->binderfs_entry = NULL;
5916 }
5917
5918 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5919
5920 return 0;
5921 }
5922
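/*
 * binder_node_release() - release one node during proc teardown
 *
 * Frees the node immediately if it has no refs besides the caller's
 * temporary one. Otherwise the node is moved to binder_dead_nodes and
 * every ref that registered a death notification gets a
 * BINDER_WORK_DEAD_BINDER work item queued on its proc.
 *
 * Returns @refs plus the number of refs found on the node.
 */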
5923 static int binder_node_release(struct binder_node *node, int refs)
5924 {
5925 struct binder_ref *ref;
5926 int death = 0;
5927 struct binder_proc *proc = node->proc;
5928
5929 binder_release_work(proc, &node->async_todo);
5930
5931 binder_node_lock(node);
5932 binder_inner_proc_lock(proc);
5933 binder_dequeue_work_ilocked(&node->work);
5934 /*
5935 * The caller must have taken a temporary ref on the node.
5936 */
5937 BUG_ON(!node->tmp_refs);
5938 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5939 binder_inner_proc_unlock(proc);
5940 binder_node_unlock(node);
5941 binder_free_node(node);
5942
5943 return refs;
5944 }
5945
5946 node->proc = NULL;
5947 node->local_strong_refs = 0;
5948 node->local_weak_refs = 0;
5949 binder_inner_proc_unlock(proc);
5950
5951 spin_lock(&binder_dead_nodes_lock);
5952 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5953 spin_unlock(&binder_dead_nodes_lock);
5954
5955 hlist_for_each_entry(ref, &node->refs, node_entry) {
5956 refs++;
5957 /*
5958 * Need the node lock to synchronize
5959 * with new notification requests and the
5960 * inner lock to synchronize with queued
5961 * death notifications.
5962 */
5963 binder_inner_proc_lock(ref->proc);
5964 if (!ref->death) {
5965 binder_inner_proc_unlock(ref->proc);
5966 continue;
5967 }
5968
5969 death++;
5970
5971 BUG_ON(!list_empty(&ref->death->work.entry));
5972 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5973 binder_enqueue_work_ilocked(&ref->death->work,
5974 &ref->proc->todo);
5975 binder_wakeup_proc_ilocked(ref->proc);
5976 binder_inner_proc_unlock(ref->proc);
5977 }
5978
5979 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5980 "node %d now dead, refs %d, death %d\n",
5981 node->debug_id, refs, death);
5982 binder_node_unlock(node);
5983 binder_put_node(node);
5984
5985 return refs;
5986 }
5987
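/*
 * binder_deferred_release() - deferred cleanup after the last close
 *
 * Unhooks the proc from the global proc list and from the context
 * manager slot if it held it, then releases every thread, node,
 * outgoing ref and piece of queued work before dropping the temporary
 * proc ref taken at the top (which normally frees the proc).
 */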
5988 static void binder_deferred_release(struct binder_proc *proc)
5989 {
5990 struct binder_context *context = proc->context;
5991 struct rb_node *n;
5992 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5993
5994 mutex_lock(&binder_procs_lock);
5995 hlist_del(&proc->proc_node);
5996 mutex_unlock(&binder_procs_lock);
5997
5998 mutex_lock(&context->context_mgr_node_lock);
5999 if (context->binder_context_mgr_node &&
6000 context->binder_context_mgr_node->proc == proc) {
6001 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6002 "%s: %d context_mgr_node gone\n",
6003 __func__, proc->pid);
6004 context->binder_context_mgr_node = NULL;
6005 }
6006 mutex_unlock(&context->context_mgr_node_lock);
6007 binder_inner_proc_lock(proc);
6008 /*
6009 * Make sure proc stays alive after we
6010 * remove all the threads
6011 */
6012 proc->tmp_ref++;
6013
6014 proc->is_dead = true;
6015 proc->is_frozen = false;
6016 proc->sync_recv = false;
6017 proc->async_recv = false;
6018 threads = 0;
6019 active_transactions = 0;
6020 while ((n = rb_first(&proc->threads))) {
6021 struct binder_thread *thread;
6022
6023 thread = rb_entry(n, struct binder_thread, rb_node);
6024 binder_inner_proc_unlock(proc);
6025 threads++;
6026 active_transactions += binder_thread_release(proc, thread);
6027 binder_inner_proc_lock(proc);
6028 }
6029
6030 nodes = 0;
6031 incoming_refs = 0;
6032 while ((n = rb_first(&proc->nodes))) {
6033 struct binder_node *node;
6034
6035 node = rb_entry(n, struct binder_node, rb_node);
6036 nodes++;
6037 /*
6038 * take a temporary ref on the node before
6039 * calling binder_node_release() which will either
6040 * kfree() the node or call binder_put_node()
6041 */
6042 binder_inc_node_tmpref_ilocked(node);
6043 rb_erase(&node->rb_node, &proc->nodes);
6044 binder_inner_proc_unlock(proc);
6045 incoming_refs = binder_node_release(node, incoming_refs);
6046 binder_inner_proc_lock(proc);
6047 }
6048 binder_inner_proc_unlock(proc);
6049
6050 outgoing_refs = 0;
6051 binder_proc_lock(proc);
6052 while ((n = rb_first(&proc->refs_by_desc))) {
6053 struct binder_ref *ref;
6054
6055 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6056 outgoing_refs++;
6057 binder_cleanup_ref_olocked(ref);
6058 binder_proc_unlock(proc);
6059 binder_free_ref(ref);
6060 binder_proc_lock(proc);
6061 }
6062 binder_proc_unlock(proc);
6063
6064 binder_release_work(proc, &proc->todo);
6065 binder_release_work(proc, &proc->delivered_death);
6066
6067 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6068 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6069 __func__, proc->pid, threads, nodes, incoming_refs,
6070 outgoing_refs, active_transactions);
6071
6072 binder_proc_dec_tmpref(proc);
6073 }
6074
6075 static void binder_deferred_func(struct work_struct *work)
6076 {
6077 struct binder_proc *proc;
6078
6079 int defer;
6080
6081 do {
6082 mutex_lock(&binder_deferred_lock);
6083 if (!hlist_empty(&binder_deferred_list)) {
6084 proc = hlist_entry(binder_deferred_list.first,
6085 struct binder_proc, deferred_work_node);
6086 hlist_del_init(&proc->deferred_work_node);
6087 defer = proc->deferred_work;
6088 proc->deferred_work = 0;
6089 } else {
6090 proc = NULL;
6091 defer = 0;
6092 }
6093 mutex_unlock(&binder_deferred_lock);
6094
6095 if (defer & BINDER_DEFERRED_FLUSH)
6096 binder_deferred_flush(proc);
6097
6098 if (defer & BINDER_DEFERRED_RELEASE)
6099 binder_deferred_release(proc); /* frees proc */
6100 } while (proc);
6101 }
6102 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6103
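/*
 * binder_defer_work() - queue deferred flush/release work for a proc
 *
 * ORs @defer into proc->deferred_work and, if the proc is not already
 * on binder_deferred_list, adds it and schedules binder_deferred_work.
 */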
6104 static void
6105 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6106 {
6107 mutex_lock(&binder_deferred_lock);
6108 proc->deferred_work |= defer;
6109 if (hlist_unhashed(&proc->deferred_work_node)) {
6110 hlist_add_head(&proc->deferred_work_node,
6111 &binder_deferred_list);
6112 schedule_work(&binder_deferred_work);
6113 }
6114 mutex_unlock(&binder_deferred_lock);
6115 }
6116
6117 static void print_binder_transaction_ilocked(struct seq_file *m,
6118 struct binder_proc *proc,
6119 const char *prefix,
6120 struct binder_transaction *t)
6121 {
6122 struct binder_proc *to_proc;
6123 struct binder_buffer *buffer = t->buffer;
6124 ktime_t current_time = ktime_get();
6125
6126 spin_lock(&t->lock);
6127 to_proc = t->to_proc;
6128 seq_printf(m,
6129 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6130 prefix, t->debug_id, t,
6131 t->from_pid,
6132 t->from_tid,
6133 to_proc ? to_proc->pid : 0,
6134 t->to_thread ? t->to_thread->pid : 0,
6135 t->code, t->flags, t->priority, t->need_reply,
6136 ktime_ms_delta(current_time, t->start_time));
6137 spin_unlock(&t->lock);
6138
6139 if (proc != to_proc) {
6140 /*
6141 * Can only safely deref buffer if we are holding the
6142 * correct proc inner lock for this node
6143 */
6144 seq_puts(m, "\n");
6145 return;
6146 }
6147
6148 if (buffer == NULL) {
6149 seq_puts(m, " buffer free\n");
6150 return;
6151 }
6152 if (buffer->target_node)
6153 seq_printf(m, " node %d", buffer->target_node->debug_id);
6154 seq_printf(m, " size %zd:%zd data %pK\n",
6155 buffer->data_size, buffer->offsets_size,
6156 buffer->user_data);
6157 }
6158
6159 static void print_binder_work_ilocked(struct seq_file *m,
6160 struct binder_proc *proc,
6161 const char *prefix,
6162 const char *transaction_prefix,
6163 struct binder_work *w)
6164 {
6165 struct binder_node *node;
6166 struct binder_transaction *t;
6167
6168 switch (w->type) {
6169 case BINDER_WORK_TRANSACTION:
6170 t = container_of(w, struct binder_transaction, work);
6171 print_binder_transaction_ilocked(
6172 m, proc, transaction_prefix, t);
6173 break;
6174 case BINDER_WORK_RETURN_ERROR: {
6175 struct binder_error *e = container_of(
6176 w, struct binder_error, work);
6177
6178 seq_printf(m, "%stransaction error: %u\n",
6179 prefix, e->cmd);
6180 } break;
6181 case BINDER_WORK_TRANSACTION_COMPLETE:
6182 seq_printf(m, "%stransaction complete\n", prefix);
6183 break;
6184 case BINDER_WORK_NODE:
6185 node = container_of(w, struct binder_node, work);
6186 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6187 prefix, node->debug_id,
6188 (u64)node->ptr, (u64)node->cookie);
6189 break;
6190 case BINDER_WORK_DEAD_BINDER:
6191 seq_printf(m, "%shas dead binder\n", prefix);
6192 break;
6193 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6194 seq_printf(m, "%shas cleared dead binder\n", prefix);
6195 break;
6196 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6197 seq_printf(m, "%shas cleared death notification\n", prefix);
6198 break;
6199 default:
6200 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6201 break;
6202 }
6203 }
6204
6205 static void print_binder_thread_ilocked(struct seq_file *m,
6206 struct binder_thread *thread,
6207 int print_always)
6208 {
6209 struct binder_transaction *t;
6210 struct binder_work *w;
6211 size_t start_pos = m->count;
6212 size_t header_pos;
6213
6214 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6215 thread->pid, thread->looper,
6216 thread->looper_need_return,
6217 atomic_read(&thread->tmp_ref));
6218 header_pos = m->count;
6219 t = thread->transaction_stack;
6220 while (t) {
6221 if (t->from == thread) {
6222 print_binder_transaction_ilocked(m, thread->proc,
6223 " outgoing transaction", t);
6224 t = t->from_parent;
6225 } else if (t->to_thread == thread) {
6226 print_binder_transaction_ilocked(m, thread->proc,
6227 " incoming transaction", t);
6228 t = t->to_parent;
6229 } else {
6230 print_binder_transaction_ilocked(m, thread->proc,
6231 " bad transaction", t);
6232 t = NULL;
6233 }
6234 }
6235 list_for_each_entry(w, &thread->todo, entry) {
6236 print_binder_work_ilocked(m, thread->proc, " ",
6237 " pending transaction", w);
6238 }
6239 if (!print_always && m->count == header_pos)
6240 m->count = start_pos;
6241 }
6242
6243 static void print_binder_node_nilocked(struct seq_file *m,
6244 struct binder_node *node)
6245 {
6246 struct binder_ref *ref;
6247 struct binder_work *w;
6248 int count;
6249
6250 count = 0;
6251 hlist_for_each_entry(ref, &node->refs, node_entry)
6252 count++;
6253
6254 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6255 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6256 node->has_strong_ref, node->has_weak_ref,
6257 node->local_strong_refs, node->local_weak_refs,
6258 node->internal_strong_refs, count, node->tmp_refs);
6259 if (count) {
6260 seq_puts(m, " proc");
6261 hlist_for_each_entry(ref, &node->refs, node_entry)
6262 seq_printf(m, " %d", ref->proc->pid);
6263 }
6264 seq_puts(m, "\n");
6265 if (node->proc) {
6266 list_for_each_entry(w, &node->async_todo, entry)
6267 print_binder_work_ilocked(m, node->proc, " ",
6268 " pending async transaction", w);
6269 }
6270 }
6271
6272 static void print_binder_ref_olocked(struct seq_file *m,
6273 struct binder_ref *ref)
6274 {
6275 binder_node_lock(ref->node);
6276 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6277 ref->data.debug_id, ref->data.desc,
6278 ref->node->proc ? "" : "dead ",
6279 ref->node->debug_id, ref->data.strong,
6280 ref->data.weak, ref->death);
6281 binder_node_unlock(ref->node);
6282 }
6283
6284 static void print_binder_proc(struct seq_file *m,
6285 struct binder_proc *proc, int print_all)
6286 {
6287 struct binder_work *w;
6288 struct rb_node *n;
6289 size_t start_pos = m->count;
6290 size_t header_pos;
6291 struct binder_node *last_node = NULL;
6292
6293 seq_printf(m, "proc %d\n", proc->pid);
6294 seq_printf(m, "context %s\n", proc->context->name);
6295 header_pos = m->count;
6296
6297 binder_inner_proc_lock(proc);
6298 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6299 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6300 rb_node), print_all);
6301
6302 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6303 struct binder_node *node = rb_entry(n, struct binder_node,
6304 rb_node);
6305 if (!print_all && !node->has_async_transaction)
6306 continue;
6307
6308 /*
6309 * take a temporary reference on the node so it
6310 * survives and isn't removed from the tree
6311 * while we print it.
6312 */
6313 binder_inc_node_tmpref_ilocked(node);
6314 /* Need to drop inner lock to take node lock */
6315 binder_inner_proc_unlock(proc);
6316 if (last_node)
6317 binder_put_node(last_node);
6318 binder_node_inner_lock(node);
6319 print_binder_node_nilocked(m, node);
6320 binder_node_inner_unlock(node);
6321 last_node = node;
6322 binder_inner_proc_lock(proc);
6323 }
6324 binder_inner_proc_unlock(proc);
6325 if (last_node)
6326 binder_put_node(last_node);
6327
6328 if (print_all) {
6329 binder_proc_lock(proc);
6330 for (n = rb_first(&proc->refs_by_desc);
6331 n != NULL;
6332 n = rb_next(n))
6333 print_binder_ref_olocked(m, rb_entry(n,
6334 struct binder_ref,
6335 rb_node_desc));
6336 binder_proc_unlock(proc);
6337 }
6338 binder_alloc_print_allocated(m, &proc->alloc);
6339 binder_inner_proc_lock(proc);
6340 list_for_each_entry(w, &proc->todo, entry)
6341 print_binder_work_ilocked(m, proc, " ",
6342 " pending transaction", w);
6343 list_for_each_entry(w, &proc->delivered_death, entry) {
6344 seq_puts(m, " has delivered dead binder\n");
6345 break;
6346 }
6347 binder_inner_proc_unlock(proc);
6348 if (!print_all && m->count == header_pos)
6349 m->count = start_pos;
6350 }
6351
6352 static const char * const binder_return_strings[] = {
6353 "BR_ERROR",
6354 "BR_OK",
6355 "BR_TRANSACTION",
6356 "BR_REPLY",
6357 "BR_ACQUIRE_RESULT",
6358 "BR_DEAD_REPLY",
6359 "BR_TRANSACTION_COMPLETE",
6360 "BR_INCREFS",
6361 "BR_ACQUIRE",
6362 "BR_RELEASE",
6363 "BR_DECREFS",
6364 "BR_ATTEMPT_ACQUIRE",
6365 "BR_NOOP",
6366 "BR_SPAWN_LOOPER",
6367 "BR_FINISHED",
6368 "BR_DEAD_BINDER",
6369 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6370 "BR_FAILED_REPLY",
6371 "BR_FROZEN_REPLY",
6372 "BR_ONEWAY_SPAM_SUSPECT",
6373 "BR_TRANSACTION_PENDING_FROZEN"
6374 };
6375
6376 static const char * const binder_command_strings[] = {
6377 "BC_TRANSACTION",
6378 "BC_REPLY",
6379 "BC_ACQUIRE_RESULT",
6380 "BC_FREE_BUFFER",
6381 "BC_INCREFS",
6382 "BC_ACQUIRE",
6383 "BC_RELEASE",
6384 "BC_DECREFS",
6385 "BC_INCREFS_DONE",
6386 "BC_ACQUIRE_DONE",
6387 "BC_ATTEMPT_ACQUIRE",
6388 "BC_REGISTER_LOOPER",
6389 "BC_ENTER_LOOPER",
6390 "BC_EXIT_LOOPER",
6391 "BC_REQUEST_DEATH_NOTIFICATION",
6392 "BC_CLEAR_DEATH_NOTIFICATION",
6393 "BC_DEAD_BINDER_DONE",
6394 "BC_TRANSACTION_SG",
6395 "BC_REPLY_SG",
6396 };
6397
6398 static const char * const binder_objstat_strings[] = {
6399 "proc",
6400 "thread",
6401 "node",
6402 "ref",
6403 "death",
6404 "transaction",
6405 "transaction_complete"
6406 };
6407
6408 static void print_binder_stats(struct seq_file *m, const char *prefix,
6409 struct binder_stats *stats)
6410 {
6411 int i;
6412
6413 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6414 ARRAY_SIZE(binder_command_strings));
6415 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6416 int temp = atomic_read(&stats->bc[i]);
6417
6418 if (temp)
6419 seq_printf(m, "%s%s: %d\n", prefix,
6420 binder_command_strings[i], temp);
6421 }
6422
6423 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6424 ARRAY_SIZE(binder_return_strings));
6425 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6426 int temp = atomic_read(&stats->br[i]);
6427
6428 if (temp)
6429 seq_printf(m, "%s%s: %d\n", prefix,
6430 binder_return_strings[i], temp);
6431 }
6432
6433 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6434 ARRAY_SIZE(binder_objstat_strings));
6435 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6436 ARRAY_SIZE(stats->obj_deleted));
6437 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6438 int created = atomic_read(&stats->obj_created[i]);
6439 int deleted = atomic_read(&stats->obj_deleted[i]);
6440
6441 if (created || deleted)
6442 seq_printf(m, "%s%s: active %d total %d\n",
6443 prefix,
6444 binder_objstat_strings[i],
6445 created - deleted,
6446 created);
6447 }
6448 }
6449
6450 static void print_binder_proc_stats(struct seq_file *m,
6451 struct binder_proc *proc)
6452 {
6453 struct binder_work *w;
6454 struct binder_thread *thread;
6455 struct rb_node *n;
6456 int count, strong, weak, ready_threads;
6457 size_t free_async_space =
6458 binder_alloc_get_free_async_space(&proc->alloc);
6459
6460 seq_printf(m, "proc %d\n", proc->pid);
6461 seq_printf(m, "context %s\n", proc->context->name);
6462 count = 0;
6463 ready_threads = 0;
6464 binder_inner_proc_lock(proc);
6465 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6466 count++;
6467
6468 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6469 ready_threads++;
6470
6471 seq_printf(m, " threads: %d\n", count);
6472 seq_printf(m, " requested threads: %d+%d/%d\n"
6473 " ready threads %d\n"
6474 " free async space %zd\n", proc->requested_threads,
6475 proc->requested_threads_started, proc->max_threads,
6476 ready_threads,
6477 free_async_space);
6478 count = 0;
6479 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6480 count++;
6481 binder_inner_proc_unlock(proc);
6482 seq_printf(m, " nodes: %d\n", count);
6483 count = 0;
6484 strong = 0;
6485 weak = 0;
6486 binder_proc_lock(proc);
6487 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6488 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6489 rb_node_desc);
6490 count++;
6491 strong += ref->data.strong;
6492 weak += ref->data.weak;
6493 }
6494 binder_proc_unlock(proc);
6495 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6496
6497 count = binder_alloc_get_allocated_count(&proc->alloc);
6498 seq_printf(m, " buffers: %d\n", count);
6499
6500 binder_alloc_print_pages(m, &proc->alloc);
6501
6502 count = 0;
6503 binder_inner_proc_lock(proc);
6504 list_for_each_entry(w, &proc->todo, entry) {
6505 if (w->type == BINDER_WORK_TRANSACTION)
6506 count++;
6507 }
6508 binder_inner_proc_unlock(proc);
6509 seq_printf(m, " pending transactions: %d\n", count);
6510
6511 print_binder_stats(m, " ", &proc->stats);
6512 }
6513
6514 static int state_show(struct seq_file *m, void *unused)
6515 {
6516 struct binder_proc *proc;
6517 struct binder_node *node;
6518 struct binder_node *last_node = NULL;
6519
6520 seq_puts(m, "binder state:\n");
6521
6522 spin_lock(&binder_dead_nodes_lock);
6523 if (!hlist_empty(&binder_dead_nodes))
6524 seq_puts(m, "dead nodes:\n");
6525 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6526 /*
6527 * take a temporary reference on the node so it
6528 * survives and isn't removed from the list
6529 * while we print it.
6530 */
6531 node->tmp_refs++;
6532 spin_unlock(&binder_dead_nodes_lock);
6533 if (last_node)
6534 binder_put_node(last_node);
6535 binder_node_lock(node);
6536 print_binder_node_nilocked(m, node);
6537 binder_node_unlock(node);
6538 last_node = node;
6539 spin_lock(&binder_dead_nodes_lock);
6540 }
6541 spin_unlock(&binder_dead_nodes_lock);
6542 if (last_node)
6543 binder_put_node(last_node);
6544
6545 mutex_lock(&binder_procs_lock);
6546 hlist_for_each_entry(proc, &binder_procs, proc_node)
6547 print_binder_proc(m, proc, 1);
6548 mutex_unlock(&binder_procs_lock);
6549
6550 return 0;
6551 }
6552
6553 static int stats_show(struct seq_file *m, void *unused)
6554 {
6555 struct binder_proc *proc;
6556
6557 seq_puts(m, "binder stats:\n");
6558
6559 print_binder_stats(m, "", &binder_stats);
6560
6561 mutex_lock(&binder_procs_lock);
6562 hlist_for_each_entry(proc, &binder_procs, proc_node)
6563 print_binder_proc_stats(m, proc);
6564 mutex_unlock(&binder_procs_lock);
6565
6566 return 0;
6567 }
6568
6569 static int transactions_show(struct seq_file *m, void *unused)
6570 {
6571 struct binder_proc *proc;
6572
6573 seq_puts(m, "binder transactions:\n");
6574 mutex_lock(&binder_procs_lock);
6575 hlist_for_each_entry(proc, &binder_procs, proc_node)
6576 print_binder_proc(m, proc, 0);
6577 mutex_unlock(&binder_procs_lock);
6578
6579 return 0;
6580 }
6581
6582 static int proc_show(struct seq_file *m, void *unused)
6583 {
6584 struct binder_proc *itr;
6585 int pid = (unsigned long)m->private;
6586
6587 mutex_lock(&binder_procs_lock);
6588 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6589 if (itr->pid == pid) {
6590 seq_puts(m, "binder proc state:\n");
6591 print_binder_proc(m, itr, 1);
6592 }
6593 }
6594 mutex_unlock(&binder_procs_lock);
6595
6596 return 0;
6597 }
6598
6599 static void print_binder_transaction_log_entry(struct seq_file *m,
6600 struct binder_transaction_log_entry *e)
6601 {
6602 int debug_id = READ_ONCE(e->debug_id_done);
6603 /*
6604 * read barrier to guarantee debug_id_done read before
6605 * we print the log values
6606 */
6607 smp_rmb();
6608 seq_printf(m,
6609 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6610 e->debug_id, (e->call_type == 2) ? "reply" :
6611 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6612 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6613 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6614 e->return_error, e->return_error_param,
6615 e->return_error_line);
6616 /*
6617 * read barrier to guarantee the read of debug_id_done happens after
6618 * we are done printing the fields of the entry
6619 */
6620 smp_rmb();
6621 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6622 "\n" : " (incomplete)\n");
6623 }
6624
6625 static int transaction_log_show(struct seq_file *m, void *unused)
6626 {
6627 struct binder_transaction_log *log = m->private;
6628 unsigned int log_cur = atomic_read(&log->cur);
6629 unsigned int count;
6630 unsigned int cur;
6631 int i;
6632
6633 count = log_cur + 1;
6634 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6635 0 : count % ARRAY_SIZE(log->entry);
6636 if (count > ARRAY_SIZE(log->entry) || log->full)
6637 count = ARRAY_SIZE(log->entry);
6638 for (i = 0; i < count; i++) {
6639 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6640
6641 print_binder_transaction_log_entry(m, &log->entry[index]);
6642 }
6643 return 0;
6644 }
6645
6646 const struct file_operations binder_fops = {
6647 .owner = THIS_MODULE,
6648 .poll = binder_poll,
6649 .unlocked_ioctl = binder_ioctl,
6650 .compat_ioctl = compat_ptr_ioctl,
6651 .mmap = binder_mmap,
6652 .open = binder_open,
6653 .flush = binder_flush,
6654 .release = binder_release,
6655 };
6656
6657 DEFINE_SHOW_ATTRIBUTE(state);
6658 DEFINE_SHOW_ATTRIBUTE(stats);
6659 DEFINE_SHOW_ATTRIBUTE(transactions);
6660 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6661
6662 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6663 {
6664 .name = "state",
6665 .mode = 0444,
6666 .fops = &state_fops,
6667 .data = NULL,
6668 },
6669 {
6670 .name = "stats",
6671 .mode = 0444,
6672 .fops = &stats_fops,
6673 .data = NULL,
6674 },
6675 {
6676 .name = "transactions",
6677 .mode = 0444,
6678 .fops = &transactions_fops,
6679 .data = NULL,
6680 },
6681 {
6682 .name = "transaction_log",
6683 .mode = 0444,
6684 .fops = &transaction_log_fops,
6685 .data = &binder_transaction_log,
6686 },
6687 {
6688 .name = "failed_transaction_log",
6689 .mode = 0444,
6690 .fops = &transaction_log_fops,
6691 .data = &binder_transaction_log_failed,
6692 },
6693 {} /* terminator */
6694 };
6695
6696 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
6697 static void print_binder_transaction_brief_ilocked(
6698 struct seq_file *m,
6699 const char *prefix, struct binder_transaction *t,
6700 u64 timestamp)
6701 {
6702 struct binder_proc *to_proc = NULL;
6703 int from_pid = 0;
6704 int from_tid = 0;
6705 int to_pid = 0;
6706 u64 sec;
6707 u32 nsec;
6708
6709 spin_lock(&t->lock);
6710 to_proc = t->to_proc;
6711 from_pid = t->from ? (t->from->proc ? t->from->proc->pid : 0) : t->from_pid;
6712 from_tid = t->from ? t->from->pid : t->from_tid;
6713 to_pid = to_proc ? to_proc->pid : 0;
6714 sec = div_u64_rem((timestamp - t->timestamp), 1000000000, &nsec);
6715
6716 seq_printf(m,
6717 "%s%d:%d to %d:%d code %x wait:%llu.%u s\n",
6718 prefix,
6719 from_pid, from_tid,
6720 to_pid, t->to_thread ? t->to_thread->pid : 0,
6721 t->code,
6722 timestamp > t->timestamp ? sec : 0,
6723 timestamp > t->timestamp ? nsec : 0);
6724 spin_unlock(&t->lock);
6725 }
6726
6727 static void print_binder_work_transaction_nilocked(struct seq_file *m,
6728 const char *prefix, struct binder_work *w,
6729 u64 timestamp)
6730 {
6731 struct binder_transaction *t = NULL;
6732
6733 switch (w->type) {
6734 case BINDER_WORK_TRANSACTION:
6735 t = container_of(w, struct binder_transaction, work);
6736 print_binder_transaction_brief_ilocked(m, prefix, t, timestamp);
6737 break;
6738
6739 default:
6740 break;
6741 }
6742 }
6743
6744 static void print_binder_transaction_brief(struct seq_file *m,
6745 struct binder_proc *proc,
6746 u64 timestamp)
6747 {
6748 struct binder_work *w = NULL;
6749 struct rb_node *n = NULL;
6750 struct binder_node *last_node = NULL;
6751 size_t start_pos = m->count;
6752 size_t header_pos = m->count;
6753
6754 /* sync binder / not one way */
6755 binder_inner_proc_lock(proc);
6756 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6757 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6758 struct binder_transaction *t = thread->transaction_stack;
6759 while (t) {
6760 if (t->from == thread) {
6761 print_binder_transaction_brief_ilocked(m, "\t", t, timestamp);
6762 t = t->from_parent;
6763 } else if (t->to_thread == thread) {
6764 t = t->to_parent;
6765 } else {
6766 t = NULL;
6767 }
6768 }
6769 }
6770
6771 /* async binder / one way */
6772 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6773 struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
6774 /*
6775 * take a temporary reference on the node so it
6776 * survives and isn't removed from the tree
6777 * while we print it.
6778 */
6779 binder_inc_node_tmpref_ilocked(node);
6780 /* Need to drop inner lock to take node lock */
6781 binder_inner_proc_unlock(proc);
6782 if (last_node)
6783 binder_put_node(last_node);
6784 binder_node_inner_lock(node);
6785 list_for_each_entry(w, &node->async_todo, entry)
6786 print_binder_work_transaction_nilocked(m, "async\t", w, timestamp);
6787 binder_node_inner_unlock(node);
6788 last_node = node;
6789 binder_inner_proc_lock(proc);
6790 }
6791 binder_inner_proc_unlock(proc);
6792
6793 if (last_node)
6794 binder_put_node(last_node);
6795
6796 if (m->count == header_pos)
6797 m->count = start_pos;
6798 }
6799
6800 static void print_binder_proc_brief(struct seq_file *m,
6801 struct binder_proc *proc)
6802 {
6803 struct binder_thread *thread = NULL;
6804 int ready_threads = 0;
6805 size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc);
6806
6807 seq_printf(m, "%d\t", proc->pid);
6808 seq_printf(m, "%s\t", proc->context->name);
6809
6810 binder_inner_proc_lock(proc);
6811 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6812 ready_threads++;
6813
6814 seq_printf(m, "%d\t%d\t%d\t%d"
6815 "\t%zd\n", proc->requested_threads,
6816 proc->requested_threads_started, proc->max_threads,
6817 ready_threads,
6818 free_async_space);
6819 binder_inner_proc_unlock(proc);
6820 }
6821
6822 static int binder_transaction_proc_show(struct seq_file *m, void *unused)
6823 {
6824 struct binder_proc *proc = NULL;
6825 u64 now = 0;
6826
6827 mutex_lock(&binder_procs_lock);
6828 now = binder_clock();
6829 hlist_for_each_entry(proc, &binder_procs, proc_node)
6830 print_binder_transaction_brief(m, proc, now);
6831
6832 seq_printf(m, "\npid\tcontext\t\trequest\tstarted\tmax\tready\tfree_async_space\n");
6833 hlist_for_each_entry(proc, &binder_procs, proc_node)
6834 print_binder_proc_brief(m, proc);
6835 mutex_unlock(&binder_procs_lock);
6836
6837 return 0;
6838 }
6839 #endif
6840
6841 static int __init init_binder_device(const char *name)
6842 {
6843 int ret;
6844 struct binder_device *binder_device;
6845
6846 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6847 if (!binder_device)
6848 return -ENOMEM;
6849
6850 binder_device->miscdev.fops = &binder_fops;
6851 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6852 binder_device->miscdev.name = name;
6853
6854 refcount_set(&binder_device->ref, 1);
6855 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6856 binder_device->context.name = name;
6857 mutex_init(&binder_device->context.context_mgr_node_lock);
6858
6859 ret = misc_register(&binder_device->miscdev);
6860 if (ret < 0) {
6861 kfree(binder_device);
6862 return ret;
6863 }
6864
6865 hlist_add_head(&binder_device->hlist, &binder_devices);
6866
6867 return ret;
6868 }
6869
6870 static int __init binder_init(void)
6871 {
6872 int ret;
6873 char *device_name, *device_tmp;
6874 struct binder_device *device;
6875 struct hlist_node *tmp;
6876 char *device_names = NULL;
6877 const struct binder_debugfs_entry *db_entry;
6878
6879 ret = binder_alloc_shrinker_init();
6880 if (ret)
6881 return ret;
6882
6883 atomic_set(&binder_transaction_log.cur, ~0U);
6884 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6885
6886 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6887
6888 binder_for_each_debugfs_entry(db_entry)
6889 debugfs_create_file(db_entry->name,
6890 db_entry->mode,
6891 binder_debugfs_dir_entry_root,
6892 db_entry->data,
6893 db_entry->fops);
6894
6895 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6896 binder_debugfs_dir_entry_root);
6897
6898 if (binder_debugfs_dir_entry_root) {
6899 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
6900 proc_create_data("transaction_proc",
6901 S_IRUGO,
6902 NULL,
6903 &binder_transaction_proc_proc_ops,
6904 NULL);
6905 #endif
6906 }
6907
6908 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6909 strcmp(binder_devices_param, "") != 0) {
6910 /*
6911 * Copy the module_parameter string, because we don't want to
6912 * tokenize it in-place.
6913 */
6914 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6915 if (!device_names) {
6916 ret = -ENOMEM;
6917 goto err_alloc_device_names_failed;
6918 }
6919
6920 device_tmp = device_names;
6921 while ((device_name = strsep(&device_tmp, ","))) {
6922 ret = init_binder_device(device_name);
6923 if (ret)
6924 goto err_init_binder_device_failed;
6925 }
6926 }
6927
6928 ret = init_binderfs();
6929 if (ret)
6930 goto err_init_binder_device_failed;
6931
6932 return ret;
6933
6934 err_init_binder_device_failed:
6935 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6936 misc_deregister(&device->miscdev);
6937 hlist_del(&device->hlist);
6938 kfree(device);
6939 }
6940
6941 kfree(device_names);
6942
6943 err_alloc_device_names_failed:
6944 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6945 binder_alloc_shrinker_exit();
6946
6947 return ret;
6948 }
6949
6950 device_initcall(binder_init);
6951
6952 #define CREATE_TRACE_POINTS
6953 #include "binder_trace.h"
6954
6955 MODULE_LICENSE("GPL v2");
6956