/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

/**
 * struct binder_device - information about a binder device node
 * @hlist: list of binder devices (only used for devices requested via
 * CONFIG_ANDROID_BINDER_DEVICES)
 * @miscdev: information about a binder character device node
 * @context: binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 * belonging to a binderfs mount.
 * @ref: reference count of this binder device
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};

/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};

/**
 * struct binderfs_info - information about a binderfs mount
 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
 * @control_dentry: This records the dentry of this binderfs mount
 * binder-control device.
 * @root_uid: uid that needs to be used when a new binder device is
 * created.
 * @root_gid: gid that needs to be used when a new binder device is
 * created.
 * @mount_opts: The mount options in use.
 * @device_count: The current number of allocated binder devices.
 * @proc_log_dir: Pointer to the directory dentry containing process-specific
 * logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};

extern const struct file_operations binder_fops;

extern char *binder_devices_param;

#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
						  const char *name,
						  const struct file_operations *fops,
						  void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif

#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
static inline int __init init_binderfs(void)
{
	return 0;
}
#endif

struct binder_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	void *data;
};

extern const struct binder_debugfs_entry binder_debugfs_entries[];

#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)
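
/*
 * Illustrative sketch (not part of this header's API): a caller can walk
 * the NULL-name-terminated table above to create the corresponding
 * debugfs files. "parent" is a hypothetical dentry for the target
 * directory.
 *
 *	const struct binder_debugfs_entry *db_entry;
 *
 *	binder_for_each_debugfs_entry(db_entry)
 *		debugfs_create_file(db_entry->name, db_entry->mode,
 *				    parent, db_entry->data, db_entry->fops);
 */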

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
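
/*
 * Illustrative sketch (assumed usage): command/return codes map to array
 * slots via _IOC_NR(), so given a hypothetical "struct binder_stats
 * *stats", accounting a BC_TRANSACTION could look like:
 *
 *	atomic_inc(&stats->bc[_IOC_NR(BC_TRANSACTION)]);
 */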

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
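
/*
 * Illustrative sketch (simplified from the driver's enqueue helpers;
 * "w" and "proc" are hypothetical pointers): a work item is queued by
 * linking @entry onto a todo list while holding the owning proc's inner
 * lock:
 *
 *	w->type = BINDER_WORK_TRANSACTION;
 *	spin_lock(&proc->inner_lock);
 *	list_add_tail(&w->entry, &proc->todo);
 *	spin_unlock(&proc->inner_lock);
 */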

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 * (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 * (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 * (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 * (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 * (invariant after initialized)
 * @refs: list of references on this node
 * (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 * initiating a transaction
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @local_weak_refs: weak user refs from local process
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @local_strong_refs: strong user refs from local process
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @tmp_refs: temporary kernel refs
 * (protected by @proc->inner_lock while @proc
 * is valid, and by binder_dead_nodes_lock
 * if @proc is NULL. During inc/dec and node release
 * it is also protected by @lock to provide safety
 * as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 * (invariant, no lock needed)
 * @cookie: userspace cookie for node
 * (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @has_async_transaction: async transaction to node in progress
 * (protected by @lock)
 * @sched_policy: minimum scheduling policy for node
 * (invariant after initialized)
 * @accept_fds: file descriptor operations supported for node
 * (invariant after initialized)
 * @min_priority: minimum scheduling priority
 * (invariant after initialized)
 * @inherit_rt: inherit RT scheduling policy from caller
 * @txn_security_ctx: require sender's security context
 * (invariant after initialized)
 * @async_todo: list of async work items
 * (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 * (protected by inner_lock of the proc that
	 * this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 * (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 * ref for deletion in binder_cleanup_ref, a non-NULL
 * @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 * (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/* node + proc => ref (transaction) */
	/* desc + proc => ref (transaction, inc/dec ref) */
	/* node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
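
/*
 * Illustrative sketch (assumption: NICE_TO_PRIO() from
 * <linux/sched/prio.h>): on the kernel priority scale used by @prio, the
 * default nice-0 priority would be expressed as:
 *
 *	struct binder_priority p = {
 *		.sched_policy = SCHED_NORMAL,
 *		.prio = NICE_TO_PRIO(0),	// 120 on the 100..139 range
 *	};
 */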

enum binder_prio_state {
	BINDER_PRIO_SET,	/* desired priority set */
	BINDER_PRIO_PENDING,	/* initiated a saved priority restore */
	BINDER_PRIO_ABORT,	/* abort the pending priority restore */
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 * (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 * this proc ordered by node->ptr
 * (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 * (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 * (protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 * (protected by @inner_lock)
 * @pid: PID of group_leader of process
 * (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 * (invariant after initialized)
 * @cred: struct cred associated with the `struct file`
 * in binder_open()
 * (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 * (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 * (protected by binder_deferred_lock)
 * @outstanding_txns: number of transactions to be transmitted before
 * processes in freeze_wait are woken up
 * (protected by @inner_lock)
 * @is_dead: process is dead and awaiting free
 * when outstanding transactions are cleaned up
 * (protected by @inner_lock)
 * @is_frozen: process is frozen and unable to service
 * binder transactions
 * (protected by @inner_lock)
 * @sync_recv: process received sync transactions since last frozen
 * bit 0: received sync transaction after being frozen
 * bit 1: new pending sync transaction during freezing
 * (protected by @inner_lock)
 * @async_recv: process received async transactions since last frozen
 * (protected by @inner_lock)
 * @freeze_wait: waitqueue of processes waiting for all outstanding
 * transactions to be processed
 * (protected by @inner_lock)
 * @todo: list of work for this process
 * (protected by @inner_lock)
 * @stats: per-process binder statistics
 * (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 * (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 * (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 * yet started. In current implementation, can
 * only be 0 or 1.
 * (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 * (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 * (protected by @inner_lock)
 * @default_priority: default scheduler priority
 * (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 * (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 * Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry: process-specific binderfs log file
 * @oneway_spam_detection_enabled: process enabled oneway spam detection
 * or not
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
};
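
/*
 * Illustrative sketch of the lock order documented above (outer, then
 * node, then inner); not actual driver code:
 *
 *	spin_lock(&proc->outer_lock);
 *	spin_lock(&node->lock);
 *	spin_lock(&proc->inner_lock);
 *	...
 *	spin_unlock(&proc->inner_lock);
 *	spin_unlock(&node->lock);
 *	spin_unlock(&proc->outer_lock);
 */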

struct binder_proc_wrap {
	struct binder_proc proc;
	spinlock_t lock;
};

static inline struct binder_proc *
binder_proc_entry(struct binder_alloc *alloc)
{
	return container_of(alloc, struct binder_proc, alloc);
}

static inline struct binder_proc_wrap *
binder_proc_wrap_entry(struct binder_proc *proc)
{
	return container_of(proc, struct binder_proc_wrap, proc);
}

static inline struct binder_proc_wrap *
binder_alloc_to_proc_wrap(struct binder_alloc *alloc)
{
	return binder_proc_wrap_entry(binder_proc_entry(alloc));
}

static inline void binder_alloc_lock_init(struct binder_alloc *alloc)
{
	spin_lock_init(&binder_alloc_to_proc_wrap(alloc)->lock);
}

static inline void binder_alloc_lock(struct binder_alloc *alloc)
{
	spin_lock(&binder_alloc_to_proc_wrap(alloc)->lock);
}

static inline void binder_alloc_unlock(struct binder_alloc *alloc)
{
	spin_unlock(&binder_alloc_to_proc_wrap(alloc)->lock);
}

static inline int binder_alloc_trylock(struct binder_alloc *alloc)
{
	return spin_trylock(&binder_alloc_to_proc_wrap(alloc)->lock);
}
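
/*
 * Illustrative sketch (assumed usage): binder_alloc_trylock() follows
 * spin_trylock() semantics and returns nonzero on success, so a
 * non-blocking caller might do:
 *
 *	if (binder_alloc_trylock(alloc)) {
 *		// ... inspect allocator state ...
 *		binder_alloc_unlock(alloc);
 *	}
 */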

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc: binder_alloc for this proc
 *
 * Return: the bytes remaining in the address-space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	size_t free_async_space;

	binder_alloc_lock(alloc);
	free_async_space = alloc->free_async_space;
	binder_alloc_unlock(alloc);
	return free_async_space;
}

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 * (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 * (protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 * (protected by @proc->inner_lock)
 * @pid: PID for this thread
 * (invariant after initialization)
 * @looper: bitmap of looping state
 * (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 * (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 * (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 * (protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 * (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 * (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 * (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 * (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 * (atomic since @proc->inner_lock cannot
 * always be acquired)
 * @is_dead: thread is dead and awaiting free
 * when outstanding transactions are cleaned up
 * (protected by @proc->inner_lock)
 * @task: struct task_struct for this thread
 * @prio_lock: protects thread priority fields
 * @prio_next: saved priority to be restored next
 * (protected by @prio_lock)
 * @prio_state: state of the priority restore process as
 * defined by enum binder_prio_state
 * (protected by @prio_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;			/* only modified by this thread */
	bool looper_need_return;	/* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
	spinlock_t prio_lock;
	struct binder_priority prio_next;
	enum binder_prio_state prio_state;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file: struct file to be associated with new fd
 * @offset: offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	bool is_nested;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
	ANDROID_VENDOR_DATA(1);
	ANDROID_OEM_DATA_ARRAY(1, 2);
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr: generic object header
 * @fbo: binder object (nodes and refs)
 * @fdo: file descriptor object
 * @bbo: binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
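
/*
 * Illustrative sketch (assumed usage): after copying an object out of a
 * transaction buffer into a struct binder_object, callers dispatch on
 * the shared header:
 *
 *	struct binder_object object;
 *	// ... copy from the buffer into &object ...
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *		// object.fbo describes a node or weak node
 *		break;
 *	case BINDER_TYPE_FD:
 *		// object.fdo carries a file descriptor
 *		break;
 *	}
 */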

#endif /* _LINUX_BINDER_INTERNAL_H */