/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
#include <linux/android_vendor.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
#include "dbitmap.h"

extern int binder_use_rust;
#ifdef CONFIG_ANDROID_BINDERFS
void unload_binderfs(void);
int on_binderfs_mount(void);
#else
static inline void unload_binderfs(void) {}
#endif

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

/**
 * struct binder_device - information about a binder device node
 * @hlist:          list of binder devices (only used for devices requested via
 *                  CONFIG_ANDROID_BINDER_DEVICES)
 * @miscdev:        information about a binder character device node
 * @context:        binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 *                  belonging to a binderfs mount.
 * @ref:            reference count used to manage the device lifetime
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};

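/*
 * Usage sketch (illustrative only, assuming the standard misc-device open
 * path, where the misc core places the struct miscdevice in
 * file->private_data before calling the driver's open()):
 *
 *	struct binder_device *device;
 *
 *	device = container_of(filp->private_data, struct binder_device,
 *			      miscdev);
 *
 * Devices created through a binderfs mount would instead be reached via the
 * binderfs inode rather than through the miscdevice.
 */
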
/**
 * binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};

/**
 * binderfs_info - information about a binderfs mount
 * @ipc_ns:         The ipc namespace the binderfs mount belongs to.
 * @control_dentry: The dentry of this binderfs mount's binder-control
 *                  device.
 * @root_uid:       uid that needs to be used when a new binder device is
 *                  created.
 * @root_gid:       gid that needs to be used when a new binder device is
 *                  created.
 * @mount_opts:     The mount options in use.
 * @device_count:   The current number of allocated binder devices.
 * @proc_log_dir:   Pointer to the directory dentry containing process-specific
 *                  logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};

extern const struct file_operations binder_fops;

extern char *binder_devices_param;

#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
					   const char *name,
					   const struct file_operations *fops,
					   void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif

#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
static inline int __init init_binderfs(void)
{
	return 0;
}
#endif

struct binder_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	void *data;
};

extern const struct binder_debugfs_entry binder_debugfs_entries[];

#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)

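/*
 * Illustrative sketch of how the entry table and iterator above can be
 * consumed (binder.c does roughly this at init time; the directory name
 * here is only an assumption for the example):
 *
 *	const struct binder_debugfs_entry *db_entry;
 *	struct dentry *root = debugfs_create_dir("binder", NULL);
 *
 *	binder_for_each_debugfs_entry(db_entry)
 *		debugfs_create_file(db_entry->name, db_entry->mode, root,
 *				    db_entry->data, db_entry->fops);
 */
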
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_FREEZE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

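/*
 * Counting sketch (illustrative; the helper below is hypothetical): the
 * br[] and bc[] arrays are indexed by the _IOC_NR() of the return or
 * command code, and obj_created[]/obj_deleted[] by enum binder_stat_types:
 *
 *	static void stats_count_bc(struct binder_stats *stats, u32 cmd)
 *	{
 *		if (_IOC_NR(cmd) < ARRAY_SIZE(stats->bc))
 *			atomic_inc(&stats->bc[_IOC_NR(cmd)]);
 *	}
 *
 *	atomic_inc(&stats->obj_created[BINDER_STAT_TRANSACTION]);
 */
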
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_PENDING,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
		BINDER_WORK_FROZEN_BINDER,
		BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
	} type;

	ANDROID_OEM_DATA(1);
};

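/*
 * Enqueue sketch (illustrative, assuming the caller already holds the
 * owning proc's inner_lock): a work item is simply linked onto the chosen
 * worklist, and its type tells the consumer how to handle it:
 *
 *	w->type = BINDER_WORK_TRANSACTION_COMPLETE;
 *	list_add_tail(&w->entry, &proc->todo);
 */
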
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref_freeze {
	struct binder_work work;
	binder_uintptr_t cookie;
	bool is_frozen:1;
	bool sent:1;
	bool resend:1;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 * @freeze:      pointer to freeze notification (ref_freeze) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
	struct binder_ref_freeze *freeze;
};

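/*
 * Lookup sketch (illustrative, assuming @proc->outer_lock is held): the
 * "desc + proc => ref" case above is a plain rbtree walk keyed on
 * ref->data.desc:
 *
 *	struct rb_node *n = proc->refs_by_desc.rb_node;
 *
 *	while (n) {
 *		struct binder_ref *ref;
 *
 *		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 *		if (desc < ref->data.desc)
 *			n = n->rb_left;
 *		else if (desc > ref->data.desc)
 *			n = n->rb_right;
 *		else
 *			return ref;
 *	}
 */
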
/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:            scheduler policy
 * @prio:                    [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};

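/*
 * Mapping sketch (illustrative, assuming <linux/sched/prio.h> and the
 * uapi SCHED_* policy constants): @prio uses kernel priority numbering,
 * so a SCHED_NORMAL priority can be derived from a nice value:
 *
 *	struct binder_priority p = {
 *		.sched_policy = SCHED_NORMAL,
 *		.prio = NICE_TO_PRIO(0),	// 120, i.e. nice 0
 *	};
 *
 * while an RT policy would instead use a prio in [0..MAX_RT_PRIO-1].
 */
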
enum binder_prio_state {
	BINDER_PRIO_SET,	/* desired priority set */
	BINDER_PRIO_PENDING,	/* initiated a saved priority restore */
	BINDER_PRIO_ABORT,	/* abort the pending priority restore */
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @cred:                 struct cred associated with the `struct file`
 *                        in binder_open()
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @outstanding_txns:     number of transactions to be transmitted before
 *                        processes in freeze_wait are woken up
 *                        (protected by @inner_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @is_frozen:            process is frozen and unable to service
 *                        binder transactions
 *                        (protected by @inner_lock)
 * @sync_recv:            process received sync transactions since last frozen
 *                        bit 0: received sync transaction after being frozen
 *                        bit 1: new pending sync transaction during freezing
 *                        (protected by @inner_lock)
 * @async_recv:           process received async transactions since last frozen
 *                        (protected by @inner_lock)
 * @freeze_wait:          waitqueue of processes waiting for all outstanding
 *                        transactions to be processed
 *                        (protected by @inner_lock)
 * @dmap:                 dbitmap to manage available reference descriptors
 *                        (protected by @outer_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @delivered_freeze:     list of delivered freeze notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In the current implementation, this
 *                        can only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 * @oneway_spam_detection_enabled: whether oneway spam detection is enabled
 *                        for this process
 *
 * Bookkeeping structure for binder processes.
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;
	struct dbitmap dmap;
	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	struct list_head delivered_freeze;
	u32 max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
	ANDROID_OEM_DATA(1);
};

/**
 * struct binder_proc_wrap - wrapper to preserve KMI in binder_proc
 * @proc:                    binder_proc being wrapped
 * @mutex:                   protects binder_alloc fields
 * @pages:                   array of struct page *
 * @mapped:                  whether the vm area is mapped; each binder
 *                           instance is allowed a single mapping throughout
 *                           its lifetime
 */
struct binder_proc_wrap {
	struct binder_proc proc;
	struct binder_alloc_wrap {
		struct mutex mutex;
		struct page **pages;
		bool mapped;
	} alloc;
};

static inline
struct binder_proc_wrap *proc_wrapper(struct binder_proc *proc)
{
	return container_of(proc, struct binder_proc_wrap, proc);
}

static inline
struct binder_alloc_wrap *alloc_to_wrap(struct binder_alloc *alloc)
{
	struct binder_proc *proc;

	proc = container_of(alloc, struct binder_proc, alloc);

	return &proc_wrapper(proc)->alloc;
}

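/*
 * Round-trip sketch (illustrative; the allocation site is hypothetical):
 * because binder_proc is embedded in binder_proc_wrap, container_of() lets
 * code that only holds the embedded binder_alloc reach the wrapper-only
 * fields without changing struct binder_proc itself:
 *
 *	struct binder_proc_wrap *wrap = kzalloc(sizeof(*wrap), GFP_KERNEL);
 *	struct binder_proc *proc = &wrap->proc;
 *
 *	mutex_lock(&alloc_to_wrap(&proc->alloc)->mutex);
 */
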
/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc:	binder_alloc for this proc
 *
 * Return:	the bytes remaining in the address-space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	size_t free_async_space;

	mutex_lock(&alloc_to_wrap(alloc)->mutex);
	free_async_space = alloc->free_async_space;
	mutex_unlock(&alloc_to_wrap(alloc)->mutex);
	return free_async_space;
}

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @ee:                   extended error information from this thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 * @prio_lock:            protects thread priority fields
 * @prio_next:            saved priority to be restored next
 *                        (protected by @prio_lock)
 * @prio_state:           state of the priority restore process as
 *                        defined by enum binder_prio_state
 *                        (protected by @prio_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	struct binder_extended_error ee;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
	spinlock_t prio_lock;
	struct binder_priority prio_next;
	enum binder_prio_state prio_state;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 * @target_fd:            fd to use by the target to install @file
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
	int target_fd;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	pid_t from_pid;
	pid_t from_tid;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */       /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int    code;
	unsigned int    flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	bool is_nested;
	kuid_t  sender_euid;
	ktime_t start_time;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
	ANDROID_VENDOR_DATA(1);
	ANDROID_OEM_DATA(1);
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};

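/*
 * Dispatch sketch (illustrative): after an object is copied out of a
 * transaction buffer, the shared header identifies which union member is
 * valid, e.g.:
 *
 *	switch (object->hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *		// object->fbo describes a node
 *		break;
 *	case BINDER_TYPE_FD:
 *		// object->fdo describes a file descriptor
 *		break;
 *	default:
 *		break;
 *	}
 */
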
#endif /* _LINUX_BINDER_INTERNAL_H */