/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2008 Google, Inc.
 *
 * Based on, but no longer compatible with, the original
 * OpenBinder.org binder driver interface, which is:
 *
 * Copyright (c) 2005 Palmsource, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {
	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
	BINDER_TYPE_FDA		= B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};

/**
 * enum flat_binder_object_shifts - shift values for flat_binder_object_flags
 * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
 */
enum flat_binder_object_shifts {
	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
};

/**
 * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
 */
enum flat_binder_object_flags {
	/**
	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
	 *
	 * These bits can be used to set the minimum scheduler priority
	 * at which transactions into this node should run. Valid values
	 * in these bits depend on the scheduler policy encoded in
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
	 *
	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is [-20..19].
	 * For SCHED_FIFO/SCHED_RR, the valid range is [1..99].
	 */
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	/**
	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
	 */
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,

	/**
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
	 *
	 * These two bits can be used to set the minimum scheduling policy at
	 * which transactions on this node should run. They match the UAPI
	 * scheduler policy values, e.g.:
	 * 00b: SCHED_NORMAL
	 * 01b: SCHED_FIFO
	 * 10b: SCHED_RR
	 * 11b: SCHED_BATCH
	 */
	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,

	/**
	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
	 *
	 * Only when set, calls into this node will inherit a real-time
	 * scheduling policy from the caller (for synchronous transactions).
	 */
	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,

	/**
	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
	 *
	 * Only when set, causes senders to include their security
	 * context.
	 */
	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};

#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif

/**
 * struct binder_object_header - header shared by all binder metadata objects.
 * @type:	type of the object
 */
struct binder_object_header {
	__u32        type;
};

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes.  The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur.  The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
	struct binder_object_header	hdr;
	__u32				flags;

	/* 8 bytes of data. */
	union {
		binder_uintptr_t	binder;	/* local object */
		__u32			handle;	/* remote object */
	};

	/* extra data associated with local object */
	binder_uintptr_t	cookie;
};
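
/*
 * Example (illustrative sketch, not part of the UAPI): a process can hand
 * one of its own objects to another process by embedding a
 * flat_binder_object in its transaction data; "local_object" and
 * "local_cookie" below are hypothetical userspace pointers:
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.flags    = (10 & FLAT_BINDER_FLAG_PRIORITY_MASK) |
 *			    FLAT_BINDER_FLAG_ACCEPTS_FDS,
 *		.binder   = (binder_uintptr_t)local_object,
 *		.cookie   = (binder_uintptr_t)local_cookie,
 *	};
 *
 * The driver rewrites hdr.type to BINDER_TYPE_HANDLE and replaces the
 * binder pointer with a handle as the object crosses into the receiving
 * process.
 */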

/**
 * struct binder_fd_object - describes a file descriptor to be fixed up.
 * @hdr:	common header structure
 * @pad_flags:	padding to remain compatible with old userspace code
 * @pad_binder:	padding to remain compatible with old userspace code
 * @fd:		file descriptor
 * @cookie:	opaque data, used by user-space
 */
struct binder_fd_object {
	struct binder_object_header	hdr;
	__u32				pad_flags;
	union {
		binder_uintptr_t	pad_binder;
		__u32			fd;
	};

	binder_uintptr_t		cookie;
};

/**
 * struct binder_buffer_object - object describing a userspace buffer
 * @hdr:		common header structure
 * @flags:		one or more BINDER_BUFFER_* flags
 * @buffer:		address of the buffer
 * @length:		length of the buffer
 * @parent:		index in offset array pointing to parent buffer
 * @parent_offset:	offset in @parent pointing to this buffer
 *
 * A binder_buffer object represents an object that the
 * binder kernel driver can copy verbatim to the target
 * address space. A buffer itself may be pointed to from
 * within another buffer, meaning that the pointer inside
 * that other buffer needs to be fixed up as well. This
 * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
 * flag in @flags, by setting @parent to the index
 * in the offset array pointing to the parent binder_buffer_object,
 * and by setting @parent_offset to the offset in the parent buffer
 * at which the pointer to this buffer is located.
 */
struct binder_buffer_object {
	struct binder_object_header	hdr;
	__u32				flags;
	binder_uintptr_t		buffer;
	binder_size_t			length;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};

enum {
	BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
};
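
/*
 * Example (illustrative sketch, not part of the UAPI): suppose the object
 * at index 0 of the offsets array is a binder_buffer_object whose buffer,
 * at byte offset 16, stores a pointer to a second buffer that is sent
 * separately.  That second buffer could be described as follows
 * ("child_data" and "child_len" are hypothetical):
 *
 *	struct binder_buffer_object child = {
 *		.hdr.type	= BINDER_TYPE_PTR,
 *		.flags		= BINDER_BUFFER_FLAG_HAS_PARENT,
 *		.buffer		= (binder_uintptr_t)child_data,
 *		.length		= child_len,
 *		.parent		= 0,	// index of the parent in the offsets array
 *		.parent_offset	= 16,	// where the parent stores the pointer
 *	};
 *
 * The driver copies both buffers and patches the pointer at parent_offset
 * inside the copied parent so it refers to the copy of the child in the
 * target address space.
 */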

/**
 * struct binder_fd_array_object - object describing an array of fds in a buffer
 * @hdr:		common header structure
 * @pad:		padding to ensure correct alignment
 * @num_fds:		number of file descriptors in the buffer
 * @parent:		index in offset array to buffer holding the fd array
 * @parent_offset:	start offset of fd array in the buffer
 *
 * A binder_fd_array object represents an array of file
 * descriptors embedded in a binder_buffer_object. It is
 * different from a regular binder_buffer_object because it
 * describes a list of file descriptors to fix up, not an opaque
 * blob of memory, and hence the kernel needs to treat it differently.
 *
 * An example of how this would be used is with Android's
 * native_handle_t object, which is a struct with a list of integers
 * and a list of file descriptors. The native_handle_t struct itself
 * will be represented by a struct binder_buffer_object, whereas the
 * embedded list of file descriptors is represented by a
 * struct binder_fd_array_object with that binder_buffer_object as
 * a parent.
 */
struct binder_fd_array_object {
	struct binder_object_header	hdr;
	__u32				pad;
	binder_size_t			num_fds;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};
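
/*
 * Example (illustrative sketch, not part of the UAPI): for a parent
 * binder_buffer_object at index 0 of the offsets array that embeds an
 * array of 3 file descriptors starting at byte offset 8 of its buffer,
 * the fixup request would look like:
 *
 *	struct binder_fd_array_object fda = {
 *		.hdr.type	= BINDER_TYPE_FDA,
 *		.num_fds	= 3,
 *		.parent		= 0,	// index of the parent buffer object
 *		.parent_offset	= 8,	// where the fd array starts in it
 *	};
 *
 * The driver replaces each of those 3 integers with file descriptors that
 * are valid in the receiving process, provided the target accepts fds.
 */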

/*
 * On 64-bit platforms where user code may run in 32-bits the driver must
 * translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
	binder_size_t		write_size;	/* bytes to write */
	binder_size_t		write_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	write_buffer;
	binder_size_t		read_size;	/* bytes to read */
	binder_size_t		read_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	read_buffer;
};
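
/*
 * Example (illustrative sketch, not part of the UAPI): a single
 * BINDER_WRITE_READ ioctl (defined below) can both submit commands and
 * read back returns; "fd" is assumed to be an open binder device and
 * "wbuf"/"rbuf" hypothetical userspace buffers:
 *
 *	struct binder_write_read bwr = {
 *		.write_size	= wlen,
 *		.write_buffer	= (binder_uintptr_t)wbuf,
 *		.read_size	= sizeof(rbuf),
 *		.read_buffer	= (binder_uintptr_t)rbuf,
 *	};
 *
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");	// see EINTR/ECONNREFUSED note below
 *
 * On return, write_consumed and read_consumed report how much of each
 * buffer the driver actually processed.
 */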

/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	__s32       protocol_version;
};

/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
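
/*
 * Example (illustrative sketch, not part of the UAPI): userspace typically
 * verifies the protocol version right after opening the device ("fd" is an
 * open binder fd):
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// incompatible driver
 */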

/*
 * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
 * Set ptr to NULL for the first call to get the info for the first node, and
 * then repeat the call passing the previously returned value to get the next
 * nodes.  ptr will be 0 when there are no more nodes.
 */
struct binder_node_debug_info {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	__u32            has_strong_ref;
	__u32            has_weak_ref;
};
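
/*
 * Example (illustrative sketch, not part of the UAPI) of the iteration
 * described above ("fd" is an open binder fd):
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	for (;;) {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr == 0)
 *			break;		// no more nodes
 *		// info.cookie, info.has_strong_ref and info.has_weak_ref
 *		// describe the node at info.ptr; it is passed back in on
 *		// the next call to advance
 *	}
 */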

struct binder_node_info_for_ref {
	__u32            handle;
	__u32            strong_count;
	__u32            weak_count;
	__u32            reserved1;
	__u32            reserved2;
	__u32            reserved3;
};

struct binder_freeze_info {
	__u32            pid;
	__u32            enable;
	__u32            timeout_ms;
};

struct binder_frozen_status_info {
	__u32            pid;

	/* process received sync transactions since last frozen
	 * bit 0: received sync transaction after being frozen
	 * bit 1: new pending sync transaction during freezing
	 */
	__u32            sync_recv;

	/* process received async transactions since last frozen */
	__u32            async_recv;
};
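
/*
 * Example (illustrative sketch, not part of the UAPI): freezing a target
 * process and then checking whether anyone tried to reach it while frozen;
 * "fd" is an open binder fd, "target" a hypothetical pid and
 * handle_sync_txn_while_frozen() a hypothetical helper:
 *
 *	struct binder_freeze_info freeze = {
 *		.pid		= target,
 *		.enable		= 1,
 *		.timeout_ms	= 100,	// grace period for pending transactions
 *	};
 *	struct binder_frozen_status_info status = { .pid = target };
 *
 *	if (ioctl(fd, BINDER_FREEZE, &freeze) == 0 &&
 *	    ioctl(fd, BINDER_GET_FROZEN_INFO, &status) == 0 &&
 *	    (status.sync_recv & 1))
 *		handle_sync_txn_while_frozen();
 */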

#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
#define BINDER_FREEZE			_IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO		_IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION	_IOW('b', 16, __u32)

/*
 * NOTE: Two special error codes you should check for when calling
 * into the driver are:
 *
 * EINTR -- The operation has been interrupted.  This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process.  That is, the process is being destroyed.
 * You should handle this by exiting from your process.  Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */
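
/*
 * Example (illustrative sketch, not part of the UAPI) of the retry loop
 * described above:
 *
 *	int ret;
 *
 *	do {
 *		ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	} while (ret < 0 && errno == EINTR);
 *
 *	if (ret < 0 && errno == ECONNREFUSED)
 *		exit(0);	// the driver has shut this process out
 */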

enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
	TF_CLEAR_BUF	= 0x20,	/* clear buffer on txn complete */
	TF_UPDATE_TXN	= 0x40,	/* update the outdated pending async txn */
};

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		/* target descriptor of command transaction */
		__u32	handle;
		/* target descriptor of return transaction */
		binder_uintptr_t ptr;
	} target;
	binder_uintptr_t	cookie;	/* target object cookie */
	__u32		code;		/* transaction command */

	/* General information about the transaction. */
	__u32	        flags;
	pid_t		sender_pid;
	uid_t		sender_euid;
	binder_size_t	data_size;	/* number of bytes of data */
	binder_size_t	offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			binder_uintptr_t	buffer;
			/* offsets from buffer to flat_binder_object structs */
			binder_uintptr_t	offsets;
		} ptr;
		__u8	buf[8];
	} data;
};
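
/*
 * Example (illustrative sketch, not part of the UAPI): commands in the
 * write buffer are encoded as a 32-bit command code (see
 * enum binder_driver_command_protocol below) immediately followed by that
 * command's payload.  A minimal one-way call to handle 1 with no embedded
 * objects could be assembled like this ("wbuf", "payload" and
 * "payload_len" are hypothetical):
 *
 *	struct binder_transaction_data txn = {
 *		.target.handle		= 1,
 *		.code			= 42,	// app-defined method selector
 *		.flags			= TF_ONE_WAY,
 *		.data_size		= payload_len,
 *		.data.ptr.buffer	= (binder_uintptr_t)payload,
 *	};
 *	__u32 cmd = BC_TRANSACTION;
 *
 *	memcpy(wbuf, &cmd, sizeof(cmd));
 *	memcpy(wbuf + sizeof(cmd), &txn, sizeof(txn));
 *	// point binder_write_read.write_buffer at wbuf with
 *	// write_size = sizeof(cmd) + sizeof(txn)
 */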

struct binder_transaction_data_secctx {
	struct binder_transaction_data transaction_data;
	binder_uintptr_t secctx;
};

struct binder_transaction_data_sg {
	struct binder_transaction_data transaction_data;
	binder_size_t buffers_size;
};

struct binder_ptr_cookie {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

struct binder_handle_cookie {
	__u32 handle;
	binder_uintptr_t cookie;
} __packed;

struct binder_pri_desc {
	__s32 priority;
	__u32 desc;
};

struct binder_pri_ptr_cookie {
	__s32 priority;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

enum binder_driver_return_protocol {
	BR_ERROR = _IOR('r', 0, __s32),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
				      struct binder_transaction_data_secctx),
	/*
	 * binder_transaction_data_secctx: the received command.
	 */
	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies).  Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *:	ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int:	priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters.  Do nothing and examine the next command.  It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters.  The driver has determined that a process has no
	 * threads waiting to service incoming transactions.  When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
	 */

	BR_FROZEN_REPLY = _IO('r', 18),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is frozen.  No parameters.
	 */

	BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
	/*
	 * Current process sent too many oneway calls to target, and the last
	 * asynchronous transaction makes the allocated async buffer size exceed
	 * the detection threshold.  No parameters.
	 */
};

enum binder_driver_command_protocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
	/*
	 * not currently supported
	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, __u32),
	BC_ACQUIRE = _IOW('c', 5, __u32),
	BC_RELEASE = _IOW('c', 6, __u32),
	BC_DECREFS = _IOW('c', 7, __u32),
	/*
	 * int:	descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively.  They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
	BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
	/*
	 * binder_transaction_data_sg: the sent command.
	 */
};
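
/*
 * Example (illustrative sketch, not part of the UAPI): the thread-pool
 * handshake built from BC_ENTER_LOOPER, BC_REGISTER_LOOPER and
 * BR_SPAWN_LOOPER.  The main thread joins the pool on its own initiative
 * ("fd" and "rbuf" are hypothetical):
 *
 *	__u32 cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size	= sizeof(cmd),
 *		.write_buffer	= (binder_uintptr_t)&cmd,
 *		.read_size	= sizeof(rbuf),
 *		.read_buffer	= (binder_uintptr_t)rbuf,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	// walk the returns in rbuf; whenever BR_SPAWN_LOOPER appears, start
 *	// a new thread that first sends BC_REGISTER_LOOPER and then loops on
 *	// BINDER_WRITE_READ itself, up to the BINDER_SET_MAX_THREADS limit
 */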

#endif /* _UAPI_LINUX_BINDER_H */