/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2008 Google, Inc.
 *
 * Based on, but no longer compatible with, the original
 * OpenBinder.org binder driver interface, which is:
 *
 * Copyright (c) 2005 Palmsource, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {
	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
	BINDER_TYPE_FDA		= B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
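
/*
 * Example (illustrative, not part of the UAPI): each object type code above
 * packs four characters with B_TYPE_LARGE in the low byte, e.g.:
 *
 *	BINDER_TYPE_BINDER == B_PACK_CHARS('s', 'b', '*', 0x85)
 *	                   == ('s' << 24) | ('b' << 16) | ('*' << 8) | 0x85
 *	                   == 0x73622a85
 */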

/**
 * enum flat_binder_object_shifts - shift values for flat_binder_object_flags
 * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
 */
enum flat_binder_object_shifts {
	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
};

/**
 * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
 */
enum flat_binder_object_flags {
	/**
	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
	 *
	 * These bits can be used to set the minimum scheduler priority
	 * at which transactions into this node should run. Valid values
	 * in these bits depend on the scheduler policy encoded in
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
	 *
	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is [-20..19].
	 * For SCHED_FIFO/SCHED_RR, the valid range is [1..99].
	 */
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	/**
	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
	 */
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,

	/**
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
	 *
	 * These two bits can be used to set the minimum scheduling policy at
	 * which transactions on this node should run. They match the UAPI
	 * scheduler policy values, e.g.:
	 * 00b: SCHED_NORMAL
	 * 01b: SCHED_FIFO
	 * 10b: SCHED_RR
	 * 11b: SCHED_BATCH
	 */
	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,

	/**
	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
	 *
	 * Only when this flag is set will calls into this node inherit a
	 * real-time scheduling policy from the caller (for synchronous
	 * transactions).
	 */
	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,

	/**
	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
	 *
	 * Only when this flag is set do senders include their security
	 * context with the transaction.
	 */
	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};
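
/*
 * Example (a sketch, not part of the UAPI): composing flat_binder_object.flags
 * for a node that accepts file descriptors and asks for SCHED_FIFO at a
 * minimum priority of 10. SCHED_FIFO is assumed to come from <sched.h> and
 * to match the UAPI policy value (1) referenced above.
 *
 *	#include <sched.h>
 *
 *	__u32 flags = FLAT_BINDER_FLAG_ACCEPTS_FDS |
 *		      (SCHED_FIFO << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) |
 *		      (10 & FLAT_BINDER_FLAG_PRIORITY_MASK);
 */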

#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif

/**
 * struct binder_object_header - header shared by all binder metadata objects.
 * @type:	type of the object
 */
struct binder_object_header {
	__u32        type;
};

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes.  The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur.  The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
	struct binder_object_header	hdr;
	__u32				flags;

	/* 8 bytes of data. */
	union {
		binder_uintptr_t	binder;	/* local object */
		__u32			handle;	/* remote object */
	};

	/* extra data associated with local object */
	binder_uintptr_t	cookie;
};
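
/*
 * Example (a minimal sketch; variable names are hypothetical): embedding one
 * strong binder reference in a transaction, as described above. The object is
 * written into the data buffer and its byte offset is recorded in the offsets
 * array so the driver can find and translate it.
 *
 *	#include <stddef.h>
 *
 *	struct txn_payload {
 *		__u32				arg;
 *		struct flat_binder_object	obj;
 *	};
 *
 *	struct txn_payload payload = {
 *		.obj.hdr.type	= BINDER_TYPE_BINDER,
 *		.obj.flags	= FLAT_BINDER_FLAG_ACCEPTS_FDS,
 *		.obj.binder	= (binder_uintptr_t)local_object,
 *		.obj.cookie	= (binder_uintptr_t)local_cookie,
 *	};
 *	binder_size_t offsets[] = { offsetof(struct txn_payload, obj) };
 */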

/**
 * struct binder_fd_object - describes a file descriptor to be fixed up.
 * @hdr:	common header structure
 * @pad_flags:	padding to remain compatible with old userspace code
 * @pad_binder:	padding to remain compatible with old userspace code
 * @fd:		file descriptor
 * @cookie:	opaque data, used by user-space
 */
struct binder_fd_object {
	struct binder_object_header	hdr;
	__u32				pad_flags;
	union {
		binder_uintptr_t	pad_binder;
		__u32			fd;
	};

	binder_uintptr_t		cookie;
};

/**
 * struct binder_buffer_object - object describing a userspace buffer
 * @hdr:		common header structure
 * @flags:		one or more BINDER_BUFFER_* flags
 * @buffer:		address of the buffer
 * @length:		length of the buffer
 * @parent:		index in offset array pointing to parent buffer
 * @parent_offset:	offset in @parent pointing to this buffer
 *
 * A binder_buffer object represents an object that the
 * binder kernel driver can copy verbatim to the target
 * address space. A buffer itself may be pointed to from
 * within another buffer, meaning that the pointer inside
 * that other buffer needs to be fixed up as well. This
 * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
 * flag in @flags, by setting @parent to the index
 * in the offset array pointing to the parent binder_buffer_object,
 * and by setting @parent_offset to the offset in the parent buffer
 * at which the pointer to this buffer is located.
 */
struct binder_buffer_object {
	struct binder_object_header	hdr;
	__u32				flags;
	binder_uintptr_t		buffer;
	binder_size_t			length;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};

enum {
	BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
};
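
/*
 * Example (a sketch; the variable names, index, and offset are hypothetical):
 * describing a child buffer whose address is stored inside a parent buffer.
 * Assume the offsets array already places a parent binder_buffer_object at
 * index 0 and that the parent's payload stores the pointer to the child at
 * byte offset 16; the driver rewrites that pointer for the target process.
 *
 *	struct binder_buffer_object child = {
 *		.hdr.type	= BINDER_TYPE_PTR,
 *		.flags		= BINDER_BUFFER_FLAG_HAS_PARENT,
 *		.buffer		= (binder_uintptr_t)child_data,
 *		.length		= child_len,
 *		.parent		= 0,
 *		.parent_offset	= 16,
 *	};
 */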

/**
 * struct binder_fd_array_object - object describing an array of fds in a buffer
 * @hdr:		common header structure
 * @pad:		padding to ensure correct alignment
 * @num_fds:		number of file descriptors in the buffer
 * @parent:		index in offset array to buffer holding the fd array
 * @parent_offset:	start offset of fd array in the buffer
 *
 * A binder_fd_array object represents an array of file
 * descriptors embedded in a binder_buffer_object. It is
 * different from a regular binder_buffer_object because it
 * describes a list of file descriptors to fix up, not an opaque
 * blob of memory, and hence the kernel needs to treat it differently.
 *
 * An example of how this would be used is with Android's
 * native_handle_t object, which is a struct with a list of integers
 * and a list of file descriptors. The native_handle_t struct itself
 * will be represented by a struct binder_buffer_object, whereas the
 * embedded list of file descriptors is represented by a
 * struct binder_fd_array_object with that binder_buffer_object as
 * a parent.
 */
struct binder_fd_array_object {
	struct binder_object_header	hdr;
	__u32				pad;
	binder_size_t			num_fds;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};
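
/*
 * Example (a sketch; the index, offset, and count are hypothetical):
 * describing three file descriptors embedded in the parent buffer at
 * offsets[] index 0, starting 12 bytes into that buffer. The driver installs
 * new fd numbers in place for the receiving process.
 *
 *	struct binder_fd_array_object fda = {
 *		.hdr.type	= BINDER_TYPE_FDA,
 *		.num_fds	= 3,
 *		.parent		= 0,
 *		.parent_offset	= 12,
 *	};
 */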

/*
 * On 64-bit platforms where user code may run in 32-bit mode, the driver
 * must translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
	binder_size_t		write_size;	/* bytes to write */
	binder_size_t		write_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	write_buffer;
	binder_size_t		read_size;	/* bytes to read */
	binder_size_t		read_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	read_buffer;
};

/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	__s32       protocol_version;
};

/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif

/*
 * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
 * Set ptr to NULL for the first call to get the info for the first node, and
 * then repeat the call passing the previously returned value to get the next
 * nodes.  ptr will be 0 when there are no more nodes.
 */
struct binder_node_debug_info {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	__u32            has_strong_ref;
	__u32            has_weak_ref;
};
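
/*
 * Example (a sketch, assuming fd is an open binder device descriptor):
 * walking nodes as described above, starting with ptr == 0 and feeding the
 * returned ptr back in until the driver reports 0 again.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr)
 *			printf("node %llx strong:%u weak:%u\n",
 *			       (unsigned long long)info.ptr,
 *			       info.has_strong_ref, info.has_weak_ref);
 *	} while (info.ptr);
 */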

struct binder_node_info_for_ref {
	__u32            handle;
	__u32            strong_count;
	__u32            weak_count;
	__u32            reserved1;
	__u32            reserved2;
	__u32            reserved3;
};

struct binder_freeze_info {
	__u32            pid;
	__u32            enable;
	__u32            timeout_ms;
};

struct binder_frozen_status_info {
	__u32            pid;

	/* process received sync transactions since last frozen
	 * bit 0: received sync transaction after being frozen
	 * bit 1: new pending sync transaction during freezing
	 */
	__u32            sync_recv;

	/* process received async transactions since last frozen */
	__u32            async_recv;
};
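
/*
 * Example (a sketch, assuming fd is an open binder device descriptor and
 * target_pid is the frozen process of interest): querying and decoding the
 * sync_recv bits documented above.
 *
 *	#include <sys/ioctl.h>
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *
 *	if (ioctl(fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		int got_sync_while_frozen  = info.sync_recv & 0x1;
 *		int sync_pending_on_freeze = info.sync_recv & 0x2;
 *		int got_async_while_frozen = info.async_recv;
 *	}
 */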

/**
 * struct binder_extended_error - extended error information
 * @id:		identifier for the failed operation
 * @command:	command as defined by binder_driver_return_protocol
 * @param:	parameter holding a negative errno value
 *
 * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information
 * returned by the driver upon a failed operation. Userspace can pull this
 * data to properly handle specific error scenarios.
 */
struct binder_extended_error {
	__u32	id;
	__u32	command;
	__s32	param;
};
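
/*
 * Example (a sketch, assuming fd is an open binder device descriptor):
 * pulling the extended error record after an operation on the device failed.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *
 *	struct binder_extended_error ee;
 *
 *	if (ioctl(fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
 *		fprintf(stderr, "binder op %u failed: cmd=0x%x err=%d\n",
 *			ee.id, ee.command, ee.param);
 */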

#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
#define BINDER_FREEZE			_IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO		_IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION	_IOW('b', 16, __u32)
#define BINDER_GET_EXTENDED_ERROR	_IOWR('b', 17, struct binder_extended_error)
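
/*
 * Example (a minimal sketch; the device path and the install location of
 * this header are assumptions): typical initialization against the ioctls
 * above: open the device, verify the protocol version, and cap the pool of
 * threads the driver may ask the process to spawn.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version ver;
 *	__u32 max_threads = 4;
 *
 *	if (fd < 0)
 *		return -1;
 *	if (ioctl(fd, BINDER_VERSION, &ver) < 0 ||
 *	    ver.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */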

/*
 * NOTE: Two special error codes you should check for when calling
 * into the driver are:
 *
 * EINTR -- The operation has been interrupted.  This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process.  That is, the process is being destroyed.
 * You should handle this by exiting from your process.  Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */
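
/*
 * Example (a sketch, assuming fd is an open binder device descriptor and bwr
 * is a prepared struct binder_write_read): the retry discipline described in
 * the note above, applied to BINDER_WRITE_READ.
 *
 *	#include <errno.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *
 *	int ret;
 *
 *	do {
 *		ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	} while (ret < 0 && errno == EINTR);
 *
 *	if (ret < 0 && errno == ECONNREFUSED)
 *		exit(EXIT_FAILURE);
 */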

enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
	TF_CLEAR_BUF	= 0x20,	/* clear buffer on txn complete */
	TF_UPDATE_TXN	= 0x40,	/* update the outdated pending async txn */
};

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		/* target descriptor of command transaction */
		__u32	handle;
		/* target descriptor of return transaction */
		binder_uintptr_t ptr;
	} target;
	binder_uintptr_t	cookie;	/* target object cookie */
	__u32		code;		/* transaction command */

	/* General information about the transaction. */
	__u32	        flags;
	__kernel_pid_t	sender_pid;
	__kernel_uid32_t	sender_euid;
	binder_size_t	data_size;	/* number of bytes of data */
	binder_size_t	offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			binder_uintptr_t	buffer;
			/* offsets from buffer to flat_binder_object structs */
			binder_uintptr_t	offsets;
		} ptr;
		__u8	buf[8];
	} data;
};
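
/*
 * Example (a sketch; the payload and offsets are from the earlier
 * flat_binder_object sketch, and the transaction code is hypothetical):
 * describing a transaction aimed at handle 0 (the context manager). In the
 * command stream this struct follows its BC_TRANSACTION command word; the
 * driver fills in sender_pid and sender_euid on delivery.
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle	  = 0,
 *		.code		  = 1,
 *		.flags		  = TF_ACCEPT_FDS,
 *		.data_size	  = sizeof(payload),
 *		.offsets_size	  = sizeof(offsets),
 *		.data.ptr.buffer  = (binder_uintptr_t)&payload,
 *		.data.ptr.offsets = (binder_uintptr_t)offsets,
 *	};
 */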

struct binder_transaction_data_secctx {
	struct binder_transaction_data transaction_data;
	binder_uintptr_t secctx;
};

struct binder_transaction_data_sg {
	struct binder_transaction_data transaction_data;
	binder_size_t buffers_size;
};

struct binder_ptr_cookie {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

struct binder_handle_cookie {
	__u32 handle;
	binder_uintptr_t cookie;
} __packed;

struct binder_pri_desc {
	__s32 priority;
	__u32 desc;
};

struct binder_pri_ptr_cookie {
	__s32 priority;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

enum binder_driver_return_protocol {
	BR_ERROR = _IOR('r', 0, __s32),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
				      struct binder_transaction_data_secctx),
	/*
	 * binder_transaction_data_secctx: the received command.
	 */
	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies).  Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *:	ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int:	priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters.  Do nothing and examine the next command.  It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters.  The driver has determined that a process has no
	 * threads waiting to service incoming transactions.  When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
	 */

	BR_FROZEN_REPLY = _IO('r', 18),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is frozen.  No parameters.
	 */

	BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
	/*
	 * The current process sent too many oneway calls to the target, and
	 * the last asynchronous transaction made the allocated async buffer
	 * size exceed the detection threshold.  No parameters.
	 */
};

enum binder_driver_command_protocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
	/*
	 * not currently supported
	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, __u32),
	BC_ACQUIRE = _IOW('c', 5, __u32),
	BC_RELEASE = _IOW('c', 6, __u32),
	BC_DECREFS = _IOW('c', 7, __u32),
	/*
	 * int:	descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively.  They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
	BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
	/*
	 * binder_transaction_data_sg: the sent command.
	 */
};

#endif /* _UAPI_LINUX_BINDER_H */