/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2008 Google, Inc.
 *
 * Based on, but no longer compatible with, the original
 * OpenBinder.org binder driver interface, which is:
 *
 * Copyright (c) 2005 Palmsource, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {
	BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
	BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
	BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};

/**
 * enum flat_binder_object_shifts - shift values for flat_binder_object_flags
 * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
 */
enum flat_binder_object_shifts {
	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
};

/**
 * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
 */
enum flat_binder_object_flags {
	/**
	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
	 *
	 * These bits can be used to set the minimum scheduler priority
	 * at which transactions into this node should run. Valid values
	 * in these bits depend on the scheduler policy encoded in
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
	 *
	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is [-20..19].
	 * For SCHED_FIFO/SCHED_RR, the valid range is [1..99].
	 */
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	/**
	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
	 */
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,

	/**
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
	 *
	 * These two bits can be used to set the minimum scheduling policy at
	 * which transactions on this node should run. They match the UAPI
	 * scheduler policy values, e.g.:
	 * 00b: SCHED_NORMAL
	 * 01b: SCHED_FIFO
	 * 10b: SCHED_RR
	 * 11b: SCHED_BATCH
	 */
	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,

	/**
	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
	 *
	 * Only when set, calls into this node will inherit a real-time
	 * scheduling policy from the caller (for synchronous transactions).
	 */
	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,

	/**
	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
	 *
	 * Only when set, causes senders to include their security
	 * context.
	 */
	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};

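/*
 * Illustrative sketch (not part of the UAPI): composing a flags value for a
 * node that accepts file descriptors, requests SCHED_RR (10b in the policy
 * bits) with a minimum real-time priority of 10, and asks senders to attach
 * their security context. The specific values chosen here are arbitrary.
 *
 *	__u32 flags = FLAT_BINDER_FLAG_ACCEPTS_FDS |
 *		      (2U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) |
 *		      (10 & FLAT_BINDER_FLAG_PRIORITY_MASK) |
 *		      FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
 */
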
#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif

/**
 * struct binder_object_header - header shared by all binder metadata objects.
 * @type: type of the object
 */
struct binder_object_header {
	__u32 type;
};

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes. The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur. The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
	struct binder_object_header hdr;
	__u32 flags;

	/* 8 bytes of data. */
	union {
		binder_uintptr_t binder;	/* local object */
		__u32 handle;			/* remote object */
	};

	/* extra data associated with local object */
	binder_uintptr_t cookie;
};

/**
 * struct binder_fd_object - describes a file descriptor to be fixed up.
 * @hdr: common header structure
 * @pad_flags: padding to remain compatible with old userspace code
 * @pad_binder: padding to remain compatible with old userspace code
 * @fd: file descriptor
 * @cookie: opaque data, used by user-space
 */
struct binder_fd_object {
	struct binder_object_header hdr;
	__u32 pad_flags;
	union {
		binder_uintptr_t pad_binder;
		__u32 fd;
	};

	binder_uintptr_t cookie;
};

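/*
 * Illustrative sketch (not part of the UAPI): passing a file descriptor in
 * transaction data. The object is placed in the data buffer and its byte
 * offset is recorded in the offsets array so the driver can install a new
 * descriptor in the receiving process. 'some_fd' is hypothetical, standing
 * in for any descriptor open in the sender.
 *
 *	struct binder_fd_object obj = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = some_fd,		// hypothetical, an open descriptor
 *		.cookie = 0,
 *	};
 */
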
/* struct binder_buffer_object - object describing a userspace buffer
 * @hdr: common header structure
 * @flags: one or more BINDER_BUFFER_* flags
 * @buffer: address of the buffer
 * @length: length of the buffer
 * @parent: index in offset array pointing to parent buffer
 * @parent_offset: offset in @parent pointing to this buffer
 *
 * A binder_buffer object represents an object that the
 * binder kernel driver can copy verbatim to the target
 * address space. A buffer itself may be pointed to from
 * within another buffer, meaning that the pointer inside
 * that other buffer needs to be fixed up as well. This
 * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
 * flag in @flags, by setting @parent to the index
 * in the offset array pointing to the parent binder_buffer_object,
 * and by setting @parent_offset to the offset in the parent buffer
 * at which the pointer to this buffer is located.
 */
struct binder_buffer_object {
	struct binder_object_header hdr;
	__u32 flags;
	binder_uintptr_t buffer;
	binder_size_t length;
	binder_size_t parent;
	binder_size_t parent_offset;
};

enum {
	BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
};

/* struct binder_fd_array_object - object describing an array of fds in a buffer
 * @hdr: common header structure
 * @pad: padding to ensure correct alignment
 * @num_fds: number of file descriptors in the buffer
 * @parent: index in offset array to buffer holding the fd array
 * @parent_offset: start offset of fd array in the buffer
 *
 * A binder_fd_array object represents an array of file
 * descriptors embedded in a binder_buffer_object. It is
 * different from a regular binder_buffer_object because it
 * describes a list of file descriptors to fix up, not an opaque
 * blob of memory, and hence the kernel needs to treat it differently.
 *
 * An example of how this would be used is with Android's
 * native_handle_t object, which is a struct with a list of integers
 * and a list of file descriptors. The native_handle_t struct itself
 * will be represented by a struct binder_buffer_object, whereas the
 * embedded list of file descriptors is represented by a
 * struct binder_fd_array_object with that binder_buffer_object as
 * a parent.
 */
struct binder_fd_array_object {
	struct binder_object_header hdr;
	__u32 pad;
	binder_size_t num_fds;
	binder_size_t parent;
	binder_size_t parent_offset;
};

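/*
 * Illustrative sketch (not part of the UAPI): describing an embedded fd array,
 * in the spirit of the native_handle_t example above. The parent
 * binder_buffer_object is assumed to sit at index 0 of the offsets array, and
 * the descriptors are assumed to start 'fds_offset' bytes into the parent
 * buffer; 'nr_fds' and 'fds_offset' are hypothetical names.
 *
 *	struct binder_fd_array_object fda = {
 *		.hdr.type = BINDER_TYPE_FDA,
 *		.num_fds = nr_fds,		// hypothetical count of fds
 *		.parent = 0,			// offsets[0] -> parent buffer
 *		.parent_offset = fds_offset,	// where the fds live in it
 *	};
 */
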
/*
 * On 64-bit platforms where user code may run in 32-bit mode, the driver
 * must translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
	binder_size_t write_size;	/* bytes to write */
	binder_size_t write_consumed;	/* bytes consumed by driver */
	binder_uintptr_t write_buffer;
	binder_size_t read_size;	/* bytes to read */
	binder_size_t read_consumed;	/* bytes consumed by driver */
	binder_uintptr_t read_buffer;
};

/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	__s32 protocol_version;
};

/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif

/*
 * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
 * Set ptr to NULL for the first call to get the info for the first node, and
 * then repeat the call passing the previously returned value to get the next
 * nodes. ptr will be 0 when there are no more nodes.
 */
struct binder_node_debug_info {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	__u32 has_strong_ref;
	__u32 has_weak_ref;
};

struct binder_node_info_for_ref {
	__u32 handle;
	__u32 strong_count;
	__u32 weak_count;
	__u32 reserved1;
	__u32 reserved2;
	__u32 reserved3;
};

#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)

/*
 * NOTE: Two special error codes you should check for when calling
 * into the driver are:
 *
 * EINTR -- The operation has been interrupted. This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process. That is, the process is being destroyed.
 * You should handle this by exiting from your process. Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */

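/*
 * Illustrative sketch (not part of the UAPI): issuing BINDER_WRITE_READ and
 * retrying on EINTR as described above. 'fd' is assumed to be an open
 * /dev/binder descriptor, 'write_buf'/'write_len' a prepared command buffer,
 * and 'read_buf'/'read_len' space for returned commands (all hypothetical).
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)write_buf,
 *		.write_size = write_len,
 *		.read_buffer = (binder_uintptr_t)(uintptr_t)read_buf,
 *		.read_size = read_len,
 *	};
 *	int ret;
 *	do {
 *		ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	} while (ret < 0 && errno == EINTR);
 */
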
enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
	TF_CLEAR_BUF	= 0x20,	/* clear buffer on txn complete */
};

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		/* target descriptor of command transaction */
		__u32 handle;
		/* target descriptor of return transaction */
		binder_uintptr_t ptr;
	} target;
	binder_uintptr_t cookie;	/* target object cookie */
	__u32 code;			/* transaction command */

	/* General information about the transaction. */
	__u32 flags;
	pid_t sender_pid;
	uid_t sender_euid;
	binder_size_t data_size;	/* number of bytes of data */
	binder_size_t offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			binder_uintptr_t buffer;
			/* offsets from buffer to flat_binder_object structs */
			binder_uintptr_t offsets;
		} ptr;
		__u8 buf[8];
	} data;
};

struct binder_transaction_data_secctx {
	struct binder_transaction_data transaction_data;
	binder_uintptr_t secctx;
};

struct binder_transaction_data_sg {
	struct binder_transaction_data transaction_data;
	binder_size_t buffers_size;
};

struct binder_ptr_cookie {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

struct binder_handle_cookie {
	__u32 handle;
	binder_uintptr_t cookie;
} __packed;

struct binder_pri_desc {
	__s32 priority;
	__u32 desc;
};

struct binder_pri_ptr_cookie {
	__s32 priority;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

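/*
 * Illustrative sketch (not part of the UAPI): each command in the write
 * buffer is a 32-bit command code followed by its payload. For example,
 * sending a transaction to a remote handle with BC_TRANSACTION, defined
 * below; 'target_handle', 'txn_data', 'txn_size', 'offsets' and 'n_offsets'
 * are hypothetical names for caller-prepared values.
 *
 *	struct {
 *		__u32 cmd;
 *		struct binder_transaction_data tr;
 *	} __attribute__((packed)) writebuf = {
 *		.cmd = BC_TRANSACTION,
 *		.tr = {
 *			.target.handle = target_handle,
 *			.code = 1,		// hypothetical method code
 *			.flags = TF_ACCEPT_FDS,
 *			.data_size = txn_size,
 *			.offsets_size = n_offsets * sizeof(binder_size_t),
 *			.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)txn_data,
 *			.data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets,
 *		},
 *	};
 */
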
enum binder_driver_return_protocol {
	BR_ERROR = _IOR('r', 0, __s32),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
				      struct binder_transaction_data_secctx),
	/*
	 * binder_transaction_data_secctx: the received command.
	 */
	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies). Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int: priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters. Do nothing and examine the next command. It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters. The driver has determined that a process has no
	 * threads waiting to service incoming transactions. When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
	 */
};

enum binder_driver_command_protocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, __u32),
	BC_ACQUIRE = _IOW('c', 5, __u32),
	BC_RELEASE = _IOW('c', 6, __u32),
	BC_DECREFS = _IOW('c', 7, __u32),
	/*
	 * int: descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively. They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
					     struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
					   struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
	BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
	/*
	 * binder_transaction_data_sg: the sent command.
	 */
};

#endif /* _UAPI_LINUX_BINDER_H */
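
/*
 * Illustrative sketch (not part of the UAPI): a minimal consumer of the
 * returned command stream. Each returned command is a 32-bit BR_* code
 * followed by its payload; BR_NOOP carries nothing, a BR_TRANSACTION payload
 * must eventually be released with BC_FREE_BUFFER, and BR_SPAWN_LOOPER asks
 * the process to start a thread that registers via BC_REGISTER_LOOPER.
 * 'ptr' and 'end' (hypothetical) bound the bytes the driver placed in
 * read_buffer.
 *
 *	while (ptr < end) {
 *		__u32 cmd;
 *		memcpy(&cmd, ptr, sizeof(cmd));
 *		ptr += sizeof(cmd);
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data tr;
 *			memcpy(&tr, ptr, sizeof(tr));
 *			ptr += sizeof(tr);
 *			// ... handle the call, then queue BC_FREE_BUFFER
 *			// with tr.data.ptr.buffer
 *			break;
 *		}
 *		case BR_SPAWN_LOOPER:
 *			// spawn a thread that writes BC_REGISTER_LOOPER
 *			break;
 *		default:
 *			// payload size is encoded in the BR_* value itself
 *			ptr += _IOC_SIZE(cmd);
 *			break;
 *		}
 *	}
 */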