/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2008 Google, Inc.
 *
 * Based on, but no longer compatible with, the original
 * OpenBinder.org binder driver interface, which is:
 *
 * Copyright (c) 2005 Palmsource, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {
	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
	BINDER_TYPE_FDA		= B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
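/*
 * Worked example (illustrative only, not part of the UAPI): B_PACK_CHARS()
 * packs four bytes into a __u32, first argument in the most significant
 * byte. For BINDER_TYPE_BINDER, the characters 's' (0x73), 'b' (0x62),
 * '*' (0x2a) and B_TYPE_LARGE (0x85) therefore combine as:
 *
 *	('s' << 24) | ('b' << 16) | ('*' << 8) | 0x85
 *	  = 0x73000000 | 0x00620000 | 0x00002a00 | 0x00000085
 *	  = 0x73622a85
 */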
/**
 * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
 * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
 *
 */
enum flat_binder_object_shifts {
	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
};

/**
 * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
 */
enum flat_binder_object_flags {
	/**
	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
	 *
	 * These bits can be used to set the minimum scheduler priority
	 * at which transactions into this node should run. Valid values
	 * in these bits depend on the scheduler policy encoded in
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
	 *
	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
	 * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
	 */
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	/**
	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
	 */
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
	/**
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
	 *
	 * These two bits can be used to set the min scheduling policy at which
	 * transactions on this node should run. These match the UAPI
	 * scheduler policy values, e.g.:
	 * 00b: SCHED_NORMAL
	 * 01b: SCHED_FIFO
	 * 10b: SCHED_RR
	 * 11b: SCHED_BATCH
	 */
	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,

	/**
	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
	 *
	 * Only when set, calls into this node will inherit a real-time
	 * scheduling policy from the caller (for synchronous transactions).
	 */
	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,

	/**
	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
	 *
	 * Only when set, causes senders to include their security
	 * context
	 */
	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};

#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif

/**
 * struct binder_object_header - header shared by all binder metadata objects.
 * @type:	type of the object
 */
struct binder_object_header {
	__u32	type;
};

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes. The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur. The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
	struct binder_object_header	hdr;
	__u32				flags;

	/* 8 bytes of data. */
	union {
		binder_uintptr_t	binder;	/* local object */
		__u32			handle;	/* remote object */
	};

	/* extra data associated with local object */
	binder_uintptr_t	cookie;
};

/**
 * struct binder_fd_object - describes a file descriptor to be fixed up.
 * @hdr:	common header structure
 * @pad_flags:	padding to remain compatible with old userspace code
 * @pad_binder:	padding to remain compatible with old userspace code
 * @fd:		file descriptor
 * @cookie:	opaque data, used by user-space
 */
struct binder_fd_object {
	struct binder_object_header	hdr;
	__u32				pad_flags;
	union {
		binder_uintptr_t	pad_binder;
		__u32			fd;
	};

	binder_uintptr_t		cookie;
};

/* struct binder_buffer_object - object describing a userspace buffer
 * @hdr:		common header structure
 * @flags:		one or more BINDER_BUFFER_* flags
 * @buffer:		address of the buffer
 * @length:		length of the buffer
 * @parent:		index in offset array pointing to parent buffer
 * @parent_offset:	offset in @parent pointing to this buffer
 *
 * A binder_buffer object represents an object that the
 * binder kernel driver can copy verbatim to the target
 * address space. A buffer itself may be pointed to from
 * within another buffer, meaning that the pointer inside
 * that other buffer needs to be fixed up as well. This
 * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
 * flag in @flags, by setting @parent to the index
 * in the offset array pointing to the parent binder_buffer_object,
 * and by setting @parent_offset to the offset in the parent buffer
 * at which the pointer to this buffer is located.
 */
struct binder_buffer_object {
	struct binder_object_header	hdr;
	__u32				flags;
	binder_uintptr_t		buffer;
	binder_size_t			length;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};

enum {
	BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
};
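/*
 * Example (illustrative sketch, not part of the UAPI): a payload that
 * embeds a pointer to a second buffer can describe both buffers with
 * binder_buffer_object entries so the driver can fix up the embedded
 * pointer for the target address space. The sketch assumes the parent
 * object sits at index 0 of the offsets array and that the hypothetical
 * struct parent_payload keeps the embedded pointer in its 'child' member:
 *
 *	struct binder_buffer_object parent = {
 *		.hdr.type	= BINDER_TYPE_PTR,
 *		.buffer		= (binder_uintptr_t)&payload,
 *		.length		= sizeof(payload),
 *	};
 *	struct binder_buffer_object child = {
 *		.hdr.type	= BINDER_TYPE_PTR,
 *		.flags		= BINDER_BUFFER_FLAG_HAS_PARENT,
 *		.buffer		= (binder_uintptr_t)payload.child,
 *		.length		= child_len,
 *		.parent		= 0,	(index of 'parent' in the offsets array)
 *		.parent_offset	= offsetof(struct parent_payload, child),
 *	};
 */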
/* struct binder_fd_array_object - object describing an array of fds in a buffer
 * @hdr:		common header structure
 * @pad:		padding to ensure correct alignment
 * @num_fds:		number of file descriptors in the buffer
 * @parent:		index in offset array to buffer holding the fd array
 * @parent_offset:	start offset of fd array in the buffer
 *
 * A binder_fd_array object represents an array of file
 * descriptors embedded in a binder_buffer_object. It is
 * different from a regular binder_buffer_object because it
 * describes a list of file descriptors to fix up, not an opaque
 * blob of memory, and hence the kernel needs to treat it differently.
 *
 * An example of how this would be used is with Android's
 * native_handle_t object, which is a struct with a list of integers
 * and a list of file descriptors. The native_handle_t struct itself
 * will be represented by a struct binder_buffer_object, whereas the
 * embedded list of file descriptors is represented by a
 * struct binder_fd_array_object with that binder_buffer_object as
 * a parent.
 */
struct binder_fd_array_object {
	struct binder_object_header	hdr;
	__u32				pad;
	binder_size_t			num_fds;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};

/*
 * On 64-bit platforms where user code may run in 32-bit mode, the driver
 * must translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
	binder_size_t		write_size;	/* bytes to write */
	binder_size_t		write_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	write_buffer;
	binder_size_t		read_size;	/* bytes to read */
	binder_size_t		read_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	read_buffer;
};
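/*
 * Example (illustrative sketch, not part of the UAPI): a single
 * BINDER_WRITE_READ ioctl submits outgoing BC_* commands and collects
 * incoming BR_* commands in one call. Buffer sizes are caller-chosen;
 * the driver reports how much it consumed or filled. Assumes 'fd' is an
 * open descriptor for the binder device and that 'wlen' bytes of BC_*
 * commands have already been packed into wbuf:
 *
 *	char wbuf[256], rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer	= (binder_uintptr_t)wbuf,
 *		.write_size	= wlen,
 *		.read_buffer	= (binder_uintptr_t)rbuf,
 *		.read_size	= sizeof(rbuf),
 *	};
 *
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -errno;
 *
 * On return, bwr.read_consumed holds the number of bytes of BR_* data
 * the driver placed in rbuf.
 */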
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	__s32	protocol_version;
};

/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif

/*
 * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
 * Set ptr to NULL for the first call to get the info for the first node, and
 * then repeat the call passing the previously returned value to get the next
 * nodes. ptr will be 0 when there are no more nodes.
 */
struct binder_node_debug_info {
	binder_uintptr_t	ptr;
	binder_uintptr_t	cookie;
	__u32			has_strong_ref;
	__u32			has_weak_ref;
};

struct binder_node_info_for_ref {
	__u32	handle;
	__u32	strong_count;
	__u32	weak_count;
	__u32	reserved1;
	__u32	reserved2;
	__u32	reserved3;
};

#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)

/*
 * NOTE: Two special error codes you should check for when calling
 * in to the driver are:
 *
 * EINTR -- The operation has been interrupted. This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process. That is, the process is being destroyed.
 * You should handle this by exiting from your process. Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */
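/*
 * Example (illustrative sketch, not part of the UAPI): a small wrapper
 * that retries a binder ioctl() while it keeps failing with EINTR, as
 * recommended in the note above. The function name is hypothetical.
 *
 *	static int binder_ioctl_retry(int fd, unsigned long req, void *arg)
 *	{
 *		int ret;
 *
 *		do {
 *			ret = ioctl(fd, req, arg);
 *		} while (ret < 0 && errno == EINTR);
 *
 *		return ret;
 *	}
 */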
enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
};

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		/* target descriptor of command transaction */
		__u32			handle;
		/* target descriptor of return transaction */
		binder_uintptr_t	ptr;
	} target;
	binder_uintptr_t	cookie;	/* target object cookie */
	__u32			code;	/* transaction command */

	/* General information about the transaction. */
	__u32			flags;
	pid_t			sender_pid;
	uid_t			sender_euid;
	binder_size_t		data_size;	/* number of bytes of data */
	binder_size_t		offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			binder_uintptr_t	buffer;
			/* offsets from buffer to flat_binder_object structs */
			binder_uintptr_t	offsets;
		} ptr;
		__u8	buf[8];
	} data;
};

struct binder_transaction_data_secctx {
	struct binder_transaction_data transaction_data;
	binder_uintptr_t secctx;
};

struct binder_transaction_data_sg {
	struct binder_transaction_data transaction_data;
	binder_size_t buffers_size;
};

struct binder_ptr_cookie {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

struct binder_handle_cookie {
	__u32 handle;
	binder_uintptr_t cookie;
} __packed;

struct binder_pri_desc {
	__s32 priority;
	__u32 desc;
};

struct binder_pri_ptr_cookie {
	__s32 priority;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

enum binder_driver_return_protocol {
	BR_ERROR = _IOR('r', 0, __s32),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
				      struct binder_transaction_data_secctx),
	/*
	 * binder_transaction_data_secctx: the received command.
	 */
	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies). Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int: priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters. Do nothing and examine the next command. It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters. The driver has determined that a process has no
	 * threads waiting to service incoming transactions. When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
	 */
};
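/*
 * Example (illustrative sketch, not part of the UAPI): consuming the read
 * buffer filled in by BINDER_WRITE_READ. Each entry is a 32-bit BR_* code
 * followed by that command's payload, so the parser advances by the payload
 * size implied by the code. Assumes 'rbuf' and 'bwr' come from a preceding
 * BINDER_WRITE_READ call as in the earlier sketch; handle_transaction() is
 * a hypothetical helper, and only a few commands are shown.
 *
 *	char *ptr = rbuf;
 *	char *end = rbuf + bwr.read_consumed;
 *
 *	while (ptr < end) {
 *		__u32 cmd = *(__u32 *)ptr;
 *		ptr += sizeof(__u32);
 *
 *		switch (cmd) {
 *		case BR_NOOP:
 *		case BR_TRANSACTION_COMPLETE:
 *			break;			(no payload)
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data txn;
 *			memcpy(&txn, ptr, sizeof(txn));
 *			ptr += sizeof(txn);
 *			handle_transaction(&txn);
 *			break;
 *		}
 *		default:
 *			return -1;		(unhandled command)
 *		}
 *	}
 */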
enum binder_driver_command_protocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, __u32),
	BC_ACQUIRE = _IOW('c', 5, __u32),
	BC_RELEASE = _IOW('c', 6, __u32),
	BC_DECREFS = _IOW('c', 7, __u32),
	/*
	 * int: descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively. They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
					      struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
					   struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
	BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
	/*
	 * binder_transaction_data_sg: the sent command.
	 */
};

#endif /* _UAPI_LINUX_BINDER_H */