/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _UAPI_MSM_KGSL_H
#define _UAPI_MSM_KGSL_H

#include <linux/types.h>
#include <linux/ioctl.h>

/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backward compatibility and as a reminder that
 * software releases are never linear. Also, I like pie.
 */

#define KGSL_VERSION_MAJOR        3
#define KGSL_VERSION_MINOR        14

/*
 * We have traditionally mixed context and issueibcmds / command batch flags
 * together into a big flag stew. This worked fine until we started adding a
 * lot more command batch flags and we started running out of bits. Turns out
 * we have a bit of room in the context type / priority mask that we could use
 * for command batches, but that means we need to split out the flags into two
 * coherent sets.
 *
 * If any future definitions are for both context and cmdbatch add both defines
 * and link the cmdbatch to the context define as we do below. Otherwise feel
 * free to add exclusive bits to either set.
 */

/* --- context flags --- */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE	0x00000200
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC		0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT	0x00000800
/* Context priority lives in bits [12:15]; 0 means "undefined/default" */
#define KGSL_CONTEXT_PRIORITY_MASK	0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT	12
#define KGSL_CONTEXT_PRIORITY_UNDEF	0

#define KGSL_CONTEXT_IFH_NOP		0x00010000
#define KGSL_CONTEXT_SECURE		0x00020000
#define KGSL_CONTEXT_NO_SNAPSHOT	0x00040000
#define KGSL_CONTEXT_SPARSE		0x00080000

/* Preemption style lives in bits [25:27] */
#define KGSL_CONTEXT_PREEMPT_STYLE_MASK		0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT	25
#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT	0x0
#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER	0x1
#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN	0x2

/* Context type (API that created the context) lives in bits [20:24] */
#define KGSL_CONTEXT_TYPE_MASK		0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT		20
#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4
#define KGSL_CONTEXT_TYPE_VK		5
#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E

#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
#define KGSL_CONTEXT_LPAC		0x20000000
#define KGSL_CONTEXT_FAULT_INFO		0x40000000

#define KGSL_CONTEXT_INVALID		0xffffffff

/*
 * --- command batch flags ---
 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
 * definitions or bits that are valid for both contexts and cmdbatches. To be
 * safe the other 8 bits that are still available in the context field should be
 * omitted here in case we need to share - the other bits are available for
 * cmdbatch only flags as needed
 */
#define KGSL_CMDBATCH_MEMLIST		0x00000001
#define KGSL_CMDBATCH_MARKER		0x00000002
#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH /* 0x008 */
#define KGSL_CMDBATCH_PROFILING		0x00000010
/*
 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
 * to take effect, as the latter only affects the time data returned.
 */
#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME /* 0x100 */
#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC /* 0x400 */
#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
#define KGSL_CMDBATCH_SPARSE		0x1000 /* 0x1000 */
/* RECURRING bits must be set for LSR workload with IOCTL_KGSL_RECURRING_COMMAND. */
#define KGSL_CMDBATCH_START_RECURRING	0x00100000
#define KGSL_CMDBATCH_STOP_RECURRING	0x00200000

/*
 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
 * contexts and command batches. Update this comment as new flags are added.
 */

/*
 * gpu_command_object flags - these flags communicate the type of command or
 * memory object being submitted for a GPU command
 */

/* Flags for GPU command objects */
#define KGSL_CMDLIST_IB			0x00000001U
#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
#define KGSL_CMDLIST_IB_PREAMBLE	0x00000004U

/* Flags for GPU command memory objects */
#define KGSL_OBJLIST_MEMOBJ		0x00000008U
#define KGSL_OBJLIST_PROFILE		0x00000010U

/* Flags for GPU command sync points */
#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
#define KGSL_CMD_SYNCPOINT_TYPE_FENCE	1
#define KGSL_CMD_SYNCPOINT_TYPE_TIMELINE 2

/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_SECURE		(1ULL << 3)
#define KGSL_MEMFLAGS_GPUREADONLY	(1ULL << 24)
#define KGSL_MEMFLAGS_GPUWRITEONLY	(1ULL << 25)
#define KGSL_MEMFLAGS_FORCE_32BIT	(1ULL << 32)

/* Flag for binding all the virt range to single phys data */
#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
#define KGSL_SPARSE_BIND		0x1ULL
#define KGSL_SPARSE_UNBIND		0x2ULL

/* Memory caching hints (cache mode lives in bits [26:27]) */
#define KGSL_CACHEMODE_MASK		0x0C000000U
#define KGSL_CACHEMODE_SHIFT		26

#define KGSL_CACHEMODE_WRITECOMBINE	0
#define KGSL_CACHEMODE_UNCACHED		1
#define KGSL_CACHEMODE_WRITETHROUGH	2
#define KGSL_CACHEMODE_WRITEBACK	3

#define KGSL_MEMFLAGS_USE_CPU_MAP	(1ULL << 28)
#define KGSL_MEMFLAGS_SPARSE_PHYS	(1ULL << 29)
#define KGSL_MEMFLAGS_SPARSE_VIRT	(1ULL << 30)
#define KGSL_MEMFLAGS_IOCOHERENT	(1ULL << 31)
#define KGSL_MEMFLAGS_GUARD_PAGE	(1ULL << 33)
#define KGSL_MEMFLAGS_VBO		(1ULL << 34)
#define KGSL_MEMFLAGS_VBO_NO_MAP_ZERO	(1ULL << 35)

/* Memory types for which allocations are made (usage hint, bits [8:15]) */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY		0
#define KGSL_MEMTYPE_FRAMEBUFFER	1
#define KGSL_MEMTYPE_RENDERBUFFER	2
#define KGSL_MEMTYPE_ARRAYBUFFER	3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER	4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER	5
#define KGSL_MEMTYPE_TEXTURE		6
#define KGSL_MEMTYPE_SURFACE		7
#define KGSL_MEMTYPE_EGL_SURFACE	8
#define KGSL_MEMTYPE_GL			9
#define KGSL_MEMTYPE_CL			10
#define KGSL_MEMTYPE_CL_BUFFER_MAP	11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP	12
#define KGSL_MEMTYPE_CL_IMAGE_MAP	13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP	14
#define KGSL_MEMTYPE_CL_KERNEL_STACK	15
#define KGSL_MEMTYPE_COMMAND		16
#define KGSL_MEMTYPE_2D			17
#define KGSL_MEMTYPE_EGL_IMAGE		18
#define KGSL_MEMTYPE_EGL_SHADOW		19
#define KGSL_MEMTYPE_MULTISAMPLE	20
#define KGSL_MEMTYPE_KERNEL		255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16

enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/*
	 * ION type is retained for backward compatibility but ION buffers are
	 * dma-bufs so try to use that naming if we can
	 */
	KGSL_USER_MEM_TYPE_DMABUF	= 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
#define KGSL_MEMFLAGS_USERMEM_MASK	0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT	5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM	0
#define KGSL_MEMFLAGS_USERMEM_PMEM	KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
	KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR	KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION	KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)

/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE		0x00000000
#define KGSL_FLAGS_SAFEMODE		0x00000001
#define KGSL_FLAGS_INITIALIZED0		0x00000002
#define KGSL_FLAGS_INITIALIZED		0x00000004
#define KGSL_FLAGS_STARTED		0x00000008
#define KGSL_FLAGS_ACTIVE		0x00000010
#define KGSL_FLAGS_RESERVED0		0x00000020
#define KGSL_FLAGS_RESERVED1		0x00000040
#define KGSL_FLAGS_RESERVED2		0x00000080
#define KGSL_FLAGS_SOFT_RESET		0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT	2000

/* UBWC Modes */
#define KGSL_UBWC_NONE	0
#define KGSL_UBWC_1_0	1
#define KGSL_UBWC_2_0	2
#define KGSL_UBWC_3_0	3
#define KGSL_UBWC_4_0	4

/*
 * Reset status values for context
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};

#define KGSL_CONVERT_TO_MBPS(val) \
	(val*1000*1000U)

/* Device info returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	__kernel_size_t gmem_sizebytes;
};

/*
 * struct kgsl_devmemstore - this structure defines the region of memory
 * that can be mmap()ed from this driver. The timestamp fields are volatile
 * because they are written by the GPU
 * @soptimestamp: Start of pipeline timestamp written by GPU before the
 * commands in concern are processed
 * @sbz: Unused, kept for 8 byte alignment
 * @eoptimestamp: End of pipeline timestamp written by GPU after the
 * commands in concern are processed
 * @sbz2: Unused, kept for 8 byte alignment
 * @preempted: Indicates if the context was preempted
 * @sbz3: Unused, kept for 8 byte alignment
 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
 * @sbz4: Unused, kept for 8 byte alignment
 * @current_context: The current context the GPU is working on
 * @sbz5: Unused, kept for 8 byte alignment
 */
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;
	unsigned int sbz;
	volatile unsigned int eoptimestamp;
	unsigned int sbz2;
	volatile unsigned int preempted;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/* Byte offset of a memstore field for a given context id */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	offsetof(struct kgsl_devmemstore, field))

/* timestamp id*/
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};

/* property types - used with kgsl_device_getproperty */
#define KGSL_PROP_DEVICE_INFO		0x1
#define KGSL_PROP_DEVICE_SHADOW		0x2
#define KGSL_PROP_DEVICE_POWER		0x3
#define KGSL_PROP_SHMEM			0x4
#define KGSL_PROP_SHMEM_APERTURES	0x5
#define KGSL_PROP_MMU_ENABLE		0x6
#define KGSL_PROP_INTERRUPT_WAITS	0x7
#define KGSL_PROP_VERSION		0x8
#define KGSL_PROP_GPU_RESET_STAT	0x9
#define KGSL_PROP_PWRCTRL		0xE
#define KGSL_PROP_PWR_CONSTRAINT	0x12
#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
#define KGSL_PROP_SP_GENERIC_MEM	0x14
#define KGSL_PROP_UCODE_VERSION		0x15
#define KGSL_PROP_GPMU_VERSION		0x16
#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
#define KGSL_PROP_DEVICE_BITNESS	0x18
#define KGSL_PROP_DEVICE_QDSS_STM	0x19
#define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
#define KGSL_PROP_UBWC_MODE		0x1B
#define KGSL_PROP_DEVICE_QTIMER		0x20
#define KGSL_PROP_L3_PWR_CONSTRAINT	0x22
#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
#define KGSL_PROP_SECURE_CTXT_SUPPORT	0x24
#define KGSL_PROP_SPEED_BIN		0x25
#define KGSL_PROP_GAMING_BIN		0x26
#define KGSL_PROP_QUERY_CAPABILITIES	0x27
#define KGSL_PROP_CONTEXT_PROPERTY	0x28
#define KGSL_PROP_GPU_MODEL		0x29
#define KGSL_PROP_VK_DEVICE_ID		0x2A
#define KGSL_PROP_IS_LPAC_ENABLED	0x2B
#define KGSL_PROP_GPU_VA64_SIZE		0x2C
#define KGSL_PROP_IS_RAYTRACING_ENABLED	0x2D
#define KGSL_PROP_IS_FASTBLEND_ENABLED	0x2E
#define KGSL_PROP_UCHE_TRAP_BASE	0x2F
#define KGSL_PROP_IS_AQE_ENABLED	0x30
#define KGSL_PROP_GPU_SECURE_VA_SIZE	0x31
#define KGSL_PROP_GPU_SECURE_VA_INUSE	0x32

/*
 * kgsl_capabilities_properties returns a list of supported properties.
 * If the user passes 0 for 'count' the kernel will set it to the number of
 * supported properties. The list is expected to be 'count * sizeof(__u32)'
 * bytes long. The kernel will return the actual number of entries copied into
 * list via 'count'.
 */
struct kgsl_capabilities_properties {
	__u64 list;
	__u32 count;
};

/*
 * KGSL_QUERY_CAPS_PROPERTIES returns a list of the valid properties in the
 * kernel. The subtype data should be struct kgsl_capabilities_properties
 */
#define KGSL_QUERY_CAPS_PROPERTIES 1

/*
 * kgsl_capabilities allows the user to query kernel capabilities. The 'data'
 * type should be set appropriately for the querytype (see above). Pass 0 to
 * 'size' and the kernel will set it to the expected size of 'data' that is
 * appropriate for querytype (in bytes).
 */
struct kgsl_capabilities {
	__u64 data;
	__u64 size;
	__u32 querytype;
};

struct kgsl_shadowprop {
	unsigned long gpuaddr;
	__kernel_size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

struct kgsl_qdss_stm_prop {
	__u64 gpuaddr;
	__u64 size;
};

struct kgsl_qtimer_prop {
	__u64 gpuaddr;
	__u64 size;
};

struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

struct kgsl_sp_generic_mem {
	__u64 local;
	__u64 pvt;
};

struct kgsl_ucode_version {
	unsigned int pfp;
	unsigned int pm4;
};

struct kgsl_gpmu_version {
	unsigned int major;
	unsigned int minor;
	unsigned int features;
};

struct kgsl_context_property {
	__u64 data;
	__u32 size;
	__u32 type;
	__u32 contextid;
};

struct kgsl_context_property_fault {
	__s32 faults;
	__u32 timestamp;
};

struct kgsl_gpu_model {
	char gpu_model[32];
};

/* Context property sub types */
#define KGSL_CONTEXT_PROP_FAULTS 1

/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
#define KGSL_PERFCOUNTER_GROUP_MH 0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
#define KGSL_PERFCOUNTER_GROUP_SX 0x12
#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
#define KGSL_PERFCOUNTER_GROUP_L2 0x16
#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
#define KGSL_PERFCOUNTER_GROUP_GLC 0x24
#define KGSL_PERFCOUNTER_GROUP_FCHE 0x25
#define KGSL_PERFCOUNTER_GROUP_MHUB 0x26
#define KGSL_PERFCOUNTER_GROUP_GMU_XOCLK 0x27
#define KGSL_PERFCOUNTER_GROUP_GMU_GMUCLK 0x28
#define KGSL_PERFCOUNTER_GROUP_GMU_PERF 0x29
#define KGSL_PERFCOUNTER_GROUP_SW 0x2a
#define KGSL_PERFCOUNTER_GROUP_UFC 0x2b
#define KGSL_PERFCOUNTER_GROUP_BV_CP 0x2c
#define KGSL_PERFCOUNTER_GROUP_BV_PC 0x2d
#define KGSL_PERFCOUNTER_GROUP_BV_VFD 0x2e
#define KGSL_PERFCOUNTER_GROUP_BV_VPC 0x2f
#define KGSL_PERFCOUNTER_GROUP_BV_TP 0x30
#define KGSL_PERFCOUNTER_GROUP_BV_SP 0x31
#define KGSL_PERFCOUNTER_GROUP_BV_UFC 0x32
#define KGSL_PERFCOUNTER_GROUP_BV_TSE 0x33
505 #define KGSL_PERFCOUNTER_GROUP_BV_RAS 0x34 506 #define KGSL_PERFCOUNTER_GROUP_BV_LRZ 0x35 507 #define KGSL_PERFCOUNTER_GROUP_BV_HLSQ 0x36 508 #define KGSL_PERFCOUNTER_GROUP_MAX 0x37 509 510 #define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF 511 #define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE 512 513 /* structure holds list of ibs */ 514 struct kgsl_ibdesc { 515 unsigned long gpuaddr; 516 unsigned long __pad; 517 __kernel_size_t sizedwords; 518 unsigned int ctrl; 519 }; 520 521 /** 522 * struct kgsl_cmdbatch_profiling_buffer 523 * @wall_clock_s: Ringbuffer submission time (seconds). 524 * If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided 525 * in kernel clocks, otherwise wall clock time is used. 526 * @wall_clock_ns: Ringbuffer submission time (nanoseconds). 527 * If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided 528 * in kernel clocks, otherwise wall clock time is used. 529 * @gpu_ticks_queued: GPU ticks at ringbuffer submission 530 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution 531 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution 532 * 533 * This structure defines the profiling buffer used to measure cmdbatch 534 * execution time 535 */ 536 struct kgsl_cmdbatch_profiling_buffer { 537 __u64 wall_clock_s; 538 __u64 wall_clock_ns; 539 __u64 gpu_ticks_queued; 540 __u64 gpu_ticks_submitted; 541 __u64 gpu_ticks_retired; 542 }; 543 544 /* ioctls */ 545 #define KGSL_IOC_TYPE 0x09 546 547 /* 548 * get misc info about the GPU 549 * type should be a value from enum kgsl_property_type 550 * value points to a structure that varies based on type 551 * sizebytes is sizeof() that structure 552 * for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo 553 * this structure contaings hardware versioning info. 554 * for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop 555 * this is used to find mmap() offset and sizes for mapping 556 * struct kgsl_memstore into userspace. 
557 */ 558 struct kgsl_device_getproperty { 559 unsigned int type; 560 void __user *value; 561 __kernel_size_t sizebytes; 562 }; 563 564 #define IOCTL_KGSL_DEVICE_GETPROPERTY \ 565 _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty) 566 567 /* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012 568 */ 569 570 /* block until the GPU has executed past a given timestamp 571 * timeout is in milliseconds. 572 */ 573 struct kgsl_device_waittimestamp { 574 unsigned int timestamp; 575 unsigned int timeout; 576 }; 577 578 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \ 579 _IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp) 580 581 struct kgsl_device_waittimestamp_ctxtid { 582 unsigned int context_id; 583 unsigned int timestamp; 584 unsigned int timeout; 585 }; 586 587 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \ 588 _IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid) 589 590 /* DEPRECATED: issue indirect commands to the GPU. 591 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE 592 * ibaddr and sizedwords must specify a subset of a buffer created 593 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM 594 * flags may be a mask of KGSL_CONTEXT_ values 595 * timestamp is a returned counter value which can be passed to 596 * other ioctls to determine when the commands have been executed by 597 * the GPU. 
598 * 599 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS 600 * instead 601 */ 602 struct kgsl_ringbuffer_issueibcmds { 603 unsigned int drawctxt_id; 604 unsigned long ibdesc_addr; 605 unsigned int numibs; 606 unsigned int timestamp; /*output param */ 607 unsigned int flags; 608 }; 609 610 #define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \ 611 _IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds) 612 613 /* read the most recently executed timestamp value 614 * type should be a value from enum kgsl_timestamp_type 615 */ 616 struct kgsl_cmdstream_readtimestamp { 617 unsigned int type; 618 unsigned int timestamp; /*output param */ 619 }; 620 621 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \ 622 _IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp) 623 624 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \ 625 _IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp) 626 627 /* free memory when the GPU reaches a given timestamp. 628 * gpuaddr specify a memory region created by a 629 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call 630 * type should be a value from enum kgsl_timestamp_type 631 */ 632 struct kgsl_cmdstream_freememontimestamp { 633 unsigned long gpuaddr; 634 unsigned int type; 635 unsigned int timestamp; 636 }; 637 638 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \ 639 _IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp) 640 641 /* 642 * Previous versions of this header had incorrectly defined 643 * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead 644 * of a write only ioctl. To ensure binary compatibility, the following 645 * #define will be used to intercept the incorrect ioctl 646 */ 647 648 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \ 649 _IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp) 650 651 /* create a draw context, which is used to preserve GPU state. 
652 * The flags field may contain a mask KGSL_CONTEXT_* values 653 */ 654 struct kgsl_drawctxt_create { 655 unsigned int flags; 656 unsigned int drawctxt_id; /*output param */ 657 }; 658 659 #define IOCTL_KGSL_DRAWCTXT_CREATE \ 660 _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create) 661 662 /* destroy a draw context */ 663 struct kgsl_drawctxt_destroy { 664 unsigned int drawctxt_id; 665 }; 666 667 #define IOCTL_KGSL_DRAWCTXT_DESTROY \ 668 _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy) 669 670 /* 671 * add a block of pmem, fb, ashmem or user allocated address 672 * into the GPU address space 673 */ 674 struct kgsl_map_user_mem { 675 int fd; 676 unsigned long gpuaddr; /*output param */ 677 __kernel_size_t len; 678 __kernel_size_t offset; 679 unsigned long hostptr; /*input param */ 680 enum kgsl_user_mem_type memtype; 681 unsigned int flags; 682 }; 683 684 #define IOCTL_KGSL_MAP_USER_MEM \ 685 _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem) 686 687 struct kgsl_cmdstream_readtimestamp_ctxtid { 688 unsigned int context_id; 689 unsigned int type; 690 unsigned int timestamp; /*output param */ 691 }; 692 693 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \ 694 _IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid) 695 696 struct kgsl_cmdstream_freememontimestamp_ctxtid { 697 unsigned int context_id; 698 unsigned long gpuaddr; 699 unsigned int type; 700 unsigned int timestamp; 701 }; 702 703 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \ 704 _IOW(KGSL_IOC_TYPE, 0x17, \ 705 struct kgsl_cmdstream_freememontimestamp_ctxtid) 706 707 /* add a block of pmem or fb into the GPU address space */ 708 struct kgsl_sharedmem_from_pmem { 709 int pmem_fd; 710 unsigned long gpuaddr; /*output param */ 711 unsigned int len; 712 unsigned int offset; 713 }; 714 715 #define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \ 716 _IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem) 717 718 /* remove memory from the GPU's address space */ 719 struct 
kgsl_sharedmem_free { 720 unsigned long gpuaddr; 721 }; 722 723 #define IOCTL_KGSL_SHAREDMEM_FREE \ 724 _IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free) 725 726 struct kgsl_cff_user_event { 727 unsigned char cff_opcode; 728 unsigned int op1; 729 unsigned int op2; 730 unsigned int op3; 731 unsigned int op4; 732 unsigned int op5; 733 unsigned int __pad[2]; 734 }; 735 736 #define IOCTL_KGSL_CFF_USER_EVENT \ 737 _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event) 738 739 struct kgsl_gmem_desc { 740 unsigned int x; 741 unsigned int y; 742 unsigned int width; 743 unsigned int height; 744 unsigned int pitch; 745 }; 746 747 struct kgsl_buffer_desc { 748 void *hostptr; 749 unsigned long gpuaddr; 750 int size; 751 unsigned int format; 752 unsigned int pitch; 753 unsigned int enabled; 754 }; 755 756 struct kgsl_bind_gmem_shadow { 757 unsigned int drawctxt_id; 758 struct kgsl_gmem_desc gmem_desc; 759 unsigned int shadow_x; 760 unsigned int shadow_y; 761 struct kgsl_buffer_desc shadow_buffer; 762 unsigned int buffer_id; 763 }; 764 765 #define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \ 766 _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow) 767 768 /* add a block of memory into the GPU address space */ 769 770 /* 771 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012 772 * use IOCTL_KGSL_GPUMEM_ALLOC instead 773 */ 774 775 struct kgsl_sharedmem_from_vmalloc { 776 unsigned long gpuaddr; /*output param */ 777 unsigned int hostptr; 778 unsigned int flags; 779 }; 780 781 #define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \ 782 _IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc) 783 784 /* 785 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which 786 * supports both directions (flush and invalidate). This code will still 787 * work, but by definition it will do a flush of the cache which might not be 788 * what you want to have happen on a buffer following a GPU operation. 
It is 789 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC 790 */ 791 792 #define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \ 793 _IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free) 794 795 struct kgsl_drawctxt_set_bin_base_offset { 796 unsigned int drawctxt_id; 797 unsigned int offset; 798 }; 799 800 #define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \ 801 _IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset) 802 803 enum kgsl_cmdwindow_type { 804 KGSL_CMDWINDOW_MIN = 0x00000000, 805 KGSL_CMDWINDOW_2D = 0x00000000, 806 KGSL_CMDWINDOW_3D = 0x00000001, /* legacy */ 807 KGSL_CMDWINDOW_MMU = 0x00000002, 808 KGSL_CMDWINDOW_ARBITER = 0x000000FF, 809 KGSL_CMDWINDOW_MAX = 0x000000FF, 810 }; 811 812 /* write to the command window */ 813 struct kgsl_cmdwindow_write { 814 enum kgsl_cmdwindow_type target; 815 unsigned int addr; 816 unsigned int data; 817 }; 818 819 #define IOCTL_KGSL_CMDWINDOW_WRITE \ 820 _IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write) 821 822 struct kgsl_gpumem_alloc { 823 unsigned long gpuaddr; /* output param */ 824 __kernel_size_t size; 825 unsigned int flags; 826 }; 827 828 #define IOCTL_KGSL_GPUMEM_ALLOC \ 829 _IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc) 830 831 struct kgsl_cff_syncmem { 832 unsigned long gpuaddr; 833 __kernel_size_t len; 834 unsigned int __pad[2]; /* For future binary compatibility */ 835 }; 836 837 #define IOCTL_KGSL_CFF_SYNCMEM \ 838 _IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem) 839 840 /* 841 * A timestamp event allows the user space to register an action following an 842 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to 843 * _IOWR to support fences which need to return a fd for the priv parameter. 
844 */ 845 846 struct kgsl_timestamp_event { 847 int type; /* Type of event (see list below) */ 848 unsigned int timestamp; /* Timestamp to trigger event on */ 849 unsigned int context_id; /* Context for the timestamp */ 850 void __user *priv; /* Pointer to the event specific blob */ 851 __kernel_size_t len; /* Size of the event specific blob */ 852 }; 853 854 #define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \ 855 _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event) 856 857 /* A genlock timestamp event releases an existing lock on timestamp expire */ 858 859 #define KGSL_TIMESTAMP_EVENT_GENLOCK 1 860 861 struct kgsl_timestamp_event_genlock { 862 int handle; /* Handle of the genlock lock to release */ 863 }; 864 865 /* A fence timestamp event releases an existing lock on timestamp expire */ 866 867 #define KGSL_TIMESTAMP_EVENT_FENCE 2 868 869 struct kgsl_timestamp_event_fence { 870 int fence_fd; /* Fence to signal */ 871 }; 872 873 /* 874 * Set a property within the kernel. Uses the same structure as 875 * IOCTL_KGSL_GETPROPERTY 876 */ 877 878 #define IOCTL_KGSL_SETPROPERTY \ 879 _IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty) 880 881 #define IOCTL_KGSL_TIMESTAMP_EVENT \ 882 _IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event) 883 884 /** 885 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID 886 * @id: returned id value for this allocation. 887 * @flags: mask of KGSL_MEM* values requested and actual flags on return. 888 * @size: requested size of the allocation and actual size on return. 889 * @mmapsize: returned size to pass to mmap() which may be larger than 'size' 890 * @gpuaddr: returned GPU address for the allocation 891 * 892 * Allocate memory for access by the GPU. The flags and size fields are echoed 893 * back by the kernel, so that the caller can know if the request was 894 * adjusted. 
895 * 896 * Supported flags: 897 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer 898 * KGSL_MEMTYPE*: usage hint for debugging aid 899 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel. 900 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU 901 * address will be 0. Calling mmap() will set the GPU address. 902 */ 903 struct kgsl_gpumem_alloc_id { 904 unsigned int id; 905 unsigned int flags; 906 __kernel_size_t size; 907 __kernel_size_t mmapsize; 908 unsigned long gpuaddr; 909 /* private: reserved for future use*/ 910 unsigned long __pad[2]; 911 }; 912 913 #define IOCTL_KGSL_GPUMEM_ALLOC_ID \ 914 _IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id) 915 916 /** 917 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID 918 * @id: GPU allocation id to free 919 * 920 * Free an allocation by id, in case a GPU address has not been assigned or 921 * is unknown. Freeing an allocation by id with this ioctl or by GPU address 922 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent. 923 */ 924 struct kgsl_gpumem_free_id { 925 unsigned int id; 926 /* private: reserved for future use*/ 927 unsigned int __pad; 928 }; 929 930 #define IOCTL_KGSL_GPUMEM_FREE_ID \ 931 _IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id) 932 933 /** 934 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO 935 * @gpuaddr: GPU address to query. Also set on return. 936 * @id: GPU allocation id to query. Also set on return. 937 * @flags: returned mask of KGSL_MEM* values. 938 * @size: returned size of the allocation. 939 * @mmapsize: returned size to pass mmap(), which may be larger than 'size' 940 * @useraddr: returned address of the userspace mapping for this buffer 941 * 942 * This ioctl allows querying of all user visible attributes of an existing 943 * allocation, by either the GPU address or the id returned by a previous 944 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. 
Legacy allocation ioctls may not 945 * return all attributes so this ioctl can be used to look them up if needed. 946 * 947 */ 948 struct kgsl_gpumem_get_info { 949 unsigned long gpuaddr; 950 unsigned int id; 951 unsigned int flags; 952 __kernel_size_t size; 953 __kernel_size_t mmapsize; 954 unsigned long useraddr; 955 /* private: reserved for future use*/ 956 unsigned long __pad[4]; 957 }; 958 959 #define IOCTL_KGSL_GPUMEM_GET_INFO\ 960 _IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info) 961 962 /** 963 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE 964 * @gpuaddr: GPU address of the buffer to sync. 965 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient. 966 * @op: a mask of KGSL_GPUMEM_CACHE_* values 967 * @offset: offset into the buffer 968 * @length: number of bytes starting from offset to perform 969 * the cache operation on 970 * 971 * Sync the L2 cache for memory headed to and from the GPU - this replaces 972 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both 973 * directions 974 * 975 */ 976 struct kgsl_gpumem_sync_cache { 977 unsigned long gpuaddr; 978 unsigned int id; 979 unsigned int op; 980 __kernel_size_t offset; 981 __kernel_size_t length; 982 }; 983 984 #define KGSL_GPUMEM_CACHE_CLEAN (1 << 0) 985 #define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN 986 987 #define KGSL_GPUMEM_CACHE_INV (1 << 1) 988 #define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV 989 990 #define KGSL_GPUMEM_CACHE_FLUSH \ 991 (KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV) 992 993 /* Flag to ensure backward compatibility of kgsl_gpumem_sync_cache struct */ 994 #define KGSL_GPUMEM_CACHE_RANGE (1 << 31U) 995 996 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE \ 997 _IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache) 998 999 /** 1000 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET 1001 * @groupid: Performance counter group ID 1002 * @countable: Countable to select within the group 
 * @offset: Return offset of the reserved LO counter
 * @offset_hi: Return offset of the reserved HI counter
 *
 * Get an available performance counter from a specified groupid. The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group. An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left. After successfully getting a perfcounter, the user
 * must call kgsl_perfcounter_put(groupid, countable) when finished with
 * the perfcounter to clear up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
	unsigned int groupid;
	unsigned int countable;
	unsigned int offset;
	unsigned int offset_hi;
	/* private: reserved for future use */
	unsigned int __pad; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)

/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken. This is only to be called after
 * successfully getting a performance counter from kgsl_perfcounter_get().
 *
 */
struct kgsl_perfcounter_put {
	unsigned int groupid;
	unsigned int countable;
	/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)

/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return active countables array
 * @count: Number of entries in @countables
 * @max_counters: Return total number counters for the group ID
 *
 * Query the available performance counters given a groupid. The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * size or counter->size for the group id. The total number of available
 * counters for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
	unsigned int groupid;
	/* Array to return the current countable for up to size counters */
	unsigned int __user *countables;
	unsigned int count;
	unsigned int max_counters;
	/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)

/**
 * struct kgsl_perfcounter_read_group - one sample in the array passed to
 * IOCTL_KGSL_PERFCOUNTER_READ
 * @groupid: Performance counter group ID
 * @countable: Performance counter countable ID
 * @value: Returned performance counter reading
 *
 * Read in the current value of a performance counter given by the groupid
 * and countable.
 *
 */

struct kgsl_perfcounter_read_group {
	unsigned int groupid;
	unsigned int countable;
	unsigned long long value;
};

/* Argument to IOCTL_KGSL_PERFCOUNTER_READ: @reads points to @count entries */
struct kgsl_perfcounter_read {
	struct kgsl_perfcounter_read_group __user *reads;
	unsigned int count;
	/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
/*
 * struct kgsl_gpumem_sync_cache_bulk - argument to
 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
 * @id_list: list of GPU buffer ids of the buffers to sync
 * @count: number of GPU buffer ids in id_list
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the cache for memory headed to and from the GPU. Certain
 * optimizations can be made on the cache operation based on the total
 * size of the working set of memory to be managed.
 */
struct kgsl_gpumem_sync_cache_bulk {
	unsigned int __user *id_list;
	unsigned int count;
	unsigned int op;
	/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)

/*
 * struct kgsl_cmd_syncpoint_timestamp
 * @context_id: ID of a KGSL context
 * @timestamp: GPU timestamp
 *
 * This structure defines a syncpoint comprising a context/timestamp pair. A
 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
 * dependencies that must be met before the command can be submitted to the
 * hardware
 */
struct kgsl_cmd_syncpoint_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

/*
 * A fence syncpoint: @fd is a sync fence file descriptor the command must
 * wait on before being submitted to hardware.
 */
struct kgsl_cmd_syncpoint_fence {
	int fd;
};

/*
 * struct kgsl_cmd_syncpoint_timeline
 * @timelines: Address of an array of &struct kgsl_timeline_val
 * @count: Number of entries in @timelines
 * @timelines_size: Size of each entry in @timelines
 *
 * Define a syncpoint for a number of timelines. This syncpoint will
 * be satisfied when all of the specified timelines are signaled.
 */
struct kgsl_cmd_syncpoint_timeline {
	__u64 timelines;
	__u32 count;
	__u32 timelines_size;
};

/**
 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
 * @type: type of sync point defined here
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 *
 * This structure contains pointers defining a specific command sync point.
 * The pointer and size should point to a type appropriate structure.
 */
struct kgsl_cmd_syncpoint {
	int type;
	void __user *priv;
	__kernel_size_t size;
};

/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2

/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: Submission flags (NOTE(review): presumably the command batch
 * KGSL_CMDBATCH_* flags defined above — the original doc left this blank;
 * confirm against the driver)
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry a user defined timestamp, on exit the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware. This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
 * submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied. This entry point is the new and preferred way
 * to submit commands to the GPU. The memory list can be used to specify all
 * memory that is referenced in the current set of commands.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc __user *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint __user *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
	/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)

/**
 * struct kgsl_device_constraint - device constraint argument
 * @type: type of constraint i.e pwrlevel/none
 * @context_id: KGSL context ID
 * @data: constraint data
 * @size: size of the constraint data
 */
struct kgsl_device_constraint {
	unsigned int type;
	unsigned int context_id;
	void __user *data;
	__kernel_size_t size;
};

/* Constraint Type */
#define KGSL_CONSTRAINT_NONE 0
#define KGSL_CONSTRAINT_PWRLEVEL 1

/* L3 constraint Type */
#define KGSL_CONSTRAINT_L3_NONE 2
#define KGSL_CONSTRAINT_L3_PWRLEVEL 3

/* PWRLEVEL constraint level */
/* set to min frequency */
#define KGSL_CONSTRAINT_PWR_MIN 0
/* set to max frequency */
#define KGSL_CONSTRAINT_PWR_MAX 1

/* Payload for KGSL_CONSTRAINT_PWRLEVEL: the requested power level */
struct kgsl_device_constraint_pwrlevel {
	unsigned int level;
};

/**
 * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
 * @id: returned id for the syncsource that was created.
 *
 * This ioctl creates a userspace sync timeline.
 */

struct kgsl_syncsource_create {
	unsigned int id;
	/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)

/**
 * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
 * @id: syncsource id to destroy
 *
 * This ioctl destroys a userspace sync timeline.
 */

struct kgsl_syncsource_destroy {
	unsigned int id;
	/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)

/**
 * struct kgsl_syncsource_create_fence - Argument to
 * IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
 * @id: syncsource id
 * @fence_fd: returned sync_fence fd
 *
 * Create a fence that may be signaled by userspace by calling
 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
 * these fences.
 */
struct kgsl_syncsource_create_fence {
	unsigned int id;
	int fence_fd;
	/* private: reserved for future use */
	unsigned int __pad[4];
};

/**
 * struct kgsl_syncsource_signal_fence - Argument to
 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
 * @id: syncsource id
 * @fence_fd: sync_fence fd to signal
 *
 * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
 * call using the same syncsource id. This allows a fence to be shared
 * to other processes but only signaled by the process owning the fd
 * used to create the fence.
 */
#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)

struct kgsl_syncsource_signal_fence {
	unsigned int id;
	int fence_fd;
	/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)

/**
 * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
 * @offset: Offset into the GPU object to sync
 * @length: Number of bytes to sync
 * @id: ID of the GPU object to sync
 */
struct kgsl_cff_sync_gpuobj {
	__u64 offset;
	__u64 length;
	unsigned int id;
};

#define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)

/**
 * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
 * @size: Size in bytes of the object to allocate
 * @flags: mask of KGSL_MEMFLAG_* bits
 * @va_len: Size in bytes of the virtual region to allocate
 * @mmapsize: Returns the mmap() size of the object
 * @id: Returns the GPU object ID of the new object
 * @metadata_len: Length of the metadata to copy from the user
 * @metadata: Pointer to the user specified metadata to store for the object
 */
struct kgsl_gpuobj_alloc {
	__u64 size;
	__u64 flags;
	__u64 va_len;
	__u64 mmapsize;
	unsigned int id;
	unsigned int metadata_len;
	__u64 metadata;
};

/* Let the user know that this header supports the gpuobj metadata */
#define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64

#define IOCTL_KGSL_GPUOBJ_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)

/**
 * struct kgsl_gpuobj_free - Argument to IOCTL_KGSL_GPUOBJ_FREE
 * @flags: Mask of: KGSL_GPUOBJ_FREE_ON_EVENT
 * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
 * specified
 * @id: ID of the GPU object to free
 * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
 * event to free on
 * @len: Length of the data passed in priv
 */
struct kgsl_gpuobj_free {
	__u64 flags;
	__u64 __user priv;
	unsigned int id;
	unsigned int type;
	unsigned int len;
};

#define KGSL_GPUOBJ_FREE_ON_EVENT 1

#define KGSL_GPU_EVENT_TIMESTAMP 1
#define KGSL_GPU_EVENT_FENCE 2

/**
 * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
 * object on
 * @context_id: ID of the timestamp event to wait for
 * @timestamp: Timestamp of the timestamp event to wait for
 */
struct kgsl_gpu_event_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

/**
 * struct kgsl_gpu_event_fence - Specifies a fence ID to free a GPU object on
 * @fd: File descriptor for the fence
 */
struct kgsl_gpu_event_fence {
	int fd;
};

#define IOCTL_KGSL_GPUOBJ_FREE \
	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)

/**
 * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
 * @gpuaddr: GPU address of the object
 * @flags: Current flags for the object
 * @size: Size of the object
 * @va_len: VA size of the object
 * @va_addr: Virtual address of the object (if it is mapped)
 * @id: GPU object ID of the object to query
 */
struct kgsl_gpuobj_info {
	__u64 gpuaddr;
	__u64 flags;
	__u64 size;
	__u64 va_len;
	__u64 va_addr;
	unsigned int id;
};

#define IOCTL_KGSL_GPUOBJ_INFO \
	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)

/**
 * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
 * @priv: Pointer to the private data for the import type
 * @priv_len: Length of the private data
 * @flags: Mask of KGSL_MEMFLAG_
 * flags
 * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
 * @id: Returns the ID of the new GPU object
 */
struct kgsl_gpuobj_import {
	__u64 __user priv;
	__u64 priv_len;
	__u64 flags;
	unsigned int type;
	unsigned int id;
};

/**
 * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
 * @fd: File descriptor for the dma-buf object
 */
struct kgsl_gpuobj_import_dma_buf {
	int fd;
};

/**
 * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
 * @virtaddr: Virtual address of the object to import
 */
struct kgsl_gpuobj_import_useraddr {
	__u64 virtaddr;
};

#define IOCTL_KGSL_GPUOBJ_IMPORT \
	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)

/**
 * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
 * @offset: Offset within the GPU object to sync
 * @length: Number of bytes to sync
 * @id: ID of the GPU object to sync
 * @op: Cache operation to execute
 */

struct kgsl_gpuobj_sync_obj {
	__u64 offset;
	__u64 length;
	unsigned int id;
	unsigned int op;
};

/**
 * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
 * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
 * @obj_len: Size of each item in the array
 * @count: Number of items in the array
 */

struct kgsl_gpuobj_sync {
	__u64 __user objs;
	unsigned int obj_len;
	unsigned int count;
};

#define IOCTL_KGSL_GPUOBJ_SYNC \
	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)

/**
 * struct kgsl_command_object - GPU command object
 * @offset: GPU address offset of the object
 * @gpuaddr: GPU address of the object
 * @size: Size of the object
 * @flags: Current flags for the object
 * @id: GPU command object ID
 */
struct kgsl_command_object {
	__u64 offset;
	__u64 gpuaddr;
	__u64 size;
	unsigned int flags;
	unsigned int id;
};

/**
 * struct kgsl_command_syncpoint - GPU syncpoint object
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 * @type: type of sync point defined here
 */
struct kgsl_command_syncpoint {
	__u64 __user priv;
	__u64 size;
	unsigned int type;
};

/**
 * struct kgsl_gpu_command - Argument for IOCTL_KGSL_GPU_COMMAND
 * @flags: Current flags for the object
 * @cmdlist: List of kgsl_command_objects for submission
 * @cmdsize: Size of kgsl_command_objects structure
 * @numcmds: Number of kgsl_command_objects in command list
 * @objlist: List of kgsl_command_objects for tracking
 * @objsize: Size of kgsl_command_objects structure
 * @numobjs: Number of kgsl_command_objects in object list
 * @synclist: List of kgsl_command_syncpoints
 * @syncsize: Size of kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
 * @timestamp: Timestamp for the submitted commands
 */
struct kgsl_gpu_command {
	__u64 flags;
	__u64 __user cmdlist;
	unsigned int cmdsize;
	unsigned int numcmds;
	__u64 __user objlist;
	unsigned int objsize;
	unsigned int numobjs;
	__u64 __user synclist;
	unsigned int syncsize;
	unsigned int numsyncs;
	unsigned int context_id;
	unsigned int timestamp;
};

#define IOCTL_KGSL_GPU_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)

/**
 * struct kgsl_preemption_counters_query - argument to
 * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
 * @counters: Return preemption counters array
 * @size_user: Size allocated by userspace
 * @size_priority_level: Size of preemption counters for each
 * priority level
 *
 * @max_priority_level: Return max number of priority levels
 *
 * Query the available preemption counters. The array counters
 * is used to return preemption counters. The size of the array
 * is passed in so the kernel will only write at most size_user
 * or max available preemption counters. The total number of
 * preemption counters is returned in max_priority_level. If the
 * array or size passed in are invalid, then an error is
 * returned back.
 */
struct kgsl_preemption_counters_query {
	__u64 __user counters;
	unsigned int size_user;
	unsigned int size_priority_level;
	unsigned int max_priority_level;
};

#define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)

/**
 * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
 * @flags: Flags to indicate which parameters to change
 * @metadata: If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
 * metadata
 * @id: GPU memory object ID to change
 * @metadata_len: If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
 * new metadata string
 * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
 */

#define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
#define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)

struct kgsl_gpuobj_set_info {
	__u64 flags;
	__u64 metadata;
	unsigned int id;
	unsigned int metadata_len;
	unsigned int type;
};

#define IOCTL_KGSL_GPUOBJ_SET_INFO \
	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)

/**
 * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
 * @size: Size in bytes to back
 * @pagesize: Pagesize alignment required
 * @flags: Flags for this allocation
 * @id: Returned ID for this allocation
 */
struct kgsl_sparse_phys_alloc {
	__u64 size;
	__u64 pagesize;
	__u64 flags;
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)

/**
 * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
 * @id: ID to free
 */
struct kgsl_sparse_phys_free {
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_PHYS_FREE \
	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)

/**
 * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
 * @size: Size in bytes to reserve
 * @pagesize: Pagesize alignment required
 * @flags: Flags for this allocation
 * @gpuaddr: Returned GPU address for this allocation
 * @id: Returned ID for this allocation
 */
struct kgsl_sparse_virt_alloc {
	__u64 size;
	__u64 pagesize;
	__u64 flags;
	__u64 gpuaddr;
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)

/**
 * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
 * @id: ID to free
 */
struct kgsl_sparse_virt_free {
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_VIRT_FREE \
	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)

/**
 * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
 * @virtoffset: Offset into the virtual ID
 * @physoffset: Offset into the physical ID (bind only)
 * @size: Size in bytes to reserve
 * @flags: Flags for this kgsl_sparse_binding_object
 * @id: Physical ID to bind (bind only)
 */
struct kgsl_sparse_binding_object {
	__u64 virtoffset;
	__u64 physoffset;
	__u64 size;
	__u64 flags;
	unsigned int id;
};

/**
 * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
 * @list: List of kgsl_sparse_bind_objects to
 * bind/unbind
 * @id: Virtual ID to bind/unbind
 * @size: Size of kgsl_sparse_bind_object
 * @count: Number of elements in list
 *
 */
struct kgsl_sparse_bind {
	__u64 __user list;
	unsigned int id;
	unsigned int size;
	unsigned int count;
};

#define IOCTL_KGSL_SPARSE_BIND \
	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)

/**
 * struct kgsl_gpu_sparse_command - Argument for
 * IOCTL_KGSL_GPU_SPARSE_COMMAND
 * @flags: Current flags for the object
 * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
 * @synclist: List of kgsl_command_syncpoints
 * @sparsesize: Size of kgsl_sparse_binding_object
 * @numsparse: Number of elements in list
 * @syncsize: Size of kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
 * @timestamp: Timestamp for the submitted commands
 * @id: Virtual ID to bind/unbind
 */
struct kgsl_gpu_sparse_command {
	__u64 flags;
	__u64 __user sparselist;
	__u64 __user synclist;
	unsigned int sparsesize;
	unsigned int numsparse;
	unsigned int syncsize;
	unsigned int numsyncs;
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int id;
};

#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)

#define KGSL_GPUMEM_RANGE_OP_BIND 1
#define KGSL_GPUMEM_RANGE_OP_UNBIND 2

/**
 * struct kgsl_gpumem_bind_range - specifies a bind operation for a virtual
 * buffer object
 * @child_offset: Offset to the start of memory within the child buffer object
 * (not used for KGSL_GPUMEM_RANGE_OP_UNBIND operations)
 * @target_offset: GPU address offset within the target VBO
 * @length: Amount of memory to map/unmap (in bytes)
 * @child_id: The GPU buffer ID for the
 * child object to map/unmap in the VBO
 * @op: One of KGSL_GPUMEM_RANGE_OP_BIND or KGSL_GPUMEM_RANGE_OP_UNBIND
 *
 * This defines a specific bind operation to a virtual buffer object specified
 * in &struct kgsl_gpumem_bind_ranges. When @op is KGSL_GPUMEM_RANGE_OP_BIND the
 * physical memory starting at @child_offset in the memory object identified by
 * @child_id will be mapped into the target virtual buffer object starting at
 * @target_offset for @length bytes.
 *
 * When @op is KGSL_GPUMEM_RANGE_OP_UNBIND any entries in the target virtual
 * buffer object between @target_offset and @target_offset + @length that
 * belong to @child_id will be removed.
 */
struct kgsl_gpumem_bind_range {
	__u64 child_offset;
	__u64 target_offset;
	__u64 length;
	__u32 child_id;
	__u32 op;
};

#define KGSL_GPUMEM_BIND_ASYNC (1UL << 0)
#define KGSL_GPUMEM_BIND_FENCE_OUT (1UL << 1)

/**
 * struct kgsl_gpumem_bind_ranges - Argument to IOCTL_KGSL_GPUMEM_BIND_RANGES to
 * either map or unmap a child buffer object into a virtual buffer object.
 * @ranges: User memory pointer to an array of range operations of type &struct
 * kgsl_gpumem_bind_range
 * @ranges_nents: Number of entries in @ranges
 * @ranges_size: Size of each entry in @ranges in bytes
 * @id: GPU buffer object identifier for the target virtual buffer object
 * @flags: Bitmap of KGSL_GPUMEM_BIND_ASYNC and KGSL_GPUMEM_BIND_FENCE_OUT
 * @fence_id: If KGSL_GPUMEM_BIND_FENCE_OUT is set in @flags contains the
 * identifier for the sync fence that will be signaled after the operation
 * completes
 *
 * Describes a number of range operations to perform on a virtual buffer object
 * identified by @id. Ranges should be a __u64 representation of an array of
 * &struct kgsl_gpumem_bind_range entries.
@ranges_nents will contain the number
 * of entries in the array, and @ranges_size will contain the size of each entry
 * in the array. If KGSL_GPUMEM_BIND_ASYNC is set the operation will be
 * performed asynchronously and the operation will immediately return to the
 * user. Otherwise the calling context will block until the operation has
 * completed.
 *
 * If KGSL_GPUMEM_BIND_ASYNC and KGSL_GPUMEM_BIND_FENCE_OUT are both set a sync
 * fence will be created and returned in @fence_id. The fence will be signaled
 * when the bind operation has completed.
 */
struct kgsl_gpumem_bind_ranges {
	__u64 ranges;
	__u32 ranges_nents;
	__u32 ranges_size;
	__u32 id;
	__u32 flags;
	int fence_id;
	/* private: 64 bit compatibility */
	__u32 padding;
};

#define IOCTL_KGSL_GPUMEM_BIND_RANGES \
	_IOWR(KGSL_IOC_TYPE, 0x56, struct kgsl_gpumem_bind_ranges)

#define KGSL_GPU_AUX_COMMAND_BIND (1 << 0)
#define KGSL_GPU_AUX_COMMAND_TIMELINE (1 << 1)
/* Reuse the same flag that GPU COMMAND uses */
#define KGSL_GPU_AUX_COMMAND_SYNC KGSL_CMDBATCH_SYNC

/**
 * struct kgsl_gpu_aux_command_bind - Descriptor for a GPU AUX bind command
 * @rangeslist: Pointer to a list of &struct kgsl_gpumem_bind_range items
 * @numranges: Number of entries in @rangeslist
 * @rangesize: Size of each entry in @rangeslist
 * @target: The GPU memory ID for the target virtual buffer object
 *
 * Describe a GPU AUX command to bind ranges in a virtual buffer object.
 * @rangeslist points to an array of &struct kgsl_gpumem_bind_range items in the
 * same format as used by IOCTL_KGSL_GPUMEM_BIND_RANGES. @numranges is the size
 * of the array in @rangeslist and @rangesize is the size of each entity in
 * @rangeslist. @target points to the GPU ID for the target VBO object.
 */
struct kgsl_gpu_aux_command_bind {
	__u64 rangeslist;
	__u64 numranges;
	__u64 rangesize;
	__u32 target;
	/* private: Padding for 64 bit compatibility */
	__u32 padding;
};

/**
 * struct kgsl_gpu_aux_command_generic - Container for an AUX command
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 * @type: Type of the AUX command described by @priv
 *
 * Describes a generic container for GPU aux commands. @priv is a user pointer
 * to the command struct matching @type of size @size.
 */
struct kgsl_gpu_aux_command_generic {
	__u64 priv;
	__u64 size;
	__u32 type;
	/* private: Padding for 64 bit compatibility */
	__u32 padding;
};

/**
 * struct kgsl_gpu_aux_command - Argument for IOCTL_KGSL_GPU_AUX_COMMAND
 * @flags: flags for the object
 * @cmdlist: List of &struct kgsl_gpu_aux_command_generic objects
 * @cmdsize: Size of each entry in @cmdlist
 * @numcmds: Number of entries in @cmdlist
 * @synclist: List of &struct kgsl_command_syncpoint objects
 * @syncsize: Size of each entry in @synclist
 * @numsyncs: Number of entries in @synclist
 * @context_id: ID of the context submitting the aux command
 * @timestamp: Timestamp for the command submission
 *
 * Describe a GPU auxiliary command. Auxiliary commands are tasks that are not
 * performed on hardware but can be queued like normal GPU commands. Like GPU
 * commands AUX commands are assigned a timestamp and processed in order in the
 * queue. They can also have standard sync objects attached. The only
 * difference is that AUX commands usually perform some sort of administrative
 * task in the CPU and are retired in the dispatcher.
 *
 * For bind operations flags must have one of the KGSL_GPU_AUX_COMMAND_* flags
 * set. If sync objects are attached KGSL_GPU_AUX_COMMAND_SYNC must be set.
 * @cmdlist points to an array of &struct kgsl_gpu_aux_command_generic structs
 * which in turn will have a pointer to a specific command type.
 * @numcmds is the number of commands in the list and @cmdsize is the size
 * of each entity in @cmdlist.
 *
 * If KGSL_GPU_AUX_COMMAND_SYNC is specified @synclist will point to an array of
 * &struct kgsl_command_syncpoint items in the same fashion as a GPU hardware
 * command. @numsyncs and @syncsize describe the list.
 *
 * @context_id is the context that is submitting the command and @timestamp
 * contains the timestamp for the operation.
 */
struct kgsl_gpu_aux_command {
	__u64 flags;
	__u64 cmdlist;
	__u32 cmdsize;
	__u32 numcmds;
	__u64 synclist;
	__u32 syncsize;
	__u32 numsyncs;
	__u32 context_id;
	__u32 timestamp;
};

#define IOCTL_KGSL_GPU_AUX_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x57, struct kgsl_gpu_aux_command)

/**
 * struct kgsl_timeline_create - Argument for IOCTL_KGSL_TIMELINE_CREATE
 * @seqno: Initial sequence number for the timeline
 * @id: Timeline identifier [out]
 *
 * Create a new semaphore timeline and return the identifier in @id.
 * The identifier is global for the device and can be used to
 * identify the timeline in all subsequent commands.
 */
struct kgsl_timeline_create {
	__u64 seqno;
	__u32 id;
	/* private: padding for 64 bit compatibility */
	__u32 padding;
};

#define IOCTL_KGSL_TIMELINE_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x58, struct kgsl_timeline_create)

/**
 * struct kgsl_timeline_val - A container to store a timeline/sequence number
 * pair.
 * @seqno: Sequence number to signal/query
 * @timeline: The timeline identifier to signal/query
 *
 * A container to store a timeline/seqno pair used by the query and signal
 * ioctls.
 */
struct kgsl_timeline_val {
	__u64 seqno;
	__u32 timeline;
	/* private: padding for 64 bit compatibility */
	__u32 padding;
};

#define KGSL_TIMELINE_WAIT_ALL 1
#define KGSL_TIMELINE_WAIT_ANY 2

/**
 * struct kgsl_timeline_wait - Argument for IOCTL_KGSL_TIMELINE_WAIT
 * @tv_sec: Number of seconds to wait for the signal
 * @tv_nsec: Number of nanoseconds to wait for the signal
 * @timelines: Address of an array of &struct kgsl_timeline_val entries
 * @count: Number of entries in @timelines
 * @timelines_size: Size of each entry in @timelines
 * @flags: One of KGSL_TIMELINE_WAIT_ALL or KGSL_TIMELINE_WAIT_ANY
 *
 * Wait for the timelines listed in @timelines to be signaled. If @flags is
 * equal to KGSL_TIMELINE_WAIT_ALL then wait for all timelines or if
 * KGSL_TIMELINE_WAIT_ANY is specified then wait for any of the timelines to
 * signal. @tv_sec and @tv_nsec indicates the number of seconds and nanoseconds
 * that the process should be blocked waiting for the signal.
 */
struct kgsl_timeline_wait {
	__s64 tv_sec;
	__s64 tv_nsec;
	__u64 timelines;
	__u32 count;
	__u32 timelines_size;
	__u32 flags;
	/* private: padding for 64 bit compatibility */
	__u32 padding;
};

#define IOCTL_KGSL_TIMELINE_WAIT \
	_IOW(KGSL_IOC_TYPE, 0x59, struct kgsl_timeline_wait)

#define IOCTL_KGSL_TIMELINE_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x5A, struct kgsl_timeline_val)

/**
 * struct kgsl_timeline_signal - argument for IOCTL_KGSL_TIMELINE_SIGNAL
 * @timelines: Address of an array of &struct kgsl_timeline_val entries
 * @count: Number of entries in @timelines
 * @timelines_size: Size of each entry in @timelines
 *
 * Signal an array of timelines of type &struct kgsl_timeline_val.
 */
struct kgsl_timeline_signal {
	__u64 timelines;
	__u32 count;
	__u32 timelines_size;
};

#define IOCTL_KGSL_TIMELINE_SIGNAL \
	_IOW(KGSL_IOC_TYPE, 0x5B, struct kgsl_timeline_signal)

/**
 * struct kgsl_timeline_fence_get - argument for IOCTL_KGSL_TIMELINE_FENCE_GET
 * @seqno: Sequence number for the fence
 * @timeline: Timeline to create the fence on
 * @handle: Contains the fence fd for a successful operation [out]
 *
 * Create a sync file descriptor for the @seqno on the timeline and return it in
 * @handle. Can be polled and queried just like any other sync file descriptor.
 */
struct kgsl_timeline_fence_get {
	__u64 seqno;
	__u32 timeline;
	int handle;
};

#define IOCTL_KGSL_TIMELINE_FENCE_GET \
	_IOWR(KGSL_IOC_TYPE, 0x5C, struct kgsl_timeline_fence_get)
/**
 * IOCTL_KGSL_TIMELINE_DESTROY takes a u32 identifier for the timeline to
 * destroy
 */
#define IOCTL_KGSL_TIMELINE_DESTROY _IOW(KGSL_IOC_TYPE, 0x5D, __u32)

/**
 * struct kgsl_gpu_aux_command_timeline - An aux command for timeline signals
 * @timelines: An array of &struct kgsl_timeline_val elements
 * @count: The number of entries in @timelines
 * @timelines_size: The size of each element in @timelines
 *
 * An aux command for timeline signals that can be pointed to by
 * &struct kgsl_gpu_aux_command_generic when the type is
 * KGSL_GPU_AUX_COMMAND_TIMELINE.
 */
struct kgsl_gpu_aux_command_timeline {
	__u64 timelines;
	__u32 count;
	__u32 timelines_size;
};

/* Macros for fault type used in kgsl_fault structure */
#define KGSL_FAULT_TYPE_NO_FAULT 0
#define KGSL_FAULT_TYPE_PAGEFAULT 1
#define KGSL_FAULT_TYPE_MAX 2

/* Macros to be used in kgsl_pagefault_report structure */
#define KGSL_PAGEFAULT_TYPE_NONE 0
#define KGSL_PAGEFAULT_TYPE_READ (1 << 0)
#define KGSL_PAGEFAULT_TYPE_WRITE (1 << 1)
#define KGSL_PAGEFAULT_TYPE_TRANSLATION (1 << 2)
#define KGSL_PAGEFAULT_TYPE_PERMISSION (1 << 3)
#define KGSL_PAGEFAULT_TYPE_EXTERNAL (1 << 4)
#define KGSL_PAGEFAULT_TYPE_TRANSACTION_STALLED (1 << 5)

/**
 * struct kgsl_pagefault_report - Descriptor for each page fault
 * @fault_addr: page fault address
 * @fault_type: type of page fault
 *
 * Contains information about supported GPU page fault.
 * Supported fault type: KGSL_PAGEFAULT_TYPE_*
 */
struct kgsl_pagefault_report {
	__u64 fault_addr;
	/* private: reserved for future use */
	__u64 reserved[2];
	__u32 fault_type;
	/* private: padding for 64 bit compatibility */
	__u32 __pad;
};

/**
 * struct kgsl_fault - Descriptor for each GPU fault type
 * @fault: User memory pointer to list of specific fault type
 * @type: Type of gpu fault
 * @count: Number of entries in @fault
 * @size: Size of each entry in @fault in bytes
 *
 * Contains information about each GPU fault type. If user passes 0 for all the fields, KGSL
 * will return the @count and @type of fault. Based on this, user can allocate a buffer for
 * specific fault type, fill the @fault and specify the structure size of type specific fault
 * in @size. User can walk through @fault list to parse the fault type specific information.
 *
 * Supported type: KGSL_FAULT_TYPE_*
 */
struct kgsl_fault {
	__u64 fault;
	__u32 type;
	__u32 count;
	__u32 size;
	/* private: padding for 64 bit compatibility */
	__u32 padding;
};

/**
 * struct kgsl_fault_report - Container for list of GPU faults
 * @faultlist: User memory pointer to list of fault descriptor &struct kgsl_fault
 * @faultnents: Number of entries in @faultlist. Each entry corresponds to a fault type i.e.
 * KGSL_FAULT_TYPE_*
 * @faultsize: Size of each entry in @faultlist in bytes
 * @context_id: ID of a KGSL context
 *
 * Returns a list of GPU faults for a context identified by @context_id. If the user specifies
 * @context_id only, then KGSL will set the @faultnents to the number of fault types it has
 * for that context.
 *
 * User is expected to allocate an array of &struct kgsl_fault with @faultnents number of entries
 * and fill the @faultlist field. On calling IOCTL_KGSL_GET_FAULT_REPORT, KGSL will return the
 * type and count for each fault. Based on this, user needs to update the &struct kgsl_fault
 * entries. Then, it should call IOCTL_KGSL_GET_FAULT_REPORT again for the kernel to fill the
 * fault information.
 */
struct kgsl_fault_report {
	__u64 faultlist;
	__u32 faultnents;
	__u32 faultsize;
	__u32 context_id;
	/* private: padding for 64 bit compatibility */
	__u32 padding;
};

#define IOCTL_KGSL_GET_FAULT_REPORT \
	_IOWR(KGSL_IOC_TYPE, 0x5E, struct kgsl_fault_report)

/**
 * struct kgsl_recurring_command - Argument for IOCTL_KGSL_RECURRING_COMMAND
 * @flags: Current flags for the object
 * @cmdlist: List of kgsl_command_objects for submission
 * @cmdsize: Size of kgsl_command_objects structure
 * @numcmds: Number of kgsl_command_objects in command list
 * @objlist: List of kgsl_command_objects for tracking
 * @objsize: Size of kgsl_command_objects structure
 * @numobjs: Number of kgsl_command_objects in object list
 * @context_id: Context ID submitting the kgsl_recurring_command
 */
struct kgsl_recurring_command {
	__u64 flags;
	__u64 __user cmdlist;
	__u32 cmdsize;
	__u32 numcmds;
	__u64 __user objlist;
	__u32 objsize;
	__u32 numobjs;
	__u32 context_id;
	/* private: Padding for 64 bit compatibility */
	__u32 padding;
};

#define IOCTL_KGSL_RECURRING_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x5F, struct kgsl_recurring_command)

#endif /* _UAPI_MSM_KGSL_H */