• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  */
5 
6 #ifndef _MSM_KGSL_H
7 #define _MSM_KGSL_H
8 
9 #include <linux/types.h>
10 #include <linux/ioctl.h>
11 
/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
 * software releases are never linear. Also, I like pie.
 */

#define KGSL_VERSION_MAJOR        3
#define KGSL_VERSION_MINOR        14

/*
 * We have traditionally mixed context and issueibcmds / command batch flags
 * together into a big flag stew. This worked fine until we started adding a
 * lot more command batch flags and we started running out of bits. Turns out
 * we have a bit of room in the context type / priority mask that we could use
 * for command batches, but that means we need to split out the flags into two
 * coherent sets.
 *
 * If any future definitions are for both context and cmdbatch add both defines
 * and link the cmdbatch to the context define as we do below. Otherwise feel
 * free to add exclusive bits to either set.
 */

/* --- context flags --- */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC               0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
/* Context priority: a 4-bit field at bits [12:15]; 0 means "undefined" */
#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT     12
#define KGSL_CONTEXT_PRIORITY_UNDEF     0

#define KGSL_CONTEXT_IFH_NOP            0x00010000
#define KGSL_CONTEXT_SECURE             0x00020000
#define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
#define KGSL_CONTEXT_SPARSE             0x00080000

/* Preemption style: a 3-bit field at bits [25:27] */
#define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT    0x0
#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN  0x2

/* Context type (client API hint): a 5-bit field at bits [20:24] */
#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT         20
#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4
#define KGSL_CONTEXT_TYPE_VK		5
#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E

#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000

/* Sentinel value meaning "no valid context id" */
#define KGSL_CONTEXT_INVALID 0xffffffff
79 
/*
 * --- command batch flags ---
 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
 * definitions or bits that are valid for both contexts and cmdbatches.  To be
 * safe the other 8 bits that are still available in the context field should be
 * omitted here in case we need to share - the other bits are available for
 * cmdbatch only flags as needed
 */
#define KGSL_CMDBATCH_MEMLIST		0x00000001
#define KGSL_CMDBATCH_MARKER		0x00000002
#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
#define KGSL_CMDBATCH_PROFILING		0x00000010
/*
 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
 * to take effect, as the latter only affects the time data returned.
 */
#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
#define KGSL_CMDBATCH_SPARSE	    0x1000 /* 0x1000 */

/*
 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
 * contexts and command batches.  Update this comment as new flags are added.
 */
107 
/*
 * gpu_command_object flags - these flags communicate the type of command or
 * memory object being submitted for a GPU command
 */

/* Flags for GPU command objects */
#define KGSL_CMDLIST_IB                  0x00000001U
#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
#define KGSL_CMDLIST_IB_PREAMBLE         0x00000004U

/* Flags for GPU command memory objects */
#define KGSL_OBJLIST_MEMOBJ  0x00000008U
#define KGSL_OBJLIST_PROFILE 0x00000010U

/* Flags for GPU command sync points */
#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
125 
/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_SECURE      0x00000008ULL
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL

/* Flag for binding all the virt range to single phys data */
#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
#define KGSL_SPARSE_BIND 0x1ULL
#define KGSL_SPARSE_UNBIND 0x2ULL

/* Memory caching hints; the cache mode is a 2-bit field at bits [26:27] */
#define KGSL_CACHEMODE_MASK       0x0C000000U
#define KGSL_CACHEMODE_SHIFT 26

/* Values stored in the KGSL_CACHEMODE field */
#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED 1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK 3

#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
#define KGSL_MEMFLAGS_IOCOHERENT  0x80000000ULL
152 
/*
 * Memory types for which allocations are made - purely a usage hint for
 * debugging; stored in an 8-bit field at bits [8:15] of the memory flags.
 */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e 4k (2^12) would be 12, 64k (2^16) would be 16.
 * Stored in an 8-bit field at bits [16:23] of the memory flags.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16
186 
/* Source of a userspace buffer mapped via IOCTL_KGSL_MAP_USER_MEM */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/*
	 * ION type is retained for backwards compatibility but Ion buffers are
	 * dma-bufs so try to use that naming if we can.
	 * DMABUF deliberately aliases ION (same value).
	 */
	KGSL_USER_MEM_TYPE_DMABUF       = 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
/* Usermem origin: a 3-bit field at bits [5:7] of the memory flags */
#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT 5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM 0
#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
216 
/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE  0x00000000
#define KGSL_FLAGS_SAFEMODE    0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED     0x00000008
#define KGSL_FLAGS_ACTIVE      0x00000010
#define KGSL_FLAGS_RESERVED0   0x00000020
#define KGSL_FLAGS_RESERVED1   0x00000040
#define KGSL_FLAGS_RESERVED2   0x00000080
#define KGSL_FLAGS_SOFT_RESET  0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000

/* UBWC (universal bandwidth compression) modes reported by the driver */
#define KGSL_UBWC_NONE	0
#define KGSL_UBWC_1_0	1
#define KGSL_UBWC_2_0	2
#define KGSL_UBWC_3_0	3
#define KGSL_UBWC_4_0	4
240 
/*
 * Reset status values for context, as reported by
 * KGSL_PROP_GPU_RESET_STAT.
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};
250 
/*
 * Convert a bandwidth value given in MBps to bytes per second.
 * The argument is parenthesized so that passing an expression
 * (e.g. a + b) expands correctly.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
253 
/* Device information returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	size_t gmem_sizebytes;
};
271 
/*
 * struct kgsl_devmemstore - this structure defines the region of memory
 * that can be mmap()ed from this driver. The timestamp fields are __volatile__
 * because they are written by the GPU
 * @soptimestamp: Start of pipeline timestamp written by GPU before the
 * commands in concern are processed
 * @sbz: Unused, kept for 8 byte alignment
 * @eoptimestamp: End of pipeline timestamp written by GPU after the
 * commands in concern are processed
 * @sbz2: Unused, kept for 8 byte alignment
 * @preempted: Indicates if the context was preempted
 * @sbz3: Unused, kept for 8 byte alignment
 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
 * @sbz4: Unused, kept for 8 byte alignment
 * @current_context: The current context the GPU is working on
 * @sbz5: Unused, kept for 8 byte alignment
 */
struct kgsl_devmemstore {
	__volatile__ unsigned int soptimestamp;
	unsigned int sbz;
	__volatile__ unsigned int eoptimestamp;
	unsigned int sbz2;
	__volatile__ unsigned int preempted;
	unsigned int sbz3;
	__volatile__ unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};
301 
/*
 * Byte offset of @field within the memstore slot belonging to context
 * @ctxt_id (the memstore region is an array of kgsl_devmemstore).
 */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))

/* timestamp id */
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};
312 
/* property types - used with kgsl_device_getproperty */
#define KGSL_PROP_DEVICE_INFO		0x1
#define KGSL_PROP_DEVICE_SHADOW		0x2
#define KGSL_PROP_DEVICE_POWER		0x3
#define KGSL_PROP_SHMEM			0x4
#define KGSL_PROP_SHMEM_APERTURES	0x5
#define KGSL_PROP_MMU_ENABLE		0x6
#define KGSL_PROP_INTERRUPT_WAITS	0x7
#define KGSL_PROP_VERSION		0x8
#define KGSL_PROP_GPU_RESET_STAT	0x9
#define KGSL_PROP_PWRCTRL		0xE
#define KGSL_PROP_PWR_CONSTRAINT	0x12
#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
#define KGSL_PROP_SP_GENERIC_MEM	0x14
#define KGSL_PROP_UCODE_VERSION		0x15
#define KGSL_PROP_GPMU_VERSION		0x16
#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
#define KGSL_PROP_DEVICE_BITNESS	0x18
#define KGSL_PROP_DEVICE_QDSS_STM	0x19
#define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
#define KGSL_PROP_UBWC_MODE		0x1B
#define KGSL_PROP_DEVICE_QTIMER		0x20
#define KGSL_PROP_L3_PWR_CONSTRAINT     0x22
#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24
#define KGSL_PROP_SPEED_BIN		0x25
#define KGSL_PROP_GAMING_BIN		0x26
#define KGSL_PROP_QUERY_CAPABILITIES	0x27
#define KGSL_PROP_CONTEXT_PROPERTY	0x28
342 
/*
 * kgsl_capabilities_properties returns a list of supported properties.
 * If the user passes 0 for 'count' the kernel will set it to the number of
 * supported properties. The list is expected to be 'count * sizeof(uint32_t)'
 * bytes long. The kernel will return the actual number of entries copied into
 * list via 'count'.
 */
struct kgsl_capabilities_properties {
	__u64 list;	/* userspace pointer to the __u32 property array */
	__u32 count;	/* in: array capacity, out: entries copied */
};

/*
 * KGSL_QUERY_CAPS_PROPERTIES returns a list of the valid properties in the
 * kernel.  The subtype data should be struct kgsl_capabilities_properties
 */
#define KGSL_QUERY_CAPS_PROPERTIES 1

/*
 * kgsl_capabilities allows the user to query kernel capabilities. The 'data'
 * type should be set appropriately for the querytype (see above). Pass 0 to
 * 'size' and the kernel will set it to the expected size of 'data' that is
 * appropriate for querytype (in bytes).
 */
struct kgsl_capabilities {
	__u64 data;		/* userspace pointer to querytype-specific data */
	__u64 size;		/* in: size of data, out: expected size */
	__u32 querytype;	/* one of the KGSL_QUERY_CAPS_* values */
};
372 
/* Returned for KGSL_PROP_DEVICE_SHADOW - mmap()able shadow region info */
struct kgsl_shadowprop {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* Returned for KGSL_PROP_DEVICE_QDSS_STM */
struct kgsl_qdss_stm_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_DEVICE_QTIMER */
struct kgsl_qtimer_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_VERSION - driver and device version pairs */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

/* Returned for KGSL_PROP_SP_GENERIC_MEM */
struct kgsl_sp_generic_mem {
	uint64_t local;
	uint64_t pvt;
};

/* Returned for KGSL_PROP_UCODE_VERSION - microcode (PFP/PM4) versions */
struct kgsl_ucode_version {
	unsigned int pfp;
	unsigned int pm4;
};

/* Returned for KGSL_PROP_GPMU_VERSION */
struct kgsl_gpmu_version {
	unsigned int major;
	unsigned int minor;
	unsigned int features;
};

/* Argument for KGSL_PROP_CONTEXT_PROPERTY queries */
struct kgsl_context_property {
	__u64 data;		/* userspace pointer to type-specific data */
	__u32 size;		/* size of the data buffer in bytes */
	__u32 type;		/* context property sub type (see below) */
	__u32 contextid;	/* context to query */
};

/* Data returned for KGSL_CONTEXT_PROP_FAULTS */
struct kgsl_context_property_fault {
	__s32 faults;
	__u32 timestamp;
};

/* Context property sub types */
#define KGSL_CONTEXT_PROP_FAULTS 1
426 
/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
#define KGSL_PERFCOUNTER_GROUP_MH 0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
#define KGSL_PERFCOUNTER_GROUP_SX 0x12
#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
#define KGSL_PERFCOUNTER_GROUP_L2 0x16
#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
#define KGSL_PERFCOUNTER_GROUP_GLC 0x24
#define KGSL_PERFCOUNTER_GROUP_FCHE 0x25
#define KGSL_PERFCOUNTER_GROUP_MHUB 0x26
#define KGSL_PERFCOUNTER_GROUP_MAX 0x27

/* Sentinel countable values */
#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
472 
/* structure holds list of ibs (indirect buffers) */
struct kgsl_ibdesc {
	unsigned long gpuaddr;		/* GPU address of the IB */
	unsigned long __pad;		/* reserved, kept for binary compat */
	size_t sizedwords;		/* size of the IB in dwords */
	unsigned int ctrl;		/* KGSL_CMDLIST_* control flags */
};

/**
 * struct kgsl_cmdbatch_profiling_buffer
 * @wall_clock_s: Ringbuffer submission time (seconds).
 *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
 *                in kernel clocks, otherwise wall clock time is used.
 * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
 *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
 *                 in kernel clocks, otherwise wall clock time is used.
 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
 *
 * This structure defines the profiling buffer used to measure cmdbatch
 * execution time
 */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;
	uint64_t wall_clock_ns;
	uint64_t gpu_ticks_queued;
	uint64_t gpu_ticks_submitted;
	uint64_t gpu_ticks_retired;
};
503 
/* ioctls */
#define KGSL_IOC_TYPE 0x09

/*
 * get misc info about the GPU
 * type should be a value from enum kgsl_property_type
 * value points to a structure that varies based on type
 * sizebytes is sizeof() that structure
 * for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
 * this structure contains hardware versioning info.
 * for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
 * this is used to find mmap() offset and sizes for mapping
 * struct kgsl_memstore into userspace.
 */
struct kgsl_device_getproperty {
	unsigned int type;
	void *value;
	size_t sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)

/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* Same as above, but waits on a per-context timestamp */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
549 
/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned long ibdesc_addr;	/* userspace array of kgsl_ibdesc */
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)

/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/*
 * Previous versions of this header had incorrectly defined
 * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
 * of a write only ioctl.  To ensure binary compatibility, the following
 * #define will be used to intercept the incorrect ioctl
 */

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
610 
/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_*  values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /*output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)

/*
 * add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space
 */
struct kgsl_map_user_mem {
	int fd;
	unsigned long gpuaddr;   /*output param */
	size_t len;
	size_t offset;
	unsigned long hostptr;   /*input param */
	enum kgsl_user_mem_type memtype;
	unsigned int flags;
};

#define IOCTL_KGSL_MAP_USER_MEM \
	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)

/* per-context variant of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

/* per-context variant of IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x17, \
	struct kgsl_cmdstream_freememontimestamp_ctxtid)
666 
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr;  /*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

/* inject a user event into the CFF (common file format) capture stream */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];	/* reserved for future binary compatibility */
};

#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

/* describes a rectangular region of GMEM */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* describes a shadow buffer in system memory */
struct kgsl_buffer_desc {
	void		*hostptr;
	unsigned long	gpuaddr;
	int		size;
	unsigned int	format;
	unsigned int	pitch;
	unsigned int	enabled;
};

/* bind a shadow buffer to a GMEM region for a draw context */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
727 
/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation.  It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

/* set the bin base offset for a draw context */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)

/* target register window for kgsl_cmdwindow_write */
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
781 
/* allocate GPU-accessible memory; flags are KGSL_MEMFLAGS_* values */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

/* sync a memory range into the CFF capture stream */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
799 
/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void *priv;	 /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};

/*
 * NOTE(review): ioctl nr 0x31 is also used by IOCTL_KGSL_CFF_USER_EVENT
 * above with a different struct - historical overlap, verify before reuse.
 */
#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel.  Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
843 
/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use*/
	unsigned long __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)

/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use*/
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
892 
893 /**
894  * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
895  * @gpuaddr: GPU address to query. Also set on return.
896  * @id: GPU allocation id to query. Also set on return.
897  * @flags: returned mask of KGSL_MEM* values.
898  * @size: returned size of the allocation.
899  * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
900  * @useraddr: returned address of the userspace mapping for this buffer
901  *
902  * This ioctl allows querying of all user visible attributes of an existing
903  * allocation, by either the GPU address or the id returned by a previous
904  * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
905  * return all attributes so this ioctl can be used to look them up if needed.
906  *
907  */
908 struct kgsl_gpumem_get_info {
909 	unsigned long gpuaddr;
910 	unsigned int id;
911 	unsigned int flags;
912 	size_t size;
913 	size_t mmapsize;
914 	unsigned long useraddr;
915 /* private: reserved for future use*/
916 	unsigned long __pad[4];
917 };
918 
919 #define IOCTL_KGSL_GPUMEM_GET_INFO\
920 	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
921 
922 /**
923  * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
924  * @gpuaddr: GPU address of the buffer to sync.
925  * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
926  * @op: a mask of KGSL_GPUMEM_CACHE_* values
927  * @offset: offset into the buffer
928  * @length: number of bytes starting from offset to perform
929  * the cache operation on
930  *
931  * Sync the L2 cache for memory headed to and from the GPU - this replaces
932  * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
933  * directions
934  *
935  */
936 struct kgsl_gpumem_sync_cache {
937 	unsigned long gpuaddr;
938 	unsigned int id;
939 	unsigned int op;
940 	size_t offset;
941 	size_t length;
942 };
943 
944 #define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
945 #define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
946 
947 #define KGSL_GPUMEM_CACHE_INV (1 << 1)
948 #define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
949 
950 #define KGSL_GPUMEM_CACHE_FLUSH \
951 	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
952 
953 /* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
954 #define KGSL_GPUMEM_CACHE_RANGE (1 << 31U)
955 
956 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
957 	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
958 
959 /**
960  * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
961  * @groupid: Performance counter group ID
962  * @countable: Countable to select within the group
963  * @offset: Return offset of the reserved LO counter
964  * @offset_hi: Return offset of the reserved HI counter
965  *
966  * Get an available performance counter from a specified groupid.  The offset
967  * of the performance counter will be returned after successfully assigning
968  * the countable to the counter for the specified group.  An error will be
969  * returned and an offset of 0 if the groupid is invalid or there are no
970  * more counters left.  After successfully getting a perfcounter, the user
971  * must call kgsl_perfcounter_put(groupid, contable) when finished with
972  * the perfcounter to clear up perfcounter resources.
973  *
974  */
975 struct kgsl_perfcounter_get {
976 	unsigned int groupid;
977 	unsigned int countable;
978 	unsigned int offset;
979 	unsigned int offset_hi;
980 /* private: reserved for future use */
981 	unsigned int __pad; /* For future binary compatibility */
982 };
983 
984 #define IOCTL_KGSL_PERFCOUNTER_GET \
985 	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
986 
987 /**
988  * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
989  * @groupid: Performance counter group ID
990  * @countable: Countable to release within the group
991  *
992  * Put an allocated performance counter to allow others to have access to the
993  * resource that was previously taken.  This is only to be called after
994  * successfully getting a performance counter from kgsl_perfcounter_get().
995  *
996  */
997 struct kgsl_perfcounter_put {
998 	unsigned int groupid;
999 	unsigned int countable;
1000 /* private: reserved for future use */
1001 	unsigned int __pad[2]; /* For future binary compatibility */
1002 };
1003 
1004 #define IOCTL_KGSL_PERFCOUNTER_PUT \
1005 	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
1006 
1007 /**
1008  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
1009  * @groupid: Performance counter group ID
1010  * @countable: Return active countables array
1011  * @size: Size of active countables array
1012  * @max_counters: Return total number counters for the group ID
1013  *
1014  * Query the available performance counters given a groupid.  The array
1015  * *countables is used to return the current active countables in counters.
1016  * The size of the array is passed in so the kernel will only write at most
1017  * size or counter->size for the group id.  The total number of available
1018  * counters for the group ID is returned in max_counters.
1019  * If the array or size passed in are invalid, then only the maximum number
1020  * of counters will be returned, no data will be written to *countables.
1021  * If the groupid is invalid an error code will be returned.
1022  *
1023  */
1024 struct kgsl_perfcounter_query {
1025 	unsigned int groupid;
1026 	/* Array to return the current countable for up to size counters */
1027 	unsigned int *countables;
1028 	unsigned int count;
1029 	unsigned int max_counters;
1030 /* private: reserved for future use */
1031 	unsigned int __pad[2]; /* For future binary compatibility */
1032 };
1033 
1034 #define IOCTL_KGSL_PERFCOUNTER_QUERY \
1035 	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
1036 
1037 /**
1038  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
1039  * @groupid: Performance counter group IDs
1040  * @countable: Performance counter countable IDs
1041  * @value: Return performance counter reads
1042  * @size: Size of all arrays (groupid/countable pair and return value)
1043  *
1044  * Read in the current value of a performance counter given by the groupid
1045  * and countable.
1046  *
1047  */
1048 
1049 struct kgsl_perfcounter_read_group {
1050 	unsigned int groupid;
1051 	unsigned int countable;
1052 	unsigned long long value;
1053 };
1054 
1055 struct kgsl_perfcounter_read {
1056 	struct kgsl_perfcounter_read_group *reads;
1057 	unsigned int count;
1058 /* private: reserved for future use */
1059 	unsigned int __pad[2]; /* For future binary compatibility */
1060 };
1061 
1062 #define IOCTL_KGSL_PERFCOUNTER_READ \
1063 	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
1064 /*
1065  * struct kgsl_gpumem_sync_cache_bulk - argument to
1066  * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
1067  * @id_list: list of GPU buffer ids of the buffers to sync
1068  * @count: number of GPU buffer ids in id_list
1069  * @op: a mask of KGSL_GPUMEM_CACHE_* values
1070  *
1071  * Sync the cache for memory headed to and from the GPU. Certain
1072  * optimizations can be made on the cache operation based on the total
1073  * size of the working set of memory to be managed.
1074  */
1075 struct kgsl_gpumem_sync_cache_bulk {
1076 	unsigned int *id_list;
1077 	unsigned int count;
1078 	unsigned int op;
1079 /* private: reserved for future use */
1080 	unsigned int __pad[2]; /* For future binary compatibility */
1081 };
1082 
1083 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
1084 	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
1085 
1086 /*
1087  * struct kgsl_cmd_syncpoint_timestamp
1088  * @context_id: ID of a KGSL context
1089  * @timestamp: GPU timestamp
1090  *
1091  * This structure defines a syncpoint comprising a context/timestamp pair. A
1092  * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
1093  * dependencies that must be met before the command can be submitted to the
1094  * hardware
1095  */
1096 struct kgsl_cmd_syncpoint_timestamp {
1097 	unsigned int context_id;
1098 	unsigned int timestamp;
1099 };
1100 
/**
 * struct kgsl_cmd_syncpoint_fence - fence syncpoint for a command batch
 * @fd: file descriptor of the fence to wait on
 */
struct kgsl_cmd_syncpoint_fence {
	int fd;
};
1104 
1105 /**
1106  * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
1107  * @type: type of sync point defined here
1108  * @priv: Pointer to the type specific buffer
1109  * @size: Size of the type specific buffer
1110  *
1111  * This structure contains pointers defining a specific command sync point.
1112  * The pointer and size should point to a type appropriate structure.
1113  */
1114 struct kgsl_cmd_syncpoint {
1115 	int type;
1116 	void *priv;
1117 	size_t size;
1118 };
1119 
/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2

/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: Mask of command batch flags for this submission
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware.  This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way
 * to submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied.  This entry point is the new and preferred way
 * to submit commands to the GPU. The memory list can be used to specify all
 * memory that is referenced in the current set of commands.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
1159 
1160 /**
1161  * struct kgsl_device_constraint - device constraint argument
1162  * @context_id: KGSL context ID
1163  * @type: type of constraint i.e pwrlevel/none
1164  * @data: constraint data
1165  * @size: size of the constraint data
1166  */
1167 struct kgsl_device_constraint {
1168 	unsigned int type;
1169 	unsigned int context_id;
1170 	void *data;
1171 	size_t size;
1172 };
1173 
1174 /* Constraint Type*/
1175 #define KGSL_CONSTRAINT_NONE 0
1176 #define KGSL_CONSTRAINT_PWRLEVEL 1
1177 
1178 /* L3 constraint Type */
1179 #define KGSL_CONSTRAINT_L3_NONE	2
1180 #define KGSL_CONSTRAINT_L3_PWRLEVEL	3
1181 
1182 /* PWRLEVEL constraint level*/
1183 /* set to min frequency */
1184 #define KGSL_CONSTRAINT_PWR_MIN    0
1185 /* set to max frequency */
1186 #define KGSL_CONSTRAINT_PWR_MAX    1
1187 
1188 struct kgsl_device_constraint_pwrlevel {
1189 	unsigned int level;
1190 };
1191 
1192 /**
1193  * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1194  * @id: returned id for the syncsource that was created.
1195  *
1196  * This ioctl creates a userspace sync timeline.
1197  */
1198 
1199 struct kgsl_syncsource_create {
1200 	unsigned int id;
1201 /* private: reserved for future use */
1202 	unsigned int __pad[3];
1203 };
1204 
1205 #define IOCTL_KGSL_SYNCSOURCE_CREATE \
1206 	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1207 
1208 /**
1209  * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1210  * @id: syncsource id to destroy
1211  *
1212  * This ioctl creates a userspace sync timeline.
1213  */
1214 
1215 struct kgsl_syncsource_destroy {
1216 	unsigned int id;
1217 /* private: reserved for future use */
1218 	unsigned int __pad[3];
1219 };
1220 
1221 #define IOCTL_KGSL_SYNCSOURCE_DESTROY \
1222 	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1223 
1224 /**
1225  * struct kgsl_syncsource_create_fence - Argument to
1226  *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1227  * @id: syncsource id
1228  * @fence_fd: returned sync_fence fd
1229  *
1230  * Create a fence that may be signaled by userspace by calling
1231  * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1232  * these fences.
1233  */
1234 struct kgsl_syncsource_create_fence {
1235 	unsigned int id;
1236 	int fence_fd;
1237 /* private: reserved for future use */
1238 	unsigned int __pad[4];
1239 };
1240 
1241 /**
1242  * struct kgsl_syncsource_signal_fence - Argument to
1243  *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1244  * @id: syncsource id
1245  * @fence_fd: sync_fence fd to signal
1246  *
1247  * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1248  * call using the same syncsource id. This allows a fence to be shared
1249  * to other processes but only signaled by the process owning the fd
1250  * used to create the fence.
1251  */
1252 #define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
1253 	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
1254 
1255 struct kgsl_syncsource_signal_fence {
1256 	unsigned int id;
1257 	int fence_fd;
1258 /* private: reserved for future use */
1259 	unsigned int __pad[4];
1260 };
1261 
1262 #define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
1263 	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1264 
1265 /**
1266  * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
1267  * @offset: Offset into the GPU object to sync
1268  * @length: Number of bytes to sync
1269  * @id: ID of the GPU object to sync
1270  */
1271 struct kgsl_cff_sync_gpuobj {
1272 	uint64_t offset;
1273 	uint64_t length;
1274 	unsigned int id;
1275 };
1276 
1277 #define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
1278 	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
1279 
1280 /**
1281  * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
1282  * @size: Size in bytes of the object to allocate
1283  * @flags: mask of KGSL_MEMFLAG_* bits
1284  * @va_len: Size in bytes of the virtual region to allocate
1285  * @mmapsize: Returns the mmap() size of the object
1286  * @id: Returns the GPU object ID of the new object
1287  * @metadata_len: Length of the metdata to copy from the user
1288  * @metadata: Pointer to the user specified metadata to store for the object
1289  */
1290 struct kgsl_gpuobj_alloc {
1291 	uint64_t size;
1292 	uint64_t flags;
1293 	uint64_t va_len;
1294 	uint64_t mmapsize;
1295 	unsigned int id;
1296 	unsigned int metadata_len;
1297 	uint64_t metadata;
1298 };
1299 
1300 /* Let the user know that this header supports the gpuobj metadata */
1301 #define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64
1302 
1303 #define IOCTL_KGSL_GPUOBJ_ALLOC \
1304 	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1305 
1306 /**
1307  * struct kgsl_gpuobj_free - Argument to IOCTL_KGLS_GPUOBJ_FREE
1308  * @flags: Mask of: KGSL_GUPOBJ_FREE_ON_EVENT
1309  * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
1310  * specified
1311  * @id: ID of the GPU object to free
1312  * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
1313  * event to free on
1314  * @len: Length of the data passed in priv
1315  */
1316 struct kgsl_gpuobj_free {
1317 	uint64_t flags;
1318 	uint64_t priv;
1319 	unsigned int id;
1320 	unsigned int type;
1321 	unsigned int len;
1322 };
1323 
1324 #define KGSL_GPUOBJ_FREE_ON_EVENT 1
1325 
1326 #define KGSL_GPU_EVENT_TIMESTAMP 1
1327 #define KGSL_GPU_EVENT_FENCE     2
1328 
1329 /**
1330  * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
1331  * object on
1332  * @context_id: ID of the timestamp event to wait for
1333  * @timestamp: Timestamp of the timestamp event to wait for
1334  */
1335 struct kgsl_gpu_event_timestamp {
1336 	unsigned int context_id;
1337 	unsigned int timestamp;
1338 };
1339 
1340 /**
1341  * struct kgsl_gpu_event_fence - Specifies a fence ID to to free a GPU object on
1342  * @fd: File descriptor for the fence
1343  */
1344 struct kgsl_gpu_event_fence {
1345 	int fd;
1346 };
1347 
1348 #define IOCTL_KGSL_GPUOBJ_FREE \
1349 	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1350 
1351 /**
1352  * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
1353  * @gpuaddr: GPU address of the object
1354  * @flags: Current flags for the object
1355  * @size: Size of the object
1356  * @va_len: VA size of the object
1357  * @va_addr: Virtual address of the object (if it is mapped)
1358  * id - GPU object ID of the object to query
1359  */
1360 struct kgsl_gpuobj_info {
1361 	uint64_t gpuaddr;
1362 	uint64_t flags;
1363 	uint64_t size;
1364 	uint64_t va_len;
1365 	uint64_t va_addr;
1366 	unsigned int id;
1367 };
1368 
1369 #define IOCTL_KGSL_GPUOBJ_INFO \
1370 	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
1371 
1372 /**
1373  * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
1374  * @priv: Pointer to the private data for the import type
1375  * @priv_len: Length of the private data
1376  * @flags: Mask of KGSL_MEMFLAG_ flags
1377  * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
1378  * @id: Returns the ID of the new GPU object
1379  */
1380 struct kgsl_gpuobj_import {
1381 	uint64_t priv;
1382 	uint64_t priv_len;
1383 	uint64_t flags;
1384 	unsigned int type;
1385 	unsigned int id;
1386 };
1387 
1388 /**
1389  * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
1390  * @fd: File descriptor for the dma-buf object
1391  */
1392 struct kgsl_gpuobj_import_dma_buf {
1393 	int fd;
1394 };
1395 
1396 /**
1397  * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
1398  * @virtaddr: Virtual address of the object to import
1399  */
1400 struct kgsl_gpuobj_import_useraddr {
1401 	uint64_t virtaddr;
1402 };
1403 
1404 #define IOCTL_KGSL_GPUOBJ_IMPORT \
1405 	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
1406 
1407 /**
1408  * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
1409  * @offset: Offset within the GPU object to sync
1410  * @length: Number of bytes to sync
1411  * @id: ID of the GPU object to sync
1412  * @op: Cache operation to execute
1413  */
1414 
1415 struct kgsl_gpuobj_sync_obj {
1416 	uint64_t offset;
1417 	uint64_t length;
1418 	unsigned int id;
1419 	unsigned int op;
1420 };
1421 
1422 /**
1423  * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
1424  * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
1425  * @obj_len: Size of each item in the array
1426  * @count: Number of items in the array
1427  */
1428 
1429 struct kgsl_gpuobj_sync {
1430 	uint64_t objs;
1431 	unsigned int obj_len;
1432 	unsigned int count;
1433 };
1434 
1435 #define IOCTL_KGSL_GPUOBJ_SYNC \
1436 	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1437 
1438 /**
1439  * struct kgsl_command_object - GPU command object
1440  * @offset: GPU address offset of the object
1441  * @gpuaddr: GPU address of the object
1442  * @size: Size of the object
1443  * @flags: Current flags for the object
1444  * @id - GPU command object ID
1445  */
1446 struct kgsl_command_object {
1447 	uint64_t offset;
1448 	uint64_t gpuaddr;
1449 	uint64_t size;
1450 	unsigned int flags;
1451 	unsigned int id;
1452 };
1453 
1454 /**
1455  * struct kgsl_command_syncpoint - GPU syncpoint object
1456  * @priv: Pointer to the type specific buffer
1457  * @size: Size of the type specific buffer
1458  * @type: type of sync point defined here
1459  */
1460 struct kgsl_command_syncpoint {
1461 	uint64_t priv;
1462 	uint64_t size;
1463 	unsigned int type;
1464 };
1465 
1466 /**
1467  * struct kgsl_command_object - Argument for IOCTL_KGSL_GPU_COMMAND
1468  * @flags: Current flags for the object
1469  * @cmdlist: List of kgsl_command_objects for submission
1470  * @cmd_size: Size of kgsl_command_objects structure
1471  * @numcmds: Number of kgsl_command_objects in command list
1472  * @objlist: List of kgsl_command_objects for tracking
1473  * @obj_size: Size of kgsl_command_objects structure
1474  * @numobjs: Number of kgsl_command_objects in object list
1475  * @synclist: List of kgsl_command_syncpoints
1476  * @sync_size: Size of kgsl_command_syncpoint structure
1477  * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1478  * @context_id: Context ID submittin ghte kgsl_gpu_command
1479  * @timestamp: Timestamp for the submitted commands
1480  */
1481 struct kgsl_gpu_command {
1482 	uint64_t flags;
1483 	uint64_t cmdlist;
1484 	unsigned int cmdsize;
1485 	unsigned int numcmds;
1486 	uint64_t objlist;
1487 	unsigned int objsize;
1488 	unsigned int numobjs;
1489 	uint64_t synclist;
1490 	unsigned int syncsize;
1491 	unsigned int numsyncs;
1492 	unsigned int context_id;
1493 	unsigned int timestamp;
1494 };
1495 
1496 #define IOCTL_KGSL_GPU_COMMAND \
1497 	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1498 
1499 /**
1500  * struct kgsl_preemption_counters_query - argument to
1501  * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
1502  * @counters: Return preemption counters array
1503  * @size_user: Size allocated by userspace
1504  * @size_priority_level: Size of preemption counters for each
1505  * priority level
1506  * @max_priority_level: Return max number of priority levels
1507  *
1508  * Query the available preemption counters. The array counters
1509  * is used to return preemption counters. The size of the array
1510  * is passed in so the kernel will only write at most size_user
1511  * or max available preemption counters.  The total number of
1512  * preemption counters is returned in max_priority_level. If the
1513  * array or size passed in are invalid, then an error is
1514  * returned back.
1515  */
1516 struct kgsl_preemption_counters_query {
1517 	uint64_t counters;
1518 	unsigned int size_user;
1519 	unsigned int size_priority_level;
1520 	unsigned int max_priority_level;
1521 };
1522 
1523 #define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
1524 	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
1525 
1526 /**
1527  * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
1528  * @flags: Flags to indicate which parameters to change
1529  * @metadata:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
1530  * metadata
1531  * @id: GPU memory object ID to change
1532  * @metadata_len:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
1533  * new metadata string
1534  * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
1535  */
1536 
1537 #define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
1538 #define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)
1539 
1540 struct kgsl_gpuobj_set_info {
1541 	uint64_t flags;
1542 	uint64_t metadata;
1543 	unsigned int id;
1544 	unsigned int metadata_len;
1545 	unsigned int type;
1546 };
1547 
1548 #define IOCTL_KGSL_GPUOBJ_SET_INFO \
1549 	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1550 
1551 /**
1552  * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
1553  * @size: Size in bytes to back
1554  * @pagesize: Pagesize alignment required
1555  * @flags: Flags for this allocation
1556  * @id: Returned ID for this allocation
1557  */
1558 struct kgsl_sparse_phys_alloc {
1559 	uint64_t size;
1560 	uint64_t pagesize;
1561 	uint64_t flags;
1562 	unsigned int id;
1563 };
1564 
1565 #define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
1566 	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)
1567 
1568 /**
1569  * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
1570  * @id: ID to free
1571  */
1572 struct kgsl_sparse_phys_free {
1573 	unsigned int id;
1574 };
1575 
1576 #define IOCTL_KGSL_SPARSE_PHYS_FREE \
1577 	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
1578 
1579 /**
1580  * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
1581  * @size: Size in bytes to reserve
1582  * @pagesize: Pagesize alignment required
1583  * @flags: Flags for this allocation
1584  * @id: Returned ID for this allocation
1585  * @gpuaddr: Returned GPU address for this allocation
1586  */
1587 struct kgsl_sparse_virt_alloc {
1588 	uint64_t size;
1589 	uint64_t pagesize;
1590 	uint64_t flags;
1591 	uint64_t gpuaddr;
1592 	unsigned int id;
1593 };
1594 
1595 #define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
1596 	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)
1597 
1598 /**
1599  * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
1600  * @id: ID to free
1601  */
1602 struct kgsl_sparse_virt_free {
1603 	unsigned int id;
1604 };
1605 
1606 #define IOCTL_KGSL_SPARSE_VIRT_FREE \
1607 	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
1608 
1609 /**
1610  * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
1611  * @virtoffset: Offset into the virtual ID
1612  * @physoffset: Offset into the physical ID (bind only)
1613  * @size: Size in bytes to reserve
1614  * @flags: Flags for this kgsl_sparse_binding_object
1615  * @id: Physical ID to bind (bind only)
1616  */
1617 struct kgsl_sparse_binding_object {
1618 	uint64_t virtoffset;
1619 	uint64_t physoffset;
1620 	uint64_t size;
1621 	uint64_t flags;
1622 	unsigned int id;
1623 };
1624 
1625 /**
1626  * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
1627  * @list: List of kgsl_sparse_bind_objects to bind/unbind
1628  * @id: Virtual ID to bind/unbind
1629  * @size: Size of kgsl_sparse_bind_object
1630  * @count: Number of elements in list
1631  *
1632  */
1633 struct kgsl_sparse_bind {
1634 	uint64_t list;
1635 	unsigned int id;
1636 	unsigned int size;
1637 	unsigned int count;
1638 };
1639 
1640 #define IOCTL_KGSL_SPARSE_BIND \
1641 	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
1642 
1643 /**
1644  * struct kgsl_gpu_sparse_command - Argument for
1645  * IOCTL_KGSL_GPU_SPARSE_COMMAND
1646  * @flags: Current flags for the object
1647  * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
1648  * @synclist: List of kgsl_command_syncpoints
1649  * @sparsesize: Size of kgsl_sparse_binding_object
1650  * @numsparse: Number of elements in list
1651  * @sync_size: Size of kgsl_command_syncpoint structure
1652  * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1653  * @context_id: Context ID submitting the kgsl_gpu_command
1654  * @timestamp: Timestamp for the submitted commands
1655  * @id: Virtual ID to bind/unbind
1656  */
1657 struct kgsl_gpu_sparse_command {
1658 	uint64_t flags;
1659 	uint64_t sparselist;
1660 	uint64_t synclist;
1661 	unsigned int sparsesize;
1662 	unsigned int numsparse;
1663 	unsigned int syncsize;
1664 	unsigned int numsyncs;
1665 	unsigned int context_id;
1666 	unsigned int timestamp;
1667 	unsigned int id;
1668 };
1669 
1670 #define IOCTL_KGSL_GPU_SPARSE_COMMAND \
1671 	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
1672 
1673 #endif /* _MSM_KGSL_H */
1674