• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #ifndef _UAPI_MSM_KGSL_H
2 #define _UAPI_MSM_KGSL_H
3 
4 #include <linux/types.h>
5 #include <linux/ioctl.h>
6 
7 /*
8  * The KGSL version has proven not to be very useful in userspace if features
9  * are cherry picked into other trees out of order so it is frozen as of 3.14.
10  * It is left here for backwards compatibility and as a reminder that
11  * software releases are never linear. Also, I like pie.
12  */
13 
14 #define KGSL_VERSION_MAJOR        3
15 #define KGSL_VERSION_MINOR        14
16 
17 /*
18  * We have traditionally mixed context and issueibcmds / command batch flags
19  * together into a big flag stew. This worked fine until we started adding a
20  * lot more command batch flags and we started running out of bits. Turns out
21  * we have a bit of room in the context type / priority mask that we could use
22  * for command batches, but that means we need to split out the flags into two
23  * coherent sets.
24  *
25  * If any future definitions are for both context and cmdbatch add both defines
26  * and link the cmdbatch to the context define as we do below. Otherwise feel
27  * free to add exclusive bits to either set.
28  */
29 
30 /* --- context flags --- */
31 #define KGSL_CONTEXT_SAVE_GMEM		0x00000001
32 #define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
33 /* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
34 #define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
35 #define KGSL_CONTEXT_CTX_SWITCH		0x00000008
36 #define KGSL_CONTEXT_PREAMBLE		0x00000010
37 #define KGSL_CONTEXT_TRASH_STATE	0x00000020
38 #define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
39 #define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
40 /* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
41 #define KGSL_CONTEXT_END_OF_FRAME	0x00000100
42 #define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
43 /* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
44 #define KGSL_CONTEXT_SYNC               0x00000400
45 #define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
46 #define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
47 #define KGSL_CONTEXT_PRIORITY_SHIFT     12
48 #define KGSL_CONTEXT_PRIORITY_UNDEF     0
49 
50 #define KGSL_CONTEXT_IFH_NOP            0x00010000
51 #define KGSL_CONTEXT_SECURE             0x00020000
52 #define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
53 #define KGSL_CONTEXT_SPARSE             0x00080000
54 
55 #define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
56 #define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
57 #define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT    0x0
58 #define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
59 #define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN  0x2
60 
61 #define KGSL_CONTEXT_TYPE_MASK          0x01F00000
62 #define KGSL_CONTEXT_TYPE_SHIFT         20
63 #define KGSL_CONTEXT_TYPE_ANY		0
64 #define KGSL_CONTEXT_TYPE_GL		1
65 #define KGSL_CONTEXT_TYPE_CL		2
66 #define KGSL_CONTEXT_TYPE_C2D		3
67 #define KGSL_CONTEXT_TYPE_RS		4
68 #define KGSL_CONTEXT_TYPE_VK		5
69 #define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
70 
71 #define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
72 
73 #define KGSL_CONTEXT_INVALID 0xffffffff
74 
75 /*
76  * --- command batch flags ---
77  * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
78  * definitions or bits that are valid for both contexts and cmdbatches.  To be
79  * safe the other 8 bits that are still available in the context field should be
80  * omitted here in case we need to share - the other bits are available for
81  * cmdbatch only flags as needed
82  */
83 #define KGSL_CMDBATCH_MEMLIST		0x00000001
84 #define KGSL_CMDBATCH_MARKER		0x00000002
85 #define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
86 #define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
87 #define KGSL_CMDBATCH_PROFILING		0x00000010
88 /*
89  * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
90  * to take effect, as the latter only affects the time data returned.
91  */
92 #define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
93 #define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
94 #define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
95 #define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
96 #define KGSL_CMDBATCH_SPARSE	    0x1000 /* 0x1000 */
97 
98 /*
99  * Reserve bits [16:19] and bits [28:31] for possible bits shared between
100  * contexts and command batches.  Update this comment as new flags are added.
101  */
102 
103 /*
104  * gpu_command_object flags - these flags communicate the type of command or
105  * memory object being submitted for a GPU command
106  */
107 
108 /* Flags for GPU command objects */
109 #define KGSL_CMDLIST_IB                  0x00000001U
110 #define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
111 #define KGSL_CMDLIST_IB_PREAMBLE         0x00000004U
112 
113 /* Flags for GPU command memory objects */
114 #define KGSL_OBJLIST_MEMOBJ  0x00000008U
115 #define KGSL_OBJLIST_PROFILE 0x00000010U
116 
117 /* Flags for GPU command sync points */
118 #define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
119 #define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
120 
121 /* --- Memory allocation flags --- */
122 
123 /* General allocation hints */
124 #define KGSL_MEMFLAGS_SECURE      0x00000008ULL
125 #define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
126 #define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
127 #define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL
128 
129 /* Flag for binding all the virt range to single phys data */
130 #define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
131 #define KGSL_SPARSE_BIND 0x1ULL
132 #define KGSL_SPARSE_UNBIND 0x2ULL
133 
134 /* Memory caching hints */
135 #define KGSL_CACHEMODE_MASK       0x0C000000U
136 #define KGSL_CACHEMODE_SHIFT 26
137 
138 #define KGSL_CACHEMODE_WRITECOMBINE 0
139 #define KGSL_CACHEMODE_UNCACHED 1
140 #define KGSL_CACHEMODE_WRITETHROUGH 2
141 #define KGSL_CACHEMODE_WRITEBACK 3
142 
143 #define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
144 #define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
145 #define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
146 #define KGSL_MEMFLAGS_IOCOHERENT  0x80000000ULL
147 
148 /* Memory types for which allocations are made */
149 #define KGSL_MEMTYPE_MASK		0x0000FF00
150 #define KGSL_MEMTYPE_SHIFT		8
151 
152 #define KGSL_MEMTYPE_OBJECTANY			0
153 #define KGSL_MEMTYPE_FRAMEBUFFER		1
154 #define KGSL_MEMTYPE_RENDERBUFFER		2
155 #define KGSL_MEMTYPE_ARRAYBUFFER		3
156 #define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
157 #define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
158 #define KGSL_MEMTYPE_TEXTURE			6
159 #define KGSL_MEMTYPE_SURFACE			7
160 #define KGSL_MEMTYPE_EGL_SURFACE		8
161 #define KGSL_MEMTYPE_GL				9
162 #define KGSL_MEMTYPE_CL				10
163 #define KGSL_MEMTYPE_CL_BUFFER_MAP		11
164 #define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
165 #define KGSL_MEMTYPE_CL_IMAGE_MAP		13
166 #define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
167 #define KGSL_MEMTYPE_CL_KERNEL_STACK		15
168 #define KGSL_MEMTYPE_COMMAND			16
169 #define KGSL_MEMTYPE_2D				17
170 #define KGSL_MEMTYPE_EGL_IMAGE			18
171 #define KGSL_MEMTYPE_EGL_SHADOW			19
172 #define KGSL_MEMTYPE_MULTISAMPLE		20
173 #define KGSL_MEMTYPE_KERNEL			255
174 
175 /*
176  * Alignment hint, passed as the power of 2 exponent.
177  * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
178  */
179 #define KGSL_MEMALIGN_MASK		0x00FF0000
180 #define KGSL_MEMALIGN_SHIFT		16
181 
/* Source of user memory mapped into the GPU via IOCTL_KGSL_MAP_USER_MEM */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002, /* raw user address */
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/*
	 * ION type is retained for backwards compatibility but Ion buffers are
	 * dma-bufs so try to use that naming if we can
	 */
	KGSL_USER_MEM_TYPE_DMABUF       = 0x00000003, /* alias of _ION */
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007, /* upper bound, not a real type */
};
194 #define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
195 #define KGSL_MEMFLAGS_USERMEM_SHIFT 5
196 
197 /*
198  * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
199  * leave a good value for allocated memory. In the flags we use
200  * 0 to indicate allocated memory and thus need to add 1 to the enum
201  * values.
202  */
203 #define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)
204 
205 #define KGSL_MEMFLAGS_NOT_USERMEM 0
206 #define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
207 #define KGSL_MEMFLAGS_USERMEM_ASHMEM \
208 		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
209 #define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
210 #define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
211 
212 /* --- generic KGSL flag values --- */
213 
214 #define KGSL_FLAGS_NORMALMODE  0x00000000
215 #define KGSL_FLAGS_SAFEMODE    0x00000001
216 #define KGSL_FLAGS_INITIALIZED0 0x00000002
217 #define KGSL_FLAGS_INITIALIZED 0x00000004
218 #define KGSL_FLAGS_STARTED     0x00000008
219 #define KGSL_FLAGS_ACTIVE      0x00000010
220 #define KGSL_FLAGS_RESERVED0   0x00000020
221 #define KGSL_FLAGS_RESERVED1   0x00000040
222 #define KGSL_FLAGS_RESERVED2   0x00000080
223 #define KGSL_FLAGS_SOFT_RESET  0x00000100
224 #define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
225 
226 /* Server Side Sync Timeout in milliseconds */
227 #define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
228 
229 /* UBWC Modes */
230 #define KGSL_UBWC_NONE	0
231 #define KGSL_UBWC_1_0	1
232 #define KGSL_UBWC_2_0	2
233 #define KGSL_UBWC_3_0	3
234 #define KGSL_UBWC_4_0	4
235 
236 /*
237  * Reset status values for context
238  */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	/* this context caused the GPU reset */
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	/* another context caused the reset this context was affected by */
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	/* the cause of the reset could not be determined */
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};
245 
/*
 * Scale a bandwidth value by 10^6 (e.g. MB/s -> bytes/s).
 * The argument is parenthesized so that expression arguments
 * (e.g. KGSL_CONVERT_TO_MBPS(a + b)) expand correctly; the
 * original expansion applied the multiply to only part of the
 * expression.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val) * 1000 * 1000U)
248 
/* Device information returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;	/* nonzero when the GPU MMU is in use */
	unsigned long gmem_gpubaseaddr;	/* GPU base address of GMEM */
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	size_t gmem_sizebytes;		/* GMEM size in bytes */
};
266 
267 /*
268  * struct kgsl_devmemstore - this structure defines the region of memory
269  * that can be mmap()ed from this driver. The timestamp fields are __volatile__
270  * because they are written by the GPU
271  * @soptimestamp: Start of pipeline timestamp written by GPU before the
272  * commands in concern are processed
273  * @sbz: Unused, kept for 8 byte alignment
274  * @eoptimestamp: End of pipeline timestamp written by GPU after the
275  * commands in concern are processed
276  * @sbz2: Unused, kept for 8 byte alignment
277  * @preempted: Indicates if the context was preempted
278  * @sbz3: Unused, kept for 8 byte alignment
279  * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
280  * @sbz4: Unused, kept for 8 byte alignment
281  * @current_context: The current context the GPU is working on
282  * @sbz5: Unused, kept for 8 byte alignment
283  */
/* mmap()able shared region; see the kerneldoc comment above for details */
struct kgsl_devmemstore {
	__volatile__ unsigned int soptimestamp;	/* start-of-pipeline timestamp */
	unsigned int sbz;			/* padding for 8 byte alignment */
	__volatile__ unsigned int eoptimestamp;	/* end-of-pipeline timestamp */
	unsigned int sbz2;
	__volatile__ unsigned int preempted;	/* set if the context was preempted */
	unsigned int sbz3;
	__volatile__ unsigned int ref_wait_ts;	/* interrupt timestamp, unused now */
	unsigned int sbz4;
	unsigned int current_context;		/* context the GPU is working on */
	unsigned int sbz5;
};
296 
297 #define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
298 	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
299 	 offsetof(struct kgsl_devmemstore, field))
300 
/* Timestamp pipeline-stage selector, used by the readtimestamp/wait ioctls */
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};
307 
308 /* property types - used with kgsl_device_getproperty */
309 #define KGSL_PROP_DEVICE_INFO		0x1
310 #define KGSL_PROP_DEVICE_SHADOW		0x2
311 #define KGSL_PROP_DEVICE_POWER		0x3
312 #define KGSL_PROP_SHMEM			0x4
313 #define KGSL_PROP_SHMEM_APERTURES	0x5
314 #define KGSL_PROP_MMU_ENABLE		0x6
315 #define KGSL_PROP_INTERRUPT_WAITS	0x7
316 #define KGSL_PROP_VERSION		0x8
317 #define KGSL_PROP_GPU_RESET_STAT	0x9
318 #define KGSL_PROP_PWRCTRL		0xE
319 #define KGSL_PROP_PWR_CONSTRAINT	0x12
320 #define KGSL_PROP_UCHE_GMEM_VADDR	0x13
321 #define KGSL_PROP_SP_GENERIC_MEM	0x14
322 #define KGSL_PROP_UCODE_VERSION		0x15
323 #define KGSL_PROP_GPMU_VERSION		0x16
324 #define KGSL_PROP_HIGHEST_BANK_BIT	0x17
325 #define KGSL_PROP_DEVICE_BITNESS	0x18
326 #define KGSL_PROP_DEVICE_QDSS_STM	0x19
327 #define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
328 #define KGSL_PROP_UBWC_MODE		0x1B
329 #define KGSL_PROP_DEVICE_QTIMER		0x20
330 #define KGSL_PROP_L3_PWR_CONSTRAINT     0x22
331 #define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
332 #define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24
333 #define KGSL_PROP_SPEED_BIN		0x25
334 #define KGSL_PROP_GAMING_BIN		0x26
335 #define KGSL_PROP_QUERY_CAPABILITIES	0x27
336 #define KGSL_PROP_CONTEXT_PROPERTY	0x28
337 
338 /*
339  * kgsl_capabilities_properties returns a list of supported properties.
340  * If the user passes 0 for 'count' the kernel will set it to the number of
341  * supported properties. The list is expected to be 'count * sizeof(uint32_t)'
342  * bytes long. The kernel will return the actual number of entries copied into
343  * list via 'count'.
344  */
/* Subtype data for KGSL_QUERY_CAPS_PROPERTIES; see the comment above */
struct kgsl_capabilities_properties {
	__u64 list;	/* user pointer to a uint32_t array of property ids */
	__u32 count;	/* in: capacity (0 = query count), out: entries copied */
};
349 
350 /*
351  * KGSL_QUERY_CAPS_PROPERTIES returns a list of the valid properties in the
352  * kernel.  The subtype data should be struct kgsl_capabilities_properties
353  */
354 #define KGSL_QUERY_CAPS_PROPERTIES 1
355 
356 /*
357  * kgsl_capabilities allows the user to query kernel capabilities. The 'data'
358  * type should be set appropriately for the querytype (see above). Pass 0 to
359  * 'size' and the kernel will set it to the expected size of 'data' that is
360  * appropriate for querytype (in bytes).
361  */
struct kgsl_capabilities {
	__u64 data;		/* user pointer to querytype-specific data */
	__u64 size;		/* in: 0 to query, out: expected size of data in bytes */
	__u32 querytype;	/* e.g. KGSL_QUERY_CAPS_PROPERTIES */
};
367 
/*
 * Returned for KGSL_PROP_DEVICE_SHADOW; used to find mmap() offset and size
 * for mapping the memstore into userspace (see getproperty comment below).
 */
struct kgsl_shadowprop {
	unsigned long gpuaddr;	/* NOTE(review): presumably the mmap offset -- confirm */
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};
373 
/* QDSS STM memory region, returned for KGSL_PROP_DEVICE_QDSS_STM */
struct kgsl_qdss_stm_prop {
	uint64_t gpuaddr;	/* GPU address of the region */
	uint64_t size;		/* region size in bytes */
};
378 
/* QTimer memory region, returned for KGSL_PROP_DEVICE_QTIMER */
struct kgsl_qtimer_prop {
	uint64_t gpuaddr;	/* GPU address of the region */
	uint64_t size;		/* region size in bytes */
};
383 
/* Driver and device versions, returned for KGSL_PROP_VERSION */
struct kgsl_version {
	unsigned int drv_major;	/* driver (software) version */
	unsigned int drv_minor;
	unsigned int dev_major;	/* device (hardware) version */
	unsigned int dev_minor;
};
390 
/* Returned for KGSL_PROP_SP_GENERIC_MEM */
struct kgsl_sp_generic_mem {
	uint64_t local;	/* NOTE(review): semantics not visible here -- confirm */
	uint64_t pvt;
};
395 
/* Microcode versions, returned for KGSL_PROP_UCODE_VERSION */
struct kgsl_ucode_version {
	unsigned int pfp;	/* PFP (prefetch parser) firmware version */
	unsigned int pm4;	/* PM4 micro-engine firmware version */
};
400 
/* GPMU firmware version, returned for KGSL_PROP_GPMU_VERSION */
struct kgsl_gpmu_version {
	unsigned int major;
	unsigned int minor;
	unsigned int features;	/* NOTE(review): presumably a feature bitmask -- confirm */
};
406 
/* Argument for KGSL_PROP_CONTEXT_PROPERTY queries */
struct kgsl_context_property {
	__u64 data;		/* user pointer to a subtype-specific struct */
	__u32 size;		/* size of the buffer at data */
	__u32 type;		/* subtype, e.g. KGSL_CONTEXT_PROP_FAULTS */
	__u32 contextid;	/* context to query */
};
413 
/* Subtype data for KGSL_CONTEXT_PROP_FAULTS */
struct kgsl_context_property_fault {
	__s32 faults;		/* fault count */
	__u32 timestamp;	/* NOTE(review): presumably timestamp of last fault -- confirm */
};
418 
419 /* Context property sub types */
420 #define KGSL_CONTEXT_PROP_FAULTS 1
421 
422 /* Performance counter groups */
423 
424 #define KGSL_PERFCOUNTER_GROUP_CP 0x0
425 #define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
426 #define KGSL_PERFCOUNTER_GROUP_PC 0x2
427 #define KGSL_PERFCOUNTER_GROUP_VFD 0x3
428 #define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
429 #define KGSL_PERFCOUNTER_GROUP_VPC 0x5
430 #define KGSL_PERFCOUNTER_GROUP_TSE 0x6
431 #define KGSL_PERFCOUNTER_GROUP_RAS 0x7
432 #define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
433 #define KGSL_PERFCOUNTER_GROUP_TP 0x9
434 #define KGSL_PERFCOUNTER_GROUP_SP 0xA
435 #define KGSL_PERFCOUNTER_GROUP_RB 0xB
436 #define KGSL_PERFCOUNTER_GROUP_PWR 0xC
437 #define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
438 #define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
439 #define KGSL_PERFCOUNTER_GROUP_MH 0xF
440 #define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
441 #define KGSL_PERFCOUNTER_GROUP_SQ 0x11
442 #define KGSL_PERFCOUNTER_GROUP_SX 0x12
443 #define KGSL_PERFCOUNTER_GROUP_TCF 0x13
444 #define KGSL_PERFCOUNTER_GROUP_TCM 0x14
445 #define KGSL_PERFCOUNTER_GROUP_TCR 0x15
446 #define KGSL_PERFCOUNTER_GROUP_L2 0x16
447 #define KGSL_PERFCOUNTER_GROUP_VSC 0x17
448 #define KGSL_PERFCOUNTER_GROUP_CCU 0x18
449 #define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
450 #define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
451 #define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
452 #define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
453 #define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
454 #define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
455 #define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
456 #define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
457 #define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
458 #define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
459 #define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
460 #define KGSL_PERFCOUNTER_GROUP_GLC 0x24
461 #define KGSL_PERFCOUNTER_GROUP_FCHE 0x25
462 #define KGSL_PERFCOUNTER_GROUP_MHUB 0x26
463 #define KGSL_PERFCOUNTER_GROUP_MAX 0x27
464 
465 #define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
466 #define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
467 
/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned long gpuaddr;	/* GPU address of the indirect buffer */
	unsigned long __pad;	/* unused, kept for binary compatibility */
	size_t sizedwords;	/* IB size in dwords */
	unsigned int ctrl;
};
475 
476 /**
477  * struct kgsl_cmdbatch_profiling_buffer
478  * @wall_clock_s: Ringbuffer submission time (seconds).
479  *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
480  *                in kernel clocks, otherwise wall clock time is used.
481  * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
482  *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
483  *                 in kernel clocks, otherwise wall clock time is used.
484  * @gpu_ticks_queued: GPU ticks at ringbuffer submission
485  * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
486  * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
487  *
488  * This structure defines the profiling buffer used to measure cmdbatch
489  * execution time
490  */
/* Cmdbatch profiling data layout; see the kerneldoc comment above */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;		/* submission time, seconds */
	uint64_t wall_clock_ns;		/* submission time, nanoseconds */
	uint64_t gpu_ticks_queued;	/* GPU ticks at ringbuffer submission */
	uint64_t gpu_ticks_submitted;	/* GPU ticks when execution starts */
	uint64_t gpu_ticks_retired;	/* GPU ticks when execution finishes */
};
498 
499 /* ioctls */
500 #define KGSL_IOC_TYPE 0x09
501 
502 /*
503  * get misc info about the GPU
504  * type should be a value from enum kgsl_property_type
505  * value points to a structure that varies based on type
506  * sizebytes is sizeof() that structure
507  * for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
508  * this structure contains hardware versioning info.
509  * for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
510  * this is used to find mmap() offset and sizes for mapping
511  * struct kgsl_memstore into userspace.
512  */
/* Argument for IOCTL_KGSL_DEVICE_GETPROPERTY; see the comment above */
struct kgsl_device_getproperty {
	unsigned int type;	/* KGSL_PROP_* selector */
	void *value;		/* user pointer to a type-specific struct */
	size_t sizebytes;	/* sizeof() that struct */
};
518 
519 #define IOCTL_KGSL_DEVICE_GETPROPERTY \
520 	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
521 
522 /* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
523  */
524 
/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;	/* timestamp to wait for */
	unsigned int timeout;	/* timeout in milliseconds */
};
532 
533 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
534 	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
535 
/* Per-context variant of kgsl_device_waittimestamp */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;	/* context whose timestamp is waited on */
	unsigned int timestamp;		/* timestamp to wait for */
	unsigned int timeout;		/* timeout in milliseconds */
};
541 
542 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
543 	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
544 
545 /* DEPRECATED: issue indirect commands to the GPU.
546  * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
547  * ibaddr and sizedwords must specify a subset of a buffer created
548  * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
549  * flags may be a mask of KGSL_CONTEXT_ values
550  * timestamp is a returned counter value which can be passed to
551  * other ioctls to determine when the commands have been executed by
552  * the GPU.
553  *
554  * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
555  * instead
556  */
/* DEPRECATED -- see the comment above; use IOCTL_KGSL_SUBMIT_COMMANDS */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;	/* from IOCTL_KGSL_DRAWCTXT_CREATE */
	unsigned long ibdesc_addr;	/* user pointer to the IB list */
	unsigned int numibs;		/* number of IBs at ibdesc_addr */
	unsigned int timestamp; /*output param */
	unsigned int flags;		/* mask of KGSL_CONTEXT_ values */
};
564 
565 #define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
566 	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
567 
/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;	/* enum kgsl_timestamp_type selector */
	unsigned int timestamp; /*output param */
};
575 
576 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
577 	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
578 
579 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
580 	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
581 
/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;	/* region to free */
	unsigned int type;	/* enum kgsl_timestamp_type selector */
	unsigned int timestamp;	/* free once this timestamp is reached */
};
592 
593 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
594 	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
595 
596 /*
597  * Previous versions of this header had incorrectly defined
598  * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
599  * of a write only ioctl.  To ensure binary compatibility, the following
600  * #define will be used to intercept the incorrect ioctl
601  */
602 
603 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
604 	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
605 
/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_*  values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;	  /* mask of KGSL_CONTEXT_* values */
	unsigned int drawctxt_id; /*output param */
};
613 
614 #define IOCTL_KGSL_DRAWCTXT_CREATE \
615 	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
616 
/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;	/* id from IOCTL_KGSL_DRAWCTXT_CREATE */
};
621 
622 #define IOCTL_KGSL_DRAWCTXT_DESTROY \
623 	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
624 
/*
 * add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space
 */
struct kgsl_map_user_mem {
	int fd;				/* fd for fd-backed memtypes */
	unsigned long gpuaddr;   /*output param */
	size_t len;			/* length of the mapping */
	size_t offset;			/* offset into the backing object */
	unsigned long hostptr;   /*input param */
	enum kgsl_user_mem_type memtype; /* what backs this mapping */
	unsigned int flags;
};
638 
639 #define IOCTL_KGSL_MAP_USER_MEM \
640 	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
641 
/* Per-context variant of kgsl_cmdstream_readtimestamp */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;	/* context to read the timestamp of */
	unsigned int type;		/* enum kgsl_timestamp_type selector */
	unsigned int timestamp; /*output param */
};
647 
648 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
649 	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
650 
/* Per-context variant of kgsl_cmdstream_freememontimestamp */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;	/* context whose timestamp is watched */
	unsigned long gpuaddr;		/* region to free */
	unsigned int type;		/* enum kgsl_timestamp_type selector */
	unsigned int timestamp;		/* free once this timestamp is reached */
};
657 
658 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
659 	_IOW(KGSL_IOC_TYPE, 0x17, \
660 	struct kgsl_cmdstream_freememontimestamp_ctxtid)
661 
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;		/* pmem file descriptor */
	unsigned long gpuaddr;  /*output param */
	unsigned int len;	/* length of the mapping */
	unsigned int offset;	/* offset into the pmem buffer */
};
669 
670 #define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
671 	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
672 
/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;	/* GPU address of the allocation to free */
};
677 
678 #define IOCTL_KGSL_SHAREDMEM_FREE \
679 	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
680 
/* User-generated CFF capture event for IOCTL_KGSL_CFF_USER_EVENT */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;	/* event opcode */
	unsigned int op1;		/* opcode-specific operands */
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];		/* reserved for binary compatibility */
};
690 
691 #define IOCTL_KGSL_CFF_USER_EVENT \
692 	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
693 
/* Describes a rectangular region of GMEM */
struct kgsl_gmem_desc {
	unsigned int x;		/* origin of the region */
	unsigned int y;
	unsigned int width;	/* extent of the region */
	unsigned int height;
	unsigned int pitch;	/* row pitch */
};
701 
/* Describes a buffer used as a GMEM shadow */
struct kgsl_buffer_desc {
	void		*hostptr;	/* CPU address of the buffer */
	unsigned long	gpuaddr;	/* GPU address of the buffer */
	int		size;		/* buffer size in bytes */
	unsigned int	format;
	unsigned int	pitch;		/* row pitch */
	unsigned int	enabled;
};
710 
/* Argument for IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;		/* context to bind the shadow to */
	struct kgsl_gmem_desc gmem_desc;	/* GMEM region being shadowed */
	unsigned int shadow_x;			/* origin within the shadow buffer */
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;	/* backing buffer for the shadow */
	unsigned int buffer_id;
};
719 
720 #define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
721 	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
722 
723 /* add a block of memory into the GPU address space */
724 
725 /*
726  * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
727  * use IOCTL_KGSL_GPUMEM_ALLOC instead
728  */
729 
/* DEPRECATED 09/2012 -- use IOCTL_KGSL_GPUMEM_ALLOC instead (see above) */
struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	unsigned int hostptr;	/* CPU address of the memory to map */
	unsigned int flags;
};
735 
736 #define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
737 	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
738 
739 /*
740  * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
741  * supports both directions (flush and invalidate). This code will still
742  * work, but by definition it will do a flush of the cache which might not be
743  * what you want to have happen on a buffer following a GPU operation.  It is
744  * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
745  */
746 
747 #define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
748 	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
749 
/* Argument for IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;	/* context to modify */
	unsigned int offset;		/* new bin base offset */
};
754 
755 #define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
756 	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
757 
/* Target windows for IOCTL_KGSL_CMDWINDOW_WRITE */
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000, /* lower bound (== _2D) */
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF, /* upper bound (== _ARBITER) */
};
766 
/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;	/* which window to write */
	unsigned int addr;			/* address within the window */
	unsigned int data;			/* value to write */
};
773 
774 #define IOCTL_KGSL_CMDWINDOW_WRITE \
775 	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
776 
/* Allocate memory for access by the GPU (IOCTL_KGSL_GPUMEM_ALLOC) */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;		/* requested allocation size in bytes */
	unsigned int flags;	/* mask of KGSL_MEMFLAGS_/KGSL_MEMTYPE_/KGSL_CACHEMODE_ values */
};
782 
783 #define IOCTL_KGSL_GPUMEM_ALLOC \
784 	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
785 
/* Argument for IOCTL_KGSL_CFF_SYNCMEM */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;	/* start of the region to sync */
	size_t len;		/* length of the region in bytes */
	unsigned int __pad[2]; /* For future binary compatibility */
};
791 
792 #define IOCTL_KGSL_CFF_SYNCMEM \
793 	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
794 
795 /*
796  * A timestamp event allows the user space to register an action following an
797  * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
798  * _IOWR to support fences which need to return a fd for the priv parameter.
799  */
800 
/* Argument for IOCTL_KGSL_TIMESTAMP_EVENT; see the comment above */
struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void *priv;	 /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};
808 
809 #define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
810 	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
811 
812 /* A genlock timestamp event releases an existing lock on timestamp expire */
813 
814 #define KGSL_TIMESTAMP_EVENT_GENLOCK 1
815 
/* Event blob for KGSL_TIMESTAMP_EVENT_GENLOCK */
struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};
819 
820 /* A fence timestamp event releases an existing lock on timestamp expire */
821 
822 #define KGSL_TIMESTAMP_EVENT_FENCE 2
823 
/* Event blob for KGSL_TIMESTAMP_EVENT_FENCE */
struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};
827 
828 /*
829  * Set a property within the kernel.  Uses the same structure as
830  * IOCTL_KGSL_GETPROPERTY
831  */
832 
833 #define IOCTL_KGSL_SETPROPERTY \
834 	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
835 
836 #define IOCTL_KGSL_TIMESTAMP_EVENT \
837 	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
838 
839 /**
840  * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
841  * @id: returned id value for this allocation.
842  * @flags: mask of KGSL_MEM* values requested and actual flags on return.
843  * @size: requested size of the allocation and actual size on return.
844  * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
845  * @gpuaddr: returned GPU address for the allocation
846  *
847  * Allocate memory for access by the GPU. The flags and size fields are echoed
848  * back by the kernel, so that the caller can know if the request was
849  * adjusted.
850  *
851  * Supported flags:
852  * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
853  * KGSL_MEMTYPE*: usage hint for debugging aid
854  * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
855  * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
856  * address will be 0. Calling mmap() will set the GPU address.
857  */
/* Argument for IOCTL_KGSL_GPUMEM_ALLOC_ID; see the kerneldoc comment above */
struct kgsl_gpumem_alloc_id {
	unsigned int id;	/* out: id for this allocation */
	unsigned int flags;	/* in/out: KGSL_MEM* flags, echoed back as applied */
	size_t size;		/* in/out: requested size, echoed back as adjusted */
	size_t mmapsize;	/* out: size to pass to mmap() */
	unsigned long gpuaddr;	/* out: GPU address (0 with KGSL_MEMFLAGS_USE_CPU_MAP) */
/* private: reserved for future use*/
	unsigned long __pad[2];
};
867 
868 #define IOCTL_KGSL_GPUMEM_ALLOC_ID \
869 	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
870 
871 /**
872  * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
873  * @id: GPU allocation id to free
874  *
875  * Free an allocation by id, in case a GPU address has not been assigned or
876  * is unknown. Freeing an allocation by id with this ioctl or by GPU address
877  * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
878  */
/* Argument for IOCTL_KGSL_GPUMEM_FREE_ID; free an allocation by id */
struct kgsl_gpumem_free_id {
	unsigned int id;	/* allocation id to free */
/* private: reserved for future use*/
	unsigned int __pad;
};
884 
885 #define IOCTL_KGSL_GPUMEM_FREE_ID \
886 	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
887 
888 /**
889  * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
890  * @gpuaddr: GPU address to query. Also set on return.
891  * @id: GPU allocation id to query. Also set on return.
892  * @flags: returned mask of KGSL_MEM* values.
893  * @size: returned size of the allocation.
894  * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
895  * @useraddr: returned address of the userspace mapping for this buffer
896  *
897  * This ioctl allows querying of all user visible attributes of an existing
898  * allocation, by either the GPU address or the id returned by a previous
899  * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
900  * return all attributes so this ioctl can be used to look them up if needed.
901  *
902  */
903 struct kgsl_gpumem_get_info {
904 	unsigned long gpuaddr;
905 	unsigned int id;
906 	unsigned int flags;
907 	size_t size;
908 	size_t mmapsize;
909 	unsigned long useraddr;
910 /* private: reserved for future use*/
911 	unsigned long __pad[4];
912 };
913 
914 #define IOCTL_KGSL_GPUMEM_GET_INFO\
915 	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
916 
917 /**
918  * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
919  * @gpuaddr: GPU address of the buffer to sync.
920  * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
921  * @op: a mask of KGSL_GPUMEM_CACHE_* values
922  * @offset: offset into the buffer
923  * @length: number of bytes starting from offset to perform
924  * the cache operation on
925  *
926  * Sync the L2 cache for memory headed to and from the GPU - this replaces
927  * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
928  * directions
929  *
930  */
931 struct kgsl_gpumem_sync_cache {
932 	unsigned long gpuaddr;
933 	unsigned int id;
934 	unsigned int op;
935 	size_t offset;
936 	size_t length;
937 };
938 
/* Cache operation bits for kgsl_gpumem_sync_cache.op */
#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
/*
 * Shift an unsigned literal: (1 << 31U) left-shifts a signed int into the
 * sign bit, which is undefined behavior in C (the U belongs on the value,
 * not the shift count). The resulting value is unchanged.
 */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
953 
954 /**
955  * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
956  * @groupid: Performance counter group ID
957  * @countable: Countable to select within the group
958  * @offset: Return offset of the reserved LO counter
959  * @offset_hi: Return offset of the reserved HI counter
960  *
961  * Get an available performance counter from a specified groupid.  The offset
962  * of the performance counter will be returned after successfully assigning
963  * the countable to the counter for the specified group.  An error will be
964  * returned and an offset of 0 if the groupid is invalid or there are no
965  * more counters left.  After successfully getting a perfcounter, the user
966  * must call kgsl_perfcounter_put(groupid, contable) when finished with
967  * the perfcounter to clear up perfcounter resources.
968  *
969  */
970 struct kgsl_perfcounter_get {
971 	unsigned int groupid;
972 	unsigned int countable;
973 	unsigned int offset;
974 	unsigned int offset_hi;
975 /* private: reserved for future use */
976 	unsigned int __pad; /* For future binary compatibility */
977 };
978 
979 #define IOCTL_KGSL_PERFCOUNTER_GET \
980 	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
981 
982 /**
983  * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
984  * @groupid: Performance counter group ID
985  * @countable: Countable to release within the group
986  *
987  * Put an allocated performance counter to allow others to have access to the
988  * resource that was previously taken.  This is only to be called after
989  * successfully getting a performance counter from kgsl_perfcounter_get().
990  *
991  */
992 struct kgsl_perfcounter_put {
993 	unsigned int groupid;
994 	unsigned int countable;
995 /* private: reserved for future use */
996 	unsigned int __pad[2]; /* For future binary compatibility */
997 };
998 
999 #define IOCTL_KGSL_PERFCOUNTER_PUT \
1000 	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
1001 
1002 /**
1003  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
1004  * @groupid: Performance counter group ID
1005  * @countable: Return active countables array
1006  * @size: Size of active countables array
1007  * @max_counters: Return total number counters for the group ID
1008  *
1009  * Query the available performance counters given a groupid.  The array
1010  * *countables is used to return the current active countables in counters.
1011  * The size of the array is passed in so the kernel will only write at most
1012  * size or counter->size for the group id.  The total number of available
1013  * counters for the group ID is returned in max_counters.
1014  * If the array or size passed in are invalid, then only the maximum number
1015  * of counters will be returned, no data will be written to *countables.
1016  * If the groupid is invalid an error code will be returned.
1017  *
1018  */
1019 struct kgsl_perfcounter_query {
1020 	unsigned int groupid;
1021 	/* Array to return the current countable for up to size counters */
1022 	unsigned int *countables;
1023 	unsigned int count;
1024 	unsigned int max_counters;
1025 /* private: reserved for future use */
1026 	unsigned int __pad[2]; /* For future binary compatibility */
1027 };
1028 
1029 #define IOCTL_KGSL_PERFCOUNTER_QUERY \
1030 	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
1031 
1032 /**
1033  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
1034  * @groupid: Performance counter group IDs
1035  * @countable: Performance counter countable IDs
1036  * @value: Return performance counter reads
1037  * @size: Size of all arrays (groupid/countable pair and return value)
1038  *
1039  * Read in the current value of a performance counter given by the groupid
1040  * and countable.
1041  *
1042  */
1043 
1044 struct kgsl_perfcounter_read_group {
1045 	unsigned int groupid;
1046 	unsigned int countable;
1047 	unsigned long long value;
1048 };
1049 
1050 struct kgsl_perfcounter_read {
1051 	struct kgsl_perfcounter_read_group *reads;
1052 	unsigned int count;
1053 /* private: reserved for future use */
1054 	unsigned int __pad[2]; /* For future binary compatibility */
1055 };
1056 
1057 #define IOCTL_KGSL_PERFCOUNTER_READ \
1058 	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
1059 /*
1060  * struct kgsl_gpumem_sync_cache_bulk - argument to
1061  * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
1062  * @id_list: list of GPU buffer ids of the buffers to sync
1063  * @count: number of GPU buffer ids in id_list
1064  * @op: a mask of KGSL_GPUMEM_CACHE_* values
1065  *
1066  * Sync the cache for memory headed to and from the GPU. Certain
1067  * optimizations can be made on the cache operation based on the total
1068  * size of the working set of memory to be managed.
1069  */
1070 struct kgsl_gpumem_sync_cache_bulk {
1071 	unsigned int *id_list;
1072 	unsigned int count;
1073 	unsigned int op;
1074 /* private: reserved for future use */
1075 	unsigned int __pad[2]; /* For future binary compatibility */
1076 };
1077 
1078 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
1079 	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
1080 
1081 /*
1082  * struct kgsl_cmd_syncpoint_timestamp
1083  * @context_id: ID of a KGSL context
1084  * @timestamp: GPU timestamp
1085  *
1086  * This structure defines a syncpoint comprising a context/timestamp pair. A
1087  * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
1088  * dependencies that must be met before the command can be submitted to the
1089  * hardware
1090  */
1091 struct kgsl_cmd_syncpoint_timestamp {
1092 	unsigned int context_id;
1093 	unsigned int timestamp;
1094 };
1095 
1096 struct kgsl_cmd_syncpoint_fence {
1097 	int fd;
1098 };
1099 
1100 /**
1101  * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
1102  * @type: type of sync point defined here
1103  * @priv: Pointer to the type specific buffer
1104  * @size: Size of the type specific buffer
1105  *
1106  * This structure contains pointers defining a specific command sync point.
1107  * The pointer and size should point to a type appropriate structure.
1108  */
1109 struct kgsl_cmd_syncpoint {
1110 	int type;
1111 	void *priv;
1112 	size_t size;
1113 };
1114 
/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2

/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: bitmask controlling the submission (NOTE(review): presumably the
 * command batch flag set split out from the context flags near the top of
 * this header - confirm against the driver)
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry a user defined timestamp, on exit the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware.  This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way
 * to submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied.  This entry point is the new and preferred way
 * to submit commands to the GPU. The memory list can be used to specify all
 * memory that is referenced in the current set of commands.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
1154 
1155 /**
1156  * struct kgsl_device_constraint - device constraint argument
1157  * @context_id: KGSL context ID
1158  * @type: type of constraint i.e pwrlevel/none
1159  * @data: constraint data
1160  * @size: size of the constraint data
1161  */
1162 struct kgsl_device_constraint {
1163 	unsigned int type;
1164 	unsigned int context_id;
1165 	void *data;
1166 	size_t size;
1167 };
1168 
1169 /* Constraint Type*/
1170 #define KGSL_CONSTRAINT_NONE 0
1171 #define KGSL_CONSTRAINT_PWRLEVEL 1
1172 
1173 /* L3 constraint Type */
1174 #define KGSL_CONSTRAINT_L3_NONE	2
1175 #define KGSL_CONSTRAINT_L3_PWRLEVEL	3
1176 
1177 /* PWRLEVEL constraint level*/
1178 /* set to min frequency */
1179 #define KGSL_CONSTRAINT_PWR_MIN    0
1180 /* set to max frequency */
1181 #define KGSL_CONSTRAINT_PWR_MAX    1
1182 
1183 struct kgsl_device_constraint_pwrlevel {
1184 	unsigned int level;
1185 };
1186 
1187 /**
1188  * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1189  * @id: returned id for the syncsource that was created.
1190  *
1191  * This ioctl creates a userspace sync timeline.
1192  */
1193 
1194 struct kgsl_syncsource_create {
1195 	unsigned int id;
1196 /* private: reserved for future use */
1197 	unsigned int __pad[3];
1198 };
1199 
1200 #define IOCTL_KGSL_SYNCSOURCE_CREATE \
1201 	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1202 
1203 /**
1204  * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1205  * @id: syncsource id to destroy
1206  *
1207  * This ioctl creates a userspace sync timeline.
1208  */
1209 
1210 struct kgsl_syncsource_destroy {
1211 	unsigned int id;
1212 /* private: reserved for future use */
1213 	unsigned int __pad[3];
1214 };
1215 
1216 #define IOCTL_KGSL_SYNCSOURCE_DESTROY \
1217 	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1218 
1219 /**
1220  * struct kgsl_syncsource_create_fence - Argument to
1221  *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1222  * @id: syncsource id
1223  * @fence_fd: returned sync_fence fd
1224  *
1225  * Create a fence that may be signaled by userspace by calling
1226  * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1227  * these fences.
1228  */
1229 struct kgsl_syncsource_create_fence {
1230 	unsigned int id;
1231 	int fence_fd;
1232 /* private: reserved for future use */
1233 	unsigned int __pad[4];
1234 };
1235 
1236 /**
1237  * struct kgsl_syncsource_signal_fence - Argument to
1238  *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1239  * @id: syncsource id
1240  * @fence_fd: sync_fence fd to signal
1241  *
1242  * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1243  * call using the same syncsource id. This allows a fence to be shared
1244  * to other processes but only signaled by the process owning the fd
1245  * used to create the fence.
1246  */
1247 #define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
1248 	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
1249 
1250 struct kgsl_syncsource_signal_fence {
1251 	unsigned int id;
1252 	int fence_fd;
1253 /* private: reserved for future use */
1254 	unsigned int __pad[4];
1255 };
1256 
1257 #define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
1258 	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1259 
1260 /**
1261  * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
1262  * @offset: Offset into the GPU object to sync
1263  * @length: Number of bytes to sync
1264  * @id: ID of the GPU object to sync
1265  */
1266 struct kgsl_cff_sync_gpuobj {
1267 	uint64_t offset;
1268 	uint64_t length;
1269 	unsigned int id;
1270 };
1271 
1272 #define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
1273 	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
1274 
1275 /**
1276  * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
1277  * @size: Size in bytes of the object to allocate
1278  * @flags: mask of KGSL_MEMFLAG_* bits
1279  * @va_len: Size in bytes of the virtual region to allocate
1280  * @mmapsize: Returns the mmap() size of the object
1281  * @id: Returns the GPU object ID of the new object
1282  * @metadata_len: Length of the metdata to copy from the user
1283  * @metadata: Pointer to the user specified metadata to store for the object
1284  */
1285 struct kgsl_gpuobj_alloc {
1286 	uint64_t size;
1287 	uint64_t flags;
1288 	uint64_t va_len;
1289 	uint64_t mmapsize;
1290 	unsigned int id;
1291 	unsigned int metadata_len;
1292 	uint64_t metadata;
1293 };
1294 
1295 /* Let the user know that this header supports the gpuobj metadata */
1296 #define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64
1297 
1298 #define IOCTL_KGSL_GPUOBJ_ALLOC \
1299 	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1300 
1301 /**
1302  * struct kgsl_gpuobj_free - Argument to IOCTL_KGLS_GPUOBJ_FREE
1303  * @flags: Mask of: KGSL_GUPOBJ_FREE_ON_EVENT
1304  * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
1305  * specified
1306  * @id: ID of the GPU object to free
1307  * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
1308  * event to free on
1309  * @len: Length of the data passed in priv
1310  */
1311 struct kgsl_gpuobj_free {
1312 	uint64_t flags;
1313 	uint64_t priv;
1314 	unsigned int id;
1315 	unsigned int type;
1316 	unsigned int len;
1317 };
1318 
1319 #define KGSL_GPUOBJ_FREE_ON_EVENT 1
1320 
1321 #define KGSL_GPU_EVENT_TIMESTAMP 1
1322 #define KGSL_GPU_EVENT_FENCE     2
1323 
1324 /**
1325  * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
1326  * object on
1327  * @context_id: ID of the timestamp event to wait for
1328  * @timestamp: Timestamp of the timestamp event to wait for
1329  */
1330 struct kgsl_gpu_event_timestamp {
1331 	unsigned int context_id;
1332 	unsigned int timestamp;
1333 };
1334 
1335 /**
1336  * struct kgsl_gpu_event_fence - Specifies a fence ID to to free a GPU object on
1337  * @fd: File descriptor for the fence
1338  */
1339 struct kgsl_gpu_event_fence {
1340 	int fd;
1341 };
1342 
1343 #define IOCTL_KGSL_GPUOBJ_FREE \
1344 	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1345 
1346 /**
1347  * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
1348  * @gpuaddr: GPU address of the object
1349  * @flags: Current flags for the object
1350  * @size: Size of the object
1351  * @va_len: VA size of the object
1352  * @va_addr: Virtual address of the object (if it is mapped)
1353  * id - GPU object ID of the object to query
1354  */
1355 struct kgsl_gpuobj_info {
1356 	uint64_t gpuaddr;
1357 	uint64_t flags;
1358 	uint64_t size;
1359 	uint64_t va_len;
1360 	uint64_t va_addr;
1361 	unsigned int id;
1362 };
1363 
1364 #define IOCTL_KGSL_GPUOBJ_INFO \
1365 	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
1366 
1367 /**
1368  * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
1369  * @priv: Pointer to the private data for the import type
1370  * @priv_len: Length of the private data
1371  * @flags: Mask of KGSL_MEMFLAG_ flags
1372  * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
1373  * @id: Returns the ID of the new GPU object
1374  */
1375 struct kgsl_gpuobj_import {
1376 	uint64_t priv;
1377 	uint64_t priv_len;
1378 	uint64_t flags;
1379 	unsigned int type;
1380 	unsigned int id;
1381 };
1382 
1383 /**
1384  * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
1385  * @fd: File descriptor for the dma-buf object
1386  */
1387 struct kgsl_gpuobj_import_dma_buf {
1388 	int fd;
1389 };
1390 
1391 /**
1392  * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
1393  * @virtaddr: Virtual address of the object to import
1394  */
1395 struct kgsl_gpuobj_import_useraddr {
1396 	uint64_t virtaddr;
1397 };
1398 
1399 #define IOCTL_KGSL_GPUOBJ_IMPORT \
1400 	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
1401 
1402 /**
1403  * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
1404  * @offset: Offset within the GPU object to sync
1405  * @length: Number of bytes to sync
1406  * @id: ID of the GPU object to sync
1407  * @op: Cache operation to execute
1408  */
1409 
1410 struct kgsl_gpuobj_sync_obj {
1411 	uint64_t offset;
1412 	uint64_t length;
1413 	unsigned int id;
1414 	unsigned int op;
1415 };
1416 
1417 /**
1418  * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
1419  * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
1420  * @obj_len: Size of each item in the array
1421  * @count: Number of items in the array
1422  */
1423 
1424 struct kgsl_gpuobj_sync {
1425 	uint64_t objs;
1426 	unsigned int obj_len;
1427 	unsigned int count;
1428 };
1429 
1430 #define IOCTL_KGSL_GPUOBJ_SYNC \
1431 	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1432 
1433 /**
1434  * struct kgsl_command_object - GPU command object
1435  * @offset: GPU address offset of the object
1436  * @gpuaddr: GPU address of the object
1437  * @size: Size of the object
1438  * @flags: Current flags for the object
1439  * @id - GPU command object ID
1440  */
1441 struct kgsl_command_object {
1442 	uint64_t offset;
1443 	uint64_t gpuaddr;
1444 	uint64_t size;
1445 	unsigned int flags;
1446 	unsigned int id;
1447 };
1448 
1449 /**
1450  * struct kgsl_command_syncpoint - GPU syncpoint object
1451  * @priv: Pointer to the type specific buffer
1452  * @size: Size of the type specific buffer
1453  * @type: type of sync point defined here
1454  */
1455 struct kgsl_command_syncpoint {
1456 	uint64_t priv;
1457 	uint64_t size;
1458 	unsigned int type;
1459 };
1460 
1461 /**
1462  * struct kgsl_command_object - Argument for IOCTL_KGSL_GPU_COMMAND
1463  * @flags: Current flags for the object
1464  * @cmdlist: List of kgsl_command_objects for submission
1465  * @cmd_size: Size of kgsl_command_objects structure
1466  * @numcmds: Number of kgsl_command_objects in command list
1467  * @objlist: List of kgsl_command_objects for tracking
1468  * @obj_size: Size of kgsl_command_objects structure
1469  * @numobjs: Number of kgsl_command_objects in object list
1470  * @synclist: List of kgsl_command_syncpoints
1471  * @sync_size: Size of kgsl_command_syncpoint structure
1472  * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1473  * @context_id: Context ID submittin ghte kgsl_gpu_command
1474  * @timestamp: Timestamp for the submitted commands
1475  */
1476 struct kgsl_gpu_command {
1477 	uint64_t flags;
1478 	uint64_t cmdlist;
1479 	unsigned int cmdsize;
1480 	unsigned int numcmds;
1481 	uint64_t objlist;
1482 	unsigned int objsize;
1483 	unsigned int numobjs;
1484 	uint64_t synclist;
1485 	unsigned int syncsize;
1486 	unsigned int numsyncs;
1487 	unsigned int context_id;
1488 	unsigned int timestamp;
1489 };
1490 
1491 #define IOCTL_KGSL_GPU_COMMAND \
1492 	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1493 
1494 /**
1495  * struct kgsl_preemption_counters_query - argument to
1496  * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
1497  * @counters: Return preemption counters array
1498  * @size_user: Size allocated by userspace
1499  * @size_priority_level: Size of preemption counters for each
1500  * priority level
1501  * @max_priority_level: Return max number of priority levels
1502  *
1503  * Query the available preemption counters. The array counters
1504  * is used to return preemption counters. The size of the array
1505  * is passed in so the kernel will only write at most size_user
1506  * or max available preemption counters.  The total number of
1507  * preemption counters is returned in max_priority_level. If the
1508  * array or size passed in are invalid, then an error is
1509  * returned back.
1510  */
1511 struct kgsl_preemption_counters_query {
1512 	uint64_t counters;
1513 	unsigned int size_user;
1514 	unsigned int size_priority_level;
1515 	unsigned int max_priority_level;
1516 };
1517 
1518 #define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
1519 	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
1520 
1521 /**
1522  * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
1523  * @flags: Flags to indicate which parameters to change
1524  * @metadata:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
1525  * metadata
1526  * @id: GPU memory object ID to change
1527  * @metadata_len:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
1528  * new metadata string
1529  * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
1530  */
1531 
1532 #define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
1533 #define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)
1534 
1535 struct kgsl_gpuobj_set_info {
1536 	uint64_t flags;
1537 	uint64_t metadata;
1538 	unsigned int id;
1539 	unsigned int metadata_len;
1540 	unsigned int type;
1541 };
1542 
1543 #define IOCTL_KGSL_GPUOBJ_SET_INFO \
1544 	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1545 
1546 /**
1547  * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
1548  * @size: Size in bytes to back
1549  * @pagesize: Pagesize alignment required
1550  * @flags: Flags for this allocation
1551  * @id: Returned ID for this allocation
1552  */
1553 struct kgsl_sparse_phys_alloc {
1554 	uint64_t size;
1555 	uint64_t pagesize;
1556 	uint64_t flags;
1557 	unsigned int id;
1558 };
1559 
1560 #define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
1561 	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)
1562 
1563 /**
1564  * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
1565  * @id: ID to free
1566  */
1567 struct kgsl_sparse_phys_free {
1568 	unsigned int id;
1569 };
1570 
1571 #define IOCTL_KGSL_SPARSE_PHYS_FREE \
1572 	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
1573 
1574 /**
1575  * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
1576  * @size: Size in bytes to reserve
1577  * @pagesize: Pagesize alignment required
1578  * @flags: Flags for this allocation
1579  * @id: Returned ID for this allocation
1580  * @gpuaddr: Returned GPU address for this allocation
1581  */
1582 struct kgsl_sparse_virt_alloc {
1583 	uint64_t size;
1584 	uint64_t pagesize;
1585 	uint64_t flags;
1586 	uint64_t gpuaddr;
1587 	unsigned int id;
1588 };
1589 
1590 #define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
1591 	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)
1592 
1593 /**
1594  * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
1595  * @id: ID to free
1596  */
1597 struct kgsl_sparse_virt_free {
1598 	unsigned int id;
1599 };
1600 
1601 #define IOCTL_KGSL_SPARSE_VIRT_FREE \
1602 	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
1603 
1604 /**
1605  * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
1606  * @virtoffset: Offset into the virtual ID
1607  * @physoffset: Offset into the physical ID (bind only)
1608  * @size: Size in bytes to reserve
1609  * @flags: Flags for this kgsl_sparse_binding_object
1610  * @id: Physical ID to bind (bind only)
1611  */
1612 struct kgsl_sparse_binding_object {
1613 	uint64_t virtoffset;
1614 	uint64_t physoffset;
1615 	uint64_t size;
1616 	uint64_t flags;
1617 	unsigned int id;
1618 };
1619 
1620 /**
1621  * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
1622  * @list: List of kgsl_sparse_bind_objects to bind/unbind
1623  * @id: Virtual ID to bind/unbind
1624  * @size: Size of kgsl_sparse_bind_object
1625  * @count: Number of elements in list
1626  *
1627  */
1628 struct kgsl_sparse_bind {
1629 	uint64_t list;
1630 	unsigned int id;
1631 	unsigned int size;
1632 	unsigned int count;
1633 };
1634 
1635 #define IOCTL_KGSL_SPARSE_BIND \
1636 	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
1637 
1638 /**
1639  * struct kgsl_gpu_sparse_command - Argument for
1640  * IOCTL_KGSL_GPU_SPARSE_COMMAND
1641  * @flags: Current flags for the object
1642  * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
1643  * @synclist: List of kgsl_command_syncpoints
1644  * @sparsesize: Size of kgsl_sparse_binding_object
1645  * @numsparse: Number of elements in list
1646  * @sync_size: Size of kgsl_command_syncpoint structure
1647  * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1648  * @context_id: Context ID submitting the kgsl_gpu_command
1649  * @timestamp: Timestamp for the submitted commands
1650  * @id: Virtual ID to bind/unbind
1651  */
1652 struct kgsl_gpu_sparse_command {
1653 	uint64_t flags;
1654 	uint64_t sparselist;
1655 	uint64_t synclist;
1656 	unsigned int sparsesize;
1657 	unsigned int numsparse;
1658 	unsigned int syncsize;
1659 	unsigned int numsyncs;
1660 	unsigned int context_id;
1661 	unsigned int timestamp;
1662 	unsigned int id;
1663 };
1664 
1665 #define IOCTL_KGSL_GPU_SPARSE_COMMAND \
1666 	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
1667 
1668 #endif /* _UAPI_MSM_KGSL_H */
1669