/* msm_kgsl.h - userspace API definitions for the Qualcomm KGSL GPU driver */
1 #ifndef _MSM_KGSL_H
2 #define _MSM_KGSL_H
3 
4 #include <linux/types.h>
5 #include <linux/ioctl.h>
6 
7 /*
8  * The KGSL version has proven not to be very useful in userspace if features
9  * are cherry picked into other trees out of order so it is frozen as of 3.14.
10  * It is left here for backwards compatabilty and as a reminder that
11  * software releases are never linear. Also, I like pie.
12  */
13 
14 #define KGSL_VERSION_MAJOR        3
15 #define KGSL_VERSION_MINOR        14
16 
17 /*
18  * We have traditionally mixed context and issueibcmds / command batch flags
19  * together into a big flag stew. This worked fine until we started adding a
20  * lot more command batch flags and we started running out of bits. Turns out
21  * we have a bit of room in the context type / priority mask that we could use
22  * for command batches, but that means we need to split out the flags into two
23  * coherent sets.
24  *
25  * If any future definitions are for both context and cmdbatch add both defines
26  * and link the cmdbatch to the context define as we do below. Otherwise feel
27  * free to add exclusive bits to either set.
28  */
29 
/* --- context flags --- */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC               0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT     12
#define KGSL_CONTEXT_PRIORITY_UNDEF     0

#define KGSL_CONTEXT_IFH_NOP            0x00010000
#define KGSL_CONTEXT_SECURE             0x00020000
#define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
#define KGSL_CONTEXT_SPARSE             0x00080000

#define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT    0x0
#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN  0x2

#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT         20
#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4
#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E

#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000

#define KGSL_CONTEXT_INVALID 0xffffffff

/*
 * --- command batch flags ---
 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
 * definitions or bits that are valid for both contexts and cmdbatches.  To be
 * safe the other 8 bits that are still available in the context field should be
 * omitted here in case we need to share - the other bits are available for
 * cmdbatch only flags as needed
 */
#define KGSL_CMDBATCH_MEMLIST		0x00000001
#define KGSL_CMDBATCH_MARKER		0x00000002
#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
#define KGSL_CMDBATCH_PROFILING		0x00000010
/*
 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
 * to take effect, as the latter only affects the time data returned.
 */
#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
#define KGSL_CMDBATCH_SPARSE	    0x1000 /* 0x1000 */

/*
 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
 * contexts and command batches.  Update this comment as new flags are added.
 */

/*
 * gpu_command_object flags - these flags communicate the type of command or
 * memory object being submitted for a GPU command
 */

/* Flags for GPU command objects */
#define KGSL_CMDLIST_IB                  0x00000001U
#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
#define KGSL_CMDLIST_IB_PREAMBLE         0x00000004U

/* Flags for GPU command memory objects */
#define KGSL_OBJLIST_MEMOBJ  0x00000008U
#define KGSL_OBJLIST_PROFILE 0x00000010U

/* Flags for GPU command sync points */
#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
119 
/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_SECURE      0x00000008ULL
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL

/* Flag for binding all the virt range to single phys data */
#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
#define KGSL_SPARSE_BIND 0x1ULL
#define KGSL_SPARSE_UNBIND 0x2ULL

/* Memory caching hints */
#define KGSL_CACHEMODE_MASK       0x0C000000U
#define KGSL_CACHEMODE_SHIFT 26

#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED 1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK 3

#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
#define KGSL_MEMFLAGS_IOCOHERENT  0x80000000ULL

/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e 4k (2^12) would be 12, 64k (2^16)would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16

enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/*
	 * ION type is retained for backwards compatibility but Ion buffers are
	 * dma-bufs so try to use that naming if we can
	 */
	KGSL_USER_MEM_TYPE_DMABUF       = 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT 5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM 0
#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
210 
/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE  0x00000000
#define KGSL_FLAGS_SAFEMODE    0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED     0x00000008
#define KGSL_FLAGS_ACTIVE      0x00000010
#define KGSL_FLAGS_RESERVED0   0x00000020
#define KGSL_FLAGS_RESERVED1   0x00000040
#define KGSL_FLAGS_RESERVED2   0x00000080
#define KGSL_FLAGS_SOFT_RESET  0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000

/* UBWC Modes */
#define KGSL_UBWC_NONE	0
#define KGSL_UBWC_1_0	1
#define KGSL_UBWC_2_0	2
#define KGSL_UBWC_3_0	3

/*
 * Reset status values for context
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};

/*
 * Convert a bandwidth value to MBps.  The argument is fully parenthesized
 * so that expressions (e.g. KGSL_CONVERT_TO_MBPS(a + b)) expand correctly.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)

/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_MAX
};
252 
struct kgsl_devinfo {

	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	size_t gmem_sizebytes;
};

/*
 * struct kgsl_devmemstore - this structure defines the region of memory
 * that can be mmap()ed from this driver. The timestamp fields are __volatile__
 * because they are written by the GPU
 * @soptimestamp: Start of pipeline timestamp written by GPU before the
 * commands in concern are processed
 * @sbz: Unused, kept for 8 byte alignment
 * @eoptimestamp: End of pipeline timestamp written by GPU after the
 * commands in concern are processed
 * @sbz2: Unused, kept for 8 byte alignment
 * @preempted: Indicates if the context was preempted
 * @sbz3: Unused, kept for 8 byte alignment
 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
 * @sbz4: Unused, kept for 8 byte alignment
 * @current_context: The current context the GPU is working on
 * @sbz5: Unused, kept for 8 byte alignment
 */
struct kgsl_devmemstore {
	__volatile__ unsigned int soptimestamp;
	unsigned int sbz;
	__volatile__ unsigned int eoptimestamp;
	unsigned int sbz2;
	__volatile__ unsigned int preempted;
	unsigned int sbz3;
	__volatile__ unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/* Byte offset of @field for context @ctxt_id within the mmap()ed memstore */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))

/* timestamp id*/
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};
311 
/* property types - used with kgsl_device_getproperty */
#define KGSL_PROP_DEVICE_INFO		0x1
#define KGSL_PROP_DEVICE_SHADOW		0x2
#define KGSL_PROP_DEVICE_POWER		0x3
#define KGSL_PROP_SHMEM			0x4
#define KGSL_PROP_SHMEM_APERTURES	0x5
#define KGSL_PROP_MMU_ENABLE		0x6
#define KGSL_PROP_INTERRUPT_WAITS	0x7
#define KGSL_PROP_VERSION		0x8
#define KGSL_PROP_GPU_RESET_STAT	0x9
#define KGSL_PROP_PWRCTRL		0xE
#define KGSL_PROP_PWR_CONSTRAINT	0x12
#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
#define KGSL_PROP_SP_GENERIC_MEM	0x14
#define KGSL_PROP_UCODE_VERSION		0x15
#define KGSL_PROP_GPMU_VERSION		0x16
#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
#define KGSL_PROP_DEVICE_BITNESS	0x18
#define KGSL_PROP_DEVICE_QDSS_STM	0x19
#define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
#define KGSL_PROP_UBWC_MODE		0x1B
#define KGSL_PROP_DEVICE_QTIMER		0x20
#define KGSL_PROP_L3_PWR_CONSTRAINT     0x22

/* Returned for KGSL_PROP_DEVICE_SHADOW; describes the mmap()able memstore */
struct kgsl_shadowprop {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* Returned for KGSL_PROP_DEVICE_QDSS_STM */
struct kgsl_qdss_stm_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_DEVICE_QTIMER */
struct kgsl_qtimer_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_VERSION */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

/* Returned for KGSL_PROP_SP_GENERIC_MEM */
struct kgsl_sp_generic_mem {
	uint64_t local;
	uint64_t pvt;
};

/* Returned for KGSL_PROP_UCODE_VERSION */
struct kgsl_ucode_version {
	unsigned int pfp;
	unsigned int pm4;
};

/* Returned for KGSL_PROP_GPMU_VERSION */
struct kgsl_gpmu_version {
	unsigned int major;
	unsigned int minor;
	unsigned int features;
};
374 
/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
#define KGSL_PERFCOUNTER_GROUP_MH 0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
#define KGSL_PERFCOUNTER_GROUP_SX 0x12
#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
#define KGSL_PERFCOUNTER_GROUP_L2 0x16
#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
#define KGSL_PERFCOUNTER_GROUP_MAX 0x24

#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE

/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned long gpuaddr;
	unsigned long __pad;
	size_t sizedwords;
	unsigned int ctrl;
};

/**
 * struct kgsl_cmdbatch_profiling_buffer
 * @wall_clock_s: Ringbuffer submission time (seconds).
 *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
 *                in kernel clocks, otherwise wall clock time is used.
 * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
 *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
 *                 in kernel clocks, otherwise wall clock time is used.
 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
 *
 * This structure defines the profiling buffer used to measure cmdbatch
 * execution time
 */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;
	uint64_t wall_clock_ns;
	uint64_t gpu_ticks_queued;
	uint64_t gpu_ticks_submitted;
	uint64_t gpu_ticks_retired;
};
448 
/* ioctls */
#define KGSL_IOC_TYPE 0x09

/*
 * get misc info about the GPU
 * type should be a value from enum kgsl_property_type
 * value points to a structure that varies based on type
 * sizebytes is sizeof() that structure
 * for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
 * this structure contains hardware versioning info.
 * for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
 * this is used to find mmap() offset and sizes for mapping
 * struct kgsl_memstore into userspace.
 */
struct kgsl_device_getproperty {
	unsigned int type;
	void *value;
	size_t sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)

/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* as above, but waits on a timestamp belonging to a specific context */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)

/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned long ibdesc_addr;
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)

/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
531 
532 /* free memory when the GPU reaches a given timestamp.
533  * gpuaddr specify a memory region created by a
534  * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
535  * type should be a value from enum kgsl_timestamp_type
536  */
537 struct kgsl_cmdstream_freememontimestamp {
538 	unsigned long gpuaddr;
539 	unsigned int type;
540 	unsigned int timestamp;
541 };
542 
543 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
544 	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
545 
546 /*
547  * Previous versions of this header had incorrectly defined
548  * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
549  * of a write only ioctl.  To ensure binary compatibility, the following
550  * #define will be used to intercept the incorrect ioctl
551  */
552 
553 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
554 	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
555 
556 /* create a draw context, which is used to preserve GPU state.
557  * The flags field may contain a mask KGSL_CONTEXT_*  values
558  */
559 struct kgsl_drawctxt_create {
560 	unsigned int flags;
561 	unsigned int drawctxt_id; /*output param */
562 };
563 
564 #define IOCTL_KGSL_DRAWCTXT_CREATE \
565 	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
566 
567 /* destroy a draw context */
568 struct kgsl_drawctxt_destroy {
569 	unsigned int drawctxt_id;
570 };
571 
572 #define IOCTL_KGSL_DRAWCTXT_DESTROY \
573 	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
574 
575 /*
576  * add a block of pmem, fb, ashmem or user allocated address
577  * into the GPU address space
578  */
579 struct kgsl_map_user_mem {
580 	int fd;
581 	unsigned long gpuaddr;   /*output param */
582 	size_t len;
583 	size_t offset;
584 	unsigned long hostptr;   /*input param */
585 	enum kgsl_user_mem_type memtype;
586 	unsigned int flags;
587 };
588 
589 #define IOCTL_KGSL_MAP_USER_MEM \
590 	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
591 
592 struct kgsl_cmdstream_readtimestamp_ctxtid {
593 	unsigned int context_id;
594 	unsigned int type;
595 	unsigned int timestamp; /*output param */
596 };
597 
598 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
599 	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
600 
601 struct kgsl_cmdstream_freememontimestamp_ctxtid {
602 	unsigned int context_id;
603 	unsigned long gpuaddr;
604 	unsigned int type;
605 	unsigned int timestamp;
606 };
607 
608 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
609 	_IOW(KGSL_IOC_TYPE, 0x17, \
610 	struct kgsl_cmdstream_freememontimestamp_ctxtid)
611 
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr;  /*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

/* inject a user event into the CFF capture stream */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];
};

#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

/* on-chip GMEM region description used for shadow binding */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* shadow buffer backing a GMEM region */
struct kgsl_buffer_desc {
	void		*hostptr;
	unsigned long	gpuaddr;
	int		size;
	unsigned int	format;
	unsigned int	pitch;
	unsigned int	enabled;
};

struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)

/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation.  It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
707 
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)

/* allocate GPU-accessible memory; flags is a mask of KGSL_MEM* values */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

/* sync a memory range into the CFF capture stream */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)

/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void *priv;	 /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};

#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel.  Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
788 
/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use*/
	unsigned long __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)

/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use*/
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)

/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long useraddr;
/* private: reserved for future use*/
	unsigned long __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO\
	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
866 
867 /**
868  * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
869  * @gpuaddr: GPU address of the buffer to sync.
870  * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
871  * @op: a mask of KGSL_GPUMEM_CACHE_* values
872  * @offset: offset into the buffer
873  * @length: number of bytes starting from offset to perform
874  * the cache operation on
875  *
876  * Sync the L2 cache for memory headed to and from the GPU - this replaces
877  * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
878  * directions
879  *
880  */
881 struct kgsl_gpumem_sync_cache {
882 	unsigned long gpuaddr;
883 	unsigned int id;
884 	unsigned int op;
885 	size_t offset;
886 	size_t length;
887 };
888 
/* Cache operation flags for kgsl_gpumem_sync_cache::op */

/* Clean (writeback) the cache - use before the GPU reads the buffer */
#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

/* Invalidate the cache - use after the GPU writes the buffer */
#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

/* Clean and invalidate - safe for either direction */
#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

/*
 * Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct.
 * Note: the literal must be unsigned - (1 << 31) shifts into the sign bit
 * of a signed int, which is undefined behavior in C.
 */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
903 
904 /**
905  * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
906  * @groupid: Performance counter group ID
907  * @countable: Countable to select within the group
908  * @offset: Return offset of the reserved LO counter
909  * @offset_hi: Return offset of the reserved HI counter
910  *
911  * Get an available performance counter from a specified groupid.  The offset
912  * of the performance counter will be returned after successfully assigning
913  * the countable to the counter for the specified group.  An error will be
914  * returned and an offset of 0 if the groupid is invalid or there are no
915  * more counters left.  After successfully getting a perfcounter, the user
916  * must call kgsl_perfcounter_put(groupid, contable) when finished with
917  * the perfcounter to clear up perfcounter resources.
918  *
919  */
920 struct kgsl_perfcounter_get {
921 	unsigned int groupid;
922 	unsigned int countable;
923 	unsigned int offset;
924 	unsigned int offset_hi;
925 /* private: reserved for future use */
926 	unsigned int __pad; /* For future binary compatibility */
927 };
928 
929 #define IOCTL_KGSL_PERFCOUNTER_GET \
930 	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
931 
932 /**
933  * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
934  * @groupid: Performance counter group ID
935  * @countable: Countable to release within the group
936  *
937  * Put an allocated performance counter to allow others to have access to the
938  * resource that was previously taken.  This is only to be called after
939  * successfully getting a performance counter from kgsl_perfcounter_get().
940  *
941  */
942 struct kgsl_perfcounter_put {
943 	unsigned int groupid;
944 	unsigned int countable;
945 /* private: reserved for future use */
946 	unsigned int __pad[2]; /* For future binary compatibility */
947 };
948 
949 #define IOCTL_KGSL_PERFCOUNTER_PUT \
950 	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
951 
952 /**
953  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
954  * @groupid: Performance counter group ID
955  * @countable: Return active countables array
956  * @size: Size of active countables array
957  * @max_counters: Return total number counters for the group ID
958  *
959  * Query the available performance counters given a groupid.  The array
960  * *countables is used to return the current active countables in counters.
961  * The size of the array is passed in so the kernel will only write at most
962  * size or counter->size for the group id.  The total number of available
963  * counters for the group ID is returned in max_counters.
964  * If the array or size passed in are invalid, then only the maximum number
965  * of counters will be returned, no data will be written to *countables.
966  * If the groupid is invalid an error code will be returned.
967  *
968  */
969 struct kgsl_perfcounter_query {
970 	unsigned int groupid;
971 	/* Array to return the current countable for up to size counters */
972 	unsigned int *countables;
973 	unsigned int count;
974 	unsigned int max_counters;
975 /* private: reserved for future use */
976 	unsigned int __pad[2]; /* For future binary compatibility */
977 };
978 
979 #define IOCTL_KGSL_PERFCOUNTER_QUERY \
980 	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
981 
982 /**
983  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
984  * @groupid: Performance counter group IDs
985  * @countable: Performance counter countable IDs
986  * @value: Return performance counter reads
987  * @size: Size of all arrays (groupid/countable pair and return value)
988  *
989  * Read in the current value of a performance counter given by the groupid
990  * and countable.
991  *
992  */
993 
994 struct kgsl_perfcounter_read_group {
995 	unsigned int groupid;
996 	unsigned int countable;
997 	unsigned long long value;
998 };
999 
1000 struct kgsl_perfcounter_read {
1001 	struct kgsl_perfcounter_read_group *reads;
1002 	unsigned int count;
1003 /* private: reserved for future use */
1004 	unsigned int __pad[2]; /* For future binary compatibility */
1005 };
1006 
1007 #define IOCTL_KGSL_PERFCOUNTER_READ \
1008 	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
1009 /*
1010  * struct kgsl_gpumem_sync_cache_bulk - argument to
1011  * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
1012  * @id_list: list of GPU buffer ids of the buffers to sync
1013  * @count: number of GPU buffer ids in id_list
1014  * @op: a mask of KGSL_GPUMEM_CACHE_* values
1015  *
1016  * Sync the cache for memory headed to and from the GPU. Certain
1017  * optimizations can be made on the cache operation based on the total
1018  * size of the working set of memory to be managed.
1019  */
1020 struct kgsl_gpumem_sync_cache_bulk {
1021 	unsigned int *id_list;
1022 	unsigned int count;
1023 	unsigned int op;
1024 /* private: reserved for future use */
1025 	unsigned int __pad[2]; /* For future binary compatibility */
1026 };
1027 
1028 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
1029 	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
1030 
1031 /*
1032  * struct kgsl_cmd_syncpoint_timestamp
1033  * @context_id: ID of a KGSL context
1034  * @timestamp: GPU timestamp
1035  *
1036  * This structure defines a syncpoint comprising a context/timestamp pair. A
1037  * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
1038  * dependencies that must be met before the command can be submitted to the
1039  * hardware
1040  */
1041 struct kgsl_cmd_syncpoint_timestamp {
1042 	unsigned int context_id;
1043 	unsigned int timestamp;
1044 };
1045 
1046 struct kgsl_cmd_syncpoint_fence {
1047 	int fd;
1048 };
1049 
1050 /**
1051  * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
1052  * @type: type of sync point defined here
1053  * @priv: Pointer to the type specific buffer
1054  * @size: Size of the type specific buffer
1055  *
1056  * This structure contains pointers defining a specific command sync point.
1057  * The pointer and size should point to a type appropriate structure.
1058  */
1059 struct kgsl_cmd_syncpoint {
1060 	int type;
1061 	void *priv;
1062 	size_t size;
1063 };
1064 
1065 /* Flag to indicate that the cmdlist may contain memlists */
1066 #define KGSL_IBDESC_MEMLIST 0x1
1067 
1068 /* Flag to point out the cmdbatch profiling buffer in the memlist */
1069 #define KGSL_IBDESC_PROFILING_BUFFER 0x2
1070 
1071 /**
1072  * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
1073  * @context_id: KGSL context ID that owns the commands
1074  * @flags:
1075  * @cmdlist: User pointer to a list of kgsl_ibdesc structures
1076  * @numcmds: Number of commands listed in cmdlist
1077  * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
1078  * @numsyncs: Number of sync points listed in synclist
1079  * @timestamp: On entry the a user defined timestamp, on exist the timestamp
1080  * assigned to the command batch
1081  *
1082  * This structure specifies a command to send to the GPU hardware.  This is
1083  * similar to kgsl_issueibcmds expect that it doesn't support the legacy way to
1084  * submit IB lists and it adds sync points to block the IB until the
1085  * dependencies are satisified.  This entry point is the new and preferred way
1086  * to submit commands to the GPU. The memory list can be used to specify all
1087  * memory that is referrenced in the current set of commands.
1088  */
1089 
1090 struct kgsl_submit_commands {
1091 	unsigned int context_id;
1092 	unsigned int flags;
1093 	struct kgsl_ibdesc *cmdlist;
1094 	unsigned int numcmds;
1095 	struct kgsl_cmd_syncpoint *synclist;
1096 	unsigned int numsyncs;
1097 	unsigned int timestamp;
1098 /* private: reserved for future use */
1099 	unsigned int __pad[4];
1100 };
1101 
1102 #define IOCTL_KGSL_SUBMIT_COMMANDS \
1103 	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
1104 
1105 /**
1106  * struct kgsl_device_constraint - device constraint argument
1107  * @context_id: KGSL context ID
1108  * @type: type of constraint i.e pwrlevel/none
1109  * @data: constraint data
1110  * @size: size of the constraint data
1111  */
1112 struct kgsl_device_constraint {
1113 	unsigned int type;
1114 	unsigned int context_id;
1115 	void *data;
1116 	size_t size;
1117 };
1118 
1119 /* Constraint Type*/
1120 #define KGSL_CONSTRAINT_NONE 0
1121 #define KGSL_CONSTRAINT_PWRLEVEL 1
1122 
1123 /* L3 constraint Type */
1124 #define KGSL_CONSTRAINT_L3_NONE	2
1125 #define KGSL_CONSTRAINT_L3_PWRLEVEL	3
1126 
1127 /* PWRLEVEL constraint level*/
1128 /* set to min frequency */
1129 #define KGSL_CONSTRAINT_PWR_MIN 0
1130 /* set to max frequency */
1131 #define KGSL_CONSTRAINT_PWR_MAX 1
1132 
1133 /* L3 PWRLEVEL constraint level */
1134 #define KGSL_CONSTRAINT_L3_PWR_MED	0
1135 #define KGSL_CONSTRAINT_L3_PWR_MAX	1
1136 
1137 struct kgsl_device_constraint_pwrlevel {
1138 	unsigned int level;
1139 };
1140 
1141 /**
1142  * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1143  * @id: returned id for the syncsource that was created.
1144  *
1145  * This ioctl creates a userspace sync timeline.
1146  */
1147 
1148 struct kgsl_syncsource_create {
1149 	unsigned int id;
1150 /* private: reserved for future use */
1151 	unsigned int __pad[3];
1152 };
1153 
1154 #define IOCTL_KGSL_SYNCSOURCE_CREATE \
1155 	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1156 
1157 /**
1158  * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1159  * @id: syncsource id to destroy
1160  *
1161  * This ioctl creates a userspace sync timeline.
1162  */
1163 
1164 struct kgsl_syncsource_destroy {
1165 	unsigned int id;
1166 /* private: reserved for future use */
1167 	unsigned int __pad[3];
1168 };
1169 
1170 #define IOCTL_KGSL_SYNCSOURCE_DESTROY \
1171 	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1172 
1173 /**
1174  * struct kgsl_syncsource_create_fence - Argument to
1175  *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1176  * @id: syncsource id
1177  * @fence_fd: returned sync_fence fd
1178  *
1179  * Create a fence that may be signaled by userspace by calling
1180  * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1181  * these fences.
1182  */
1183 struct kgsl_syncsource_create_fence {
1184 	unsigned int id;
1185 	int fence_fd;
1186 /* private: reserved for future use */
1187 	unsigned int __pad[4];
1188 };
1189 
1190 /**
1191  * struct kgsl_syncsource_signal_fence - Argument to
1192  *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1193  * @id: syncsource id
1194  * @fence_fd: sync_fence fd to signal
1195  *
1196  * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1197  * call using the same syncsource id. This allows a fence to be shared
1198  * to other processes but only signaled by the process owning the fd
1199  * used to create the fence.
1200  */
1201 #define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
1202 	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
1203 
1204 struct kgsl_syncsource_signal_fence {
1205 	unsigned int id;
1206 	int fence_fd;
1207 /* private: reserved for future use */
1208 	unsigned int __pad[4];
1209 };
1210 
1211 #define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
1212 	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1213 
1214 /**
1215  * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
1216  * @offset: Offset into the GPU object to sync
1217  * @length: Number of bytes to sync
1218  * @id: ID of the GPU object to sync
1219  */
1220 struct kgsl_cff_sync_gpuobj {
1221 	uint64_t offset;
1222 	uint64_t length;
1223 	unsigned int id;
1224 };
1225 
1226 #define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
1227 	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
1228 
1229 /**
1230  * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
1231  * @size: Size in bytes of the object to allocate
1232  * @flags: mask of KGSL_MEMFLAG_* bits
1233  * @va_len: Size in bytes of the virtual region to allocate
1234  * @mmapsize: Returns the mmap() size of the object
1235  * @id: Returns the GPU object ID of the new object
1236  * @metadata_len: Length of the metdata to copy from the user
1237  * @metadata: Pointer to the user specified metadata to store for the object
1238  */
1239 struct kgsl_gpuobj_alloc {
1240 	uint64_t size;
1241 	uint64_t flags;
1242 	uint64_t va_len;
1243 	uint64_t mmapsize;
1244 	unsigned int id;
1245 	unsigned int metadata_len;
1246 	uint64_t metadata;
1247 };
1248 
1249 /* Let the user know that this header supports the gpuobj metadata */
1250 #define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64
1251 
1252 #define IOCTL_KGSL_GPUOBJ_ALLOC \
1253 	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1254 
1255 /**
1256  * struct kgsl_gpuobj_free - Argument to IOCTL_KGLS_GPUOBJ_FREE
1257  * @flags: Mask of: KGSL_GUPOBJ_FREE_ON_EVENT
1258  * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
1259  * specified
1260  * @id: ID of the GPU object to free
1261  * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
1262  * event to free on
1263  * @len: Length of the data passed in priv
1264  */
1265 struct kgsl_gpuobj_free {
1266 	uint64_t flags;
1267 	uint64_t priv;
1268 	unsigned int id;
1269 	unsigned int type;
1270 	unsigned int len;
1271 };
1272 
1273 #define KGSL_GPUOBJ_FREE_ON_EVENT 1
1274 
1275 #define KGSL_GPU_EVENT_TIMESTAMP 1
1276 #define KGSL_GPU_EVENT_FENCE     2
1277 
1278 /**
1279  * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
1280  * object on
1281  * @context_id: ID of the timestamp event to wait for
1282  * @timestamp: Timestamp of the timestamp event to wait for
1283  */
1284 struct kgsl_gpu_event_timestamp {
1285 	unsigned int context_id;
1286 	unsigned int timestamp;
1287 };
1288 
1289 /**
1290  * struct kgsl_gpu_event_fence - Specifies a fence ID to to free a GPU object on
1291  * @fd: File descriptor for the fence
1292  */
1293 struct kgsl_gpu_event_fence {
1294 	int fd;
1295 };
1296 
1297 #define IOCTL_KGSL_GPUOBJ_FREE \
1298 	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1299 
1300 /**
1301  * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
1302  * @gpuaddr: GPU address of the object
1303  * @flags: Current flags for the object
1304  * @size: Size of the object
1305  * @va_len: VA size of the object
1306  * @va_addr: Virtual address of the object (if it is mapped)
1307  * id - GPU object ID of the object to query
1308  */
1309 struct kgsl_gpuobj_info {
1310 	uint64_t gpuaddr;
1311 	uint64_t flags;
1312 	uint64_t size;
1313 	uint64_t va_len;
1314 	uint64_t va_addr;
1315 	unsigned int id;
1316 };
1317 
1318 #define IOCTL_KGSL_GPUOBJ_INFO \
1319 	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
1320 
1321 /**
1322  * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
1323  * @priv: Pointer to the private data for the import type
1324  * @priv_len: Length of the private data
1325  * @flags: Mask of KGSL_MEMFLAG_ flags
1326  * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
1327  * @id: Returns the ID of the new GPU object
1328  */
1329 struct kgsl_gpuobj_import {
1330 	uint64_t priv;
1331 	uint64_t priv_len;
1332 	uint64_t flags;
1333 	unsigned int type;
1334 	unsigned int id;
1335 };
1336 
1337 /**
1338  * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
1339  * @fd: File descriptor for the dma-buf object
1340  */
1341 struct kgsl_gpuobj_import_dma_buf {
1342 	int fd;
1343 };
1344 
1345 /**
1346  * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
1347  * @virtaddr: Virtual address of the object to import
1348  */
1349 struct kgsl_gpuobj_import_useraddr {
1350 	uint64_t virtaddr;
1351 };
1352 
1353 #define IOCTL_KGSL_GPUOBJ_IMPORT \
1354 	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
1355 
1356 /**
1357  * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
1358  * @offset: Offset within the GPU object to sync
1359  * @length: Number of bytes to sync
1360  * @id: ID of the GPU object to sync
1361  * @op: Cache operation to execute
1362  */
1363 
1364 struct kgsl_gpuobj_sync_obj {
1365 	uint64_t offset;
1366 	uint64_t length;
1367 	unsigned int id;
1368 	unsigned int op;
1369 };
1370 
1371 /**
1372  * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
1373  * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
1374  * @obj_len: Size of each item in the array
1375  * @count: Number of items in the array
1376  */
1377 
1378 struct kgsl_gpuobj_sync {
1379 	uint64_t objs;
1380 	unsigned int obj_len;
1381 	unsigned int count;
1382 };
1383 
1384 #define IOCTL_KGSL_GPUOBJ_SYNC \
1385 	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1386 
1387 /**
1388  * struct kgsl_command_object - GPU command object
1389  * @offset: GPU address offset of the object
1390  * @gpuaddr: GPU address of the object
1391  * @size: Size of the object
1392  * @flags: Current flags for the object
1393  * @id - GPU command object ID
1394  */
1395 struct kgsl_command_object {
1396 	uint64_t offset;
1397 	uint64_t gpuaddr;
1398 	uint64_t size;
1399 	unsigned int flags;
1400 	unsigned int id;
1401 };
1402 
1403 /**
1404  * struct kgsl_command_syncpoint - GPU syncpoint object
1405  * @priv: Pointer to the type specific buffer
1406  * @size: Size of the type specific buffer
1407  * @type: type of sync point defined here
1408  */
1409 struct kgsl_command_syncpoint {
1410 	uint64_t priv;
1411 	uint64_t size;
1412 	unsigned int type;
1413 };
1414 
1415 /**
1416  * struct kgsl_command_object - Argument for IOCTL_KGSL_GPU_COMMAND
1417  * @flags: Current flags for the object
1418  * @cmdlist: List of kgsl_command_objects for submission
1419  * @cmd_size: Size of kgsl_command_objects structure
1420  * @numcmds: Number of kgsl_command_objects in command list
1421  * @objlist: List of kgsl_command_objects for tracking
1422  * @obj_size: Size of kgsl_command_objects structure
1423  * @numobjs: Number of kgsl_command_objects in object list
1424  * @synclist: List of kgsl_command_syncpoints
1425  * @sync_size: Size of kgsl_command_syncpoint structure
1426  * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1427  * @context_id: Context ID submittin ghte kgsl_gpu_command
1428  * @timestamp: Timestamp for the submitted commands
1429  */
1430 struct kgsl_gpu_command {
1431 	uint64_t flags;
1432 	uint64_t cmdlist;
1433 	unsigned int cmdsize;
1434 	unsigned int numcmds;
1435 	uint64_t objlist;
1436 	unsigned int objsize;
1437 	unsigned int numobjs;
1438 	uint64_t synclist;
1439 	unsigned int syncsize;
1440 	unsigned int numsyncs;
1441 	unsigned int context_id;
1442 	unsigned int timestamp;
1443 };
1444 
1445 #define IOCTL_KGSL_GPU_COMMAND \
1446 	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1447 
1448 /**
1449  * struct kgsl_preemption_counters_query - argument to
1450  * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
1451  * @counters: Return preemption counters array
1452  * @size_user: Size allocated by userspace
1453  * @size_priority_level: Size of preemption counters for each
1454  * priority level
1455  * @max_priority_level: Return max number of priority levels
1456  *
1457  * Query the available preemption counters. The array counters
1458  * is used to return preemption counters. The size of the array
1459  * is passed in so the kernel will only write at most size_user
1460  * or max available preemption counters.  The total number of
1461  * preemption counters is returned in max_priority_level. If the
1462  * array or size passed in are invalid, then an error is
1463  * returned back.
1464  */
1465 struct kgsl_preemption_counters_query {
1466 	uint64_t counters;
1467 	unsigned int size_user;
1468 	unsigned int size_priority_level;
1469 	unsigned int max_priority_level;
1470 };
1471 
1472 #define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
1473 	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
1474 
1475 /**
1476  * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
1477  * @flags: Flags to indicate which paramaters to change
1478  * @metadata:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
1479  * metadata
1480  * @id: GPU memory object ID to change
1481  * @metadata_len:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
1482  * new metadata string
1483  * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
1484  */
1485 
1486 #define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
1487 #define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)
1488 
1489 struct kgsl_gpuobj_set_info {
1490 	uint64_t flags;
1491 	uint64_t metadata;
1492 	unsigned int id;
1493 	unsigned int metadata_len;
1494 	unsigned int type;
1495 };
1496 
1497 #define IOCTL_KGSL_GPUOBJ_SET_INFO \
1498 	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1499 
1500 /**
1501  * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
1502  * @size: Size in bytes to back
1503  * @pagesize: Pagesize alignment required
1504  * @flags: Flags for this allocation
1505  * @id: Returned ID for this allocation
1506  */
1507 struct kgsl_sparse_phys_alloc {
1508 	uint64_t size;
1509 	uint64_t pagesize;
1510 	uint64_t flags;
1511 	unsigned int id;
1512 };
1513 
1514 #define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
1515 	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)
1516 
1517 /**
1518  * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
1519  * @id: ID to free
1520  */
1521 struct kgsl_sparse_phys_free {
1522 	unsigned int id;
1523 };
1524 
1525 #define IOCTL_KGSL_SPARSE_PHYS_FREE \
1526 	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
1527 
1528 /**
1529  * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
1530  * @size: Size in bytes to reserve
1531  * @pagesize: Pagesize alignment required
1532  * @flags: Flags for this allocation
1533  * @id: Returned ID for this allocation
1534  * @gpuaddr: Returned GPU address for this allocation
1535  */
1536 struct kgsl_sparse_virt_alloc {
1537 	uint64_t size;
1538 	uint64_t pagesize;
1539 	uint64_t flags;
1540 	uint64_t gpuaddr;
1541 	unsigned int id;
1542 };
1543 
1544 #define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
1545 	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)
1546 
1547 /**
1548  * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
1549  * @id: ID to free
1550  */
1551 struct kgsl_sparse_virt_free {
1552 	unsigned int id;
1553 };
1554 
1555 #define IOCTL_KGSL_SPARSE_VIRT_FREE \
1556 	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
1557 
1558 /**
1559  * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
1560  * @virtoffset: Offset into the virtual ID
1561  * @physoffset: Offset into the physical ID (bind only)
1562  * @size: Size in bytes to reserve
1563  * @flags: Flags for this kgsl_sparse_binding_object
1564  * @id: Physical ID to bind (bind only)
1565  */
1566 struct kgsl_sparse_binding_object {
1567 	uint64_t virtoffset;
1568 	uint64_t physoffset;
1569 	uint64_t size;
1570 	uint64_t flags;
1571 	unsigned int id;
1572 };
1573 
1574 /**
1575  * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
1576  * @list: List of kgsl_sparse_bind_objects to bind/unbind
1577  * @id: Virtual ID to bind/unbind
1578  * @size: Size of kgsl_sparse_bind_object
1579  * @count: Number of elements in list
1580  *
1581  */
1582 struct kgsl_sparse_bind {
1583 	uint64_t list;
1584 	unsigned int id;
1585 	unsigned int size;
1586 	unsigned int count;
1587 };
1588 
1589 #define IOCTL_KGSL_SPARSE_BIND \
1590 	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
1591 
1592 /**
1593  * struct kgsl_gpu_sparse_command - Argument for
1594  * IOCTL_KGSL_GPU_SPARSE_COMMAND
1595  * @flags: Current flags for the object
1596  * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
1597  * @synclist: List of kgsl_command_syncpoints
1598  * @sparsesize: Size of kgsl_sparse_binding_object
1599  * @numsparse: Number of elements in list
1600  * @sync_size: Size of kgsl_command_syncpoint structure
1601  * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1602  * @context_id: Context ID submitting the kgsl_gpu_command
1603  * @timestamp: Timestamp for the submitted commands
1604  * @id: Virtual ID to bind/unbind
1605  */
1606 struct kgsl_gpu_sparse_command {
1607 	uint64_t flags;
1608 	uint64_t sparselist;
1609 	uint64_t synclist;
1610 	unsigned int sparsesize;
1611 	unsigned int numsparse;
1612 	unsigned int syncsize;
1613 	unsigned int numsyncs;
1614 	unsigned int context_id;
1615 	unsigned int timestamp;
1616 	unsigned int id;
1617 };
1618 
1619 #define IOCTL_KGSL_GPU_SPARSE_COMMAND \
1620 	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
1621 
1622 #endif /* _MSM_KGSL_H */
1623