• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) The Asahi Linux Contributors
4  *
5  * Based on panfrost_drm.h which is
6  *
7  * Copyright © 2014-2018 Broadcom
8  * Copyright © 2019 Collabora ltd.
9  */
10 /* clang-format off */
11 #ifndef _ASAHI_DRM_H_
12 #define _ASAHI_DRM_H_
13 
14 #include "drm-uapi/drm.h"
15 
16 #if defined(__cplusplus)
17 extern "C" {
18 #endif
19 
20 /*
21  * The UAPI defined in this file MUST NOT BE USED. End users, DO NOT attempt to
22  * use upstream Mesa with asahi kernels, it will blow up. Distro packagers, DO
23  * NOT patch upstream Mesa to do the same.
24  */
/* Bumped on every UABI break; checked by the #if guards in drm_asahi_cmd_compute below */
#define DRM_ASAHI_UNSTABLE_UABI_VERSION (0xDEADBEEF)

/* Per-driver ioctl numbers, offset from DRM_COMMAND_BASE (see the ioctl enum at the end of this file) */
#define DRM_ASAHI_GET_PARAMS			0x00
#define DRM_ASAHI_VM_CREATE			0x01
#define DRM_ASAHI_VM_DESTROY			0x02
#define DRM_ASAHI_GEM_CREATE			0x03
#define DRM_ASAHI_GEM_MMAP_OFFSET		0x04
#define DRM_ASAHI_GEM_BIND			0x05
#define DRM_ASAHI_QUEUE_CREATE			0x06
#define DRM_ASAHI_QUEUE_DESTROY			0x07
#define DRM_ASAHI_SUBMIT			0x08
#define DRM_ASAHI_GET_TIME			0x09
/* TODO: Maybe merge with DRM_ASAHI_GEM_BIND? (Becomes IOWR) */
#define DRM_ASAHI_GEM_BIND_OBJECT		0x0a

/* Sizes drm_asahi_params_global.core_masks[] */
/* TODO: Bump to 64, just in case? */
#define DRM_ASAHI_MAX_CLUSTERS	32
42 
struct drm_asahi_params_global {
	/** @unstable_uabi_version: Kernel's unstable UABI version (cf. DRM_ASAHI_UNSTABLE_UABI_VERSION) */
	__u32 unstable_uabi_version;
	/** @pad0: MBZ */
	__u32 pad0;

	/** @feat_compat: Compatible feature bits, from drm_asahi_feat_compat */
	__u64 feat_compat;
	/** @feat_incompat: Incompatible feature bits, from drm_asahi_feat_incompat */
	__u64 feat_incompat;

	/** @gpu_generation: GPU generation, e.g. 13 for G13G */
	__u32 gpu_generation;
	/** @gpu_variant: GPU variant as a character, e.g. 'G' for G13G */
	__u32 gpu_variant;
	/** @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0', 0x21 for 'C1' */
	__u32 gpu_revision;
	/** @chip_id: Chip ID in BCD, e.g. 0x8103 for T8103 */
	__u32 chip_id;

	/** @num_dies: Number of dies in the SoC */
	__u32 num_dies;
	/** @num_clusters_total: Number of GPU clusters (across all dies) */
	__u32 num_clusters_total;
	/** @num_cores_per_cluster: Number of logical cores per cluster
	 *  (including inactive/nonexistent) */
	__u32 num_cores_per_cluster;
	/** @num_frags_per_cluster: Number of frags per cluster */
	__u32 num_frags_per_cluster;
	/** @num_gps_per_cluster: Number of GPs per cluster */
	__u32 num_gps_per_cluster;
	/** @num_cores_total_active: Total number of active cores (total bit weight of core_masks) */
	__u32 num_cores_total_active;
	/** @core_masks: Bitmask of present/enabled cores per cluster */
	__u64 core_masks[DRM_ASAHI_MAX_CLUSTERS];

	/** @vm_page_size: GPU VM page size */
	__u32 vm_page_size;
	/** @pad1: Padding, MBZ */
	__u32 pad1;
	/** @vm_user_start: VM user range start VMA */
	__u64 vm_user_start;
	/** @vm_user_end: VM user range end VMA */
	__u64 vm_user_end;
	/** @vm_usc_start: VM USC region start VMA (zero if flexible) */
	__u64 vm_usc_start;
	/** @vm_usc_end: VM USC region end VMA (zero if flexible) */
	__u64 vm_usc_end;
	/** @vm_kernel_min_size: Minimum kernel VMA window size within user range */
	__u64 vm_kernel_min_size;

	/** @max_syncs_per_submission: Maximum number of supported sync objects per submission */
	__u32 max_syncs_per_submission;
	/** @max_commands_per_submission: Maximum number of supported commands per submission */
	__u32 max_commands_per_submission;
	/** @max_commands_in_flight: Maximum number of commands simultaneously in flight per queue */
	/* TODO: Remove? */
	__u32 max_commands_in_flight;
	/** @max_attachments: Maximum number of attachments per command */
	__u32 max_attachments;

	/** @timer_frequency_hz: Clock frequency for timestamps */
	/* TODO: Switch to u64 */
	__u32 timer_frequency_hz;
	/** @min_frequency_khz: Minimum GPU core clock frequency */
	__u32 min_frequency_khz;
	/** @max_frequency_khz: Maximum GPU core clock frequency */
	__u32 max_frequency_khz;
	/** @max_power_mw: Maximum GPU power consumption */
	__u32 max_power_mw;

	/** @result_render_size: Result structure size for render commands */
	__u32 result_render_size;
	/** @result_compute_size: Result structure size for compute commands */
	__u32 result_compute_size;

	/** @firmware_version: GPU firmware version, as 4 integers */
	/* TODO: Do something to distinguish iOS */
	__u32 firmware_version[4];

	/** @user_timestamp_frequency_hz: Timebase frequency for user timestamps */
	__u64 user_timestamp_frequency_hz;
};
124 
/** Compatible feature bits */
enum drm_asahi_feat_compat {
	/** GPU has soft faults enabled (for USC and texture sampling) */
	DRM_ASAHI_FEAT_SOFT_FAULTS = (1UL) << 0,
	/** Driver implements the DRM_ASAHI_GET_TIME ioctl */
	DRM_ASAHI_FEAT_GETTIME = (1UL) << 1, /* Remove for upstream */
	/** Driver supports the user timestamp extensions
	 *  (drm_asahi_cmd_render/compute_user_timestamps) */
	DRM_ASAHI_FEAT_USER_TIMESTAMPS = (1UL) << 2,
};

/** Incompatible feature bits */
enum drm_asahi_feat_incompat {
	/** GPU requires compression for Z/S buffers */
	DRM_ASAHI_FEAT_MANDATORY_ZS_COMPRESSION = (1UL) << 0,
};
138 
/** Get driver/GPU parameters */
struct drm_asahi_get_params {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @param_group: Parameter group to fetch (MBZ) */
	__u32 param_group;

	/** @pad: MBZ */
	__u32 pad;

	/** @pointer: User pointer to write the parameter struct to */
	__u64 pointer;

	/** @size: Size of the user buffer; max size supported on return */
	__u64 size;
};
156 
/** Create a GPU VM address space */
struct drm_asahi_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @kernel_start: Start of the kernel-reserved address range */
	__u64 kernel_start;

	/** @kernel_end: End of the kernel-reserved address range */
	__u64 kernel_end;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;
};
174 
/** Destroy a GPU VM address space */
struct drm_asahi_vm_destroy {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: VM ID to be destroyed */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;
};
/** BO should be CPU-mapped as writeback, not write-combine (optimize for CPU reads) */
#define ASAHI_GEM_WRITEBACK	(1L << 0)
/** BO is private to this GPU VM (no exports) */
#define ASAHI_GEM_VM_PRIVATE	(1L << 1)

/** Create a GEM buffer object */
struct drm_asahi_gem_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @size: Size of the BO */
	__u64 size;

	/** @flags: BO creation flags (zero or more of ASAHI_GEM_*) */
	__u32 flags;

	/** @vm_id: VM ID to assign to the BO, if ASAHI_GEM_VM_PRIVATE is set. */
	__u32 vm_id;

	/** @handle: Returned GEM handle for the BO */
	__u32 handle;

	/** @pad: MBZ */
	__u32 pad;
};
211 
/** Get BO mmap offset */
struct drm_asahi_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

	/** @flags: Must be zero */
	__u32 flags;

	/** @offset: Returned fake offset to use for the subsequent mmap call */
	__u64 offset;
};
226 
/** VM_BIND operations */
enum drm_asahi_bind_op {
	/** Bind a BO to a GPU VMA range */
	ASAHI_BIND_OP_BIND = 0,
	/** Unbind a GPU VMA range */
	ASAHI_BIND_OP_UNBIND = 1,
	/** Unbind all mappings of a given BO */
	ASAHI_BIND_OP_UNBIND_ALL = 2,
};

/** Map BO with GPU read permission */
#define ASAHI_BIND_READ		(1L << 0)
/** Map BO with GPU write permission */
#define ASAHI_BIND_WRITE	(1L << 1)

/** BO VM_BIND operations */
struct drm_asahi_gem_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @op: Bind operation (enum drm_asahi_bind_op) */
	__u32 op;

	/** @flags: One or more of ASAHI_BIND_* (BIND only) */
	__u32 flags;

	/** @handle: GEM object to bind/unbind (BIND or UNBIND_ALL) */
	__u32 handle;

	/** @vm_id: The ID of the VM to operate on */
	__u32 vm_id;

	/** @offset: Offset into the object (BIND only) */
	__u64 offset;

	/** @range: Number of bytes to bind/unbind to addr (BIND or UNBIND only) */
	__u64 range;

	/** @addr: Address to bind to (BIND or UNBIND only) */
	__u64 addr;
};
268 
/** Special-object bind operations */
enum drm_asahi_bind_object_op {
	/** Bind a BO as a special GPU object */
	ASAHI_BIND_OBJECT_OP_BIND = 0,
	/** Unbind a special GPU object */
	ASAHI_BIND_OBJECT_OP_UNBIND = 1,
};

/** Map a BO as a timestamp buffer */
#define ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS	(1L << 0)

/** BO special object operations */
struct drm_asahi_gem_bind_object {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @op: Bind operation (enum drm_asahi_bind_object_op) */
	__u32 op;

	/** @flags: One or more of ASAHI_BIND_OBJECT_* */
	__u32 flags;

	/** @handle: GEM object to bind/unbind (BIND) */
	__u32 handle;

	/** @vm_id: The ID of the VM to operate on (MBZ currently) */
	__u32 vm_id;

	/** @offset: Offset into the object (BIND only) */
	__u64 offset;

	/** @range: Number of bytes to bind/unbind (BIND only) */
	__u64 range;

	/** @object_handle: Object handle (out for BIND, in for UNBIND) */
	__u32 object_handle;

	/** @pad: MBZ */
	__u32 pad;
};
309 
/** Command type */
enum drm_asahi_cmd_type {
	/** Render command (Render subqueue, Vert+Frag) */
	DRM_ASAHI_CMD_RENDER = 0,
	/** Blit command (Render subqueue, Frag only, not yet supported) */
	DRM_ASAHI_CMD_BLIT = 1,
	/** Compute command (Compute subqueue) */
	DRM_ASAHI_CMD_COMPUTE = 2,
};

/** Queue capabilities */
/* Note: this is an enum so that it can be resolved by Rust bindgen. */
/* Each cap bit is (1 << cmd_type), so a queue's cap mask can be tested
 * directly against a drm_asahi_cmd_type value. */
enum drm_asahi_queue_cap {
	/** Supports render commands */
	DRM_ASAHI_QUEUE_CAP_RENDER	= (1UL << DRM_ASAHI_CMD_RENDER),
	/** Supports blit commands */
	DRM_ASAHI_QUEUE_CAP_BLIT	= (1UL << DRM_ASAHI_CMD_BLIT),
	/** Supports compute commands */
	DRM_ASAHI_QUEUE_CAP_COMPUTE	= (1UL << DRM_ASAHI_CMD_COMPUTE),
};
330 
/** Create a queue */
struct drm_asahi_queue_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @flags: MBZ */
	__u32 flags;

	/** @vm_id: The ID of the VM this queue is bound to */
	__u32 vm_id;

	/** @queue_caps: Bitmask of DRM_ASAHI_QUEUE_CAP_* */
	__u32 queue_caps;

	/** @priority: Queue priority, 0-3 */
	__u32 priority;

	/** @queue_id: The returned queue ID */
	__u32 queue_id;

	/** @pad: MBZ */
	__u32 pad;
};

/** Destroy a queue */
struct drm_asahi_queue_destroy {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @queue_id: The queue ID to be destroyed */
	__u32 queue_id;

	/** @pad: MBZ */
	__u32 pad;
};
366 
/** Sync item types */
enum drm_asahi_sync_type {
	/** Simple sync object */
	DRM_ASAHI_SYNC_SYNCOBJ = 0,
	/** Timeline sync object */
	DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1,
};

/** Sync item */
struct drm_asahi_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @sync_type: One of drm_asahi_sync_type */
	__u32 sync_type;

	/** @handle: The sync object handle */
	__u32 handle;

	/** @timeline_value: Timeline value for timeline sync objects
	 *  (presumably ignored for plain syncobjs — verify in driver) */
	__u64 timeline_value;
};

/** Sub-queues within a queue */
enum drm_asahi_subqueue {
	/** Render subqueue (also blit) */
	DRM_ASAHI_SUBQUEUE_RENDER = 0,
	/** Compute subqueue */
	DRM_ASAHI_SUBQUEUE_COMPUTE = 1,
	/** Queue count, must remain multiple of 2 for struct alignment */
	DRM_ASAHI_SUBQUEUE_COUNT = 2,
};
399 
/** Command index for no barrier */
#define DRM_ASAHI_BARRIER_NONE ~(0U)

/** Top level command structure */
struct drm_asahi_command {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @cmd_type: One of drm_asahi_cmd_type */
	__u32 cmd_type;

	/** @flags: Flags for command submission */
	__u32 flags;

	/** @cmd_buffer: Pointer to the appropriate command buffer structure */
	__u64 cmd_buffer;

	/** @cmd_buffer_size: Size of the command buffer structure */
	__u64 cmd_buffer_size;

	/** @result_offset: Offset into the result BO to return information about this command */
	__u64 result_offset;

	/** @result_size: Size of the result data structure */
	__u64 result_size;

	/** @barriers: Array of command indices per subqueue to wait on
	 *  (DRM_ASAHI_BARRIER_NONE for no barrier) */
	__u32 barriers[DRM_ASAHI_SUBQUEUE_COUNT];
};
429 
/** Submit an array of commands to a queue */
struct drm_asahi_submit {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @in_syncs: An optional array of drm_asahi_sync to wait on before starting this job. */
	__u64 in_syncs;

	/** @out_syncs: An optional array of drm_asahi_sync objects to signal upon completion. */
	__u64 out_syncs;

	/** @commands: Pointer to the drm_asahi_command array of commands to submit. */
	__u64 commands;

	/** @flags: Flags for command submission (MBZ) */
	__u32 flags;

	/** @queue_id: The queue ID to be submitted to */
	__u32 queue_id;

	/** @result_handle: An optional BO handle to place result data in */
	__u32 result_handle;

	/** @in_sync_count: Number of sync objects to wait on before starting this job. */
	__u32 in_sync_count;

	/** @out_sync_count: Number of sync objects to signal upon completion of this job. */
	__u32 out_sync_count;

	/** @command_count: Number of commands to be submitted */
	__u32 command_count;
};
462 
/** An attachment definition for a shader stage */
struct drm_asahi_attachment {
	/** @pointer: Base address of the attachment */
	__u64 pointer;
	/** @size: Size of the attachment in bytes */
	__u64 size;
	/** @order: Power-of-2 exponent related to the attachment size
	 *  (exact meaning unconfirmed) */
	__u32 order;
	/** @flags: MBZ */
	__u32 flags;
};
474 
/** XXX investigate real meaning */
#define ASAHI_RENDER_NO_CLEAR_PIPELINE_TEXTURES (1UL << 0)
/** XXX investigate real meaning */
#define ASAHI_RENDER_SET_WHEN_RELOADING_Z_OR_S (1UL << 1)
/** Vertex stage shader spills */
#define ASAHI_RENDER_VERTEX_SPILLS (1UL << 2)
/** Process empty tiles through the fragment load/store */
#define ASAHI_RENDER_PROCESS_EMPTY_TILES (1UL << 3)
/** Run vertex stage on a single cluster (on multicluster GPUs) */
#define ASAHI_RENDER_NO_VERTEX_CLUSTERING (1UL << 4)
/** Enable MSAA for Z/S */
#define ASAHI_RENDER_MSAA_ZS (1UL << 5)
/** Disable preemption (XXX check) */
#define ASAHI_RENDER_NO_PREEMPTION (1UL << 6)

/** Render command submission data */
struct drm_asahi_cmd_render {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @flags: Zero or more of ASAHI_RENDER_* */
	__u64 flags;

	/* Encoder stream pointer and per-stage USC (shader) base addresses.
	 * NOTE(review): assumed to be GPU VAs — semantics are hardware-specific. */
	__u64 encoder_ptr;
	__u64 vertex_usc_base;
	__u64 fragment_usc_base;

	/* Per-stage attachment arrays with their element counts
	 * (presumably user pointers to drm_asahi_attachment arrays — verify) */
	__u64 vertex_attachments;
	__u64 fragment_attachments;
	__u32 vertex_attachment_count;
	__u32 fragment_attachment_count;

	/* Per-stage helper program configuration */
	__u32 vertex_helper_program;
	__u32 fragment_helper_program;
	__u32 vertex_helper_cfg;
	__u32 fragment_helper_cfg;
	__u64 vertex_helper_arg;
	__u64 fragment_helper_arg;

	/* Depth buffer addresses and strides for the load, store and
	 * partial-render paths, plus the compression metadata ("meta")
	 * equivalents of each */
	__u64 depth_buffer_load;
	__u64 depth_buffer_load_stride;
	__u64 depth_buffer_store;
	__u64 depth_buffer_store_stride;
	__u64 depth_buffer_partial;
	__u64 depth_buffer_partial_stride;
	__u64 depth_meta_buffer_load;
	__u64 depth_meta_buffer_load_stride;
	__u64 depth_meta_buffer_store;
	__u64 depth_meta_buffer_store_stride;
	__u64 depth_meta_buffer_partial;
	__u64 depth_meta_buffer_partial_stride;

	/* Stencil equivalents of the depth fields above */
	__u64 stencil_buffer_load;
	__u64 stencil_buffer_load_stride;
	__u64 stencil_buffer_store;
	__u64 stencil_buffer_store_stride;
	__u64 stencil_buffer_partial;
	__u64 stencil_buffer_partial_stride;
	__u64 stencil_meta_buffer_load;
	__u64 stencil_meta_buffer_load_stride;
	__u64 stencil_meta_buffer_store;
	__u64 stencil_meta_buffer_store_stride;
	__u64 stencil_meta_buffer_partial;
	__u64 stencil_meta_buffer_partial_stride;

	/* Auxiliary buffer pointers */
	__u64 scissor_array;
	__u64 depth_bias_array;
	__u64 visibility_result_buffer;

	/* Per-stage sampler arrays */
	__u64 vertex_sampler_array;
	__u32 vertex_sampler_count;
	__u32 vertex_sampler_max;

	__u64 fragment_sampler_array;
	__u32 fragment_sampler_count;
	__u32 fragment_sampler_max;

	/* Raw hardware control words (hardware-dependent) */
	__u64 zls_ctrl;
	__u64 ppp_multisamplectl;
	__u32 ppp_ctrl;

	/* Framebuffer and tile dimensions */
	__u32 fb_width;
	__u32 fb_height;

	__u32 utile_width;
	__u32 utile_height;

	__u32 samples;
	__u32 layers;

	/* IDs for this encoder and its TA/3D commands (presumably the
	 * vertex/fragment halves — verify against driver) */
	__u32 encoder_id;
	__u32 cmd_ta_id;
	__u32 cmd_3d_id;

	__u32 sample_size;
	__u32 tib_blocks;
	__u32 iogpu_unk_214;

	__u32 merge_upper_x;
	__u32 merge_upper_y;

	/* Tile load/store and partial-render pipeline words and their
	 * binding values (hardware-specific encoding) */
	__u32 load_pipeline;
	__u32 load_pipeline_bind;

	__u32 store_pipeline;
	__u32 store_pipeline_bind;

	__u32 partial_reload_pipeline;
	__u32 partial_reload_pipeline_bind;

	__u32 partial_store_pipeline;
	__u32 partial_store_pipeline_bind;

	__u32 depth_dimensions;
	__u32 isp_bgobjdepth;
	__u32 isp_bgobjvals;
};
592 
/*
 * Unknown/unstable tuning flags, consumed via the __u64
 * drm_asahi_cmd_render_unknowns.flags field.
 *
 * These must use ULL, not UL: bits 40-47 are used below, and on ILP32
 * targets `unsigned long` is 32 bits wide, making `1UL << 40` undefined
 * behavior (shift count >= type width). ULL guarantees at least 64 bits,
 * and the values are unchanged on LP64.
 */
#define ASAHI_RENDER_UNK_UNK1			(1ULL << 0)
#define ASAHI_RENDER_UNK_SET_TILE_CONFIG	(1ULL << 1)
#define ASAHI_RENDER_UNK_SET_UTILE_CONFIG	(1ULL << 2)
#define ASAHI_RENDER_UNK_SET_AUX_FB_UNK		(1ULL << 3)
#define ASAHI_RENDER_UNK_SET_G14_UNK		(1ULL << 4)

#define ASAHI_RENDER_UNK_SET_FRG_UNK_140	(1ULL << 20)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_158	(1ULL << 21)
#define ASAHI_RENDER_UNK_SET_FRG_TILECFG	(1ULL << 22)
#define ASAHI_RENDER_UNK_SET_LOAD_BGOBJVALS	(1ULL << 23)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_38		(1ULL << 24)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_3C		(1ULL << 25)

#define ASAHI_RENDER_UNK_SET_RELOAD_ZLSCTRL	(1ULL << 27)
#define ASAHI_RENDER_UNK_SET_UNK_BUF_10		(1ULL << 28)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_MASK	(1ULL << 29)

#define ASAHI_RENDER_UNK_SET_IOGPU_UNK54	(1ULL << 40)
#define ASAHI_RENDER_UNK_SET_IOGPU_UNK56	(1ULL << 41)
#define ASAHI_RENDER_UNK_SET_TILING_CONTROL	(1ULL << 42)
#define ASAHI_RENDER_UNK_SET_TILING_CONTROL_2	(1ULL << 43)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_F0		(1ULL << 44)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_F8		(1ULL << 45)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_118	(1ULL << 46)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_MASK	(1ULL << 47)

/** Extension type ID for drm_asahi_cmd_render_unknowns */
#define ASAHI_RENDER_EXT_UNKNOWNS	0xff00
620 
/* XXX: Do not upstream this struct */
struct drm_asahi_cmd_render_unknowns {
	/** @type: Type ID of this extension (presumably ASAHI_RENDER_EXT_UNKNOWNS) */
	__u32 type;
	/** @pad: MBZ */
	__u32 pad;
	/** @next: Pointer to the next extension struct, if any */
	__u64 next;

	/** @flags: ASAHI_RENDER_UNK_* bits. Field names below mirror the
	 *  flag names — presumably each value is applied only when its
	 *  corresponding flag is set; verify against the driver. */
	__u64 flags;

	__u64 tile_config;
	__u64 utile_config;

	__u64 aux_fb_unk;
	__u64 g14_unk;
	__u64 frg_unk_140;
	__u64 frg_unk_158;
	__u64 frg_tilecfg;
	__u64 load_bgobjvals;
	__u64 frg_unk_38;
	__u64 frg_unk_3c;
	__u64 reload_zlsctrl;
	__u64 unk_buf_10;
	__u64 frg_unk_mask;

	__u64 iogpu_unk54;
	__u64 iogpu_unk56;
	__u64 tiling_control;
	__u64 tiling_control_2;
	__u64 vtx_unk_f0;
	__u64 vtx_unk_f8;
	__u64 vtx_unk_118;
	__u64 vtx_unk_mask;
};
655 
/** Extension type ID for drm_asahi_cmd_render_user_timestamps */
#define ASAHI_RENDER_EXT_TIMESTAMPS	0x0001

/** User timestamp buffers for render commands */
struct drm_asahi_cmd_render_user_timestamps {
	/** @type: Type ID of this extension (ASAHI_RENDER_EXT_TIMESTAMPS) */
	__u32 type;
	/** @pad: MBZ */
	__u32 pad;
	/** @next: Pointer to the next extension struct, if any */
	__u64 next;

	/** @vtx_start_handle: Handle of the timestamp buffer for the vertex start ts */
	__u32 vtx_start_handle;
	/** @vtx_start_offset: Offset into the timestamp buffer of the vertex start ts */
	__u32 vtx_start_offset;

	/** @vtx_end_handle: Handle of the timestamp buffer for the vertex end ts */
	__u32 vtx_end_handle;
	/** @vtx_end_offset: Offset into the timestamp buffer of the vertex end ts */
	__u32 vtx_end_offset;

	/** @frg_start_handle: Handle of the timestamp buffer for the fragment start ts */
	__u32 frg_start_handle;
	/** @frg_start_offset: Offset into the timestamp buffer of the fragment start ts */
	__u32 frg_start_offset;

	/** @frg_end_handle: Handle of the timestamp buffer for the fragment end ts */
	__u32 frg_end_handle;
	/** @frg_end_offset: Offset into the timestamp buffer of the fragment end ts */
	__u32 frg_end_offset;
};
687 
/* XXX check */
#define ASAHI_COMPUTE_NO_PREEMPTION (1UL << 0)

/** Compute command submission data */
struct drm_asahi_cmd_compute {
	/* TODO: remove guards on next bump */
#if DRM_ASAHI_UNSTABLE_UABI_VERSION > 10011
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
#endif

	/** @flags: Zero or more of ASAHI_COMPUTE_* */
	__u64 flags;

	/* Encoder stream bounds and USC (shader) base address.
	 * NOTE(review): assumed to be GPU VAs — hardware-specific. */
	__u64 encoder_ptr;
	__u64 encoder_end;
	__u64 usc_base;

	/* Attachment array with its element count (presumably a user
	 * pointer to a drm_asahi_attachment array — verify) */
	__u64 attachments;
	__u32 attachment_count;
	/** @pad: MBZ */
	__u32 pad;

	/* Helper program configuration */
	__u32 helper_program;
	__u32 helper_cfg;
	__u64 helper_arg;

	__u32 encoder_id;
	__u32 cmd_id;

	/* Sampler array */
	__u64 sampler_array;
	__u32 sampler_count;
	__u32 sampler_max;

	__u32 iogpu_unk_40;
	__u32 unk_mask;

#if DRM_ASAHI_UNSTABLE_UABI_VERSION <= 10011
	/* We forgot the extension pointer in <=10011... */
	__u64 extensions;
#endif
};
728 
/** Extension type ID for drm_asahi_cmd_compute_user_timestamps */
#define ASAHI_COMPUTE_EXT_TIMESTAMPS	0x0001

/** User timestamp buffers for compute commands */
struct drm_asahi_cmd_compute_user_timestamps {
	/** @type: Type ID of this extension (ASAHI_COMPUTE_EXT_TIMESTAMPS) */
	__u32 type;
	/** @pad: MBZ */
	__u32 pad;
	/** @next: Pointer to the next extension struct, if any */
	__u64 next;

	/** @start_handle: Handle of the timestamp buffer for the start ts */
	__u32 start_handle;
	/** @start_offset: Offset into the timestamp buffer of the start ts */
	__u32 start_offset;

	/** @end_handle: Handle of the timestamp buffer for the end ts */
	__u32 end_handle;
	/** @end_offset: Offset into the timestamp buffer of the end ts */
	__u32 end_offset;

};
751 
/** Command completion status, reported in drm_asahi_result_info.status */
enum drm_asahi_status {
	DRM_ASAHI_STATUS_PENDING = 0,
	DRM_ASAHI_STATUS_COMPLETE,
	DRM_ASAHI_STATUS_UNKNOWN_ERROR,
	DRM_ASAHI_STATUS_TIMEOUT,
	DRM_ASAHI_STATUS_FAULT,
	DRM_ASAHI_STATUS_KILLED,
	DRM_ASAHI_STATUS_NO_DEVICE,
	DRM_ASAHI_STATUS_CHANNEL_ERROR,
};

/** GPU fault information, reported in drm_asahi_result_info.fault_type */
enum drm_asahi_fault {
	DRM_ASAHI_FAULT_NONE = 0,
	DRM_ASAHI_FAULT_UNKNOWN,
	DRM_ASAHI_FAULT_UNMAPPED,
	DRM_ASAHI_FAULT_AF_FAULT,
	DRM_ASAHI_FAULT_WRITE_ONLY,
	DRM_ASAHI_FAULT_READ_ONLY,
	DRM_ASAHI_FAULT_NO_ACCESS,
};
774 
/** Common command completion result information */
struct drm_asahi_result_info {
	/** @status: One of enum drm_asahi_status */
	__u32 status;

	/** @fault_type: One of enum drm_asahi_fault */
	__u32 fault_type;

	/** @unit: Unit number, hardware dependent */
	__u32 unit;

	/** @sideband: Sideband information, hardware dependent */
	__u32 sideband;

	/** @level: Page table level at which the fault occurred, hardware dependent */
	__u8 level;

	/** @is_read: Fault was a read */
	__u8 is_read;

	/** @pad: MBZ */
	__u16 pad;

	/** @extra: Extra bits, hardware dependent */
	__u32 extra;

	/** @address: Fault address, cache line aligned */
	__u64 address;
};
804 
#define DRM_ASAHI_RESULT_RENDER_TVB_GROW_OVF (1UL << 0)
#define DRM_ASAHI_RESULT_RENDER_TVB_GROW_MIN (1UL << 1)
#define DRM_ASAHI_RESULT_RENDER_TVB_OVERFLOWED (1UL << 2)

/** Render command completion result information */
struct drm_asahi_result_render {
	/** @info: Common result information */
	struct drm_asahi_result_info info;

	/** @flags: Zero or more of DRM_ASAHI_RESULT_RENDER_* */
	__u64 flags;

	/** @vertex_ts_start: Timestamp of the start of vertex processing */
	__u64 vertex_ts_start;

	/** @vertex_ts_end: Timestamp of the end of vertex processing */
	__u64 vertex_ts_end;

	/** @fragment_ts_start: Timestamp of the start of fragment processing */
	__u64 fragment_ts_start;

	/** @fragment_ts_end: Timestamp of the end of fragment processing */
	__u64 fragment_ts_end;

	/** @tvb_size_bytes: TVB size at the start of this render */
	__u64 tvb_size_bytes;

	/** @tvb_usage_bytes: Total TVB usage in bytes for this render */
	__u64 tvb_usage_bytes;

	/** @num_tvb_overflows: Number of TVB overflows that occurred for this render */
	__u32 num_tvb_overflows;

	/** @pad: MBZ */
	__u32 pad;
};
841 
/** Compute command completion result information */
struct drm_asahi_result_compute {
	/** @info: Common result information */
	struct drm_asahi_result_info info;

	/** @flags: Zero or more of DRM_ASAHI_RESULT_COMPUTE_* */
	__u64 flags;

	/** @ts_start: Timestamp of the start of this compute command */
	__u64 ts_start;

	/** @ts_end: Timestamp of the end of this compute command */
	__u64 ts_end;
};
856 
/** Fetch the current GPU timestamp */
struct drm_asahi_get_time {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @flags: MBZ. */
	__u64 flags;

	/** @gpu_timestamp: On return, the current GPU timestamp */
	__u64 gpu_timestamp;
};
868 
/* Note: this is an enum so that it can be resolved by Rust bindgen. */
/* DRM_IOW: data flows userspace-to-kernel only; DRM_IOWR: the struct is
 * read by the kernel and written back with results. */
enum {
   DRM_IOCTL_ASAHI_GET_PARAMS       = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_GET_PARAMS, struct drm_asahi_get_params),
   DRM_IOCTL_ASAHI_VM_CREATE        = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_VM_CREATE, struct drm_asahi_vm_create),
   DRM_IOCTL_ASAHI_VM_DESTROY       = DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_VM_DESTROY, struct drm_asahi_vm_destroy),
   DRM_IOCTL_ASAHI_GEM_CREATE       = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_GEM_CREATE, struct drm_asahi_gem_create),
   DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET  = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_GEM_MMAP_OFFSET, struct drm_asahi_gem_mmap_offset),
   DRM_IOCTL_ASAHI_GEM_BIND         = DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_GEM_BIND, struct drm_asahi_gem_bind),
   DRM_IOCTL_ASAHI_QUEUE_CREATE     = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_QUEUE_CREATE, struct drm_asahi_queue_create),
   DRM_IOCTL_ASAHI_QUEUE_DESTROY    = DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_QUEUE_DESTROY, struct drm_asahi_queue_destroy),
   DRM_IOCTL_ASAHI_SUBMIT           = DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_SUBMIT, struct drm_asahi_submit),
   DRM_IOCTL_ASAHI_GET_TIME         = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_GET_TIME, struct drm_asahi_get_time),
   DRM_IOCTL_ASAHI_GEM_BIND_OBJECT  = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_GEM_BIND_OBJECT, struct drm_asahi_gem_bind_object),
};
883 
884 #if defined(__cplusplus)
885 }
886 #endif
887 
888 #endif /* _ASAHI_DRM_H_ */
889