• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * This file is auto-generated. Modifications will be lost.
3  *
4  * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
5  * for more information.
6  */
7 #ifndef _UAPI_XE_DRM_H_
8 #define _UAPI_XE_DRM_H_
9 #include "drm.h"
10 #ifdef __cplusplus
11 extern "C" {
12 #endif
/* Xe ioctl command numbers, offsets relative to DRM_COMMAND_BASE. */
#define DRM_XE_DEVICE_QUERY 0x00
#define DRM_XE_GEM_CREATE 0x01
#define DRM_XE_GEM_MMAP_OFFSET 0x02
#define DRM_XE_VM_CREATE 0x03
#define DRM_XE_VM_DESTROY 0x04
#define DRM_XE_VM_BIND 0x05
#define DRM_XE_EXEC_QUEUE_CREATE 0x06
#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
/*
 * Full ioctl request codes built from the numbers above.
 * DRM_IOWR is used where the kernel writes results back into the struct
 * (e.g. returned handles/ids); DRM_IOW where it only reads the argument.
 */
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
/*
 * Generic header for chainable ioctl extensions.  Extension structs embed
 * this as their first member so the kernel can walk the chain.
 */
struct drm_xe_user_extension {
  __u64 next_extension; /* user pointer (as u64) to the next extension; 0 terminates the chain */
  __u32 name;           /* identifier selecting the extension type */
  __u32 pad;            /* explicit padding to keep 64-bit alignment; must be zero per UAPI convention */
};
/* Extension that sets one 64-bit property value, identified by 'property'. */
struct drm_xe_ext_set_property {
  struct drm_xe_user_extension base; /* chain header; base.name selects this extension */
  __u32 property;                    /* property id (meaning depends on the ioctl it extends) */
  __u32 pad;                         /* alignment padding */
  __u64 value;                       /* value to set */
  __u64 reserved[2];                 /* reserved for future use */
};
/* Identifies one hardware engine by (class, instance) within a GT. */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER 0
#define DRM_XE_ENGINE_CLASS_COPY 1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
#define DRM_XE_ENGINE_CLASS_COMPUTE 4
#define DRM_XE_ENGINE_CLASS_VM_BIND 5 /* virtual class for VM-bind exec queues, not a HW engine */
  __u16 engine_class;    /* one of DRM_XE_ENGINE_CLASS_* */
  __u16 engine_instance; /* instance index within the class */
  __u16 gt_id;           /* GT the engine belongs to (see drm_xe_gt.gt_id) */
  __u16 pad;             /* alignment padding */
};
/* One engine entry returned by DRM_XE_DEVICE_QUERY_ENGINES. */
struct drm_xe_engine {
  struct drm_xe_engine_class_instance instance; /* engine identity */
  __u64 reserved[3];                            /* reserved for future use */
};
/* Output of DRM_XE_DEVICE_QUERY_ENGINES: num_engines entries in engines[]. */
struct drm_xe_query_engines {
  __u32 num_engines; /* number of elements in engines[] */
  __u32 pad;         /* alignment padding */
  struct drm_xe_engine engines[]; /* flexible array of engine descriptors */
};
/* Memory region class: system memory vs. device-local VRAM. */
enum drm_xe_memory_class {
  DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
  DRM_XE_MEM_REGION_CLASS_VRAM
};
/* Describes one memory region (see DRM_XE_DEVICE_QUERY_MEM_REGIONS). */
struct drm_xe_mem_region {
  __u16 mem_class;        /* enum drm_xe_memory_class value */
  __u16 instance;         /* region instance id within the class */
  __u32 min_page_size;    /* minimum page size supported for this region */
  __u64 total_size;       /* total size of the region in bytes */
  __u64 used;             /* bytes currently allocated from the region */
  __u64 cpu_visible_size; /* portion of the region mappable by the CPU (VRAM BAR) */
  __u64 cpu_visible_used; /* bytes allocated from the CPU-visible portion */
  __u64 reserved[6];      /* reserved for future use */
};
/* Output of DRM_XE_DEVICE_QUERY_MEM_REGIONS: num_mem_regions entries follow. */
struct drm_xe_query_mem_regions {
  __u32 num_mem_regions; /* number of elements in mem_regions[] */
  __u32 pad;             /* alignment padding */
  struct drm_xe_mem_region mem_regions[]; /* flexible array of region descriptors */
};
/*
 * Output of DRM_XE_DEVICE_QUERY_CONFIG: info[] is indexed by the
 * DRM_XE_QUERY_CONFIG_* keys below.
 */
struct drm_xe_query_config {
  __u32 num_params; /* number of valid entries in info[] */
  __u32 pad;        /* alignment padding */
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0) /* bit inside the FLAGS entry, not an index */
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
  __u64 info[]; /* per-key values, indexed by DRM_XE_QUERY_CONFIG_* */
};
/* Describes one GT (graphics or media tile partition). */
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN 0
#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
  __u16 type;              /* DRM_XE_QUERY_GT_TYPE_* */
  __u16 tile_id;           /* tile this GT lives on */
  __u16 gt_id;             /* global GT id, referenced by engine_class_instance.gt_id */
  __u16 pad[3];            /* alignment padding */
  __u32 reference_clock;   /* reference clock frequency for this GT's timestamps */
  __u64 near_mem_regions;  /* bitmask of memory regions near this GT (bit positions are region instances) */
  __u64 far_mem_regions;   /* bitmask of memory regions far from this GT */
  __u16 ip_ver_major;      /* graphics/media IP version: major */
  __u16 ip_ver_minor;      /* graphics/media IP version: minor */
  __u16 ip_ver_rev;        /* graphics/media IP version: revision */
  __u16 pad2;              /* alignment padding */
  __u64 reserved[7];       /* reserved for future use */
};
/* Output of DRM_XE_DEVICE_QUERY_GT_LIST: num_gt entries in gt_list[]. */
struct drm_xe_query_gt_list {
  __u32 num_gt; /* number of elements in gt_list[] */
  __u32 pad;    /* alignment padding */
  struct drm_xe_gt gt_list[]; /* flexible array of GT descriptors */
};
/*
 * Output of DRM_XE_DEVICE_QUERY_GT_TOPOLOGY: one variable-length bitmask
 * per (gt_id, type) pair; the kernel returns a sequence of these records.
 */
struct drm_xe_query_topology_mask {
  __u16 gt_id; /* GT the mask applies to */
#define DRM_XE_TOPO_DSS_GEOMETRY 1
#define DRM_XE_TOPO_DSS_COMPUTE 2
#define DRM_XE_TOPO_L3_BANK 3
#define DRM_XE_TOPO_EU_PER_DSS 4
#define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5
  __u16 type;      /* DRM_XE_TOPO_* mask kind */
  __u32 num_bytes; /* length of mask[] in bytes */
  __u8 mask[];     /* availability bitmask, LSB-first */
};
/*
 * In/out for DRM_XE_DEVICE_QUERY_ENGINE_CYCLES: correlates an engine
 * timestamp with a CPU clock sample.  eci and clockid are inputs; the
 * remaining fields are filled in by the kernel.
 */
struct drm_xe_query_engine_cycles {
  struct drm_xe_engine_class_instance eci; /* in: engine to sample */
  __s32 clockid;        /* in: CPU clock id (CLOCK_MONOTONIC etc.) to sample against */
  __u32 width;          /* out: valid bit-width of engine_cycles */
  __u64 engine_cycles;  /* out: engine timestamp counter value */
  __u64 cpu_timestamp;  /* out: CPU time from 'clockid' taken alongside the engine read */
  __u64 cpu_delta;      /* out: CPU time elapsed while reading the engine counter */
};
/*
 * In/out for DRM_XE_DEVICE_QUERY_UC_FW_VERSION: uc_type selects the
 * microcontroller firmware; the version fields are written by the kernel.
 */
struct drm_xe_query_uc_fw_version {
#define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
#define XE_QUERY_UC_TYPE_HUC 1
  __u16 uc_type;    /* in: XE_QUERY_UC_TYPE_* firmware to query */
  __u16 pad;        /* alignment padding */
  __u32 branch_ver; /* out: firmware branch version */
  __u32 major_ver;  /* out: firmware major version */
  __u32 minor_ver;  /* out: firmware minor version */
  __u32 patch_ver;  /* out: firmware patch version */
  __u32 pad2;       /* alignment padding */
  __u64 reserved;   /* reserved for future use */
};
/*
 * Argument for DRM_IOCTL_XE_DEVICE_QUERY.  'query' selects what to read;
 * 'data' points at a user buffer of 'size' bytes.  NOTE(review): per the
 * upstream xe_drm.h docs this follows the usual two-call pattern (size=0
 * returns the required size) — confirm against the kernel you target.
 */
struct drm_xe_device_query {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
#define DRM_XE_DEVICE_QUERY_ENGINES 0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
#define DRM_XE_DEVICE_QUERY_CONFIG 2
#define DRM_XE_DEVICE_QUERY_GT_LIST 3
#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7
#define DRM_XE_DEVICE_QUERY_OA_UNITS 8
  __u32 query;       /* in: DRM_XE_DEVICE_QUERY_* selector */
  __u32 size;        /* in/out: size of the buffer at 'data' */
  __u64 data;        /* in: user pointer to the output buffer */
  __u64 reserved[2]; /* reserved for future use */
};
/* Argument for DRM_IOCTL_XE_GEM_CREATE: allocates a GEM buffer object. */
struct drm_xe_gem_create {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
  __u64 size;       /* in: requested object size in bytes */
  __u32 placement;  /* in: bitmask of memory region instances the object may live in */
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)      /* delay backing-store allocation */
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)            /* object may be used for display scanout */
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2) /* must be placed in CPU-visible VRAM */
  __u32 flags;  /* in: DRM_XE_GEM_CREATE_FLAG_* */
  __u32 vm_id;  /* in: VM to make the object exclusive to, or 0 for shareable */
  __u32 handle; /* out: GEM handle for the new object */
#define DRM_XE_GEM_CPU_CACHING_WB 1 /* write-back CPU caching */
#define DRM_XE_GEM_CPU_CACHING_WC 2 /* write-combined CPU caching */
  __u16 cpu_caching; /* in: DRM_XE_GEM_CPU_CACHING_* for CPU mappings */
  __u16 pad[3];      /* alignment padding */
  __u64 reserved[2]; /* reserved for future use */
};
/*
 * Argument for DRM_IOCTL_XE_GEM_MMAP_OFFSET: returns the fake offset to
 * pass to mmap(2) on the DRM fd for mapping 'handle'.
 */
struct drm_xe_gem_mmap_offset {
  __u64 extensions;  /* pointer to first drm_xe_user_extension, or 0 */
  __u32 handle;      /* in: GEM handle to map */
  __u32 flags;       /* in: currently no flags defined in this header; must be zero presumably — verify upstream */
  __u64 offset;      /* out: offset to use with mmap */
  __u64 reserved[2]; /* reserved for future use */
};
/* Argument for DRM_IOCTL_XE_VM_CREATE: creates a GPU virtual address space. */
struct drm_xe_vm_create {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0) /* back unmapped VA with a scratch page instead of faulting */
#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)      /* long-running mode */
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)   /* page-fault-capable VM */
  __u32 flags;       /* in: DRM_XE_VM_CREATE_FLAG_* */
  __u32 vm_id;       /* out: id of the new VM */
  __u64 reserved[2]; /* reserved for future use */
};
/* Argument for DRM_IOCTL_XE_VM_DESTROY. */
struct drm_xe_vm_destroy {
  __u32 vm_id;       /* in: VM to destroy */
  __u32 pad;         /* alignment padding */
  __u64 reserved[2]; /* reserved for future use */
};
/* One map/unmap operation within a DRM_IOCTL_XE_VM_BIND call. */
struct drm_xe_vm_bind_op {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
  __u32 obj;        /* in: GEM handle to bind (0 for userptr / unmap ops) */
  __u16 pat_index;  /* in: platform PAT index selecting caching attributes */
  __u16 pad;        /* alignment padding */
  union {
    __u64 obj_offset; /* in: offset into the GEM object (MAP) */
    __u64 userptr;    /* in: user address (MAP_USERPTR) */
  };
  __u64 range; /* in: length of the mapping in bytes */
  __u64 addr;  /* in: GPU virtual address to bind at */
#define DRM_XE_VM_BIND_OP_MAP 0x0
#define DRM_XE_VM_BIND_OP_UNMAP 0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
  __u32 op; /* in: DRM_XE_VM_BIND_OP_* */
#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)  /* map read-only */
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1) /* bind immediately rather than on first use */
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)      /* NULL binding: writes dropped, reads return zero */
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)  /* include mapping in devcoredump */
  __u32 flags;                         /* in: DRM_XE_VM_BIND_FLAG_* */
  __u32 prefetch_mem_region_instance;  /* in: target region for OP_PREFETCH */
  __u32 pad2;                          /* alignment padding */
  __u64 reserved[3];                   /* reserved for future use */
};
/*
 * Argument for DRM_IOCTL_XE_VM_BIND.  For num_binds == 1 the operation is
 * inline in 'bind'; otherwise 'vector_of_binds' is a user pointer to an
 * array of num_binds drm_xe_vm_bind_op structs (per upstream xe_drm.h docs).
 */
struct drm_xe_vm_bind {
  __u64 extensions;    /* pointer to first drm_xe_user_extension, or 0 */
  __u32 vm_id;         /* in: VM to operate on */
  __u32 exec_queue_id; /* in: VM_BIND-class exec queue to run the ops on, or 0 for the default */
  __u32 pad;           /* alignment padding */
  __u32 num_binds;     /* in: number of bind operations */
  union {
    struct drm_xe_vm_bind_op bind; /* in: the single op when num_binds == 1 */
    __u64 vector_of_binds;         /* in: user pointer to ops array when num_binds > 1 */
  };
  __u32 pad2;        /* alignment padding */
  __u32 num_syncs;   /* in: number of entries at 'syncs' */
  __u64 syncs;       /* in: user pointer to array of struct drm_xe_sync */
  __u64 reserved[2]; /* reserved for future use */
};
/*
 * Argument for DRM_IOCTL_XE_EXEC_QUEUE_CREATE.  'instances' points to a
 * 2D array of engine_class_instance, width x num_placements, describing
 * parallel-submission width and placement alternatives.
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 /* extension name: set a queue property at create time */
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0  /* property id: scheduling priority */
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 /* property id: timeslice duration */
  __u64 extensions;     /* pointer to first drm_xe_user_extension, or 0 */
  __u16 width;          /* in: submissions per exec (parallel width) */
  __u16 num_placements; /* in: number of valid placement alternatives */
  __u32 vm_id;          /* in: VM the queue executes in */
  __u32 flags;          /* in: currently no flags defined in this header */
  __u32 exec_queue_id;  /* out: id of the new exec queue */
  __u64 instances;      /* in: user pointer to engine_class_instance[width][num_placements] */
  __u64 reserved[2];    /* reserved for future use */
};
/* Argument for DRM_IOCTL_XE_EXEC_QUEUE_DESTROY. */
struct drm_xe_exec_queue_destroy {
  __u32 exec_queue_id; /* in: exec queue to destroy */
  __u32 pad;           /* alignment padding */
  __u64 reserved[2];   /* reserved for future use */
};
/* Argument for DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY. */
struct drm_xe_exec_queue_get_property {
  __u64 extensions;    /* pointer to first drm_xe_user_extension, or 0 */
  __u32 exec_queue_id; /* in: queue to query */
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 /* whether the queue has been banned */
  __u32 property;    /* in: DRM_XE_EXEC_QUEUE_GET_PROPERTY_* */
  __u64 value;       /* out: property value */
  __u64 reserved[2]; /* reserved for future use */
};
/*
 * Synchronization object passed to exec/bind ioctls.  Without
 * DRM_XE_SYNC_FLAG_SIGNAL the entry is waited on; with it, it is signaled
 * on completion.
 */
struct drm_xe_sync {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0          /* DRM syncobj handle */
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1 /* timeline syncobj handle + point */
#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2       /* user-memory fence address */
  __u32 type; /* in: DRM_XE_SYNC_TYPE_* */
#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
  __u32 flags; /* in: DRM_XE_SYNC_FLAG_* */
  union {
    __u32 handle; /* in: syncobj handle (SYNCOBJ / TIMELINE_SYNCOBJ) */
    __u64 addr;   /* in: user fence address (USER_FENCE) */
  };
  __u64 timeline_value; /* in: timeline point / value written for user fences */
  __u64 reserved[2];    /* reserved for future use */
};
/* Argument for DRM_IOCTL_XE_EXEC: submits batch buffer(s) to an exec queue. */
struct drm_xe_exec {
  __u64 extensions;       /* pointer to first drm_xe_user_extension, or 0 */
  __u32 exec_queue_id;    /* in: queue to submit on */
  __u32 num_syncs;        /* in: number of entries at 'syncs' */
  __u64 syncs;            /* in: user pointer to array of struct drm_xe_sync */
  __u64 address;          /* in: batch buffer GPU VA (or pointer to an array of VAs when num_batch_buffer > 1) */
  __u16 num_batch_buffer; /* in: batch buffer count; expected to match the queue's width — see upstream docs */
  __u16 pad[3];           /* alignment padding */
  __u64 reserved[2];      /* reserved for future use */
};
/*
 * Argument for DRM_IOCTL_XE_WAIT_USER_FENCE: waits until
 * (*(u64 *)addr & mask) <op> value becomes true, or the timeout expires.
 */
struct drm_xe_wait_user_fence {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
  __u64 addr;       /* in: user address holding the 64-bit fence value */
#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
  __u16 op; /* in: comparison, DRM_XE_UFENCE_WAIT_OP_* */
#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0) /* interpret 'timeout' as absolute time */
  __u16 flags;         /* in: DRM_XE_UFENCE_WAIT_FLAG_* */
  __u32 pad;           /* alignment padding */
  __u64 value;         /* in: value to compare against */
  __u64 mask;          /* in: mask applied to the fence value before comparing */
  __s64 timeout;       /* in/out: timeout in ns (relative or absolute per flags) */
  __u32 exec_queue_id; /* in: queue whose health is checked while waiting */
  __u32 pad2;          /* alignment padding */
  __u64 reserved[2];   /* reserved for future use */
};
/* Observation stream type; OA (performance counters) is the only one defined. */
enum drm_xe_observation_type {
  DRM_XE_OBSERVATION_TYPE_OA,
};
/* Operation performed by DRM_IOCTL_XE_OBSERVATION. */
enum drm_xe_observation_op {
  DRM_XE_OBSERVATION_OP_STREAM_OPEN,   /* open an observation stream fd */
  DRM_XE_OBSERVATION_OP_ADD_CONFIG,    /* register a counter configuration */
  DRM_XE_OBSERVATION_OP_REMOVE_CONFIG, /* unregister a counter configuration */
};
/* Argument for DRM_IOCTL_XE_OBSERVATION. */
struct drm_xe_observation_param {
  __u64 extensions;       /* pointer to first drm_xe_user_extension, or 0 */
  __u64 observation_type; /* in: enum drm_xe_observation_type */
  __u64 observation_op;   /* in: enum drm_xe_observation_op */
  __u64 param;            /* in: op-specific pointer/value (e.g. properties or config) */
};
/*
 * ioctls issued on the observation-stream fd returned by
 * DRM_XE_OBSERVATION_OP_STREAM_OPEN (note the 'i' magic, not the DRM one).
 */
enum drm_xe_observation_ioctls {
  DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),  /* start the stream */
  DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1), /* stop the stream */
  DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),  /* change stream configuration */
  DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),  /* read stream status */
  DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),    /* read stream info */
};
/* OA (observation architecture) unit type. */
enum drm_xe_oa_unit_type {
  DRM_XE_OA_UNIT_TYPE_OAG, /* OA unit in the render/compute GT */
  DRM_XE_OA_UNIT_TYPE_OAM, /* OA unit in the media GT */
};
/* Describes one OA unit; trailed by num_engines attached-engine entries. */
struct drm_xe_oa_unit {
  __u64 extensions;   /* pointer to first drm_xe_user_extension, or 0 */
  __u32 oa_unit_id;   /* id used with DRM_XE_OA_PROPERTY_OA_UNIT_ID */
  __u32 oa_unit_type; /* enum drm_xe_oa_unit_type */
  __u64 capabilities; /* bitmask of DRM_XE_OA_CAPS_* */
#define DRM_XE_OA_CAPS_BASE (1 << 0)
  __u64 oa_timestamp_freq; /* OA timestamp frequency in Hz */
  __u64 reserved[4];       /* reserved for future use */
  __u64 num_engines;       /* number of entries in eci[] */
  struct drm_xe_engine_class_instance eci[]; /* engines attached to this OA unit */
};
/*
 * Output of DRM_XE_DEVICE_QUERY_OA_UNITS.  oa_units[] holds num_oa_units
 * variable-sized drm_xe_oa_unit records packed back to back (each record's
 * length depends on its num_engines), hence the raw __u64 storage.
 */
struct drm_xe_query_oa_units {
  __u64 extensions;   /* pointer to first drm_xe_user_extension, or 0 */
  __u32 num_oa_units; /* number of OA unit records that follow */
  __u32 pad;          /* alignment padding */
  __u64 oa_units[];   /* packed, variable-length drm_xe_oa_unit records */
};
/* OA report format family, encoded in DRM_XE_OA_FORMAT_MASK_FMT_TYPE. */
enum drm_xe_oa_format_type {
  DRM_XE_OA_FMT_TYPE_OAG,
  DRM_XE_OA_FMT_TYPE_OAR,
  DRM_XE_OA_FMT_TYPE_OAM,
  DRM_XE_OA_FMT_TYPE_OAC,
  DRM_XE_OA_FMT_TYPE_OAM_MPEC,
  DRM_XE_OA_FMT_TYPE_PEC,
};
/*
 * Property ids for opening an OA stream, passed as chained set-property
 * extensions (DRM_XE_OA_EXTENSION_SET_PROPERTY) via the observation ioctl.
 */
enum drm_xe_oa_property_id {
#define DRM_XE_OA_EXTENSION_SET_PROPERTY 0
  DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1, /* OA unit to open the stream on */
  DRM_XE_OA_PROPERTY_SAMPLE_OA,      /* include OA reports in samples */
  DRM_XE_OA_PROPERTY_OA_METRIC_SET,  /* metric-set (config) id to use */
  DRM_XE_OA_PROPERTY_OA_FORMAT,      /* report format, packed per the masks below */
#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0)      /* enum drm_xe_oa_format_type */
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8)   /* counter select */
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16) /* counter size */
#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24)    /* byte-count report field */
  DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT, /* periodic sampling exponent */
  DRM_XE_OA_PROPERTY_OA_DISABLED,        /* open the stream in disabled state */
  DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,      /* exec queue for queue-filtered collection */
  DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE, /* engine instance within the queue */
  DRM_XE_OA_PROPERTY_NO_PREEMPT,         /* disallow preemption during collection */
};
/* Argument for DRM_XE_OBSERVATION_OP_ADD_CONFIG: registers an OA metric config. */
struct drm_xe_oa_config {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
  char uuid[36];    /* textual UUID identifying the config (36 chars, no NUL terminator) */
  __u32 n_regs;     /* number of register entries at regs_ptr */
  __u64 regs_ptr;   /* user pointer to the register programming data (address/value pairs per upstream docs) */
};
/* Output of DRM_XE_OBSERVATION_IOCTL_STATUS on an OA stream fd. */
struct drm_xe_oa_stream_status {
  __u64 extensions; /* pointer to first drm_xe_user_extension, or 0 */
  __u64 oa_status;  /* bitmask of DRM_XE_OASTATUS_* conditions */
#define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL (1 << 3)  /* MMIO trigger queue full */
#define DRM_XE_OASTATUS_COUNTER_OVERFLOW (1 << 2) /* counter overflow occurred */
#define DRM_XE_OASTATUS_BUFFER_OVERFLOW (1 << 1)  /* OA buffer overflowed; reports dropped */
#define DRM_XE_OASTATUS_REPORT_LOST (1 << 0)      /* at least one report was lost */
  __u64 reserved[3]; /* reserved for future use */
};
/* Output of DRM_XE_OBSERVATION_IOCTL_INFO on an OA stream fd. */
struct drm_xe_oa_stream_info {
  __u64 extensions;  /* pointer to first drm_xe_user_extension, or 0 */
  __u64 oa_buf_size; /* size of the OA buffer in bytes */
  __u64 reserved[3]; /* reserved for future use */
};
403 #ifdef __cplusplus
404 }
405 #endif
406 #endif
407