1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32 #ifndef ELK_EU_DEFINES_H
33 #define ELK_EU_DEFINES_H
34
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include "util/macros.h"
38 #include "dev/intel_device_info.h"
39 #include "elk_eu_opcodes.h"
40
41 /* The following hunk, up-to "Execution Unit" is used by both the
42 * intel/compiler and i965 codebase. */
43
/* Build a 32-bit mask covering bit positions [low, high], inclusive.
 * NOTE(review): undefined behavior if (high - low + 1) == 32 (1u << 32);
 * all existing uses keep the span below 32 bits — confirm before widening.
 */
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
/* Using the GNU statement expression extension */
/* Shift 'value' into the named field (using field##_SHIFT) and assert that
 * it fits inside field##_MASK.  Evaluates to the masked, shifted value.
 */
#define SET_FIELD(value, field)                                         \
   ({                                                                   \
      uint32_t fieldval = (uint32_t)(value) << field ## _SHIFT;         \
      assert((fieldval & ~ field ## _MASK) == 0);                       \
      fieldval & field ## _MASK;                                        \
   })

/* Same as SET_FIELD but with an explicit [low, high] bit range. */
#define SET_BITS(value, high, low)                                     \
   ({                                                                  \
      const uint32_t fieldval = (uint32_t)(value) << (low);            \
      assert((fieldval & ~INTEL_MASK(high, low)) == 0);                \
      fieldval & INTEL_MASK(high, low);                                \
   })

/* Extract bits [low, high] of 'data', shifted down to bit 0.
 * 'data' is parenthesized so expression arguments (e.g. a | b) expand
 * correctly — '&' binds tighter than '|'.
 */
#define GET_BITS(data, high, low) (((data) & INTEL_MASK((high), (low))) >> (low))
/* Extract the named field (using field##_MASK / field##_SHIFT). */
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
62
63 /* Bitfields for the URB_WRITE message, DW2 of message header: */
64 #define URB_WRITE_PRIM_END 0x1
65 #define URB_WRITE_PRIM_START 0x2
66 #define URB_WRITE_PRIM_TYPE_SHIFT 2
67
68 #define ELK_SPRITE_POINT_ENABLE 16
69
70 # define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT 0
71 # define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID 1
72
73 /* Execution Unit (EU) defines
74 */
75
76 #define ELK_ALIGN_1 0
77 #define ELK_ALIGN_16 1
78
79 #define ELK_ADDRESS_DIRECT 0
80 #define ELK_ADDRESS_REGISTER_INDIRECT_REGISTER 1
81
82 #define ELK_CHANNEL_X 0
83 #define ELK_CHANNEL_Y 1
84 #define ELK_CHANNEL_Z 2
85 #define ELK_CHANNEL_W 3
86
/* Pre-Gfx6 instruction compression control field values.
 * NOTE(review): 2NDHALF presumably selects the second half of a compressed
 * SIMD16 pair — confirm against the Gfx4/5 ISA documentation.
 */
enum elk_compression {
   ELK_COMPRESSION_NONE = 0,
   ELK_COMPRESSION_2NDHALF = 1,
   ELK_COMPRESSION_COMPRESSED = 2,
};
92
93 #define GFX6_COMPRESSION_1Q 0
94 #define GFX6_COMPRESSION_2Q 1
95 #define GFX6_COMPRESSION_3Q 2
96 #define GFX6_COMPRESSION_4Q 3
97 #define GFX6_COMPRESSION_1H 0
98 #define GFX6_COMPRESSION_2H 2
99
/* Conditional-modifier encodings for the instruction's cmod field.
 * EQ/NEQ are pure aliases of Z/NZ (identical hardware encodings).
 */
enum ENUM_PACKED elk_conditional_mod {
   ELK_CONDITIONAL_NONE = 0,
   ELK_CONDITIONAL_Z = 1,
   ELK_CONDITIONAL_NZ = 2,
   ELK_CONDITIONAL_EQ = 1, /* Z */
   ELK_CONDITIONAL_NEQ = 2, /* NZ */
   ELK_CONDITIONAL_G = 3,
   ELK_CONDITIONAL_GE = 4,
   ELK_CONDITIONAL_L = 5,
   ELK_CONDITIONAL_LE = 6,
   ELK_CONDITIONAL_R = 7, /* Gen <= 5 */
   /* NOTE(review): O/U presumably mean overflow / unordered — confirm
    * against the EU ISA conditional-modifier tables. */
   ELK_CONDITIONAL_O = 8,
   ELK_CONDITIONAL_U = 9,
};
114
115 #define ELK_DEBUG_NONE 0
116 #define ELK_DEBUG_BREAKPOINT 1
117
118 #define ELK_DEPENDENCY_NORMAL 0
119 #define ELK_DEPENDENCY_NOTCLEARED 1
120 #define ELK_DEPENDENCY_NOTCHECKED 2
121 #define ELK_DEPENDENCY_DISABLE 3
122
/* Execution-size (SIMD width) field: the encoding is log2 of the number
 * of channels (1 << value channels).
 */
enum ENUM_PACKED elk_execution_size {
   ELK_EXECUTE_1 = 0,
   ELK_EXECUTE_2 = 1,
   ELK_EXECUTE_4 = 2,
   ELK_EXECUTE_8 = 3,
   ELK_EXECUTE_16 = 4,
   ELK_EXECUTE_32 = 5,
};
131
/* Region horizontal-stride field: 0 encodes stride 0, otherwise the
 * encoding is log2(stride) + 1 (strides 1, 2, 4 -> 1, 2, 3).
 */
enum ENUM_PACKED elk_horizontal_stride {
   ELK_HORIZONTAL_STRIDE_0 = 0,
   ELK_HORIZONTAL_STRIDE_1 = 1,
   ELK_HORIZONTAL_STRIDE_2 = 2,
   ELK_HORIZONTAL_STRIDE_4 = 3,
};

/* Gfx10+ Align1 three-source source horizontal stride; same encoding
 * scheme as elk_horizontal_stride above.
 */
enum ENUM_PACKED gfx10_align1_3src_src_horizontal_stride {
   ELK_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0 = 0,
   ELK_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1 = 1,
   ELK_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2 = 2,
   ELK_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4 = 3,
};

/* Gfx10+ Align1 three-source destination horizontal stride (only strides
 * 1 and 2 are encodable for the destination).
 */
enum ENUM_PACKED gfx10_align1_3src_dst_horizontal_stride {
   ELK_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1 = 0,
   ELK_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_2 = 1,
};
150
151 #define ELK_INSTRUCTION_NORMAL 0
152 #define ELK_INSTRUCTION_SATURATE 1
153
154 #define ELK_MASK_ENABLE 0
155 #define ELK_MASK_DISABLE 1
156
157 /** @{
158 *
159 * Gfx6 has replaced "mask enable/disable" with WECtrl, which is
160 * effectively the same but much simpler to think about. Now, there
161 * are two contributors ANDed together to whether channels are
162 * executed: The predication on the instruction, and the channel write
163 * enable.
164 */
165 /**
166 * This is the default value. It means that a channel's write enable is set
167 * if the per-channel IP is pointing at this instruction.
168 */
169 #define ELK_WE_NORMAL 0
170 /**
171 * This is used like ELK_MASK_DISABLE, and causes all channels to have
172 * their write enable set. Note that predication still contributes to
173 * whether the channel actually gets written.
174 */
175 #define ELK_WE_ALL 1
176 /** @} */
177
178 enum elk_urb_write_flags {
179 ELK_URB_WRITE_NO_FLAGS = 0,
180
181 /**
182 * Causes a new URB entry to be allocated, and its address stored in the
183 * destination register (gen < 7).
184 */
185 ELK_URB_WRITE_ALLOCATE = 0x1,
186
187 /**
188 * Causes the current URB entry to be deallocated (gen < 7).
189 */
190 ELK_URB_WRITE_UNUSED = 0x2,
191
192 /**
193 * Causes the thread to terminate.
194 */
195 ELK_URB_WRITE_EOT = 0x4,
196
197 /**
198 * Indicates that the given URB entry is complete, and may be sent further
199 * down the 3D pipeline (gen < 7).
200 */
201 ELK_URB_WRITE_COMPLETE = 0x8,
202
203 /**
204 * Indicates that an additional offset (which may be different for the two
205 * vec4 slots) is stored in the message header (gen == 7).
206 */
207 ELK_URB_WRITE_PER_SLOT_OFFSET = 0x10,
208
209 /**
210 * Indicates that the channel masks in the URB_WRITE message header should
211 * not be overridden to 0xff (gen == 7).
212 */
213 ELK_URB_WRITE_USE_CHANNEL_MASKS = 0x20,
214
215 /**
216 * Indicates that the data should be sent to the URB using the
217 * URB_WRITE_OWORD message rather than URB_WRITE_HWORD (gen == 7). This
218 * causes offsets to be interpreted as multiples of an OWORD instead of an
219 * HWORD, and only allows one OWORD to be written.
220 */
221 ELK_URB_WRITE_OWORD = 0x40,
222
223 /**
224 * Convenient combination of flags: end the thread while simultaneously
225 * marking the given URB entry as complete.
226 */
227 ELK_URB_WRITE_EOT_COMPLETE = ELK_URB_WRITE_EOT | ELK_URB_WRITE_COMPLETE,
228
229 /**
230 * Convenient combination of flags: mark the given URB entry as complete
231 * and simultaneously allocate a new one.
232 */
233 ELK_URB_WRITE_ALLOCATE_COMPLETE =
234 ELK_URB_WRITE_ALLOCATE | ELK_URB_WRITE_COMPLETE,
235 };
236
/* Source-operand ordering of the logical framebuffer-write instruction;
 * presumably used to index its source array — verify against the
 * instruction builder.
 */
enum fb_write_logical_srcs {
   FB_WRITE_LOGICAL_SRC_COLOR0, /* REQUIRED */
   FB_WRITE_LOGICAL_SRC_COLOR1, /* for dual source blend messages */
   FB_WRITE_LOGICAL_SRC_SRC0_ALPHA,
   FB_WRITE_LOGICAL_SRC_SRC_DEPTH, /* gl_FragDepth */
   FB_WRITE_LOGICAL_SRC_DST_DEPTH, /* GFX4-5: passthrough from thread */
   FB_WRITE_LOGICAL_SRC_SRC_STENCIL, /* gl_FragStencilRefARB */
   FB_WRITE_LOGICAL_SRC_OMASK, /* Sample Mask (gl_SampleMask) */
   FB_WRITE_LOGICAL_SRC_COMPONENTS, /* REQUIRED */
   FB_WRITE_LOGICAL_NUM_SRCS
};
248
249 enum tex_logical_srcs {
250 /** Texture coordinates */
251 TEX_LOGICAL_SRC_COORDINATE,
252 /** Shadow comparator */
253 TEX_LOGICAL_SRC_SHADOW_C,
254 /** dPdx if the operation takes explicit derivatives, otherwise LOD value */
255 TEX_LOGICAL_SRC_LOD,
256 /** dPdy if the operation takes explicit derivatives */
257 TEX_LOGICAL_SRC_LOD2,
258 /** Min LOD */
259 TEX_LOGICAL_SRC_MIN_LOD,
260 /** Sample index */
261 TEX_LOGICAL_SRC_SAMPLE_INDEX,
262 /** MCS data */
263 TEX_LOGICAL_SRC_MCS,
264 /** REQUIRED: Texture surface index */
265 TEX_LOGICAL_SRC_SURFACE,
266 /** Texture sampler index */
267 TEX_LOGICAL_SRC_SAMPLER,
268 /** Texture surface bindless handle */
269 TEX_LOGICAL_SRC_SURFACE_HANDLE,
270 /** Texture sampler bindless handle */
271 TEX_LOGICAL_SRC_SAMPLER_HANDLE,
272 /** Texel offset for gathers */
273 TEX_LOGICAL_SRC_TG4_OFFSET,
274 /** REQUIRED: Number of coordinate components (as UD immediate) */
275 TEX_LOGICAL_SRC_COORD_COMPONENTS,
276 /** REQUIRED: Number of derivative components (as UD immediate) */
277 TEX_LOGICAL_SRC_GRAD_COMPONENTS,
278 /** REQUIRED: request residency (as UD immediate) */
279 TEX_LOGICAL_SRC_RESIDENCY,
280
281 TEX_LOGICAL_NUM_SRCS,
282 };
283
284 enum pull_uniform_constant_srcs {
285 /** Surface binding table index */
286 PULL_UNIFORM_CONSTANT_SRC_SURFACE,
287 /** Surface bindless handle */
288 PULL_UNIFORM_CONSTANT_SRC_SURFACE_HANDLE,
289 /** Surface offset */
290 PULL_UNIFORM_CONSTANT_SRC_OFFSET,
291 /** Pull size */
292 PULL_UNIFORM_CONSTANT_SRC_SIZE,
293
294 PULL_UNIFORM_CONSTANT_SRCS,
295 };
296
297 enum pull_varying_constant_srcs {
298 /** Surface binding table index */
299 PULL_VARYING_CONSTANT_SRC_SURFACE,
300 /** Surface bindless handle */
301 PULL_VARYING_CONSTANT_SRC_SURFACE_HANDLE,
302 /** Surface offset */
303 PULL_VARYING_CONSTANT_SRC_OFFSET,
304 /** Pull alignment */
305 PULL_VARYING_CONSTANT_SRC_ALIGNMENT,
306
307 PULL_VARYING_CONSTANT_SRCS,
308 };
309
310 enum get_buffer_size_srcs {
311 /** Surface binding table index */
312 GET_BUFFER_SIZE_SRC_SURFACE,
313 /** Surface bindless handle */
314 GET_BUFFER_SIZE_SRC_SURFACE_HANDLE,
315 /** LOD */
316 GET_BUFFER_SIZE_SRC_LOD,
317
318 GET_BUFFER_SIZE_SRCS
319 };
320
321 enum surface_logical_srcs {
322 /** Surface binding table index */
323 SURFACE_LOGICAL_SRC_SURFACE,
324 /** Surface bindless handle */
325 SURFACE_LOGICAL_SRC_SURFACE_HANDLE,
326 /** Surface address; could be multi-dimensional for typed opcodes */
327 SURFACE_LOGICAL_SRC_ADDRESS,
328 /** Data to be written or used in an atomic op */
329 SURFACE_LOGICAL_SRC_DATA,
330 /** Surface number of dimensions. Affects the size of ADDRESS */
331 SURFACE_LOGICAL_SRC_IMM_DIMS,
332 /** Per-opcode immediate argument. For atomics, this is the atomic opcode */
333 SURFACE_LOGICAL_SRC_IMM_ARG,
334 /**
335 * Some instructions with side-effects should not be predicated on
336 * sample mask, e.g. lowered stores to scratch.
337 */
338 SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK,
339
340 SURFACE_LOGICAL_NUM_SRCS
341 };
342
343 enum a64_logical_srcs {
344 /** Address the A64 message operates on */
345 A64_LOGICAL_ADDRESS,
346 /** Source for the operation (unused of LOAD ops) */
347 A64_LOGICAL_SRC,
348 /** Per-opcode immediate argument. Number of dwords, bit size, or atomic op. */
349 A64_LOGICAL_ARG,
350 /**
351 * Some instructions do want to run on helper lanes (like ray queries).
352 */
353 A64_LOGICAL_ENABLE_HELPERS,
354
355 A64_LOGICAL_NUM_SRCS
356 };
357
358 enum rt_logical_srcs {
359 /** Address of the globals */
360 RT_LOGICAL_SRC_GLOBALS,
361 /** Level at which the tracing should start */
362 RT_LOGICAL_SRC_BVH_LEVEL,
363 /** Type of tracing operation */
364 RT_LOGICAL_SRC_TRACE_RAY_CONTROL,
365 /** Synchronous tracing (ray query) */
366 RT_LOGICAL_SRC_SYNCHRONOUS,
367
368 RT_LOGICAL_NUM_SRCS
369 };
370
371 enum urb_logical_srcs {
372 URB_LOGICAL_SRC_HANDLE,
373 URB_LOGICAL_SRC_PER_SLOT_OFFSETS,
374 URB_LOGICAL_SRC_CHANNEL_MASK,
375 /** Data to be written. BAD_FILE for reads. */
376 URB_LOGICAL_SRC_DATA,
377 URB_LOGICAL_SRC_COMPONENTS,
378 URB_LOGICAL_NUM_SRCS
379 };
380
381 enum interpolator_logical_srcs {
382 /** Interpolation offset */
383 INTERP_SRC_OFFSET,
384 /** Message data */
385 INTERP_SRC_MSG_DESC,
386 /** Flag register for dynamic mode */
387 INTERP_SRC_DYNAMIC_MODE,
388
389 INTERP_NUM_SRCS
390 };
391
392
393 #ifdef __cplusplus
394 /**
395 * Allow elk_urb_write_flags enums to be ORed together.
396 */
397 inline elk_urb_write_flags
398 operator|(elk_urb_write_flags x, elk_urb_write_flags y)
399 {
400 return static_cast<elk_urb_write_flags>(static_cast<int>(x) |
401 static_cast<int>(y));
402 }
403 #endif
404
/* Predicate-control encodings.  The same field values are reinterpreted by
 * access mode / platform: 2..13 are Align1 any/all group predicates (the
 * group size is in the name), 2..7 are reused for Align16 swizzle-replicate
 * and any4h/all4h predicates, and Xe2 reuses 2/3 for its any/all forms.
 */
enum ENUM_PACKED elk_predicate {
   ELK_PREDICATE_NONE = 0,
   ELK_PREDICATE_NORMAL = 1,
   ELK_PREDICATE_ALIGN1_ANYV = 2,
   ELK_PREDICATE_ALIGN1_ALLV = 3,
   ELK_PREDICATE_ALIGN1_ANY2H = 4,
   ELK_PREDICATE_ALIGN1_ALL2H = 5,
   ELK_PREDICATE_ALIGN1_ANY4H = 6,
   ELK_PREDICATE_ALIGN1_ALL4H = 7,
   ELK_PREDICATE_ALIGN1_ANY8H = 8,
   ELK_PREDICATE_ALIGN1_ALL8H = 9,
   ELK_PREDICATE_ALIGN1_ANY16H = 10,
   ELK_PREDICATE_ALIGN1_ALL16H = 11,
   ELK_PREDICATE_ALIGN1_ANY32H = 12,
   ELK_PREDICATE_ALIGN1_ALL32H = 13,
   ELK_PREDICATE_ALIGN16_REPLICATE_X = 2,
   ELK_PREDICATE_ALIGN16_REPLICATE_Y = 3,
   ELK_PREDICATE_ALIGN16_REPLICATE_Z = 4,
   ELK_PREDICATE_ALIGN16_REPLICATE_W = 5,
   ELK_PREDICATE_ALIGN16_ANY4H = 6,
   ELK_PREDICATE_ALIGN16_ALL4H = 7,
   XE2_PREDICATE_ANY = 2,
   XE2_PREDICATE_ALL = 3
};
429
/* Register files.  The first four values are the hardware encodings;
 * ARF/FIXED_GRF/MRF/IMM are short aliases for them, and the trailing
 * entries are compiler-internal virtual files.
 */
enum ENUM_PACKED elk_reg_file {
   ELK_ARCHITECTURE_REGISTER_FILE = 0,
   ELK_GENERAL_REGISTER_FILE = 1,
   ELK_MESSAGE_REGISTER_FILE = 2,
   ELK_IMMEDIATE_VALUE = 3,

   /* Aliases for the hardware files above. */
   ARF = ELK_ARCHITECTURE_REGISTER_FILE,
   FIXED_GRF = ELK_GENERAL_REGISTER_FILE,
   MRF = ELK_MESSAGE_REGISTER_FILE,
   IMM = ELK_IMMEDIATE_VALUE,

   /* These are not hardware values */
   VGRF,
   ATTR,
   UNIFORM, /* prog_data->params[reg] */
   BAD_FILE,
};
447
448 enum ENUM_PACKED gfx10_align1_3src_reg_file {
449 ELK_ALIGN1_3SRC_GENERAL_REGISTER_FILE = 0,
450 ELK_ALIGN1_3SRC_IMMEDIATE_VALUE = 1, /* src0, src2 */
451 ELK_ALIGN1_3SRC_ACCUMULATOR = 1, /* dest, src1 */
452 };
453
454 /* CNL adds Align1 support for 3-src instructions. Bit 35 of the instruction
455 * word is "Execution Datatype" which controls whether the instruction operates
456 * on float or integer types. The register arguments have fields that offer
457 * more fine control their respective types.
458 */
459 enum ENUM_PACKED gfx10_align1_3src_exec_type {
460 ELK_ALIGN1_3SRC_EXEC_TYPE_INT = 0,
461 ELK_ALIGN1_3SRC_EXEC_TYPE_FLOAT = 1,
462 };
463
464 #define ELK_ARF_NULL 0x00
465 #define ELK_ARF_ADDRESS 0x10
466 #define ELK_ARF_ACCUMULATOR 0x20
467 #define ELK_ARF_FLAG 0x30
468 #define ELK_ARF_MASK 0x40
469 #define ELK_ARF_MASK_STACK 0x50
470 #define ELK_ARF_MASK_STACK_DEPTH 0x60
471 #define ELK_ARF_STATE 0x70
472 #define ELK_ARF_CONTROL 0x80
473 #define ELK_ARF_NOTIFICATION_COUNT 0x90
474 #define ELK_ARF_IP 0xA0
475 #define ELK_ARF_TDR 0xB0
476 #define ELK_ARF_TIMESTAMP 0xC0
477
478 #define ELK_MRF_COMPR4 (1 << 7)
479
480 #define ELK_AMASK 0
481 #define ELK_IMASK 1
482 #define ELK_LMASK 2
483 #define ELK_CMASK 3
484
485
486
487 #define ELK_THREAD_NORMAL 0
488 #define ELK_THREAD_ATOMIC 1
489 #define ELK_THREAD_SWITCH 2
490
/* Region vertical-stride field: 0 encodes stride 0, otherwise
 * log2(stride) + 1 (strides 1..32 -> 1..6); 0xF is the special
 * one-dimensional-region encoding.
 */
enum ENUM_PACKED elk_vertical_stride {
   ELK_VERTICAL_STRIDE_0 = 0,
   ELK_VERTICAL_STRIDE_1 = 1,
   ELK_VERTICAL_STRIDE_2 = 2,
   ELK_VERTICAL_STRIDE_4 = 3,
   ELK_VERTICAL_STRIDE_8 = 4,
   ELK_VERTICAL_STRIDE_16 = 5,
   ELK_VERTICAL_STRIDE_32 = 6,
   ELK_VERTICAL_STRIDE_ONE_DIMENSIONAL = 0xF,
};

/* Gfx10+ Align1 three-source vertical stride.
 * NOTE(review): _1 and _2 deliberately share encoding 1 here — confirm
 * against the Align1 3-src region documentation before changing.
 */
enum ENUM_PACKED gfx10_align1_3src_vertical_stride {
   ELK_ALIGN1_3SRC_VERTICAL_STRIDE_0 = 0,
   ELK_ALIGN1_3SRC_VERTICAL_STRIDE_1 = 1,
   ELK_ALIGN1_3SRC_VERTICAL_STRIDE_2 = 1,
   ELK_ALIGN1_3SRC_VERTICAL_STRIDE_4 = 2,
   ELK_ALIGN1_3SRC_VERTICAL_STRIDE_8 = 3,
};
509
/* Region width field: the encoding is log2 of the element count. */
enum ENUM_PACKED elk_width {
   ELK_WIDTH_1 = 0,
   ELK_WIDTH_2 = 1,
   ELK_WIDTH_4 = 2,
   ELK_WIDTH_8 = 3,
   ELK_WIDTH_16 = 4,
};
517
518 /**
519 * Gfx12+ SWSB SBID synchronization mode.
520 *
521 * This is represented as a bitmask including any required SBID token
522 * synchronization modes, used to synchronize out-of-order instructions. Only
523 * the strongest mode of the mask will be provided to the hardware in the SWSB
524 * field of an actual hardware instruction, but virtual instructions may be
525 * able to take into account multiple of them.
526 */
527 enum tgl_sbid_mode {
528 TGL_SBID_NULL = 0,
529 TGL_SBID_SRC = 1,
530 TGL_SBID_DST = 2,
531 TGL_SBID_SET = 4
532 };
533
534
535 enum gfx12_sub_byte_precision {
536 ELK_SUB_BYTE_PRECISION_NONE = 0,
537
538 /** 4 bits. Signedness determined by base type */
539 ELK_SUB_BYTE_PRECISION_4BIT = 1,
540
541 /** 2 bits. Signedness determined by base type */
542 ELK_SUB_BYTE_PRECISION_2BIT = 2,
543 };
544
545 enum elk_gfx12_systolic_depth {
546 ELK_SYSTOLIC_DEPTH_16 = 0,
547 ELK_SYSTOLIC_DEPTH_2 = 1,
548 ELK_SYSTOLIC_DEPTH_4 = 2,
549 ELK_SYSTOLIC_DEPTH_8 = 3,
550 };
551
552 #ifdef __cplusplus
553 /**
554 * Allow bitwise arithmetic of tgl_sbid_mode enums.
555 */
556 inline tgl_sbid_mode
557 operator|(tgl_sbid_mode x, tgl_sbid_mode y)
558 {
559 return tgl_sbid_mode(unsigned(x) | unsigned(y));
560 }
561
562 inline tgl_sbid_mode
563 operator&(tgl_sbid_mode x, tgl_sbid_mode y)
564 {
565 return tgl_sbid_mode(unsigned(x) & unsigned(y));
566 }
567
568 inline tgl_sbid_mode &
569 operator|=(tgl_sbid_mode &x, tgl_sbid_mode y)
570 {
571 return x = x | y;
572 }
573
574 #endif
575
576 /**
577 * TGL+ SWSB RegDist synchronization pipeline.
578 *
579 * On TGL all instructions that use the RegDist synchronization mechanism are
580 * considered to be executed as a single in-order pipeline, therefore only the
581 * TGL_PIPE_FLOAT pipeline is applicable. On XeHP+ platforms there are two
582 * additional asynchronous ALU pipelines (which still execute instructions
583 * in-order and use the RegDist synchronization mechanism). TGL_PIPE_NONE
584 * doesn't provide any RegDist pipeline synchronization information and allows
585 * the hardware to infer the pipeline based on the source types of the
586 * instruction. TGL_PIPE_ALL can be used when synchronization with all ALU
587 * pipelines is intended.
588 */
589 enum tgl_pipe {
590 TGL_PIPE_NONE = 0,
591 TGL_PIPE_FLOAT,
592 TGL_PIPE_INT,
593 TGL_PIPE_LONG,
594 TGL_PIPE_MATH,
595 TGL_PIPE_ALL
596 };
597
598 /**
599 * Logical representation of the SWSB scheduling information of a hardware
600 * instruction. The binary representation is slightly more compact.
601 */
struct tgl_swsb {
   /* In-order (RegDist) dependency distance; 0 means none. */
   unsigned regdist : 3;
   /* Which in-order ALU pipeline regdist counts against. */
   enum tgl_pipe pipe : 3;
   /* SBID token index for out-of-order synchronization. */
   unsigned sbid : 5;
   /* Bitmask of tgl_sbid_mode; TGL_SBID_NULL (0) means no SBID sync. */
   enum tgl_sbid_mode mode : 3;
};
608
609 /**
610 * Construct a scheduling annotation with a single RegDist dependency. This
611 * synchronizes with the completion of the d-th previous in-order instruction.
612 * The index is one-based, zero causes a no-op tgl_swsb to be constructed.
613 */
614 static inline struct tgl_swsb
tgl_swsb_regdist(unsigned d)615 tgl_swsb_regdist(unsigned d)
616 {
617 const struct tgl_swsb swsb = { d, d ? TGL_PIPE_ALL : TGL_PIPE_NONE };
618 assert(swsb.regdist == d);
619 return swsb;
620 }
621
622 /**
623 * Construct a scheduling annotation that synchronizes with the specified SBID
624 * token.
625 */
626 static inline struct tgl_swsb
tgl_swsb_sbid(enum tgl_sbid_mode mode,unsigned sbid)627 tgl_swsb_sbid(enum tgl_sbid_mode mode, unsigned sbid)
628 {
629 const struct tgl_swsb swsb = { 0, TGL_PIPE_NONE, sbid, mode };
630 assert(swsb.sbid == sbid);
631 return swsb;
632 }
633
634 /**
635 * Construct a no-op scheduling annotation.
636 */
637 static inline struct tgl_swsb
tgl_swsb_null(void)638 tgl_swsb_null(void)
639 {
640 return tgl_swsb_regdist(0);
641 }
642
643 /**
644 * Return a scheduling annotation that allocates the same SBID synchronization
645 * token as \p swsb. In addition it will synchronize against a previous
646 * in-order instruction if \p regdist is non-zero.
647 */
648 static inline struct tgl_swsb
tgl_swsb_dst_dep(struct tgl_swsb swsb,unsigned regdist)649 tgl_swsb_dst_dep(struct tgl_swsb swsb, unsigned regdist)
650 {
651 swsb.regdist = regdist;
652 swsb.mode = swsb.mode & TGL_SBID_SET;
653 swsb.pipe = (regdist ? TGL_PIPE_ALL : TGL_PIPE_NONE);
654 return swsb;
655 }
656
657 /**
658 * Return a scheduling annotation that synchronizes against the same SBID and
659 * RegDist dependencies as \p swsb, but doesn't allocate any SBID token.
660 */
661 static inline struct tgl_swsb
tgl_swsb_src_dep(struct tgl_swsb swsb)662 tgl_swsb_src_dep(struct tgl_swsb swsb)
663 {
664 swsb.mode = swsb.mode & (TGL_SBID_SRC | TGL_SBID_DST);
665 return swsb;
666 }
667
668 /**
669 * Convert the provided tgl_swsb to the hardware's binary representation of an
670 * SWSB annotation.
671 */
static inline uint32_t
tgl_swsb_encode(const struct intel_device_info *devinfo, struct tgl_swsb swsb)
{
   if (!swsb.mode) {
      /* RegDist-only annotation.  Before verx10 125 there is a single
       * in-order pipeline, so no pipeline-selector bits are emitted;
       * afterwards the distance is prefixed with the pipeline encoding. */
      const unsigned pipe = devinfo->verx10 < 125 ? 0 :
                            swsb.pipe == TGL_PIPE_FLOAT ? 0x10 :
                            swsb.pipe == TGL_PIPE_INT ? 0x18 :
                            swsb.pipe == TGL_PIPE_LONG ? 0x20 :
                            swsb.pipe == TGL_PIPE_MATH ? 0x28 :
                            swsb.pipe == TGL_PIPE_ALL ? 0x8 : 0;
      return pipe | swsb.regdist;

   } else if (swsb.regdist) {
      /* Combined RegDist + SBID annotation. */
      if (devinfo->ver >= 20) {
         if ((swsb.mode & TGL_SBID_SET)) {
            /* Xe2: dual dependency while also allocating the token; only
             * these pipelines are expressible in this form. */
            assert(swsb.pipe == TGL_PIPE_ALL ||
                   swsb.pipe == TGL_PIPE_INT || swsb.pipe == TGL_PIPE_FLOAT);
            return (swsb.pipe == TGL_PIPE_INT ? 0x300 :
                    swsb.pipe == TGL_PIPE_FLOAT ? 0x200 : 0x100) |
                   swsb.regdist << 5 | swsb.sbid;
         } else {
            /* Xe2: dual dependency against an existing token (SRC or DST
             * consumption only). */
            assert(!(swsb.mode & ~(TGL_SBID_DST | TGL_SBID_SRC)));
            return (swsb.pipe == TGL_PIPE_ALL ? 0x300 :
                    swsb.mode == TGL_SBID_SRC ? 0x200 : 0x100) |
                   swsb.regdist << 5 | swsb.sbid;
         }
      } else {
         /* Pre-Xe2 combined form: 4-bit SBID, 3-bit distance, bit 7 set. */
         assert(!(swsb.sbid & ~0xfu));
         return 0x80 | swsb.regdist << 4 | swsb.sbid;
      }

   } else {
      /* SBID-only annotation; the high bits select SET/DST/SRC mode. */
      if (devinfo->ver >= 20) {
         return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0xc0 :
                             swsb.mode & TGL_SBID_DST ? 0x80 : 0xa0);
      } else {
         assert(!(swsb.sbid & ~0xfu));
         return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0x40 :
                             swsb.mode & TGL_SBID_DST ? 0x20 : 0x30);
      }
   }
}
714
715 /**
716 * Convert the provided binary representation of an SWSB annotation to a
717 * tgl_swsb.
718 */
static inline struct tgl_swsb
tgl_swsb_decode(const struct intel_device_info *devinfo,
                const bool is_unordered, const uint32_t x)
{
   /* Inverse of tgl_swsb_encode(): each branch mirrors one of the binary
    * layouts produced there.  is_unordered disambiguates encodings that
    * hardware interprets differently for in-order vs. send-like
    * instructions. */
   if (devinfo->ver >= 20) {
      if (x & 0x300) {
         /* Xe2 combined RegDist + SBID layout. */
         if (is_unordered) {
            /* Token-allocating (SET) form; bits 8-9 select the pipeline. */
            const struct tgl_swsb swsb = {
               (x & 0xe0u) >> 5,
               ((x & 0x300) == 0x300 ? TGL_PIPE_INT :
                (x & 0x300) == 0x200 ? TGL_PIPE_FLOAT :
                TGL_PIPE_ALL),
               x & 0x1fu,
               TGL_SBID_SET
            };
            return swsb;
         } else {
            /* Token-consuming form; bits 8-9 select SRC/DST and whether
             * the distance counts against all pipelines. */
            const struct tgl_swsb swsb = {
               (x & 0xe0u) >> 5,
               ((x & 0x300) == 0x300 ? TGL_PIPE_ALL : TGL_PIPE_NONE),
               x & 0x1fu,
               ((x & 0x300) == 0x200 ? TGL_SBID_SRC : TGL_SBID_DST)
            };
            return swsb;
         }

      } else if ((x & 0xe0) == 0x80) {
         /* Xe2 SBID-only encodings (DST/SRC/SET selected by bits 5-7). */
         return tgl_swsb_sbid(TGL_SBID_DST, x & 0x1f);
      } else if ((x & 0xe0) == 0xa0) {
         return tgl_swsb_sbid(TGL_SBID_SRC, x & 0x1fu);
      } else if ((x & 0xe0) == 0xc0) {
         return tgl_swsb_sbid(TGL_SBID_SET, x & 0x1fu);
      } else {
         /* Xe2 RegDist-only encoding with a pipeline-selector prefix. */
         const struct tgl_swsb swsb = { x & 0x7u,
                                        ((x & 0x38) == 0x10 ? TGL_PIPE_FLOAT :
                                         (x & 0x38) == 0x18 ? TGL_PIPE_INT :
                                         (x & 0x38) == 0x20 ? TGL_PIPE_LONG :
                                         (x & 0x38) == 0x28 ? TGL_PIPE_MATH :
                                         (x & 0x38) == 0x8 ? TGL_PIPE_ALL :
                                         TGL_PIPE_NONE) };
         return swsb;
      }

   } else {
      if (x & 0x80) {
         /* Pre-Xe2 combined RegDist + SBID layout (4-bit token, 3-bit
          * distance); the SBID mode depends on instruction ordering. */
         const struct tgl_swsb swsb = { (x & 0x70u) >> 4, TGL_PIPE_NONE,
                                        x & 0xfu,
                                        is_unordered ?
                                        TGL_SBID_SET : TGL_SBID_DST };
         return swsb;
      } else if ((x & 0x70) == 0x20) {
         /* Pre-Xe2 SBID-only encodings. */
         return tgl_swsb_sbid(TGL_SBID_DST, x & 0xfu);
      } else if ((x & 0x70) == 0x30) {
         return tgl_swsb_sbid(TGL_SBID_SRC, x & 0xfu);
      } else if ((x & 0x70) == 0x40) {
         return tgl_swsb_sbid(TGL_SBID_SET, x & 0xfu);
      } else {
         /* RegDist-only encoding; pipeline selectors only exist on
          * verx10 >= 125, which the assert below enforces. */
         const struct tgl_swsb swsb = { x & 0x7u,
                                        ((x & 0x78) == 0x10 ? TGL_PIPE_FLOAT :
                                         (x & 0x78) == 0x18 ? TGL_PIPE_INT :
                                         (x & 0x78) == 0x50 ? TGL_PIPE_LONG :
                                         (x & 0x78) == 0x8 ? TGL_PIPE_ALL :
                                         TGL_PIPE_NONE) };
         assert(devinfo->verx10 >= 125 || swsb.pipe == TGL_PIPE_NONE);
         return swsb;
      }
   }
}
787
788 enum tgl_sync_function {
789 TGL_SYNC_NOP = 0x0,
790 TGL_SYNC_ALLRD = 0x2,
791 TGL_SYNC_ALLWR = 0x3,
792 TGL_SYNC_FENCE = 0xd,
793 TGL_SYNC_BAR = 0xe,
794 TGL_SYNC_HOST = 0xf
795 };
796
797 /**
798 * Message target: Shared Function ID for where to SEND a message.
799 *
800 * These are enumerated in the ISA reference under "send - Send Message".
801 * In particular, see the following tables:
802 * - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
803 * - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
804 * - Ivybridge PRM, Volume 1 Part 1, section 3.2.7 "GPE Function IDs"
805 */
806 enum elk_message_target {
807 ELK_SFID_NULL = 0,
808 ELK_SFID_MATH = 1, /* Only valid on Gfx4-5 */
809 ELK_SFID_SAMPLER = 2,
810 ELK_SFID_MESSAGE_GATEWAY = 3,
811 ELK_SFID_DATAPORT_READ = 4,
812 ELK_SFID_DATAPORT_WRITE = 5,
813 ELK_SFID_URB = 6,
814 ELK_SFID_THREAD_SPAWNER = 7,
815 ELK_SFID_VME = 8,
816
817 GFX6_SFID_DATAPORT_SAMPLER_CACHE = 4,
818 GFX6_SFID_DATAPORT_RENDER_CACHE = 5,
819 GFX6_SFID_DATAPORT_CONSTANT_CACHE = 9,
820
821 GFX7_SFID_DATAPORT_DATA_CACHE = 10,
822 GFX7_SFID_PIXEL_INTERPOLATOR = 11,
823 HSW_SFID_DATAPORT_DATA_CACHE_1 = 12,
824 HSW_SFID_CRE = 13,
825
826 GFX12_SFID_TGM = 13, /* Typed Global Memory */
827 GFX12_SFID_SLM = 14, /* Shared Local Memory */
828 GFX12_SFID_UGM = 15, /* Untyped Global Memory */
829 };
830
831 #define GFX7_MESSAGE_TARGET_DP_DATA_CACHE 10
832
833 #define ELK_SAMPLER_RETURN_FORMAT_FLOAT32 0
834 #define ELK_SAMPLER_RETURN_FORMAT_UINT32 2
835 #define ELK_SAMPLER_RETURN_FORMAT_SINT32 3
836
837 #define GFX8_SAMPLER_RETURN_FORMAT_32BITS 0
838 #define GFX8_SAMPLER_RETURN_FORMAT_16BITS 1
839
840 #define ELK_SAMPLER_MESSAGE_SIMD8_SAMPLE 0
841 #define ELK_SAMPLER_MESSAGE_SIMD16_SAMPLE 0
842 #define ELK_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS 0
843 #define ELK_SAMPLER_MESSAGE_SIMD8_KILLPIX 1
844 #define ELK_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD 1
845 #define ELK_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD 1
846 #define ELK_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS 2
847 #define ELK_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS 2
848 #define ELK_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE 0
849 #define ELK_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE 2
850 #define ELK_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE 0
851 #define ELK_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE 1
852 #define ELK_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE 1
853 #define ELK_SAMPLER_MESSAGE_SIMD4X2_RESINFO 2
854 #define ELK_SAMPLER_MESSAGE_SIMD16_RESINFO 2
855 #define ELK_SAMPLER_MESSAGE_SIMD4X2_LD 3
856 #define ELK_SAMPLER_MESSAGE_SIMD8_LD 3
857 #define ELK_SAMPLER_MESSAGE_SIMD16_LD 3
858
859 #define GFX5_SAMPLER_MESSAGE_SAMPLE 0
860 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS 1
861 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD 2
862 #define GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE 3
863 #define GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS 4
864 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
865 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE 6
866 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LD 7
867 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4 8
868 #define GFX5_SAMPLER_MESSAGE_LOD 9
869 #define GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO 10
870 #define GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO 11
871 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C 16
872 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO 17
873 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C 18
874 #define XE2_SAMPLER_MESSAGE_SAMPLE_MLOD 18
875 #define XE2_SAMPLER_MESSAGE_SAMPLE_COMPARE_MLOD 19
876 #define HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE 20
877 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LZ 24
878 #define GFX9_SAMPLER_MESSAGE_SAMPLE_C_LZ 25
879 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD_LZ 26
880 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W 28
881 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS 29
882 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS 30
883 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DSS 31
884
885 /* for GFX5 only */
886 #define ELK_SAMPLER_SIMD_MODE_SIMD4X2 0
887 #define ELK_SAMPLER_SIMD_MODE_SIMD8 1
888 #define ELK_SAMPLER_SIMD_MODE_SIMD16 2
889 #define ELK_SAMPLER_SIMD_MODE_SIMD32_64 3
890
891 #define GFX10_SAMPLER_SIMD_MODE_SIMD8H 5
892 #define GFX10_SAMPLER_SIMD_MODE_SIMD16H 6
893
894 #define XE2_SAMPLER_SIMD_MODE_SIMD16 1
895 #define XE2_SAMPLER_SIMD_MODE_SIMD32 2
896 #define XE2_SAMPLER_SIMD_MODE_SIMD16H 5
897 #define XE2_SAMPLER_SIMD_MODE_SIMD32H 6
898
899 /* GFX9 changes SIMD mode 0 to mean SIMD8D, but lets us get the SIMD4x2
900 * behavior by setting bit 22 of dword 2 in the message header. */
901 #define GFX9_SAMPLER_SIMD_MODE_SIMD8D 0
902 #define GFX9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2 (1 << 22)
903
904 #define ELK_DATAPORT_OWORD_BLOCK_1_OWORDLOW 0
905 #define ELK_DATAPORT_OWORD_BLOCK_1_OWORDHIGH 1
906 #define ELK_DATAPORT_OWORD_BLOCK_2_OWORDS 2
907 #define ELK_DATAPORT_OWORD_BLOCK_4_OWORDS 3
908 #define ELK_DATAPORT_OWORD_BLOCK_8_OWORDS 4
909 #define GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS 5
910 #define ELK_DATAPORT_OWORD_BLOCK_OWORDS(n) \
911 ((n) == 1 ? ELK_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
912 (n) == 2 ? ELK_DATAPORT_OWORD_BLOCK_2_OWORDS : \
913 (n) == 4 ? ELK_DATAPORT_OWORD_BLOCK_4_OWORDS : \
914 (n) == 8 ? ELK_DATAPORT_OWORD_BLOCK_8_OWORDS : \
915 (n) == 16 ? GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS : \
916 (abort(), ~0))
917 #define ELK_DATAPORT_OWORD_BLOCK_DWORDS(n) \
918 ((n) == 4 ? ELK_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
919 (n) == 8 ? ELK_DATAPORT_OWORD_BLOCK_2_OWORDS : \
920 (n) == 16 ? ELK_DATAPORT_OWORD_BLOCK_4_OWORDS : \
921 (n) == 32 ? ELK_DATAPORT_OWORD_BLOCK_8_OWORDS : \
922 (abort(), ~0))
923
924 #define ELK_DATAPORT_OWORD_DUAL_BLOCK_1OWORD 0
925 #define ELK_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS 2
926
927 #define ELK_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS 2
928 #define ELK_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS 3
929
930 /* This one stays the same across generations. */
931 #define ELK_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ 0
932 /* GFX4 */
933 #define ELK_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 1
934 #define ELK_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 2
935 #define ELK_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 3
936 /* G45, GFX5 */
937 #define G45_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
938 #define G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
939 #define G45_DATAPORT_READ_MESSAGE_AVC_LOOP_FILTER_READ 3
940 #define G45_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
941 #define G45_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
942 /* GFX6 */
943 #define GFX6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
944 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
945 #define GFX6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
946 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ 5
947 #define GFX6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
948
949 #define ELK_DATAPORT_READ_TARGET_DATA_CACHE 0
950 #define ELK_DATAPORT_READ_TARGET_RENDER_CACHE 1
951 #define ELK_DATAPORT_READ_TARGET_SAMPLER_CACHE 2
952
/* Render target write message control: SIMD mode and source selection. */
#define ELK_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE 0
#define ELK_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1
#define ELK_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01 2
#define ELK_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23 3
#define ELK_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01 4

/* Data port write message types (pre-GFX6 numbering; GFX6 variants below). */
#define ELK_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 0
#define ELK_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 1
#define ELK_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 2
#define ELK_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 3
#define ELK_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 4
#define ELK_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE 5
#define ELK_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE 7

/* GFX6 */
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE 7
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 8
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 9
#define GFX6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 10
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 11
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 12
#define GFX6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE 13
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE 14
976
/* GFX7 data port message types, split by shared function unit
 * (RC = render cache, DC = data cache — confirm naming against PRM).
 */
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_READ 4
#define GFX7_DATAPORT_RC_TYPED_SURFACE_READ 5
#define GFX7_DATAPORT_RC_TYPED_ATOMIC_OP 6
#define GFX7_DATAPORT_RC_MEMORY_FENCE 7
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_WRITE 10
#define GFX7_DATAPORT_RC_RENDER_TARGET_WRITE 12
#define GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE 13
#define GFX7_DATAPORT_DC_OWORD_BLOCK_READ 0
#define GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ 1
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_READ 2
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_READ 3
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_READ 4
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ 5
#define GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP 6
#define GFX7_DATAPORT_DC_MEMORY_FENCE 7
#define GFX7_DATAPORT_DC_OWORD_BLOCK_WRITE 8
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE 10
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_WRITE 11
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_WRITE 12
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE 13

/* GFX7 scratch access: both variants set bit 18; bit 17 selects write. */
#define GFX7_DATAPORT_SCRATCH_READ ((1 << 18) | \
                                    (0 << 17))
#define GFX7_DATAPORT_SCRATCH_WRITE ((1 << 18) | \
                                     (1 << 17))
#define GFX7_DATAPORT_SCRATCH_NUM_REGS_SHIFT 12

/* Pixel interpolator message: barycentric location select. */
#define GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET 0
#define GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE 1
#define GFX7_PIXEL_INTERPOLATOR_LOC_CENTROID 2
#define GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET 3
1009
/* HSW splits the data cache data port into two ports (DC port 0/port 1),
 * each with its own message type numbering.
 */
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_READ 0
#define HSW_DATAPORT_DC_PORT0_UNALIGNED_OWORD_BLOCK_READ 1
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_READ 2
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_READ 3
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ 4
#define HSW_DATAPORT_DC_PORT0_MEMORY_FENCE 7
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_WRITE 8
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_WRITE 10
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_WRITE 11
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE 12

#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ 1
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP 2
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2 3
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_READ 4
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ 5
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP 6
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2 7
#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE 9
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_WRITE 10
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP 11
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP_SIMD4X2 12
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE 13
/* GFX8+: A64 (64-bit address) messages on DC port 1. */
#define GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ 0x10
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ 0x11
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP 0x12
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP 0x13
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ 0x14
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE 0x15
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE 0x19
#define GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE 0x1a
#define GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP 0x1b
#define GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP 0x1d
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP 0x1e

/* GFX9 */
#define GFX9_DATAPORT_RC_RENDER_TARGET_WRITE 12
#define GFX9_DATAPORT_RC_RENDER_TARGET_READ 13

/* A64 scattered message subtype: size of each scattered element. */
#define GFX8_A64_SCATTERED_SUBTYPE_BYTE 0
#define GFX8_A64_SCATTERED_SUBTYPE_DWORD 1
#define GFX8_A64_SCATTERED_SUBTYPE_QWORD 2
#define GFX8_A64_SCATTERED_SUBTYPE_HWORD 3
1055
/* Dataport special binding table indices: */
#define ELK_BTI_STATELESS 255
#define GFX7_BTI_SLM 254

#define HSW_BTI_STATELESS_LOCALLY_COHERENT 255
#define HSW_BTI_STATELESS_NON_COHERENT 253
#define HSW_BTI_STATELESS_GLOBALLY_COHERENT 252
#define HSW_BTI_STATELESS_LLC_COHERENT 251
#define HSW_BTI_STATELESS_L3_UNCACHED 250

/* The hardware docs are a bit contradictory here. On Haswell, where they
 * first added cacheability control, there were 5 different cache modes (see
 * HSW_BTI_STATELESS_* above). On Broadwell, they reduced to two:
 *
 * - IA-Coherent (BTI=255): Coherent within Gen and coherent within the
 * entire IA cache memory hierarchy.
 *
 * - Non-Coherent (BTI=253): Coherent within Gen, same cache type.
 *
 * Information about stateless cache coherency can be found in the "A32
 * Stateless" section of the "3D Media GPGPU" volume of the PRM for each
 * hardware generation.
 *
 * Unfortunately, the docs for MDC_STATELESS appear to have been copied and
 * pasted from Haswell and give the Haswell definitions for the BTI values of
 * 255 and 253 including a warning about accessing 253 surfaces from multiple
 * threads. This seems to be a copy+paste error and the definitions from the
 * "A32 Stateless" section should be trusted instead.
 *
 * Note that because the DRM sets bit 4 of HDC_CHICKEN0 on BDW, CHV and at
 * least some pre-production steppings of SKL due to WaForceEnableNonCoherent,
 * HDC memory access may have been overridden by the kernel to be non-coherent
 * (matching the behavior of the same BTI on pre-Gfx8 hardware) and BTI 255
 * may actually be an alias for BTI 253.
 */
#define GFX8_BTI_STATELESS_IA_COHERENT 255
#define GFX8_BTI_STATELESS_NON_COHERENT 253
#define GFX9_BTI_BINDLESS 252

/* This ID doesn't map to any HW-related value. It exists to inform the
 * lowering code to not use the bindless heap.
 */
#define GFX125_NON_BINDLESS (1u << 16)
1099
/* Dataport atomic operations for Untyped Atomic Integer Operation message
 * (and others).
 */
#define ELK_AOP_AND 1
#define ELK_AOP_OR 2
#define ELK_AOP_XOR 3
#define ELK_AOP_MOV 4
#define ELK_AOP_INC 5
#define ELK_AOP_DEC 6
#define ELK_AOP_ADD 7
#define ELK_AOP_SUB 8
#define ELK_AOP_REVSUB 9
#define ELK_AOP_IMAX 10
#define ELK_AOP_IMIN 11
#define ELK_AOP_UMAX 12
#define ELK_AOP_UMIN 13
#define ELK_AOP_CMPWR 14
#define ELK_AOP_PREDEC 15

/* Dataport atomic operations for Untyped Atomic Float Operation message.
 * Note these overlap numerically with the integer ELK_AOP_* values above;
 * the message type distinguishes the two namespaces.
 */
#define ELK_AOP_FMAX 1
#define ELK_AOP_FMIN 2
#define ELK_AOP_FCMPWR 3
#define ELK_AOP_FADD 4
1124
/* Extended math function encodings (math instruction / math shared
 * function), plus the associated control fields below.
 */
#define ELK_MATH_FUNCTION_INV 1
#define ELK_MATH_FUNCTION_LOG 2
#define ELK_MATH_FUNCTION_EXP 3
#define ELK_MATH_FUNCTION_SQRT 4
#define ELK_MATH_FUNCTION_RSQ 5
#define ELK_MATH_FUNCTION_SIN 6
#define ELK_MATH_FUNCTION_COS 7
#define ELK_MATH_FUNCTION_SINCOS 8 /* gfx4, gfx5 */
#define ELK_MATH_FUNCTION_FDIV 9 /* gfx6+ */
#define ELK_MATH_FUNCTION_POW 10
#define ELK_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER 11
#define ELK_MATH_FUNCTION_INT_DIV_QUOTIENT 12
#define ELK_MATH_FUNCTION_INT_DIV_REMAINDER 13
#define GFX8_MATH_FUNCTION_INVM 14
#define GFX8_MATH_FUNCTION_RSQRTM 15

/* Signedness control for the integer math functions. */
#define ELK_MATH_INTEGER_UNSIGNED 0
#define ELK_MATH_INTEGER_SIGNED 1

#define ELK_MATH_PRECISION_FULL 0
#define ELK_MATH_PRECISION_PARTIAL 1

#define ELK_MATH_SATURATE_NONE 0
#define ELK_MATH_SATURATE_SATURATE 1

#define ELK_MATH_DATA_VECTOR 0
#define ELK_MATH_DATA_SCALAR 1
1152
/* URB message opcodes, by the generation that introduced each. */
#define ELK_URB_OPCODE_WRITE_HWORD 0
#define ELK_URB_OPCODE_WRITE_OWORD 1
#define ELK_URB_OPCODE_READ_HWORD 2
#define ELK_URB_OPCODE_READ_OWORD 3
#define GFX7_URB_OPCODE_ATOMIC_MOV 4
#define GFX7_URB_OPCODE_ATOMIC_INC 5
#define GFX8_URB_OPCODE_ATOMIC_ADD 6
#define GFX8_URB_OPCODE_SIMD8_WRITE 7
#define GFX8_URB_OPCODE_SIMD8_READ 8
#define GFX125_URB_OPCODE_FENCE 9

/* URB swizzle control. */
#define ELK_URB_SWIZZLE_NONE 0
#define ELK_URB_SWIZZLE_INTERLEAVE 1
#define ELK_URB_SWIZZLE_TRANSPOSE 2

/* Per-thread scratch space size encodings (powers of two, 1 KB .. 2 MB). */
#define ELK_SCRATCH_SPACE_SIZE_1K 0
#define ELK_SCRATCH_SPACE_SIZE_2K 1
#define ELK_SCRATCH_SPACE_SIZE_4K 2
#define ELK_SCRATCH_SPACE_SIZE_8K 3
#define ELK_SCRATCH_SPACE_SIZE_16K 4
#define ELK_SCRATCH_SPACE_SIZE_32K 5
#define ELK_SCRATCH_SPACE_SIZE_64K 6
#define ELK_SCRATCH_SPACE_SIZE_128K 7
#define ELK_SCRATCH_SPACE_SIZE_256K 8
#define ELK_SCRATCH_SPACE_SIZE_512K 9
#define ELK_SCRATCH_SPACE_SIZE_1M 10
#define ELK_SCRATCH_SPACE_SIZE_2M 11

/* Message gateway shared function: sub-function IDs. */
#define ELK_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY 0
#define ELK_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY 1
#define ELK_MESSAGE_GATEWAY_SFID_FORWARD_MSG 2
#define ELK_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP 3
#define ELK_MESSAGE_GATEWAY_SFID_BARRIER_MSG 4
#define ELK_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
#define ELK_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE 6
1187 #define ELK_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE 6
1188
1189
/* Gfx7 "GS URB Entry Allocation Size" is a U9-1 field, so the maximum gs_size
 * is 2^9, or 512. It's counted in multiples of 64 bytes.
 *
 * Identical for VS, DS, and HS.
 */
#define GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_DS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_HS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_VS_URB_ENTRY_SIZE_BYTES (512*64)

/* GS edge indicator flag bits (bits 8 and 9). */
#define ELK_GS_EDGE_INDICATOR_0 (1 << 8)
#define ELK_GS_EDGE_INDICATOR_1 (1 << 9)

/* Gfx6 "GS URB Entry Allocation Size" is defined as a number of 1024-bit
 * (128 bytes) URB rows and the maximum allowed value is 5 rows.
 */
#define GFX6_MAX_GS_URB_ENTRY_SIZE_BYTES (5*128)

/* GS Thread Payload
 */

/* 3DSTATE_GS "Output Vertex Size" has an effective maximum of 62. It's
 * counted in multiples of 16 bytes.
 */
#define GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES (62*16)


/* R0 */
# define GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT 27

/* CR0.0[5:4] Floating-Point Rounding Modes
 * Skylake PRM, Volume 7 Part 1, "Control Register", page 756
 */

#define ELK_CR0_RND_MODE_MASK 0x30
#define ELK_CR0_RND_MODE_SHIFT 4
1226
/* Rounding modes for the CR0.0[5:4] field (see ELK_CR0_RND_MODE_* above). */
enum ENUM_PACKED elk_rnd_mode {
   ELK_RND_MODE_RTNE = 0, /* Round to Nearest or Even */
   ELK_RND_MODE_RU = 1, /* Round Up, toward +inf */
   ELK_RND_MODE_RD = 2, /* Round Down, toward -inf */
   ELK_RND_MODE_RTZ = 3, /* Round Toward Zero */
   ELK_RND_MODE_UNSPECIFIED, /* Unspecified rounding mode */
};
1234
/* CR0.0 denormal-preserve control bits. */
#define ELK_CR0_FP64_DENORM_PRESERVE (1 << 6)
#define ELK_CR0_FP32_DENORM_PRESERVE (1 << 7)
#define ELK_CR0_FP16_DENORM_PRESERVE (1 << 10)

/* All CR0 bits making up the floating-point mode: the three
 * denormal-preserve bits plus the rounding-mode field.
 */
#define ELK_CR0_FP_MODE_MASK (ELK_CR0_FP64_DENORM_PRESERVE | \
                              ELK_CR0_FP32_DENORM_PRESERVE | \
                              ELK_CR0_FP16_DENORM_PRESERVE | \
                              ELK_CR0_RND_MODE_MASK)

/* MDC_DS - Data Size Message Descriptor Control Field
 * Skylake PRM, Volume 2d, page 129
 *
 * Specifies the number of Bytes to be read or written per Dword used at
 * byte_scattered read/write and byte_scaled read/write messages.
 */
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE 0
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD 1
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD 2
1253
/* Starting with Xe-HPG, the old dataport was massively reworked.  The new
 * unit, called the Load/Store Cache or LSC, has a significantly improved
 * interface.  Instead of bespoke messages for every case, there are
 * basically one or two messages with different bits to control things like
 * address size and how much data is read/written.  The enums below describe
 * the fields of the LSC message descriptors.
 */
/* LSC dataport message opcodes. */
enum elk_lsc_opcode {
   LSC_OP_LOAD = 0,
   LSC_OP_LOAD_CMASK = 2,
   LSC_OP_STORE = 4,
   LSC_OP_STORE_CMASK = 6,
   LSC_OP_ATOMIC_INC = 8,
   LSC_OP_ATOMIC_DEC = 9,
   LSC_OP_ATOMIC_LOAD = 10,
   LSC_OP_ATOMIC_STORE = 11,
   LSC_OP_ATOMIC_ADD = 12,
   LSC_OP_ATOMIC_SUB = 13,
   LSC_OP_ATOMIC_MIN = 14,
   LSC_OP_ATOMIC_MAX = 15,
   LSC_OP_ATOMIC_UMIN = 16,
   LSC_OP_ATOMIC_UMAX = 17,
   LSC_OP_ATOMIC_CMPXCHG = 18,
   LSC_OP_ATOMIC_FADD = 19,
   LSC_OP_ATOMIC_FSUB = 20,
   LSC_OP_ATOMIC_FMIN = 21,
   LSC_OP_ATOMIC_FMAX = 22,
   LSC_OP_ATOMIC_FCMPXCHG = 23,
   LSC_OP_ATOMIC_AND = 24,
   LSC_OP_ATOMIC_OR = 25,
   LSC_OP_ATOMIC_XOR = 26,
   LSC_OP_FENCE = 31
};
1288
1289 /*
1290 * Specifies the size of the dataport address payload in registers.
1291 */
1292 enum ENUM_PACKED lsc_addr_reg_size {
1293 LSC_ADDR_REG_SIZE_1 = 1,
1294 LSC_ADDR_REG_SIZE_2 = 2,
1295 LSC_ADDR_REG_SIZE_3 = 3,
1296 LSC_ADDR_REG_SIZE_4 = 4,
1297 LSC_ADDR_REG_SIZE_6 = 6,
1298 LSC_ADDR_REG_SIZE_8 = 8,
1299 };
1300
1301 /*
1302 * Specifies the size of the address payload item in a dataport message.
1303 */
1304 enum ENUM_PACKED lsc_addr_size {
1305 LSC_ADDR_SIZE_A16 = 1, /* 16-bit address offset */
1306 LSC_ADDR_SIZE_A32 = 2, /* 32-bit address offset */
1307 LSC_ADDR_SIZE_A64 = 3, /* 64-bit address offset */
1308 };
1309
1310 /*
1311 * Specifies the type of the address payload item in a dataport message. The
1312 * address type specifies how the dataport message decodes the Extended
1313 * Descriptor for the surface attributes and address calculation.
1314 */
1315 enum ENUM_PACKED lsc_addr_surface_type {
1316 LSC_ADDR_SURFTYPE_FLAT = 0, /* Flat */
1317 LSC_ADDR_SURFTYPE_BSS = 1, /* Bindless surface state */
1318 LSC_ADDR_SURFTYPE_SS = 2, /* Surface state */
1319 LSC_ADDR_SURFTYPE_BTI = 3, /* Binding table index */
1320 };
1321
1322 /*
1323 * Specifies the dataport message override to the default L1 and L3 memory
1324 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1325 * cache streaming (S) and invalidate-after-read (IAR). Dataport L3 cache
1326 * policies are uncached (UC) and cached (C).
1327 */
1328 enum lsc_cache_load {
1329 /* No override. Use the non-pipelined state or surface state cache settings
1330 * for L1 and L3.
1331 */
1332 LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
1333 /* Override to L1 uncached and L3 uncached */
1334 LSC_CACHE_LOAD_L1UC_L3UC = 1,
1335 /* Override to L1 uncached and L3 cached */
1336 LSC_CACHE_LOAD_L1UC_L3C = 2,
1337 /* Override to L1 cached and L3 uncached */
1338 LSC_CACHE_LOAD_L1C_L3UC = 3,
1339 /* Override to cache at both L1 and L3 */
1340 LSC_CACHE_LOAD_L1C_L3C = 4,
1341 /* Override to L1 streaming load and L3 uncached */
1342 LSC_CACHE_LOAD_L1S_L3UC = 5,
1343 /* Override to L1 streaming load and L3 cached */
1344 LSC_CACHE_LOAD_L1S_L3C = 6,
1345 /* For load messages, override to L1 invalidate-after-read, and L3 cached. */
1346 LSC_CACHE_LOAD_L1IAR_L3C = 7,
1347 };
1348
1349 /*
1350 * Specifies the dataport message override to the default L1 and L3 memory
1351 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1352 * streaming (S) and invalidate-after-read (IAR). Dataport L3 cache policies
1353 * are uncached (UC), cached (C), cached-as-a-constand (CC) and
1354 * invalidate-after-read (IAR).
1355 */
1356 enum PACKED xe2_lsc_cache_load {
1357 /* No override. Use the non-pipelined or surface state cache settings for L1
1358 * and L3.
1359 */
1360 XE2_LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
1361 /* Override to L1 uncached and L3 uncached */
1362 XE2_LSC_CACHE_LOAD_L1UC_L3UC = 2,
1363 /* Override to L1 uncached and L3 cached */
1364 XE2_LSC_CACHE_LOAD_L1UC_L3C = 4,
1365 /* Override to L1 uncached and L3 cached as a constant */
1366 XE2_LSC_CACHE_LOAD_L1UC_L3CC = 5,
1367 /* Override to L1 cached and L3 uncached */
1368 XE2_LSC_CACHE_LOAD_L1C_L3UC = 6,
1369 /* Override to L1 cached and L3 cached */
1370 XE2_LSC_CACHE_LOAD_L1C_L3C = 8,
1371 /* Override to L1 cached and L3 cached as a constant */
1372 XE2_LSC_CACHE_LOAD_L1C_L3CC = 9,
1373 /* Override to L1 cached as streaming load and L3 uncached */
1374 XE2_LSC_CACHE_LOAD_L1S_L3UC = 10,
1375 /* Override to L1 cached as streaming load and L3 cached */
1376 XE2_LSC_CACHE_LOAD_L1S_L3C = 12,
1377 /* Override to L1 and L3 invalidate after read */
1378 XE2_LSC_CACHE_LOAD_L1IAR_L3IAR = 14,
1379
1380 };
1381
1382 /*
1383 * Specifies the dataport message override to the default L1 and L3 memory
1384 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1385 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1386 * uncached (UC) and cached (WB).
1387 */
1388 enum ENUM_PACKED lsc_cache_store {
1389 /* No override. Use the non-pipelined or surface state cache settings for L1
1390 * and L3.
1391 */
1392 LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1393 /* Override to L1 uncached and L3 uncached */
1394 LSC_CACHE_STORE_L1UC_L3UC = 1,
1395 /* Override to L1 uncached and L3 cached */
1396 LSC_CACHE_STORE_L1UC_L3WB = 2,
1397 /* Override to L1 write-through and L3 uncached */
1398 LSC_CACHE_STORE_L1WT_L3UC = 3,
1399 /* Override to L1 write-through and L3 cached */
1400 LSC_CACHE_STORE_L1WT_L3WB = 4,
1401 /* Override to L1 streaming and L3 uncached */
1402 LSC_CACHE_STORE_L1S_L3UC = 5,
1403 /* Override to L1 streaming and L3 cached */
1404 LSC_CACHE_STORE_L1S_L3WB = 6,
1405 /* Override to L1 write-back, and L3 cached */
1406 LSC_CACHE_STORE_L1WB_L3WB = 7,
1407
1408 };
1409
1410 /*
1411 * Specifies the dataport message override to the default L1 and L3 memory
1412 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1413 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1414 * uncached (UC) and cached (WB).
1415 */
1416 enum PACKED xe2_lsc_cache_store {
1417 /* No override. Use the non-pipelined or surface state cache settings for L1
1418 * and L3.
1419 */
1420 XE2_LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1421 /* Override to L1 uncached and L3 uncached */
1422 XE2_LSC_CACHE_STORE_L1UC_L3UC = 2,
1423 /* Override to L1 uncached and L3 cached */
1424 XE2_LSC_CACHE_STORE_L1UC_L3WB = 4,
1425 /* Override to L1 write-through and L3 uncached */
1426 XE2_LSC_CACHE_STORE_L1WT_L3UC = 6,
1427 /* Override to L1 write-through and L3 cached */
1428 XE2_LSC_CACHE_STORE_L1WT_L3WB = 8,
1429 /* Override to L1 streaming and L3 uncached */
1430 XE2_LSC_CACHE_STORE_L1S_L3UC = 10,
1431 /* Override to L1 streaming and L3 cached */
1432 XE2_LSC_CACHE_STORE_L1S_L3WB = 12,
1433 /* Override to L1 write-back and L3 cached */
1434 XE2_LSC_CACHE_STORE_L1WB_L3WB = 14,
1435
1436 };
1437
/* Select the platform-appropriate cache-policy enumerant: expands to
 * LSC_CACHE_<l_or_s>_<cc> for gfx ver < 20 and XE2_LSC_CACHE_<l_or_s>_<cc>
 * for Xe2+.
 */
#define LSC_CACHE(devinfo, l_or_s, cc) \
   ((devinfo)->ver < 20 ? (unsigned)LSC_CACHE_ ## l_or_s ## _ ## cc : \
    (unsigned)XE2_LSC_CACHE_ ## l_or_s ## _ ## cc)
1441
1442 /*
1443 * Specifies which components of the data payload 4-element vector (X,Y,Z,W) is
1444 * packed into the register payload.
1445 */
1446 enum ENUM_PACKED lsc_cmask {
1447 LSC_CMASK_X = 0x1,
1448 LSC_CMASK_Y = 0x2,
1449 LSC_CMASK_XY = 0x3,
1450 LSC_CMASK_Z = 0x4,
1451 LSC_CMASK_XZ = 0x5,
1452 LSC_CMASK_YZ = 0x6,
1453 LSC_CMASK_XYZ = 0x7,
1454 LSC_CMASK_W = 0x8,
1455 LSC_CMASK_XW = 0x9,
1456 LSC_CMASK_YW = 0xa,
1457 LSC_CMASK_XYW = 0xb,
1458 LSC_CMASK_ZW = 0xc,
1459 LSC_CMASK_XZW = 0xd,
1460 LSC_CMASK_YZW = 0xe,
1461 LSC_CMASK_XYZW = 0xf,
1462 };
1463
1464 /*
1465 * Specifies the size of the data payload item in a dataport message.
1466 */
1467 enum ENUM_PACKED lsc_data_size {
1468 /* 8-bit scalar data value in memory, packed into a 8-bit data value in
1469 * register.
1470 */
1471 LSC_DATA_SIZE_D8 = 0,
1472 /* 16-bit scalar data value in memory, packed into a 16-bit data value in
1473 * register.
1474 */
1475 LSC_DATA_SIZE_D16 = 1,
1476 /* 32-bit scalar data value in memory, packed into 32-bit data value in
1477 * register.
1478 */
1479 LSC_DATA_SIZE_D32 = 2,
1480 /* 64-bit scalar data value in memory, packed into 64-bit data value in
1481 * register.
1482 */
1483 LSC_DATA_SIZE_D64 = 3,
1484 /* 8-bit scalar data value in memory, packed into 32-bit unsigned data value
1485 * in register.
1486 */
1487 LSC_DATA_SIZE_D8U32 = 4,
1488 /* 16-bit scalar data value in memory, packed into 32-bit unsigned data
1489 * value in register.
1490 */
1491 LSC_DATA_SIZE_D16U32 = 5,
1492 /* 16-bit scalar BigFloat data value in memory, packed into 32-bit float
1493 * value in register.
1494 */
1495 LSC_DATA_SIZE_D16BF32 = 6,
1496 };
1497
1498 /*
1499 * Enum specifies the scope of the fence.
1500 */
1501 enum ENUM_PACKED lsc_fence_scope {
1502 /* Wait until all previous memory transactions from this thread are observed
1503 * within the local thread-group.
1504 */
1505 LSC_FENCE_THREADGROUP = 0,
1506 /* Wait until all previous memory transactions from this thread are observed
1507 * within the local sub-slice.
1508 */
1509 LSC_FENCE_LOCAL = 1,
1510 /* Wait until all previous memory transactions from this thread are observed
1511 * in the local tile.
1512 */
1513 LSC_FENCE_TILE = 2,
1514 /* Wait until all previous memory transactions from this thread are observed
1515 * in the local GPU.
1516 */
1517 LSC_FENCE_GPU = 3,
1518 /* Wait until all previous memory transactions from this thread are observed
1519 * across all GPUs in the system.
1520 */
1521 LSC_FENCE_ALL_GPU = 4,
1522 /* Wait until all previous memory transactions from this thread are observed
1523 * at the "system" level.
1524 */
1525 LSC_FENCE_SYSTEM_RELEASE = 5,
1526 /* For GPUs that do not follow PCIe Write ordering for downstream writes
1527 * targeting device memory, a fence message with scope=System_Acquire will
1528 * commit to device memory all downstream and peer writes that have reached
1529 * the device.
1530 */
1531 LSC_FENCE_SYSTEM_ACQUIRE = 6,
1532 };
1533
1534 /*
1535 * Specifies the type of cache flush operation to perform after a fence is
1536 * complete.
1537 */
1538 enum ENUM_PACKED lsc_flush_type {
1539 LSC_FLUSH_TYPE_NONE = 0,
1540 /*
1541 * For a R/W cache, evict dirty lines (M to I state) and invalidate clean
1542 * lines. For a RO cache, invalidate clean lines.
1543 */
1544 LSC_FLUSH_TYPE_EVICT = 1,
1545 /*
1546 * For both R/W and RO cache, invalidate clean lines in the cache.
1547 */
1548 LSC_FLUSH_TYPE_INVALIDATE = 2,
1549 /*
1550 * For a R/W cache, invalidate dirty lines (M to I state), without
1551 * write-back to next level. This opcode does nothing for a RO cache.
1552 */
1553 LSC_FLUSH_TYPE_DISCARD = 3,
1554 /*
1555 * For a R/W cache, write-back dirty lines to the next level, but kept in
1556 * the cache as "clean" (M to V state). This opcode does nothing for a RO
1557 * cache.
1558 */
1559 LSC_FLUSH_TYPE_CLEAN = 4,
1560 /*
1561 * Flush "RW" section of the L3 cache, but leave L1 and L2 caches untouched.
1562 */
1563 LSC_FLUSH_TYPE_L3ONLY = 5,
1564 /*
1565 * HW maps this flush type internally to NONE.
1566 */
1567 LSC_FLUSH_TYPE_NONE_6 = 6,
1568
1569 };
1570
/* Routing selector for UGM (untyped global memory) fence messages. */
enum ENUM_PACKED lsc_backup_fence_routing {
   /* Normal routing: UGM fence is routed to UGM pipeline. */
   LSC_NORMAL_ROUTING,
   /* Route UGM fence to LSC unit. */
   LSC_ROUTE_TO_LSC,
};
1577
1578 /*
1579 * Specifies the size of the vector in a dataport message.
1580 */
1581 enum ENUM_PACKED lsc_vect_size {
1582 LSC_VECT_SIZE_V1 = 0, /* vector length 1 */
1583 LSC_VECT_SIZE_V2 = 1, /* vector length 2 */
1584 LSC_VECT_SIZE_V3 = 2, /* Vector length 3 */
1585 LSC_VECT_SIZE_V4 = 3, /* Vector length 4 */
1586 LSC_VECT_SIZE_V8 = 4, /* Vector length 8 */
1587 LSC_VECT_SIZE_V16 = 5, /* Vector length 16 */
1588 LSC_VECT_SIZE_V32 = 6, /* Vector length 32 */
1589 LSC_VECT_SIZE_V64 = 7, /* Vector length 64 */
1590 };
1591
1592 #define LSC_ONE_ADDR_REG 1
1593
1594 #endif /* ELK_EU_DEFINES_H */
1595