1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32 #ifndef BRW_EU_DEFINES_H
33 #define BRW_EU_DEFINES_H
34
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include "util/macros.h"
38 #include "dev/intel_device_info.h"
39
40 /* The following hunk, up-to "Execution Unit" is used by both the
41 * intel/compiler and i965 codebase. */
42
/* Build a 32-bit mask covering bits [high:low], inclusive.
 *
 * NOTE(review): undefined for a full 32-bit span (high == 31 && low == 0),
 * since the inner shift count would be 32 (UB per C11 6.5.7).  All current
 * users appear to describe sub-32-bit fields.
 */
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
/* Using the GNU statement expression extension.
 *
 * Shift `value` into a named field, asserting it fits.  `field` is a token
 * prefix: the macros below expect companion FIELD_SHIFT / FIELD_MASK macros.
 */
#define SET_FIELD(value, field)                                         \
   ({                                                                   \
      uint32_t fieldval = (uint32_t)(value) << field ## _SHIFT;         \
      assert((fieldval & ~ field ## _MASK) == 0);                       \
      fieldval & field ## _MASK;                                        \
   })

/* Shift `value` into bits [high:low], asserting it fits in the range. */
#define SET_BITS(value, high, low)                                      \
   ({                                                                   \
      const uint32_t fieldval = (uint32_t)(value) << (low);             \
      assert((fieldval & ~INTEL_MASK(high, low)) == 0);                 \
      fieldval & INTEL_MASK(high, low);                                 \
   })

/* Extract bits [high:low] from `data`.  `data` is parenthesized so that
 * compound expressions (e.g. `a ^ b`) expand correctly despite the higher
 * precedence of the `&` inside the macro.
 */
#define GET_BITS(data, high, low) (((data) & INTEL_MASK((high), (low))) >> (low))
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
61
/* Bitfields for the URB_WRITE message, DW2 of message header: */
#define URB_WRITE_PRIM_END 0x1
#define URB_WRITE_PRIM_START 0x2
#define URB_WRITE_PRIM_TYPE_SHIFT 2

/* Point-sprite enable bit position. */
#define BRW_SPRITE_POINT_ENABLE 16

/* Gfx7 GS control-data formats: "cut" bits vs. stream-ID bits. */
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT 0
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID 1
71
/* Execution Unit (EU) defines
 */

/* Instruction access modes: Align1 (scalar regioning) vs Align16 (vec4). */
#define BRW_ALIGN_1 0
#define BRW_ALIGN_16 1

/* Register addressing modes: direct, or indirect through the address reg. */
#define BRW_ADDRESS_DIRECT 0
#define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER 1

/* Vec4 channel indices (Align16 swizzle/writemask components). */
#define BRW_CHANNEL_X 0
#define BRW_CHANNEL_Y 1
#define BRW_CHANNEL_Z 2
#define BRW_CHANNEL_W 3
85
/* Instruction compression control.  Values are hardware encodings — do not
 * reorder or renumber.
 */
enum brw_compression {
   BRW_COMPRESSION_NONE = 0,
   BRW_COMPRESSION_2NDHALF = 1,
   BRW_COMPRESSION_COMPRESSED = 2,
};
91
/* Gfx6+ quarter/half execution-group controls (which SIMD slice of the
 * dispatched channels an instruction operates on).  Note 1H aliases 1Q and
 * 2H aliases 3Q in the hardware encoding.
 */
#define GFX6_COMPRESSION_1Q 0
#define GFX6_COMPRESSION_2Q 1
#define GFX6_COMPRESSION_3Q 2
#define GFX6_COMPRESSION_4Q 3
#define GFX6_COMPRESSION_1H 0
#define GFX6_COMPRESSION_2H 2
98
/* Conditional modifier / predicate comparison functions.  Values are
 * hardware encodings.  EQ/NEQ are deliberate aliases for Z/NZ.
 */
enum ENUM_PACKED brw_conditional_mod {
   BRW_CONDITIONAL_NONE = 0,
   BRW_CONDITIONAL_Z = 1,
   BRW_CONDITIONAL_NZ = 2,
   BRW_CONDITIONAL_EQ = 1, /* Z */
   BRW_CONDITIONAL_NEQ = 2, /* NZ */
   BRW_CONDITIONAL_G = 3,
   BRW_CONDITIONAL_GE = 4,
   BRW_CONDITIONAL_L = 5,
   BRW_CONDITIONAL_LE = 6,
   BRW_CONDITIONAL_R = 7, /* Gen <= 5 */
   BRW_CONDITIONAL_O = 8,
   BRW_CONDITIONAL_U = 9,
};
113
/* Debug control: optionally raise a breakpoint on the instruction. */
#define BRW_DEBUG_NONE 0
#define BRW_DEBUG_BREAKPOINT 1

/* Dependency control (software scoreboard overrides for src/dst hazards). */
#define BRW_DEPENDENCY_NORMAL 0
#define BRW_DEPENDENCY_NOTCLEARED 1
#define BRW_DEPENDENCY_NOTCHECKED 2
#define BRW_DEPENDENCY_DISABLE 3
121
/* Execution size (SIMD width) field encodings: log2 of the channel count. */
enum ENUM_PACKED brw_execution_size {
   BRW_EXECUTE_1 = 0,
   BRW_EXECUTE_2 = 1,
   BRW_EXECUTE_4 = 2,
   BRW_EXECUTE_8 = 3,
   BRW_EXECUTE_16 = 4,
   BRW_EXECUTE_32 = 5,
};
130
/* Horizontal stride encodings for register regions.  Note the encoding is
 * not the stride itself: stride 4 encodes as 3.
 */
enum ENUM_PACKED brw_horizontal_stride {
   BRW_HORIZONTAL_STRIDE_0 = 0,
   BRW_HORIZONTAL_STRIDE_1 = 1,
   BRW_HORIZONTAL_STRIDE_2 = 2,
   BRW_HORIZONTAL_STRIDE_4 = 3,
};
137
/* Gfx10+ Align1 3-source instruction: source horizontal stride encodings. */
enum ENUM_PACKED gfx10_align1_3src_src_horizontal_stride {
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2 = 2,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4 = 3,
};
144
/* Gfx10+ Align1 3-source instruction: destination horizontal stride
 * encodings (only strides 1 and 2 are representable).
 */
enum ENUM_PACKED gfx10_align1_3src_dst_horizontal_stride {
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1 = 0,
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_2 = 1,
};
149
/* Saturation control: clamp the result to the destination type's range. */
#define BRW_INSTRUCTION_NORMAL 0
#define BRW_INSTRUCTION_SATURATE 1

/* Pre-Gfx6 mask control. */
#define BRW_MASK_ENABLE 0
#define BRW_MASK_DISABLE 1

/** @{
 *
 * Gfx6 has replaced "mask enable/disable" with WECtrl, which is
 * effectively the same but much simpler to think about. Now, there
 * are two contributors ANDed together to whether channels are
 * executed: The predication on the instruction, and the channel write
 * enable.
 */
/**
 * This is the default value. It means that a channel's write enable is set
 * if the per-channel IP is pointing at this instruction.
 */
#define BRW_WE_NORMAL 0
/**
 * This is used like BRW_MASK_DISABLE, and causes all channels to have
 * their write enable set. Note that predication still contributes to
 * whether the channel actually gets written.
 */
#define BRW_WE_ALL 1
/** @} */
176
/* All opcodes known to the compiler: real hardware instructions first
 * (their order determines the implicit values — do not reorder), then
 * virtual/pseudo opcodes starting at NUM_BRW_OPCODES that are lowered
 * before code generation.
 */
enum opcode {
   /* These are the actual hardware instructions. */
   BRW_OPCODE_ILLEGAL,
   BRW_OPCODE_SYNC,
   BRW_OPCODE_MOV,
   BRW_OPCODE_SEL,
   BRW_OPCODE_MOVI,
   BRW_OPCODE_NOT,
   BRW_OPCODE_AND,
   BRW_OPCODE_OR,
   BRW_OPCODE_XOR,
   BRW_OPCODE_SHR,
   BRW_OPCODE_SHL,
   BRW_OPCODE_SMOV,
   BRW_OPCODE_ASR,
   BRW_OPCODE_ROR, /**< Gfx11+ */
   BRW_OPCODE_ROL, /**< Gfx11+ */
   BRW_OPCODE_CMP,
   BRW_OPCODE_CMPN,
   BRW_OPCODE_CSEL,
   BRW_OPCODE_BFREV,
   BRW_OPCODE_BFE,
   BRW_OPCODE_BFI1,
   BRW_OPCODE_BFI2,
   BRW_OPCODE_JMPI,
   BRW_OPCODE_BRD,
   BRW_OPCODE_IF,
   BRW_OPCODE_BRC,
   BRW_OPCODE_ELSE,
   BRW_OPCODE_ENDIF,
   BRW_OPCODE_DO, /**< Used as pseudo opcode, will be moved later. */
   BRW_OPCODE_WHILE,
   BRW_OPCODE_BREAK,
   BRW_OPCODE_CONTINUE,
   BRW_OPCODE_HALT,
   BRW_OPCODE_CALLA,
   BRW_OPCODE_CALL,
   BRW_OPCODE_RET,
   BRW_OPCODE_GOTO,
   BRW_OPCODE_WAIT,
   BRW_OPCODE_SEND,
   BRW_OPCODE_SENDC,
   BRW_OPCODE_SENDS,
   BRW_OPCODE_SENDSC,
   BRW_OPCODE_MATH,
   BRW_OPCODE_ADD,
   BRW_OPCODE_MUL,
   BRW_OPCODE_AVG,
   BRW_OPCODE_FRC,
   BRW_OPCODE_RNDU,
   BRW_OPCODE_RNDD,
   BRW_OPCODE_RNDE,
   BRW_OPCODE_RNDZ,
   BRW_OPCODE_MAC,
   BRW_OPCODE_MACH,
   BRW_OPCODE_LZD,
   BRW_OPCODE_FBH,
   BRW_OPCODE_FBL,
   BRW_OPCODE_CBIT,
   BRW_OPCODE_ADDC,
   BRW_OPCODE_SUBB,
   BRW_OPCODE_SAD2,
   BRW_OPCODE_SADA2,
   BRW_OPCODE_ADD3, /* Gen12+ only */
   BRW_OPCODE_DP4,
   BRW_OPCODE_DPH,
   BRW_OPCODE_DP3,
   BRW_OPCODE_DP2,
   BRW_OPCODE_DP4A, /**< Gfx12+ */
   BRW_OPCODE_LINE,
   BRW_OPCODE_DPAS, /**< Gfx12.5+ */
   BRW_OPCODE_PLN, /**< Up until Gfx9 */
   BRW_OPCODE_MAD,
   BRW_OPCODE_LRP,
   BRW_OPCODE_MADM,
   BRW_OPCODE_NOP,

   /* Sentinel: count of real hardware opcodes; virtual opcodes follow. */
   NUM_BRW_OPCODES,

   /**
    * The position/ordering of the arguments are defined
    * by the enum fb_write_logical_srcs.
    */
   FS_OPCODE_FB_WRITE_LOGICAL = NUM_BRW_OPCODES,

   FS_OPCODE_FB_READ,
   FS_OPCODE_FB_READ_LOGICAL,

   /* Extended math unit operations. */
   SHADER_OPCODE_RCP,
   SHADER_OPCODE_RSQ,
   SHADER_OPCODE_SQRT,
   SHADER_OPCODE_EXP2,
   SHADER_OPCODE_LOG2,
   SHADER_OPCODE_POW,
   SHADER_OPCODE_INT_QUOTIENT,
   SHADER_OPCODE_INT_REMAINDER,
   SHADER_OPCODE_SIN,
   SHADER_OPCODE_COS,

   /**
    * A generic "send" opcode. The first two sources are the message
    * descriptor and extended message descriptor respectively. The third
    * and optional fourth sources are the message payload
    */
   SHADER_OPCODE_SEND,

   /**
    * An "undefined" write which does nothing but indicates to liveness that
    * we don't care about any values in the register which predate this
    * instruction. Used to prevent partial writes from causing issues with
    * live ranges.
    */
   SHADER_OPCODE_UNDEF,

   /**
    * Texture sampling opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode but instead of taking a single payload blob they expect their
    * arguments separately as individual sources. The position/ordering of the
    * arguments are defined by the enum tex_logical_srcs.
    */
   SHADER_OPCODE_TEX,
   SHADER_OPCODE_TEX_LOGICAL,
   SHADER_OPCODE_TXD,
   SHADER_OPCODE_TXD_LOGICAL,
   SHADER_OPCODE_TXF,
   SHADER_OPCODE_TXF_LOGICAL,
   SHADER_OPCODE_TXF_LZ,
   SHADER_OPCODE_TXL,
   SHADER_OPCODE_TXL_LOGICAL,
   SHADER_OPCODE_TXL_LZ,
   SHADER_OPCODE_TXS,
   SHADER_OPCODE_TXS_LOGICAL,
   FS_OPCODE_TXB,
   FS_OPCODE_TXB_LOGICAL,
   SHADER_OPCODE_TXF_CMS,
   SHADER_OPCODE_TXF_CMS_LOGICAL,
   SHADER_OPCODE_TXF_CMS_W,
   SHADER_OPCODE_TXF_CMS_W_LOGICAL,
   SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL,
   SHADER_OPCODE_TXF_UMS,
   SHADER_OPCODE_TXF_UMS_LOGICAL,
   SHADER_OPCODE_TXF_MCS,
   SHADER_OPCODE_TXF_MCS_LOGICAL,
   SHADER_OPCODE_LOD,
   SHADER_OPCODE_LOD_LOGICAL,
   SHADER_OPCODE_TG4,
   SHADER_OPCODE_TG4_LOGICAL,
   SHADER_OPCODE_TG4_IMPLICIT_LOD,
   SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL,
   SHADER_OPCODE_TG4_EXPLICIT_LOD,
   SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL,
   SHADER_OPCODE_TG4_BIAS,
   SHADER_OPCODE_TG4_BIAS_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET,
   SHADER_OPCODE_TG4_OFFSET_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET_LOD,
   SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET_BIAS,
   SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL,
   SHADER_OPCODE_SAMPLEINFO,
   SHADER_OPCODE_SAMPLEINFO_LOGICAL,

   SHADER_OPCODE_IMAGE_SIZE_LOGICAL,

   /**
    * Combines multiple sources of size 1 into a larger virtual GRF.
    * For example, parameters for a send-from-GRF message. Or, updating
    * channels of a size 4 VGRF used to store vec4s such as texturing results.
    *
    * This will be lowered into MOVs from each source to consecutive offsets
    * of the destination VGRF.
    *
    * src[0] may be BAD_FILE. If so, the lowering pass skips emitting the MOV,
    * but still reserves the first channel of the destination VGRF. This can be
    * used to reserve space for, say, a message header set up by the generators.
    */
   SHADER_OPCODE_LOAD_PAYLOAD,

   /**
    * Packs a number of sources into a single value. Unlike LOAD_PAYLOAD, this
    * acts intra-channel, obtaining the final value for each channel by
    * combining the sources values for the same channel, the first source
    * occupying the lowest bits and the last source occupying the highest
    * bits.
    */
   FS_OPCODE_PACK,

   /**
    * Typed and untyped surface access opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode but instead of taking a single payload blob they expect their
    * arguments separately as individual sources:
    *
    * Source 0: [required] Surface coordinates.
    * Source 1: [optional] Operation source.
    * Source 2: [required] Surface index.
    * Source 3: [required] Number of coordinate components (as UD immediate).
    * Source 4: [required] Opcode-specific control immediate, same as source 2
    *           of the matching non-LOGICAL opcode.
    */
   SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
   SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,

   SHADER_OPCODE_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_OWORD_BLOCK_WRITE_LOGICAL,

   /**
    * Untyped A64 surface access opcodes.
    *
    * Source 0: 64-bit address
    * Source 1: Operational source
    * Source 2: [required] Opcode-specific control immediate, same as source 2
    *           of the matching non-LOGICAL opcode.
    */
   SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
   SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
   SHADER_OPCODE_A64_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_A64_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_A64_OWORD_BLOCK_WRITE_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,

   SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
   SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,

   SHADER_OPCODE_RND_MODE,
   SHADER_OPCODE_FLOAT_CONTROL_MODE,

   /**
    * Byte scattered write/read opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode, but instead of taking a single payload blob they expect their
    * arguments separately as individual sources, like untyped write/read.
    */
   SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
   SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL,

   /**
    * Memory fence messages.
    *
    * Source 0: Must be register g0, used as header.
    * Source 1: Immediate bool to indicate whether control is returned to the
    *           thread only after the fence has been honored.
    * Source 2: Immediate byte indicating which memory to fence.  Zero means
    *           global memory; GFX7_BTI_SLM means SLM (for Gfx11+ only).
    *
    * Vec4 backend only uses Source 0.
    */
   SHADER_OPCODE_MEMORY_FENCE,

   /**
    * Scheduling-only fence.
    *
    * Sources can be used to force a stall until the registers in those are
    * available. This might generate MOVs or SYNC_NOPs (Gfx12+).
    */
   FS_OPCODE_SCHEDULING_FENCE,

   SHADER_OPCODE_SCRATCH_HEADER,

   /**
    * Gfx8+ SIMD8 URB messages.
    */
   SHADER_OPCODE_URB_READ_LOGICAL,
   SHADER_OPCODE_URB_WRITE_LOGICAL,

   /**
    * Return the index of the first enabled live channel and assign it to
    * the first component of the destination. Frequently used as input
    * for the BROADCAST pseudo-opcode.
    */
   SHADER_OPCODE_FIND_LIVE_CHANNEL,

   /**
    * Return the index of the last enabled live channel and assign it to
    * the first component of the destination.
    */
   SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL,

   /**
    * Return the current execution mask and assign it to the first component
    * of the destination.
    *
    * \sa opcode::FS_OPCODE_LOAD_LIVE_CHANNELS
    */
   SHADER_OPCODE_LOAD_LIVE_CHANNELS,

   /**
    * Return the current execution mask in the specified flag subregister.
    * Can be CSE'ed more easily than a plain MOV from the ce0 ARF register.
    */
   FS_OPCODE_LOAD_LIVE_CHANNELS,

   /**
    * Pick the channel from its first source register given by the index
    * specified as second source. Useful for variable indexing of surfaces.
    *
    * Note that because the result of this instruction is by definition
    * uniform and it can always be splatted to multiple channels using a
    * scalar regioning mode, only the first channel of the destination region
    * is guaranteed to be updated, which implies that BROADCAST instructions
    * should usually be marked force_writemask_all.
    */
   SHADER_OPCODE_BROADCAST,

   /* Pick the channel from its first source register given by the index
    * specified as second source.
    *
    * This is similar to the BROADCAST instruction except that it takes a
    * dynamic index and potentially puts a different value in each output
    * channel.
    */
   SHADER_OPCODE_SHUFFLE,

   /* Select between src0 and src1 based on channel enables.
    *
    * This instruction copies src0 into the enabled channels of the
    * destination and copies src1 into the disabled channels.
    */
   SHADER_OPCODE_SEL_EXEC,

   /* This turns into an align16 mov from src0 to dst with a swizzle
    * provided as an immediate in src1.
    */
   SHADER_OPCODE_QUAD_SWIZZLE,

   /* Take every Nth element in src0 and broadcast it to the group of N
    * channels in which it lives in the destination. The offset within the
    * cluster is given by src1 and the cluster size is given by src2.
    */
   SHADER_OPCODE_CLUSTER_BROADCAST,

   SHADER_OPCODE_GET_BUFFER_SIZE,

   SHADER_OPCODE_INTERLOCK,

   /** Target for a HALT
    *
    * All HALT instructions in a shader must target the same jump point and
    * that point is denoted by a HALT_TARGET instruction.
    */
   SHADER_OPCODE_HALT_TARGET,

   /* Screen-space derivative opcodes. */
   FS_OPCODE_DDX_COARSE,
   FS_OPCODE_DDX_FINE,
   /**
    * Compute dFdy(), dFdyCoarse(), or dFdyFine().
    */
   FS_OPCODE_DDY_COARSE,
   FS_OPCODE_DDY_FINE,
   FS_OPCODE_LINTERP,
   FS_OPCODE_PIXEL_X,
   FS_OPCODE_PIXEL_Y,
   FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
   FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
   FS_OPCODE_PACK_HALF_2x16_SPLIT,
   FS_OPCODE_INTERPOLATE_AT_SAMPLE,
   FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
   FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET,

   /**
    * Terminate the compute shader.
    */
   CS_OPCODE_CS_TERMINATE,

   /**
    * GLSL barrier()
    */
   SHADER_OPCODE_BARRIER,

   /**
    * Calculate the high 32-bits of a 32x32 multiply.
    */
   SHADER_OPCODE_MULH,

   /** Signed subtraction with saturation. */
   SHADER_OPCODE_ISUB_SAT,

   /** Unsigned subtraction with saturation. */
   SHADER_OPCODE_USUB_SAT,

   /**
    * A MOV that uses VxH indirect addressing.
    *
    * Source 0: A register to start from (HW_REG).
    * Source 1: An indirect offset (in bytes, UD GRF).
    * Source 2: The length of the region that could be accessed (in bytes,
    *           UD immediate).
    */
   SHADER_OPCODE_MOV_INDIRECT,

   /** Fills out a relocatable immediate */
   SHADER_OPCODE_MOV_RELOC_IMM,

   /* Bindless thread dispatch (ray tracing). */
   SHADER_OPCODE_BTD_SPAWN_LOGICAL,
   SHADER_OPCODE_BTD_RETIRE_LOGICAL,

   SHADER_OPCODE_READ_SR_REG,

   RT_OPCODE_TRACE_RAY_LOGICAL,
};
587
/* Bitmask flags controlling URB write messages.  Combinable via the C++
 * operator| overload below.
 */
enum brw_urb_write_flags {
   BRW_URB_WRITE_NO_FLAGS = 0,

   /**
    * Causes a new URB entry to be allocated, and its address stored in the
    * destination register (gen < 7).
    */
   BRW_URB_WRITE_ALLOCATE = 0x1,

   /**
    * Causes the current URB entry to be deallocated (gen < 7).
    */
   BRW_URB_WRITE_UNUSED = 0x2,

   /**
    * Causes the thread to terminate.
    */
   BRW_URB_WRITE_EOT = 0x4,

   /**
    * Indicates that the given URB entry is complete, and may be sent further
    * down the 3D pipeline (gen < 7).
    */
   BRW_URB_WRITE_COMPLETE = 0x8,

   /**
    * Indicates that an additional offset (which may be different for the two
    * vec4 slots) is stored in the message header (gen == 7).
    */
   BRW_URB_WRITE_PER_SLOT_OFFSET = 0x10,

   /**
    * Indicates that the channel masks in the URB_WRITE message header should
    * not be overridden to 0xff (gen == 7).
    */
   BRW_URB_WRITE_USE_CHANNEL_MASKS = 0x20,

   /**
    * Indicates that the data should be sent to the URB using the
    * URB_WRITE_OWORD message rather than URB_WRITE_HWORD (gen == 7). This
    * causes offsets to be interpreted as multiples of an OWORD instead of an
    * HWORD, and only allows one OWORD to be written.
    */
   BRW_URB_WRITE_OWORD = 0x40,

   /**
    * Convenient combination of flags: end the thread while simultaneously
    * marking the given URB entry as complete.
    */
   BRW_URB_WRITE_EOT_COMPLETE = BRW_URB_WRITE_EOT | BRW_URB_WRITE_COMPLETE,

   /**
    * Convenient combination of flags: mark the given URB entry as complete
    * and simultaneously allocate a new one.
    */
   BRW_URB_WRITE_ALLOCATE_COMPLETE =
      BRW_URB_WRITE_ALLOCATE | BRW_URB_WRITE_COMPLETE,
};
646
/* Source-slot assignments for FS_OPCODE_FB_WRITE_LOGICAL. */
enum fb_write_logical_srcs {
   FB_WRITE_LOGICAL_SRC_COLOR0,      /* REQUIRED */
   FB_WRITE_LOGICAL_SRC_COLOR1,      /* for dual source blend messages */
   FB_WRITE_LOGICAL_SRC_SRC0_ALPHA,
   FB_WRITE_LOGICAL_SRC_SRC_DEPTH,   /* gl_FragDepth */
   FB_WRITE_LOGICAL_SRC_DST_DEPTH,   /* GFX4-5: passthrough from thread */
   FB_WRITE_LOGICAL_SRC_SRC_STENCIL, /* gl_FragStencilRefARB */
   FB_WRITE_LOGICAL_SRC_OMASK,       /* Sample Mask (gl_SampleMask) */
   FB_WRITE_LOGICAL_SRC_COMPONENTS,  /* REQUIRED */
   FB_WRITE_LOGICAL_NUM_SRCS
};
658
/* Source-slot assignments for the *_LOGICAL texture sampling opcodes. */
enum tex_logical_srcs {
   /** Texture coordinates */
   TEX_LOGICAL_SRC_COORDINATE,
   /** Shadow comparator */
   TEX_LOGICAL_SRC_SHADOW_C,
   /** dPdx if the operation takes explicit derivatives, otherwise LOD value */
   TEX_LOGICAL_SRC_LOD,
   /** dPdy if the operation takes explicit derivatives */
   TEX_LOGICAL_SRC_LOD2,
   /** Min LOD */
   TEX_LOGICAL_SRC_MIN_LOD,
   /** Sample index */
   TEX_LOGICAL_SRC_SAMPLE_INDEX,
   /** MCS data */
   TEX_LOGICAL_SRC_MCS,
   /** REQUIRED: Texture surface index */
   TEX_LOGICAL_SRC_SURFACE,
   /** Texture sampler index */
   TEX_LOGICAL_SRC_SAMPLER,
   /** Texture surface bindless handle */
   TEX_LOGICAL_SRC_SURFACE_HANDLE,
   /** Texture sampler bindless handle */
   TEX_LOGICAL_SRC_SAMPLER_HANDLE,
   /** Texel offset for gathers */
   TEX_LOGICAL_SRC_TG4_OFFSET,
   /** REQUIRED: Number of coordinate components (as UD immediate) */
   TEX_LOGICAL_SRC_COORD_COMPONENTS,
   /** REQUIRED: Number of derivative components (as UD immediate) */
   TEX_LOGICAL_SRC_GRAD_COMPONENTS,
   /** REQUIRED: request residency (as UD immediate) */
   TEX_LOGICAL_SRC_RESIDENCY,

   TEX_LOGICAL_NUM_SRCS,
};
693
/* Source-slot assignments for FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD. */
enum pull_uniform_constant_srcs {
   /** Surface binding table index */
   PULL_UNIFORM_CONSTANT_SRC_SURFACE,
   /** Surface bindless handle */
   PULL_UNIFORM_CONSTANT_SRC_SURFACE_HANDLE,
   /** Surface offset */
   PULL_UNIFORM_CONSTANT_SRC_OFFSET,
   /** Pull size */
   PULL_UNIFORM_CONSTANT_SRC_SIZE,

   PULL_UNIFORM_CONSTANT_SRCS,
};
706
/* Source-slot assignments for FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL. */
enum pull_varying_constant_srcs {
   /** Surface binding table index */
   PULL_VARYING_CONSTANT_SRC_SURFACE,
   /** Surface bindless handle */
   PULL_VARYING_CONSTANT_SRC_SURFACE_HANDLE,
   /** Surface offset */
   PULL_VARYING_CONSTANT_SRC_OFFSET,
   /** Pull alignment */
   PULL_VARYING_CONSTANT_SRC_ALIGNMENT,

   PULL_VARYING_CONSTANT_SRCS,
};
719
/* Source-slot assignments for SHADER_OPCODE_GET_BUFFER_SIZE. */
enum get_buffer_size_srcs {
   /** Surface binding table index */
   GET_BUFFER_SIZE_SRC_SURFACE,
   /** Surface bindless handle */
   GET_BUFFER_SIZE_SRC_SURFACE_HANDLE,
   /** LOD */
   GET_BUFFER_SIZE_SRC_LOD,

   GET_BUFFER_SIZE_SRCS
};
730
/* Source-slot assignments for the typed/untyped surface *_LOGICAL opcodes. */
enum surface_logical_srcs {
   /** Surface binding table index */
   SURFACE_LOGICAL_SRC_SURFACE,
   /** Surface bindless handle */
   SURFACE_LOGICAL_SRC_SURFACE_HANDLE,
   /** Surface address; could be multi-dimensional for typed opcodes */
   SURFACE_LOGICAL_SRC_ADDRESS,
   /** Data to be written or used in an atomic op */
   SURFACE_LOGICAL_SRC_DATA,
   /** Surface number of dimensions.  Affects the size of ADDRESS */
   SURFACE_LOGICAL_SRC_IMM_DIMS,
   /** Per-opcode immediate argument. For atomics, this is the atomic opcode */
   SURFACE_LOGICAL_SRC_IMM_ARG,
   /**
    * Some instructions with side-effects should not be predicated on
    * sample mask, e.g. lowered stores to scratch.
    */
   SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK,

   SURFACE_LOGICAL_NUM_SRCS
};
752
/* Source-slot assignments for the A64 (64-bit address) *_LOGICAL opcodes. */
enum a64_logical_srcs {
   /** Address the A64 message operates on */
   A64_LOGICAL_ADDRESS,
   /** Source for the operation (unused for LOAD ops) */
   A64_LOGICAL_SRC,
   /** Per-opcode immediate argument. Number of dwords, bit size, or atomic op. */
   A64_LOGICAL_ARG,
   /**
    * Some instructions do want to run on helper lanes (like ray queries).
    */
   A64_LOGICAL_ENABLE_HELPERS,

   A64_LOGICAL_NUM_SRCS
};
767
/* Source-slot assignments for RT_OPCODE_TRACE_RAY_LOGICAL. */
enum rt_logical_srcs {
   /** Address of the globals */
   RT_LOGICAL_SRC_GLOBALS,
   /** Level at which the tracing should start */
   RT_LOGICAL_SRC_BVH_LEVEL,
   /** Type of tracing operation */
   RT_LOGICAL_SRC_TRACE_RAY_CONTROL,
   /** Synchronous tracing (ray query) */
   RT_LOGICAL_SRC_SYNCHRONOUS,

   RT_LOGICAL_NUM_SRCS
};
780
/* Source-slot assignments for SHADER_OPCODE_URB_{READ,WRITE}_LOGICAL. */
enum urb_logical_srcs {
   URB_LOGICAL_SRC_HANDLE,
   URB_LOGICAL_SRC_PER_SLOT_OFFSETS,
   URB_LOGICAL_SRC_CHANNEL_MASK,
   /** Data to be written.  BAD_FILE for reads. */
   URB_LOGICAL_SRC_DATA,
   URB_LOGICAL_SRC_COMPONENTS,
   URB_LOGICAL_NUM_SRCS
};
790
/* Source-slot assignments for the FS_OPCODE_INTERPOLATE_AT_* opcodes. */
enum interpolator_logical_srcs {
   /** Interpolation offset */
   INTERP_SRC_OFFSET,
   /** Message data */
   INTERP_SRC_MSG_DESC,
   /** Flag register for dynamic mode */
   INTERP_SRC_DYNAMIC_MODE,

   INTERP_NUM_SRCS
};
801
802
#ifdef __cplusplus
/**
 * Allow brw_urb_write_flags enums to be ORed together.
 *
 * (C++ only: unlike C, C++ does not implicitly convert the int result of
 * `|` back to the enum type, so an overload is required.)
 */
inline brw_urb_write_flags
operator|(brw_urb_write_flags x, brw_urb_write_flags y)
{
   return static_cast<brw_urb_write_flags>(static_cast<int>(x) |
                                           static_cast<int>(y));
}
#endif
814
/* Predication modes.  Values are hardware encodings; the Align1, Align16,
 * and Xe2 names deliberately alias the same numeric space since the
 * interpretation depends on the instruction's access mode / platform.
 */
enum ENUM_PACKED brw_predicate {
   BRW_PREDICATE_NONE = 0,
   BRW_PREDICATE_NORMAL = 1,
   BRW_PREDICATE_ALIGN1_ANYV = 2,
   BRW_PREDICATE_ALIGN1_ALLV = 3,
   BRW_PREDICATE_ALIGN1_ANY2H = 4,
   BRW_PREDICATE_ALIGN1_ALL2H = 5,
   BRW_PREDICATE_ALIGN1_ANY4H = 6,
   BRW_PREDICATE_ALIGN1_ALL4H = 7,
   BRW_PREDICATE_ALIGN1_ANY8H = 8,
   BRW_PREDICATE_ALIGN1_ALL8H = 9,
   BRW_PREDICATE_ALIGN1_ANY16H = 10,
   BRW_PREDICATE_ALIGN1_ALL16H = 11,
   BRW_PREDICATE_ALIGN1_ANY32H = 12,
   BRW_PREDICATE_ALIGN1_ALL32H = 13,
   BRW_PREDICATE_ALIGN16_REPLICATE_X = 2,
   BRW_PREDICATE_ALIGN16_REPLICATE_Y = 3,
   BRW_PREDICATE_ALIGN16_REPLICATE_Z = 4,
   BRW_PREDICATE_ALIGN16_REPLICATE_W = 5,
   BRW_PREDICATE_ALIGN16_ANY4H = 6,
   BRW_PREDICATE_ALIGN16_ALL4H = 7,
   XE2_PREDICATE_ANY = 2,
   XE2_PREDICATE_ALL = 3
};
839
/* Register files.  The first three values are hardware encodings (note 2 is
 * intentionally unused); the short aliases and the values from VGRF onward
 * are compiler-internal only.
 */
enum ENUM_PACKED brw_reg_file {
   BRW_ARCHITECTURE_REGISTER_FILE = 0,
   BRW_GENERAL_REGISTER_FILE = 1,
   BRW_IMMEDIATE_VALUE = 3,

   ARF = BRW_ARCHITECTURE_REGISTER_FILE,
   FIXED_GRF = BRW_GENERAL_REGISTER_FILE,
   IMM = BRW_IMMEDIATE_VALUE,

   /* These are not hardware values */
   VGRF,
   ATTR,
   UNIFORM, /* prog_data->params[reg] */
   BAD_FILE,
};
855
/* Gfx10+ Align1 3-source register files.  Encoding 1 means "immediate" for
 * src0/src2 but "accumulator" for dest/src1 — the aliasing is intentional.
 */
enum ENUM_PACKED gfx10_align1_3src_reg_file {
   BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE = 0,
   BRW_ALIGN1_3SRC_IMMEDIATE_VALUE = 1, /* src0, src2 */
   BRW_ALIGN1_3SRC_ACCUMULATOR = 1, /* dest, src1 */
};
861
/* CNL adds Align1 support for 3-src instructions.  Bit 35 of the instruction
 * word is "Execution Datatype" which controls whether the instruction operates
 * on float or integer types.  The register arguments have fields that offer
 * finer control over their respective types.
 */
enum ENUM_PACKED gfx10_align1_3src_exec_type {
   BRW_ALIGN1_3SRC_EXEC_TYPE_INT = 0,
   BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT = 1,
};
871
/* Architecture register file (ARF) register numbers.  The high nibble
 * selects the architecture register type; the low nibble is the register
 * index within that type.
 */
#define BRW_ARF_NULL 0x00
#define BRW_ARF_ADDRESS 0x10
#define BRW_ARF_ACCUMULATOR 0x20
#define BRW_ARF_FLAG 0x30
#define BRW_ARF_MASK 0x40
#define BRW_ARF_MASK_STACK 0x50
#define BRW_ARF_MASK_STACK_DEPTH 0x60
#define BRW_ARF_STATE 0x70
#define BRW_ARF_CONTROL 0x80
#define BRW_ARF_NOTIFICATION_COUNT 0x90
#define BRW_ARF_IP 0xA0
#define BRW_ARF_TDR 0xB0
#define BRW_ARF_TIMESTAMP 0xC0

/* Mask register subregister selectors. */
#define BRW_AMASK 0
#define BRW_IMASK 1
#define BRW_LMASK 2
#define BRW_CMASK 3



/* Thread control: normal, atomic (no thread switch), or forced switch. */
#define BRW_THREAD_NORMAL 0
#define BRW_THREAD_ATOMIC 1
#define BRW_THREAD_SWITCH 2
896
/* Vertical stride encodings for register regions: log2(stride) + 1, with 0
 * meaning stride 0, plus the special one-dimensional encoding 0xF.
 */
enum ENUM_PACKED brw_vertical_stride {
   BRW_VERTICAL_STRIDE_0 = 0,
   BRW_VERTICAL_STRIDE_1 = 1,
   BRW_VERTICAL_STRIDE_2 = 2,
   BRW_VERTICAL_STRIDE_4 = 3,
   BRW_VERTICAL_STRIDE_8 = 4,
   BRW_VERTICAL_STRIDE_16 = 5,
   BRW_VERTICAL_STRIDE_32 = 6,
   BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL = 0xF,
};
907
/* Gfx10+ Align1 3-source vertical stride encodings.
 *
 * NOTE(review): STRIDE_1 and STRIDE_2 share encoding 1 in this table —
 * presumably the hardware interprets the encoding differently depending on
 * platform/operand; confirm against the PRM before changing.
 */
enum ENUM_PACKED gfx10_align1_3src_vertical_stride {
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4 = 2,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8 = 3,
};
915
/* Region width encodings: log2 of the number of elements per row. */
enum ENUM_PACKED brw_width {
   BRW_WIDTH_1 = 0,
   BRW_WIDTH_2 = 1,
   BRW_WIDTH_4 = 2,
   BRW_WIDTH_8 = 3,
   BRW_WIDTH_16 = 4,
};
923
924 /**
925 * Gfx12+ SWSB SBID synchronization mode.
926 *
927 * This is represented as a bitmask including any required SBID token
928 * synchronization modes, used to synchronize out-of-order instructions. Only
929 * the strongest mode of the mask will be provided to the hardware in the SWSB
930 * field of an actual hardware instruction, but virtual instructions may be
931 * able to take into account multiple of them.
932 */
933 enum tgl_sbid_mode {
934 TGL_SBID_NULL = 0,
935 TGL_SBID_SRC = 1,
936 TGL_SBID_DST = 2,
937 TGL_SBID_SET = 4
938 };
939
940
/* Gfx12+ DPAS sub-byte operand precision encodings. */
enum gfx12_sub_byte_precision {
   BRW_SUB_BYTE_PRECISION_NONE = 0,

   /** 4 bits. Signedness determined by base type */
   BRW_SUB_BYTE_PRECISION_4BIT = 1,

   /** 2 bits. Signedness determined by base type */
   BRW_SUB_BYTE_PRECISION_2BIT = 2,
};
950
/* Gfx12+ DPAS systolic depth encodings (note: not in numeric order of the
 * depth value itself).
 */
enum gfx12_systolic_depth {
   BRW_SYSTOLIC_DEPTH_16 = 0,
   BRW_SYSTOLIC_DEPTH_2 = 1,
   BRW_SYSTOLIC_DEPTH_4 = 2,
   BRW_SYSTOLIC_DEPTH_8 = 3,
};
957
#ifdef __cplusplus
/**
 * Allow bitwise arithmetic of tgl_sbid_mode enums.
 *
 * (C++ only: C implicitly converts enums to int and back, but C++ needs
 * these overloads to combine and mask SBID modes without casts.)
 */
inline tgl_sbid_mode
operator|(tgl_sbid_mode x, tgl_sbid_mode y)
{
   return tgl_sbid_mode(unsigned(x) | unsigned(y));
}

inline tgl_sbid_mode
operator&(tgl_sbid_mode x, tgl_sbid_mode y)
{
   return tgl_sbid_mode(unsigned(x) & unsigned(y));
}

inline tgl_sbid_mode &
operator|=(tgl_sbid_mode &x, tgl_sbid_mode y)
{
   return x = x | y;
}

#endif
981
982 /**
983 * TGL+ SWSB RegDist synchronization pipeline.
984 *
985 * On TGL all instructions that use the RegDist synchronization mechanism are
986 * considered to be executed as a single in-order pipeline, therefore only the
987 * TGL_PIPE_FLOAT pipeline is applicable. On XeHP+ platforms there are two
988 * additional asynchronous ALU pipelines (which still execute instructions
989 * in-order and use the RegDist synchronization mechanism). TGL_PIPE_NONE
990 * doesn't provide any RegDist pipeline synchronization information and allows
991 * the hardware to infer the pipeline based on the source types of the
992 * instruction. TGL_PIPE_ALL can be used when synchronization with all ALU
993 * pipelines is intended.
994 */
995 enum tgl_pipe {
996 TGL_PIPE_NONE = 0,
997 TGL_PIPE_FLOAT,
998 TGL_PIPE_INT,
999 TGL_PIPE_LONG,
1000 TGL_PIPE_MATH,
1001 TGL_PIPE_ALL
1002 };
1003
1004 /**
1005 * Logical representation of the SWSB scheduling information of a hardware
1006 * instruction. The binary representation is slightly more compact.
1007 */
1008 struct tgl_swsb {
1009 unsigned regdist : 3;
1010 enum tgl_pipe pipe : 3;
1011 unsigned sbid : 5;
1012 enum tgl_sbid_mode mode : 3;
1013 };
1014
1015 /**
1016 * Construct a scheduling annotation with a single RegDist dependency. This
1017 * synchronizes with the completion of the d-th previous in-order instruction.
1018 * The index is one-based, zero causes a no-op tgl_swsb to be constructed.
1019 */
1020 static inline struct tgl_swsb
tgl_swsb_regdist(unsigned d)1021 tgl_swsb_regdist(unsigned d)
1022 {
1023 const struct tgl_swsb swsb = { d, d ? TGL_PIPE_ALL : TGL_PIPE_NONE };
1024 assert(swsb.regdist == d);
1025 return swsb;
1026 }
1027
1028 /**
1029 * Construct a scheduling annotation that synchronizes with the specified SBID
1030 * token.
1031 */
1032 static inline struct tgl_swsb
tgl_swsb_sbid(enum tgl_sbid_mode mode,unsigned sbid)1033 tgl_swsb_sbid(enum tgl_sbid_mode mode, unsigned sbid)
1034 {
1035 const struct tgl_swsb swsb = { 0, TGL_PIPE_NONE, sbid, mode };
1036 assert(swsb.sbid == sbid);
1037 return swsb;
1038 }
1039
1040 /**
1041 * Construct a no-op scheduling annotation.
1042 */
1043 static inline struct tgl_swsb
tgl_swsb_null(void)1044 tgl_swsb_null(void)
1045 {
1046 return tgl_swsb_regdist(0);
1047 }
1048
1049 /**
1050 * Return a scheduling annotation that allocates the same SBID synchronization
1051 * token as \p swsb. In addition it will synchronize against a previous
1052 * in-order instruction if \p regdist is non-zero.
1053 */
1054 static inline struct tgl_swsb
tgl_swsb_dst_dep(struct tgl_swsb swsb,unsigned regdist)1055 tgl_swsb_dst_dep(struct tgl_swsb swsb, unsigned regdist)
1056 {
1057 swsb.regdist = regdist;
1058 swsb.mode = swsb.mode & TGL_SBID_SET;
1059 swsb.pipe = (regdist ? TGL_PIPE_ALL : TGL_PIPE_NONE);
1060 return swsb;
1061 }
1062
1063 /**
1064 * Return a scheduling annotation that synchronizes against the same SBID and
1065 * RegDist dependencies as \p swsb, but doesn't allocate any SBID token.
1066 */
1067 static inline struct tgl_swsb
tgl_swsb_src_dep(struct tgl_swsb swsb)1068 tgl_swsb_src_dep(struct tgl_swsb swsb)
1069 {
1070 swsb.mode = swsb.mode & (TGL_SBID_SRC | TGL_SBID_DST);
1071 return swsb;
1072 }
1073
1074 /**
1075 * Convert the provided tgl_swsb to the hardware's binary representation of an
1076 * SWSB annotation.
1077 */
static inline uint32_t
tgl_swsb_encode(const struct intel_device_info *devinfo, struct tgl_swsb swsb)
{
   if (!swsb.mode) {
      /* Pure RegDist dependency (no SBID token).  On XeHP+ (verx10 >= 125)
       * the upper bits select which in-order pipeline the distance refers
       * to; earlier platforms only encode the raw distance. */
      const unsigned pipe = devinfo->verx10 < 125 ? 0 :
                            swsb.pipe == TGL_PIPE_FLOAT ? 0x10 :
                            swsb.pipe == TGL_PIPE_INT ? 0x18 :
                            swsb.pipe == TGL_PIPE_LONG ? 0x20 :
                            swsb.pipe == TGL_PIPE_MATH ? 0x28 :
                            swsb.pipe == TGL_PIPE_ALL ? 0x8 : 0;
      return pipe | swsb.regdist;

   } else if (swsb.regdist) {
      /* Combined RegDist + SBID dependency. */
      if (devinfo->ver >= 20) {
         if ((swsb.mode & TGL_SBID_SET)) {
            /* Xe2+: RegDist combined with SBID allocation.  Only a limited
             * set of pipelines is representable in this form. */
            assert(swsb.pipe == TGL_PIPE_ALL ||
                   swsb.pipe == TGL_PIPE_INT || swsb.pipe == TGL_PIPE_FLOAT);
            return (swsb.pipe == TGL_PIPE_INT ? 0x300 :
                    swsb.pipe == TGL_PIPE_FLOAT ? 0x200 : 0x100) |
                   swsb.regdist << 5 | swsb.sbid;
         } else {
            /* Xe2+: RegDist combined with an SBID wait (SRC or DST only). */
            assert(!(swsb.mode & ~(TGL_SBID_DST | TGL_SBID_SRC)));
            return (swsb.pipe == TGL_PIPE_ALL ? 0x300 :
                    swsb.mode == TGL_SBID_SRC ? 0x200 : 0x100) |
                   swsb.regdist << 5 | swsb.sbid;
         }
      } else {
         /* Pre-Xe2: 3-bit RegDist combined with a 4-bit SBID. */
         assert(!(swsb.sbid & ~0xfu));
         return 0x80 | swsb.regdist << 4 | swsb.sbid;
      }

   } else {
      /* Pure SBID dependency.  The upper bits encode whether the token is
       * being allocated (SET) or waited upon as destination or source. */
      if (devinfo->ver >= 20) {
         return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0xc0 :
                             swsb.mode & TGL_SBID_DST ? 0x80 : 0xa0);
      } else {
         assert(!(swsb.sbid & ~0xfu));
         return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0x40 :
                             swsb.mode & TGL_SBID_DST ? 0x20 : 0x30);
      }
   }
}
1120
1121 /**
1122 * Convert the provided binary representation of an SWSB annotation to a
1123 * tgl_swsb.
1124 */
static inline struct tgl_swsb
tgl_swsb_decode(const struct intel_device_info *devinfo,
                const bool is_unordered, const uint32_t x)
{
   /* Inverse of tgl_swsb_encode().  \p is_unordered disambiguates
    * encodings that are shared between SET tokens and plain waits. */
   if (devinfo->ver >= 20) {
      if (x & 0x300) {
         /* Combined RegDist + SBID encoding: bits 9:8 select the variant,
          * bits 7:5 hold the distance, bits 4:0 the token. */
         if (is_unordered) {
            const struct tgl_swsb swsb = {
               (x & 0xe0u) >> 5,
               ((x & 0x300) == 0x300 ? TGL_PIPE_INT :
                (x & 0x300) == 0x200 ? TGL_PIPE_FLOAT :
                TGL_PIPE_ALL),
               x & 0x1fu,
               TGL_SBID_SET
            };
            return swsb;
         } else {
            const struct tgl_swsb swsb = {
               (x & 0xe0u) >> 5,
               ((x & 0x300) == 0x300 ? TGL_PIPE_ALL : TGL_PIPE_NONE),
               x & 0x1fu,
               ((x & 0x300) == 0x200 ? TGL_SBID_SRC : TGL_SBID_DST)
            };
            return swsb;
         }

      } else if ((x & 0xe0) == 0x80) {
         /* Pure SBID wait/allocation encodings (bits 7:5 select the mode). */
         return tgl_swsb_sbid(TGL_SBID_DST, x & 0x1f);
      } else if ((x & 0xe0) == 0xa0) {
         return tgl_swsb_sbid(TGL_SBID_SRC, x & 0x1fu);
      } else if ((x & 0xe0) == 0xc0) {
         return tgl_swsb_sbid(TGL_SBID_SET, x & 0x1fu);
      } else {
         /* Pure RegDist encoding; bits 5:3 select the pipeline. */
         const struct tgl_swsb swsb = { x & 0x7u,
                                        ((x & 0x38) == 0x10 ? TGL_PIPE_FLOAT :
                                         (x & 0x38) == 0x18 ? TGL_PIPE_INT :
                                         (x & 0x38) == 0x20 ? TGL_PIPE_LONG :
                                         (x & 0x38) == 0x28 ? TGL_PIPE_MATH :
                                         (x & 0x38) == 0x8 ? TGL_PIPE_ALL :
                                         TGL_PIPE_NONE) };
         return swsb;
      }

   } else {
      if (x & 0x80) {
         /* Combined RegDist + SBID: bits 6:4 distance, bits 3:0 token.
          * SET vs. DST can't be told apart from the bits alone, hence
          * the is_unordered parameter. */
         const struct tgl_swsb swsb = { (x & 0x70u) >> 4, TGL_PIPE_NONE,
                                        x & 0xfu,
                                        is_unordered ?
                                        TGL_SBID_SET : TGL_SBID_DST };
         return swsb;
      } else if ((x & 0x70) == 0x20) {
         /* Pure SBID wait/allocation encodings (bits 6:4 select the mode). */
         return tgl_swsb_sbid(TGL_SBID_DST, x & 0xfu);
      } else if ((x & 0x70) == 0x30) {
         return tgl_swsb_sbid(TGL_SBID_SRC, x & 0xfu);
      } else if ((x & 0x70) == 0x40) {
         return tgl_swsb_sbid(TGL_SBID_SET, x & 0xfu);
      } else {
         /* Pure RegDist encoding; a non-default pipeline (bits 6:3) is
          * only valid on XeHP+ (verx10 >= 125). */
         const struct tgl_swsb swsb = { x & 0x7u,
                                        ((x & 0x78) == 0x10 ? TGL_PIPE_FLOAT :
                                         (x & 0x78) == 0x18 ? TGL_PIPE_INT :
                                         (x & 0x78) == 0x50 ? TGL_PIPE_LONG :
                                         (x & 0x78) == 0x8 ? TGL_PIPE_ALL :
                                         TGL_PIPE_NONE) };
         assert(devinfo->verx10 >= 125 || swsb.pipe == TGL_PIPE_NONE);
         return swsb;
      }
   }
}
1193
/* TGL+ synchronization function codes.  NOTE(review): names suggest
 * no-op, all-reads, all-writes, fence, barrier and host functions —
 * confirm exact semantics against the PRM before relying on them. */
enum tgl_sync_function {
   TGL_SYNC_NOP = 0x0,
   TGL_SYNC_ALLRD = 0x2,
   TGL_SYNC_ALLWR = 0x3,
   TGL_SYNC_FENCE = 0xd,
   TGL_SYNC_BAR = 0xe,
   TGL_SYNC_HOST = 0xf
};
1202
1203 /**
1204 * Message target: Shared Function ID for where to SEND a message.
1205 *
1206 * These are enumerated in the ISA reference under "send - Send Message".
1207 * In particular, see the following tables:
1208 * - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
1209 * - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
1210 * - Ivybridge PRM, Volume 1 Part 1, section 3.2.7 "GPE Function IDs"
1211 */
enum brw_message_target {
   BRW_SFID_NULL                     = 0,
   BRW_SFID_MATH                     = 1, /* Only valid on Gfx4-5 */
   BRW_SFID_SAMPLER                  = 2,
   BRW_SFID_MESSAGE_GATEWAY          = 3,
   BRW_SFID_DATAPORT_READ            = 4,
   BRW_SFID_DATAPORT_WRITE           = 5,
   BRW_SFID_URB                      = 6,
   BRW_SFID_THREAD_SPAWNER           = 7,
   BRW_SFID_VME                      = 8,

   /* Note: numeric SFID values are reused with different meanings across
    * generations (e.g. 4, 5, 7, 8 and 13 below), so the correct name for
    * a value depends on the target generation. */
   GFX6_SFID_DATAPORT_SAMPLER_CACHE  = 4,
   GFX6_SFID_DATAPORT_RENDER_CACHE   = 5,
   GFX6_SFID_DATAPORT_CONSTANT_CACHE = 9,

   GFX7_SFID_DATAPORT_DATA_CACHE     = 10,
   GFX7_SFID_PIXEL_INTERPOLATOR      = 11,
   HSW_SFID_DATAPORT_DATA_CACHE_1    = 12,
   HSW_SFID_CRE                      = 13,

   GFX12_SFID_TGM                    = 13, /* Typed Global Memory */
   GFX12_SFID_SLM                    = 14, /* Shared Local Memory */
   GFX12_SFID_UGM                    = 15, /* Untyped Global Memory */

   GEN_RT_SFID_BINDLESS_THREAD_DISPATCH = 7,
   GEN_RT_SFID_RAY_TRACE_ACCELERATOR = 8,
};
1239
1240 #define GFX7_MESSAGE_TARGET_DP_DATA_CACHE 10
1241
1242 #define BRW_SAMPLER_RETURN_FORMAT_FLOAT32 0
1243 #define BRW_SAMPLER_RETURN_FORMAT_UINT32 2
1244 #define BRW_SAMPLER_RETURN_FORMAT_SINT32 3
1245
1246 #define GFX8_SAMPLER_RETURN_FORMAT_32BITS 0
1247 #define GFX8_SAMPLER_RETURN_FORMAT_16BITS 1
1248
1249 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE 0
1250 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE 0
1251 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS 0
1252 #define BRW_SAMPLER_MESSAGE_SIMD8_KILLPIX 1
1253 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD 1
1254 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD 1
1255 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS 2
1256 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS 2
1257 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE 0
1258 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE 2
1259 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE 0
1260 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE 1
1261 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE 1
1262 #define BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO 2
1263 #define BRW_SAMPLER_MESSAGE_SIMD16_RESINFO 2
1264 #define BRW_SAMPLER_MESSAGE_SIMD4X2_LD 3
1265 #define BRW_SAMPLER_MESSAGE_SIMD8_LD 3
1266 #define BRW_SAMPLER_MESSAGE_SIMD16_LD 3
1267
1268 #define GFX5_SAMPLER_MESSAGE_SAMPLE 0
1269 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS 1
1270 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD 2
1271 #define GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE 3
1272 #define GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS 4
1273 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
1274 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE 6
1275 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LD 7
1276 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4 8
1277 #define GFX5_SAMPLER_MESSAGE_LOD 9
1278 #define GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO 10
1279 #define GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO 11
1280 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_L 13
1281 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_B 14
1282 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_I 15
1283 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C 16
1284 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO 17
1285 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C 18
1286 #define XE2_SAMPLER_MESSAGE_SAMPLE_MLOD 18
1287 #define XE2_SAMPLER_MESSAGE_SAMPLE_COMPARE_MLOD 19
1288 #define HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE 20
1289 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_I_C 21
1290 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_L_C 23
1291 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LZ 24
1292 #define GFX9_SAMPLER_MESSAGE_SAMPLE_C_LZ 25
1293 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD_LZ 26
1294 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W 28
1295 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS 29
1296 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS 30
1297 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DSS 31
1298 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_L 45
1299 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_B 46
1300 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_L_C 55
1301
1302 /* for GFX5 only */
1303 #define BRW_SAMPLER_SIMD_MODE_SIMD4X2 0
1304 #define BRW_SAMPLER_SIMD_MODE_SIMD8 1
1305 #define BRW_SAMPLER_SIMD_MODE_SIMD16 2
1306 #define BRW_SAMPLER_SIMD_MODE_SIMD32_64 3
1307
1308 #define GFX10_SAMPLER_SIMD_MODE_SIMD8H 5
1309 #define GFX10_SAMPLER_SIMD_MODE_SIMD16H 6
1310
1311 #define XE2_SAMPLER_SIMD_MODE_SIMD16 1
1312 #define XE2_SAMPLER_SIMD_MODE_SIMD32 2
1313 #define XE2_SAMPLER_SIMD_MODE_SIMD16H 5
1314 #define XE2_SAMPLER_SIMD_MODE_SIMD32H 6
1315
1316 /* GFX9 changes SIMD mode 0 to mean SIMD8D, but lets us get the SIMD4x2
1317 * behavior by setting bit 22 of dword 2 in the message header. */
1318 #define GFX9_SAMPLER_SIMD_MODE_SIMD8D 0
1319 #define GFX9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2 (1 << 22)
1320
/* Encodings of the OWord block size message control field. */
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW   0
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH  1
#define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS     2
#define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS     3
#define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS     4
#define GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS  5
/* Map an OWord count to its block-size encoding; unsupported counts abort
 * at runtime rather than silently producing a bogus encoding. */
#define BRW_DATAPORT_OWORD_BLOCK_OWORDS(n)              \
   ((n) == 1 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW :    \
    (n) == 2 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :      \
    (n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :      \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS :      \
    (n) == 16 ? GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS :  \
    (abort(), ~0))
/* Same mapping, but taking the size as a DWord count (4 DWords = 1 OWord). */
#define BRW_DATAPORT_OWORD_BLOCK_DWORDS(n)              \
   ((n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW :    \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :      \
    (n) == 16 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :     \
    (n) == 32 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS :     \
    (abort(), ~0))
1340
1341 #define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD 0
1342 #define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS 2
1343
1344 #define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS 2
1345 #define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS 3
1346
1347 /* This one stays the same across generations. */
1348 #define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ 0
1349 /* GFX4 */
1350 #define BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 1
1351 #define BRW_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 2
1352 #define BRW_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 3
1353 /* G45, GFX5 */
1354 #define G45_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
1355 #define G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
1356 #define G45_DATAPORT_READ_MESSAGE_AVC_LOOP_FILTER_READ 3
1357 #define G45_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
1358 #define G45_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
1359 /* GFX6 */
1360 #define GFX6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
1361 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
1362 #define GFX6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
1363 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ 5
1364 #define GFX6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
1365
1366 #define BRW_DATAPORT_READ_TARGET_DATA_CACHE 0
1367 #define BRW_DATAPORT_READ_TARGET_RENDER_CACHE 1
1368 #define BRW_DATAPORT_READ_TARGET_SAMPLER_CACHE 2
1369
1370 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE 0
1371 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1
1372 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01 2
1373 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23 3
1374 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01 4
1375
1376 #define BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 0
1377 #define BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 1
1378 #define BRW_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 2
1379 #define BRW_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 3
1380 #define BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 4
1381 #define BRW_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE 5
1382 #define BRW_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE 7
1383
1384 /* GFX6 */
1385 #define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE 7
1386 #define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 8
1387 #define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 9
1388 #define GFX6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 10
1389 #define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 11
1390 #define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 12
1391 #define GFX6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE 13
1392 #define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE 14
1393
1394 /* GFX7 */
1395 #define GFX7_DATAPORT_RC_MEDIA_BLOCK_READ 4
1396 #define GFX7_DATAPORT_RC_TYPED_SURFACE_READ 5
1397 #define GFX7_DATAPORT_RC_TYPED_ATOMIC_OP 6
1398 #define GFX7_DATAPORT_RC_MEMORY_FENCE 7
1399 #define GFX7_DATAPORT_RC_MEDIA_BLOCK_WRITE 10
1400 #define GFX7_DATAPORT_RC_RENDER_TARGET_WRITE 12
1401 #define GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE 13
1402 #define GFX7_DATAPORT_DC_OWORD_BLOCK_READ 0
1403 #define GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ 1
1404 #define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_READ 2
1405 #define GFX7_DATAPORT_DC_DWORD_SCATTERED_READ 3
1406 #define GFX7_DATAPORT_DC_BYTE_SCATTERED_READ 4
1407 #define GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ 5
1408 #define GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP 6
1409 #define GFX7_DATAPORT_DC_MEMORY_FENCE 7
1410 #define GFX7_DATAPORT_DC_OWORD_BLOCK_WRITE 8
1411 #define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE 10
1412 #define GFX7_DATAPORT_DC_DWORD_SCATTERED_WRITE 11
1413 #define GFX7_DATAPORT_DC_BYTE_SCATTERED_WRITE 12
1414 #define GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE 13
1415
1416 #define GFX7_DATAPORT_SCRATCH_READ ((1 << 18) | \
1417 (0 << 17))
1418 #define GFX7_DATAPORT_SCRATCH_WRITE ((1 << 18) | \
1419 (1 << 17))
1420 #define GFX7_DATAPORT_SCRATCH_NUM_REGS_SHIFT 12
1421
1422 #define GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET 0
1423 #define GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE 1
1424 #define GFX7_PIXEL_INTERPOLATOR_LOC_CENTROID 2
1425 #define GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET 3
1426
1427 /* HSW */
1428 #define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_READ 0
1429 #define HSW_DATAPORT_DC_PORT0_UNALIGNED_OWORD_BLOCK_READ 1
1430 #define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_READ 2
1431 #define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_READ 3
1432 #define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ 4
1433 #define HSW_DATAPORT_DC_PORT0_MEMORY_FENCE 7
1434 #define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_WRITE 8
1435 #define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_WRITE 10
1436 #define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_WRITE 11
1437 #define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE 12
1438
1439 #define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ 1
1440 #define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP 2
1441 #define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2 3
1442 #define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_READ 4
1443 #define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ 5
1444 #define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP 6
1445 #define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2 7
1446 #define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE 9
1447 #define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_WRITE 10
1448 #define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP 11
1449 #define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP_SIMD4X2 12
1450 #define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE 13
1451 #define GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ 0x10
1452 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ 0x11
1453 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP 0x12
1454 #define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP 0x13
1455 #define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ 0x14
1456 #define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE 0x15
1457 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE 0x19
1458 #define GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE 0x1a
1459 #define GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP 0x1b
1460 #define GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP 0x1d
1461 #define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP 0x1e
1462
1463 /* GFX9 */
1464 #define GFX9_DATAPORT_RC_RENDER_TARGET_WRITE 12
1465 #define GFX9_DATAPORT_RC_RENDER_TARGET_READ 13
1466
1467 /* A64 scattered message subtype */
1468 #define GFX8_A64_SCATTERED_SUBTYPE_BYTE 0
1469 #define GFX8_A64_SCATTERED_SUBTYPE_DWORD 1
1470 #define GFX8_A64_SCATTERED_SUBTYPE_QWORD 2
1471 #define GFX8_A64_SCATTERED_SUBTYPE_HWORD 3
1472
1473 /* Dataport special binding table indices: */
1474 #define BRW_BTI_STATELESS 255
1475 #define GFX7_BTI_SLM 254
1476
1477 #define HSW_BTI_STATELESS_LOCALLY_COHERENT 255
1478 #define HSW_BTI_STATELESS_NON_COHERENT 253
1479 #define HSW_BTI_STATELESS_GLOBALLY_COHERENT 252
1480 #define HSW_BTI_STATELESS_LLC_COHERENT 251
1481 #define HSW_BTI_STATELESS_L3_UNCACHED 250
1482
1483 /* The hardware docs are a bit contradictory here. On Haswell, where they
 * first added cacheability control, there were 5 different cache modes (see
1485 * HSW_BTI_STATELESS_* above). On Broadwell, they reduced to two:
1486 *
1487 * - IA-Coherent (BTI=255): Coherent within Gen and coherent within the
1488 * entire IA cache memory hierarchy.
1489 *
1490 * - Non-Coherent (BTI=253): Coherent within Gen, same cache type.
1491 *
1492 * Information about stateless cache coherency can be found in the "A32
1493 * Stateless" section of the "3D Media GPGPU" volume of the PRM for each
1494 * hardware generation.
1495 *
1496 * Unfortunately, the docs for MDC_STATELESS appear to have been copied and
1497 * pasted from Haswell and give the Haswell definitions for the BTI values of
1498 * 255 and 253 including a warning about accessing 253 surfaces from multiple
1499 * threads. This seems to be a copy+paste error and the definitions from the
1500 * "A32 Stateless" section should be trusted instead.
1501 *
1502 * Note that because the DRM sets bit 4 of HDC_CHICKEN0 on BDW, CHV and at
1503 * least some pre-production steppings of SKL due to WaForceEnableNonCoherent,
1504 * HDC memory access may have been overridden by the kernel to be non-coherent
1505 * (matching the behavior of the same BTI on pre-Gfx8 hardware) and BTI 255
1506 * may actually be an alias for BTI 253.
1507 */
1508 #define GFX8_BTI_STATELESS_IA_COHERENT 255
1509 #define GFX8_BTI_STATELESS_NON_COHERENT 253
1510 #define GFX9_BTI_BINDLESS 252
1511
/* This ID doesn't map to any HW-related value. It exists to tell the
 * lowering code not to use the bindless heap.
 */
1515 #define GFX125_NON_BINDLESS (1u << 16)
1516
1517 /* Dataport atomic operations for Untyped Atomic Integer Operation message
1518 * (and others).
1519 */
1520 #define BRW_AOP_AND 1
1521 #define BRW_AOP_OR 2
1522 #define BRW_AOP_XOR 3
1523 #define BRW_AOP_MOV 4
1524 #define BRW_AOP_INC 5
1525 #define BRW_AOP_DEC 6
1526 #define BRW_AOP_ADD 7
1527 #define BRW_AOP_SUB 8
1528 #define BRW_AOP_REVSUB 9
1529 #define BRW_AOP_IMAX 10
1530 #define BRW_AOP_IMIN 11
1531 #define BRW_AOP_UMAX 12
1532 #define BRW_AOP_UMIN 13
1533 #define BRW_AOP_CMPWR 14
1534 #define BRW_AOP_PREDEC 15
1535
1536 /* Dataport atomic operations for Untyped Atomic Float Operation message. */
1537 #define BRW_AOP_FMAX 1
1538 #define BRW_AOP_FMIN 2
1539 #define BRW_AOP_FCMPWR 3
1540 #define BRW_AOP_FADD 4
1541
1542 #define BRW_MATH_FUNCTION_INV 1
1543 #define BRW_MATH_FUNCTION_LOG 2
1544 #define BRW_MATH_FUNCTION_EXP 3
1545 #define BRW_MATH_FUNCTION_SQRT 4
1546 #define BRW_MATH_FUNCTION_RSQ 5
1547 #define BRW_MATH_FUNCTION_SIN 6
1548 #define BRW_MATH_FUNCTION_COS 7
1549 #define BRW_MATH_FUNCTION_SINCOS 8 /* gfx4, gfx5 */
1550 #define BRW_MATH_FUNCTION_FDIV 9 /* gfx6+ */
1551 #define BRW_MATH_FUNCTION_POW 10
1552 #define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER 11
1553 #define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT 12
1554 #define BRW_MATH_FUNCTION_INT_DIV_REMAINDER 13
1555 #define GFX8_MATH_FUNCTION_INVM 14
1556 #define GFX8_MATH_FUNCTION_RSQRTM 15
1557
1558 #define BRW_MATH_INTEGER_UNSIGNED 0
1559 #define BRW_MATH_INTEGER_SIGNED 1
1560
1561 #define BRW_MATH_PRECISION_FULL 0
1562 #define BRW_MATH_PRECISION_PARTIAL 1
1563
1564 #define BRW_MATH_SATURATE_NONE 0
1565 #define BRW_MATH_SATURATE_SATURATE 1
1566
1567 #define BRW_MATH_DATA_VECTOR 0
1568 #define BRW_MATH_DATA_SCALAR 1
1569
1570 #define BRW_URB_OPCODE_WRITE_HWORD 0
1571 #define BRW_URB_OPCODE_WRITE_OWORD 1
1572 #define BRW_URB_OPCODE_READ_HWORD 2
1573 #define BRW_URB_OPCODE_READ_OWORD 3
1574 #define GFX7_URB_OPCODE_ATOMIC_MOV 4
1575 #define GFX7_URB_OPCODE_ATOMIC_INC 5
1576 #define GFX8_URB_OPCODE_ATOMIC_ADD 6
1577 #define GFX8_URB_OPCODE_SIMD8_WRITE 7
1578 #define GFX8_URB_OPCODE_SIMD8_READ 8
1579 #define GFX125_URB_OPCODE_FENCE 9
1580
1581 #define BRW_URB_SWIZZLE_NONE 0
1582 #define BRW_URB_SWIZZLE_INTERLEAVE 1
1583 #define BRW_URB_SWIZZLE_TRANSPOSE 2
1584
/* Scratch space size encodings: size in bytes = 1 KB << encoding. */
#define BRW_SCRATCH_SPACE_SIZE_1K     0
#define BRW_SCRATCH_SPACE_SIZE_2K     1
#define BRW_SCRATCH_SPACE_SIZE_4K     2
#define BRW_SCRATCH_SPACE_SIZE_8K     3
#define BRW_SCRATCH_SPACE_SIZE_16K    4
#define BRW_SCRATCH_SPACE_SIZE_32K    5
#define BRW_SCRATCH_SPACE_SIZE_64K    6
#define BRW_SCRATCH_SPACE_SIZE_128K   7
#define BRW_SCRATCH_SPACE_SIZE_256K   8
#define BRW_SCRATCH_SPACE_SIZE_512K   9
#define BRW_SCRATCH_SPACE_SIZE_1M     10
#define BRW_SCRATCH_SPACE_SIZE_2M     11
1597
1598 #define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY 0
1599 #define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY 1
1600 #define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG 2
1601 #define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP 3
1602 #define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG 4
1603 #define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
1604 #define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE 6
1605
1606
1607 /* Gfx7 "GS URB Entry Allocation Size" is a U9-1 field, so the maximum gs_size
1608 * is 2^9, or 512. It's counted in multiples of 64 bytes.
1609 *
1610 * Identical for VS, DS, and HS.
1611 */
1612 #define GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES (512*64)
1613 #define GFX7_MAX_DS_URB_ENTRY_SIZE_BYTES (512*64)
1614 #define GFX7_MAX_HS_URB_ENTRY_SIZE_BYTES (512*64)
1615 #define GFX7_MAX_VS_URB_ENTRY_SIZE_BYTES (512*64)
1616
1617 #define BRW_GS_EDGE_INDICATOR_0 (1 << 8)
1618 #define BRW_GS_EDGE_INDICATOR_1 (1 << 9)
1619
1620 /* Gfx6 "GS URB Entry Allocation Size" is defined as a number of 1024-bit
1621 * (128 bytes) URB rows and the maximum allowed value is 5 rows.
1622 */
1623 #define GFX6_MAX_GS_URB_ENTRY_SIZE_BYTES (5*128)
1624
1625 /* GS Thread Payload
1626 */
1627
1628 /* 3DSTATE_GS "Output Vertex Size" has an effective maximum of 62. It's
1629 * counted in multiples of 16 bytes.
1630 */
1631 #define GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES (62*16)
1632
1633
1634 /* R0 */
1635 # define GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT 27
1636
1637 /* CR0.0[5:4] Floating-Point Rounding Modes
1638 * Skylake PRM, Volume 7 Part 1, "Control Register", page 756
1639 */
1640
1641 #define BRW_CR0_RND_MODE_MASK 0x30
1642 #define BRW_CR0_RND_MODE_SHIFT 4
1643
/* Encodings of CR0.0[5:4] (see BRW_CR0_RND_MODE_MASK/SHIFT above). */
enum ENUM_PACKED brw_rnd_mode {
   BRW_RND_MODE_RTNE = 0,  /* Round to Nearest or Even */
   BRW_RND_MODE_RU = 1,    /* Round Up, toward +inf */
   BRW_RND_MODE_RD = 2,    /* Round Down, toward -inf */
   BRW_RND_MODE_RTZ = 3,   /* Round Toward Zero */
   BRW_RND_MODE_UNSPECIFIED, /* Unspecified rounding mode; not a hardware
                              * encoding (doesn't fit the 2-bit field). */
};
1651
1652 #define BRW_CR0_FP64_DENORM_PRESERVE (1 << 6)
1653 #define BRW_CR0_FP32_DENORM_PRESERVE (1 << 7)
1654 #define BRW_CR0_FP16_DENORM_PRESERVE (1 << 10)
1655
1656 #define BRW_CR0_FP_MODE_MASK (BRW_CR0_FP64_DENORM_PRESERVE | \
1657 BRW_CR0_FP32_DENORM_PRESERVE | \
1658 BRW_CR0_FP16_DENORM_PRESERVE | \
1659 BRW_CR0_RND_MODE_MASK)
1660
1661 /* MDC_DS - Data Size Message Descriptor Control Field
1662 * Skylake PRM, Volume 2d, page 129
1663 *
1664 * Specifies the number of Bytes to be read or written per Dword used at
1665 * byte_scattered read/write and byte_scaled read/write messages.
1666 */
1667 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE 0
1668 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD 1
1669 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD 2
1670
#define GEN_RT_BTD_MESSAGE_SPAWN 1

/* Trace-ray message control values.  ("INITAL" is sic — the identifier
 * is kept as originally spelled for source compatibility.) */
#define GEN_RT_TRACE_RAY_INITAL 0
#define GEN_RT_TRACE_RAY_INSTANCE 1
#define GEN_RT_TRACE_RAY_COMMIT 2
#define GEN_RT_TRACE_RAY_CONTINUE 3

/* Shader types for the bindless thread dispatch (BTD) message. */
#define GEN_RT_BTD_SHADER_TYPE_ANY_HIT 0
#define GEN_RT_BTD_SHADER_TYPE_CLOSEST_HIT 1
#define GEN_RT_BTD_SHADER_TYPE_MISS 2
#define GEN_RT_BTD_SHADER_TYPE_INTERSECTION 3
1682
/* Starting with Xe-HPG, the old dataport was massively reworked. The new
 * unit, called the Load/Store Cache or LSC, has a significantly improved
 * interface. Instead of bespoke messages for every case, there's basically
 * one or two messages with different bits to control things like address
 * size, how much data is read/written, etc. It's way nicer but also means we
 * get to rewrite all our dataport encoding/decoding code. The enums below
 * describe the new interface.
 */
enum lsc_opcode {
   LSC_OP_LOAD = 0,
   LSC_OP_LOAD_CMASK = 2,     /* Load with a channel mask (cf. enum lsc_cmask). */
   LSC_OP_STORE = 4,
   LSC_OP_STORE_CMASK = 6,    /* Store with a channel mask. */
   LSC_OP_ATOMIC_INC = 8,     /* Integer atomics. */
   LSC_OP_ATOMIC_DEC = 9,
   LSC_OP_ATOMIC_LOAD = 10,
   LSC_OP_ATOMIC_STORE = 11,
   LSC_OP_ATOMIC_ADD = 12,
   LSC_OP_ATOMIC_SUB = 13,
   LSC_OP_ATOMIC_MIN = 14,
   LSC_OP_ATOMIC_MAX = 15,
   LSC_OP_ATOMIC_UMIN = 16,
   LSC_OP_ATOMIC_UMAX = 17,
   LSC_OP_ATOMIC_CMPXCHG = 18,
   LSC_OP_ATOMIC_FADD = 19,   /* Floating-point atomics. */
   LSC_OP_ATOMIC_FSUB = 20,
   LSC_OP_ATOMIC_FMIN = 21,
   LSC_OP_ATOMIC_FMAX = 22,
   LSC_OP_ATOMIC_FCMPXCHG = 23,
   LSC_OP_ATOMIC_AND = 24,    /* Bitwise atomics. */
   LSC_OP_ATOMIC_OR = 25,
   LSC_OP_ATOMIC_XOR = 26,
   LSC_OP_FENCE = 31
};
1717
1718 /*
1719 * Specifies the size of the dataport address payload in registers.
1720 */
enum ENUM_PACKED lsc_addr_reg_size {
   /* Each enumerator's value is the register count itself; note that only
    * these particular counts are representable. */
   LSC_ADDR_REG_SIZE_1  = 1,
   LSC_ADDR_REG_SIZE_2  = 2,
   LSC_ADDR_REG_SIZE_3  = 3,
   LSC_ADDR_REG_SIZE_4  = 4,
   LSC_ADDR_REG_SIZE_6  = 6,
   LSC_ADDR_REG_SIZE_8  = 8,
};
1729
1730 /*
1731 * Specifies the size of the address payload item in a dataport message.
1732 */
enum ENUM_PACKED lsc_addr_size {
   /* Note there is no encoding 0; the field always carries one of these. */
   LSC_ADDR_SIZE_A16 = 1,  /* 16-bit address offset */
   LSC_ADDR_SIZE_A32 = 2,  /* 32-bit address offset */
   LSC_ADDR_SIZE_A64 = 3,  /* 64-bit address offset */
};
1738
1739 /*
1740 * Specifies the type of the address payload item in a dataport message. The
1741 * address type specifies how the dataport message decodes the Extended
1742 * Descriptor for the surface attributes and address calculation.
1743 */
enum ENUM_PACKED lsc_addr_surface_type {
   /* Determines how the Extended Descriptor is decoded for surface
    * attributes and address calculation (see the comment above). */
   LSC_ADDR_SURFTYPE_FLAT = 0, /* Flat */
   LSC_ADDR_SURFTYPE_BSS = 1,  /* Bindless surface state */
   LSC_ADDR_SURFTYPE_SS = 2,   /* Surface state */
   LSC_ADDR_SURFTYPE_BTI = 3,  /* Binding table index */
};
1750
1751 /*
1752 * Specifies the dataport message override to the default L1 and L3 memory
1753 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1754 * cache streaming (S) and invalidate-after-read (IAR). Dataport L3 cache
1755 * policies are uncached (UC) and cached (C).
1756 */
enum lsc_cache_load {
   /* Pre-Xe2 load-cache encodings; Xe2+ uses enum xe2_lsc_cache_load
    * (the LSC_CACHE() macro selects between them). */
   /* No override. Use the non-pipelined state or surface state cache settings
    * for L1 and L3.
    */
   LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   LSC_CACHE_LOAD_L1UC_L3UC = 1,
   /* Override to L1 uncached and L3 cached */
   LSC_CACHE_LOAD_L1UC_L3C = 2,
   /* Override to L1 cached and L3 uncached */
   LSC_CACHE_LOAD_L1C_L3UC = 3,
   /* Override to cache at both L1 and L3 */
   LSC_CACHE_LOAD_L1C_L3C = 4,
   /* Override to L1 streaming load and L3 uncached */
   LSC_CACHE_LOAD_L1S_L3UC = 5,
   /* Override to L1 streaming load and L3 cached */
   LSC_CACHE_LOAD_L1S_L3C = 6,
   /* For load messages, override to L1 invalidate-after-read, and L3 cached. */
   LSC_CACHE_LOAD_L1IAR_L3C = 7,
};
1777
1778 /*
1779 * Specifies the dataport message override to the default L1 and L3 memory
1780 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1781 * streaming (S) and invalidate-after-read (IAR). Dataport L3 cache policies
 * are uncached (UC), cached (C), cached-as-a-constant (CC) and
1783 * invalidate-after-read (IAR).
1784 */
enum PACKED xe2_lsc_cache_load {
   /* Xe2+ load-cache encodings; note the values differ from the pre-Xe2
    * enum lsc_cache_load (the LSC_CACHE() macro selects between them). */
   /* No override. Use the non-pipelined or surface state cache settings for L1
    * and L3.
    */
   XE2_LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   XE2_LSC_CACHE_LOAD_L1UC_L3UC = 2,
   /* Override to L1 uncached and L3 cached */
   XE2_LSC_CACHE_LOAD_L1UC_L3C = 4,
   /* Override to L1 uncached and L3 cached as a constant */
   XE2_LSC_CACHE_LOAD_L1UC_L3CC = 5,
   /* Override to L1 cached and L3 uncached */
   XE2_LSC_CACHE_LOAD_L1C_L3UC = 6,
   /* Override to L1 cached and L3 cached */
   XE2_LSC_CACHE_LOAD_L1C_L3C = 8,
   /* Override to L1 cached and L3 cached as a constant */
   XE2_LSC_CACHE_LOAD_L1C_L3CC = 9,
   /* Override to L1 cached as streaming load and L3 uncached */
   XE2_LSC_CACHE_LOAD_L1S_L3UC = 10,
   /* Override to L1 cached as streaming load and L3 cached */
   XE2_LSC_CACHE_LOAD_L1S_L3C = 12,
   /* Override to L1 and L3 invalidate after read */
   XE2_LSC_CACHE_LOAD_L1IAR_L3IAR = 14,

};
1810
1811 /*
1812 * Specifies the dataport message override to the default L1 and L3 memory
1813 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1814 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1815 * uncached (UC) and cached (WB).
1816 */
enum ENUM_PACKED lsc_cache_store {
   /* Pre-Xe2 store-cache encodings; Xe2+ uses enum xe2_lsc_cache_store
    * (the LSC_CACHE() macro selects between them). */
   /* No override. Use the non-pipelined or surface state cache settings for L1
    * and L3.
    */
   LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   LSC_CACHE_STORE_L1UC_L3UC = 1,
   /* Override to L1 uncached and L3 cached */
   LSC_CACHE_STORE_L1UC_L3WB = 2,
   /* Override to L1 write-through and L3 uncached */
   LSC_CACHE_STORE_L1WT_L3UC = 3,
   /* Override to L1 write-through and L3 cached */
   LSC_CACHE_STORE_L1WT_L3WB = 4,
   /* Override to L1 streaming and L3 uncached */
   LSC_CACHE_STORE_L1S_L3UC = 5,
   /* Override to L1 streaming and L3 cached */
   LSC_CACHE_STORE_L1S_L3WB = 6,
   /* Override to L1 write-back, and L3 cached */
   LSC_CACHE_STORE_L1WB_L3WB = 7,

};
1838
1839 /*
1840 * Specifies the dataport message override to the default L1 and L3 memory
1841 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1842 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1843 * uncached (UC) and cached (WB).
1844 */
1845 enum PACKED xe2_lsc_cache_store {
1846 /* No override. Use the non-pipelined or surface state cache settings for L1
1847 * and L3.
1848 */
1849 XE2_LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1850 /* Override to L1 uncached and L3 uncached */
1851 XE2_LSC_CACHE_STORE_L1UC_L3UC = 2,
1852 /* Override to L1 uncached and L3 cached */
1853 XE2_LSC_CACHE_STORE_L1UC_L3WB = 4,
1854 /* Override to L1 write-through and L3 uncached */
1855 XE2_LSC_CACHE_STORE_L1WT_L3UC = 6,
1856 /* Override to L1 write-through and L3 cached */
1857 XE2_LSC_CACHE_STORE_L1WT_L3WB = 8,
1858 /* Override to L1 streaming and L3 uncached */
1859 XE2_LSC_CACHE_STORE_L1S_L3UC = 10,
1860 /* Override to L1 streaming and L3 cached */
1861 XE2_LSC_CACHE_STORE_L1S_L3WB = 12,
1862 /* Override to L1 write-back and L3 cached */
1863 XE2_LSC_CACHE_STORE_L1WB_L3WB = 14,
1864
1865 };
1866
/* Select the platform-appropriate cache-policy enumerant: pre-Xe2 (ver < 20)
 * platforms use the LSC_CACHE_* encodings, Xe2+ uses XE2_LSC_CACHE_*.
 * l_or_s is LOAD or STORE and cc is the policy suffix (e.g. L1UC_L3UC);
 * both are pasted into the enumerator name, so they must be literal tokens.
 */
#define LSC_CACHE(devinfo, l_or_s, cc) \
   ((devinfo)->ver < 20 ? (unsigned)LSC_CACHE_ ## l_or_s ## _ ## cc : \
    (unsigned)XE2_LSC_CACHE_ ## l_or_s ## _ ## cc)
1870
1871 /*
1872 * Specifies which components of the data payload 4-element vector (X,Y,Z,W) is
1873 * packed into the register payload.
1874 */
1875 enum ENUM_PACKED lsc_cmask {
1876 LSC_CMASK_X = 0x1,
1877 LSC_CMASK_Y = 0x2,
1878 LSC_CMASK_XY = 0x3,
1879 LSC_CMASK_Z = 0x4,
1880 LSC_CMASK_XZ = 0x5,
1881 LSC_CMASK_YZ = 0x6,
1882 LSC_CMASK_XYZ = 0x7,
1883 LSC_CMASK_W = 0x8,
1884 LSC_CMASK_XW = 0x9,
1885 LSC_CMASK_YW = 0xa,
1886 LSC_CMASK_XYW = 0xb,
1887 LSC_CMASK_ZW = 0xc,
1888 LSC_CMASK_XZW = 0xd,
1889 LSC_CMASK_YZW = 0xe,
1890 LSC_CMASK_XYZW = 0xf,
1891 };
1892
1893 /*
1894 * Specifies the size of the data payload item in a dataport message.
1895 */
1896 enum ENUM_PACKED lsc_data_size {
1897 /* 8-bit scalar data value in memory, packed into a 8-bit data value in
1898 * register.
1899 */
1900 LSC_DATA_SIZE_D8 = 0,
1901 /* 16-bit scalar data value in memory, packed into a 16-bit data value in
1902 * register.
1903 */
1904 LSC_DATA_SIZE_D16 = 1,
1905 /* 32-bit scalar data value in memory, packed into 32-bit data value in
1906 * register.
1907 */
1908 LSC_DATA_SIZE_D32 = 2,
1909 /* 64-bit scalar data value in memory, packed into 64-bit data value in
1910 * register.
1911 */
1912 LSC_DATA_SIZE_D64 = 3,
1913 /* 8-bit scalar data value in memory, packed into 32-bit unsigned data value
1914 * in register.
1915 */
1916 LSC_DATA_SIZE_D8U32 = 4,
1917 /* 16-bit scalar data value in memory, packed into 32-bit unsigned data
1918 * value in register.
1919 */
1920 LSC_DATA_SIZE_D16U32 = 5,
1921 /* 16-bit scalar BigFloat data value in memory, packed into 32-bit float
1922 * value in register.
1923 */
1924 LSC_DATA_SIZE_D16BF32 = 6,
1925 };
1926
1927 /*
1928 * Enum specifies the scope of the fence.
1929 */
1930 enum ENUM_PACKED lsc_fence_scope {
1931 /* Wait until all previous memory transactions from this thread are observed
1932 * within the local thread-group.
1933 */
1934 LSC_FENCE_THREADGROUP = 0,
1935 /* Wait until all previous memory transactions from this thread are observed
1936 * within the local sub-slice.
1937 */
1938 LSC_FENCE_LOCAL = 1,
1939 /* Wait until all previous memory transactions from this thread are observed
1940 * in the local tile.
1941 */
1942 LSC_FENCE_TILE = 2,
1943 /* Wait until all previous memory transactions from this thread are observed
1944 * in the local GPU.
1945 */
1946 LSC_FENCE_GPU = 3,
1947 /* Wait until all previous memory transactions from this thread are observed
1948 * across all GPUs in the system.
1949 */
1950 LSC_FENCE_ALL_GPU = 4,
1951 /* Wait until all previous memory transactions from this thread are observed
1952 * at the "system" level.
1953 */
1954 LSC_FENCE_SYSTEM_RELEASE = 5,
1955 /* For GPUs that do not follow PCIe Write ordering for downstream writes
1956 * targeting device memory, a fence message with scope=System_Acquire will
1957 * commit to device memory all downstream and peer writes that have reached
1958 * the device.
1959 */
1960 LSC_FENCE_SYSTEM_ACQUIRE = 6,
1961 };
1962
1963 /*
1964 * Specifies the type of cache flush operation to perform after a fence is
1965 * complete.
1966 */
1967 enum ENUM_PACKED lsc_flush_type {
1968 LSC_FLUSH_TYPE_NONE = 0,
1969 /*
1970 * For a R/W cache, evict dirty lines (M to I state) and invalidate clean
1971 * lines. For a RO cache, invalidate clean lines.
1972 */
1973 LSC_FLUSH_TYPE_EVICT = 1,
1974 /*
1975 * For both R/W and RO cache, invalidate clean lines in the cache.
1976 */
1977 LSC_FLUSH_TYPE_INVALIDATE = 2,
1978 /*
1979 * For a R/W cache, invalidate dirty lines (M to I state), without
1980 * write-back to next level. This opcode does nothing for a RO cache.
1981 */
1982 LSC_FLUSH_TYPE_DISCARD = 3,
1983 /*
1984 * For a R/W cache, write-back dirty lines to the next level, but kept in
1985 * the cache as "clean" (M to V state). This opcode does nothing for a RO
1986 * cache.
1987 */
1988 LSC_FLUSH_TYPE_CLEAN = 4,
1989 /*
1990 * Flush "RW" section of the L3 cache, but leave L1 and L2 caches untouched.
1991 */
1992 LSC_FLUSH_TYPE_L3ONLY = 5,
1993 /*
1994 * HW maps this flush type internally to NONE.
1995 */
1996 LSC_FLUSH_TYPE_NONE_6 = 6,
1997
1998 };
1999
2000 enum ENUM_PACKED lsc_backup_fence_routing {
2001 /* Normal routing: UGM fence is routed to UGM pipeline. */
2002 LSC_NORMAL_ROUTING,
2003 /* Route UGM fence to LSC unit. */
2004 LSC_ROUTE_TO_LSC,
2005 };
2006
2007 /*
2008 * Specifies the size of the vector in a dataport message.
2009 */
2010 enum ENUM_PACKED lsc_vect_size {
2011 LSC_VECT_SIZE_V1 = 0, /* vector length 1 */
2012 LSC_VECT_SIZE_V2 = 1, /* vector length 2 */
2013 LSC_VECT_SIZE_V3 = 2, /* Vector length 3 */
2014 LSC_VECT_SIZE_V4 = 3, /* Vector length 4 */
2015 LSC_VECT_SIZE_V8 = 4, /* Vector length 8 */
2016 LSC_VECT_SIZE_V16 = 5, /* Vector length 16 */
2017 LSC_VECT_SIZE_V32 = 6, /* Vector length 32 */
2018 LSC_VECT_SIZE_V64 = 7, /* Vector length 64 */
2019 };
2020
/* NOTE(review): presumably the number of address-payload registers used by
 * the LSC messages emitted with this constant — confirm against callers and
 * the LSC message descriptor documentation.
 */
#define LSC_ONE_ADDR_REG 1
2022
2023 #endif /* BRW_EU_DEFINES_H */
2024