1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32 #ifndef BRW_EU_DEFINES_H
33 #define BRW_EU_DEFINES_H
34
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include "util/macros.h"
38 #include "dev/intel_device_info.h"
39
/* The following hunk, up to "Execution Unit", is used by both the
 * intel/compiler and i965 codebases. */
42
/**
 * Build a 32-bit mask with bits [high:low] (both inclusive) set.
 *
 * Note: high - low + 1 must be less than 32; a full 32-bit span would
 * shift 1u by 32, which is undefined behavior.
 */
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))

/* The SET_* macros below use the GNU statement expression extension so
 * they can assert that the value fits in the field while still yielding
 * the masked value as the expression result.
 */

/** Shift `value` into the field described by field##_SHIFT/field##_MASK,
 * asserting that it fits.
 */
#define SET_FIELD(value, field) \
   ({ \
      uint32_t fieldval = (uint32_t)(value) << field ## _SHIFT; \
      assert((fieldval & ~ field ## _MASK) == 0); \
      fieldval & field ## _MASK; \
   })

/** Shift `value` into bits [high:low], asserting that it fits. */
#define SET_BITS(value, high, low) \
   ({ \
      const uint32_t fieldval = (uint32_t)(value) << (low); \
      assert((fieldval & ~INTEL_MASK(high, low)) == 0); \
      fieldval & INTEL_MASK(high, low); \
   })

/* Extract bits [high:low] of `data`, shifted down to bit 0.  `data` is
 * parenthesized so lower-precedence argument expressions (e.g. `a | b`)
 * expand correctly.
 */
#define GET_BITS(data, high, low) (((data) & INTEL_MASK((high), (low))) >> (low))

/** Extract the field described by field##_SHIFT/field##_MASK from `word`. */
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
61
/* 3D primitive topology types (values of the 3DPRIMITIVE topology
 * field).
 */
#define _3DPRIM_POINTLIST 0x01
#define _3DPRIM_LINELIST 0x02
#define _3DPRIM_LINESTRIP 0x03
#define _3DPRIM_TRILIST 0x04
#define _3DPRIM_TRISTRIP 0x05
#define _3DPRIM_TRIFAN 0x06
#define _3DPRIM_QUADLIST 0x07
#define _3DPRIM_QUADSTRIP 0x08
#define _3DPRIM_LINELIST_ADJ 0x09 /* G45+ */
#define _3DPRIM_LINESTRIP_ADJ 0x0A /* G45+ */
#define _3DPRIM_TRILIST_ADJ 0x0B /* G45+ */
#define _3DPRIM_TRISTRIP_ADJ 0x0C /* G45+ */
#define _3DPRIM_TRISTRIP_REVERSE 0x0D
#define _3DPRIM_POLYGON 0x0E
#define _3DPRIM_RECTLIST 0x0F
#define _3DPRIM_LINELOOP 0x10
#define _3DPRIM_POINTLIST_BF 0x11
#define _3DPRIM_LINESTRIP_CONT 0x12
#define _3DPRIM_LINESTRIP_BF 0x13
#define _3DPRIM_LINESTRIP_CONT_BF 0x14
#define _3DPRIM_TRIFAN_NOSTIPPLE 0x16
/* Patch lists with 1..32 control points occupy 0x20..0x3F.  `n` is
 * parenthesized so compound argument expressions expand correctly.
 */
#define _3DPRIM_PATCHLIST(n) ({ assert((n) > 0 && (n) <= 32); 0x20 + ((n) - 1); })
84
/* Bitfields for the URB_WRITE message, DW2 of message header: */
#define URB_WRITE_PRIM_END 0x1
#define URB_WRITE_PRIM_START 0x2
#define URB_WRITE_PRIM_TYPE_SHIFT 2

/* NOTE(review): presumably a bit index for sprite-point rendering state --
 * confirm against the consuming code.
 */
#define BRW_SPRITE_POINT_ENABLE 16

/* Gfx7 geometry shader "Control Data Format" selector -- GSCTL_CUT for cut
 * bits, GSCTL_SID presumably for stream-ID bits (verify against the PRM).
 */
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT 0
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID 1
94
95 /* Execution Unit (EU) defines
96 */
97
/* Instruction access modes: Align1 vs. Align16 register regioning. */
#define BRW_ALIGN_1 0
#define BRW_ALIGN_16 1

/* Register addressing modes: direct, or indirect through the address
 * register.
 */
#define BRW_ADDRESS_DIRECT 0
#define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER 1

/* Swizzle/writemask channel indices. */
#define BRW_CHANNEL_X 0
#define BRW_CHANNEL_Y 1
#define BRW_CHANNEL_Z 2
#define BRW_CHANNEL_W 3
108
/* Instruction compression control (replaced on Gfx6+ by the
 * GFX6_COMPRESSION_* quarter/half controls below).
 */
enum brw_compression {
   BRW_COMPRESSION_NONE = 0,
   BRW_COMPRESSION_2NDHALF = 1,
   BRW_COMPRESSION_COMPRESSED = 2,
};
114
/* Gfx6+ quarter control: which quarter (1Q-4Q) or half (1H/2H) of the
 * channels a partial instruction executes on.  Note 1H shares encoding 0
 * with 1Q, and 2H shares encoding 2 with 3Q.
 */
#define GFX6_COMPRESSION_1Q 0
#define GFX6_COMPRESSION_2Q 1
#define GFX6_COMPRESSION_3Q 2
#define GFX6_COMPRESSION_4Q 3
#define GFX6_COMPRESSION_1H 0
#define GFX6_COMPRESSION_2H 2
121
/* Conditional modifier encodings.  EQ/NEQ deliberately alias Z/NZ
 * (same hardware encodings, alternate spellings).
 */
enum PACKED brw_conditional_mod {
   BRW_CONDITIONAL_NONE = 0,
   BRW_CONDITIONAL_Z = 1,
   BRW_CONDITIONAL_NZ = 2,
   BRW_CONDITIONAL_EQ = 1, /* Z */
   BRW_CONDITIONAL_NEQ = 2, /* NZ */
   BRW_CONDITIONAL_G = 3,
   BRW_CONDITIONAL_GE = 4,
   BRW_CONDITIONAL_L = 5,
   BRW_CONDITIONAL_LE = 6,
   BRW_CONDITIONAL_R = 7, /* Gen <= 5 */
   BRW_CONDITIONAL_O = 8,
   BRW_CONDITIONAL_U = 9,
};
136
/* Debug control values. */
#define BRW_DEBUG_NONE 0
#define BRW_DEBUG_BREAKPOINT 1

/* Dependency control: hints that relax the hardware's register
 * dependency checking/clearing for this instruction.
 */
#define BRW_DEPENDENCY_NORMAL 0
#define BRW_DEPENDENCY_NOTCLEARED 1
#define BRW_DEPENDENCY_NOTCHECKED 2
#define BRW_DEPENDENCY_DISABLE 3
144
/* Execution size: the encoded value is log2 of the number of channels,
 * i.e. BRW_EXECUTE_n executes n channels.
 */
enum PACKED brw_execution_size {
   BRW_EXECUTE_1 = 0,
   BRW_EXECUTE_2 = 1,
   BRW_EXECUTE_4 = 2,
   BRW_EXECUTE_8 = 3,
   BRW_EXECUTE_16 = 4,
   BRW_EXECUTE_32 = 5,
};
153
/* Horizontal stride (element step within a row of a register region);
 * encodings 0-3 select strides of 0, 1, 2 and 4 elements.
 */
enum PACKED brw_horizontal_stride {
   BRW_HORIZONTAL_STRIDE_0 = 0,
   BRW_HORIZONTAL_STRIDE_1 = 1,
   BRW_HORIZONTAL_STRIDE_2 = 2,
   BRW_HORIZONTAL_STRIDE_4 = 3,
};
160
/* Gfx10+ Align1 3-source instruction: source horizontal stride
 * encodings (strides of 0, 1, 2 and 4 elements).
 */
enum PACKED gfx10_align1_3src_src_horizontal_stride {
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2 = 2,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4 = 3,
};
167
/* Gfx10+ Align1 3-source instruction: destination horizontal stride
 * encodings (strides of 1 and 2 elements).
 */
enum PACKED gfx10_align1_3src_dst_horizontal_stride {
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1 = 0,
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_2 = 1,
};
172
/* Saturation control. */
#define BRW_INSTRUCTION_NORMAL 0
#define BRW_INSTRUCTION_SATURATE 1

/* Pre-Gfx6 execution mask enable/disable (see WECtrl note below). */
#define BRW_MASK_ENABLE 0
#define BRW_MASK_DISABLE 1

/** @{
 *
 * Gfx6 has replaced "mask enable/disable" with WECtrl, which is
 * effectively the same but much simpler to think about. Now, there
 * are two contributors ANDed together to whether channels are
 * executed: The predication on the instruction, and the channel write
 * enable.
 */
/**
 * This is the default value. It means that a channel's write enable is set
 * if the per-channel IP is pointing at this instruction.
 */
#define BRW_WE_NORMAL 0
/**
 * This is used like BRW_MASK_DISABLE, and causes all channels to have
 * their write enable set. Note that predication still contributes to
 * whether the channel actually gets written.
 */
#define BRW_WE_ALL 1
/** @} */
199
/**
 * Instruction opcodes.
 *
 * Values below NUM_BRW_OPCODES are actual hardware instructions; values
 * at and after NUM_BRW_OPCODES are virtual, compiler-internal opcodes
 * that the backends translate into real instructions.
 */
enum opcode {
   /* These are the actual hardware instructions. */
   BRW_OPCODE_ILLEGAL,
   BRW_OPCODE_SYNC,
   BRW_OPCODE_MOV,
   BRW_OPCODE_SEL,
   BRW_OPCODE_MOVI, /**< G45+ */
   BRW_OPCODE_NOT,
   BRW_OPCODE_AND,
   BRW_OPCODE_OR,
   BRW_OPCODE_XOR,
   BRW_OPCODE_SHR,
   BRW_OPCODE_SHL,
   BRW_OPCODE_DIM, /**< Gfx7.5 only */
   BRW_OPCODE_SMOV, /**< Gfx8+ */
   BRW_OPCODE_ASR,
   BRW_OPCODE_ROR, /**< Gfx11+ */
   BRW_OPCODE_ROL, /**< Gfx11+ */
   BRW_OPCODE_CMP,
   BRW_OPCODE_CMPN,
   BRW_OPCODE_CSEL, /**< Gfx8+ */
   BRW_OPCODE_F32TO16, /**< Gfx7 only */
   BRW_OPCODE_F16TO32, /**< Gfx7 only */
   BRW_OPCODE_BFREV, /**< Gfx7+ */
   BRW_OPCODE_BFE, /**< Gfx7+ */
   BRW_OPCODE_BFI1, /**< Gfx7+ */
   BRW_OPCODE_BFI2, /**< Gfx7+ */
   BRW_OPCODE_JMPI,
   BRW_OPCODE_BRD, /**< Gfx7+ */
   BRW_OPCODE_IF,
   BRW_OPCODE_IFF, /**< Pre-Gfx6 */
   BRW_OPCODE_BRC, /**< Gfx7+ */
   BRW_OPCODE_ELSE,
   BRW_OPCODE_ENDIF,
   BRW_OPCODE_DO, /**< Pre-Gfx6 */
   BRW_OPCODE_CASE, /**< Gfx6 only */
   BRW_OPCODE_WHILE,
   BRW_OPCODE_BREAK,
   BRW_OPCODE_CONTINUE,
   BRW_OPCODE_HALT,
   BRW_OPCODE_CALLA, /**< Gfx7.5+ */
   BRW_OPCODE_MSAVE, /**< Pre-Gfx6 */
   BRW_OPCODE_CALL, /**< Gfx6+ */
   BRW_OPCODE_MREST, /**< Pre-Gfx6 */
   BRW_OPCODE_RET, /**< Gfx6+ */
   BRW_OPCODE_PUSH, /**< Pre-Gfx6 */
   BRW_OPCODE_FORK, /**< Gfx6 only */
   BRW_OPCODE_GOTO, /**< Gfx8+ */
   BRW_OPCODE_POP, /**< Pre-Gfx6 */
   BRW_OPCODE_WAIT,
   BRW_OPCODE_SEND,
   BRW_OPCODE_SENDC,
   BRW_OPCODE_SENDS, /**< Gfx9+ */
   BRW_OPCODE_SENDSC, /**< Gfx9+ */
   BRW_OPCODE_MATH, /**< Gfx6+ */
   BRW_OPCODE_ADD,
   BRW_OPCODE_MUL,
   BRW_OPCODE_AVG,
   BRW_OPCODE_FRC,
   BRW_OPCODE_RNDU,
   BRW_OPCODE_RNDD,
   BRW_OPCODE_RNDE,
   BRW_OPCODE_RNDZ,
   BRW_OPCODE_MAC,
   BRW_OPCODE_MACH,
   BRW_OPCODE_LZD,
   BRW_OPCODE_FBH, /**< Gfx7+ */
   BRW_OPCODE_FBL, /**< Gfx7+ */
   BRW_OPCODE_CBIT, /**< Gfx7+ */
   BRW_OPCODE_ADDC, /**< Gfx7+ */
   BRW_OPCODE_SUBB, /**< Gfx7+ */
   BRW_OPCODE_SAD2,
   BRW_OPCODE_SADA2,
   BRW_OPCODE_ADD3, /* Gen12+ only */
   BRW_OPCODE_DP4,
   BRW_OPCODE_DPH,
   BRW_OPCODE_DP3,
   BRW_OPCODE_DP2,
   BRW_OPCODE_DP4A, /**< Gfx12+ */
   BRW_OPCODE_LINE,
   BRW_OPCODE_PLN, /**< G45+ */
   BRW_OPCODE_MAD, /**< Gfx6+ */
   BRW_OPCODE_LRP, /**< Gfx6+ */
   BRW_OPCODE_MADM, /**< Gfx8+ */
   BRW_OPCODE_NENOP, /**< G45 only */
   BRW_OPCODE_NOP,

   /* Number of real hardware opcodes above; virtual opcodes follow. */
   NUM_BRW_OPCODES,

   /* These are compiler backend opcodes that get translated into other
    * instructions.
    */
   FS_OPCODE_FB_WRITE = NUM_BRW_OPCODES,

   /**
    * Same as FS_OPCODE_FB_WRITE but expects its arguments separately as
    * individual sources instead of as a single payload blob. The
    * position/ordering of the arguments are defined by the enum
    * fb_write_logical_srcs.
    */
   FS_OPCODE_FB_WRITE_LOGICAL,

   FS_OPCODE_REP_FB_WRITE,

   FS_OPCODE_FB_READ,
   FS_OPCODE_FB_READ_LOGICAL,

   SHADER_OPCODE_RCP,
   SHADER_OPCODE_RSQ,
   SHADER_OPCODE_SQRT,
   SHADER_OPCODE_EXP2,
   SHADER_OPCODE_LOG2,
   SHADER_OPCODE_POW,
   SHADER_OPCODE_INT_QUOTIENT,
   SHADER_OPCODE_INT_REMAINDER,
   SHADER_OPCODE_SIN,
   SHADER_OPCODE_COS,

   /**
    * A generic "send" opcode. The first two sources are the message
    * descriptor and extended message descriptor respectively. The third
    * and optional fourth sources are the message payload
    */
   SHADER_OPCODE_SEND,

   /**
    * An "undefined" write which does nothing but indicates to liveness that
    * we don't care about any values in the register which predate this
    * instruction. Used to prevent partial writes from causing issues with
    * live ranges.
    */
   SHADER_OPCODE_UNDEF,

   /**
    * Texture sampling opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode but instead of taking a single payload blob they expect their
    * arguments separately as individual sources. The position/ordering of the
    * arguments are defined by the enum tex_logical_srcs.
    */
   SHADER_OPCODE_TEX,
   SHADER_OPCODE_TEX_LOGICAL,
   SHADER_OPCODE_TXD,
   SHADER_OPCODE_TXD_LOGICAL,
   SHADER_OPCODE_TXF,
   SHADER_OPCODE_TXF_LOGICAL,
   SHADER_OPCODE_TXF_LZ,
   SHADER_OPCODE_TXL,
   SHADER_OPCODE_TXL_LOGICAL,
   SHADER_OPCODE_TXL_LZ,
   SHADER_OPCODE_TXS,
   SHADER_OPCODE_TXS_LOGICAL,
   FS_OPCODE_TXB,
   FS_OPCODE_TXB_LOGICAL,
   SHADER_OPCODE_TXF_CMS,
   SHADER_OPCODE_TXF_CMS_LOGICAL,
   SHADER_OPCODE_TXF_CMS_W,
   SHADER_OPCODE_TXF_CMS_W_LOGICAL,
   SHADER_OPCODE_TXF_UMS,
   SHADER_OPCODE_TXF_UMS_LOGICAL,
   SHADER_OPCODE_TXF_MCS,
   SHADER_OPCODE_TXF_MCS_LOGICAL,
   SHADER_OPCODE_LOD,
   SHADER_OPCODE_LOD_LOGICAL,
   SHADER_OPCODE_TG4,
   SHADER_OPCODE_TG4_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET,
   SHADER_OPCODE_TG4_OFFSET_LOGICAL,
   SHADER_OPCODE_SAMPLEINFO,
   SHADER_OPCODE_SAMPLEINFO_LOGICAL,

   SHADER_OPCODE_IMAGE_SIZE_LOGICAL,

   /**
    * Combines multiple sources of size 1 into a larger virtual GRF.
    * For example, parameters for a send-from-GRF message. Or, updating
    * channels of a size 4 VGRF used to store vec4s such as texturing results.
    *
    * This will be lowered into MOVs from each source to consecutive offsets
    * of the destination VGRF.
    *
    * src[0] may be BAD_FILE. If so, the lowering pass skips emitting the MOV,
    * but still reserves the first channel of the destination VGRF. This can be
    * used to reserve space for, say, a message header set up by the generators.
    */
   SHADER_OPCODE_LOAD_PAYLOAD,

   /**
    * Packs a number of sources into a single value. Unlike LOAD_PAYLOAD, this
    * acts intra-channel, obtaining the final value for each channel by
    * combining the sources values for the same channel, the first source
    * occupying the lowest bits and the last source occupying the highest
    * bits.
    */
   FS_OPCODE_PACK,

   SHADER_OPCODE_SHADER_TIME_ADD,

   /**
    * Typed and untyped surface access opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode but instead of taking a single payload blob they expect their
    * arguments separately as individual sources:
    *
    * Source 0: [required] Surface coordinates.
    * Source 1: [optional] Operation source.
    * Source 2: [required] Surface index.
    * Source 3: [required] Number of coordinate components (as UD immediate).
    * Source 4: [required] Opcode-specific control immediate, same as source 2
    *           of the matching non-LOGICAL opcode.
    */
   VEC4_OPCODE_UNTYPED_ATOMIC,
   SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
   VEC4_OPCODE_UNTYPED_SURFACE_READ,
   SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
   VEC4_OPCODE_UNTYPED_SURFACE_WRITE,
   SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,

   SHADER_OPCODE_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_OWORD_BLOCK_WRITE_LOGICAL,

   /**
    * Untyped A64 surface access opcodes.
    *
    * Source 0: 64-bit address
    * Source 1: Operational source
    * Source 2: [required] Opcode-specific control immediate, same as source 2
    *           of the matching non-LOGICAL opcode.
    */
   SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
   SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
   SHADER_OPCODE_A64_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_A64_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_A64_OWORD_BLOCK_WRITE_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT16_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT16_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT32_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT64_LOGICAL,

   SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
   SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,

   SHADER_OPCODE_RND_MODE,
   SHADER_OPCODE_FLOAT_CONTROL_MODE,

   /**
    * Byte scattered write/read opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode, but instead of taking a single payload blob they expect their
    * arguments separately as individual sources, like untyped write/read.
    */
   SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
   SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL,

   /**
    * Memory fence messages.
    *
    * Source 0: Must be register g0, used as header.
    * Source 1: Immediate bool to indicate whether control is returned to the
    *           thread only after the fence has been honored.
    * Source 2: Immediate byte indicating which memory to fence.  Zero means
    *           global memory; GFX7_BTI_SLM means SLM (for Gfx11+ only).
    *
    * Vec4 backend only uses Source 0.
    */
   SHADER_OPCODE_MEMORY_FENCE,

   /**
    * Scheduling-only fence.
    *
    * Sources can be used to force a stall until the registers in those are
    * available.  This might generate MOVs or SYNC_NOPs (Gfx12+).
    */
   FS_OPCODE_SCHEDULING_FENCE,

   SHADER_OPCODE_GFX4_SCRATCH_READ,
   SHADER_OPCODE_GFX4_SCRATCH_WRITE,
   SHADER_OPCODE_GFX7_SCRATCH_READ,

   SHADER_OPCODE_SCRATCH_HEADER,

   /**
    * Gfx8+ SIMD8 URB Read messages.
    */
   SHADER_OPCODE_URB_READ_SIMD8,
   SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT,

   SHADER_OPCODE_URB_WRITE_SIMD8,
   SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT,
   SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
   SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT,

   /**
    * Return the index of an arbitrary live channel (i.e. one of the channels
    * enabled in the current execution mask) and assign it to the first
    * component of the destination.  Expected to be used as input for the
    * BROADCAST pseudo-opcode.
    */
   SHADER_OPCODE_FIND_LIVE_CHANNEL,

   /**
    * Return the current execution mask in the specified flag subregister.
    * Can be CSE'ed more easily than a plain MOV from the ce0 ARF register.
    */
   FS_OPCODE_LOAD_LIVE_CHANNELS,

   /**
    * Pick the channel from its first source register given by the index
    * specified as second source.  Useful for variable indexing of surfaces.
    *
    * Note that because the result of this instruction is by definition
    * uniform and it can always be splatted to multiple channels using a
    * scalar regioning mode, only the first channel of the destination region
    * is guaranteed to be updated, which implies that BROADCAST instructions
    * should usually be marked force_writemask_all.
    */
   SHADER_OPCODE_BROADCAST,

   /* Pick the channel from its first source register given by the index
    * specified as second source.
    *
    * This is similar to the BROADCAST instruction except that it takes a
    * dynamic index and potentially puts a different value in each output
    * channel.
    */
   SHADER_OPCODE_SHUFFLE,

   /* Select between src0 and src1 based on channel enables.
    *
    * This instruction copies src0 into the enabled channels of the
    * destination and copies src1 into the disabled channels.
    */
   SHADER_OPCODE_SEL_EXEC,

   /* This turns into an align16 mov from src0 to dst with a swizzle
    * provided as an immediate in src1.
    */
   SHADER_OPCODE_QUAD_SWIZZLE,

   /* Take every Nth element in src0 and broadcast it to the group of N
    * channels in which it lives in the destination.  The offset within the
    * cluster is given by src1 and the cluster size is given by src2.
    */
   SHADER_OPCODE_CLUSTER_BROADCAST,

   SHADER_OPCODE_GET_BUFFER_SIZE,

   SHADER_OPCODE_INTERLOCK,

   /** Target for a HALT
    *
    * All HALT instructions in a shader must target the same jump point and
    * that point is denoted by a HALT_TARGET instruction.
    */
   SHADER_OPCODE_HALT_TARGET,

   VEC4_OPCODE_MOV_BYTES,
   VEC4_OPCODE_PACK_BYTES,
   VEC4_OPCODE_UNPACK_UNIFORM,
   VEC4_OPCODE_DOUBLE_TO_F32,
   VEC4_OPCODE_DOUBLE_TO_D32,
   VEC4_OPCODE_DOUBLE_TO_U32,
   VEC4_OPCODE_TO_DOUBLE,
   VEC4_OPCODE_PICK_LOW_32BIT,
   VEC4_OPCODE_PICK_HIGH_32BIT,
   VEC4_OPCODE_SET_LOW_32BIT,
   VEC4_OPCODE_SET_HIGH_32BIT,
   VEC4_OPCODE_MOV_FOR_SCRATCH,
   VEC4_OPCODE_ZERO_OOB_PUSH_REGS,

   FS_OPCODE_DDX_COARSE,
   FS_OPCODE_DDX_FINE,
   /**
    * Compute dFdy(), dFdyCoarse(), or dFdyFine().
    */
   FS_OPCODE_DDY_COARSE,
   FS_OPCODE_DDY_FINE,
   FS_OPCODE_LINTERP,
   FS_OPCODE_PIXEL_X,
   FS_OPCODE_PIXEL_Y,
   FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
   FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GFX7,
   FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GFX4,
   FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
   FS_OPCODE_SET_SAMPLE_ID,
   FS_OPCODE_PACK_HALF_2x16_SPLIT,
   FS_OPCODE_INTERPOLATE_AT_SAMPLE,
   FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
   FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET,

   VS_OPCODE_URB_WRITE,
   VS_OPCODE_PULL_CONSTANT_LOAD,
   VS_OPCODE_PULL_CONSTANT_LOAD_GFX7,

   VS_OPCODE_UNPACK_FLAGS_SIMD4X2,

   /**
    * Write geometry shader output data to the URB.
    *
    * Unlike VS_OPCODE_URB_WRITE, this opcode doesn't do an implied move from
    * R0 to the first MRF.  This allows the geometry shader to override the
    * "Slot {0,1} Offset" fields in the message header.
    */
   GS_OPCODE_URB_WRITE,

   /**
    * Write geometry shader output data to the URB and request a new URB
    * handle (gfx6).
    *
    * This opcode doesn't do an implied move from R0 to the first MRF.
    */
   GS_OPCODE_URB_WRITE_ALLOCATE,

   /**
    * Terminate the geometry shader thread by doing an empty URB write.
    *
    * This opcode doesn't do an implied move from R0 to the first MRF.  This
    * allows the geometry shader to override the "GS Number of Output Vertices
    * for Slot {0,1}" fields in the message header.
    */
   GS_OPCODE_THREAD_END,

   /**
    * Set the "Slot {0,1} Offset" fields of a URB_WRITE message header.
    *
    * - dst is the MRF containing the message header.
    *
    * - src0.x indicates which portion of the URB should be written to (e.g. a
    *   vertex number)
    *
    * - src1 is an immediate multiplier which will be applied to src0
    *   (e.g. the size of a single vertex in the URB).
    *
    * Note: the hardware will apply this offset *in addition to* the offset in
    * vec4_instruction::offset.
    */
   GS_OPCODE_SET_WRITE_OFFSET,

   /**
    * Set the "GS Number of Output Vertices for Slot {0,1}" fields of a
    * URB_WRITE message header.
    *
    * - dst is the MRF containing the message header.
    *
    * - src0.x is the vertex count.  The upper 16 bits will be ignored.
    */
   GS_OPCODE_SET_VERTEX_COUNT,

   /**
    * Set DWORD 2 of dst to the value in src.
    */
   GS_OPCODE_SET_DWORD_2,

   /**
    * Prepare the dst register for storage in the "Channel Mask" fields of a
    * URB_WRITE message header.
    *
    * DWORD 4 of dst is shifted left by 4 bits, so that later,
    * GS_OPCODE_SET_CHANNEL_MASKS can OR DWORDs 0 and 4 together to form the
    * final channel mask.
    *
    * Note: since GS_OPCODE_SET_CHANNEL_MASKS ORs DWORDs 0 and 4 together to
    * form the final channel mask, DWORDs 0 and 4 of the dst register must not
    * have any extraneous bits set prior to execution of this opcode (that is,
    * they should be in the range 0x0 to 0xf).
    */
   GS_OPCODE_PREPARE_CHANNEL_MASKS,

   /**
    * Set the "Channel Mask" fields of a URB_WRITE message header.
    *
    * - dst is the MRF containing the message header.
    *
    * - src.x is the channel mask, as prepared by
    *   GS_OPCODE_PREPARE_CHANNEL_MASKS.  DWORDs 0 and 4 are OR'ed together to
    *   form the final channel mask.
    */
   GS_OPCODE_SET_CHANNEL_MASKS,

   /**
    * Get the "Instance ID" fields from the payload.
    *
    * - dst is the GRF for gl_InvocationID.
    */
   GS_OPCODE_GET_INSTANCE_ID,

   /**
    * Send a FF_SYNC message to allocate initial URB handles (gfx6).
    *
    * - dst will be used as the writeback register for the FF_SYNC operation.
    *
    * - src0 is the number of primitives written.
    *
    * - src1 is the value to hold in M0.0: number of SO vertices to write
    *   and number of SO primitives needed.  Its value will be overwritten
    *   with the SVBI values if transform feedback is enabled.
    *
    * Note: This opcode uses an implicit MRF register for the ff_sync message
    * header, so the caller is expected to set inst->base_mrf and initialize
    * that MRF register to r0.  This opcode will also write to this MRF register
    * to include the allocated URB handle so it can then be reused directly as
    * the header in the URB write operation we are allocating the handle for.
    */
   GS_OPCODE_FF_SYNC,

   /**
    * Move r0.1 (which holds PrimitiveID information in gfx6) to a separate
    * register.
    *
    * - dst is the GRF where PrimitiveID information will be moved.
    */
   GS_OPCODE_SET_PRIMITIVE_ID,

   /**
    * Write transform feedback data to the SVB by sending a SVB WRITE message.
    * Used in gfx6.
    *
    * - dst is the MRF register containing the message header.
    *
    * - src0 is the register where the vertex data is going to be copied from.
    *
    * - src1 is the destination register when write commit occurs.
    */
   GS_OPCODE_SVB_WRITE,

   /**
    * Set destination index in the SVB write message payload (M0.5).  Used
    * in gfx6 for transform feedback.
    *
    * - dst is the header to save the destination indices for SVB WRITE.
    * - src is the register that holds the destination indices value.
    */
   GS_OPCODE_SVB_SET_DST_INDEX,

   /**
    * Prepare Mx.0 subregister for being used in the FF_SYNC message header.
    * Used in gfx6 for transform feedback.
    *
    * - dst will hold the register with the final Mx.0 value.
    *
    * - src0 has the number of vertices emitted in SO (NumSOVertsToWrite)
    *
    * - src1 has the number of needed primitives for SO (NumSOPrimsNeeded)
    *
    * - src2 is the value to hold in M0: number of SO vertices to write
    *   and number of SO primitives needed.
    */
   GS_OPCODE_FF_SYNC_SET_PRIMITIVES,

   /**
    * Terminate the compute shader.
    */
   CS_OPCODE_CS_TERMINATE,

   /**
    * GLSL barrier()
    */
   SHADER_OPCODE_BARRIER,

   /**
    * Calculate the high 32-bits of a 32x32 multiply.
    */
   SHADER_OPCODE_MULH,

   /** Signed subtraction with saturation. */
   SHADER_OPCODE_ISUB_SAT,

   /** Unsigned subtraction with saturation. */
   SHADER_OPCODE_USUB_SAT,

   /**
    * A MOV that uses VxH indirect addressing.
    *
    * Source 0: A register to start from (HW_REG).
    * Source 1: An indirect offset (in bytes, UD GRF).
    * Source 2: The length of the region that could be accessed (in bytes,
    *           UD immediate).
    */
   SHADER_OPCODE_MOV_INDIRECT,

   /** Fills out a relocatable immediate */
   SHADER_OPCODE_MOV_RELOC_IMM,

   VEC4_OPCODE_URB_READ,
   TCS_OPCODE_GET_INSTANCE_ID,
   TCS_OPCODE_URB_WRITE,
   TCS_OPCODE_SET_INPUT_URB_OFFSETS,
   TCS_OPCODE_SET_OUTPUT_URB_OFFSETS,
   TCS_OPCODE_GET_PRIMITIVE_ID,
   TCS_OPCODE_CREATE_BARRIER_HEADER,
   TCS_OPCODE_SRC0_010_IS_ZERO,
   TCS_OPCODE_RELEASE_INPUT,
   TCS_OPCODE_THREAD_END,

   TES_OPCODE_GET_PRIMITIVE_ID,
   TES_OPCODE_CREATE_INPUT_READ_HEADER,
   TES_OPCODE_ADD_INDIRECT_URB_OFFSET,

   SHADER_OPCODE_GET_DSS_ID,
   SHADER_OPCODE_BTD_SPAWN_LOGICAL,
   SHADER_OPCODE_BTD_RETIRE_LOGICAL,

   RT_OPCODE_TRACE_RAY_LOGICAL,
};
816
/**
 * Flags controlling URB write messages (entry allocation, end-of-thread,
 * per-slot offsets, channel masks).  Several flags are only meaningful on
 * particular generations, as noted on each value.
 */
enum brw_urb_write_flags {
   BRW_URB_WRITE_NO_FLAGS = 0,

   /**
    * Causes a new URB entry to be allocated, and its address stored in the
    * destination register (gen < 7).
    */
   BRW_URB_WRITE_ALLOCATE = 0x1,

   /**
    * Causes the current URB entry to be deallocated (gen < 7).
    */
   BRW_URB_WRITE_UNUSED = 0x2,

   /**
    * Causes the thread to terminate.
    */
   BRW_URB_WRITE_EOT = 0x4,

   /**
    * Indicates that the given URB entry is complete, and may be sent further
    * down the 3D pipeline (gen < 7).
    */
   BRW_URB_WRITE_COMPLETE = 0x8,

   /**
    * Indicates that an additional offset (which may be different for the two
    * vec4 slots) is stored in the message header (gen == 7).
    */
   BRW_URB_WRITE_PER_SLOT_OFFSET = 0x10,

   /**
    * Indicates that the channel masks in the URB_WRITE message header should
    * not be overridden to 0xff (gen == 7).
    */
   BRW_URB_WRITE_USE_CHANNEL_MASKS = 0x20,

   /**
    * Indicates that the data should be sent to the URB using the
    * URB_WRITE_OWORD message rather than URB_WRITE_HWORD (gen == 7).  This
    * causes offsets to be interpreted as multiples of an OWORD instead of an
    * HWORD, and only allows one OWORD to be written.
    */
   BRW_URB_WRITE_OWORD = 0x40,

   /**
    * Convenient combination of flags: end the thread while simultaneously
    * marking the given URB entry as complete.
    */
   BRW_URB_WRITE_EOT_COMPLETE = BRW_URB_WRITE_EOT | BRW_URB_WRITE_COMPLETE,

   /**
    * Convenient combination of flags: mark the given URB entry as complete
    * and simultaneously allocate a new one.
    */
   BRW_URB_WRITE_ALLOCATE_COMPLETE =
      BRW_URB_WRITE_ALLOCATE | BRW_URB_WRITE_COMPLETE,
};
875
/** Source ordering for FS_OPCODE_FB_WRITE_LOGICAL. */
enum fb_write_logical_srcs {
   FB_WRITE_LOGICAL_SRC_COLOR0,      /* REQUIRED */
   FB_WRITE_LOGICAL_SRC_COLOR1,      /* for dual source blend messages */
   FB_WRITE_LOGICAL_SRC_SRC0_ALPHA,
   FB_WRITE_LOGICAL_SRC_SRC_DEPTH,   /* gl_FragDepth */
   FB_WRITE_LOGICAL_SRC_DST_DEPTH,   /* GFX4-5: passthrough from thread */
   FB_WRITE_LOGICAL_SRC_SRC_STENCIL, /* gl_FragStencilRefARB */
   FB_WRITE_LOGICAL_SRC_OMASK,       /* Sample Mask (gl_SampleMask) */
   FB_WRITE_LOGICAL_SRC_COMPONENTS,  /* REQUIRED */
   FB_WRITE_LOGICAL_NUM_SRCS
};
887
/** Source ordering for the *_LOGICAL texture sampling opcodes. */
enum tex_logical_srcs {
   /** Texture coordinates */
   TEX_LOGICAL_SRC_COORDINATE,
   /** Shadow comparator */
   TEX_LOGICAL_SRC_SHADOW_C,
   /** dPdx if the operation takes explicit derivatives, otherwise LOD value */
   TEX_LOGICAL_SRC_LOD,
   /** dPdy if the operation takes explicit derivatives */
   TEX_LOGICAL_SRC_LOD2,
   /** Min LOD */
   TEX_LOGICAL_SRC_MIN_LOD,
   /** Sample index */
   TEX_LOGICAL_SRC_SAMPLE_INDEX,
   /** MCS data */
   TEX_LOGICAL_SRC_MCS,
   /** REQUIRED: Texture surface index */
   TEX_LOGICAL_SRC_SURFACE,
   /** Texture sampler index */
   TEX_LOGICAL_SRC_SAMPLER,
   /** Texture surface bindless handle */
   TEX_LOGICAL_SRC_SURFACE_HANDLE,
   /** Texture sampler bindless handle */
   TEX_LOGICAL_SRC_SAMPLER_HANDLE,
   /** Texel offset for gathers */
   TEX_LOGICAL_SRC_TG4_OFFSET,
   /** REQUIRED: Number of coordinate components (as UD immediate) */
   TEX_LOGICAL_SRC_COORD_COMPONENTS,
   /** REQUIRED: Number of derivative components (as UD immediate) */
   TEX_LOGICAL_SRC_GRAD_COMPONENTS,

   TEX_LOGICAL_NUM_SRCS,
};
920
/** Source ordering for the *_LOGICAL surface access opcodes. */
enum surface_logical_srcs {
   /** Surface binding table index */
   SURFACE_LOGICAL_SRC_SURFACE,
   /** Surface bindless handle */
   SURFACE_LOGICAL_SRC_SURFACE_HANDLE,
   /** Surface address; could be multi-dimensional for typed opcodes */
   SURFACE_LOGICAL_SRC_ADDRESS,
   /** Data to be written or used in an atomic op */
   SURFACE_LOGICAL_SRC_DATA,
   /** Surface number of dimensions.  Affects the size of ADDRESS */
   SURFACE_LOGICAL_SRC_IMM_DIMS,
   /** Per-opcode immediate argument.  For atomics, this is the atomic opcode */
   SURFACE_LOGICAL_SRC_IMM_ARG,
   /**
    * Some instructions with side-effects should not be predicated on
    * sample mask, e.g. lowered stores to scratch.
    */
   SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK,

   SURFACE_LOGICAL_NUM_SRCS
};
942
943 #ifdef __cplusplus
944 /**
945 * Allow brw_urb_write_flags enums to be ORed together.
946 */
947 inline brw_urb_write_flags
948 operator|(brw_urb_write_flags x, brw_urb_write_flags y)
949 {
950 return static_cast<brw_urb_write_flags>(static_cast<int>(x) |
951 static_cast<int>(y));
952 }
953 #endif
954
/* Predication modes.  Values 2-13 are the Align1 flag-combination modes;
 * encodings 2-7 are reused with different meanings in Align16 mode
 * (channel replication and ANY4H/ALL4H).
 */
enum PACKED brw_predicate {
   BRW_PREDICATE_NONE = 0,
   BRW_PREDICATE_NORMAL = 1,
   BRW_PREDICATE_ALIGN1_ANYV = 2,
   BRW_PREDICATE_ALIGN1_ALLV = 3,
   BRW_PREDICATE_ALIGN1_ANY2H = 4,
   BRW_PREDICATE_ALIGN1_ALL2H = 5,
   BRW_PREDICATE_ALIGN1_ANY4H = 6,
   BRW_PREDICATE_ALIGN1_ALL4H = 7,
   BRW_PREDICATE_ALIGN1_ANY8H = 8,
   BRW_PREDICATE_ALIGN1_ALL8H = 9,
   BRW_PREDICATE_ALIGN1_ANY16H = 10,
   BRW_PREDICATE_ALIGN1_ALL16H = 11,
   BRW_PREDICATE_ALIGN1_ANY32H = 12,
   BRW_PREDICATE_ALIGN1_ALL32H = 13,
   BRW_PREDICATE_ALIGN16_REPLICATE_X = 2,
   BRW_PREDICATE_ALIGN16_REPLICATE_Y = 3,
   BRW_PREDICATE_ALIGN16_REPLICATE_Z = 4,
   BRW_PREDICATE_ALIGN16_REPLICATE_W = 5,
   BRW_PREDICATE_ALIGN16_ANY4H = 6,
   BRW_PREDICATE_ALIGN16_ALL4H = 7,
};
977
/* Register files.  The first four values are hardware encodings (with
 * short aliases); VGRF and later are compiler-internal files that do
 * not correspond to a hardware encoding.
 */
enum PACKED brw_reg_file {
   BRW_ARCHITECTURE_REGISTER_FILE = 0,
   BRW_GENERAL_REGISTER_FILE = 1,
   BRW_MESSAGE_REGISTER_FILE = 2,
   BRW_IMMEDIATE_VALUE = 3,

   ARF = BRW_ARCHITECTURE_REGISTER_FILE,
   FIXED_GRF = BRW_GENERAL_REGISTER_FILE,
   MRF = BRW_MESSAGE_REGISTER_FILE,
   IMM = BRW_IMMEDIATE_VALUE,

   /* These are not hardware values */
   VGRF,
   ATTR,
   UNIFORM, /* prog_data->params[reg] */
   BAD_FILE,
};
995
/* Register files for Align1 3-src instructions (Gfx10+).  Note that the
 * meaning of encoding 1 depends on the operand position: immediate for
 * src0/src2, accumulator for dest/src1.
 */
enum PACKED gfx10_align1_3src_reg_file {
   BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE = 0,
   BRW_ALIGN1_3SRC_IMMEDIATE_VALUE = 1, /* src0, src2 */
   BRW_ALIGN1_3SRC_ACCUMULATOR = 1, /* dest, src1 */
};
1001
/* CNL adds Align1 support for 3-src instructions. Bit 35 of the instruction
 * word is "Execution Datatype" which controls whether the instruction operates
 * on float or integer types. The register arguments have fields that offer
 * more fine control over their respective types.
 */
enum PACKED gfx10_align1_3src_exec_type {
   BRW_ALIGN1_3SRC_EXEC_TYPE_INT = 0,
   BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT = 1,
};
1011
/* Architecture Register File (ARF) register numbers.  The high nibble
 * selects the architecture register type; the low nibble is left for the
 * register number within that type.
 */
#define BRW_ARF_NULL 0x00
#define BRW_ARF_ADDRESS 0x10
#define BRW_ARF_ACCUMULATOR 0x20
#define BRW_ARF_FLAG 0x30
#define BRW_ARF_MASK 0x40
#define BRW_ARF_MASK_STACK 0x50
#define BRW_ARF_MASK_STACK_DEPTH 0x60
#define BRW_ARF_STATE 0x70
#define BRW_ARF_CONTROL 0x80
#define BRW_ARF_NOTIFICATION_COUNT 0x90
#define BRW_ARF_IP 0xA0
#define BRW_ARF_TDR 0xB0
#define BRW_ARF_TIMESTAMP 0xC0

/* Flag bit in an MRF register number; NOTE(review): presumably selects the
 * COMPR4 compressed addressing mode — confirm against the PRM.
 */
#define BRW_MRF_COMPR4 (1 << 7)

/* Mask register selectors. */
#define BRW_AMASK 0
#define BRW_IMASK 1
#define BRW_LMASK 2
#define BRW_CMASK 3

/* Thread control values. */
#define BRW_THREAD_NORMAL 0
#define BRW_THREAD_ATOMIC 1
#define BRW_THREAD_SWITCH 2
1038
/* Vertical stride of a register region.  For non-zero strides the hardware
 * encoding is log2(stride) + 1.
 */
enum PACKED brw_vertical_stride {
   BRW_VERTICAL_STRIDE_0 = 0,
   BRW_VERTICAL_STRIDE_1 = 1,
   BRW_VERTICAL_STRIDE_2 = 2,
   BRW_VERTICAL_STRIDE_4 = 3,
   BRW_VERTICAL_STRIDE_8 = 4,
   BRW_VERTICAL_STRIDE_16 = 5,
   BRW_VERTICAL_STRIDE_32 = 6,
   BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL = 0xF,
};
1049
/* Vertical stride encodings for Align1 3-src instructions (Gfx10+).  Note
 * that strides 1 and 2 deliberately share encoding 1 per the hardware
 * definition of this field.
 */
enum PACKED gfx10_align1_3src_vertical_stride {
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4 = 2,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8 = 3,
};
1057
/* Width of a register region.  The hardware encoding is log2(width). */
enum PACKED brw_width {
   BRW_WIDTH_1 = 0,
   BRW_WIDTH_2 = 1,
   BRW_WIDTH_4 = 2,
   BRW_WIDTH_8 = 3,
   BRW_WIDTH_16 = 4,
};
1065
/**
 * Gfx12+ SWSB SBID synchronization mode.
 *
 * This is represented as a bitmask including any required SBID token
 * synchronization modes, used to synchronize out-of-order instructions. Only
 * the strongest mode of the mask will be provided to the hardware in the SWSB
 * field of an actual hardware instruction, but virtual instructions may be
 * able to take into account multiple of them.
 */
enum tgl_sbid_mode {
   TGL_SBID_NULL = 0, /* No SBID dependency. */
   TGL_SBID_SRC = 1,  /* Wait for the token's source reads to complete. */
   TGL_SBID_DST = 2,  /* Wait for the token's destination write to complete. */
   TGL_SBID_SET = 4   /* Allocate (set) the SBID token. */
};
1081
1082 #ifdef __cplusplus
1083 /**
1084 * Allow bitwise arithmetic of tgl_sbid_mode enums.
1085 */
1086 inline tgl_sbid_mode
1087 operator|(tgl_sbid_mode x, tgl_sbid_mode y)
1088 {
1089 return tgl_sbid_mode(unsigned(x) | unsigned(y));
1090 }
1091
1092 inline tgl_sbid_mode
1093 operator&(tgl_sbid_mode x, tgl_sbid_mode y)
1094 {
1095 return tgl_sbid_mode(unsigned(x) & unsigned(y));
1096 }
1097
1098 inline tgl_sbid_mode &
1099 operator|=(tgl_sbid_mode &x, tgl_sbid_mode y)
1100 {
1101 return x = x | y;
1102 }
1103
1104 #endif
1105
/**
 * TGL+ SWSB RegDist synchronization pipeline.
 *
 * On TGL all instructions that use the RegDist synchronization mechanism are
 * considered to be executed as a single in-order pipeline, therefore only the
 * TGL_PIPE_FLOAT pipeline is applicable. On XeHP+ platforms there are two
 * additional asynchronous ALU pipelines (which still execute instructions
 * in-order and use the RegDist synchronization mechanism). TGL_PIPE_NONE
 * doesn't provide any RegDist pipeline synchronization information and allows
 * the hardware to infer the pipeline based on the source types of the
 * instruction. TGL_PIPE_ALL can be used when synchronization with all ALU
 * pipelines is intended.
 */
enum tgl_pipe {
   TGL_PIPE_NONE = 0, /* Pipeline inferred by hardware from source types. */
   TGL_PIPE_FLOAT,    /* Floating-point ALU pipeline. */
   TGL_PIPE_INT,      /* Integer ALU pipeline (XeHP+). */
   TGL_PIPE_LONG,     /* Long (64-bit) ALU pipeline (XeHP+). */
   TGL_PIPE_ALL       /* Synchronize against every in-order pipeline. */
};
1126
/**
 * Logical representation of the SWSB scheduling information of a hardware
 * instruction. The binary representation is slightly more compact.
 */
struct tgl_swsb {
   unsigned regdist : 3;        /* In-order distance, 0 means no RegDist dep. */
   enum tgl_pipe pipe : 3;      /* Pipeline the RegDist count applies to. */
   unsigned sbid : 4;           /* SBID token index (0..15). */
   enum tgl_sbid_mode mode : 3; /* Bitmask of SBID synchronization modes. */
};
1137
1138 /**
1139 * Construct a scheduling annotation with a single RegDist dependency. This
1140 * synchronizes with the completion of the d-th previous in-order instruction.
1141 * The index is one-based, zero causes a no-op tgl_swsb to be constructed.
1142 */
1143 static inline struct tgl_swsb
tgl_swsb_regdist(unsigned d)1144 tgl_swsb_regdist(unsigned d)
1145 {
1146 const struct tgl_swsb swsb = { d, d ? TGL_PIPE_ALL : TGL_PIPE_NONE };
1147 assert(swsb.regdist == d);
1148 return swsb;
1149 }
1150
1151 /**
1152 * Construct a scheduling annotation that synchronizes with the specified SBID
1153 * token.
1154 */
1155 static inline struct tgl_swsb
tgl_swsb_sbid(enum tgl_sbid_mode mode,unsigned sbid)1156 tgl_swsb_sbid(enum tgl_sbid_mode mode, unsigned sbid)
1157 {
1158 const struct tgl_swsb swsb = { 0, TGL_PIPE_NONE, sbid, mode };
1159 assert(swsb.sbid == sbid);
1160 return swsb;
1161 }
1162
1163 /**
1164 * Construct a no-op scheduling annotation.
1165 */
1166 static inline struct tgl_swsb
tgl_swsb_null(void)1167 tgl_swsb_null(void)
1168 {
1169 return tgl_swsb_regdist(0);
1170 }
1171
1172 /**
1173 * Return a scheduling annotation that allocates the same SBID synchronization
1174 * token as \p swsb. In addition it will synchronize against a previous
1175 * in-order instruction if \p regdist is non-zero.
1176 */
1177 static inline struct tgl_swsb
tgl_swsb_dst_dep(struct tgl_swsb swsb,unsigned regdist)1178 tgl_swsb_dst_dep(struct tgl_swsb swsb, unsigned regdist)
1179 {
1180 swsb.regdist = regdist;
1181 swsb.mode = swsb.mode & TGL_SBID_SET;
1182 swsb.pipe = (regdist ? TGL_PIPE_ALL : TGL_PIPE_NONE);
1183 return swsb;
1184 }
1185
1186 /**
1187 * Return a scheduling annotation that synchronizes against the same SBID and
1188 * RegDist dependencies as \p swsb, but doesn't allocate any SBID token.
1189 */
1190 static inline struct tgl_swsb
tgl_swsb_src_dep(struct tgl_swsb swsb)1191 tgl_swsb_src_dep(struct tgl_swsb swsb)
1192 {
1193 swsb.mode = swsb.mode & (TGL_SBID_SRC | TGL_SBID_DST);
1194 return swsb;
1195 }
1196
1197 /**
1198 * Convert the provided tgl_swsb to the hardware's binary representation of an
1199 * SWSB annotation.
1200 */
1201 static inline uint8_t
tgl_swsb_encode(const struct intel_device_info * devinfo,struct tgl_swsb swsb)1202 tgl_swsb_encode(const struct intel_device_info *devinfo, struct tgl_swsb swsb)
1203 {
1204 if (!swsb.mode) {
1205 const unsigned pipe = devinfo->verx10 < 125 ? 0 :
1206 swsb.pipe == TGL_PIPE_FLOAT ? 0x10 :
1207 swsb.pipe == TGL_PIPE_INT ? 0x18 :
1208 swsb.pipe == TGL_PIPE_LONG ? 0x50 :
1209 swsb.pipe == TGL_PIPE_ALL ? 0x8 : 0;
1210 return pipe | swsb.regdist;
1211 } else if (swsb.regdist) {
1212 return 0x80 | swsb.regdist << 4 | swsb.sbid;
1213 } else {
1214 return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0x40 :
1215 swsb.mode & TGL_SBID_DST ? 0x20 : 0x30);
1216 }
1217 }
1218
1219 /**
1220 * Convert the provided binary representation of an SWSB annotation to a
1221 * tgl_swsb.
1222 */
1223 static inline struct tgl_swsb
tgl_swsb_decode(const struct intel_device_info * devinfo,const enum opcode opcode,const uint8_t x)1224 tgl_swsb_decode(const struct intel_device_info *devinfo, const enum opcode opcode,
1225 const uint8_t x)
1226 {
1227 if (x & 0x80) {
1228 const struct tgl_swsb swsb = { (x & 0x70u) >> 4, TGL_PIPE_NONE,
1229 x & 0xfu,
1230 (opcode == BRW_OPCODE_SEND ||
1231 opcode == BRW_OPCODE_SENDC ||
1232 opcode == BRW_OPCODE_MATH) ?
1233 TGL_SBID_SET : TGL_SBID_DST };
1234 return swsb;
1235 } else if ((x & 0x70) == 0x20) {
1236 return tgl_swsb_sbid(TGL_SBID_DST, x & 0xfu);
1237 } else if ((x & 0x70) == 0x30) {
1238 return tgl_swsb_sbid(TGL_SBID_SRC, x & 0xfu);
1239 } else if ((x & 0x70) == 0x40) {
1240 return tgl_swsb_sbid(TGL_SBID_SET, x & 0xfu);
1241 } else {
1242 const struct tgl_swsb swsb = { x & 0x7u,
1243 ((x & 0x78) == 0x10 ? TGL_PIPE_FLOAT :
1244 (x & 0x78) == 0x18 ? TGL_PIPE_INT :
1245 (x & 0x78) == 0x50 ? TGL_PIPE_LONG :
1246 (x & 0x78) == 0x8 ? TGL_PIPE_ALL :
1247 TGL_PIPE_NONE) };
1248 assert(devinfo->verx10 >= 125 || swsb.pipe == TGL_PIPE_NONE);
1249 return swsb;
1250 }
1251 }
1252
/* Sub-function encodings, presumably for the Gfx12+ sync instruction —
 * NOTE(review): confirm value meanings against the PRM.
 */
enum tgl_sync_function {
   TGL_SYNC_NOP = 0x0,
   TGL_SYNC_ALLRD = 0x2,
   TGL_SYNC_ALLWR = 0x3,
   TGL_SYNC_BAR = 0xe,
   TGL_SYNC_HOST = 0xf
};
1260
/**
 * Message target: Shared Function ID for where to SEND a message.
 *
 * These are enumerated in the ISA reference under "send - Send Message".
 * In particular, see the following tables:
 * - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
 * - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
 * - Ivybridge PRM, Volume 1 Part 1, section 3.2.7 "GPE Function IDs"
 *
 * Note that encodings are reused across hardware generations, so several
 * enumerants deliberately share values; the prefix indicates the generation
 * the meaning applies to.
 */
enum brw_message_target {
   BRW_SFID_NULL = 0,
   BRW_SFID_MATH = 1, /* Only valid on Gfx4-5 */
   BRW_SFID_SAMPLER = 2,
   BRW_SFID_MESSAGE_GATEWAY = 3,
   BRW_SFID_DATAPORT_READ = 4,
   BRW_SFID_DATAPORT_WRITE = 5,
   BRW_SFID_URB = 6,
   BRW_SFID_THREAD_SPAWNER = 7,
   BRW_SFID_VME = 8,

   GFX6_SFID_DATAPORT_SAMPLER_CACHE = 4,
   GFX6_SFID_DATAPORT_RENDER_CACHE = 5,
   GFX6_SFID_DATAPORT_CONSTANT_CACHE = 9,

   GFX7_SFID_DATAPORT_DATA_CACHE = 10,
   GFX7_SFID_PIXEL_INTERPOLATOR = 11,
   HSW_SFID_DATAPORT_DATA_CACHE_1 = 12,
   HSW_SFID_CRE = 13,

   GFX12_SFID_TGM = 13, /* Typed Global Memory */
   GFX12_SFID_SLM = 14, /* Shared Local Memory */
   GFX12_SFID_UGM = 15, /* Untyped Global Memory */

   GEN_RT_SFID_BINDLESS_THREAD_DISPATCH = 7,
   GEN_RT_SFID_RAY_TRACE_ACCELERATOR = 8,
};
1297
1298 #define GFX7_MESSAGE_TARGET_DP_DATA_CACHE 10
1299
1300 #define BRW_SAMPLER_RETURN_FORMAT_FLOAT32 0
1301 #define BRW_SAMPLER_RETURN_FORMAT_UINT32 2
1302 #define BRW_SAMPLER_RETURN_FORMAT_SINT32 3
1303
1304 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE 0
1305 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE 0
1306 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS 0
1307 #define BRW_SAMPLER_MESSAGE_SIMD8_KILLPIX 1
1308 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD 1
1309 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD 1
1310 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS 2
1311 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS 2
1312 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE 0
1313 #define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE 2
1314 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE 0
1315 #define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE 1
1316 #define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE 1
1317 #define BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO 2
1318 #define BRW_SAMPLER_MESSAGE_SIMD16_RESINFO 2
1319 #define BRW_SAMPLER_MESSAGE_SIMD4X2_LD 3
1320 #define BRW_SAMPLER_MESSAGE_SIMD8_LD 3
1321 #define BRW_SAMPLER_MESSAGE_SIMD16_LD 3
1322
1323 #define GFX5_SAMPLER_MESSAGE_SAMPLE 0
1324 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS 1
1325 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD 2
1326 #define GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE 3
1327 #define GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS 4
1328 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
1329 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE 6
1330 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LD 7
1331 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4 8
1332 #define GFX5_SAMPLER_MESSAGE_LOD 9
1333 #define GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO 10
1334 #define GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO 11
1335 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C 16
1336 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO 17
1337 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C 18
1338 #define HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE 20
1339 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LZ 24
1340 #define GFX9_SAMPLER_MESSAGE_SAMPLE_C_LZ 25
1341 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD_LZ 26
1342 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W 28
1343 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS 29
1344 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS 30
1345 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DSS 31
1346
1347 /* for GFX5 only */
1348 #define BRW_SAMPLER_SIMD_MODE_SIMD4X2 0
1349 #define BRW_SAMPLER_SIMD_MODE_SIMD8 1
1350 #define BRW_SAMPLER_SIMD_MODE_SIMD16 2
1351 #define BRW_SAMPLER_SIMD_MODE_SIMD32_64 3
1352
1353 /* GFX9 changes SIMD mode 0 to mean SIMD8D, but lets us get the SIMD4x2
1354 * behavior by setting bit 22 of dword 2 in the message header. */
1355 #define GFX9_SAMPLER_SIMD_MODE_SIMD8D 0
1356 #define GFX9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2 (1 << 22)
1357
/* Block-size encodings for OWord block read/write dataport messages. */
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW 0
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH 1
#define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS 2
#define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS 3
#define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS 4
#define GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS 5
/* Map an OWord count to its block-size encoding.  Unsupported counts abort;
 * the ~0 after the abort() only exists to give the expression a value.
 */
#define BRW_DATAPORT_OWORD_BLOCK_OWORDS(n) \
   ((n) == 1 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
    (n) == 2 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS : \
    (n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS : \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : \
    (n) == 16 ? GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS : \
    (abort(), ~0))
/* Same mapping, but taking the size as a DWord count (4 DWords = 1 OWord). */
#define BRW_DATAPORT_OWORD_BLOCK_DWORDS(n) \
   ((n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS : \
    (n) == 16 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS : \
    (n) == 32 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : \
    (abort(), ~0))
1377
1378 #define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD 0
1379 #define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS 2
1380
1381 #define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS 2
1382 #define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS 3
1383
1384 /* This one stays the same across generations. */
1385 #define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ 0
1386 /* GFX4 */
1387 #define BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 1
1388 #define BRW_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 2
1389 #define BRW_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 3
1390 /* G45, GFX5 */
1391 #define G45_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
1392 #define G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
1393 #define G45_DATAPORT_READ_MESSAGE_AVC_LOOP_FILTER_READ 3
1394 #define G45_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
1395 #define G45_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
1396 /* GFX6 */
1397 #define GFX6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
1398 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
1399 #define GFX6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
1400 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ 5
1401 #define GFX6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
1402
1403 #define BRW_DATAPORT_READ_TARGET_DATA_CACHE 0
1404 #define BRW_DATAPORT_READ_TARGET_RENDER_CACHE 1
1405 #define BRW_DATAPORT_READ_TARGET_SAMPLER_CACHE 2
1406
1407 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE 0
1408 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1
1409 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01 2
1410 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23 3
1411 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01 4
1412
1413 #define BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 0
1414 #define BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 1
1415 #define BRW_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 2
1416 #define BRW_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 3
1417 #define BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 4
1418 #define BRW_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE 5
1419 #define BRW_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE 7
1420
1421 /* GFX6 */
1422 #define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE 7
1423 #define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 8
1424 #define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 9
1425 #define GFX6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 10
1426 #define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 11
1427 #define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 12
1428 #define GFX6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE 13
1429 #define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE 14
1430
1431 /* GFX7 */
1432 #define GFX7_DATAPORT_RC_MEDIA_BLOCK_READ 4
1433 #define GFX7_DATAPORT_RC_TYPED_SURFACE_READ 5
1434 #define GFX7_DATAPORT_RC_TYPED_ATOMIC_OP 6
1435 #define GFX7_DATAPORT_RC_MEMORY_FENCE 7
1436 #define GFX7_DATAPORT_RC_MEDIA_BLOCK_WRITE 10
1437 #define GFX7_DATAPORT_RC_RENDER_TARGET_WRITE 12
1438 #define GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE 13
1439 #define GFX7_DATAPORT_DC_OWORD_BLOCK_READ 0
1440 #define GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ 1
1441 #define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_READ 2
1442 #define GFX7_DATAPORT_DC_DWORD_SCATTERED_READ 3
1443 #define GFX7_DATAPORT_DC_BYTE_SCATTERED_READ 4
1444 #define GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ 5
1445 #define GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP 6
1446 #define GFX7_DATAPORT_DC_MEMORY_FENCE 7
1447 #define GFX7_DATAPORT_DC_OWORD_BLOCK_WRITE 8
1448 #define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE 10
1449 #define GFX7_DATAPORT_DC_DWORD_SCATTERED_WRITE 11
1450 #define GFX7_DATAPORT_DC_BYTE_SCATTERED_WRITE 12
1451 #define GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE 13
1452
1453 #define GFX7_DATAPORT_SCRATCH_READ ((1 << 18) | \
1454 (0 << 17))
1455 #define GFX7_DATAPORT_SCRATCH_WRITE ((1 << 18) | \
1456 (1 << 17))
1457 #define GFX7_DATAPORT_SCRATCH_NUM_REGS_SHIFT 12
1458
1459 #define GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET 0
1460 #define GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE 1
1461 #define GFX7_PIXEL_INTERPOLATOR_LOC_CENTROID 2
1462 #define GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET 3
1463
1464 /* HSW */
1465 #define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_READ 0
1466 #define HSW_DATAPORT_DC_PORT0_UNALIGNED_OWORD_BLOCK_READ 1
1467 #define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_READ 2
1468 #define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_READ 3
1469 #define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ 4
1470 #define HSW_DATAPORT_DC_PORT0_MEMORY_FENCE 7
1471 #define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_WRITE 8
1472 #define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_WRITE 10
1473 #define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_WRITE 11
1474 #define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE 12
1475
1476 #define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ 1
1477 #define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP 2
1478 #define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2 3
1479 #define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_READ 4
1480 #define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ 5
1481 #define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP 6
1482 #define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2 7
1483 #define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE 9
1484 #define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_WRITE 10
1485 #define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP 11
1486 #define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP_SIMD4X2 12
1487 #define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE 13
1488 #define GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ 0x10
1489 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ 0x11
1490 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP 0x12
1491 #define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP 0x13
1492 #define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ 0x14
1493 #define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE 0x15
1494 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE 0x19
1495 #define GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE 0x1a
1496 #define GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP 0x1b
1497 #define GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP 0x1d
1498 #define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP 0x1e
1499
1500 /* GFX9 */
1501 #define GFX9_DATAPORT_RC_RENDER_TARGET_WRITE 12
1502 #define GFX9_DATAPORT_RC_RENDER_TARGET_READ 13
1503
1504 /* A64 scattered message subtype */
1505 #define GFX8_A64_SCATTERED_SUBTYPE_BYTE 0
1506 #define GFX8_A64_SCATTERED_SUBTYPE_DWORD 1
1507 #define GFX8_A64_SCATTERED_SUBTYPE_QWORD 2
1508 #define GFX8_A64_SCATTERED_SUBTYPE_HWORD 3
1509
1510 /* Dataport special binding table indices: */
1511 #define BRW_BTI_STATELESS 255
1512 #define GFX7_BTI_SLM 254
1513
1514 #define HSW_BTI_STATELESS_LOCALLY_COHERENT 255
1515 #define HSW_BTI_STATELESS_NON_COHERENT 253
1516 #define HSW_BTI_STATELESS_GLOBALLY_COHERENT 252
1517 #define HSW_BTI_STATELESS_LLC_COHERENT 251
1518 #define HSW_BTI_STATELESS_L3_UNCACHED 250
1519
1520 /* The hardware docs are a bit contradictory here. On Haswell, where they
1521 * first added cache ability control, there were 5 different cache modes (see
1522 * HSW_BTI_STATELESS_* above). On Broadwell, they reduced to two:
1523 *
1524 * - IA-Coherent (BTI=255): Coherent within Gen and coherent within the
1525 * entire IA cache memory hierarchy.
1526 *
1527 * - Non-Coherent (BTI=253): Coherent within Gen, same cache type.
1528 *
1529 * Information about stateless cache coherency can be found in the "A32
1530 * Stateless" section of the "3D Media GPGPU" volume of the PRM for each
1531 * hardware generation.
1532 *
1533 * Unfortunately, the docs for MDC_STATELESS appear to have been copied and
1534 * pasted from Haswell and give the Haswell definitions for the BTI values of
1535 * 255 and 253 including a warning about accessing 253 surfaces from multiple
1536 * threads. This seems to be a copy+paste error and the definitions from the
1537 * "A32 Stateless" section should be trusted instead.
1538 *
1539 * Note that because the DRM sets bit 4 of HDC_CHICKEN0 on BDW, CHV and at
1540 * least some pre-production steppings of SKL due to WaForceEnableNonCoherent,
1541 * HDC memory access may have been overridden by the kernel to be non-coherent
1542 * (matching the behavior of the same BTI on pre-Gfx8 hardware) and BTI 255
1543 * may actually be an alias for BTI 253.
1544 */
1545 #define GFX8_BTI_STATELESS_IA_COHERENT 255
1546 #define GFX8_BTI_STATELESS_NON_COHERENT 253
1547 #define GFX9_BTI_BINDLESS 252
1548
1549 /* Dataport atomic operations for Untyped Atomic Integer Operation message
1550 * (and others).
1551 */
1552 #define BRW_AOP_AND 1
1553 #define BRW_AOP_OR 2
1554 #define BRW_AOP_XOR 3
1555 #define BRW_AOP_MOV 4
1556 #define BRW_AOP_INC 5
1557 #define BRW_AOP_DEC 6
1558 #define BRW_AOP_ADD 7
1559 #define BRW_AOP_SUB 8
1560 #define BRW_AOP_REVSUB 9
1561 #define BRW_AOP_IMAX 10
1562 #define BRW_AOP_IMIN 11
1563 #define BRW_AOP_UMAX 12
1564 #define BRW_AOP_UMIN 13
1565 #define BRW_AOP_CMPWR 14
1566 #define BRW_AOP_PREDEC 15
1567
1568 /* Dataport atomic operations for Untyped Atomic Float Operation message. */
1569 #define BRW_AOP_FMAX 1
1570 #define BRW_AOP_FMIN 2
1571 #define BRW_AOP_FCMPWR 3
1572 #define BRW_AOP_FADD 4
1573
1574 #define BRW_MATH_FUNCTION_INV 1
1575 #define BRW_MATH_FUNCTION_LOG 2
1576 #define BRW_MATH_FUNCTION_EXP 3
1577 #define BRW_MATH_FUNCTION_SQRT 4
1578 #define BRW_MATH_FUNCTION_RSQ 5
1579 #define BRW_MATH_FUNCTION_SIN 6
1580 #define BRW_MATH_FUNCTION_COS 7
1581 #define BRW_MATH_FUNCTION_SINCOS 8 /* gfx4, gfx5 */
1582 #define BRW_MATH_FUNCTION_FDIV 9 /* gfx6+ */
1583 #define BRW_MATH_FUNCTION_POW 10
1584 #define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER 11
1585 #define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT 12
1586 #define BRW_MATH_FUNCTION_INT_DIV_REMAINDER 13
1587 #define GFX8_MATH_FUNCTION_INVM 14
1588 #define GFX8_MATH_FUNCTION_RSQRTM 15
1589
1590 #define BRW_MATH_INTEGER_UNSIGNED 0
1591 #define BRW_MATH_INTEGER_SIGNED 1
1592
1593 #define BRW_MATH_PRECISION_FULL 0
1594 #define BRW_MATH_PRECISION_PARTIAL 1
1595
1596 #define BRW_MATH_SATURATE_NONE 0
1597 #define BRW_MATH_SATURATE_SATURATE 1
1598
1599 #define BRW_MATH_DATA_VECTOR 0
1600 #define BRW_MATH_DATA_SCALAR 1
1601
1602 #define BRW_URB_OPCODE_WRITE_HWORD 0
1603 #define BRW_URB_OPCODE_WRITE_OWORD 1
1604 #define BRW_URB_OPCODE_READ_HWORD 2
1605 #define BRW_URB_OPCODE_READ_OWORD 3
1606 #define GFX7_URB_OPCODE_ATOMIC_MOV 4
1607 #define GFX7_URB_OPCODE_ATOMIC_INC 5
1608 #define GFX8_URB_OPCODE_ATOMIC_ADD 6
1609 #define GFX8_URB_OPCODE_SIMD8_WRITE 7
1610 #define GFX8_URB_OPCODE_SIMD8_READ 8
1611 #define GFX125_URB_OPCODE_FENCE 9
1612
1613 #define BRW_URB_SWIZZLE_NONE 0
1614 #define BRW_URB_SWIZZLE_INTERLEAVE 1
1615 #define BRW_URB_SWIZZLE_TRANSPOSE 2
1616
/* Per-thread scratch space size encodings: size = 1 KB << encoding, i.e. the
 * encoding is log2 of the size in KB.
 */
#define BRW_SCRATCH_SPACE_SIZE_1K 0
#define BRW_SCRATCH_SPACE_SIZE_2K 1
#define BRW_SCRATCH_SPACE_SIZE_4K 2
#define BRW_SCRATCH_SPACE_SIZE_8K 3
#define BRW_SCRATCH_SPACE_SIZE_16K 4
#define BRW_SCRATCH_SPACE_SIZE_32K 5
#define BRW_SCRATCH_SPACE_SIZE_64K 6
#define BRW_SCRATCH_SPACE_SIZE_128K 7
#define BRW_SCRATCH_SPACE_SIZE_256K 8
#define BRW_SCRATCH_SPACE_SIZE_512K 9
#define BRW_SCRATCH_SPACE_SIZE_1M 10
#define BRW_SCRATCH_SPACE_SIZE_2M 11
1629
1630 #define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY 0
1631 #define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY 1
1632 #define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG 2
1633 #define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP 3
1634 #define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG 4
1635 #define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
1636 #define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE 6
1637
1638
1639 /* Gfx7 "GS URB Entry Allocation Size" is a U9-1 field, so the maximum gs_size
1640 * is 2^9, or 512. It's counted in multiples of 64 bytes.
1641 *
1642 * Identical for VS, DS, and HS.
1643 */
1644 #define GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES (512*64)
1645 #define GFX7_MAX_DS_URB_ENTRY_SIZE_BYTES (512*64)
1646 #define GFX7_MAX_HS_URB_ENTRY_SIZE_BYTES (512*64)
1647 #define GFX7_MAX_VS_URB_ENTRY_SIZE_BYTES (512*64)
1648
1649 #define BRW_GS_EDGE_INDICATOR_0 (1 << 8)
1650 #define BRW_GS_EDGE_INDICATOR_1 (1 << 9)
1651
1652 /* Gfx6 "GS URB Entry Allocation Size" is defined as a number of 1024-bit
1653 * (128 bytes) URB rows and the maximum allowed value is 5 rows.
1654 */
1655 #define GFX6_MAX_GS_URB_ENTRY_SIZE_BYTES (5*128)
1656
1657 /* GS Thread Payload
1658 */
1659
1660 /* 3DSTATE_GS "Output Vertex Size" has an effective maximum of 62. It's
1661 * counted in multiples of 16 bytes.
1662 */
1663 #define GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES (62*16)
1664
1665
1666 /* R0 */
1667 # define GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT 27
1668
1669 /* CR0.0[5:4] Floating-Point Rounding Modes
1670 * Skylake PRM, Volume 7 Part 1, "Control Register", page 756
1671 */
1672
1673 #define BRW_CR0_RND_MODE_MASK 0x30
1674 #define BRW_CR0_RND_MODE_SHIFT 4
1675
enum PACKED brw_rnd_mode {
   BRW_RND_MODE_RTNE = 0, /* Round to Nearest or Even */
   BRW_RND_MODE_RU = 1, /* Round Up, toward +inf */
   BRW_RND_MODE_RD = 2, /* Round Down, toward -inf */
   BRW_RND_MODE_RTZ = 3, /* Round Toward Zero */
   /* Compiler-internal marker; has no hardware encoding (the CR0 field is
    * only two bits wide, covering values 0-3).
    */
   BRW_RND_MODE_UNSPECIFIED,
};
1683
1684 #define BRW_CR0_FP64_DENORM_PRESERVE (1 << 6)
1685 #define BRW_CR0_FP32_DENORM_PRESERVE (1 << 7)
1686 #define BRW_CR0_FP16_DENORM_PRESERVE (1 << 10)
1687
1688 #define BRW_CR0_FP_MODE_MASK (BRW_CR0_FP64_DENORM_PRESERVE | \
1689 BRW_CR0_FP32_DENORM_PRESERVE | \
1690 BRW_CR0_FP16_DENORM_PRESERVE | \
1691 BRW_CR0_RND_MODE_MASK)
1692
1693 /* MDC_DS - Data Size Message Descriptor Control Field
1694 * Skylake PRM, Volume 2d, page 129
1695 *
1696 * Specifies the number of Bytes to be read or written per Dword used at
1697 * byte_scattered read/write and byte_scaled read/write messages.
1698 */
1699 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE 0
1700 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD 1
1701 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD 2
1702
#define GEN_RT_BTD_MESSAGE_SPAWN 1

/* Trace-ray message control values.  NOTE(review): "INITAL" is a
 * misspelling of "INITIAL" kept as-is since renaming would break users.
 */
#define GEN_RT_TRACE_RAY_INITAL 0
#define GEN_RT_TRACE_RAY_INSTANCE 1
#define GEN_RT_TRACE_RAY_COMMIT 2
#define GEN_RT_TRACE_RAY_CONTINUE 3

/* Shader types for bindless thread dispatch. */
#define GEN_RT_BTD_SHADER_TYPE_ANY_HIT 0
#define GEN_RT_BTD_SHADER_TYPE_CLOSEST_HIT 1
#define GEN_RT_BTD_SHADER_TYPE_MISS 2
#define GEN_RT_BTD_SHADER_TYPE_INTERSECTION 3
1714
/* Starting with Xe-HPG, the old dataport was massively reworked.  The new
 * thing, called Load/Store Cache or LSC, has a significantly improved
 * interface. Instead of bespoke messages for every case, there's basically
 * one or two messages with different bits to control things like address
 * size, how much data is read/written, etc. It's way nicer, but it also
 * means all of the dataport encoding/decoding code had to be rewritten.
 * The enums below describe the new interface.
 */
/* LSC message opcodes.  The _CMASK variants select which vector components
 * to transfer via an lsc_cmask below.  Unlisted values are unused here.
 */
enum lsc_opcode {
   LSC_OP_LOAD = 0,
   LSC_OP_LOAD_CMASK = 2,
   LSC_OP_STORE = 4,
   LSC_OP_STORE_CMASK = 6,
   LSC_OP_ATOMIC_INC = 8,
   LSC_OP_ATOMIC_DEC = 9,
   LSC_OP_ATOMIC_LOAD = 10,
   LSC_OP_ATOMIC_STORE = 11,
   LSC_OP_ATOMIC_ADD = 12,
   LSC_OP_ATOMIC_SUB = 13,
   LSC_OP_ATOMIC_MIN = 14,
   LSC_OP_ATOMIC_MAX = 15,
   LSC_OP_ATOMIC_UMIN = 16,
   LSC_OP_ATOMIC_UMAX = 17,
   LSC_OP_ATOMIC_CMPXCHG = 18,
   LSC_OP_ATOMIC_FADD = 19,
   LSC_OP_ATOMIC_FSUB = 20,
   LSC_OP_ATOMIC_FMIN = 21,
   LSC_OP_ATOMIC_FMAX = 22,
   LSC_OP_ATOMIC_FCMPXCHG = 23,
   LSC_OP_ATOMIC_AND = 24,
   LSC_OP_ATOMIC_OR = 25,
   LSC_OP_ATOMIC_XOR = 26,
   LSC_OP_FENCE = 31
};
1749
/*
 * Specifies the size of the dataport address payload in registers.  The
 * enumerant values equal the register count itself.
 */
enum PACKED lsc_addr_reg_size {
   LSC_ADDR_REG_SIZE_1 = 1,
   LSC_ADDR_REG_SIZE_2 = 2,
   LSC_ADDR_REG_SIZE_3 = 3,
   LSC_ADDR_REG_SIZE_4 = 4,
   LSC_ADDR_REG_SIZE_6 = 6,
   LSC_ADDR_REG_SIZE_8 = 8,
};
1761
/*
 * Specifies the size of the address payload item in a dataport message.
 * Note that encoding 0 is not used.
 */
enum PACKED lsc_addr_size {
   LSC_ADDR_SIZE_A16 = 1, /* 16-bit address offset */
   LSC_ADDR_SIZE_A32 = 2, /* 32-bit address offset */
   LSC_ADDR_SIZE_A64 = 3, /* 64-bit address offset */
};
1770
/*
 * Specifies the type of the address payload item in a dataport message. The
 * address type specifies how the dataport message decodes the Extended
 * Descriptor for the surface attributes and address calculation.
 */
enum PACKED lsc_addr_surface_type {
   LSC_ADDR_SURFTYPE_FLAT = 0, /* Flat */
   LSC_ADDR_SURFTYPE_BSS = 1, /* Bindless surface state */
   LSC_ADDR_SURFTYPE_SS = 2, /* Surface state */
   LSC_ADDR_SURFTYPE_BTI = 3, /* Binding table index */
};
1782
/*
 * Specifies the dataport message override to the default L1 and L3 memory
 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
 * cache streaming (S) and invalidate-after-read (IAR). Dataport L3 cache
 * policies are uncached (UC) and cached (C).
 *
 * NOTE(review): unlike its siblings this enum is not PACKED — confirm
 * whether that is intentional.
 */
enum lsc_cache_load {
   /* No override. Use the non-pipelined state or surface state cache settings
    * for L1 and L3.
    */
   LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   LSC_CACHE_LOAD_L1UC_L3UC = 1,
   /* Override to L1 uncached and L3 cached */
   LSC_CACHE_LOAD_L1UC_L3C = 2,
   /* Override to L1 cached and L3 uncached */
   LSC_CACHE_LOAD_L1C_L3UC = 3,
   /* Override to cache at both L1 and L3 */
   LSC_CACHE_LOAD_L1C_L3C = 4,
   /* Override to L1 streaming load and L3 uncached */
   LSC_CACHE_LOAD_L1S_L3UC = 5,
   /* Override to L1 streaming load and L3 cached */
   LSC_CACHE_LOAD_L1S_L3C = 6,
   /* For load messages, override to L1 invalidate-after-read, and L3 cached. */
   LSC_CACHE_LOAD_L1IAR_L3C = 7,
};
1809
/*
 * Specifies the dataport message override to the default L1 and L3 memory
 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
 * uncached (UC) and cached (WB).
 */
enum PACKED lsc_cache_store {
   /* No override. Use the non-pipelined or surface state cache settings for L1
    * and L3.
    */
   LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   LSC_CACHE_STORE_L1UC_L3UC = 1,
   /* Override to L1 uncached and L3 cached */
   LSC_CACHE_STORE_L1UC_L3WB = 2,
   /* Override to L1 write-through and L3 uncached */
   LSC_CACHE_STORE_L1WT_L3UC = 3,
   /* Override to L1 write-through and L3 cached */
   LSC_CACHE_STORE_L1WT_L3WB = 4,
   /* Override to L1 streaming and L3 uncached */
   LSC_CACHE_STORE_L1S_L3UC = 5,
   /* Override to L1 streaming and L3 cached */
   LSC_CACHE_STORE_L1S_L3WB = 6,
   /* Override to L1 write-back, and L3 cached */
   LSC_CACHE_STORE_L1WB_L3WB = 7,
};
1837
/*
 * Specifies which components of the data payload 4-element vector (X,Y,Z,W)
 * are packed into the register payload.
 *
 * Encoded as a 4-bit mask: bit 0 = X, bit 1 = Y, bit 2 = Z, bit 3 = W.
 */
enum PACKED lsc_cmask {
   LSC_CMASK_X = 0x1,
   LSC_CMASK_Y = 0x2,
   LSC_CMASK_XY = 0x3,
   LSC_CMASK_Z = 0x4,
   LSC_CMASK_XZ = 0x5,
   LSC_CMASK_YZ = 0x6,
   LSC_CMASK_XYZ = 0x7,
   LSC_CMASK_W = 0x8,
   LSC_CMASK_XW = 0x9,
   LSC_CMASK_YW = 0xa,
   LSC_CMASK_XYW = 0xb,
   LSC_CMASK_ZW = 0xc,
   LSC_CMASK_XZW = 0xd,
   LSC_CMASK_YZW = 0xe,
   LSC_CMASK_XYZW = 0xf,
};
1859
/*
 * Specifies the size of the data payload item in a dataport message.
 */
enum PACKED lsc_data_size {
   /* 8-bit scalar data value in memory, packed into a 8-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D8 = 0,
   /* 16-bit scalar data value in memory, packed into a 16-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D16 = 1,
   /* 32-bit scalar data value in memory, packed into 32-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D32 = 2,
   /* 64-bit scalar data value in memory, packed into 64-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D64 = 3,
   /* 8-bit scalar data value in memory, packed into 32-bit unsigned data value
    * in register.
    */
   LSC_DATA_SIZE_D8U32 = 4,
   /* 16-bit scalar data value in memory, packed into 32-bit unsigned data
    * value in register.
    */
   LSC_DATA_SIZE_D16U32 = 5,
   /* 16-bit scalar bfloat16 (BF16) data value in memory, packed into 32-bit
    * float value in register.
    */
   LSC_DATA_SIZE_D16BF32 = 6,
};
1893
/*
 * Specifies the scope of the fence. Scopes are listed from narrowest
 * (thread-group) to widest (system).
 */
enum PACKED lsc_fence_scope {
   /* Wait until all previous memory transactions from this thread are observed
    * within the local thread-group.
    */
   LSC_FENCE_THREADGROUP = 0,
   /* Wait until all previous memory transactions from this thread are observed
    * within the local sub-slice.
    */
   LSC_FENCE_LOCAL = 1,
   /* Wait until all previous memory transactions from this thread are observed
    * in the local tile.
    */
   LSC_FENCE_TILE = 2,
   /* Wait until all previous memory transactions from this thread are observed
    * in the local GPU.
    */
   LSC_FENCE_GPU = 3,
   /* Wait until all previous memory transactions from this thread are observed
    * across all GPUs in the system.
    */
   LSC_FENCE_ALL_GPU = 4,
   /* Wait until all previous memory transactions from this thread are observed
    * at the "system" level.
    */
   LSC_FENCE_SYSTEM_RELEASE = 5,
   /* For GPUs that do not follow PCIe Write ordering for downstream writes
    * targeting device memory, a fence message with scope=System_Acquire will
    * commit to device memory all downstream and peer writes that have reached
    * the device.
    */
   LSC_FENCE_SYSTEM_ACQUIRE = 6,
};
1929
/*
 * Specifies the type of cache flush operation to perform after a fence is
 * complete.
 *
 * NOTE(review): the M/I/V letters below appear to denote cache-line
 * coherence states (Modified/Invalid/Valid-clean) -- confirm against the
 * hardware documentation.
 */
enum PACKED lsc_flush_type {
   /* No flush; the fence only orders prior transactions. */
   LSC_FLUSH_TYPE_NONE = 0,
   /*
    * For a R/W cache, evict dirty lines (M to I state) and invalidate clean
    * lines. For a RO cache, invalidate clean lines.
    */
   LSC_FLUSH_TYPE_EVICT = 1,
   /*
    * For both R/W and RO cache, invalidate clean lines in the cache.
    */
   LSC_FLUSH_TYPE_INVALIDATE = 2,
   /*
    * For a R/W cache, invalidate dirty lines (M to I state), without
    * write-back to next level. This opcode does nothing for a RO cache.
    */
   LSC_FLUSH_TYPE_DISCARD = 3,
   /*
    * For a R/W cache, write-back dirty lines to the next level, but kept in
    * the cache as "clean" (M to V state). This opcode does nothing for a RO
    * cache.
    */
   LSC_FLUSH_TYPE_CLEAN = 4,
   /*
    * Flush "RW" section of the L3 cache, but leave L1 and L2 caches untouched.
    */
   LSC_FLUSH_TYPE_L3ONLY = 5,
};
1961
/* Selects how a UGM (untyped global memory -- TODO confirm expansion) fence
 * message is routed.
 */
enum PACKED lsc_backup_fence_routing {
   /* Normal routing: UGM fence is routed to UGM pipeline. */
   LSC_NORMAL_ROUTING,
   /* Route UGM fence to LSC unit. */
   LSC_ROUTE_TO_LSC,
};
1968
/*
 * Specifies the size of the vector in a dataport message.
 */
enum PACKED lsc_vect_size {
   LSC_VECT_SIZE_V1 = 0,    /* Vector length 1 */
   LSC_VECT_SIZE_V2 = 1,    /* Vector length 2 */
   LSC_VECT_SIZE_V3 = 2,    /* Vector length 3 */
   LSC_VECT_SIZE_V4 = 3,    /* Vector length 4 */
   LSC_VECT_SIZE_V8 = 4,    /* Vector length 8 */
   LSC_VECT_SIZE_V16 = 5,   /* Vector length 16 */
   LSC_VECT_SIZE_V32 = 6,   /* Vector length 32 */
   LSC_VECT_SIZE_V64 = 7,   /* Vector length 64 */
};
1982
/* NOTE(review): presumably indicates the address payload fits in a single
 * register -- confirm against the LSC message builders that use it. */
#define LSC_ONE_ADDR_REG 1
1984
1985 #endif /* BRW_EU_DEFINES_H */
1986