1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32 #ifndef BRW_EU_DEFINES_H
33 #define BRW_EU_DEFINES_H
34
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include "util/macros.h"
38 #include "dev/intel_device_info.h"
39
40 /* The following hunk, up-to "Execution Unit" is used by both the
41 * intel/compiler and i965 codebase. */
42
/* Build a 32-bit mask covering bits [high:low], inclusive.
 * NOTE: invalid for a full 32-bit span (high = 31, low = 0), since
 * 1u << 32 is undefined behavior; no caller may request that range.
 */
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
/* Using the GNU statement expression extension */
/* Shift `value` into the position named by `field` (using the
 * field ## _SHIFT / field ## _MASK convention) and assert that it fits.
 */
#define SET_FIELD(value, field) \
   ({ \
      uint32_t fieldval = (uint32_t)(value) << field ## _SHIFT; \
      assert((fieldval & ~ field ## _MASK) == 0); \
      fieldval & field ## _MASK; \
   })

/* Shift `value` into bits [high:low] and assert that it fits in the range. */
#define SET_BITS(value, high, low) \
   ({ \
      const uint32_t fieldval = (uint32_t)(value) << (low); \
      assert((fieldval & ~INTEL_MASK(high, low)) == 0); \
      fieldval & INTEL_MASK(high, low); \
   })

/* Extract bits [high:low] of `data`, shifted down to bit 0.
 * `data` is parenthesized so that expression arguments (e.g. `a | b`)
 * associate correctly with the mask AND.
 */
#define GET_BITS(data, high, low) (((data) & INTEL_MASK((high), (low))) >> (low))
/* Extract the field named by `field` from an instruction/register word. */
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
61
/* Bitfields for the URB_WRITE message, DW2 of message header: */
#define URB_WRITE_PRIM_END 0x1      /* this write ends the current primitive */
#define URB_WRITE_PRIM_START 0x2    /* this write starts a new primitive */
#define URB_WRITE_PRIM_TYPE_SHIFT 2 /* primitive type occupies the bits above */

#define BRW_SPRITE_POINT_ENABLE 16

/* Control-data formats for the GS "Control Data Header"
 * (cut bits vs. streamed-vertex/stream-ID bits).
 */
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT 0
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID 1
71
72 /* Execution Unit (EU) defines
73 */
74
/* Instruction access modes: Align1 (scalar regioning) vs. Align16 (vec4). */
#define BRW_ALIGN_1 0
#define BRW_ALIGN_16 1

/* Register addressing modes: direct vs. register-indirect. */
#define BRW_ADDRESS_DIRECT 0
#define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER 1

/* Swizzle/channel-select indices for the four vector components. */
#define BRW_CHANNEL_X 0
#define BRW_CHANNEL_Y 1
#define BRW_CHANNEL_Z 2
#define BRW_CHANNEL_W 3
85
/* Instruction compression control (see also the GFX6_COMPRESSION_* encodings
 * below, which replace these).
 */
enum brw_compression {
   BRW_COMPRESSION_NONE = 0,
   BRW_COMPRESSION_2NDHALF = 1,   /* operate on the second half only */
   BRW_COMPRESSION_COMPRESSED = 2,
};
91
/* Gfx6+ quarter-control encodings: which quarter (1Q-4Q) or half (1H/2H) of
 * the channels an instruction applies to. Note 1H aliases 1Q and 2H aliases 3Q.
 */
#define GFX6_COMPRESSION_1Q 0
#define GFX6_COMPRESSION_2Q 1
#define GFX6_COMPRESSION_3Q 2
#define GFX6_COMPRESSION_4Q 3
#define GFX6_COMPRESSION_1H 0
#define GFX6_COMPRESSION_2H 2
98
/* Conditional-modifier encodings. EQ/NEQ are aliases of Z/NZ (same hardware
 * values, two spellings).
 */
enum PACKED brw_conditional_mod {
   BRW_CONDITIONAL_NONE = 0,
   BRW_CONDITIONAL_Z = 1,
   BRW_CONDITIONAL_NZ = 2,
   BRW_CONDITIONAL_EQ = 1, /* Z */
   BRW_CONDITIONAL_NEQ = 2, /* NZ */
   BRW_CONDITIONAL_G = 3,
   BRW_CONDITIONAL_GE = 4,
   BRW_CONDITIONAL_L = 5,
   BRW_CONDITIONAL_LE = 6,
   BRW_CONDITIONAL_R = 7, /* Gen <= 5 */
   BRW_CONDITIONAL_O = 8,
   BRW_CONDITIONAL_U = 9,
};
113
/* Debug control field values. */
#define BRW_DEBUG_NONE 0
#define BRW_DEBUG_BREAKPOINT 1

/* Dependency-control (DepCtrl) field values — NOTCLEARED/NOTCHECKED
 * selectively disable scoreboard dependency handling.
 */
#define BRW_DEPENDENCY_NORMAL 0
#define BRW_DEPENDENCY_NOTCLEARED 1
#define BRW_DEPENDENCY_NOTCHECKED 2
#define BRW_DEPENDENCY_DISABLE 3
121
/* Execution-size (SIMD width) encodings: encoded value n means 2^n channels. */
enum PACKED brw_execution_size {
   BRW_EXECUTE_1 = 0,
   BRW_EXECUTE_2 = 1,
   BRW_EXECUTE_4 = 2,
   BRW_EXECUTE_8 = 3,
   BRW_EXECUTE_16 = 4,
   BRW_EXECUTE_32 = 5,
};
130
/* Horizontal-stride encodings for register regions. Note the values are
 * encodings, not literal strides: stride 4 is encoded as 3.
 */
enum PACKED brw_horizontal_stride {
   BRW_HORIZONTAL_STRIDE_0 = 0,
   BRW_HORIZONTAL_STRIDE_1 = 1,
   BRW_HORIZONTAL_STRIDE_2 = 2,
   BRW_HORIZONTAL_STRIDE_4 = 3,
};
137
/* Source horizontal-stride encodings for Gfx10+ Align1 3-src instructions
 * (same encoding scheme as brw_horizontal_stride: stride 4 encodes as 3).
 */
enum PACKED gfx10_align1_3src_src_horizontal_stride {
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2 = 2,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4 = 3,
};
144
/* Destination horizontal-stride encodings for Gfx10+ Align1 3-src
 * instructions; only strides 1 and 2 are encodable.
 */
enum PACKED gfx10_align1_3src_dst_horizontal_stride {
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1 = 0,
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_2 = 1,
};
149
/* Saturation control for the destination. */
#define BRW_INSTRUCTION_NORMAL 0
#define BRW_INSTRUCTION_SATURATE 1

/* Pre-Gfx6 mask control field. */
#define BRW_MASK_ENABLE 0
#define BRW_MASK_DISABLE 1

/** @{
 *
 * Gfx6 has replaced "mask enable/disable" with WECtrl, which is
 * effectively the same but much simpler to think about. Now, there
 * are two contributors ANDed together to determine whether channels
 * are executed: the predication on the instruction, and the channel
 * write enable.
 */
/**
 * This is the default value. It means that a channel's write enable is set
 * if the per-channel IP is pointing at this instruction.
 */
#define BRW_WE_NORMAL 0
/**
 * This is used like BRW_MASK_DISABLE, and causes all channels to have
 * their write enable set. Note that predication still contributes to
 * whether the channel actually gets written.
 */
#define BRW_WE_ALL 1
/** @} */
176
/**
 * EU instruction opcodes.
 *
 * The first NUM_BRW_OPCODES entries are real hardware opcodes; everything
 * after NUM_BRW_OPCODES is a compiler-internal (virtual) opcode that is
 * lowered to real instructions by the backends.
 */
enum opcode {
   /* These are the actual hardware instructions. */
   BRW_OPCODE_ILLEGAL,
   BRW_OPCODE_SYNC,
   BRW_OPCODE_MOV,
   BRW_OPCODE_SEL,
   BRW_OPCODE_MOVI, /**< G45+ */
   BRW_OPCODE_NOT,
   BRW_OPCODE_AND,
   BRW_OPCODE_OR,
   BRW_OPCODE_XOR,
   BRW_OPCODE_SHR,
   BRW_OPCODE_SHL,
   BRW_OPCODE_DIM, /**< Gfx7.5 only */
   BRW_OPCODE_SMOV, /**< Gfx8+ */
   BRW_OPCODE_ASR,
   BRW_OPCODE_ROR, /**< Gfx11+ */
   BRW_OPCODE_ROL, /**< Gfx11+ */
   BRW_OPCODE_CMP,
   BRW_OPCODE_CMPN,
   BRW_OPCODE_CSEL, /**< Gfx8+ */
   BRW_OPCODE_F32TO16, /**< Gfx7 only */
   BRW_OPCODE_F16TO32, /**< Gfx7 only */
   BRW_OPCODE_BFREV, /**< Gfx7+ */
   BRW_OPCODE_BFE, /**< Gfx7+ */
   BRW_OPCODE_BFI1, /**< Gfx7+ */
   BRW_OPCODE_BFI2, /**< Gfx7+ */
   BRW_OPCODE_JMPI,
   BRW_OPCODE_BRD, /**< Gfx7+ */
   BRW_OPCODE_IF,
   BRW_OPCODE_IFF, /**< Pre-Gfx6 */
   BRW_OPCODE_BRC, /**< Gfx7+ */
   BRW_OPCODE_ELSE,
   BRW_OPCODE_ENDIF,
   BRW_OPCODE_DO, /**< Pre-Gfx6 */
   BRW_OPCODE_CASE, /**< Gfx6 only */
   BRW_OPCODE_WHILE,
   BRW_OPCODE_BREAK,
   BRW_OPCODE_CONTINUE,
   BRW_OPCODE_HALT,
   BRW_OPCODE_CALLA, /**< Gfx7.5+ */
   BRW_OPCODE_MSAVE, /**< Pre-Gfx6 */
   BRW_OPCODE_CALL, /**< Gfx6+ */
   BRW_OPCODE_MREST, /**< Pre-Gfx6 */
   BRW_OPCODE_RET, /**< Gfx6+ */
   BRW_OPCODE_PUSH, /**< Pre-Gfx6 */
   BRW_OPCODE_FORK, /**< Gfx6 only */
   BRW_OPCODE_GOTO, /**< Gfx8+ */
   BRW_OPCODE_POP, /**< Pre-Gfx6 */
   BRW_OPCODE_WAIT,
   BRW_OPCODE_SEND,
   BRW_OPCODE_SENDC,
   BRW_OPCODE_SENDS, /**< Gfx9+ */
   BRW_OPCODE_SENDSC, /**< Gfx9+ */
   BRW_OPCODE_MATH, /**< Gfx6+ */
   BRW_OPCODE_ADD,
   BRW_OPCODE_MUL,
   BRW_OPCODE_AVG,
   BRW_OPCODE_FRC,
   BRW_OPCODE_RNDU,
   BRW_OPCODE_RNDD,
   BRW_OPCODE_RNDE,
   BRW_OPCODE_RNDZ,
   BRW_OPCODE_MAC,
   BRW_OPCODE_MACH,
   BRW_OPCODE_LZD,
   BRW_OPCODE_FBH, /**< Gfx7+ */
   BRW_OPCODE_FBL, /**< Gfx7+ */
   BRW_OPCODE_CBIT, /**< Gfx7+ */
   BRW_OPCODE_ADDC, /**< Gfx7+ */
   BRW_OPCODE_SUBB, /**< Gfx7+ */
   BRW_OPCODE_SAD2,
   BRW_OPCODE_SADA2,
   BRW_OPCODE_ADD3, /* Gen12+ only */
   BRW_OPCODE_DP4,
   BRW_OPCODE_DPH,
   BRW_OPCODE_DP3,
   BRW_OPCODE_DP2,
   BRW_OPCODE_DP4A, /**< Gfx12+ */
   BRW_OPCODE_LINE,
   BRW_OPCODE_PLN, /**< G45+ */
   BRW_OPCODE_MAD, /**< Gfx6+ */
   BRW_OPCODE_LRP, /**< Gfx6+ */
   BRW_OPCODE_MADM, /**< Gfx8+ */
   BRW_OPCODE_NENOP, /**< G45 only */
   BRW_OPCODE_NOP,

   /** Count of real hardware opcodes; virtual opcodes start here. */
   NUM_BRW_OPCODES,

   /* These are compiler backend opcodes that get translated into other
    * instructions.
    */
   FS_OPCODE_FB_WRITE = NUM_BRW_OPCODES,

   /**
    * Same as FS_OPCODE_FB_WRITE but expects its arguments separately as
    * individual sources instead of as a single payload blob. The
    * position/ordering of the arguments are defined by the enum
    * fb_write_logical_srcs.
    */
   FS_OPCODE_FB_WRITE_LOGICAL,

   FS_OPCODE_REP_FB_WRITE,

   FS_OPCODE_FB_READ,
   FS_OPCODE_FB_READ_LOGICAL,

   SHADER_OPCODE_RCP,
   SHADER_OPCODE_RSQ,
   SHADER_OPCODE_SQRT,
   SHADER_OPCODE_EXP2,
   SHADER_OPCODE_LOG2,
   SHADER_OPCODE_POW,
   SHADER_OPCODE_INT_QUOTIENT,
   SHADER_OPCODE_INT_REMAINDER,
   SHADER_OPCODE_SIN,
   SHADER_OPCODE_COS,

   /**
    * A generic "send" opcode. The first two sources are the message
    * descriptor and extended message descriptor respectively. The third
    * and optional fourth sources are the message payload
    */
   SHADER_OPCODE_SEND,

   /**
    * An "undefined" write which does nothing but indicates to liveness that
    * we don't care about any values in the register which predate this
    * instruction. Used to prevent partial writes from causing issues with
    * live ranges.
    */
   SHADER_OPCODE_UNDEF,

   /**
    * Texture sampling opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode but instead of taking a single payload blob they expect their
    * arguments separately as individual sources. The position/ordering of the
    * arguments are defined by the enum tex_logical_srcs.
    */
   SHADER_OPCODE_TEX,
   SHADER_OPCODE_TEX_LOGICAL,
   SHADER_OPCODE_TXD,
   SHADER_OPCODE_TXD_LOGICAL,
   SHADER_OPCODE_TXF,
   SHADER_OPCODE_TXF_LOGICAL,
   SHADER_OPCODE_TXF_LZ,
   SHADER_OPCODE_TXL,
   SHADER_OPCODE_TXL_LOGICAL,
   SHADER_OPCODE_TXL_LZ,
   SHADER_OPCODE_TXS,
   SHADER_OPCODE_TXS_LOGICAL,
   FS_OPCODE_TXB,
   FS_OPCODE_TXB_LOGICAL,
   SHADER_OPCODE_TXF_CMS,
   SHADER_OPCODE_TXF_CMS_LOGICAL,
   SHADER_OPCODE_TXF_CMS_W,
   SHADER_OPCODE_TXF_CMS_W_LOGICAL,
   SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL,
   SHADER_OPCODE_TXF_UMS,
   SHADER_OPCODE_TXF_UMS_LOGICAL,
   SHADER_OPCODE_TXF_MCS,
   SHADER_OPCODE_TXF_MCS_LOGICAL,
   SHADER_OPCODE_LOD,
   SHADER_OPCODE_LOD_LOGICAL,
   SHADER_OPCODE_TG4,
   SHADER_OPCODE_TG4_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET,
   SHADER_OPCODE_TG4_OFFSET_LOGICAL,
   SHADER_OPCODE_SAMPLEINFO,
   SHADER_OPCODE_SAMPLEINFO_LOGICAL,

   SHADER_OPCODE_IMAGE_SIZE_LOGICAL,

   /**
    * Combines multiple sources of size 1 into a larger virtual GRF.
    * For example, parameters for a send-from-GRF message. Or, updating
    * channels of a size 4 VGRF used to store vec4s such as texturing results.
    *
    * This will be lowered into MOVs from each source to consecutive offsets
    * of the destination VGRF.
    *
    * src[0] may be BAD_FILE. If so, the lowering pass skips emitting the MOV,
    * but still reserves the first channel of the destination VGRF. This can be
    * used to reserve space for, say, a message header set up by the generators.
    */
   SHADER_OPCODE_LOAD_PAYLOAD,

   /**
    * Packs a number of sources into a single value. Unlike LOAD_PAYLOAD, this
    * acts intra-channel, obtaining the final value for each channel by
    * combining the sources values for the same channel, the first source
    * occupying the lowest bits and the last source occupying the highest
    * bits.
    */
   FS_OPCODE_PACK,

   /**
    * Typed and untyped surface access opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode but instead of taking a single payload blob they expect their
    * arguments separately as individual sources:
    *
    * Source 0: [required] Surface coordinates.
    * Source 1: [optional] Operation source.
    * Source 2: [required] Surface index.
    * Source 3: [required] Number of coordinate components (as UD immediate).
    * Source 4: [required] Opcode-specific control immediate, same as source 2
    *           of the matching non-LOGICAL opcode.
    */
   VEC4_OPCODE_UNTYPED_ATOMIC,
   SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
   VEC4_OPCODE_UNTYPED_SURFACE_READ,
   SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
   VEC4_OPCODE_UNTYPED_SURFACE_WRITE,
   SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,

   SHADER_OPCODE_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_OWORD_BLOCK_WRITE_LOGICAL,

   /**
    * Untyped A64 surface access opcodes.
    *
    * Source 0: 64-bit address
    * Source 1: Operational source
    * Source 2: [required] Opcode-specific control immediate, same as source 2
    *           of the matching non-LOGICAL opcode.
    */
   SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
   SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
   SHADER_OPCODE_A64_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_A64_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
   SHADER_OPCODE_A64_OWORD_BLOCK_WRITE_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT16_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT16_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT32_LOGICAL,
   SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT64_LOGICAL,

   SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
   SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
   SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,

   SHADER_OPCODE_RND_MODE,
   SHADER_OPCODE_FLOAT_CONTROL_MODE,

   /**
    * Byte scattered write/read opcodes.
    *
    * LOGICAL opcodes are eventually translated to the matching non-LOGICAL
    * opcode, but instead of taking a single payload blob they expect their
    * arguments separately as individual sources, like untyped write/read.
    */
   SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
   SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL,
   SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL,

   /**
    * Memory fence messages.
    *
    * Source 0: Must be register g0, used as header.
    * Source 1: Immediate bool to indicate whether control is returned to the
    *           thread only after the fence has been honored.
    * Source 2: Immediate byte indicating which memory to fence.  Zero means
    *           global memory; GFX7_BTI_SLM means SLM (for Gfx11+ only).
    *
    * Vec4 backend only uses Source 0.
    */
   SHADER_OPCODE_MEMORY_FENCE,

   /**
    * Scheduling-only fence.
    *
    * Sources can be used to force a stall until the registers in those are
    * available.  This might generate MOVs or SYNC_NOPs (Gfx12+).
    */
   FS_OPCODE_SCHEDULING_FENCE,

   SHADER_OPCODE_GFX4_SCRATCH_READ,
   SHADER_OPCODE_GFX4_SCRATCH_WRITE,
   SHADER_OPCODE_GFX7_SCRATCH_READ,

   SHADER_OPCODE_SCRATCH_HEADER,

   /**
    * Gfx8+ SIMD8 URB messages.
    */
   SHADER_OPCODE_URB_READ_LOGICAL,
   SHADER_OPCODE_URB_WRITE_LOGICAL,

   /**
    * Return the index of the first enabled live channel and assign it to
    * the first component of the destination.  Frequently used as input
    * for the BROADCAST pseudo-opcode.
    */
   SHADER_OPCODE_FIND_LIVE_CHANNEL,

   /**
    * Return the index of the last enabled live channel and assign it to
    * the first component of the destination.
    */
   SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL,

   /**
    * Return the current execution mask in the specified flag subregister.
    * Can be CSE'ed more easily than a plain MOV from the ce0 ARF register.
    */
   FS_OPCODE_LOAD_LIVE_CHANNELS,

   /**
    * Pick the channel from its first source register given by the index
    * specified as second source.  Useful for variable indexing of surfaces.
    *
    * Note that because the result of this instruction is by definition
    * uniform and it can always be splatted to multiple channels using a
    * scalar regioning mode, only the first channel of the destination region
    * is guaranteed to be updated, which implies that BROADCAST instructions
    * should usually be marked force_writemask_all.
    */
   SHADER_OPCODE_BROADCAST,

   /* Pick the channel from its first source register given by the index
    * specified as second source.
    *
    * This is similar to the BROADCAST instruction except that it takes a
    * dynamic index and potentially puts a different value in each output
    * channel.
    */
   SHADER_OPCODE_SHUFFLE,

   /* Select between src0 and src1 based on channel enables.
    *
    * This instruction copies src0 into the enabled channels of the
    * destination and copies src1 into the disabled channels.
    */
   SHADER_OPCODE_SEL_EXEC,

   /* This turns into an align16 mov from src0 to dst with a swizzle
    * provided as an immediate in src1.
    */
   SHADER_OPCODE_QUAD_SWIZZLE,

   /* Take every Nth element in src0 and broadcast it to the group of N
    * channels in which it lives in the destination.  The offset within the
    * cluster is given by src1 and the cluster size is given by src2.
    */
   SHADER_OPCODE_CLUSTER_BROADCAST,

   SHADER_OPCODE_GET_BUFFER_SIZE,

   SHADER_OPCODE_INTERLOCK,

   /** Target for a HALT
    *
    * All HALT instructions in a shader must target the same jump point and
    * that point is denoted by a HALT_TARGET instruction.
    */
   SHADER_OPCODE_HALT_TARGET,

   VEC4_OPCODE_MOV_BYTES,
   VEC4_OPCODE_PACK_BYTES,
   VEC4_OPCODE_UNPACK_UNIFORM,
   VEC4_OPCODE_DOUBLE_TO_F32,
   VEC4_OPCODE_DOUBLE_TO_D32,
   VEC4_OPCODE_DOUBLE_TO_U32,
   VEC4_OPCODE_TO_DOUBLE,
   VEC4_OPCODE_PICK_LOW_32BIT,
   VEC4_OPCODE_PICK_HIGH_32BIT,
   VEC4_OPCODE_SET_LOW_32BIT,
   VEC4_OPCODE_SET_HIGH_32BIT,
   VEC4_OPCODE_MOV_FOR_SCRATCH,
   VEC4_OPCODE_ZERO_OOB_PUSH_REGS,

   FS_OPCODE_DDX_COARSE,
   FS_OPCODE_DDX_FINE,
   /**
    * Compute dFdy(), dFdyCoarse(), or dFdyFine().
    */
   FS_OPCODE_DDY_COARSE,
   FS_OPCODE_DDY_FINE,
   FS_OPCODE_LINTERP,
   FS_OPCODE_PIXEL_X,
   FS_OPCODE_PIXEL_Y,
   FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
   FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GFX7,
   FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GFX4,
   FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
   FS_OPCODE_SET_SAMPLE_ID,
   FS_OPCODE_PACK_HALF_2x16_SPLIT,
   FS_OPCODE_INTERPOLATE_AT_SAMPLE,
   FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
   FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET,

   VEC4_VS_OPCODE_URB_WRITE,
   VS_OPCODE_PULL_CONSTANT_LOAD,
   VS_OPCODE_PULL_CONSTANT_LOAD_GFX7,

   VS_OPCODE_UNPACK_FLAGS_SIMD4X2,

   /**
    * Write geometry shader output data to the URB.
    *
    * Unlike VEC4_VS_OPCODE_URB_WRITE, this opcode doesn't do an implied move from
    * R0 to the first MRF.  This allows the geometry shader to override the
    * "Slot {0,1} Offset" fields in the message header.
    */
   VEC4_GS_OPCODE_URB_WRITE,

   /**
    * Write geometry shader output data to the URB and request a new URB
    * handle (gfx6).
    *
    * This opcode doesn't do an implied move from R0 to the first MRF.
    */
   VEC4_GS_OPCODE_URB_WRITE_ALLOCATE,

   /**
    * Terminate the geometry shader thread by doing an empty URB write.
    *
    * This opcode doesn't do an implied move from R0 to the first MRF.  This
    * allows the geometry shader to override the "GS Number of Output Vertices
    * for Slot {0,1}" fields in the message header.
    */
   GS_OPCODE_THREAD_END,

   /**
    * Set the "Slot {0,1} Offset" fields of a URB_WRITE message header.
    *
    * - dst is the MRF containing the message header.
    *
    * - src0.x indicates which portion of the URB should be written to (e.g. a
    *   vertex number)
    *
    * - src1 is an immediate multiplier which will be applied to src0
    *   (e.g. the size of a single vertex in the URB).
    *
    * Note: the hardware will apply this offset *in addition to* the offset in
    * vec4_instruction::offset.
    */
   GS_OPCODE_SET_WRITE_OFFSET,

   /**
    * Set the "GS Number of Output Vertices for Slot {0,1}" fields of a
    * URB_WRITE message header.
    *
    * - dst is the MRF containing the message header.
    *
    * - src0.x is the vertex count.  The upper 16 bits will be ignored.
    */
   GS_OPCODE_SET_VERTEX_COUNT,

   /**
    * Set DWORD 2 of dst to the value in src.
    */
   GS_OPCODE_SET_DWORD_2,

   /**
    * Prepare the dst register for storage in the "Channel Mask" fields of a
    * URB_WRITE message header.
    *
    * DWORD 4 of dst is shifted left by 4 bits, so that later,
    * GS_OPCODE_SET_CHANNEL_MASKS can OR DWORDs 0 and 4 together to form the
    * final channel mask.
    *
    * Note: since GS_OPCODE_SET_CHANNEL_MASKS ORs DWORDs 0 and 4 together to
    * form the final channel mask, DWORDs 0 and 4 of the dst register must not
    * have any extraneous bits set prior to execution of this opcode (that is,
    * they should be in the range 0x0 to 0xf).
    */
   GS_OPCODE_PREPARE_CHANNEL_MASKS,

   /**
    * Set the "Channel Mask" fields of a URB_WRITE message header.
    *
    * - dst is the MRF containing the message header.
    *
    * - src.x is the channel mask, as prepared by
    *   GS_OPCODE_PREPARE_CHANNEL_MASKS.  DWORDs 0 and 4 are OR'ed together to
    *   form the final channel mask.
    */
   GS_OPCODE_SET_CHANNEL_MASKS,

   /**
    * Get the "Instance ID" fields from the payload.
    *
    * - dst is the GRF for gl_InvocationID.
    */
   GS_OPCODE_GET_INSTANCE_ID,

   /**
    * Send a FF_SYNC message to allocate initial URB handles (gfx6).
    *
    * - dst will be used as the writeback register for the FF_SYNC operation.
    *
    * - src0 is the number of primitives written.
    *
    * - src1 is the value to hold in M0.0: number of SO vertices to write
    *   and number of SO primitives needed. Its value will be overwritten
    *   with the SVBI values if transform feedback is enabled.
    *
    * Note: This opcode uses an implicit MRF register for the ff_sync message
    * header, so the caller is expected to set inst->base_mrf and initialize
    * that MRF register to r0.  This opcode will also write to this MRF register
    * to include the allocated URB handle so it can then be reused directly as
    * the header in the URB write operation we are allocating the handle for.
    */
   GS_OPCODE_FF_SYNC,

   /**
    * Move r0.1 (which holds PrimitiveID information in gfx6) to a separate
    * register.
    *
    * - dst is the GRF where PrimitiveID information will be moved.
    */
   GS_OPCODE_SET_PRIMITIVE_ID,

   /**
    * Write transform feedback data to the SVB by sending a SVB WRITE message.
    * Used in gfx6.
    *
    * - dst is the MRF register containing the message header.
    *
    * - src0 is the register where the vertex data is going to be copied from.
    *
    * - src1 is the destination register when write commit occurs.
    */
   GS_OPCODE_SVB_WRITE,

   /**
    * Set destination index in the SVB write message payload (M0.5). Used
    * in gfx6 for transform feedback.
    *
    * - dst is the header to save the destination indices for SVB WRITE.
    * - src is the register that holds the destination indices value.
    */
   GS_OPCODE_SVB_SET_DST_INDEX,

   /**
    * Prepare Mx.0 subregister for being used in the FF_SYNC message header.
    * Used in gfx6 for transform feedback.
    *
    * - dst will hold the register with the final Mx.0 value.
    *
    * - src0 has the number of vertices emitted in SO (NumSOVertsToWrite)
    *
    * - src1 has the number of needed primitives for SO (NumSOPrimsNeeded)
    *
    * - src2 is the value to hold in M0: number of SO vertices to write
    *   and number of SO primitives needed.
    */
   GS_OPCODE_FF_SYNC_SET_PRIMITIVES,

   /**
    * Terminate the compute shader.
    */
   CS_OPCODE_CS_TERMINATE,

   /**
    * GLSL barrier()
    */
   SHADER_OPCODE_BARRIER,

   /**
    * Calculate the high 32-bits of a 32x32 multiply.
    */
   SHADER_OPCODE_MULH,

   /** Signed subtraction with saturation. */
   SHADER_OPCODE_ISUB_SAT,

   /** Unsigned subtraction with saturation. */
   SHADER_OPCODE_USUB_SAT,

   /**
    * A MOV that uses VxH indirect addressing.
    *
    * Source 0: A register to start from (HW_REG).
    * Source 1: An indirect offset (in bytes, UD GRF).
    * Source 2: The length of the region that could be accessed (in bytes,
    *           UD immediate).
    */
   SHADER_OPCODE_MOV_INDIRECT,

   /** Fills out a relocatable immediate */
   SHADER_OPCODE_MOV_RELOC_IMM,

   VEC4_OPCODE_URB_READ,
   TCS_OPCODE_GET_INSTANCE_ID,
   VEC4_TCS_OPCODE_URB_WRITE,
   VEC4_TCS_OPCODE_SET_INPUT_URB_OFFSETS,
   VEC4_TCS_OPCODE_SET_OUTPUT_URB_OFFSETS,
   TCS_OPCODE_GET_PRIMITIVE_ID,
   TCS_OPCODE_CREATE_BARRIER_HEADER,
   TCS_OPCODE_SRC0_010_IS_ZERO,
   TCS_OPCODE_RELEASE_INPUT,
   TCS_OPCODE_THREAD_END,

   TES_OPCODE_GET_PRIMITIVE_ID,
   TES_OPCODE_CREATE_INPUT_READ_HEADER,
   TES_OPCODE_ADD_INDIRECT_URB_OFFSET,

   SHADER_OPCODE_BTD_SPAWN_LOGICAL,
   SHADER_OPCODE_BTD_RETIRE_LOGICAL,

   SHADER_OPCODE_READ_SR_REG,

   RT_OPCODE_TRACE_RAY_LOGICAL,
};
793
/** Bitmask flags controlling how a URB_WRITE message is emitted. */
enum brw_urb_write_flags {
   BRW_URB_WRITE_NO_FLAGS = 0,

   /**
    * Causes a new URB entry to be allocated, and its address stored in the
    * destination register (gen < 7).
    */
   BRW_URB_WRITE_ALLOCATE = 0x1,

   /**
    * Causes the current URB entry to be deallocated (gen < 7).
    */
   BRW_URB_WRITE_UNUSED = 0x2,

   /**
    * Causes the thread to terminate.
    */
   BRW_URB_WRITE_EOT = 0x4,

   /**
    * Indicates that the given URB entry is complete, and may be sent further
    * down the 3D pipeline (gen < 7).
    */
   BRW_URB_WRITE_COMPLETE = 0x8,

   /**
    * Indicates that an additional offset (which may be different for the two
    * vec4 slots) is stored in the message header (gen == 7).
    */
   BRW_URB_WRITE_PER_SLOT_OFFSET = 0x10,

   /**
    * Indicates that the channel masks in the URB_WRITE message header should
    * not be overridden to 0xff (gen == 7).
    */
   BRW_URB_WRITE_USE_CHANNEL_MASKS = 0x20,

   /**
    * Indicates that the data should be sent to the URB using the
    * URB_WRITE_OWORD message rather than URB_WRITE_HWORD (gen == 7).  This
    * causes offsets to be interpreted as multiples of an OWORD instead of an
    * HWORD, and only allows one OWORD to be written.
    */
   BRW_URB_WRITE_OWORD = 0x40,

   /**
    * Convenient combination of flags: end the thread while simultaneously
    * marking the given URB entry as complete.
    */
   BRW_URB_WRITE_EOT_COMPLETE = BRW_URB_WRITE_EOT | BRW_URB_WRITE_COMPLETE,

   /**
    * Convenient combination of flags: mark the given URB entry as complete
    * and simultaneously allocate a new one.
    */
   BRW_URB_WRITE_ALLOCATE_COMPLETE =
      BRW_URB_WRITE_ALLOCATE | BRW_URB_WRITE_COMPLETE,
};
852
/** Source slot layout for FS_OPCODE_FB_WRITE_LOGICAL. */
enum fb_write_logical_srcs {
   FB_WRITE_LOGICAL_SRC_COLOR0,      /* REQUIRED */
   FB_WRITE_LOGICAL_SRC_COLOR1,      /* for dual source blend messages */
   FB_WRITE_LOGICAL_SRC_SRC0_ALPHA,
   FB_WRITE_LOGICAL_SRC_SRC_DEPTH,   /* gl_FragDepth */
   FB_WRITE_LOGICAL_SRC_DST_DEPTH,   /* GFX4-5: passthrough from thread */
   FB_WRITE_LOGICAL_SRC_SRC_STENCIL, /* gl_FragStencilRefARB */
   FB_WRITE_LOGICAL_SRC_OMASK,       /* Sample Mask (gl_SampleMask) */
   FB_WRITE_LOGICAL_SRC_COMPONENTS,  /* REQUIRED */
   FB_WRITE_LOGICAL_NUM_SRCS
};
864
/** Source slot layout for the *_LOGICAL texture sampling opcodes. */
enum tex_logical_srcs {
   /** Texture coordinates */
   TEX_LOGICAL_SRC_COORDINATE,
   /** Shadow comparator */
   TEX_LOGICAL_SRC_SHADOW_C,
   /** dPdx if the operation takes explicit derivatives, otherwise LOD value */
   TEX_LOGICAL_SRC_LOD,
   /** dPdy if the operation takes explicit derivatives */
   TEX_LOGICAL_SRC_LOD2,
   /** Min LOD */
   TEX_LOGICAL_SRC_MIN_LOD,
   /** Sample index */
   TEX_LOGICAL_SRC_SAMPLE_INDEX,
   /** MCS data */
   TEX_LOGICAL_SRC_MCS,
   /** REQUIRED: Texture surface index */
   TEX_LOGICAL_SRC_SURFACE,
   /** Texture sampler index */
   TEX_LOGICAL_SRC_SAMPLER,
   /** Texture surface bindless handle */
   TEX_LOGICAL_SRC_SURFACE_HANDLE,
   /** Texture sampler bindless handle */
   TEX_LOGICAL_SRC_SAMPLER_HANDLE,
   /** Texel offset for gathers */
   TEX_LOGICAL_SRC_TG4_OFFSET,
   /** REQUIRED: Number of coordinate components (as UD immediate) */
   TEX_LOGICAL_SRC_COORD_COMPONENTS,
   /** REQUIRED: Number of derivative components (as UD immediate) */
   TEX_LOGICAL_SRC_GRAD_COMPONENTS,

   TEX_LOGICAL_NUM_SRCS,
};
897
/** Source slot layout for the logical surface access opcodes. */
enum surface_logical_srcs {
   /** Surface binding table index */
   SURFACE_LOGICAL_SRC_SURFACE,
   /** Surface bindless handle */
   SURFACE_LOGICAL_SRC_SURFACE_HANDLE,
   /** Surface address; could be multi-dimensional for typed opcodes */
   SURFACE_LOGICAL_SRC_ADDRESS,
   /** Data to be written or used in an atomic op */
   SURFACE_LOGICAL_SRC_DATA,
   /** Surface number of dimensions.  Affects the size of ADDRESS */
   SURFACE_LOGICAL_SRC_IMM_DIMS,
   /** Per-opcode immediate argument.  For atomics, this is the atomic opcode */
   SURFACE_LOGICAL_SRC_IMM_ARG,
   /**
    * Some instructions with side-effects should not be predicated on
    * sample mask, e.g. lowered stores to scratch.
    */
   SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK,

   SURFACE_LOGICAL_NUM_SRCS
};
919
/** Source slot layout for the A64 (64-bit address) logical opcodes. */
enum a64_logical_srcs {
   /** Address the A64 message operates on */
   A64_LOGICAL_ADDRESS,
   /** Source for the operation (unused for LOAD ops) */
   A64_LOGICAL_SRC,
   /** Per-opcode immediate argument.  Number of dwords, bit size, or atomic op. */
   A64_LOGICAL_ARG,
   /**
    * Some instructions do want to run on helper lanes (like ray queries).
    */
   A64_LOGICAL_ENABLE_HELPERS,

   A64_LOGICAL_NUM_SRCS
};
934
/** Source slot layout for RT_OPCODE_TRACE_RAY_LOGICAL. */
enum rt_logical_srcs {
   /** Address of the globals */
   RT_LOGICAL_SRC_GLOBALS,
   /** Level at which the tracing should start */
   RT_LOGICAL_SRC_BVH_LEVEL,
   /** Type of tracing operation */
   RT_LOGICAL_SRC_TRACE_RAY_CONTROL,
   /** Synchronous tracing (ray query) */
   RT_LOGICAL_SRC_SYNCHRONOUS,

   RT_LOGICAL_NUM_SRCS
};
947
/** Source slot layout for the logical URB read/write opcodes. */
enum urb_logical_srcs {
   URB_LOGICAL_SRC_HANDLE,
   URB_LOGICAL_SRC_PER_SLOT_OFFSETS,
   URB_LOGICAL_SRC_CHANNEL_MASK,
   /** Data to be written.  BAD_FILE for reads. */
   URB_LOGICAL_SRC_DATA,

   URB_LOGICAL_NUM_SRCS
};
957
958
959 #ifdef __cplusplus
960 /**
961 * Allow brw_urb_write_flags enums to be ORed together.
962 */
963 inline brw_urb_write_flags
964 operator|(brw_urb_write_flags x, brw_urb_write_flags y)
965 {
966 return static_cast<brw_urb_write_flags>(static_cast<int>(x) |
967 static_cast<int>(y));
968 }
969 #endif
970
/* Predicate control encodings.  Values 2-7 are shared between the Align1
 * (ANYV/ALLV/ANY2H/...) and Align16 (REPLICATE_X/.../ANY4H/ALL4H)
 * interpretations; which applies depends on the instruction's access mode.
 */
enum PACKED brw_predicate {
   BRW_PREDICATE_NONE = 0,
   BRW_PREDICATE_NORMAL = 1,
   BRW_PREDICATE_ALIGN1_ANYV = 2,
   BRW_PREDICATE_ALIGN1_ALLV = 3,
   BRW_PREDICATE_ALIGN1_ANY2H = 4,
   BRW_PREDICATE_ALIGN1_ALL2H = 5,
   BRW_PREDICATE_ALIGN1_ANY4H = 6,
   BRW_PREDICATE_ALIGN1_ALL4H = 7,
   BRW_PREDICATE_ALIGN1_ANY8H = 8,
   BRW_PREDICATE_ALIGN1_ALL8H = 9,
   BRW_PREDICATE_ALIGN1_ANY16H = 10,
   BRW_PREDICATE_ALIGN1_ALL16H = 11,
   BRW_PREDICATE_ALIGN1_ANY32H = 12,
   BRW_PREDICATE_ALIGN1_ALL32H = 13,
   BRW_PREDICATE_ALIGN16_REPLICATE_X = 2,
   BRW_PREDICATE_ALIGN16_REPLICATE_Y = 3,
   BRW_PREDICATE_ALIGN16_REPLICATE_Z = 4,
   BRW_PREDICATE_ALIGN16_REPLICATE_W = 5,
   BRW_PREDICATE_ALIGN16_ANY4H = 6,
   BRW_PREDICATE_ALIGN16_ALL4H = 7,
};
993
/* Register files.  The first four values are hardware encodings; the short
 * aliases (ARF, FIXED_GRF, MRF, IMM) are compiler-side spellings of the same
 * values, and everything from VGRF on exists only inside the compiler.
 */
enum PACKED brw_reg_file {
   BRW_ARCHITECTURE_REGISTER_FILE = 0,
   BRW_GENERAL_REGISTER_FILE = 1,
   BRW_MESSAGE_REGISTER_FILE = 2,
   BRW_IMMEDIATE_VALUE = 3,

   ARF = BRW_ARCHITECTURE_REGISTER_FILE,
   FIXED_GRF = BRW_GENERAL_REGISTER_FILE,
   MRF = BRW_MESSAGE_REGISTER_FILE,
   IMM = BRW_IMMEDIATE_VALUE,

   /* These are not hardware values */
   VGRF,
   ATTR,
   UNIFORM, /* prog_data->params[reg] */
   BAD_FILE,
};
1011
/**
 * Register files usable by Align1 3-source instructions (Gfx10+). Encoding
 * 1 is interpreted per operand: immediate for src0/src2, accumulator for
 * dest/src1.
 */
enum PACKED gfx10_align1_3src_reg_file {
   BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE = 0,
   BRW_ALIGN1_3SRC_IMMEDIATE_VALUE       = 1, /* src0, src2 */
   BRW_ALIGN1_3SRC_ACCUMULATOR           = 1, /* dest, src1 */
};
1017
1018 /* CNL adds Align1 support for 3-src instructions. Bit 35 of the instruction
1019 * word is "Execution Datatype" which controls whether the instruction operates
1020 * on float or integer types. The register arguments have fields that offer
1021 * more fine control their respective types.
1022 */
/** Execution datatype (bit 35) for Align1 3-src instructions: int vs float. */
enum PACKED gfx10_align1_3src_exec_type {
   BRW_ALIGN1_3SRC_EXEC_TYPE_INT   = 0,
   BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT = 1,
};
1027
/* Architecture register file (ARF) register numbers. The high nibble
 * selects the architecture register class; the low nibble is the register
 * index within that class.
 */
#define BRW_ARF_NULL                  0x00
#define BRW_ARF_ADDRESS               0x10
#define BRW_ARF_ACCUMULATOR           0x20
#define BRW_ARF_FLAG                  0x30
#define BRW_ARF_MASK                  0x40
#define BRW_ARF_MASK_STACK            0x50
#define BRW_ARF_MASK_STACK_DEPTH      0x60
#define BRW_ARF_STATE                 0x70
#define BRW_ARF_CONTROL               0x80
#define BRW_ARF_NOTIFICATION_COUNT    0x90
#define BRW_ARF_IP                    0xA0
#define BRW_ARF_TDR                   0xB0
#define BRW_ARF_TIMESTAMP             0xC0

/* Flag bit on an MRF register number requesting compressed (COMPR4) mode. */
#define BRW_MRF_COMPR4 (1 << 7)

/* Mask register selectors. */
#define BRW_AMASK   0
#define BRW_IMASK   1
#define BRW_LMASK   2
#define BRW_CMASK   3



/* Thread control modes. */
#define BRW_THREAD_NORMAL     0
#define BRW_THREAD_ATOMIC     1
#define BRW_THREAD_SWITCH     2
1054
/**
 * Vertical stride field encodings. For strides >= 1 the encoding is
 * log2(stride) + 1 (1 -> 1, 2 -> 2, 4 -> 3, ...), as the values below show.
 */
enum PACKED brw_vertical_stride {
   BRW_VERTICAL_STRIDE_0               = 0,
   BRW_VERTICAL_STRIDE_1               = 1,
   BRW_VERTICAL_STRIDE_2               = 2,
   BRW_VERTICAL_STRIDE_4               = 3,
   BRW_VERTICAL_STRIDE_8               = 4,
   BRW_VERTICAL_STRIDE_16              = 5,
   BRW_VERTICAL_STRIDE_32              = 6,
   BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL = 0xF,
};
1065
/**
 * Vertical stride encodings for Align1 3-src instructions (Gfx10+).
 * NOTE(review): strides 1 and 2 intentionally share encoding 1 here —
 * presumably the hardware field cannot distinguish them; confirm against
 * the PRM before relying on the distinction.
 */
enum PACKED gfx10_align1_3src_vertical_stride {
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4 = 2,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8 = 3,
};
1073
/** Execution width field encodings: the encoding is log2(width). */
enum PACKED brw_width {
   BRW_WIDTH_1  = 0,
   BRW_WIDTH_2  = 1,
   BRW_WIDTH_4  = 2,
   BRW_WIDTH_8  = 3,
   BRW_WIDTH_16 = 4,
};
1081
1082 /**
1083 * Gfx12+ SWSB SBID synchronization mode.
1084 *
1085 * This is represented as a bitmask including any required SBID token
1086 * synchronization modes, used to synchronize out-of-order instructions. Only
1087 * the strongest mode of the mask will be provided to the hardware in the SWSB
1088 * field of an actual hardware instruction, but virtual instructions may be
1089 * able to take into account multiple of them.
1090 */
/* Bitmask of SBID synchronization modes; values are single bits so they can
 * be ORed together (see the C++ operators below in the file).
 */
enum tgl_sbid_mode {
   TGL_SBID_NULL = 0,   /* No SBID synchronization */
   TGL_SBID_SRC = 1,    /* Wait until sources of the tracked instr. are read */
   TGL_SBID_DST = 2,    /* Wait until destination of the tracked instr. is written */
   TGL_SBID_SET = 4     /* Allocate the SBID token to this instruction */
};
1097
1098 #ifdef __cplusplus
1099 /**
1100 * Allow bitwise arithmetic of tgl_sbid_mode enums.
1101 */
1102 inline tgl_sbid_mode
1103 operator|(tgl_sbid_mode x, tgl_sbid_mode y)
1104 {
1105 return tgl_sbid_mode(unsigned(x) | unsigned(y));
1106 }
1107
1108 inline tgl_sbid_mode
1109 operator&(tgl_sbid_mode x, tgl_sbid_mode y)
1110 {
1111 return tgl_sbid_mode(unsigned(x) & unsigned(y));
1112 }
1113
1114 inline tgl_sbid_mode &
1115 operator|=(tgl_sbid_mode &x, tgl_sbid_mode y)
1116 {
1117 return x = x | y;
1118 }
1119
1120 #endif
1121
1122 /**
1123 * TGL+ SWSB RegDist synchronization pipeline.
1124 *
1125 * On TGL all instructions that use the RegDist synchronization mechanism are
1126 * considered to be executed as a single in-order pipeline, therefore only the
1127 * TGL_PIPE_FLOAT pipeline is applicable. On XeHP+ platforms there are two
1128 * additional asynchronous ALU pipelines (which still execute instructions
1129 * in-order and use the RegDist synchronization mechanism). TGL_PIPE_NONE
1130 * doesn't provide any RegDist pipeline synchronization information and allows
1131 * the hardware to infer the pipeline based on the source types of the
1132 * instruction. TGL_PIPE_ALL can be used when synchronization with all ALU
1133 * pipelines is intended.
1134 */
/* RegDist synchronization pipelines; see the comment block above for the
 * TGL vs. XeHP+ semantics of each value.
 */
enum tgl_pipe {
   TGL_PIPE_NONE = 0,   /* Pipeline inferred from source types */
   TGL_PIPE_FLOAT,      /* Floating-point ALU pipeline */
   TGL_PIPE_INT,        /* Integer ALU pipeline (XeHP+) */
   TGL_PIPE_LONG,       /* Long (64-bit) ALU pipeline (XeHP+) */
   TGL_PIPE_ALL         /* Synchronize against all in-order pipelines */
};
1142
1143 /**
1144 * Logical representation of the SWSB scheduling information of a hardware
1145 * instruction. The binary representation is slightly more compact.
1146 */
/**
 * Logical representation of the SWSB scheduling information of a hardware
 * instruction. The binary representation is slightly more compact.
 */
struct tgl_swsb {
   unsigned regdist : 3;        /* In-order dependency distance, 0 = none */
   enum tgl_pipe pipe : 3;      /* Pipeline the regdist counts against */
   unsigned sbid : 4;           /* SBID token number */
   enum tgl_sbid_mode mode : 3; /* SBID synchronization mode bitmask */
};
1153
1154 /**
1155 * Construct a scheduling annotation with a single RegDist dependency. This
1156 * synchronizes with the completion of the d-th previous in-order instruction.
1157 * The index is one-based, zero causes a no-op tgl_swsb to be constructed.
1158 */
1159 static inline struct tgl_swsb
tgl_swsb_regdist(unsigned d)1160 tgl_swsb_regdist(unsigned d)
1161 {
1162 const struct tgl_swsb swsb = { d, d ? TGL_PIPE_ALL : TGL_PIPE_NONE };
1163 assert(swsb.regdist == d);
1164 return swsb;
1165 }
1166
1167 /**
1168 * Construct a scheduling annotation that synchronizes with the specified SBID
1169 * token.
1170 */
1171 static inline struct tgl_swsb
tgl_swsb_sbid(enum tgl_sbid_mode mode,unsigned sbid)1172 tgl_swsb_sbid(enum tgl_sbid_mode mode, unsigned sbid)
1173 {
1174 const struct tgl_swsb swsb = { 0, TGL_PIPE_NONE, sbid, mode };
1175 assert(swsb.sbid == sbid);
1176 return swsb;
1177 }
1178
1179 /**
1180 * Construct a no-op scheduling annotation.
1181 */
1182 static inline struct tgl_swsb
tgl_swsb_null(void)1183 tgl_swsb_null(void)
1184 {
1185 return tgl_swsb_regdist(0);
1186 }
1187
1188 /**
1189 * Return a scheduling annotation that allocates the same SBID synchronization
1190 * token as \p swsb. In addition it will synchronize against a previous
1191 * in-order instruction if \p regdist is non-zero.
1192 */
1193 static inline struct tgl_swsb
tgl_swsb_dst_dep(struct tgl_swsb swsb,unsigned regdist)1194 tgl_swsb_dst_dep(struct tgl_swsb swsb, unsigned regdist)
1195 {
1196 swsb.regdist = regdist;
1197 swsb.mode = swsb.mode & TGL_SBID_SET;
1198 swsb.pipe = (regdist ? TGL_PIPE_ALL : TGL_PIPE_NONE);
1199 return swsb;
1200 }
1201
1202 /**
1203 * Return a scheduling annotation that synchronizes against the same SBID and
1204 * RegDist dependencies as \p swsb, but doesn't allocate any SBID token.
1205 */
1206 static inline struct tgl_swsb
tgl_swsb_src_dep(struct tgl_swsb swsb)1207 tgl_swsb_src_dep(struct tgl_swsb swsb)
1208 {
1209 swsb.mode = swsb.mode & (TGL_SBID_SRC | TGL_SBID_DST);
1210 return swsb;
1211 }
1212
1213 /**
1214 * Convert the provided tgl_swsb to the hardware's binary representation of an
1215 * SWSB annotation.
1216 */
1217 static inline uint8_t
tgl_swsb_encode(const struct intel_device_info * devinfo,struct tgl_swsb swsb)1218 tgl_swsb_encode(const struct intel_device_info *devinfo, struct tgl_swsb swsb)
1219 {
1220 if (!swsb.mode) {
1221 const unsigned pipe = devinfo->verx10 < 125 ? 0 :
1222 swsb.pipe == TGL_PIPE_FLOAT ? 0x10 :
1223 swsb.pipe == TGL_PIPE_INT ? 0x18 :
1224 swsb.pipe == TGL_PIPE_LONG ? 0x50 :
1225 swsb.pipe == TGL_PIPE_ALL ? 0x8 : 0;
1226 return pipe | swsb.regdist;
1227 } else if (swsb.regdist) {
1228 return 0x80 | swsb.regdist << 4 | swsb.sbid;
1229 } else {
1230 return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0x40 :
1231 swsb.mode & TGL_SBID_DST ? 0x20 : 0x30);
1232 }
1233 }
1234
1235 /**
1236 * Convert the provided binary representation of an SWSB annotation to a
1237 * tgl_swsb.
1238 */
static inline struct tgl_swsb
tgl_swsb_decode(const struct intel_device_info *devinfo, const enum opcode opcode,
                const uint8_t x)
{
   if (x & 0x80) {
      /* Combined RegDist + SBID encoding (top bit set): bits 6:4 are the
       * distance, bits 3:0 the token. For send-like and math opcodes the
       * SBID component allocates the token (SET); otherwise it is a
       * destination dependency.
       */
      const struct tgl_swsb swsb = { (x & 0x70u) >> 4, TGL_PIPE_NONE,
                                     x & 0xfu,
                                     (opcode == BRW_OPCODE_SEND ||
                                      opcode == BRW_OPCODE_SENDC ||
                                      opcode == BRW_OPCODE_MATH) ?
                                     TGL_SBID_SET : TGL_SBID_DST };
      return swsb;
   } else if ((x & 0x70) == 0x20) {
      /* Pure SBID destination dependency. */
      return tgl_swsb_sbid(TGL_SBID_DST, x & 0xfu);
   } else if ((x & 0x70) == 0x30) {
      /* Pure SBID source dependency. */
      return tgl_swsb_sbid(TGL_SBID_SRC, x & 0xfu);
   } else if ((x & 0x70) == 0x40) {
      /* SBID token allocation. */
      return tgl_swsb_sbid(TGL_SBID_SET, x & 0xfu);
   } else {
      /* RegDist-only encoding; bits 6:3 select the pipeline (Gfx12.5+),
       * bits 2:0 the distance. Mirrors the encoding in tgl_swsb_encode().
       */
      const struct tgl_swsb swsb = { x & 0x7u,
                                     ((x & 0x78) == 0x10 ? TGL_PIPE_FLOAT :
                                      (x & 0x78) == 0x18 ? TGL_PIPE_INT :
                                      (x & 0x78) == 0x50 ? TGL_PIPE_LONG :
                                      (x & 0x78) == 0x8 ? TGL_PIPE_ALL :
                                      TGL_PIPE_NONE) };
      /* Pipeline selectors only exist on Gfx12.5+. */
      assert(devinfo->verx10 >= 125 || swsb.pipe == TGL_PIPE_NONE);
      return swsb;
   }
}
1268
/** Function control values for the Gfx12+ SYNC instruction. */
enum tgl_sync_function {
   TGL_SYNC_NOP = 0x0,    /* No-op, carries only the SWSB annotation */
   TGL_SYNC_ALLRD = 0x2,  /* Wait until all pending reads complete */
   TGL_SYNC_ALLWR = 0x3,  /* Wait until all pending writes complete */
   TGL_SYNC_BAR = 0xe,    /* Barrier synchronization */
   TGL_SYNC_HOST = 0xf    /* Host synchronization */
};
1276
1277 /**
1278 * Message target: Shared Function ID for where to SEND a message.
1279 *
1280 * These are enumerated in the ISA reference under "send - Send Message".
1281 * In particular, see the following tables:
1282 * - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
1283 * - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
1284 * - Ivybridge PRM, Volume 1 Part 1, section 3.2.7 "GPE Function IDs"
1285 */
/* Shared function IDs; note several encodings are reused with different
 * meanings on different hardware generations (e.g. 4 is the Gfx4 dataport
 * read unit but the Gfx6 sampler-cache dataport).
 */
enum brw_message_target {
   BRW_SFID_NULL                     = 0,
   BRW_SFID_MATH                     = 1, /* Only valid on Gfx4-5 */
   BRW_SFID_SAMPLER                  = 2,
   BRW_SFID_MESSAGE_GATEWAY          = 3,
   BRW_SFID_DATAPORT_READ            = 4,
   BRW_SFID_DATAPORT_WRITE           = 5,
   BRW_SFID_URB                      = 6,
   BRW_SFID_THREAD_SPAWNER           = 7,
   BRW_SFID_VME                      = 8,

   GFX6_SFID_DATAPORT_SAMPLER_CACHE  = 4,
   GFX6_SFID_DATAPORT_RENDER_CACHE   = 5,
   GFX6_SFID_DATAPORT_CONSTANT_CACHE = 9,

   GFX7_SFID_DATAPORT_DATA_CACHE     = 10,
   GFX7_SFID_PIXEL_INTERPOLATOR      = 11,
   HSW_SFID_DATAPORT_DATA_CACHE_1    = 12,
   HSW_SFID_CRE                      = 13,

   GFX12_SFID_TGM                    = 13, /* Typed Global Memory */
   GFX12_SFID_SLM                    = 14, /* Shared Local Memory */
   GFX12_SFID_UGM                    = 15, /* Untyped Global Memory */

   GEN_RT_SFID_BINDLESS_THREAD_DISPATCH = 7,
   GEN_RT_SFID_RAY_TRACE_ACCELERATOR = 8,
};
1313
#define GFX7_MESSAGE_TARGET_DP_DATA_CACHE     10

/* Sampler message return formats (message descriptor field). */
#define BRW_SAMPLER_RETURN_FORMAT_FLOAT32     0
#define BRW_SAMPLER_RETURN_FORMAT_UINT32      2
#define BRW_SAMPLER_RETURN_FORMAT_SINT32      3

#define GFX8_SAMPLER_RETURN_FORMAT_32BITS     0
#define GFX8_SAMPLER_RETURN_FORMAT_16BITS     1

/* Gfx4 sampler message types. Several names share an encoding because the
 * SIMD mode field disambiguates which operation the value selects.
 */
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE              0
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE             0
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS        0
#define BRW_SAMPLER_MESSAGE_SIMD8_KILLPIX             1
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD        1
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD         1
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS  2
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS    2
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE    0
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE     2
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE 0
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE 1
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE  1
#define BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO           2
#define BRW_SAMPLER_MESSAGE_SIMD16_RESINFO            2
#define BRW_SAMPLER_MESSAGE_SIMD4X2_LD                3
#define BRW_SAMPLER_MESSAGE_SIMD8_LD                  3
#define BRW_SAMPLER_MESSAGE_SIMD16_LD                 3
1341
/* Gfx5+ sampler message types: the SIMD mode moved to its own field, so a
 * single flat encoding covers all operations.
 */
#define GFX5_SAMPLER_MESSAGE_SAMPLE              0
#define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS         1
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD          2
#define GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE      3
#define GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS       4
#define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE  6
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LD           7
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4      8
#define GFX5_SAMPLER_MESSAGE_LOD                 9
#define GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO      10
#define GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO   11
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C    16
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO   17
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C 18
#define HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE 20
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LZ           24
#define GFX9_SAMPLER_MESSAGE_SAMPLE_C_LZ         25
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LD_LZ        26
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W     28
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS       29
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS       30
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DSS       31

/* for GFX5 only */
#define BRW_SAMPLER_SIMD_MODE_SIMD4X2                 0
#define BRW_SAMPLER_SIMD_MODE_SIMD8                   1
#define BRW_SAMPLER_SIMD_MODE_SIMD16                  2
#define BRW_SAMPLER_SIMD_MODE_SIMD32_64               3

#define GFX10_SAMPLER_SIMD_MODE_SIMD8H                5
#define GFX10_SAMPLER_SIMD_MODE_SIMD16H               6

/* GFX9 changes SIMD mode 0 to mean SIMD8D, but lets us get the SIMD4x2
 * behavior by setting bit 22 of dword 2 in the message header. */
#define GFX9_SAMPLER_SIMD_MODE_SIMD8D                 0
#define GFX9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2      (1 << 22)
1379
/* Dataport OWord block size encodings, plus convenience macros that map a
 * count of OWords (or DWords) to the matching encoding, aborting on an
 * unsupported count.
 */
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW   0
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH  1
#define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS     2
#define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS     3
#define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS     4
#define GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS  5
#define BRW_DATAPORT_OWORD_BLOCK_OWORDS(n)              \
   ((n) == 1 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW :    \
    (n) == 2 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :      \
    (n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :      \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS :      \
    (n) == 16 ? GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS :  \
    (abort(), ~0))
/* Same mapping expressed in DWords (4 DWords per OWord). */
#define BRW_DATAPORT_OWORD_BLOCK_DWORDS(n)              \
   ((n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW :    \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :      \
    (n) == 16 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :     \
    (n) == 32 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS :     \
    (abort(), ~0))

#define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD     0
#define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS    2

#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS   2
#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS  3
1405
/* Dataport read message types, per generation. */
/* This one stays the same across generations. */
#define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ          0
/* GFX4 */
#define BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ     1
#define BRW_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ          2
#define BRW_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ      3
/* G45, GFX5 */
#define G45_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ         1
#define G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ     2
#define G45_DATAPORT_READ_MESSAGE_AVC_LOOP_FILTER_READ      3
#define G45_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ          4
#define G45_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ      6
/* GFX6 */
#define GFX6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ        1
#define GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ    2
#define GFX6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ         4
#define GFX6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ 5
#define GFX6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ     6

/* Target cache selector for dataport reads. */
#define BRW_DATAPORT_READ_TARGET_DATA_CACHE      0
#define BRW_DATAPORT_READ_TARGET_RENDER_CACHE    1
#define BRW_DATAPORT_READ_TARGET_SAMPLER_CACHE   2

/* Render-target write message subtypes. */
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE            0
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01     2
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23     3
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01   4
1434
/* Dataport write message types (Gfx4), then per-generation variants. */
#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE                0
#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE           1
#define BRW_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE                2
#define BRW_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE            3
#define BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE              4
#define BRW_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE     5
#define BRW_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE               7

/* GFX6 */
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE              7
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE               8
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE          9
#define GFX6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE               10
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE           11
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE             12
#define GFX6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE               13
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE       14

/* GFX7 — RC = render cache dataport, DC = data cache dataport. */
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_READ                           4
#define GFX7_DATAPORT_RC_TYPED_SURFACE_READ                         5
#define GFX7_DATAPORT_RC_TYPED_ATOMIC_OP                            6
#define GFX7_DATAPORT_RC_MEMORY_FENCE                               7
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_WRITE                          10
#define GFX7_DATAPORT_RC_RENDER_TARGET_WRITE                        12
#define GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE                        13
#define GFX7_DATAPORT_DC_OWORD_BLOCK_READ                           0
#define GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ                 1
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_READ                      2
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_READ                       3
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_READ                        4
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ                       5
#define GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP                          6
#define GFX7_DATAPORT_DC_MEMORY_FENCE                               7
#define GFX7_DATAPORT_DC_OWORD_BLOCK_WRITE                          8
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE                     10
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_WRITE                      11
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_WRITE                       12
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE                      13

/* Scratch read/write message descriptor bits: bit 18 selects scratch
 * access, bit 17 selects write vs. read.
 */
#define GFX7_DATAPORT_SCRATCH_READ                            ((1 << 18) | \
                                                               (0 << 17))
#define GFX7_DATAPORT_SCRATCH_WRITE                           ((1 << 18) | \
                                                               (1 << 17))
#define GFX7_DATAPORT_SCRATCH_NUM_REGS_SHIFT                        12

/* Pixel interpolator message modes. */
#define GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET     0
#define GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE            1
#define GFX7_PIXEL_INTERPOLATOR_LOC_CENTROID          2
#define GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET   3
1485
/* HSW — data cache data port 0 and 1 message types, plus Gfx8/9/12 A64
 * (stateless 64-bit address) messages that live on port 1.
 */
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_READ                      0
#define HSW_DATAPORT_DC_PORT0_UNALIGNED_OWORD_BLOCK_READ            1
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_READ                 2
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_READ                  3
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ                   4
#define HSW_DATAPORT_DC_PORT0_MEMORY_FENCE                          7
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_WRITE                     8
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_WRITE                10
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_WRITE                 11
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE                  12

#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ                  1
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP                     2
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2             3
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_READ                      4
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ                    5
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP                       6
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2               7
#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE                 9
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_WRITE                     10
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP                     11
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP_SIMD4X2             12
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE                   13
#define GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ                   0x10
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ             0x11
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP                0x12
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP      0x13
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ                 0x14
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE                0x15
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE            0x19
#define GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE                  0x1a
#define GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP              0x1b
#define GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP          0x1d
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP    0x1e

/* GFX9 */
#define GFX9_DATAPORT_RC_RENDER_TARGET_WRITE                        12
#define GFX9_DATAPORT_RC_RENDER_TARGET_READ                         13

/* A64 scattered message subtype (element size) */
#define GFX8_A64_SCATTERED_SUBTYPE_BYTE                             0
#define GFX8_A64_SCATTERED_SUBTYPE_DWORD                            1
#define GFX8_A64_SCATTERED_SUBTYPE_QWORD                            2
#define GFX8_A64_SCATTERED_SUBTYPE_HWORD                            3
1531
/* Dataport special binding table indices: */
#define BRW_BTI_STATELESS                255
#define GFX7_BTI_SLM                     254

/* Haswell cacheability-control variants of the stateless BTI. */
#define HSW_BTI_STATELESS_LOCALLY_COHERENT   255
#define HSW_BTI_STATELESS_NON_COHERENT       253
#define HSW_BTI_STATELESS_GLOBALLY_COHERENT  252
#define HSW_BTI_STATELESS_LLC_COHERENT       251
#define HSW_BTI_STATELESS_L3_UNCACHED        250

/* The hardware docs are a bit contradictory here. On Haswell, where they
 * first added cache ability control, there were 5 different cache modes (see
 * HSW_BTI_STATELESS_* above). On Broadwell, they reduced to two:
 *
 *  - IA-Coherent (BTI=255): Coherent within Gen and coherent within the
 *    entire IA cache memory hierarchy.
 *
 *  - Non-Coherent (BTI=253): Coherent within Gen, same cache type.
 *
 * Information about stateless cache coherency can be found in the "A32
 * Stateless" section of the "3D Media GPGPU" volume of the PRM for each
 * hardware generation.
 *
 * Unfortunately, the docs for MDC_STATELESS appear to have been copied and
 * pasted from Haswell and give the Haswell definitions for the BTI values of
 * 255 and 253 including a warning about accessing 253 surfaces from multiple
 * threads. This seems to be a copy+paste error and the definitions from the
 * "A32 Stateless" section should be trusted instead.
 *
 * Note that because the DRM sets bit 4 of HDC_CHICKEN0 on BDW, CHV and at
 * least some pre-production steppings of SKL due to WaForceEnableNonCoherent,
 * HDC memory access may have been overridden by the kernel to be non-coherent
 * (matching the behavior of the same BTI on pre-Gfx8 hardware) and BTI 255
 * may actually be an alias for BTI 253.
 */
#define GFX8_BTI_STATELESS_IA_COHERENT   255
#define GFX8_BTI_STATELESS_NON_COHERENT  253
#define GFX9_BTI_BINDLESS                252
1570
/* Dataport atomic operations for Untyped Atomic Integer Operation message
 * (and others).
 */
#define BRW_AOP_AND                   1
#define BRW_AOP_OR                    2
#define BRW_AOP_XOR                   3
#define BRW_AOP_MOV                   4
#define BRW_AOP_INC                   5
#define BRW_AOP_DEC                   6
#define BRW_AOP_ADD                   7
#define BRW_AOP_SUB                   8
#define BRW_AOP_REVSUB                9    /* mem = operand - mem */
#define BRW_AOP_IMAX                  10   /* signed max */
#define BRW_AOP_IMIN                  11   /* signed min */
#define BRW_AOP_UMAX                  12   /* unsigned max */
#define BRW_AOP_UMIN                  13   /* unsigned min */
#define BRW_AOP_CMPWR                 14   /* compare-and-write (cmpxchg) */
#define BRW_AOP_PREDEC                15

/* Dataport atomic operations for Untyped Atomic Float Operation message.
 * Note these overlap the integer encodings above; the message type selects
 * which table applies.
 */
#define BRW_AOP_FMAX                  1
#define BRW_AOP_FMIN                  2
#define BRW_AOP_FCMPWR                3
#define BRW_AOP_FADD                  4
1595
/* Math function encodings for the math instruction / extended math unit. */
#define BRW_MATH_FUNCTION_INV                              1
#define BRW_MATH_FUNCTION_LOG                              2
#define BRW_MATH_FUNCTION_EXP                              3
#define BRW_MATH_FUNCTION_SQRT                             4
#define BRW_MATH_FUNCTION_RSQ                              5
#define BRW_MATH_FUNCTION_SIN                              6
#define BRW_MATH_FUNCTION_COS                              7
#define BRW_MATH_FUNCTION_SINCOS                           8 /* gfx4, gfx5 */
#define BRW_MATH_FUNCTION_FDIV                             9 /* gfx6+ */
#define BRW_MATH_FUNCTION_POW                              10
#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER   11
#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT                 12
#define BRW_MATH_FUNCTION_INT_DIV_REMAINDER                13
#define GFX8_MATH_FUNCTION_INVM                            14
#define GFX8_MATH_FUNCTION_RSQRTM                          15

/* Math message control fields (Gfx4-5 math SFID). */
#define BRW_MATH_INTEGER_UNSIGNED     0
#define BRW_MATH_INTEGER_SIGNED       1

#define BRW_MATH_PRECISION_FULL        0
#define BRW_MATH_PRECISION_PARTIAL     1

#define BRW_MATH_SATURATE_NONE         0
#define BRW_MATH_SATURATE_SATURATE     1

#define BRW_MATH_DATA_VECTOR  0
#define BRW_MATH_DATA_SCALAR  1
1623
/* URB message opcodes. */
#define BRW_URB_OPCODE_WRITE_HWORD  0
#define BRW_URB_OPCODE_WRITE_OWORD  1
#define BRW_URB_OPCODE_READ_HWORD   2
#define BRW_URB_OPCODE_READ_OWORD   3
#define GFX7_URB_OPCODE_ATOMIC_MOV  4
#define GFX7_URB_OPCODE_ATOMIC_INC  5
#define GFX8_URB_OPCODE_ATOMIC_ADD  6
#define GFX8_URB_OPCODE_SIMD8_WRITE 7
#define GFX8_URB_OPCODE_SIMD8_READ  8
#define GFX125_URB_OPCODE_FENCE     9

/* URB swizzle control. */
#define BRW_URB_SWIZZLE_NONE          0
#define BRW_URB_SWIZZLE_INTERLEAVE    1
#define BRW_URB_SWIZZLE_TRANSPOSE     2

/* Per-thread scratch space size encodings: encoding n means 1KB << n. */
#define BRW_SCRATCH_SPACE_SIZE_1K     0
#define BRW_SCRATCH_SPACE_SIZE_2K     1
#define BRW_SCRATCH_SPACE_SIZE_4K     2
#define BRW_SCRATCH_SPACE_SIZE_8K     3
#define BRW_SCRATCH_SPACE_SIZE_16K    4
#define BRW_SCRATCH_SPACE_SIZE_32K    5
#define BRW_SCRATCH_SPACE_SIZE_64K    6
#define BRW_SCRATCH_SPACE_SIZE_128K   7
#define BRW_SCRATCH_SPACE_SIZE_256K   8
#define BRW_SCRATCH_SPACE_SIZE_512K   9
#define BRW_SCRATCH_SPACE_SIZE_1M     10
#define BRW_SCRATCH_SPACE_SIZE_2M     11

/* Message gateway sub-function IDs. */
#define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY         0
#define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY        1
#define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG          2
#define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP        3
#define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG          4
#define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
#define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE      6
1659
1660
1661 /* Gfx7 "GS URB Entry Allocation Size" is a U9-1 field, so the maximum gs_size
1662 * is 2^9, or 512. It's counted in multiples of 64 bytes.
1663 *
1664 * Identical for VS, DS, and HS.
1665 */
1666 #define GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES (512*64)
1667 #define GFX7_MAX_DS_URB_ENTRY_SIZE_BYTES (512*64)
1668 #define GFX7_MAX_HS_URB_ENTRY_SIZE_BYTES (512*64)
1669 #define GFX7_MAX_VS_URB_ENTRY_SIZE_BYTES (512*64)
1670
1671 #define BRW_GS_EDGE_INDICATOR_0 (1 << 8)
1672 #define BRW_GS_EDGE_INDICATOR_1 (1 << 9)
1673
1674 /* Gfx6 "GS URB Entry Allocation Size" is defined as a number of 1024-bit
1675 * (128 bytes) URB rows and the maximum allowed value is 5 rows.
1676 */
1677 #define GFX6_MAX_GS_URB_ENTRY_SIZE_BYTES (5*128)
1678
1679 /* GS Thread Payload
1680 */
1681
1682 /* 3DSTATE_GS "Output Vertex Size" has an effective maximum of 62. It's
1683 * counted in multiples of 16 bytes.
1684 */
1685 #define GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES (62*16)
1686
1687
1688 /* R0 */
1689 # define GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT 27
1690
1691 /* CR0.0[5:4] Floating-Point Rounding Modes
1692 * Skylake PRM, Volume 7 Part 1, "Control Register", page 756
1693 */
1694
1695 #define BRW_CR0_RND_MODE_MASK 0x30
1696 #define BRW_CR0_RND_MODE_SHIFT 4
1697
/** IEEE rounding modes as encoded in CR0.0[5:4] (see masks above). */
enum PACKED brw_rnd_mode {
   BRW_RND_MODE_RTNE = 0,  /* Round to Nearest or Even */
   BRW_RND_MODE_RU = 1,    /* Round Up, toward +inf */
   BRW_RND_MODE_RD = 2,    /* Round Down, toward -inf */
   BRW_RND_MODE_RTZ = 3,   /* Round Toward Zero */
   BRW_RND_MODE_UNSPECIFIED,  /* Unspecified rounding mode */
};
1705
/* CR0 denorm-preservation control bits. */
#define BRW_CR0_FP64_DENORM_PRESERVE (1 << 6)
#define BRW_CR0_FP32_DENORM_PRESERVE (1 << 7)
#define BRW_CR0_FP16_DENORM_PRESERVE (1 << 10)

/* All CR0 floating-point-mode bits the compiler manages. */
#define BRW_CR0_FP_MODE_MASK (BRW_CR0_FP64_DENORM_PRESERVE | \
                              BRW_CR0_FP32_DENORM_PRESERVE | \
                              BRW_CR0_FP16_DENORM_PRESERVE | \
                              BRW_CR0_RND_MODE_MASK)

/* MDC_DS - Data Size Message Descriptor Control Field
 * Skylake PRM, Volume 2d, page 129
 *
 * Specifies the number of Bytes to be read or written per Dword used at
 * byte_scattered read/write and byte_scaled read/write messages.
 */
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE     0
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD     1
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD    2

/* Bindless thread dispatch (ray tracing) message type. */
#define GEN_RT_BTD_MESSAGE_SPAWN 1

/* Trace-ray control values. NOTE(review): "INITAL" is a long-standing typo
 * for "INITIAL"; the name is kept as-is because callers reference it.
 */
#define GEN_RT_TRACE_RAY_INITAL       0
#define GEN_RT_TRACE_RAY_INSTANCE     1
#define GEN_RT_TRACE_RAY_COMMIT       2
#define GEN_RT_TRACE_RAY_CONTINUE     3

/* Shader types for bindless thread dispatch. */
#define GEN_RT_BTD_SHADER_TYPE_ANY_HIT       0
#define GEN_RT_BTD_SHADER_TYPE_CLOSEST_HIT   1
#define GEN_RT_BTD_SHADER_TYPE_MISS          2
#define GEN_RT_BTD_SHADER_TYPE_INTERSECTION  3
1736
1737 /* Starting with Xe-HPG, the old dataport was massively reworked dataport.
1738 * The new thing, called Load/Store Cache or LSC, has a significantly improved
1739 * interface. Instead of bespoke messages for every case, there's basically
1740 * one or two messages with different bits to control things like address
1741 * size, how much data is read/written, etc. It's way nicer but also means we
1742 * get to rewrite all our dataport encoding/decoding code. This patch kicks
1743 * off the party with all of the new enums.
1744 */
/** LSC message opcodes (Xe-HPG+ Load/Store Cache dataport). */
enum lsc_opcode {
   LSC_OP_LOAD            = 0,
   LSC_OP_LOAD_CMASK      = 2,   /* load with channel mask */
   LSC_OP_STORE           = 4,
   LSC_OP_STORE_CMASK     = 6,   /* store with channel mask */
   LSC_OP_ATOMIC_INC      = 8,
   LSC_OP_ATOMIC_DEC      = 9,
   LSC_OP_ATOMIC_LOAD     = 10,
   LSC_OP_ATOMIC_STORE    = 11,
   LSC_OP_ATOMIC_ADD      = 12,
   LSC_OP_ATOMIC_SUB      = 13,
   LSC_OP_ATOMIC_MIN      = 14,  /* signed min */
   LSC_OP_ATOMIC_MAX      = 15,  /* signed max */
   LSC_OP_ATOMIC_UMIN     = 16,  /* unsigned min */
   LSC_OP_ATOMIC_UMAX     = 17,  /* unsigned max */
   LSC_OP_ATOMIC_CMPXCHG  = 18,
   LSC_OP_ATOMIC_FADD     = 19,
   LSC_OP_ATOMIC_FSUB     = 20,
   LSC_OP_ATOMIC_FMIN     = 21,
   LSC_OP_ATOMIC_FMAX     = 22,
   LSC_OP_ATOMIC_FCMPXCHG = 23,
   LSC_OP_ATOMIC_AND      = 24,
   LSC_OP_ATOMIC_OR       = 25,
   LSC_OP_ATOMIC_XOR      = 26,
   LSC_OP_FENCE           = 31
};
1771
1772 /*
1773 * Specifies the size of the dataport address payload in registers.
1774 */
/*
 * Specifies the size of the dataport address payload in registers. Values
 * are the literal register counts.
 */
enum PACKED lsc_addr_reg_size {
   LSC_ADDR_REG_SIZE_1  = 1,
   LSC_ADDR_REG_SIZE_2  = 2,
   LSC_ADDR_REG_SIZE_3  = 3,
   LSC_ADDR_REG_SIZE_4  = 4,
   LSC_ADDR_REG_SIZE_6  = 6,
   LSC_ADDR_REG_SIZE_8  = 8,
};
1783
1784 /*
1785 * Specifies the size of the address payload item in a dataport message.
1786 */
/*
 * Specifies the size of the address payload item in a dataport message.
 */
enum PACKED lsc_addr_size {
   LSC_ADDR_SIZE_A16 = 1,    /* 16-bit address offset */
   LSC_ADDR_SIZE_A32 = 2,    /* 32-bit address offset */
   LSC_ADDR_SIZE_A64 = 3,    /* 64-bit address offset */
};
1792
1793 /*
1794 * Specifies the type of the address payload item in a dataport message. The
1795 * address type specifies how the dataport message decodes the Extended
1796 * Descriptor for the surface attributes and address calculation.
1797 */
/*
 * Specifies the type of the address payload item in a dataport message. The
 * address type specifies how the dataport message decodes the Extended
 * Descriptor for the surface attributes and address calculation.
 */
enum PACKED lsc_addr_surface_type {
   LSC_ADDR_SURFTYPE_FLAT = 0, /* Flat (stateless) addressing */
   LSC_ADDR_SURFTYPE_BSS = 1,  /* Bindless surface state */
   LSC_ADDR_SURFTYPE_SS = 2,   /* Surface state */
   LSC_ADDR_SURFTYPE_BTI = 3,  /* Binding table index */
};
1804
1805 /*
1806 * Specifies the dataport message override to the default L1 and L3 memory
1807 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1808 * cache streaming (S) and invalidate-after-read (IAR). Dataport L3 cache
1809 * policies are uncached (UC) and cached (C).
1810 */
/** L1/L3 cache-policy overrides for LSC load messages. */
enum lsc_cache_load {
   /* No override. Use the non-pipelined state or surface state cache settings
    * for L1 and L3.
    */
   LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   LSC_CACHE_LOAD_L1UC_L3UC      = 1,
   /* Override to L1 uncached and L3 cached */
   LSC_CACHE_LOAD_L1UC_L3C       = 2,
   /* Override to L1 cached and L3 uncached */
   LSC_CACHE_LOAD_L1C_L3UC       = 3,
   /* Override to cache at both L1 and L3 */
   LSC_CACHE_LOAD_L1C_L3C        = 4,
   /* Override to L1 streaming load and L3 uncached */
   LSC_CACHE_LOAD_L1S_L3UC       = 5,
   /* Override to L1 streaming load and L3 cached */
   LSC_CACHE_LOAD_L1S_L3C        = 6,
   /* For load messages, override to L1 invalidate-after-read, and L3 cached. */
   LSC_CACHE_LOAD_L1IAR_L3C      = 7,
};
1831
1832 /*
1833 * Specifies the dataport message override to the default L1 and L3 memory
1834 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1835 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1836 * uncached (UC) and cached (WB).
1837 */
1838 enum PACKED lsc_cache_store {
1839 /* No override. Use the non-pipelined or surface state cache settings for L1
1840 * and L3.
1841 */
1842 LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1843 /* Override to L1 uncached and L3 uncached */
1844 LSC_CACHE_STORE_L1UC_L3UC = 1,
1845 /* Override to L1 uncached and L3 cached */
1846 LSC_CACHE_STORE_L1UC_L3WB = 2,
1847 /* Override to L1 write-through and L3 uncached */
1848 LSC_CACHE_STORE_L1WT_L3UC = 3,
1849 /* Override to L1 write-through and L3 cached */
1850 LSC_CACHE_STORE_L1WT_L3WB = 4,
1851 /* Override to L1 streaming and L3 uncached */
1852 LSC_CACHE_STORE_L1S_L3UC = 5,
1853 /* Override to L1 streaming and L3 cached */
1854 LSC_CACHE_STORE_L1S_L3WB = 6,
1855 /* Override to L1 write-back, and L3 cached */
1856 LSC_CACHE_STORE_L1WB_L3WB = 7,
1857
1858 };
1859
1860 /*
1861 * Specifies which components of the data payload 4-element vector (X,Y,Z,W) is
1862 * packed into the register payload.
1863 */
1864 enum PACKED lsc_cmask {
1865 LSC_CMASK_X = 0x1,
1866 LSC_CMASK_Y = 0x2,
1867 LSC_CMASK_XY = 0x3,
1868 LSC_CMASK_Z = 0x4,
1869 LSC_CMASK_XZ = 0x5,
1870 LSC_CMASK_YZ = 0x6,
1871 LSC_CMASK_XYZ = 0x7,
1872 LSC_CMASK_W = 0x8,
1873 LSC_CMASK_XW = 0x9,
1874 LSC_CMASK_YW = 0xa,
1875 LSC_CMASK_XYW = 0xb,
1876 LSC_CMASK_ZW = 0xc,
1877 LSC_CMASK_XZW = 0xd,
1878 LSC_CMASK_YZW = 0xe,
1879 LSC_CMASK_XYZW = 0xf,
1880 };
1881
1882 /*
1883 * Specifies the size of the data payload item in a dataport message.
1884 */
1885 enum PACKED lsc_data_size {
1886 /* 8-bit scalar data value in memory, packed into a 8-bit data value in
1887 * register.
1888 */
1889 LSC_DATA_SIZE_D8 = 0,
1890 /* 16-bit scalar data value in memory, packed into a 16-bit data value in
1891 * register.
1892 */
1893 LSC_DATA_SIZE_D16 = 1,
1894 /* 32-bit scalar data value in memory, packed into 32-bit data value in
1895 * register.
1896 */
1897 LSC_DATA_SIZE_D32 = 2,
1898 /* 64-bit scalar data value in memory, packed into 64-bit data value in
1899 * register.
1900 */
1901 LSC_DATA_SIZE_D64 = 3,
1902 /* 8-bit scalar data value in memory, packed into 32-bit unsigned data value
1903 * in register.
1904 */
1905 LSC_DATA_SIZE_D8U32 = 4,
1906 /* 16-bit scalar data value in memory, packed into 32-bit unsigned data
1907 * value in register.
1908 */
1909 LSC_DATA_SIZE_D16U32 = 5,
1910 /* 16-bit scalar BigFloat data value in memory, packed into 32-bit float
1911 * value in register.
1912 */
1913 LSC_DATA_SIZE_D16BF32 = 6,
1914 };
1915
1916 /*
1917 * Enum specifies the scope of the fence.
1918 */
1919 enum PACKED lsc_fence_scope {
1920 /* Wait until all previous memory transactions from this thread are observed
1921 * within the local thread-group.
1922 */
1923 LSC_FENCE_THREADGROUP = 0,
1924 /* Wait until all previous memory transactions from this thread are observed
1925 * within the local sub-slice.
1926 */
1927 LSC_FENCE_LOCAL = 1,
1928 /* Wait until all previous memory transactions from this thread are observed
1929 * in the local tile.
1930 */
1931 LSC_FENCE_TILE = 2,
1932 /* Wait until all previous memory transactions from this thread are observed
1933 * in the local GPU.
1934 */
1935 LSC_FENCE_GPU = 3,
1936 /* Wait until all previous memory transactions from this thread are observed
1937 * across all GPUs in the system.
1938 */
1939 LSC_FENCE_ALL_GPU = 4,
1940 /* Wait until all previous memory transactions from this thread are observed
1941 * at the "system" level.
1942 */
1943 LSC_FENCE_SYSTEM_RELEASE = 5,
1944 /* For GPUs that do not follow PCIe Write ordering for downstream writes
1945 * targeting device memory, a fence message with scope=System_Acquire will
1946 * commit to device memory all downstream and peer writes that have reached
1947 * the device.
1948 */
1949 LSC_FENCE_SYSTEM_ACQUIRE = 6,
1950 };
1951
1952 /*
1953 * Specifies the type of cache flush operation to perform after a fence is
1954 * complete.
1955 */
1956 enum PACKED lsc_flush_type {
1957 LSC_FLUSH_TYPE_NONE = 0,
1958 /*
1959 * For a R/W cache, evict dirty lines (M to I state) and invalidate clean
1960 * lines. For a RO cache, invalidate clean lines.
1961 */
1962 LSC_FLUSH_TYPE_EVICT = 1,
1963 /*
1964 * For both R/W and RO cache, invalidate clean lines in the cache.
1965 */
1966 LSC_FLUSH_TYPE_INVALIDATE = 2,
1967 /*
1968 * For a R/W cache, invalidate dirty lines (M to I state), without
1969 * write-back to next level. This opcode does nothing for a RO cache.
1970 */
1971 LSC_FLUSH_TYPE_DISCARD = 3,
1972 /*
1973 * For a R/W cache, write-back dirty lines to the next level, but kept in
1974 * the cache as "clean" (M to V state). This opcode does nothing for a RO
1975 * cache.
1976 */
1977 LSC_FLUSH_TYPE_CLEAN = 4,
1978 /*
1979 * Flush "RW" section of the L3 cache, but leave L1 and L2 caches untouched.
1980 */
1981 LSC_FLUSH_TYPE_L3ONLY = 5,
1982 /*
1983 * HW maps this flush type internally to NONE.
1984 */
1985 LSC_FLUSH_TYPE_NONE_6 = 6,
1986
1987 };
1988
/*
 * Selects how a UGM (untyped global memory) fence message is routed:
 * either through the normal UGM pipeline or redirected to the LSC unit.
 */
enum PACKED lsc_backup_fence_routing {
   /* Normal routing: UGM fence is routed to UGM pipeline. */
   LSC_NORMAL_ROUTING,
   /* Route UGM fence to LSC unit. */
   LSC_ROUTE_TO_LSC,
};
1995
1996 /*
1997 * Specifies the size of the vector in a dataport message.
1998 */
1999 enum PACKED lsc_vect_size {
2000 LSC_VECT_SIZE_V1 = 0, /* vector length 1 */
2001 LSC_VECT_SIZE_V2 = 1, /* vector length 2 */
2002 LSC_VECT_SIZE_V3 = 2, /* Vector length 3 */
2003 LSC_VECT_SIZE_V4 = 3, /* Vector length 4 */
2004 LSC_VECT_SIZE_V8 = 4, /* Vector length 8 */
2005 LSC_VECT_SIZE_V16 = 5, /* Vector length 16 */
2006 LSC_VECT_SIZE_V32 = 6, /* Vector length 32 */
2007 LSC_VECT_SIZE_V64 = 7, /* Vector length 64 */
2008 };
2009
2010 #define LSC_ONE_ADDR_REG 1
2011
2012 #endif /* BRW_EU_DEFINES_H */
2013