• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  Copyright (C) Intel Corp.  2006.  All Rights Reserved.
3  Intel funded Tungsten Graphics to
4  develop this 3D driver.
5 
6  Permission is hereby granted, free of charge, to any person obtaining
7  a copy of this software and associated documentation files (the
8  "Software"), to deal in the Software without restriction, including
9  without limitation the rights to use, copy, modify, merge, publish,
10  distribute, sublicense, and/or sell copies of the Software, and to
11  permit persons to whom the Software is furnished to do so, subject to
12  the following conditions:
13 
14  The above copyright notice and this permission notice (including the
15  next paragraph) shall be included in all copies or substantial
16  portions of the Software.
17 
18  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21  IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 
26  **********************************************************************/
27  /*
28   * Authors:
29   *   Keith Whitwell <keithw@vmware.com>
30   */
31 
32 #pragma once
33 
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include "util/macros.h"
37 #include "dev/intel_device_info.h"
38 
39 /* The following hunk, up-to "Execution Unit" is used by both the
40  * intel/compiler and i965 codebase. */
41 
42 #define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
43 /* Using the GNU statement expression extension */
44 #define SET_FIELD(value, field)                                         \
45    ({                                                                   \
46       uint32_t fieldval = (uint32_t)(value) << field ## _SHIFT;         \
47       assert((fieldval & ~ field ## _MASK) == 0);                       \
48       fieldval & field ## _MASK;                                        \
49    })
50 
51 #define SET_BITS(value, high, low)                                      \
52    ({                                                                   \
53       const uint32_t fieldval = (uint32_t)(value) << (low);             \
54       assert((fieldval & ~INTEL_MASK(high, low)) == 0);                 \
55       fieldval & INTEL_MASK(high, low);                                 \
56    })
57 
58 #define GET_BITS(data, high, low) ((data & INTEL_MASK((high), (low))) >> (low))
59 #define GET_FIELD(word, field) (((word)  & field ## _MASK) >> field ## _SHIFT)
60 
61 # define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT		0
62 # define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID		1
63 
64 /* Execution Unit (EU) defines
65  */
66 
67 #define BRW_ALIGN_1   0
68 #define BRW_ALIGN_16  1
69 
70 #define BRW_ADDRESS_DIRECT                        0
71 #define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER    1
72 
73 #define BRW_CHANNEL_X     0
74 #define BRW_CHANNEL_Y     1
75 #define BRW_CHANNEL_Z     2
76 #define BRW_CHANNEL_W     3
77 
78 enum brw_compression {
79    BRW_COMPRESSION_NONE       = 0,
80    BRW_COMPRESSION_2NDHALF    = 1,
81    BRW_COMPRESSION_COMPRESSED = 2,
82 };
83 
84 #define GFX6_COMPRESSION_1Q		0
85 #define GFX6_COMPRESSION_2Q		1
86 #define GFX6_COMPRESSION_3Q		2
87 #define GFX6_COMPRESSION_4Q		3
88 #define GFX6_COMPRESSION_1H		0
89 #define GFX6_COMPRESSION_2H		2
90 
91 enum ENUM_PACKED brw_conditional_mod {
92    BRW_CONDITIONAL_NONE = 0,
93    BRW_CONDITIONAL_Z    = 1,
94    BRW_CONDITIONAL_NZ   = 2,
95    BRW_CONDITIONAL_EQ   = 1,	/* Z */
96    BRW_CONDITIONAL_NEQ  = 2,	/* NZ */
97    BRW_CONDITIONAL_G    = 3,
98    BRW_CONDITIONAL_GE   = 4,
99    BRW_CONDITIONAL_L    = 5,
100    BRW_CONDITIONAL_LE   = 6,
101    BRW_CONDITIONAL_R    = 7,    /* Gen <= 5 */
102    BRW_CONDITIONAL_O    = 8,
103    BRW_CONDITIONAL_U    = 9,
104 };
105 
106 #define BRW_DEBUG_NONE        0
107 #define BRW_DEBUG_BREAKPOINT  1
108 
109 enum ENUM_PACKED brw_execution_size {
110    BRW_EXECUTE_1  = 0,
111    BRW_EXECUTE_2  = 1,
112    BRW_EXECUTE_4  = 2,
113    BRW_EXECUTE_8  = 3,
114    BRW_EXECUTE_16 = 4,
115    BRW_EXECUTE_32 = 5,
116 };
117 
118 enum ENUM_PACKED brw_horizontal_stride {
119    BRW_HORIZONTAL_STRIDE_0 = 0,
120    BRW_HORIZONTAL_STRIDE_1 = 1,
121    BRW_HORIZONTAL_STRIDE_2 = 2,
122    BRW_HORIZONTAL_STRIDE_4 = 3,
123 };
124 
125 enum ENUM_PACKED gfx10_align1_3src_src_horizontal_stride {
126    BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0 = 0,
127    BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1 = 1,
128    BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2 = 2,
129    BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4 = 3,
130 };
131 
132 enum ENUM_PACKED gfx10_align1_3src_dst_horizontal_stride {
133    BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1 = 0,
134    BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_2 = 1,
135 };
136 
137 #define BRW_INSTRUCTION_NORMAL    0
138 #define BRW_INSTRUCTION_SATURATE  1
139 
140 #define BRW_MASK_ENABLE   0
141 #define BRW_MASK_DISABLE  1
142 
143 /** @{
144  *
145  * Gfx6 has replaced "mask enable/disable" with WECtrl, which is
146  * effectively the same but much simpler to think about.  Now, there
147  * are two contributors ANDed together to whether channels are
148  * executed: The predication on the instruction, and the channel write
149  * enable.
150  */
151 /**
152  * This is the default value.  It means that a channel's write enable is set
153  * if the per-channel IP is pointing at this instruction.
154  */
155 #define BRW_WE_NORMAL		0
156 /**
157  * This is used like BRW_MASK_DISABLE, and causes all channels to have
158  * their write enable set.  Note that predication still contributes to
159  * whether the channel actually gets written.
160  */
161 #define BRW_WE_ALL		1
162 /** @} */
163 
164 enum opcode {
165    /* These are the actual hardware instructions. */
166    BRW_OPCODE_ILLEGAL,
167    BRW_OPCODE_SYNC,
168    BRW_OPCODE_MOV,
169    BRW_OPCODE_SEL,
170    BRW_OPCODE_MOVI,
171    BRW_OPCODE_NOT,
172    BRW_OPCODE_AND,
173    BRW_OPCODE_OR,
174    BRW_OPCODE_XOR,
175    BRW_OPCODE_SHR,
176    BRW_OPCODE_SHL,
177    BRW_OPCODE_SMOV,
178    BRW_OPCODE_ASR,
179    BRW_OPCODE_ROR,  /**< Gfx11+ */
180    BRW_OPCODE_ROL,  /**< Gfx11+ */
181    BRW_OPCODE_CMP,
182    BRW_OPCODE_CMPN,
183    BRW_OPCODE_CSEL,
184    BRW_OPCODE_BFREV,
185    BRW_OPCODE_BFE,
186    BRW_OPCODE_BFI1,
187    BRW_OPCODE_BFI2,
188    BRW_OPCODE_JMPI,
189    BRW_OPCODE_BRD,
190    BRW_OPCODE_IF,
191    BRW_OPCODE_BRC,
192    BRW_OPCODE_ELSE,
193    BRW_OPCODE_ENDIF,
194    BRW_OPCODE_DO, /**< Used as pseudo opcode, will be moved later. */
195    BRW_OPCODE_WHILE,
196    BRW_OPCODE_BREAK,
197    BRW_OPCODE_CONTINUE,
198    BRW_OPCODE_HALT,
199    BRW_OPCODE_CALLA,
200    BRW_OPCODE_CALL,
201    BRW_OPCODE_RET,
202    BRW_OPCODE_GOTO,
203    BRW_OPCODE_WAIT,
204    BRW_OPCODE_SEND,
205    BRW_OPCODE_SENDC,
206    BRW_OPCODE_SENDS,
207    BRW_OPCODE_SENDSC,
208    BRW_OPCODE_MATH,
209    BRW_OPCODE_ADD,
210    BRW_OPCODE_MUL,
211    BRW_OPCODE_AVG,
212    BRW_OPCODE_FRC,
213    BRW_OPCODE_RNDU,
214    BRW_OPCODE_RNDD,
215    BRW_OPCODE_RNDE,
216    BRW_OPCODE_RNDZ,
217    BRW_OPCODE_MAC,
218    BRW_OPCODE_MACH,
219    BRW_OPCODE_LZD,
220    BRW_OPCODE_FBH,
221    BRW_OPCODE_FBL,
222    BRW_OPCODE_CBIT,
223    BRW_OPCODE_ADDC,
224    BRW_OPCODE_SUBB,
225    BRW_OPCODE_ADD3, /* Gen12+ only */
226    BRW_OPCODE_DP4,
227    BRW_OPCODE_DPH,
228    BRW_OPCODE_DP3,
229    BRW_OPCODE_DP2,
230    BRW_OPCODE_DP4A, /**< Gfx12+ */
231    BRW_OPCODE_LINE,
232    BRW_OPCODE_DPAS,  /**< Gfx12.5+ */
233    BRW_OPCODE_PLN, /**< Up until Gfx9 */
234    BRW_OPCODE_MAD,
235    BRW_OPCODE_LRP,
236    BRW_OPCODE_MADM,
237    BRW_OPCODE_NOP,
238 
239    NUM_BRW_OPCODES,
240 
241    /**
242     * The position/ordering of the arguments are defined
243     * by the enum fb_write_logical_srcs.
244     */
245    FS_OPCODE_FB_WRITE_LOGICAL = NUM_BRW_OPCODES,
246 
247    FS_OPCODE_FB_READ_LOGICAL,
248 
249    SHADER_OPCODE_RCP,
250    SHADER_OPCODE_RSQ,
251    SHADER_OPCODE_SQRT,
252    SHADER_OPCODE_EXP2,
253    SHADER_OPCODE_LOG2,
254    SHADER_OPCODE_POW,
255    SHADER_OPCODE_INT_QUOTIENT,
256    SHADER_OPCODE_INT_REMAINDER,
257    SHADER_OPCODE_SIN,
258    SHADER_OPCODE_COS,
259 
260    /**
261     * A generic "send" opcode.  The first two sources are the message
262     * descriptor and extended message descriptor respectively.  The third
263     * and optional fourth sources are the message payload
264     */
265    SHADER_OPCODE_SEND,
266 
267    /**
268     * An "undefined" write which does nothing but indicates to liveness that
269     * we don't care about any values in the register which predate this
270     * instruction.  Used to prevent partial writes from causing issues with
271     * live ranges.
272     */
273    SHADER_OPCODE_UNDEF,
274 
275    /**
276     * Texture sampling opcodes.
277     *
278     * LOGICAL opcodes are eventually translated to SHADER_OPCODE_SEND but
279     * take parameters as individual sources.  See enum tex_logical_srcs.
280     */
281    SHADER_OPCODE_TEX_LOGICAL,
282    SHADER_OPCODE_TXD_LOGICAL,
283    SHADER_OPCODE_TXF_LOGICAL,
284    SHADER_OPCODE_TXL_LOGICAL,
285    SHADER_OPCODE_TXS_LOGICAL,
286    FS_OPCODE_TXB_LOGICAL,
287    SHADER_OPCODE_TXF_CMS_W_LOGICAL,
288    SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL,
289    SHADER_OPCODE_TXF_MCS_LOGICAL,
290    SHADER_OPCODE_LOD_LOGICAL,
291    SHADER_OPCODE_TG4_LOGICAL,
292    SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL,
293    SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL,
294    SHADER_OPCODE_TG4_BIAS_LOGICAL,
295    SHADER_OPCODE_TG4_OFFSET_LOGICAL,
296    SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL,
297    SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL,
298    SHADER_OPCODE_SAMPLEINFO_LOGICAL,
299 
300    SHADER_OPCODE_IMAGE_SIZE_LOGICAL,
301 
302    /**
303     * Combines multiple sources of size 1 into a larger virtual GRF.
304     * For example, parameters for a send-from-GRF message.  Or, updating
305     * channels of a size 4 VGRF used to store vec4s such as texturing results.
306     *
307     * This will be lowered into MOVs from each source to consecutive offsets
308     * of the destination VGRF.
309     *
310     * src[0] may be BAD_FILE.  If so, the lowering pass skips emitting the MOV,
311     * but still reserves the first channel of the destination VGRF.  This can be
312     * used to reserve space for, say, a message header set up by the generators.
313     */
314    SHADER_OPCODE_LOAD_PAYLOAD,
315 
316    /**
317     * Packs a number of sources into a single value. Unlike LOAD_PAYLOAD, this
318     * acts intra-channel, obtaining the final value for each channel by
319     * combining the sources values for the same channel, the first source
320     * occupying the lowest bits and the last source occupying the highest
321     * bits.
322     */
323    FS_OPCODE_PACK,
324 
325    SHADER_OPCODE_RND_MODE,
326    SHADER_OPCODE_FLOAT_CONTROL_MODE,
327 
328    /**
329     * Memory fence messages.
330     *
331     * Source 0: Must be register g0, used as header.
332     * Source 1: Immediate bool to indicate whether control is returned to the
333     *           thread only after the fence has been honored.
334     * Source 2: Immediate byte indicating which memory to fence.  Zero means
335     *           global memory; GFX7_BTI_SLM means SLM (for Gfx11+ only).
336     *
337     * Vec4 backend only uses Source 0.
338     */
339    SHADER_OPCODE_MEMORY_FENCE,
340 
341    /**
342     * Scheduling-only fence.
343     *
344     * Sources can be used to force a stall until the registers in those are
345     * available.  This might generate MOVs or SYNC_NOPs (Gfx12+).
346     */
347    FS_OPCODE_SCHEDULING_FENCE,
348 
349    SHADER_OPCODE_SCRATCH_HEADER,
350 
351    /**
352     * Gfx8+ SIMD8 URB messages.
353     */
354    SHADER_OPCODE_URB_READ_LOGICAL,
355    SHADER_OPCODE_URB_WRITE_LOGICAL,
356 
357    /**
358     * Return the index of the first enabled live channel and assign it to
359     * to the first component of the destination.  Frequently used as input
360     * for the BROADCAST pseudo-opcode.
361     */
362    SHADER_OPCODE_FIND_LIVE_CHANNEL,
363 
364    /**
365     * Return the index of the last enabled live channel and assign it to
366     * the first component of the destination.
367     */
368    SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL,
369 
370    /**
371     * Return the current execution mask and assign it to the first component
372     * of the destination.
373     *
374     * \sa opcode::FS_OPCODE_LOAD_LIVE_CHANNELS
375     */
376    SHADER_OPCODE_LOAD_LIVE_CHANNELS,
377 
378    /**
379     * Return the current execution mask in the specified flag subregister.
380     * Can be CSE'ed more easily than a plain MOV from the ce0 ARF register.
381     */
382    FS_OPCODE_LOAD_LIVE_CHANNELS,
383 
384    /**
385     * Pick the channel from its first source register given by the index
386     * specified as second source.  Useful for variable indexing of surfaces.
387     *
388     * Note that because the result of this instruction is by definition
389     * uniform and it can always be splatted to multiple channels using a
390     * scalar regioning mode, only the first channel of the destination region
391     * is guaranteed to be updated, which implies that BROADCAST instructions
392     * should usually be marked force_writemask_all.
393     */
394    SHADER_OPCODE_BROADCAST,
395 
396    /* Pick the channel from its first source register given by the index
397     * specified as second source.
398     *
399     * This is similar to the BROADCAST instruction except that it takes a
400     * dynamic index and potentially puts a different value in each output
401     * channel.
402     */
403    SHADER_OPCODE_SHUFFLE,
404 
405    /* Combine all values in each subset (cluster) of channels using an operation,
406     * and broadcast the result to all channels in the subset.
407     *
408     * Source 0: Value.
409     * Source 1: Immediate with brw_reduce_op.
410     * Source 2: Immediate with cluster size.
411     */
412    SHADER_OPCODE_REDUCE,
413 
414    /* Combine values of previous channels using an operation.  Inclusive scan
415     * will include the value of the channel itself in the channel result.
416     *
417     * Source 0: Value.
418     * Source 1: Immediate with brw_reduce_op.
419     */
420    SHADER_OPCODE_INCLUSIVE_SCAN,
421    SHADER_OPCODE_EXCLUSIVE_SCAN,
422 
423    /* Check if any or all values in each subset (cluster) of channels are set,
424     * and broadcast the result to all channels in the subset.
425     *
426     * Source 0: Boolean value.
427     * Source 1: Immediate with cluster size.
428     */
429    SHADER_OPCODE_VOTE_ANY,
430    SHADER_OPCODE_VOTE_ALL,
431 
432    /* Check if the values of all channels are equal, and broadcast the result
433     * to all channels.
434     *
435     * Source 0: Value.
436     */
437    SHADER_OPCODE_VOTE_EQUAL,
438 
439    /* Produces a mask from the boolean value from all channels, and broadcast
440     * the result to all channels.
441     *
442     * Source 0: Boolean value.
443     */
444    SHADER_OPCODE_BALLOT,
445 
446    /* Select between src0 and src1 based on channel enables.
447     *
448     * This instruction copies src0 into the enabled channels of the
449     * destination and copies src1 into the disabled channels.
450     */
451    SHADER_OPCODE_SEL_EXEC,
452 
453    /* Swap values inside a quad based on the direction.
454     *
455     * Source 0: Value.
456     * Source 1: Immediate with brw_swap_direction.
457     */
458    SHADER_OPCODE_QUAD_SWAP,
459 
460    /* Read value from the first live channel and broadcast the result
461     * to all channels.
462     *
463     * Source 0: Value.
464     */
465    SHADER_OPCODE_READ_FROM_LIVE_CHANNEL,
466 
467    /* Read value from a specified channel and broadcast the result
468     * to all channels.
469     *
470     * Source 0: Value.
471     * Source 1: Index of the channel to pick value from.
472     */
473    SHADER_OPCODE_READ_FROM_CHANNEL,
474 
475    /* This turns into an align16 mov from src0 to dst with a swizzle
476     * provided as an immediate in src1.
477     */
478    SHADER_OPCODE_QUAD_SWIZZLE,
479 
480    /* Take every Nth element in src0 and broadcast it to the group of N
481     * channels in which it lives in the destination.  The offset within the
482     * cluster is given by src1 and the cluster size is given by src2.
483     */
484    SHADER_OPCODE_CLUSTER_BROADCAST,
485 
486    SHADER_OPCODE_GET_BUFFER_SIZE,
487 
488    SHADER_OPCODE_INTERLOCK,
489 
490    /** Target for a HALT
491     *
492     * All HALT instructions in a shader must target the same jump point and
493     * that point is denoted by a HALT_TARGET instruction.
494     */
495    SHADER_OPCODE_HALT_TARGET,
496 
497    FS_OPCODE_DDX_COARSE,
498    FS_OPCODE_DDX_FINE,
499    /**
500     * Compute dFdy(), dFdyCoarse(), or dFdyFine().
501     */
502    FS_OPCODE_DDY_COARSE,
503    FS_OPCODE_DDY_FINE,
504    FS_OPCODE_PIXEL_X,
505    FS_OPCODE_PIXEL_Y,
506    FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
507    FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
508    FS_OPCODE_PACK_HALF_2x16_SPLIT,
509    FS_OPCODE_INTERPOLATE_AT_SAMPLE,
510    FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
511    FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET,
512 
513    /**
514     * GLSL barrier()
515     */
516    SHADER_OPCODE_BARRIER,
517 
518    /**
519     * Calculate the high 32-bits of a 32x32 multiply.
520     */
521    SHADER_OPCODE_MULH,
522 
523    /** Signed subtraction with saturation. */
524    SHADER_OPCODE_ISUB_SAT,
525 
526    /** Unsigned subtraction with saturation. */
527    SHADER_OPCODE_USUB_SAT,
528 
529    /**
530     * A MOV that uses VxH indirect addressing.
531     *
532     * Source 0: A register to start from (HW_REG).
533     * Source 1: An indirect offset (in bytes, UD GRF).
534     * Source 2: The length of the region that could be accessed (in bytes,
535     *           UD immediate).
536     */
537    SHADER_OPCODE_MOV_INDIRECT,
538 
539    /** Fills out a relocatable immediate */
540    SHADER_OPCODE_MOV_RELOC_IMM,
541 
542    SHADER_OPCODE_BTD_SPAWN_LOGICAL,
543    SHADER_OPCODE_BTD_RETIRE_LOGICAL,
544 
545    SHADER_OPCODE_READ_ARCH_REG,
546 
547    SHADER_OPCODE_LOAD_SUBGROUP_INVOCATION,
548 
549    RT_OPCODE_TRACE_RAY_LOGICAL,
550 
551    SHADER_OPCODE_MEMORY_LOAD_LOGICAL,
552    SHADER_OPCODE_MEMORY_STORE_LOGICAL,
553    SHADER_OPCODE_MEMORY_ATOMIC_LOGICAL,
554 };
555 
556 enum fb_write_logical_srcs {
557    FB_WRITE_LOGICAL_SRC_COLOR0,      /* REQUIRED */
558    FB_WRITE_LOGICAL_SRC_COLOR1,      /* for dual source blend messages */
559    FB_WRITE_LOGICAL_SRC_SRC0_ALPHA,
560    FB_WRITE_LOGICAL_SRC_SRC_DEPTH,   /* gl_FragDepth */
561    FB_WRITE_LOGICAL_SRC_DST_DEPTH,   /* GFX4-5: passthrough from thread */
562    FB_WRITE_LOGICAL_SRC_SRC_STENCIL, /* gl_FragStencilRefARB */
563    FB_WRITE_LOGICAL_SRC_OMASK,       /* Sample Mask (gl_SampleMask) */
564    FB_WRITE_LOGICAL_SRC_COMPONENTS,  /* REQUIRED */
565    FB_WRITE_LOGICAL_SRC_NULL_RT,     /* Null RT write */
566    FB_WRITE_LOGICAL_NUM_SRCS
567 };
568 
569 enum tex_logical_srcs {
570    /** Texture coordinates */
571    TEX_LOGICAL_SRC_COORDINATE,
572    /** Shadow comparator */
573    TEX_LOGICAL_SRC_SHADOW_C,
574    /** dPdx if the operation takes explicit derivatives, otherwise LOD value */
575    TEX_LOGICAL_SRC_LOD,
576    /** dPdy if the operation takes explicit derivatives */
577    TEX_LOGICAL_SRC_LOD2,
578    /** Min LOD */
579    TEX_LOGICAL_SRC_MIN_LOD,
580    /** Sample index */
581    TEX_LOGICAL_SRC_SAMPLE_INDEX,
582    /** MCS data */
583    TEX_LOGICAL_SRC_MCS,
584    /** REQUIRED: Texture surface index */
585    TEX_LOGICAL_SRC_SURFACE,
586    /** Texture sampler index */
587    TEX_LOGICAL_SRC_SAMPLER,
588    /** Texture surface bindless handle */
589    TEX_LOGICAL_SRC_SURFACE_HANDLE,
590    /** Texture sampler bindless handle */
591    TEX_LOGICAL_SRC_SAMPLER_HANDLE,
592    /** Texel offset for gathers */
593    TEX_LOGICAL_SRC_TG4_OFFSET,
594    /** REQUIRED: Number of coordinate components (as UD immediate) */
595    TEX_LOGICAL_SRC_COORD_COMPONENTS,
596    /** REQUIRED: Number of derivative components (as UD immediate) */
597    TEX_LOGICAL_SRC_GRAD_COMPONENTS,
598    /** REQUIRED: request residency (as UD immediate) */
599    TEX_LOGICAL_SRC_RESIDENCY,
600 
601    TEX_LOGICAL_NUM_SRCS,
602 };
603 
604 enum pull_uniform_constant_srcs {
605    /** Surface binding table index */
606    PULL_UNIFORM_CONSTANT_SRC_SURFACE,
607    /** Surface bindless handle */
608    PULL_UNIFORM_CONSTANT_SRC_SURFACE_HANDLE,
609    /** Surface offset */
610    PULL_UNIFORM_CONSTANT_SRC_OFFSET,
611    /** Pull size */
612    PULL_UNIFORM_CONSTANT_SRC_SIZE,
613 
614    PULL_UNIFORM_CONSTANT_SRCS,
615 };
616 
617 enum pull_varying_constant_srcs {
618    /** Surface binding table index */
619    PULL_VARYING_CONSTANT_SRC_SURFACE,
620    /** Surface bindless handle */
621    PULL_VARYING_CONSTANT_SRC_SURFACE_HANDLE,
622    /** Surface offset */
623    PULL_VARYING_CONSTANT_SRC_OFFSET,
624    /** Pull alignment */
625    PULL_VARYING_CONSTANT_SRC_ALIGNMENT,
626 
627    PULL_VARYING_CONSTANT_SRCS,
628 };
629 
630 enum get_buffer_size_srcs {
631    /** Surface binding table index */
632    GET_BUFFER_SIZE_SRC_SURFACE,
633    /** Surface bindless handle */
634    GET_BUFFER_SIZE_SRC_SURFACE_HANDLE,
635    /** LOD */
636    GET_BUFFER_SIZE_SRC_LOD,
637 
638    GET_BUFFER_SIZE_SRCS
639 };
640 
641 enum memory_logical_mode {
642    MEMORY_MODE_TYPED,
643    MEMORY_MODE_UNTYPED,
644    MEMORY_MODE_SHARED_LOCAL,
645    MEMORY_MODE_SCRATCH,
646    MEMORY_MODE_CONSTANT,
647 };
648 
649 enum memory_logical_srcs {
650    /** enum lsc_opcode (as UD immediate) */
651    MEMORY_LOGICAL_OPCODE,
652 
653    /** enum memory_logical_mode (as UD immediate) */
654    MEMORY_LOGICAL_MODE,
655 
656    /** enum lsc_addr_surface_type (as UD immediate) */
657    MEMORY_LOGICAL_BINDING_TYPE,
658 
659    /**
660     * Where to find the surface state.  Depends on BINDING_TYPE above:
661     *
662     * - SS: pointer to surface state (relative to surface base address)
663     * - BSS: pointer to surface state (relative to bindless surface base)
664     * - BTI: binding table index
665     * - FLAT: This should should be BAD_FILE
666     */
667    MEMORY_LOGICAL_BINDING,
668 
669    /** Coordinate/address/offset for where to access memory */
670    MEMORY_LOGICAL_ADDRESS,
671 
672    /** Dimensionality of the "address" source (as UD immediate) */
673    MEMORY_LOGICAL_COORD_COMPONENTS,
674 
675    /** Required alignment of address in bytes; 0 for natural alignment */
676    MEMORY_LOGICAL_ALIGNMENT,
677 
678    /** Bit-size in the form of enum lsc_data_size (as UD immediate) */
679    MEMORY_LOGICAL_DATA_SIZE,
680 
681    /** Number of vector components (as UD immediate) */
682    MEMORY_LOGICAL_COMPONENTS,
683 
684    /** memory_flags bitfield (as UD immediate) */
685    MEMORY_LOGICAL_FLAGS,
686 
687    /** Data to write for stores or the first operand for atomics */
688    MEMORY_LOGICAL_DATA0,
689 
690    /** Second operand for two-source atomics */
691    MEMORY_LOGICAL_DATA1,
692 
693    MEMORY_LOGICAL_NUM_SRCS
694 };
695 
696 enum memory_flags {
697    /** Whether this is a transposed (i.e. block) memory access */
698    MEMORY_FLAG_TRANSPOSE = 1 << 0,
699    /** Whether this operation should fire for helper invocations */
700    MEMORY_FLAG_INCLUDE_HELPERS = 1 << 1,
701 };
702 
703 enum rt_logical_srcs {
704    /** Address of the globals */
705    RT_LOGICAL_SRC_GLOBALS,
706    /** Level at which the tracing should start */
707    RT_LOGICAL_SRC_BVH_LEVEL,
708    /** Type of tracing operation */
709    RT_LOGICAL_SRC_TRACE_RAY_CONTROL,
710    /** Synchronous tracing (ray query) */
711    RT_LOGICAL_SRC_SYNCHRONOUS,
712 
713    RT_LOGICAL_NUM_SRCS
714 };
715 
716 enum urb_logical_srcs {
717    URB_LOGICAL_SRC_HANDLE,
718    /** Offset in bytes on Xe2+ or OWords on older platforms */
719    URB_LOGICAL_SRC_PER_SLOT_OFFSETS,
720    URB_LOGICAL_SRC_CHANNEL_MASK,
721    /** Data to be written.  BAD_FILE for reads. */
722    URB_LOGICAL_SRC_DATA,
723    URB_LOGICAL_SRC_COMPONENTS,
724    URB_LOGICAL_NUM_SRCS
725 };
726 
727 enum interpolator_logical_srcs {
728    /** Interpolation offset */
729    INTERP_SRC_OFFSET,
730    /** Message data  */
731    INTERP_SRC_MSG_DESC,
732    /** Flag register for dynamic mode */
733    INTERP_SRC_DYNAMIC_MODE,
734 
735    INTERP_NUM_SRCS
736 };
737 
738 enum brw_reduce_op {
739    BRW_REDUCE_OP_ADD,
740    BRW_REDUCE_OP_MUL,
741    BRW_REDUCE_OP_MIN,
742    BRW_REDUCE_OP_MAX,
743    BRW_REDUCE_OP_AND,
744    BRW_REDUCE_OP_OR,
745    BRW_REDUCE_OP_XOR,
746 };
747 
748 enum brw_swap_direction {
749    BRW_SWAP_HORIZONTAL,
750    BRW_SWAP_VERTICAL,
751    BRW_SWAP_DIAGONAL,
752 };
753 
754 enum ENUM_PACKED brw_predicate {
755    BRW_PREDICATE_NONE                =  0,
756    BRW_PREDICATE_NORMAL              =  1,
757    BRW_PREDICATE_ALIGN1_ANYV         =  2,
758    BRW_PREDICATE_ALIGN1_ALLV         =  3,
759    BRW_PREDICATE_ALIGN1_ANY2H        =  4,
760    BRW_PREDICATE_ALIGN1_ALL2H        =  5,
761    BRW_PREDICATE_ALIGN1_ANY4H        =  6,
762    BRW_PREDICATE_ALIGN1_ALL4H        =  7,
763    BRW_PREDICATE_ALIGN1_ANY8H        =  8,
764    BRW_PREDICATE_ALIGN1_ALL8H        =  9,
765    BRW_PREDICATE_ALIGN1_ANY16H       = 10,
766    BRW_PREDICATE_ALIGN1_ALL16H       = 11,
767    BRW_PREDICATE_ALIGN1_ANY32H       = 12,
768    BRW_PREDICATE_ALIGN1_ALL32H       = 13,
769    BRW_PREDICATE_ALIGN16_REPLICATE_X =  2,
770    BRW_PREDICATE_ALIGN16_REPLICATE_Y =  3,
771    BRW_PREDICATE_ALIGN16_REPLICATE_Z =  4,
772    BRW_PREDICATE_ALIGN16_REPLICATE_W =  5,
773    BRW_PREDICATE_ALIGN16_ANY4H       =  6,
774    BRW_PREDICATE_ALIGN16_ALL4H       =  7,
775    XE2_PREDICATE_ANY = 2,
776    XE2_PREDICATE_ALL = 3
777 };
778 
779 enum ENUM_PACKED brw_reg_file {
780    BAD_FILE = 0,
781 
782    ARF,
783    FIXED_GRF,
784    IMM,
785 
786    ADDRESS,
787    VGRF,
788    ATTR,
789    UNIFORM, /* prog_data->params[reg] */
790 };
791 
792 /* CNL adds Align1 support for 3-src instructions. Bit 35 of the instruction
793  * word is "Execution Datatype" which controls whether the instruction operates
794  * on float or integer types. The register arguments have fields that offer
795  * more fine control their respective types.
796  */
797 enum ENUM_PACKED gfx10_align1_3src_exec_type {
798    BRW_ALIGN1_3SRC_EXEC_TYPE_INT   = 0,
799    BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT = 1,
800 };
801 
802 #define BRW_ARF_NULL                  0x00
803 #define BRW_ARF_ADDRESS               0x10
804 #define BRW_ARF_ACCUMULATOR           0x20
805 #define BRW_ARF_FLAG                  0x30
806 #define BRW_ARF_MASK                  0x40
807 #define BRW_ARF_SCALAR                0x60
808 #define BRW_ARF_STATE                 0x70
809 #define BRW_ARF_CONTROL               0x80
810 #define BRW_ARF_NOTIFICATION_COUNT    0x90
811 #define BRW_ARF_IP                    0xA0
812 #define BRW_ARF_TDR                   0xB0
813 #define BRW_ARF_TIMESTAMP             0xC0
814 
815 #define BRW_THREAD_NORMAL     0
816 #define BRW_THREAD_ATOMIC     1
817 #define BRW_THREAD_SWITCH     2
818 
819 /* Subregister of the address register used for particular purposes */
820 enum brw_address_subreg {
821    BRW_ADDRESS_SUBREG_INDIRECT_DESC = 0,
822    BRW_ADDRESS_SUBREG_INDIRECT_EX_DESC = 2,
823    BRW_ADDRESS_SUBREG_INDIRECT_SPILL_DESC = 4,
824 };
825 
826 enum ENUM_PACKED brw_vertical_stride {
827    BRW_VERTICAL_STRIDE_0               = 0,
828    BRW_VERTICAL_STRIDE_1               = 1,
829    BRW_VERTICAL_STRIDE_2               = 2,
830    BRW_VERTICAL_STRIDE_4               = 3,
831    BRW_VERTICAL_STRIDE_8               = 4,
832    BRW_VERTICAL_STRIDE_16              = 5,
833    BRW_VERTICAL_STRIDE_32              = 6,
834    BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL = 0xF,
835 };
836 
837 enum ENUM_PACKED gfx10_align1_3src_vertical_stride {
838    BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0 = 0,
839    BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1 = 1,
840    BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2 = 1,
841    BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4 = 2,
842    BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8 = 3,
843 };
844 
845 enum ENUM_PACKED brw_width {
846    BRW_WIDTH_1  = 0,
847    BRW_WIDTH_2  = 1,
848    BRW_WIDTH_4  = 2,
849    BRW_WIDTH_8  = 3,
850    BRW_WIDTH_16 = 4,
851 };
852 
853 /**
854  * Gfx12+ SWSB SBID synchronization mode.
855  *
856  * This is represented as a bitmask including any required SBID token
857  * synchronization modes, used to synchronize out-of-order instructions.  Only
858  * the strongest mode of the mask will be provided to the hardware in the SWSB
859  * field of an actual hardware instruction, but virtual instructions may be
860  * able to take into account multiple of them.
861  */
862 enum tgl_sbid_mode {
863    TGL_SBID_NULL = 0,
864    TGL_SBID_SRC = 1,
865    TGL_SBID_DST = 2,
866    TGL_SBID_SET = 4
867 };
868 
869 
870 enum gfx12_sub_byte_precision {
871    BRW_SUB_BYTE_PRECISION_NONE = 0,
872 
873    /** 4 bits. Signedness determined by base type */
874    BRW_SUB_BYTE_PRECISION_4BIT = 1,
875 
876    /** 2 bits. Signedness determined by base type */
877    BRW_SUB_BYTE_PRECISION_2BIT = 2,
878 };
879 
880 enum gfx12_systolic_depth {
881    BRW_SYSTOLIC_DEPTH_16 = 0,
882    BRW_SYSTOLIC_DEPTH_2 = 1,
883    BRW_SYSTOLIC_DEPTH_4 = 2,
884    BRW_SYSTOLIC_DEPTH_8 = 3,
885 };
886 
887 #ifdef __cplusplus
888 /**
889  * Allow bitwise arithmetic of tgl_sbid_mode enums.
890  */
891 inline tgl_sbid_mode
892 operator|(tgl_sbid_mode x, tgl_sbid_mode y)
893 {
894    return tgl_sbid_mode(unsigned(x) | unsigned(y));
895 }
896 
897 inline tgl_sbid_mode
898 operator&(tgl_sbid_mode x, tgl_sbid_mode y)
899 {
900    return tgl_sbid_mode(unsigned(x) & unsigned(y));
901 }
902 
903 inline tgl_sbid_mode &
904 operator|=(tgl_sbid_mode &x, tgl_sbid_mode y)
905 {
906    return x = x | y;
907 }
908 
909 #endif
910 
911 /**
912  * TGL+ SWSB RegDist synchronization pipeline.
913  *
914  * On TGL all instructions that use the RegDist synchronization mechanism are
915  * considered to be executed as a single in-order pipeline, therefore only the
916  * TGL_PIPE_FLOAT pipeline is applicable.  On XeHP+ platforms there are two
917  * additional asynchronous ALU pipelines (which still execute instructions
918  * in-order and use the RegDist synchronization mechanism).  TGL_PIPE_NONE
919  * doesn't provide any RegDist pipeline synchronization information and allows
920  * the hardware to infer the pipeline based on the source types of the
921  * instruction.  TGL_PIPE_ALL can be used when synchronization with all ALU
922  * pipelines is intended.
923  *
924  * Xe3 adds TGL_PIPE_SCALAR for a very specific use case (writing immediates
925  * to scalar register).
926  */
927 enum tgl_pipe {
928    TGL_PIPE_NONE = 0,
929    TGL_PIPE_FLOAT,
930    TGL_PIPE_INT,
931    TGL_PIPE_LONG,
932    TGL_PIPE_MATH,
933    TGL_PIPE_SCALAR,
934    TGL_PIPE_ALL
935 };
936 
937 /**
938  * Logical representation of the SWSB scheduling information of a hardware
939  * instruction.  The binary representation is slightly more compact.
940  */
941 struct tgl_swsb {
942    unsigned regdist : 3;
943    enum tgl_pipe pipe : 3;
944    unsigned sbid : 5;
945    enum tgl_sbid_mode mode : 3;
946 };
947 
948 /**
949  * Construct a scheduling annotation with a single RegDist dependency.  This
950  * synchronizes with the completion of the d-th previous in-order instruction.
951  * The index is one-based, zero causes a no-op tgl_swsb to be constructed.
952  */
953 static inline struct tgl_swsb
tgl_swsb_regdist(unsigned d)954 tgl_swsb_regdist(unsigned d)
955 {
956    const struct tgl_swsb swsb = { d, d ? TGL_PIPE_ALL : TGL_PIPE_NONE };
957    assert(swsb.regdist == d);
958    return swsb;
959 }
960 
961 /**
962  * Construct a scheduling annotation that synchronizes with the specified SBID
963  * token.
964  */
965 static inline struct tgl_swsb
tgl_swsb_sbid(enum tgl_sbid_mode mode,unsigned sbid)966 tgl_swsb_sbid(enum tgl_sbid_mode mode, unsigned sbid)
967 {
968    const struct tgl_swsb swsb = { 0, TGL_PIPE_NONE, sbid, mode };
969    assert(swsb.sbid == sbid);
970    return swsb;
971 }
972 
973 /**
974  * Construct a no-op scheduling annotation.
975  */
976 static inline struct tgl_swsb
tgl_swsb_null(void)977 tgl_swsb_null(void)
978 {
979    return tgl_swsb_regdist(0);
980 }
981 
982 /**
983  * Return a scheduling annotation that allocates the same SBID synchronization
984  * token as \p swsb.  In addition it will synchronize against a previous
985  * in-order instruction if \p regdist is non-zero.
986  */
987 static inline struct tgl_swsb
tgl_swsb_dst_dep(struct tgl_swsb swsb,unsigned regdist)988 tgl_swsb_dst_dep(struct tgl_swsb swsb, unsigned regdist)
989 {
990    swsb.regdist = regdist;
991    swsb.mode = swsb.mode & TGL_SBID_SET;
992    swsb.pipe = (regdist ? TGL_PIPE_ALL : TGL_PIPE_NONE);
993    return swsb;
994 }
995 
996 /**
997  * Return a scheduling annotation that synchronizes against the same SBID and
998  * RegDist dependencies as \p swsb, but doesn't allocate any SBID token.
999  */
1000 static inline struct tgl_swsb
tgl_swsb_src_dep(struct tgl_swsb swsb)1001 tgl_swsb_src_dep(struct tgl_swsb swsb)
1002 {
1003    swsb.mode = swsb.mode & (TGL_SBID_SRC | TGL_SBID_DST);
1004    return swsb;
1005 }
1006 
1007 /**
1008  * Convert the provided tgl_swsb to the hardware's binary representation of an
1009  * SWSB annotation.
1010  */
1011 static inline uint32_t
tgl_swsb_encode(const struct intel_device_info * devinfo,struct tgl_swsb swsb,enum opcode opcode)1012 tgl_swsb_encode(const struct intel_device_info *devinfo,
1013                 struct tgl_swsb swsb, enum opcode opcode)
1014 {
1015    if (!swsb.mode) {
1016       const unsigned pipe = devinfo->verx10 < 125 ? 0 :
1017          swsb.pipe == TGL_PIPE_FLOAT ? 0x10 :
1018          swsb.pipe == TGL_PIPE_INT ? 0x18 :
1019          swsb.pipe == TGL_PIPE_LONG ? 0x20 :
1020          swsb.pipe == TGL_PIPE_MATH ? 0x28 :
1021          swsb.pipe == TGL_PIPE_SCALAR ? 0x30 :
1022          swsb.pipe == TGL_PIPE_ALL ? 0x8 : 0;
1023       return pipe | swsb.regdist;
1024 
1025    } else if (swsb.regdist) {
1026       if (devinfo->ver >= 20) {
1027          unsigned mode = 0;
1028          if (opcode == BRW_OPCODE_DPAS) {
1029             mode = (swsb.mode & TGL_SBID_SET) ? 0b01 :
1030                    (swsb.mode & TGL_SBID_SRC) ? 0b10 :
1031                  /* swsb.mode & TGL_SBID_DST */ 0b11;
1032          } else if (swsb.mode & TGL_SBID_SET) {
1033             assert(opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC);
1034             assert(swsb.pipe == TGL_PIPE_ALL ||
1035                    swsb.pipe == TGL_PIPE_INT ||
1036                    swsb.pipe == TGL_PIPE_FLOAT);
1037 
1038             mode = swsb.pipe == TGL_PIPE_INT   ? 0b11 :
1039                    swsb.pipe == TGL_PIPE_FLOAT ? 0b10 :
1040                 /* swsb.pipe == TGL_PIPE_ALL  */ 0b01;
1041          } else {
1042             assert(!(swsb.mode & ~(TGL_SBID_DST | TGL_SBID_SRC)));
1043             mode = swsb.pipe == TGL_PIPE_ALL  ? 0b11 :
1044                    swsb.mode == TGL_SBID_SRC  ? 0b10 :
1045                 /* swsb.mode == TGL_SBID_DST */ 0b01;
1046          }
1047          return mode << 8 | swsb.regdist << 5 | swsb.sbid;
1048       } else {
1049          assert(!(swsb.sbid & ~0xfu));
1050          return 0x80 | swsb.regdist << 4 | swsb.sbid;
1051       }
1052 
1053    } else {
1054       if (devinfo->ver >= 20) {
1055          return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0xc0 :
1056                              swsb.mode & TGL_SBID_DST ? 0x80 : 0xa0);
1057       } else {
1058          assert(!(swsb.sbid & ~0xfu));
1059          return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0x40 :
1060                              swsb.mode & TGL_SBID_DST ? 0x20 : 0x30);
1061       }
1062    }
1063 }
1064 
1065 /**
1066  * Convert the provided binary representation of an SWSB annotation to a
1067  * tgl_swsb.
1068  */
1069 static inline struct tgl_swsb
tgl_swsb_decode(const struct intel_device_info * devinfo,const bool is_unordered,const uint32_t x,enum opcode opcode)1070 tgl_swsb_decode(const struct intel_device_info *devinfo,
1071                 const bool is_unordered, const uint32_t x, enum opcode opcode)
1072 {
1073    if (devinfo->ver >= 20) {
1074       if (x & 0x300) {
1075          /* Mode isn't SingleInfo, there's a tuple */
1076          if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
1077             const struct tgl_swsb swsb = {
1078                (x & 0xe0u) >> 5,
1079                ((x & 0x300) == 0x300 ? TGL_PIPE_INT :
1080                 (x & 0x300) == 0x200 ? TGL_PIPE_FLOAT :
1081                 TGL_PIPE_ALL),
1082                x & 0x1fu,
1083                TGL_SBID_SET
1084             };
1085             return swsb;
1086          } else if (opcode == BRW_OPCODE_DPAS) {
1087             const struct tgl_swsb swsb = {
1088                .regdist = (x & 0xe0u) >> 5,
1089                .pipe = TGL_PIPE_NONE,
1090                .sbid = x & 0x1fu,
1091                .mode = (x & 0x300) == 0x300 ? TGL_SBID_DST :
1092                        (x & 0x300) == 0x200 ? TGL_SBID_SRC :
1093                                               TGL_SBID_SET,
1094             };
1095             return swsb;
1096          } else {
1097             const struct tgl_swsb swsb = {
1098                (x & 0xe0u) >> 5,
1099                ((x & 0x300) == 0x300 ? TGL_PIPE_ALL : TGL_PIPE_NONE),
1100                x & 0x1fu,
1101                ((x & 0x300) == 0x200 ? TGL_SBID_SRC : TGL_SBID_DST)
1102             };
1103             return swsb;
1104          }
1105 
1106       } else if ((x & 0xe0) == 0x80) {
1107          return tgl_swsb_sbid(TGL_SBID_DST, x & 0x1f);
1108       } else if ((x & 0xe0) == 0xa0) {
1109          return tgl_swsb_sbid(TGL_SBID_SRC, x & 0x1fu);
1110       } else if ((x & 0xe0) == 0xc0) {
1111          return tgl_swsb_sbid(TGL_SBID_SET, x & 0x1fu);
1112       } else {
1113             const struct tgl_swsb swsb = { x & 0x7u,
1114                                            ((x & 0x38) == 0x10 ? TGL_PIPE_FLOAT :
1115                                             (x & 0x38) == 0x18 ? TGL_PIPE_INT :
1116                                             (x & 0x38) == 0x20 ? TGL_PIPE_LONG :
1117                                             (x & 0x38) == 0x28 ? TGL_PIPE_MATH :
1118                                             (x & 0x38) == 0x8 ? TGL_PIPE_ALL :
1119                                             TGL_PIPE_NONE) };
1120             return swsb;
1121       }
1122 
1123    } else {
1124       if (x & 0x80) {
1125          const struct tgl_swsb swsb = { (x & 0x70u) >> 4, TGL_PIPE_NONE,
1126                                         x & 0xfu,
1127                                         is_unordered ?
1128                                         TGL_SBID_SET : TGL_SBID_DST };
1129          return swsb;
1130       } else if ((x & 0x70) == 0x20) {
1131          return tgl_swsb_sbid(TGL_SBID_DST, x & 0xfu);
1132       } else if ((x & 0x70) == 0x30) {
1133          return tgl_swsb_sbid(TGL_SBID_SRC, x & 0xfu);
1134       } else if ((x & 0x70) == 0x40) {
1135          return tgl_swsb_sbid(TGL_SBID_SET, x & 0xfu);
1136       } else {
1137          const struct tgl_swsb swsb = { x & 0x7u,
1138                                         ((x & 0x78) == 0x10 ? TGL_PIPE_FLOAT :
1139                                          (x & 0x78) == 0x18 ? TGL_PIPE_INT :
1140                                          (x & 0x78) == 0x50 ? TGL_PIPE_LONG :
1141                                          (x & 0x78) == 0x8 ? TGL_PIPE_ALL :
1142                                          TGL_PIPE_NONE) };
1143          assert(devinfo->verx10 >= 125 || swsb.pipe == TGL_PIPE_NONE);
1144          return swsb;
1145       }
1146    }
1147 }
1148 
1149 enum tgl_sync_function {
1150    TGL_SYNC_NOP = 0x0,
1151    TGL_SYNC_ALLRD = 0x2,
1152    TGL_SYNC_ALLWR = 0x3,
1153    TGL_SYNC_FENCE = 0xd,
1154    TGL_SYNC_BAR = 0xe,
1155    TGL_SYNC_HOST = 0xf
1156 };
1157 
1158 /**
1159  * Message target: Shared Function ID for where to SEND a message.
1160  *
1161  * These are enumerated in the ISA reference under "send - Send Message".
1162  * In particular, see the following tables:
1163  * - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
1164  * - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
1165  * - Ivybridge PRM, Volume 1 Part 1, section 3.2.7 "GPE Function IDs"
1166  */
1167 enum brw_message_target {
1168    BRW_SFID_NULL                     = 0,
1169    BRW_SFID_SAMPLER                  = 2,
1170    BRW_SFID_MESSAGE_GATEWAY          = 3,
1171    BRW_SFID_URB                      = 6,
1172    BRW_SFID_THREAD_SPAWNER           = 7,
1173    BRW_SFID_VME                      = 8,
1174 
1175    GFX6_SFID_DATAPORT_SAMPLER_CACHE  = 4,
1176    GFX6_SFID_DATAPORT_RENDER_CACHE   = 5,
1177    GFX6_SFID_DATAPORT_CONSTANT_CACHE = 9,
1178 
1179    GFX7_SFID_DATAPORT_DATA_CACHE     = 10,
1180    GFX7_SFID_PIXEL_INTERPOLATOR      = 11,
1181    HSW_SFID_DATAPORT_DATA_CACHE_1    = 12,
1182    HSW_SFID_CRE                      = 13,
1183 
1184    GFX12_SFID_TGM                      = 13, /* Typed Global Memory */
1185    GFX12_SFID_SLM                      = 14, /* Shared Local Memory */
1186    GFX12_SFID_UGM                      = 15, /* Untyped Global Memory */
1187 
1188    GEN_RT_SFID_BINDLESS_THREAD_DISPATCH = 7,
1189    GEN_RT_SFID_RAY_TRACE_ACCELERATOR = 8,
1190 };
1191 
1192 #define GFX7_MESSAGE_TARGET_DP_DATA_CACHE     10
1193 
1194 #define BRW_SAMPLER_RETURN_FORMAT_FLOAT32     0
1195 #define BRW_SAMPLER_RETURN_FORMAT_UINT32      2
1196 #define BRW_SAMPLER_RETURN_FORMAT_SINT32      3
1197 
1198 #define GFX8_SAMPLER_RETURN_FORMAT_32BITS    0
1199 #define GFX8_SAMPLER_RETURN_FORMAT_16BITS    1
1200 
1201 #define GFX5_SAMPLER_MESSAGE_SAMPLE              0
1202 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS         1
1203 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD          2
1204 #define GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE      3
1205 #define GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS       4
1206 #define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
1207 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE  6
1208 #define GFX5_SAMPLER_MESSAGE_SAMPLE_LD           7
1209 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4      8
1210 #define GFX5_SAMPLER_MESSAGE_LOD                 9
1211 #define GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO      10
1212 #define GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO   11
1213 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_L     13
1214 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_B     14
1215 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_I     15
1216 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C    16
1217 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO   17
1218 #define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C 18
1219 #define XE2_SAMPLER_MESSAGE_SAMPLE_MLOD          18
1220 #define XE2_SAMPLER_MESSAGE_SAMPLE_COMPARE_MLOD  19
1221 #define HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE 20
1222 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_I_C   21
1223 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_L_C   23
1224 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LZ           24
1225 #define GFX9_SAMPLER_MESSAGE_SAMPLE_C_LZ         25
1226 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD_LZ        26
1227 #define GFX9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W     28
1228 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS       29
1229 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS       30
1230 #define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DSS       31
1231 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_L           45
1232 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_B           46
1233 #define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_L_C         55
1234 
1235 /* for GFX5 only */
1236 #define BRW_SAMPLER_SIMD_MODE_SIMD4X2                   0
1237 #define BRW_SAMPLER_SIMD_MODE_SIMD8                     1
1238 #define BRW_SAMPLER_SIMD_MODE_SIMD16                    2
1239 #define BRW_SAMPLER_SIMD_MODE_SIMD32_64                 3
1240 
1241 #define GFX10_SAMPLER_SIMD_MODE_SIMD8H                  5
1242 #define GFX10_SAMPLER_SIMD_MODE_SIMD16H                 6
1243 
1244 #define XE2_SAMPLER_SIMD_MODE_SIMD16                  1
1245 #define XE2_SAMPLER_SIMD_MODE_SIMD32                  2
1246 #define XE2_SAMPLER_SIMD_MODE_SIMD16H                 5
1247 #define XE2_SAMPLER_SIMD_MODE_SIMD32H                 6
1248 
1249 /* GFX9 changes SIMD mode 0 to mean SIMD8D, but lets us get the SIMD4x2
1250  * behavior by setting bit 22 of dword 2 in the message header. */
1251 #define GFX9_SAMPLER_SIMD_MODE_SIMD8D                   0
1252 #define GFX9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2        (1 << 22)
1253 
1254 #define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW   0
1255 #define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH  1
1256 #define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS     2
1257 #define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS     3
1258 #define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS     4
1259 #define GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS  5
1260 #define BRW_DATAPORT_OWORD_BLOCK_OWORDS(n)              \
1261    ((n) == 1 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW :    \
1262     (n) == 2 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :      \
1263     (n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :      \
1264     (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS :      \
1265     (n) == 16 ? GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS :  \
1266     (abort(), ~0))
1267 #define BRW_DATAPORT_OWORD_BLOCK_DWORDS(n)              \
1268    ((n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW :    \
1269     (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :      \
1270     (n) == 16 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :     \
1271     (n) == 32 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS :     \
1272     (abort(), ~0))
1273 
1274 #define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD     0
1275 #define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS    2
1276 
1277 #define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS   2
1278 #define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS  3
1279 
1280 /* This one stays the same across generations. */
1281 #define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ          0
1282 /* GFX6 */
1283 #define GFX6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ	    1
1284 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ     2
1285 #define GFX6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ          4
1286 #define GFX6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ  5
1287 #define GFX6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ      6
1288 
1289 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE                0
1290 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED     1
1291 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01         2
1292 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23         3
1293 #define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01       4
1294 
1295 #define XE2_DATAPORT_RENDER_TARGET_WRITE_SIMD32_SINGLE_SOURCE                1
1296 #define XE2_DATAPORT_RENDER_TARGET_WRITE_SIMD16_DUAL_SOURCE                  2
1297 
1298 /* GFX6 */
1299 #define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE              7
1300 #define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE               8
1301 #define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE          9
1302 #define GFX6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE               10
1303 #define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE           11
1304 #define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE             12
1305 #define GFX6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE               13
1306 #define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE       14
1307 
1308 /* GFX7 */
1309 #define GFX7_DATAPORT_RC_MEDIA_BLOCK_READ                           4
1310 #define GFX7_DATAPORT_RC_TYPED_SURFACE_READ                         5
1311 #define GFX7_DATAPORT_RC_TYPED_ATOMIC_OP                            6
1312 #define GFX7_DATAPORT_RC_MEMORY_FENCE                               7
1313 #define GFX7_DATAPORT_RC_MEDIA_BLOCK_WRITE                          10
1314 #define GFX7_DATAPORT_RC_RENDER_TARGET_WRITE                        12
1315 #define GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE                        13
1316 #define GFX7_DATAPORT_DC_OWORD_BLOCK_READ                           0
1317 #define GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ                 1
1318 #define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_READ                      2
1319 #define GFX7_DATAPORT_DC_DWORD_SCATTERED_READ                       3
1320 #define GFX7_DATAPORT_DC_BYTE_SCATTERED_READ                        4
1321 #define GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ                       5
1322 #define GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP                          6
1323 #define GFX7_DATAPORT_DC_MEMORY_FENCE                               7
1324 #define GFX7_DATAPORT_DC_OWORD_BLOCK_WRITE                          8
1325 #define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE                     10
1326 #define GFX7_DATAPORT_DC_DWORD_SCATTERED_WRITE                      11
1327 #define GFX7_DATAPORT_DC_BYTE_SCATTERED_WRITE                       12
1328 #define GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE                      13
1329 
1330 #define GFX7_DATAPORT_SCRATCH_READ                            ((1 << 18) | \
1331                                                                (0 << 17))
1332 #define GFX7_DATAPORT_SCRATCH_WRITE                           ((1 << 18) | \
1333                                                                (1 << 17))
1334 #define GFX7_DATAPORT_SCRATCH_NUM_REGS_SHIFT                        12
1335 
1336 #define GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET     0
1337 #define GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE            1
1338 #define GFX7_PIXEL_INTERPOLATOR_LOC_CENTROID          2
1339 #define GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET   3
1340 
1341 /* HSW */
1342 #define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_READ                      0
1343 #define HSW_DATAPORT_DC_PORT0_UNALIGNED_OWORD_BLOCK_READ            1
1344 #define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_READ                 2
1345 #define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_READ                  3
1346 #define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ                   4
1347 #define HSW_DATAPORT_DC_PORT0_MEMORY_FENCE                          7
1348 #define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_WRITE                     8
1349 #define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_WRITE                10
1350 #define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_WRITE                 11
1351 #define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE                  12
1352 
1353 #define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ                  1
1354 #define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP                     2
1355 #define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2             3
1356 #define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_READ                      4
1357 #define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ                    5
1358 #define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP                       6
1359 #define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2               7
1360 #define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE                 9
1361 #define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_WRITE                     10
1362 #define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP                     11
1363 #define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP_SIMD4X2             12
1364 #define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE                   13
1365 #define GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ                   0x10
1366 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ             0x11
1367 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP                0x12
1368 #define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP      0x13
1369 #define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ                 0x14
1370 #define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE                0x15
1371 #define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE            0x19
1372 #define GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE                  0x1a
1373 #define GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP              0x1b
1374 #define GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP          0x1d
1375 #define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP    0x1e
1376 
1377 /* GFX9 */
1378 #define GFX9_DATAPORT_RC_RENDER_TARGET_WRITE                        12
1379 #define GFX9_DATAPORT_RC_RENDER_TARGET_READ                         13
1380 
1381 /* A64 scattered message subtype */
1382 #define GFX8_A64_SCATTERED_SUBTYPE_BYTE                             0
1383 #define GFX8_A64_SCATTERED_SUBTYPE_DWORD                            1
1384 #define GFX8_A64_SCATTERED_SUBTYPE_QWORD                            2
1385 #define GFX8_A64_SCATTERED_SUBTYPE_HWORD                            3
1386 
1387 /* Dataport special binding table indices: */
1388 #define BRW_BTI_STATELESS                255
1389 #define GFX7_BTI_SLM                     254
1390 
1391 /* The hardware docs are a bit contradictory here.  On Haswell, where they
1392  * first added cache ability control, there were 5 different cache modes (see
1393  * HSW_BTI_STATELESS_* above).  On Broadwell, they reduced to two:
1394  *
1395  *  - IA-Coherent (BTI=255): Coherent within Gen and coherent within the
1396  *    entire IA cache memory hierarchy.
1397  *
1398  *  - Non-Coherent (BTI=253): Coherent within Gen, same cache type.
1399  *
1400  * Information about stateless cache coherency can be found in the "A32
1401  * Stateless" section of the "3D Media GPGPU" volume of the PRM for each
1402  * hardware generation.
1403  *
1404  * Unfortunately, the docs for MDC_STATELESS appear to have been copied and
1405  * pasted from Haswell and give the Haswell definitions for the BTI values of
1406  * 255 and 253 including a warning about accessing 253 surfaces from multiple
1407  * threads.  This seems to be a copy+paste error and the definitions from the
1408  * "A32 Stateless" section should be trusted instead.
1409  *
1410  * Note that because the DRM sets bit 4 of HDC_CHICKEN0 on BDW, CHV and at
1411  * least some pre-production steppings of SKL due to WaForceEnableNonCoherent,
1412  * HDC memory access may have been overridden by the kernel to be non-coherent
1413  * (matching the behavior of the same BTI on pre-Gfx8 hardware) and BTI 255
1414  * may actually be an alias for BTI 253.
1415  */
1416 #define GFX8_BTI_STATELESS_IA_COHERENT   255
1417 #define GFX8_BTI_STATELESS_NON_COHERENT  253
1418 #define GFX9_BTI_BINDLESS                252
1419 
1420 /* Dataport atomic operations for Untyped Atomic Integer Operation message
1421  * (and others).
1422  */
1423 #define BRW_AOP_AND                   1
1424 #define BRW_AOP_OR                    2
1425 #define BRW_AOP_XOR                   3
1426 #define BRW_AOP_MOV                   4
1427 #define BRW_AOP_INC                   5
1428 #define BRW_AOP_DEC                   6
1429 #define BRW_AOP_ADD                   7
1430 #define BRW_AOP_SUB                   8
1431 #define BRW_AOP_REVSUB                9
1432 #define BRW_AOP_IMAX                  10
1433 #define BRW_AOP_IMIN                  11
1434 #define BRW_AOP_UMAX                  12
1435 #define BRW_AOP_UMIN                  13
1436 #define BRW_AOP_CMPWR                 14
1437 #define BRW_AOP_PREDEC                15
1438 
1439 /* Dataport atomic operations for Untyped Atomic Float Operation message. */
1440 #define BRW_AOP_FMAX                  1
1441 #define BRW_AOP_FMIN                  2
1442 #define BRW_AOP_FCMPWR                3
1443 #define BRW_AOP_FADD                  4
1444 
1445 #define BRW_MATH_FUNCTION_INV                              1
1446 #define BRW_MATH_FUNCTION_LOG                              2
1447 #define BRW_MATH_FUNCTION_EXP                              3
1448 #define BRW_MATH_FUNCTION_SQRT                             4
1449 #define BRW_MATH_FUNCTION_RSQ                              5
1450 #define BRW_MATH_FUNCTION_SIN                              6
1451 #define BRW_MATH_FUNCTION_COS                              7
1452 #define BRW_MATH_FUNCTION_FDIV                             9 /* gfx6+ */
1453 #define BRW_MATH_FUNCTION_POW                              10
1454 #define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER   11
1455 #define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT                 12
1456 #define BRW_MATH_FUNCTION_INT_DIV_REMAINDER                13
1457 #define GFX8_MATH_FUNCTION_INVM                            14
1458 #define GFX8_MATH_FUNCTION_RSQRTM                          15
1459 
1460 #define GFX7_URB_OPCODE_ATOMIC_MOV  4
1461 #define GFX7_URB_OPCODE_ATOMIC_INC  5
1462 #define GFX8_URB_OPCODE_ATOMIC_ADD  6
1463 #define GFX8_URB_OPCODE_SIMD8_WRITE 7
1464 #define GFX8_URB_OPCODE_SIMD8_READ  8
1465 #define GFX125_URB_OPCODE_FENCE     9
1466 
1467 #define BRW_URB_SWIZZLE_NONE          0
1468 #define BRW_URB_SWIZZLE_INTERLEAVE    1
1469 #define BRW_URB_SWIZZLE_TRANSPOSE     2
1470 
1471 #define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY         0
1472 #define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY        1
1473 #define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG          2
1474 #define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP        3
1475 #define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG          4
1476 #define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
1477 #define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE      6
1478 
1479 
1480 /* Gfx7 "GS URB Entry Allocation Size" is a U9-1 field, so the maximum gs_size
1481  * is 2^9, or 512.  It's counted in multiples of 64 bytes.
1482  *
1483  * Identical for VS, DS, and HS.
1484  */
1485 #define GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES                (512*64)
1486 #define GFX7_MAX_DS_URB_ENTRY_SIZE_BYTES                (512*64)
1487 #define GFX7_MAX_HS_URB_ENTRY_SIZE_BYTES                (512*64)
1488 #define GFX7_MAX_VS_URB_ENTRY_SIZE_BYTES                (512*64)
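
/* A minimal sketch of the size-to-field arithmetic implied by the comment
 * above: a U9-1 field counts 64-byte units with a bias of one, so e.g. a
 * 4096-byte entry would be programmed as 4096/64 - 1 = 63.  The helper name
 * is hypothetical, not the real driver interface.
 */
static inline unsigned
example_gfx7_urb_entry_size_field(unsigned entry_size_bytes)
{
   assert(entry_size_bytes > 0 && entry_size_bytes % 64 == 0);
   assert(entry_size_bytes <= GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES);
   return entry_size_bytes / 64 - 1;
}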
1489 
1490 /* GS Thread Payload
1491  */
1492 
1493 /* 3DSTATE_GS "Output Vertex Size" has an effective maximum of 62. It's
1494  * counted in multiples of 16 bytes.
1495  */
1496 #define GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES            (62*16)
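
/* Hypothetical helper sketch: converts an output vertex size in bytes to the
 * 16-byte units used by the limit above, asserting that it fits.
 */
static inline unsigned
example_gs_output_vertex_size_16b_units(unsigned size_bytes)
{
   assert(size_bytes % 16 == 0 &&
          size_bytes <= GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   return size_bytes / 16;
}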
1497 
1498 
1499 /* CR0.0[5:4] Floating-Point Rounding Modes
1500  *  Skylake PRM, Volume 7 Part 1, "Control Register", page 756
1501  */
1502 
1503 #define BRW_CR0_RND_MODE_MASK     0x30
1504 #define BRW_CR0_RND_MODE_SHIFT    4
1505 
1506 enum ENUM_PACKED brw_rnd_mode {
1507    BRW_RND_MODE_RTNE = 0,  /* Round to Nearest or Even */
1508    BRW_RND_MODE_RU = 1,    /* Round Up, toward +inf */
1509    BRW_RND_MODE_RD = 2,    /* Round Down, toward -inf */
1510    BRW_RND_MODE_RTZ = 3,   /* Round Toward Zero */
1511    BRW_RND_MODE_UNSPECIFIED,  /* Unspecified rounding mode */
1512 };
1513 
1514 #define BRW_CR0_FP64_DENORM_PRESERVE (1 << 6)
1515 #define BRW_CR0_FP32_DENORM_PRESERVE (1 << 7)
1516 #define BRW_CR0_FP16_DENORM_PRESERVE (1 << 10)
1517 
1518 #define BRW_CR0_FP_MODE_MASK (BRW_CR0_FP64_DENORM_PRESERVE | \
1519                               BRW_CR0_FP32_DENORM_PRESERVE | \
1520                               BRW_CR0_FP16_DENORM_PRESERVE | \
1521                               BRW_CR0_RND_MODE_MASK)
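
/* A minimal sketch of the bit arithmetic these masks imply: replace the
 * rounding-mode field of a CR0.0 value and optionally set the FP32
 * denorm-preserve bit.  This only shows the encoding; actually updating CR0
 * requires emitting the appropriate instructions, which is out of scope
 * here.  `bool` assumes <stdbool.h> (or C23) is in scope.
 */
static inline uint32_t
example_cr0_with_rnd_mode(uint32_t cr0, enum brw_rnd_mode mode,
                          bool preserve_fp32_denorms)
{
   assert(mode != BRW_RND_MODE_UNSPECIFIED);
   cr0 &= ~BRW_CR0_RND_MODE_MASK;
   cr0 |= ((uint32_t)mode << BRW_CR0_RND_MODE_SHIFT) & BRW_CR0_RND_MODE_MASK;
   if (preserve_fp32_denorms)
      cr0 |= BRW_CR0_FP32_DENORM_PRESERVE;
   return cr0;
}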
1522 
1523 /* MDC_DS - Data Size Message Descriptor Control Field
1524  * Skylake PRM, Volume 2d, page 129
1525  *
1526  * Specifies the number of Bytes to be read or written per Dword by the
1527  * byte_scattered read/write and byte_scaled read/write messages.
1528  */
1529 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE     0
1530 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD     1
1531 #define GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD    2
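
/* Hypothetical sketch: selects the MDC_DS encoding above from a per-Dword
 * access size in bits (8, 16 or 32).
 */
static inline unsigned
example_gfx7_byte_scattered_data_element(unsigned bit_size)
{
   switch (bit_size) {
   case 8:  return GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
   case 16: return GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
   case 32: return GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
   default: unreachable("unsupported byte_scattered bit size");
   }
}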
1532 
1533 #define GEN_RT_BTD_MESSAGE_SPAWN 1
1534 
1535 #define GEN_RT_TRACE_RAY_INITAL       0
1536 #define GEN_RT_TRACE_RAY_INSTANCE     1
1537 #define GEN_RT_TRACE_RAY_COMMIT       2
1538 #define GEN_RT_TRACE_RAY_CONTINUE     3
1539 
1540 #define GEN_RT_BTD_SHADER_TYPE_ANY_HIT        0
1541 #define GEN_RT_BTD_SHADER_TYPE_CLOSEST_HIT    1
1542 #define GEN_RT_BTD_SHADER_TYPE_MISS           2
1543 #define GEN_RT_BTD_SHADER_TYPE_INTERSECTION   3
1544 
1545 /* Starting with Xe-HPG, the old dataport was massively reworked.  The new
1546  * unit, called the Load/Store Cache or LSC, has a significantly improved
1547  * interface.  Instead of bespoke messages for every case, there are basically
1548  * one or two messages with different bits to control things like address
1549  * size, how much data is read/written, etc.  It is a much nicer interface,
1550  * but it also means all of the dataport encoding/decoding code has to be
1551  * rewritten.  The enums below describe the new interface.
1552  */
1553 enum lsc_opcode {
1554    LSC_OP_LOAD            = 0,
1555    LSC_OP_LOAD_CMASK      = 2,
1556    LSC_OP_STORE           = 4,
1557    LSC_OP_STORE_CMASK     = 6,
1558    LSC_OP_ATOMIC_INC      = 8,
1559    LSC_OP_ATOMIC_DEC      = 9,
1560    LSC_OP_ATOMIC_LOAD     = 10,
1561    LSC_OP_ATOMIC_STORE    = 11,
1562    LSC_OP_ATOMIC_ADD      = 12,
1563    LSC_OP_ATOMIC_SUB      = 13,
1564    LSC_OP_ATOMIC_MIN      = 14,
1565    LSC_OP_ATOMIC_MAX      = 15,
1566    LSC_OP_ATOMIC_UMIN     = 16,
1567    LSC_OP_ATOMIC_UMAX     = 17,
1568    LSC_OP_ATOMIC_CMPXCHG  = 18,
1569    LSC_OP_ATOMIC_FADD     = 19,
1570    LSC_OP_ATOMIC_FSUB     = 20,
1571    LSC_OP_ATOMIC_FMIN     = 21,
1572    LSC_OP_ATOMIC_FMAX     = 22,
1573    LSC_OP_ATOMIC_FCMPXCHG = 23,
1574    LSC_OP_ATOMIC_AND      = 24,
1575    LSC_OP_ATOMIC_OR       = 25,
1576    LSC_OP_ATOMIC_XOR      = 26,
1577    LSC_OP_FENCE           = 31
1578 };
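
/* Minimal predicate sketches over the opcode space above.  The real compiler
 * has its own helpers, so these are purely illustrative; `bool` assumes
 * <stdbool.h> (or C23) is in scope.
 */
static inline bool
example_lsc_opcode_is_atomic(enum lsc_opcode op)
{
   return op >= LSC_OP_ATOMIC_INC && op <= LSC_OP_ATOMIC_XOR;
}

static inline bool
example_lsc_opcode_is_store(enum lsc_opcode op)
{
   return op == LSC_OP_STORE || op == LSC_OP_STORE_CMASK;
}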
1579 
1580 /*
1581  * Specifies the size of the dataport address payload in registers.
1582  */
1583 enum ENUM_PACKED lsc_addr_reg_size {
1584    LSC_ADDR_REG_SIZE_1  = 1,
1585    LSC_ADDR_REG_SIZE_2  = 2,
1586    LSC_ADDR_REG_SIZE_3  = 3,
1587    LSC_ADDR_REG_SIZE_4  = 4,
1588    LSC_ADDR_REG_SIZE_6  = 6,
1589    LSC_ADDR_REG_SIZE_8  = 8,
1590 };
1591 
1592 /*
1593  * Specifies the size of the address payload item in a dataport message.
1594  */
1595 enum ENUM_PACKED lsc_addr_size {
1596   LSC_ADDR_SIZE_A16 = 1,    /* 16-bit address offset */
1597   LSC_ADDR_SIZE_A32 = 2,    /* 32-bit address offset */
1598   LSC_ADDR_SIZE_A64 = 3,    /* 64-bit address offset */
1599 };
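
/* Illustrative sketch: the address width in bytes corresponding to each
 * lsc_addr_size encoding above.
 */
static inline unsigned
example_lsc_addr_size_bytes(enum lsc_addr_size sz)
{
   switch (sz) {
   case LSC_ADDR_SIZE_A16: return 2;
   case LSC_ADDR_SIZE_A32: return 4;
   case LSC_ADDR_SIZE_A64: return 8;
   default: unreachable("invalid lsc_addr_size");
   }
}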
1600 
1601 /*
1602  * Specifies the type of the address payload item in a dataport message. The
1603  * address type specifies how the dataport message decodes the Extended
1604  * Descriptor for the surface attributes and address calculation.
1605  */
1606 enum ENUM_PACKED lsc_addr_surface_type {
1607    LSC_ADDR_SURFTYPE_FLAT = 0, /* Flat */
1608    LSC_ADDR_SURFTYPE_BSS = 1,  /* Bindless surface state */
1609    LSC_ADDR_SURFTYPE_SS = 2,   /* Surface state */
1610    LSC_ADDR_SURFTYPE_BTI = 3,  /* Binding table index */
1611 };
1612 
1613 /*
1614  * Specifies the dataport message override to the default L1 and L3 memory
1615  * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1616  * cache streaming (S) and invalidate-after-read (IAR). Dataport L3 cache
1617  * policies are uncached (UC) and cached (C).
1618  */
1619 enum lsc_cache_load {
1620    /* No override. Use the non-pipelined state or surface state cache settings
1621     * for L1 and L3.
1622     */
1623    LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
1624    /* Override to L1 uncached and L3 uncached */
1625    LSC_CACHE_LOAD_L1UC_L3UC      = 1,
1626    /* Override to L1 uncached and L3 cached */
1627    LSC_CACHE_LOAD_L1UC_L3C       = 2,
1628    /* Override to L1 cached and L3 uncached */
1629    LSC_CACHE_LOAD_L1C_L3UC       = 3,
1630    /* Override to cache at both L1 and L3 */
1631    LSC_CACHE_LOAD_L1C_L3C        = 4,
1632    /* Override to L1 streaming load and L3 uncached */
1633    LSC_CACHE_LOAD_L1S_L3UC       = 5,
1634    /* Override to L1 streaming load and L3 cached */
1635    LSC_CACHE_LOAD_L1S_L3C        = 6,
1636    /* For load messages, override to L1 invalidate-after-read, and L3 cached. */
1637    LSC_CACHE_LOAD_L1IAR_L3C      = 7,
1638 };
1639 
1640 /*
1641  * Specifies the dataport message override to the default L1 and L3 memory
1642  * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1643  * streaming (S) and invalidate-after-read (IAR). Dataport L3 cache policies
1644  * are uncached (UC), cached (C), cached-as-a-constant (CC) and
1645  * invalidate-after-read (IAR).
1646  */
1647 enum PACKED xe2_lsc_cache_load {
1648    /* No override. Use the non-pipelined or surface state cache settings for L1
1649     * and L3.
1650     */
1651    XE2_LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
1652    /* Override to L1 uncached and L3 uncached */
1653    XE2_LSC_CACHE_LOAD_L1UC_L3UC = 2,
1654    /* Override to L1 uncached and L3 cached */
1655    XE2_LSC_CACHE_LOAD_L1UC_L3C = 4,
1656    /* Override to L1 uncached and L3 cached as a constant */
1657    XE2_LSC_CACHE_LOAD_L1UC_L3CC = 5,
1658    /* Override to L1 cached and L3 uncached */
1659    XE2_LSC_CACHE_LOAD_L1C_L3UC = 6,
1660    /* Override to L1 cached and L3 cached */
1661    XE2_LSC_CACHE_LOAD_L1C_L3C = 8,
1662    /* Override to L1 cached and L3 cached as a constant */
1663    XE2_LSC_CACHE_LOAD_L1C_L3CC = 9,
1664    /* Override to L1 cached as streaming load and L3 uncached */
1665    XE2_LSC_CACHE_LOAD_L1S_L3UC = 10,
1666    /* Override to L1 cached as streaming load and L3 cached */
1667    XE2_LSC_CACHE_LOAD_L1S_L3C = 12,
1668    /* Override to L1 and L3 invalidate after read */
1669    XE2_LSC_CACHE_LOAD_L1IAR_L3IAR = 14,
1671 };
1672 
1673 /*
1674  * Specifies the dataport message override to the default L1 and L3 memory
1675  * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1676  * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1677  * uncached (UC) and cached (WB).
1678  */
1679 enum ENUM_PACKED lsc_cache_store {
1680    /* No override. Use the non-pipelined or surface state cache settings for L1
1681     * and L3.
1682     */
1683    LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1684    /* Override to L1 uncached and L3 uncached */
1685    LSC_CACHE_STORE_L1UC_L3UC = 1,
1686    /* Override to L1 uncached and L3 cached */
1687    LSC_CACHE_STORE_L1UC_L3WB = 2,
1688    /* Override to L1 write-through and L3 uncached */
1689    LSC_CACHE_STORE_L1WT_L3UC = 3,
1690    /* Override to L1 write-through and L3 cached */
1691    LSC_CACHE_STORE_L1WT_L3WB = 4,
1692    /* Override to L1 streaming and L3 uncached */
1693    LSC_CACHE_STORE_L1S_L3UC = 5,
1694    /* Override to L1 streaming and L3 cached */
1695    LSC_CACHE_STORE_L1S_L3WB = 6,
1696    /* Override to L1 write-back, and L3 cached */
1697    LSC_CACHE_STORE_L1WB_L3WB = 7,
1699 };
1700 
1701 /*
1702  * Specifies the dataport message override to the default L1 and L3 memory
1703  * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1704  * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1705  * uncached (UC) and cached (WB).
1706  */
1707 enum PACKED xe2_lsc_cache_store {
1708    /* No override. Use the non-pipelined or surface state cache settings for L1
1709     * and L3.
1710     */
1711    XE2_LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1712    /* Override to L1 uncached and L3 uncached */
1713    XE2_LSC_CACHE_STORE_L1UC_L3UC = 2,
1714    /* Override to L1 uncached and L3 cached */
1715    XE2_LSC_CACHE_STORE_L1UC_L3WB = 4,
1716    /* Override to L1 write-through and L3 uncached */
1717    XE2_LSC_CACHE_STORE_L1WT_L3UC = 6,
1718    /* Override to L1 write-through and L3 cached */
1719    XE2_LSC_CACHE_STORE_L1WT_L3WB = 8,
1720    /* Override to L1 streaming and L3 uncached */
1721    XE2_LSC_CACHE_STORE_L1S_L3UC = 10,
1722    /* Override to L1 streaming and L3 cached */
1723    XE2_LSC_CACHE_STORE_L1S_L3WB = 12,
1724    /* Override to L1 write-back and L3 cached */
1725    XE2_LSC_CACHE_STORE_L1WB_L3WB = 14,
1727 };
1728 
1729 #define LSC_CACHE(devinfo, l_or_s, cc)                                  \
1730    ((devinfo)->ver < 20 ? (unsigned)LSC_CACHE_ ## l_or_s ## _ ## cc :   \
1731                           (unsigned)XE2_LSC_CACHE_ ## l_or_s ## _ ## cc)
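
/* Example use of the LSC_CACHE() selector above: the same logical policy
 * ("cache in both L1 and L3") resolves to LSC_CACHE_LOAD_L1C_L3C before Xe2
 * and to XE2_LSC_CACHE_LOAD_L1C_L3C on Xe2+.  Sketch only; the wrapper name
 * is hypothetical.
 */
static inline unsigned
example_default_load_cache_policy(const struct intel_device_info *devinfo)
{
   return LSC_CACHE(devinfo, LOAD, L1C_L3C);
}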
1732 
1733 /*
1734  * Specifies which components of the data payload 4-element vector (X,Y,Z,W)
1735  * are packed into the register payload.
1736  */
1737 enum ENUM_PACKED lsc_cmask {
1738    LSC_CMASK_X = 0x1,
1739    LSC_CMASK_Y = 0x2,
1740    LSC_CMASK_XY = 0x3,
1741    LSC_CMASK_Z = 0x4,
1742    LSC_CMASK_XZ = 0x5,
1743    LSC_CMASK_YZ = 0x6,
1744    LSC_CMASK_XYZ = 0x7,
1745    LSC_CMASK_W = 0x8,
1746    LSC_CMASK_XW = 0x9,
1747    LSC_CMASK_YW = 0xa,
1748    LSC_CMASK_XYW = 0xb,
1749    LSC_CMASK_ZW = 0xc,
1750    LSC_CMASK_XZW = 0xd,
1751    LSC_CMASK_YZW = 0xe,
1752    LSC_CMASK_XYZW = 0xf,
1753 };
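
/* A minimal sketch of how a contiguous component mask follows from a vector
 * component count: 1 -> X, 2 -> XY, 3 -> XYZ, 4 -> XYZW.
 */
static inline enum lsc_cmask
example_lsc_cmask_for_components(unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   return (enum lsc_cmask)((1u << num_components) - 1);
}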
1754 
1755 /*
1756  * Specifies the size of the data payload item in a dataport message.
1757  */
1758 enum ENUM_PACKED lsc_data_size {
1759    /* 8-bit scalar data value in memory, packed into a 8-bit data value in
1760     * register.
1761     */
1762    LSC_DATA_SIZE_D8 = 0,
1763    /* 16-bit scalar data value in memory, packed into a 16-bit data value in
1764     * register.
1765     */
1766    LSC_DATA_SIZE_D16 = 1,
1767    /* 32-bit scalar data value in memory, packed into 32-bit data value in
1768     * register.
1769     */
1770    LSC_DATA_SIZE_D32 = 2,
1771    /* 64-bit scalar data value in memory, packed into 64-bit data value in
1772     * register.
1773     */
1774    LSC_DATA_SIZE_D64 = 3,
1775    /* 8-bit scalar data value in memory, packed into 32-bit unsigned data value
1776     * in register.
1777     */
1778    LSC_DATA_SIZE_D8U32 = 4,
1779    /* 16-bit scalar data value in memory, packed into 32-bit unsigned data
1780     * value in register.
1781     */
1782    LSC_DATA_SIZE_D16U32 = 5,
1783    /* 16-bit scalar bfloat16 (BF16) data value in memory, converted to a
1784     * 32-bit float value in register.
1785     */
1786    LSC_DATA_SIZE_D16BF32 = 6,
1787 };
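
/* Illustrative sketch: bytes occupied per element in the register payload for
 * each data size above (the D8U32/D16U32/D16BF32 forms expand to 32 bits in
 * the register even though they are narrower in memory).
 */
static inline unsigned
example_lsc_data_size_reg_bytes(enum lsc_data_size sz)
{
   switch (sz) {
   case LSC_DATA_SIZE_D8:      return 1;
   case LSC_DATA_SIZE_D16:     return 2;
   case LSC_DATA_SIZE_D32:     return 4;
   case LSC_DATA_SIZE_D64:     return 8;
   case LSC_DATA_SIZE_D8U32:
   case LSC_DATA_SIZE_D16U32:
   case LSC_DATA_SIZE_D16BF32: return 4;
   default: unreachable("invalid lsc_data_size");
   }
}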
1788 
1789 /*
1790  * Specifies the scope of the fence.
1791  */
1792 enum ENUM_PACKED lsc_fence_scope {
1793    /* Wait until all previous memory transactions from this thread are observed
1794     * within the local thread-group.
1795     */
1796    LSC_FENCE_THREADGROUP = 0,
1797    /* Wait until all previous memory transactions from this thread are observed
1798     * within the local sub-slice.
1799     */
1800    LSC_FENCE_LOCAL = 1,
1801    /* Wait until all previous memory transactions from this thread are observed
1802     * in the local tile.
1803     */
1804    LSC_FENCE_TILE = 2,
1805    /* Wait until all previous memory transactions from this thread are observed
1806     * in the local GPU.
1807     */
1808    LSC_FENCE_GPU = 3,
1809    /* Wait until all previous memory transactions from this thread are observed
1810     * across all GPUs in the system.
1811     */
1812    LSC_FENCE_ALL_GPU = 4,
1813    /* Wait until all previous memory transactions from this thread are observed
1814     * at the "system" level.
1815     */
1816    LSC_FENCE_SYSTEM_RELEASE = 5,
1817    /* For GPUs that do not follow PCIe Write ordering for downstream writes
1818     * targeting device memory, a fence message with scope=System_Acquire will
1819     * commit to device memory all downstream and peer writes that have reached
1820     * the device.
1821     */
1822    LSC_FENCE_SYSTEM_ACQUIRE = 6,
1823 };
1824 
1825 /*
1826  * Specifies the type of cache flush operation to perform after a fence is
1827  * complete.
1828  */
1829 enum ENUM_PACKED lsc_flush_type {
1830    LSC_FLUSH_TYPE_NONE = 0,
1831    /*
1832     * For a R/W cache, evict dirty lines (M to I state) and invalidate clean
1833     * lines. For a RO cache, invalidate clean lines.
1834     */
1835    LSC_FLUSH_TYPE_EVICT = 1,
1836    /*
1837     * For both R/W and RO cache, invalidate clean lines in the cache.
1838     */
1839    LSC_FLUSH_TYPE_INVALIDATE = 2,
1840    /*
1841     * For a R/W cache, invalidate dirty lines (M to I state), without
1842     * write-back to next level. This opcode does nothing for a RO cache.
1843     */
1844    LSC_FLUSH_TYPE_DISCARD = 3,
1845    /*
1846     * For a R/W cache, write-back dirty lines to the next level, but kept in
1847     * the cache as "clean" (M to V state). This opcode does nothing for a RO
1848     * cache.
1849     */
1850    LSC_FLUSH_TYPE_CLEAN = 4,
1851    /*
1852     * Flush "RW" section of the L3 cache, but leave L1 and L2 caches untouched.
1853     */
1854    LSC_FLUSH_TYPE_L3ONLY = 5,
1855    /*
1856     * HW maps this flush type internally to NONE.
1857     */
1858    LSC_FLUSH_TYPE_NONE_6 = 6,
1860 };
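
/* Purely illustrative pairing of the two fence controls above: a hypothetical
 * descriptor for a GPU-scope fence that also invalidates read-only caches.
 * The struct and helper are not part of the real message encoding.
 */
struct example_lsc_fence_params {
   enum lsc_fence_scope scope;
   enum lsc_flush_type flush;
};

static inline struct example_lsc_fence_params
example_gpu_scope_invalidate_fence(void)
{
   return (struct example_lsc_fence_params) {
      .scope = LSC_FENCE_GPU,
      .flush = LSC_FLUSH_TYPE_INVALIDATE,
   };
}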
1861 
1862 enum ENUM_PACKED lsc_backup_fence_routing {
1863    /* Normal routing: UGM fence is routed to UGM pipeline. */
1864    LSC_NORMAL_ROUTING,
1865    /* Route UGM fence to LSC unit. */
1866    LSC_ROUTE_TO_LSC,
1867 };
1868 
1869 /*
1870  * Specifies the size of the vector in a dataport message.
1871  */
1872 enum ENUM_PACKED lsc_vect_size {
1873    LSC_VECT_SIZE_V1 = 0,    /* vector length 1 */
1874    LSC_VECT_SIZE_V2 = 1,    /* vector length 2 */
1875    LSC_VECT_SIZE_V3 = 2,    /* vector length 3 */
1876    LSC_VECT_SIZE_V4 = 3,    /* vector length 4 */
1877    LSC_VECT_SIZE_V8 = 4,    /* vector length 8 */
1878    LSC_VECT_SIZE_V16 = 5,   /* vector length 16 */
1879    LSC_VECT_SIZE_V32 = 6,   /* vector length 32 */
1880    LSC_VECT_SIZE_V64 = 7,   /* vector length 64 */
1881 };
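
/* Hypothetical helper sketch: the lsc_vect_size encoding for a given vector
 * length (only the lengths enumerated above are representable).
 */
static inline enum lsc_vect_size
example_lsc_vect_size(unsigned length)
{
   switch (length) {
   case 1:  return LSC_VECT_SIZE_V1;
   case 2:  return LSC_VECT_SIZE_V2;
   case 3:  return LSC_VECT_SIZE_V3;
   case 4:  return LSC_VECT_SIZE_V4;
   case 8:  return LSC_VECT_SIZE_V8;
   case 16: return LSC_VECT_SIZE_V16;
   case 32: return LSC_VECT_SIZE_V32;
   case 64: return LSC_VECT_SIZE_V64;
   default: unreachable("invalid LSC vector length");
   }
}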
1882 
1883 #define LSC_ONE_ADDR_REG   1
1884