/*
 * Copyright (c) 2013 Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef INSTR_A3XX_H_
#define INSTR_A3XX_H_

#define PACKED __attribute__((__packed__))

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* clang-format off */
void ir3_assert_handler(const char *expr, const char *file, int line,
                        const char *func) __attribute__((weak)) __attribute__((__noreturn__));
/* clang-format on */
/* A wrapper for assert() that allows overriding the handling of a failed
 * assert. This is needed for tools like crashdec, which may want to
 * attempt to disassemble memory that might not actually contain valid
 * instructions.
 */
#define ir3_assert(expr) \
   do { \
      if (!(expr)) { \
         if (ir3_assert_handler) { \
            ir3_assert_handler(#expr, __FILE__, __LINE__, __func__); \
         } \
         assert(expr); \
      } \
   } while (0)
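
/* How a tool can override the handler (an illustrative sketch, not part of
 * the original header): because ir3_assert_handler is declared weak above,
 * defining a regular symbol with the same signature anywhere in the tool
 * replaces the default assert() behavior. Something like:
 *
 *    void
 *    ir3_assert_handler(const char *expr, const char *file, int line,
 *                       const char *func)
 *    {
 *       fprintf(stderr, "%s:%d (%s): assert failed: %s\n",
 *               file, line, func, expr);
 *       exit(1);  // hypothetical policy: exit cleanly instead of abort()ing
 *    }
 */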

/* size of largest OPC field of all the instruction categories: */
#define NOPC_BITS 7

#define _OPC(cat, opc) (((cat) << NOPC_BITS) | opc)
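
/* For example (the values follow directly from the macro): _OPC(2, 3) is
 * (2 << 7) | 3 == 0x103, i.e. category 2, opcode 3 (OPC_MUL_F below).
 */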

/* clang-format off */
typedef enum {
   /* category 0: */
   OPC_NOP = _OPC(0, 0),
   OPC_B = _OPC(0, 1),
   OPC_JUMP = _OPC(0, 2),
   OPC_CALL = _OPC(0, 3),
   OPC_RET = _OPC(0, 4),
   OPC_KILL = _OPC(0, 5),
   OPC_END = _OPC(0, 6),
   OPC_EMIT = _OPC(0, 7),
   OPC_CUT = _OPC(0, 8),
   OPC_CHMASK = _OPC(0, 9),
   OPC_CHSH = _OPC(0, 10),
   OPC_FLOW_REV = _OPC(0, 11),

   OPC_BKT = _OPC(0, 16),
   OPC_STKS = _OPC(0, 17),
   OPC_STKR = _OPC(0, 18),
   OPC_XSET = _OPC(0, 19),
   OPC_XCLR = _OPC(0, 20),
   OPC_GETONE = _OPC(0, 21),
   OPC_DBG = _OPC(0, 22),
   OPC_SHPS = _OPC(0, 23), /* shader prologue start */
   OPC_SHPE = _OPC(0, 24), /* shader prologue end */
   OPC_GETLAST = _OPC(0, 25),

   OPC_PREDT = _OPC(0, 29), /* predicated true */
   OPC_PREDF = _OPC(0, 30), /* predicated false */
   OPC_PREDE = _OPC(0, 31), /* predicated end */

   /* Logical opcodes for different branch instruction variations: */
   OPC_BR = _OPC(0, 40),
   OPC_BRAO = _OPC(0, 41),
   OPC_BRAA = _OPC(0, 42),
   OPC_BRAC = _OPC(0, 43),
   OPC_BANY = _OPC(0, 44),
   OPC_BALL = _OPC(0, 45),
   OPC_BRAX = _OPC(0, 46),

   /* Logical opcode to distinguish kill and demote */
   OPC_DEMOTE = _OPC(0, 47),

   /* category 1: */
   OPC_MOV = _OPC(1, 0),
   OPC_MOVP = _OPC(1, 1),
   /* swz, gat, sct */
   OPC_MOVMSK = _OPC(1, 3),

   /* Virtual opcodes for instructions differentiated via a "sub-opcode" that
    * replaces the repeat field:
    */
   OPC_SWZ = _OPC(1, 4),
   OPC_GAT = _OPC(1, 5),
   OPC_SCT = _OPC(1, 6),

   /* Logical opcodes for different variants of mov: */
   OPC_MOV_IMMED = _OPC(1, 40),
   OPC_MOV_CONST = _OPC(1, 41),
   OPC_MOV_GPR = _OPC(1, 42),
   OPC_MOV_RELGPR = _OPC(1, 43),
   OPC_MOV_RELCONST = _OPC(1, 44),

   /* Macros that expand to an if statement + move */
   OPC_BALLOT_MACRO = _OPC(1, 50),
   OPC_ANY_MACRO = _OPC(1, 51),
   OPC_ALL_MACRO = _OPC(1, 52),
   OPC_ELECT_MACRO = _OPC(1, 53),
   OPC_READ_COND_MACRO = _OPC(1, 54),
   OPC_READ_FIRST_MACRO = _OPC(1, 55),
   OPC_SWZ_SHARED_MACRO = _OPC(1, 56),
   OPC_SHPS_MACRO = _OPC(1, 57),

   /* Macros that expand to a loop */
   OPC_SCAN_MACRO = _OPC(1, 58),

   /* category 2: */
   OPC_ADD_F = _OPC(2, 0),
   OPC_MIN_F = _OPC(2, 1),
   OPC_MAX_F = _OPC(2, 2),
   OPC_MUL_F = _OPC(2, 3),
   OPC_SIGN_F = _OPC(2, 4),
   OPC_CMPS_F = _OPC(2, 5),
   OPC_ABSNEG_F = _OPC(2, 6),
   OPC_CMPV_F = _OPC(2, 7),
   /* 8 - invalid */
   OPC_FLOOR_F = _OPC(2, 9),
   OPC_CEIL_F = _OPC(2, 10),
   OPC_RNDNE_F = _OPC(2, 11),
   OPC_RNDAZ_F = _OPC(2, 12),
   OPC_TRUNC_F = _OPC(2, 13),
   /* 14-15 - invalid */
   OPC_ADD_U = _OPC(2, 16),
   OPC_ADD_S = _OPC(2, 17),
   OPC_SUB_U = _OPC(2, 18),
   OPC_SUB_S = _OPC(2, 19),
   OPC_CMPS_U = _OPC(2, 20),
   OPC_CMPS_S = _OPC(2, 21),
   OPC_MIN_U = _OPC(2, 22),
   OPC_MIN_S = _OPC(2, 23),
   OPC_MAX_U = _OPC(2, 24),
   OPC_MAX_S = _OPC(2, 25),
   OPC_ABSNEG_S = _OPC(2, 26),
   /* 27 - invalid */
   OPC_AND_B = _OPC(2, 28),
   OPC_OR_B = _OPC(2, 29),
   OPC_NOT_B = _OPC(2, 30),
   OPC_XOR_B = _OPC(2, 31),
   /* 32 - invalid */
   OPC_CMPV_U = _OPC(2, 33),
   OPC_CMPV_S = _OPC(2, 34),
   /* 35-47 - invalid */
   OPC_MUL_U24 = _OPC(2, 48), /* 24b mul into 32b result */
   OPC_MUL_S24 = _OPC(2, 49), /* 24b mul into 32b result with sign extension */
   OPC_MULL_U = _OPC(2, 50),
   OPC_BFREV_B = _OPC(2, 51),
   OPC_CLZ_S = _OPC(2, 52),
   OPC_CLZ_B = _OPC(2, 53),
   OPC_SHL_B = _OPC(2, 54),
   OPC_SHR_B = _OPC(2, 55),
   OPC_ASHR_B = _OPC(2, 56),
   OPC_BARY_F = _OPC(2, 57),
   OPC_MGEN_B = _OPC(2, 58),
   OPC_GETBIT_B = _OPC(2, 59),
   OPC_SETRM = _OPC(2, 60),
   OPC_CBITS_B = _OPC(2, 61),
   OPC_SHB = _OPC(2, 62),
   OPC_MSAD = _OPC(2, 63),
   OPC_FLAT_B = _OPC(2, 64),

   /* category 3: */
   OPC_MAD_U16 = _OPC(3, 0),
   OPC_MADSH_U16 = _OPC(3, 1),
   OPC_MAD_S16 = _OPC(3, 2),
   OPC_MADSH_M16 = _OPC(3, 3), /* should this be .s16? */
   OPC_MAD_U24 = _OPC(3, 4),
   OPC_MAD_S24 = _OPC(3, 5),
   OPC_MAD_F16 = _OPC(3, 6),
   OPC_MAD_F32 = _OPC(3, 7),
   OPC_SEL_B16 = _OPC(3, 8),
   OPC_SEL_B32 = _OPC(3, 9),
   OPC_SEL_S16 = _OPC(3, 10),
   OPC_SEL_S32 = _OPC(3, 11),
   OPC_SEL_F16 = _OPC(3, 12),
   OPC_SEL_F32 = _OPC(3, 13),
   OPC_SAD_S16 = _OPC(3, 14),
   OPC_SAD_S32 = _OPC(3, 15),
   OPC_SHRM = _OPC(3, 16),
   OPC_SHLM = _OPC(3, 17),
   OPC_SHRG = _OPC(3, 18),
   OPC_SHLG = _OPC(3, 19),
   OPC_ANDG = _OPC(3, 20),
   OPC_DP2ACC = _OPC(3, 21),
   OPC_DP4ACC = _OPC(3, 22),
   OPC_WMM = _OPC(3, 23),
   OPC_WMM_ACCU = _OPC(3, 24),

   /* category 4: */
   OPC_RCP = _OPC(4, 0),
   OPC_RSQ = _OPC(4, 1),
   OPC_LOG2 = _OPC(4, 2),
   OPC_EXP2 = _OPC(4, 3),
   OPC_SIN = _OPC(4, 4),
   OPC_COS = _OPC(4, 5),
   OPC_SQRT = _OPC(4, 6),
   /* NOTE that these are 8+opc from their highp equivalents, so it's
    * possible that the high-order bit in the opc field has been repurposed
    * for half-precision use? But note that other ops (rcp/sin/cos/sqrt)
    * still use the same opc as highp.
    */
   OPC_HRSQ = _OPC(4, 9),
   OPC_HLOG2 = _OPC(4, 10),
   OPC_HEXP2 = _OPC(4, 11),
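
   /* Illustrating the note above with values from this enum: OPC_HRSQ is
    * _OPC(4, 9) while OPC_RSQ is _OPC(4, 1), i.e. opc 1 + 8; likewise
    * log2 -> hlog2 is 2 -> 10 and exp2 -> hexp2 is 3 -> 11.
    */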

   /* category 5: */
   OPC_ISAM = _OPC(5, 0),
   OPC_ISAML = _OPC(5, 1),
   OPC_ISAMM = _OPC(5, 2),
   OPC_SAM = _OPC(5, 3),
   OPC_SAMB = _OPC(5, 4),
   OPC_SAML = _OPC(5, 5),
   OPC_SAMGQ = _OPC(5, 6),
   OPC_GETLOD = _OPC(5, 7),
   OPC_CONV = _OPC(5, 8),
   OPC_CONVM = _OPC(5, 9),
   OPC_GETSIZE = _OPC(5, 10),
   OPC_GETBUF = _OPC(5, 11),
   OPC_GETPOS = _OPC(5, 12),
   OPC_GETINFO = _OPC(5, 13),
   OPC_DSX = _OPC(5, 14),
   OPC_DSY = _OPC(5, 15),
   OPC_GATHER4R = _OPC(5, 16),
   OPC_GATHER4G = _OPC(5, 17),
   OPC_GATHER4B = _OPC(5, 18),
   OPC_GATHER4A = _OPC(5, 19),
   OPC_SAMGP0 = _OPC(5, 20),
   OPC_SAMGP1 = _OPC(5, 21),
   OPC_SAMGP2 = _OPC(5, 22),
   OPC_SAMGP3 = _OPC(5, 23),
   OPC_DSXPP_1 = _OPC(5, 24),
   OPC_DSYPP_1 = _OPC(5, 25),
   OPC_RGETPOS = _OPC(5, 26),
   OPC_RGETINFO = _OPC(5, 27),
   OPC_BRCST_ACTIVE = _OPC(5, 28),
   OPC_QUAD_SHUFFLE_BRCST = _OPC(5, 29),
   OPC_QUAD_SHUFFLE_HORIZ = _OPC(5, 30),
   OPC_QUAD_SHUFFLE_VERT = _OPC(5, 31),
   OPC_QUAD_SHUFFLE_DIAG = _OPC(5, 32),
   /* cat5 meta instructions, placed above the cat5 opc field's size */
   OPC_DSXPP_MACRO = _OPC(5, 35),
   OPC_DSYPP_MACRO = _OPC(5, 36),

   /* category 6: */
   OPC_LDG = _OPC(6, 0), /* load-global */
   OPC_LDL = _OPC(6, 1),
   OPC_LDP = _OPC(6, 2),
   OPC_STG = _OPC(6, 3), /* store-global */
   OPC_STL = _OPC(6, 4),
   OPC_STP = _OPC(6, 5),
   OPC_LDIB = _OPC(6, 6),
   OPC_G2L = _OPC(6, 7),
   OPC_L2G = _OPC(6, 8),
   OPC_PREFETCH = _OPC(6, 9),
   OPC_LDLW = _OPC(6, 10),
   OPC_STLW = _OPC(6, 11),
   OPC_RESFMT = _OPC(6, 14),
   OPC_RESINFO = _OPC(6, 15),
   OPC_ATOMIC_ADD = _OPC(6, 16),
   OPC_ATOMIC_SUB = _OPC(6, 17),
   OPC_ATOMIC_XCHG = _OPC(6, 18),
   OPC_ATOMIC_INC = _OPC(6, 19),
   OPC_ATOMIC_DEC = _OPC(6, 20),
   OPC_ATOMIC_CMPXCHG = _OPC(6, 21),
   OPC_ATOMIC_MIN = _OPC(6, 22),
   OPC_ATOMIC_MAX = _OPC(6, 23),
   OPC_ATOMIC_AND = _OPC(6, 24),
   OPC_ATOMIC_OR = _OPC(6, 25),
   OPC_ATOMIC_XOR = _OPC(6, 26),
   OPC_LDGB = _OPC(6, 27),
   OPC_STGB = _OPC(6, 28),
   OPC_STIB = _OPC(6, 29),
   OPC_LDC = _OPC(6, 30),
   OPC_LDLV = _OPC(6, 31),
   OPC_PIPR = _OPC(6, 32), /* ??? */
   OPC_PIPC = _OPC(6, 33), /* ??? */
   OPC_EMIT2 = _OPC(6, 34), /* ??? */
   OPC_ENDLS = _OPC(6, 35), /* ??? */
   OPC_GETSPID = _OPC(6, 36), /* SP ID */
   OPC_GETWID = _OPC(6, 37), /* wavefront ID */
   OPC_GETFIBERID = _OPC(6, 38), /* fiber ID */

   /* Logical opcodes for things that differ in a6xx+ */
   OPC_STC = _OPC(6, 40),
   OPC_RESINFO_B = _OPC(6, 41),
   OPC_LDIB_B = _OPC(6, 42),
   OPC_STIB_B = _OPC(6, 43),

   /* Logical opcodes for different atomic instruction variations: */
   OPC_ATOMIC_B_ADD = _OPC(6, 44),
   OPC_ATOMIC_B_SUB = _OPC(6, 45),
   OPC_ATOMIC_B_XCHG = _OPC(6, 46),
   OPC_ATOMIC_B_INC = _OPC(6, 47),
   OPC_ATOMIC_B_DEC = _OPC(6, 48),
   OPC_ATOMIC_B_CMPXCHG = _OPC(6, 49),
   OPC_ATOMIC_B_MIN = _OPC(6, 50),
   OPC_ATOMIC_B_MAX = _OPC(6, 51),
   OPC_ATOMIC_B_AND = _OPC(6, 52),
   OPC_ATOMIC_B_OR = _OPC(6, 53),
   OPC_ATOMIC_B_XOR = _OPC(6, 54),

   OPC_ATOMIC_S_ADD = _OPC(6, 55),
   OPC_ATOMIC_S_SUB = _OPC(6, 56),
   OPC_ATOMIC_S_XCHG = _OPC(6, 57),
   OPC_ATOMIC_S_INC = _OPC(6, 58),
   OPC_ATOMIC_S_DEC = _OPC(6, 59),
   OPC_ATOMIC_S_CMPXCHG = _OPC(6, 60),
   OPC_ATOMIC_S_MIN = _OPC(6, 61),
   OPC_ATOMIC_S_MAX = _OPC(6, 62),
   OPC_ATOMIC_S_AND = _OPC(6, 63),
   OPC_ATOMIC_S_OR = _OPC(6, 64),
   OPC_ATOMIC_S_XOR = _OPC(6, 65),

   OPC_ATOMIC_G_ADD = _OPC(6, 66),
   OPC_ATOMIC_G_SUB = _OPC(6, 67),
   OPC_ATOMIC_G_XCHG = _OPC(6, 68),
   OPC_ATOMIC_G_INC = _OPC(6, 69),
   OPC_ATOMIC_G_DEC = _OPC(6, 70),
   OPC_ATOMIC_G_CMPXCHG = _OPC(6, 71),
   OPC_ATOMIC_G_MIN = _OPC(6, 72),
   OPC_ATOMIC_G_MAX = _OPC(6, 73),
   OPC_ATOMIC_G_AND = _OPC(6, 74),
   OPC_ATOMIC_G_OR = _OPC(6, 75),
   OPC_ATOMIC_G_XOR = _OPC(6, 76),

   OPC_LDG_A = _OPC(6, 77),
   OPC_STG_A = _OPC(6, 78),

   OPC_SPILL_MACRO = _OPC(6, 79),
   OPC_RELOAD_MACRO = _OPC(6, 80),

   OPC_LDC_K = _OPC(6, 81),

   /* category 7: */
   OPC_BAR = _OPC(7, 0),
   OPC_FENCE = _OPC(7, 1),

   /* meta instructions (category -1): */
   /* placeholder instr to mark shader inputs: */
   OPC_META_INPUT = _OPC(-1, 0),
   /* The "collect" and "split" instructions are used for keeping
    * track of instructions that write to multiple dst registers
    * (split), like texture sample instructions, or read multiple
    * consecutive scalar registers (collect), like bary.f and texture
    * sample.
    *
    * A "split" extracts a scalar component from a vecN, and a
    * "collect" gathers multiple scalar components into a vecN.
    */
   OPC_META_SPLIT = _OPC(-1, 2),
   OPC_META_COLLECT = _OPC(-1, 3),

   /* placeholder for texture fetches that run before FS invocation
    * starts:
    */
   OPC_META_TEX_PREFETCH = _OPC(-1, 4),

   /* Parallel copies have multiple destinations, and copy each source
    * to its corresponding destination. This happens "in parallel," meaning
    * that it happens as-if every source is read first and then every
    * destination is stored. These are produced in RA when register
    * shuffling is required, and then lowered away immediately afterwards.
    */
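   /* For example, a parallel copy implementing a register swap
    * (illustrative syntax, not actual disassembly):
    *
    *    r0.x, r1.x = r1.x, r0.x
    *
    * Both sources are read before either destination is written, so no
    * temporary register is needed.
    */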
   OPC_META_PARALLEL_COPY = _OPC(-1, 5),
   OPC_META_PHI = _OPC(-1, 6),
} opc_t;
/* clang-format on */

#define opc_cat(opc) ((int)((opc) >> NOPC_BITS))
#define opc_op(opc) ((unsigned)((opc) & ((1 << NOPC_BITS) - 1)))
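
/* A minimal compile-time sanity check of the packing (an illustrative
 * sketch, not part of the original header; assumes C11 _Static_assert,
 * and the meta case relies on arithmetic right shift of negative values):
 *
 *    _Static_assert(opc_cat(OPC_MUL_F) == 2, "cat");
 *    _Static_assert(opc_op(OPC_MUL_F) == 3, "op");
 *    _Static_assert(opc_cat(OPC_META_PHI) == -1, "meta cat");
 */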

const char *disasm_a3xx_instr_name(opc_t opc);

typedef enum {
   TYPE_F16 = 0,
   TYPE_F32 = 1,
   TYPE_U16 = 2,
   TYPE_U32 = 3,
   TYPE_S16 = 4,
   TYPE_S32 = 5,
   TYPE_U8 = 6,
   TYPE_S8 = 7, // XXX I assume?
} type_t;

static inline uint32_t
type_size(type_t type)
{
   switch (type) {
   case TYPE_F32:
   case TYPE_U32:
   case TYPE_S32:
      return 32;
   case TYPE_F16:
   case TYPE_U16:
   case TYPE_S16:
      return 16;
   case TYPE_U8:
   case TYPE_S8:
      return 8;
   default:
      ir3_assert(0); /* invalid type */
      return 0;
   }
}

static inline type_t
type_uint_size(unsigned bit_size)
{
   switch (bit_size) {
   case 8:  return TYPE_U8;
   case 1: /* 1b bools are treated as normal half-regs */
   case 16: return TYPE_U16;
   case 32: return TYPE_U32;
   default:
      ir3_assert(0); /* invalid size */
      return 0;
   }
}

static inline type_t
type_float_size(unsigned bit_size)
{
   switch (bit_size) {
   case 16: return TYPE_F16;
   case 32: return TYPE_F32;
   default:
      ir3_assert(0); /* invalid size */
      return 0;
   }
}

static inline int
type_float(type_t type)
{
   return (type == TYPE_F32) || (type == TYPE_F16);
}

static inline int
type_uint(type_t type)
{
   return (type == TYPE_U32) || (type == TYPE_U16) || (type == TYPE_U8);
}

static inline int
type_sint(type_t type)
{
   return (type == TYPE_S32) || (type == TYPE_S16) || (type == TYPE_S8);
}

typedef enum {
   ROUND_ZERO = 0,
   ROUND_EVEN = 1,
   ROUND_POS_INF = 2,
   ROUND_NEG_INF = 3,
} round_t;

/* comp:
 *   0 - x
 *   1 - y
 *   2 - z
 *   3 - w
 */
static inline uint32_t
regid(int num, int comp)
{
   return (num << 2) | (comp & 0x3);
}
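
/* For example (the values follow from regid() above): regid(2, 3) == 0xb
 * encodes r2.w, and regid(REG_A0, 0) (see below) encodes a0.x.
 */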

#define INVALID_REG regid(63, 0)
#define VALIDREG(r) ((r) != INVALID_REG)
#define CONDREG(r, val) COND(VALIDREG(r), (val))

/* special registers: */
#define REG_A0 61 /* address register */
#define REG_P0 62 /* predicate register */

typedef enum {
   BRANCH_PLAIN = 0, /* br */
   BRANCH_OR = 1,    /* brao */
   BRANCH_AND = 2,   /* braa */
   BRANCH_CONST = 3, /* brac */
   BRANCH_ANY = 4,   /* bany */
   BRANCH_ALL = 5,   /* ball */
   BRANCH_X = 6,     /* brax ??? */
} brtype_t;

/* With is_bindless_s2en = 1, this determines whether bindless is enabled and
 * if so, how to get the (base, index) pair for both sampler and texture.
 * There is a single base embedded in the instruction, which is always used
 * for the texture.
 */
typedef enum {
   /* Use traditional GL binding model, get texture and sampler index from
    * src3 which is presumed to be uniform on a4xx+ (a3xx doesn't have the
    * other modes, but does handle non-uniform indexing).
    */
   CAT5_UNIFORM = 0,

   /* The sampler base comes from the low 3 bits of a1.x, and the sampler
    * and texture index come from src3 which is presumed to be uniform.
    */
   CAT5_BINDLESS_A1_UNIFORM = 1,

   /* The texture and sampler share the same base, and the sampler and
    * texture index come from src3 which is *not* presumed to be uniform.
    */
   CAT5_BINDLESS_NONUNIFORM = 2,

   /* The sampler base comes from the low 3 bits of a1.x, and the sampler
    * and texture index come from src3 which is *not* presumed to be
    * uniform.
    */
   CAT5_BINDLESS_A1_NONUNIFORM = 3,

   /* Use traditional GL binding model, get texture and sampler index
    * from src3 which is *not* presumed to be uniform.
    */
   CAT5_NONUNIFORM = 4,

   /* The texture and sampler share the same base, and the sampler and
    * texture index come from src3 which is presumed to be uniform.
    */
   CAT5_BINDLESS_UNIFORM = 5,

   /* The texture and sampler share the same base, get sampler index from
    * low 4 bits of src3 and texture index from high 4 bits.
    */
   CAT5_BINDLESS_IMM = 6,

   /* The sampler base comes from the low 3 bits of a1.x, and the texture
    * index comes from the next 8 bits of a1.x. The sampler index is an
    * immediate in src3.
    */
   CAT5_BINDLESS_A1_IMM = 7,
} cat5_desc_mode_t;

/* Similar to cat5_desc_mode_t, describes how the descriptor is loaded.
 */
typedef enum {
   /* Use old GL binding model with an immediate index. */
   CAT6_IMM = 0,

   CAT6_UNIFORM = 1,

   CAT6_NONUNIFORM = 2,

   /* Use the bindless model, with an immediate index.
    */
   CAT6_BINDLESS_IMM = 4,

   /* Use the bindless model, with a uniform register index.
    */
   CAT6_BINDLESS_UNIFORM = 5,

   /* Use the bindless model, with a register index that isn't guaranteed
    * to be uniform. This presumably checks if the indices are equal and
    * splits up the load/store, because it works the way you would
    * expect.
    */
   CAT6_BINDLESS_NONUNIFORM = 6,
} cat6_desc_mode_t;

static inline bool
is_sat_compatible(opc_t opc)
{
   /* On a6xx saturation doesn't work on cat4 */
   if (opc_cat(opc) != 2 && opc_cat(opc) != 3)
      return false;

   switch (opc) {
   /* On a3xx and a6xx saturation doesn't work on bary.f */
   case OPC_BARY_F:
   /* On a6xx saturation doesn't work on sel.* */
   case OPC_SEL_B16:
   case OPC_SEL_B32:
   case OPC_SEL_S16:
   case OPC_SEL_S32:
   case OPC_SEL_F16:
   case OPC_SEL_F32:
      return false;
   default:
      return true;
   }
}

static inline bool
is_mad(opc_t opc)
{
   switch (opc) {
   case OPC_MAD_U16:
   case OPC_MAD_S16:
   case OPC_MAD_U24:
   case OPC_MAD_S24:
   case OPC_MAD_F16:
   case OPC_MAD_F32:
      return true;
   default:
      return false;
   }
}

static inline bool
is_madsh(opc_t opc)
{
   switch (opc) {
   case OPC_MADSH_U16:
   case OPC_MADSH_M16:
      return true;
   default:
      return false;
   }
}

static inline bool
is_local_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_ADD:
   case OPC_ATOMIC_SUB:
   case OPC_ATOMIC_XCHG:
   case OPC_ATOMIC_INC:
   case OPC_ATOMIC_DEC:
   case OPC_ATOMIC_CMPXCHG:
   case OPC_ATOMIC_MIN:
   case OPC_ATOMIC_MAX:
   case OPC_ATOMIC_AND:
   case OPC_ATOMIC_OR:
   case OPC_ATOMIC_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_global_a3xx_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_S_ADD:
   case OPC_ATOMIC_S_SUB:
   case OPC_ATOMIC_S_XCHG:
   case OPC_ATOMIC_S_INC:
   case OPC_ATOMIC_S_DEC:
   case OPC_ATOMIC_S_CMPXCHG:
   case OPC_ATOMIC_S_MIN:
   case OPC_ATOMIC_S_MAX:
   case OPC_ATOMIC_S_AND:
   case OPC_ATOMIC_S_OR:
   case OPC_ATOMIC_S_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_global_a6xx_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_G_ADD:
   case OPC_ATOMIC_G_SUB:
   case OPC_ATOMIC_G_XCHG:
   case OPC_ATOMIC_G_INC:
   case OPC_ATOMIC_G_DEC:
   case OPC_ATOMIC_G_CMPXCHG:
   case OPC_ATOMIC_G_MIN:
   case OPC_ATOMIC_G_MAX:
   case OPC_ATOMIC_G_AND:
   case OPC_ATOMIC_G_OR:
   case OPC_ATOMIC_G_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_bindless_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_B_ADD:
   case OPC_ATOMIC_B_SUB:
   case OPC_ATOMIC_B_XCHG:
   case OPC_ATOMIC_B_INC:
   case OPC_ATOMIC_B_DEC:
   case OPC_ATOMIC_B_CMPXCHG:
   case OPC_ATOMIC_B_MIN:
   case OPC_ATOMIC_B_MAX:
   case OPC_ATOMIC_B_AND:
   case OPC_ATOMIC_B_OR:
   case OPC_ATOMIC_B_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_atomic(opc_t opc)
{
   return is_local_atomic(opc) || is_global_a3xx_atomic(opc) ||
          is_global_a6xx_atomic(opc) || is_bindless_atomic(opc);
}

static inline bool
is_ssbo(opc_t opc)
{
   switch (opc) {
   case OPC_RESFMT:
   case OPC_RESINFO:
   case OPC_LDGB:
   case OPC_STGB:
   case OPC_STIB:
      return true;
   default:
      return false;
   }
}

static inline bool
is_isam(opc_t opc)
{
   switch (opc) {
   case OPC_ISAM:
   case OPC_ISAML:
   case OPC_ISAMM:
      return true;
   default:
      return false;
   }
}

static inline bool
is_cat2_float(opc_t opc)
{
   switch (opc) {
   case OPC_ADD_F:
   case OPC_MIN_F:
   case OPC_MAX_F:
   case OPC_MUL_F:
   case OPC_SIGN_F:
   case OPC_CMPS_F:
   case OPC_ABSNEG_F:
   case OPC_CMPV_F:
   case OPC_FLOOR_F:
   case OPC_CEIL_F:
   case OPC_RNDNE_F:
   case OPC_RNDAZ_F:
   case OPC_TRUNC_F:
      return true;

   default:
      return false;
   }
}

static inline bool
is_cat3_float(opc_t opc)
{
   switch (opc) {
   case OPC_MAD_F16:
   case OPC_MAD_F32:
   case OPC_SEL_F16:
   case OPC_SEL_F32:
      return true;
   default:
      return false;
   }
}

#endif /* INSTR_A3XX_H_ */