• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright (c) 2018-2019 Alyssa Rosenzweig (alyssa@rosenzweig.io)
2  * Copyright (C) 2019-2020 Collabora, Ltd.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files (the "Software"), to deal
6  * in the Software without restriction, including without limitation the rights
7  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8  * copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20  * THE SOFTWARE.
21  */
22 
23 #ifndef __MDG_HELPERS_H
24 #define __MDG_HELPERS_H
25 
26 #include "util/macros.h"
27 #include <stdio.h>
28 #include <string.h>
29 
/* Opcode classification helpers. The range-based checks below rely on the
 * ordering of the midgard_op_* / midgard_alu_op_* enums declared elsewhere;
 * keep them in sync with the ISA headers. */

/* Loads a float varying (16-bit or 32-bit variant) */
#define OP_IS_LOAD_VARY_F(op) (\
                op == midgard_op_ld_vary_16 || \
                op == midgard_op_ld_vary_32 \
        )

/* Perspective division ops on the load/store unit */
#define OP_IS_PROJECTION(op) ( \
                op == midgard_op_ldst_perspective_div_y || \
                op == midgard_op_ldst_perspective_div_z || \
                op == midgard_op_ldst_perspective_div_w \
        )

/* Ops that require a full vec4 slot */
#define OP_IS_VEC4_ONLY(op) ( \
                OP_IS_PROJECTION(op) || \
                op == midgard_op_ld_cubemap_coords \
        )

/* Float moves (fmov and its rounding variants) or integer moves */
#define OP_IS_MOVE(op) ( \
                (op >= midgard_alu_op_fmov && op <= midgard_alu_op_fmov_rtp) || \
                op == midgard_alu_op_imov \
        )

/* Any UBO load, regardless of width or byte swap */
#define OP_IS_UBO_READ(op) ( \
                op >= midgard_op_ld_ubo_u8 && \
                op <= midgard_op_ld_ubo_128_bswap8 \
        )

/* Vector conditional select */
#define OP_IS_CSEL_V(op) ( \
                op == midgard_alu_op_icsel_v || \
                op == midgard_alu_op_fcsel_v \
        )

/* Any conditional select, scalar or vector */
#define OP_IS_CSEL(op) ( \
                OP_IS_CSEL_V(op) || \
                op == midgard_alu_op_icsel || \
                op == midgard_alu_op_fcsel \
        )

/* Unsigned integer comparison */
#define OP_IS_UNSIGNED_CMP(op) ( \
                op == midgard_alu_op_ult || \
                op == midgard_alu_op_ule \
        )

/* Any integer comparison, signed or unsigned */
#define OP_IS_INTEGER_CMP(op) ( \
                op == midgard_alu_op_ieq || \
                op == midgard_alu_op_ine || \
                op == midgard_alu_op_ilt || \
                op == midgard_alu_op_ile || \
                OP_IS_UNSIGNED_CMP(op) \
        )

/* Plain memory stores (all widths/swaps) */
#define OP_IS_COMMON_STORE(op) ( \
                op >= midgard_op_st_u8 && \
                op <= midgard_op_st_128_bswap8 \
        )

/* Image loads/stores and image address computation */
#define OP_IS_IMAGE(op) ( \
                (op >= midgard_op_ld_image_32f && op <= midgard_op_ld_image_32i) || \
                (op >= midgard_op_st_image_32f && op <= midgard_op_st_image_32i) || \
                op == midgard_op_lea_image \
        )

/* Special loads/stores (e.g. the ld/st_special_* family) */
#define OP_IS_SPECIAL(op) ( \
                (op >= midgard_op_ld_special_32f && op <= midgard_op_ld_special_32i) || \
                (op >= midgard_op_st_special_32f && op <= midgard_op_st_special_32i) \
        )

/* Colour packing for framebuffer writeout */
#define OP_IS_PACK_COLOUR(op) ( \
                (op >= midgard_op_pack_colour_f32 && op <= midgard_op_pack_colour_s32) \
        )

/* Colour unpacking for framebuffer readback */
#define OP_IS_UNPACK_COLOUR(op) ( \
                (op >= midgard_op_unpack_colour_f32 && op <= midgard_op_unpack_colour_s32) \
        )

/* Instructions that are on the load/store unit but don't access memory */
#define OP_IS_REG2REG_LDST(op) ( \
                op >= midgard_op_unpack_colour_f32 && \
                op <= midgard_op_ldst_perspective_div_w \
        )
109 
/* ALU control words are single bit fields with a lot of space */

#define ALU_ENAB_VEC_MUL  (1 << 17)
#define ALU_ENAB_SCAL_ADD  (1 << 19)
#define ALU_ENAB_VEC_ADD  (1 << 21)
#define ALU_ENAB_SCAL_MUL  (1 << 23)
#define ALU_ENAB_VEC_LUT  (1 << 25)
#define ALU_ENAB_BR_COMPACT (1 << 26)
#define ALU_ENAB_BRANCH   (1 << 27)

/* Other opcode properties that don't conflict with the ALU_ENABs, non-ISA */

/* Denotes an opcode that takes a vector input with a fixed-number of
 * channels, but outputs to only a single output channel, like dot products.
 * For these, to determine the effective mask, this quirk can be set. We have
 * an intentional off-by-one (a la MALI_POSITIVE), since 0-channel makes no
 * sense but we need to fit 4 channels in 2-bits. Similarly, 1-channel doesn't
 * make sense (since then why are we quirked?), so that corresponds to "no
 * count set" */

/* Encodes a channel count with the off-by-one described above */
#define OP_CHANNEL_COUNT(c) ((c - 1) << 0)
/* Decodes the count: (low 2 bits) + 1, or 0 when no count is set */
#define GET_CHANNEL_COUNT(c) ((c & (0x3 << 0)) ? ((c & (0x3 << 0)) + 1) : 0)

/* For instructions that take a single argument, normally the first argument
 * slot is used for the argument and the second slot is a dummy #0 constant.
 * However, there are exceptions: instructions like fmov store their argument
 * in the _second_ slot and store a dummy r24 in the first slot, designated by
 * QUIRK_FLIPPED_R24 */

#define QUIRK_FLIPPED_R24 (1 << 2)

/* Is the op commutative? */
#define OP_COMMUTES (1 << 3)

/* Does the op convert types between int- and float- space (i2f/f2u/etc) */
#define OP_TYPE_CONVERT (1 << 4)

/* Is this opcode the first in a f2x (rte, rtz, rtn, rtp) sequence? If so,
 * takes a roundmode argument in the IR. This has the semantic of rounding the
 * source (it's all fused in), which is why it doesn't necessarily make sense
 * for i2f (though folding there might be necessary for OpenCL reasons). Comes
 * up in format conversion, i.e. f2u_rte */
#define MIDGARD_ROUNDS (1 << 5)

/* Vector-independent shorthands for the above; these numbers are arbitrary and
 * not from the ISA. Convert to the above with unit_enum_to_midgard */

#define UNIT_MUL 0
#define UNIT_ADD 1
#define UNIT_LUT 2
160 
/* Tags at or above TAG_ALU_4 denote ALU bundles */
#define IS_ALU(tag) (tag >= TAG_ALU_4)

/* Special register aliases */

#define MAX_WORK_REGISTERS 16

/* Uniforms begin at (REGISTER_UNIFORMS - uniform_count) */
#define REGISTER_UNIFORMS 24

/* r24 and r25 are special registers that only exist during the pipeline,
 * by using them when we don't care about the register we skip a roundtrip
 * to the register file. */
#define REGISTER_UNUSED 24
#define REGISTER_CONSTANT 26
#define REGISTER_LDST_BASE 26
#define REGISTER_TEXTURE_BASE 28
#define REGISTER_SELECT 31

/* The following registers are read-only */

/* XY is Program Counter, ZW is Stack Pointer */
#define REGISTER_LDST_PC_SP 2

/* XY is Thread Local Storage pointer, ZW is Workgroup Local Storage pointer */
#define REGISTER_LDST_LOCAL_STORAGE_PTR 3

#define REGISTER_LDST_LOCAL_THREAD_ID 4
#define REGISTER_LDST_GROUP_ID 5
#define REGISTER_LDST_GLOBAL_THREAD_ID 6

/* This register is always zeroed when read. */
#define REGISTER_LDST_ZERO 7

/* SSA helper aliases to mimic the registers. Fixed-register indices have
 * the low bit set and encode (reg + 1) above SSA_FIXED_SHIFT, so they never
 * collide with true SSA indices. */

#define SSA_FIXED_SHIFT 24
#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)

/* Swizzle component selectors */
#define COMPONENT_X 0x0
#define COMPONENT_Y 0x1
#define COMPONENT_Z 0x2
#define COMPONENT_W 0x3

/* 16-lane identity swizzle (no component reordering) */
#define SWIZZLE_IDENTITY { \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } \
}

/* Identity on the first four components, zero on the rest */
#define SWIZZLE_IDENTITY_4 { \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
}
219 
/* Returns a bitmask with the low nr_comp bits set, e.g. mask_of(4) = 0xF.
 * Valid for nr_comp strictly less than the width of unsigned. */

static inline unsigned
mask_of(unsigned nr_comp)
{
        return ~(~0u << nr_comp);
}
225 
/* See ISA notes */

#define LDST_NOP (3)

/* There are five ALU units: VMUL, VADD, SMUL, SADD, LUT. A given opcode is
 * implemented on some subset of these units (or occasionally all of them).
 * This table encodes a bit mask of valid units for each opcode, so the
 * scheduler can figure where to plonk the instruction. */

/* Shorthands for each unit */
#define UNIT_VMUL ALU_ENAB_VEC_MUL
#define UNIT_SADD ALU_ENAB_SCAL_ADD
#define UNIT_VADD ALU_ENAB_VEC_ADD
#define UNIT_SMUL ALU_ENAB_SCAL_MUL
#define UNIT_VLUT ALU_ENAB_VEC_LUT

/* Shorthands for usual combinations of units */

#define UNITS_MUL (UNIT_VMUL | UNIT_SMUL)
#define UNITS_ADD (UNIT_VADD | UNIT_SADD)
#define UNITS_MOST (UNITS_MUL | UNITS_ADD)
#define UNITS_ALL (UNITS_MOST | UNIT_VLUT)
#define UNITS_SCALAR (UNIT_SADD | UNIT_SMUL)
#define UNITS_VECTOR (UNIT_VMUL | UNIT_VADD)
#define UNITS_ANY_VECTOR (UNITS_VECTOR | UNIT_VLUT)
251 
/* Per-ALU-opcode metadata: human-readable name and a property bitfield
 * (the OP_*/QUIRK_*/unit flags defined above) */
struct mir_op_props {
        const char *name;
        unsigned props;
};

/* For load/store */

/* Per-load/store-opcode metadata; props uses the LDST_* flags below */
struct mir_ldst_op_props {
        const char *name;
        unsigned props;
};

/* Per-texture-opcode metadata */
struct mir_tex_op_props {
        const char *name;
        unsigned props;
};

/* Per-tag metadata: name and size of the bundle */
struct mir_tag_props {
        const char *name;
        unsigned size;
};
273 
/* Property flags for mir_ldst_op_props::props */

/* Lower 2-bits are a midgard_reg_mode */
#define GET_LDST_SIZE(c) (c & 3)

/* Store (so the primary register is a source, not a destination) */
#define LDST_STORE (1 << 2)

/* Mask has special meaning and should not be manipulated directly */
#define LDST_SPECIAL_MASK (1 << 3)

/* Non-store operation has side effects and should not be eliminated even if
 * its mask is 0 */
#define LDST_SIDE_FX (1 << 4)

/* Computes an address according to indirects/zext/shift/etc */
#define LDST_ADDRESS (1 << 5)

/* Some fields such as swizzle and address have special meanings */
#define LDST_ATOMIC (1 << 6)

/* Operates on attributes/varyings (including images) */
#define LDST_ATTRIB (1 << 7)
295 
296 /* This file is common, so don't define the tables themselves. #include
297  * midgard_op.h if you need that, or edit midgard_ops.c directly */
298 
/* Duplicates each enabled bit of a per-component writemask into the 8-bit
 * format used by the vector units. With (1 << log2_channels) channels, each
 * source bit expands to a run of (8 >> log2_channels) output bits. */

static inline unsigned
expand_writemask(unsigned mask, unsigned log2_channels)
{
        unsigned channels = 1 << log2_channels;
        unsigned bits_per_channel = 8 >> log2_channels;
        unsigned lane_mask = (1 << bits_per_channel) - 1;
        unsigned result = 0;

        for (unsigned c = 0; c < channels; ++c) {
                if (!(mask & (1 << c)))
                        continue;

                result |= lane_mask << (bits_per_channel * c);
        }

        return result;
}
315 
/* Coerce structs to integer */

/* Bit-casts a midgard_vector_alu_src to its raw unsigned encoding via
 * memcpy (the strict-aliasing-safe type pun).
 * NOTE(review): assumes sizeof(midgard_vector_alu_src) >= sizeof(unsigned),
 * i.e. the struct fills the whole word -- confirm against the ISA header. */
static inline unsigned
vector_alu_srco_unsigned(midgard_vector_alu_src src)
{
        unsigned u;
        memcpy(&u, &src, sizeof(src));
        return u;
}
325 
/* Inverse of vector_alu_srco_unsigned: reconstructs the struct from its raw
 * unsigned encoding, again punning through memcpy. */
static inline midgard_vector_alu_src
vector_alu_from_unsigned(unsigned u)
{
        midgard_vector_alu_src s;
        memcpy(&s, &u, sizeof(s));
        return s;
}
333 
/* Composes two 16-component swizzles: the result applies `left` first and
 * then `right`, i.e. final[c] = right[left[c]]. */
static inline void
mir_compose_swizzle(unsigned *left, unsigned *right, unsigned *final_out)
{
        unsigned composed[16];

        for (unsigned i = 0; i < 16; ++i)
                composed[i] = right[left[i]];

        /* Stage through a temporary so final_out may alias an input */
        memcpy(final_out, composed, sizeof(composed));
}
344 
/* Checks whether `swizzle` is the identity (x, y, z, w, ...) on every
 * component enabled in `mask`; disabled components are ignored. */

static inline bool
mir_is_simple_swizzle(unsigned *swizzle, unsigned mask)
{
        for (unsigned c = 0; c < 16; ++c) {
                bool live = mask & (1 << c);

                if (live && swizzle[c] != c)
                        return false;
        }

        return true;
}
359 
/* Packs a load/store argument's component selector. The field is addressed
 * in 32-bit units, so 64-bit components occupy two slots and 16-bit
 * components share one slot (and must be aligned to it). */

static inline uint8_t
midgard_ldst_comp(unsigned reg, unsigned component, unsigned size)
{
        /* Only the two argument registers (encoded 0/1) are valid here */
        assert((reg & ~1) == 0);
        assert(size == 16 || size == 32 || size == 64);

        switch (size) {
        case 64:
                /* A 64-bit component spans two 32-bit slots */
                assert(component < 2);
                return component << 1;
        case 16:
                /* Two 16-bit components per slot; must be slot-aligned */
                assert((component & 1) == 0);
                return component >> 1;
        default:
                /* 32-bit: already in the right units */
                return component;
        }
}
379 
/* Packs/unpacks a ubo index immediate */

void midgard_pack_ubo_index_imm(midgard_load_store_word *word, unsigned index);
unsigned midgard_unpack_ubo_index_imm(midgard_load_store_word word);

/* Packs/unpacks varying parameters.
 * FIXME: IMPORTANT: We currently handle varying mode weirdly, by passing all
 * parameters via an offset and using REGISTER_LDST_ZERO as base. This works
 * for most parameters, but does not allow us to encode/decode direct sample
 * position. */
void midgard_pack_varying_params(midgard_load_store_word *word, midgard_varying_params p);
midgard_varying_params midgard_unpack_varying_params(midgard_load_store_word word);

/* Load/store ops' displacement helpers.
 * This is useful because different types of load/store ops have different
 * displacement bitsize: attribute/selector offsets live above bit 9, vertex
 * offsets are a 9-bit sign-extended field in the low bits, UBO offsets are
 * shifted by 2 (4-byte granularity), and memory offsets are used as-is. */

#define UNPACK_LDST_ATTRIB_OFS(a) ((a) >> 9)
#define UNPACK_LDST_VERTEX_OFS(a) util_sign_extend((a) & 0x1FF, 9)
#define UNPACK_LDST_SELECTOR_OFS(a) ((a) >> 9)
#define UNPACK_LDST_UBO_OFS(a) ((a) >> 2)
#define UNPACK_LDST_MEM_OFS(a) ((a))

#define PACK_LDST_ATTRIB_OFS(a) ((a) << 9)
#define PACK_LDST_VERTEX_OFS(a) ((a) & 0x1FF)
#define PACK_LDST_SELECTOR_OFS(a) ((a) << 9)
#define PACK_LDST_UBO_OFS(a) ((a) << 2)
#define PACK_LDST_MEM_OFS(a) ((a))
408 
409 static inline bool
midgard_is_branch_unit(unsigned unit)410 midgard_is_branch_unit(unsigned unit)
411 {
412         return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
413 }
414 
/* Packs ALU mod argument */
struct midgard_instruction;
unsigned mir_pack_mod(struct midgard_instruction *ins, unsigned i, bool scalar);

/* Prints constant component `c` of the constant bank `consts` to fp,
 * interpreted according to reg_mode/half/mod for the given op */
void
mir_print_constant_component(FILE *fp, const midgard_constants *consts,
                             unsigned c, midgard_reg_mode reg_mode, bool half,
                             unsigned mod, midgard_alu_op op);
423 
424 #endif
425