/* Copyright (c) 2018-2019 Alyssa Rosenzweig (alyssa@rosenzweig.io)
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef __MDG_HELPERS_H
#define __MDG_HELPERS_H

#include "util/macros.h"
#include <stdio.h>
#include <string.h>

#define OP_IS_LOAD_VARY_F(op) (\
                op == midgard_op_ld_vary_16 || \
                op == midgard_op_ld_vary_32 \
        )

#define OP_IS_PROJECTION(op) ( \
                op == midgard_op_ldst_perspective_division_z || \
                op == midgard_op_ldst_perspective_division_w \
        )

#define OP_IS_VEC4_ONLY(op) ( \
                OP_IS_PROJECTION(op) || \
                op == midgard_op_ld_cubemap_coords \
        )

#define OP_IS_MOVE(op) ( \
                op == midgard_alu_op_fmov || \
                op == midgard_alu_op_imov \
        )

#define OP_IS_UBO_READ(op) ( \
                op == midgard_op_ld_ubo_char  || \
                op == midgard_op_ld_ubo_char2  || \
                op == midgard_op_ld_ubo_char4  || \
                op == midgard_op_ld_ubo_short4  || \
                op == midgard_op_ld_ubo_int4 \
        )

#define OP_IS_CSEL_V(op) ( \
                op == midgard_alu_op_icsel_v || \
                op == midgard_alu_op_fcsel_v \
        )

#define OP_IS_CSEL(op) ( \
                OP_IS_CSEL_V(op) || \
                op == midgard_alu_op_icsel || \
                op == midgard_alu_op_fcsel \
        )

#define OP_IS_UNSIGNED_CMP(op) ( \
                op == midgard_alu_op_ult || \
                op == midgard_alu_op_ule \
        )

#define OP_IS_INTEGER_CMP(op) ( \
                op == midgard_alu_op_ieq || \
                op == midgard_alu_op_ine || \
                op == midgard_alu_op_ilt || \
                op == midgard_alu_op_ile || \
                OP_IS_UNSIGNED_CMP(op) \
        )

/* ALU control words are single bit fields with a lot of space */

#define ALU_ENAB_VEC_MUL  (1 << 17)
#define ALU_ENAB_SCAL_ADD  (1 << 19)
#define ALU_ENAB_VEC_ADD  (1 << 21)
#define ALU_ENAB_SCAL_MUL  (1 << 23)
#define ALU_ENAB_VEC_LUT  (1 << 25)
#define ALU_ENAB_BR_COMPACT (1 << 26)
#define ALU_ENAB_BRANCH   (1 << 27)

/* Other opcode properties that don't conflict with the ALU_ENABs, non-ISA */
/* Denotes an opcode that takes a vector input with a fixed number of
 * channels, but outputs to only a single output channel, like dot products.
 * For these, to determine the effective mask, this quirk can be set. We have
 * an intentional off-by-one (a la MALI_POSITIVE), since 0-channel makes no
 * sense but we need to fit 4 channels in 2 bits. Similarly, 1-channel doesn't
 * make sense (since then why are we quirked?), so that corresponds to "no
 * count set" */

#define OP_CHANNEL_COUNT(c) ((c - 1) << 0)
#define GET_CHANNEL_COUNT(c) ((c & (0x3 << 0)) ? ((c & (0x3 << 0)) + 1) : 0)
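
/* For example, a 4-channel dot product sets OP_CHANNEL_COUNT(4) == 0x3 in
 * its props, and GET_CHANNEL_COUNT then recovers 4; props with neither low
 * bit set decode to 0, meaning the quirk is unused for that opcode. */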

/* For instructions that take a single argument, normally the first argument
 * slot is used for the argument and the second slot is a dummy #0 constant.
 * However, there are exceptions: instructions like fmov store their argument
 * in the _second_ slot and store a dummy r24 in the first slot, designated by
 * QUIRK_FLIPPED_R24 */

#define QUIRK_FLIPPED_R24 (1 << 2)

/* Is the op commutative? */
#define OP_COMMUTES (1 << 3)

/* Does the op convert types between int- and float- space (i2f/f2u/etc) */
#define OP_TYPE_CONVERT (1 << 4)

/* Is this opcode the first in an f2x (rte, rtz, rtn, rtp) sequence? If so, it
 * takes a roundmode argument in the IR. This has the semantic of rounding the
 * source (it's all fused in), which is why it doesn't necessarily make sense
 * for i2f (though folding there might be necessary for OpenCL reasons). Comes
 * up in format conversion, e.g. f2u_rte */
#define MIDGARD_ROUNDS (1 << 5)

/* Vector-independent shorthands for the above; these numbers are arbitrary
 * and not from the ISA. Convert to the above with unit_enum_to_midgard */

#define UNIT_MUL 0
#define UNIT_ADD 1
#define UNIT_LUT 2

#define IS_ALU(tag) (tag >= TAG_ALU_4)

/* Special register aliases */

#define MAX_WORK_REGISTERS 16

/* Uniforms begin at (REGISTER_UNIFORMS - uniform_count) */
#define REGISTER_UNIFORMS 24
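
/* For example, a shader needing 8 uniform registers would have them mapped
 * to r16..r23, following the formula above. */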

/* r24 and r25 are special registers that only exist during the pipeline;
 * by using them when we don't care about the register, we skip a roundtrip
 * to the register file. */
#define REGISTER_UNUSED 24
#define REGISTER_CONSTANT 26
#define REGISTER_LDST_BASE 26
#define REGISTER_TEXTURE_BASE 28
#define REGISTER_SELECT 31

/* SSA helper aliases to mimic the registers. */

#define SSA_FIXED_SHIFT 24
#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
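
/* Example round-trip, following directly from the definitions above:
 *
 *    SSA_FIXED_REGISTER(2)                     == (3 << 24) | 1
 *    SSA_REG_FROM_FIXED(SSA_FIXED_REGISTER(2)) == 2
 *
 * so any index at or above SSA_FIXED_MINIMUM can be recognized as a register
 * alias rather than an ordinary SSA value. */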

#define COMPONENT_X 0x0
#define COMPONENT_Y 0x1
#define COMPONENT_Z 0x2
#define COMPONENT_W 0x3

#define SWIZZLE_IDENTITY { \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } \
}

#define SWIZZLE_IDENTITY_4 { \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0 }, \
}

static inline unsigned
mask_of(unsigned nr_comp)
{
        return (1 << nr_comp) - 1;
}
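
/* e.g. mask_of(3) == 0x7, selecting .xyz */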

/* See ISA notes */

#define LDST_NOP (3)

/* There are five ALU units: VMUL, VADD, SMUL, SADD, LUT. A given opcode is
 * implemented on some subset of these units (or occasionally all of them).
 * This table encodes a bit mask of valid units for each opcode, so the
 * scheduler can figure out where to plonk the instruction. */

/* Shorthands for each unit */
#define UNIT_VMUL ALU_ENAB_VEC_MUL
#define UNIT_SADD ALU_ENAB_SCAL_ADD
#define UNIT_VADD ALU_ENAB_VEC_ADD
#define UNIT_SMUL ALU_ENAB_SCAL_MUL
#define UNIT_VLUT ALU_ENAB_VEC_LUT

/* Shorthands for usual combinations of units */

#define UNITS_MUL (UNIT_VMUL | UNIT_SMUL)
#define UNITS_ADD (UNIT_VADD | UNIT_SADD)
#define UNITS_MOST (UNITS_MUL | UNITS_ADD)
#define UNITS_ALL (UNITS_MOST | UNIT_VLUT)
#define UNITS_SCALAR (UNIT_SADD | UNIT_SMUL)
#define UNITS_VECTOR (UNIT_VMUL | UNIT_VADD)
#define UNITS_ANY_VECTOR (UNITS_VECTOR | UNIT_VLUT)
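
/* As a rough sketch of how a scheduler check against these masks can look
 * (the property table itself lives in midgard_ops.c; the table name
 * alu_opcode_props is assumed here):
 *
 *    if (alu_opcode_props[op].props & UNITS_ANY_VECTOR) {
 *            ... op may be placed on VMUL, VADD or VLUT ...
 *    }
 */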

struct mir_op_props {
        const char *name;
        unsigned props;
};

/* For load/store */

struct mir_ldst_op_props {
        const char *name;
        unsigned props;
};

struct mir_tag_props {
        const char *name;
        unsigned size;
};

/* Lower 2 bits are a midgard_reg_mode */
#define GET_LDST_SIZE(c) (c & 3)

/* Store (so the primary register is a source, not a destination) */
#define LDST_STORE (1 << 2)

/* Mask has special meaning and should not be manipulated directly */
#define LDST_SPECIAL_MASK (1 << 3)

/* Non-store operation has side effects and should not be eliminated even if
 * its mask is 0 */
#define LDST_SIDE_FX (1 << 4)

/* Computes an address according to indirects/zext/shift/etc */
#define LDST_ADDRESS (1 << 5)

/* Some fields, such as swizzle and address, have special meanings */
#define LDST_ATOMIC (1 << 6)

/* This file is common, so don't define the tables themselves. #include
 * midgard_op.h if you need that, or edit midgard_ops.c directly */

/* Duplicate bits to convert a per-component mask to the duplicated 8-bit
 * format used by the vector units */

static inline unsigned
expand_writemask(unsigned mask, unsigned log2_channels)
{
        unsigned o = 0;
        unsigned factor = 8 >> log2_channels;
        unsigned expanded = (1 << factor) - 1;

        for (unsigned i = 0; i < (1 << log2_channels); ++i)
                if (mask & (1 << i))
                        o |= (expanded << (factor * i));

        return o;
}
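
/* Worked example: with four channels (log2_channels = 2), each mask bit
 * expands to a pair of bits, so expand_writemask(0x5, 2) (components x and z)
 * returns 0x33. */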

/* Coerce structs to integer */

static inline unsigned
vector_alu_srco_unsigned(midgard_vector_alu_src src)
{
        unsigned u;
        memcpy(&u, &src, sizeof(src));
        return u;
}

static inline midgard_vector_alu_src
vector_alu_from_unsigned(unsigned u)
{
        midgard_vector_alu_src s;
        memcpy(&s, &u, sizeof(s));
        return s;
}

static inline void
mir_compose_swizzle(unsigned *left, unsigned *right, unsigned *final_out)
{
        unsigned out[16];

        for (unsigned c = 0; c < 16; ++c)
                out[c] = right[left[c]];

        memcpy(final_out, out, sizeof(out));
}
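
/* For instance, composing a broadcast-x left swizzle { 0, 0, 0, 0, ... } with
 * a reversed right swizzle { 3, 2, 1, 0, ... } yields { 3, 3, 3, 3, ... },
 * since each lane c selects right[left[c]]. */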

/* Checks for an xyzw.. swizzle, given a mask */

static inline bool
mir_is_simple_swizzle(unsigned *swizzle, unsigned mask)
{
        for (unsigned i = 0; i < 16; ++i) {
                if (!(mask & (1 << i))) continue;

                if (swizzle[i] != i)
                        return false;
        }

        return true;
}
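
/* e.g. with mask 0x3 (.xy), the swizzle { 0, 1, 7, 7, ... } still counts as
 * simple, since only enabled components are checked; { 1, 0, ... } does not. */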

/* Packs a load/store argument */

static inline uint8_t
midgard_ldst_reg(unsigned reg, unsigned component, unsigned size)
{
        assert((reg == REGISTER_LDST_BASE) || (reg == REGISTER_LDST_BASE + 1));
        assert(size == 16 || size == 32 || size == 64);

        /* Shift so everything is in terms of 32-bit units */
        if (size == 64) {
                assert(component < 2);
                component <<= 1;
        } else if (size == 16) {
                assert((component & 1) == 0);
                component >>= 1;
        }

        midgard_ldst_register_select sel = {
                .component = component,
                .select = reg - 26
        };

        uint8_t packed;
        memcpy(&packed, &sel, sizeof(packed));

        return packed;
}
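
/* e.g. midgard_ldst_reg(REGISTER_LDST_BASE + 1, 1, 64) selects r27
 * (select = 1) at 32-bit unit 2, since a 64-bit component spans two 32-bit
 * units. */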

static inline bool
midgard_is_branch_unit(unsigned unit)
{
        return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
}

/* Packs ALU mod argument */
struct midgard_instruction;
unsigned mir_pack_mod(struct midgard_instruction *ins, unsigned i, bool scalar);

void
mir_print_constant_component(FILE *fp, const midgard_constants *consts,
                             unsigned c, midgard_reg_mode reg_mode, bool half,
                             unsigned mod, midgard_alu_op op);

#endif