/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "brw_nir.h"
#include "compiler/nir/nir_builder.h"

/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */
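/*
 * A rough sketch of the rewrite in NIR-style pseudocode (the precise
 * conditions are checked below):
 *
 *    ssa_2 = fmul ssa_0, ssa_1
 *    ssa_3 = fadd ssa_2, ssa_4      ==>      ssa_3 = ffma ssa_0, ssa_1, ssa_4
 *
 * The multiply may also be reached through imov/fmov/fneg/fabs chains, and
 * the fusion is skipped when either instruction is marked exact or when it
 * is unlikely to reduce the instruction count.
 */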

static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
   if (!list_empty(&def->if_uses))
      return false;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      if (use_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      switch (use_alu->op) {
      case nir_op_fadd:
         break; /* This one's ok */

      case nir_op_imov:
      case nir_op_fmov:
      case nir_op_fneg:
      case nir_op_fabs:
         assert(use_alu->dest.dest.is_ssa);
         if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
            return false;
         break;

      default:
         return false;
      }
   }

   return true;
}

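/*
 * Walks backwards from an ALU source through any imov/fmov/fneg/fabs chain
 * looking for a non-exact fmul that is only ever used by fadds.  Negations
 * and absolute values seen along the way are accumulated in *negate and
 * *abs, and the source's swizzle is composed into swizzle[] so the caller
 * can apply it to the fmul's operands.  Returns NULL if no suitable
 * multiply is found.
 */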
static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, int num_components,
                uint8_t swizzle[4], bool *negate, bool *abs)
{
   uint8_t swizzle_tmp[4];
   assert(src->src.is_ssa && !src->abs && !src->negate);

   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   /* We want to bail if any of the other ALU operations involved is labeled
    * exact.  One reason for this is that, while the value that changes is
    * actually the result of the add and not of the multiply, a user who
    * marks the multiply exact wants *that* value preserved and does not
    * care what happens to the add.  Another reason is that SPIR-V
    * explicitly requires this behaviour.
    */
   if (alu->exact)
      return NULL;

   switch (alu->op) {
   case nir_op_imov:
   case nir_op_fmov:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      /* Only absorb a fmul into a ffma if the fmul is only used in fadd
       * operations.  This prevents us from being too aggressive with our
       * fusing which can actually lead to more instructions.
       */
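      /* For example, if the fmul result also feeds a non-fadd instruction,
       * the multiply must still be emitted for that other use, so fusing
       * would add an ffma without eliminating the fmul.
       */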
      if (!are_all_uses_fadd(&alu->dest.dest.ssa))
         return NULL;
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;

   /* Copy swizzle data before overwriting it to avoid setting a wrong swizzle.
    *
    * Example:
    *   Former swizzle[] = xyzw
    *   src->swizzle[] = zyxx
    *
    *   Expected output swizzle = zyxx
    *   If we reuse swizzle in the loop, then output swizzle would be zyzz.
    */
   memcpy(swizzle_tmp, swizzle, 4*sizeof(uint8_t));
   for (int i = 0; i < num_components; i++)
      swizzle[i] = swizzle_tmp[src->swizzle[i]];

   return alu;
}

/**
 * Given a list of (at least two) nir_alu_src's, returns true if any of the
 * first two is a constant value that is used only once.
 */
static bool
any_alu_src_is_a_constant(nir_alu_src srcs[])
{
   for (unsigned i = 0; i < 2; i++) {
      if (srcs[i].src.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *load_const =
            nir_instr_as_load_const(srcs[i].src.ssa->parent_instr);

         if (list_is_singular(&load_const->def.uses) &&
             list_empty(&load_const->def.if_uses)) {
            return true;
         }
      }
   }

   return false;
}

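/*
 * Scans one block for non-exact fadd instructions whose operands chain back
 * to a fusible fmul and rewrites each such multiply/add pair into a single
 * ffma.  Returns true if anything was changed.
 */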
static bool
brw_nir_opt_peephole_ffma_block(nir_builder *b, nir_block *block)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *add = nir_instr_as_alu(instr);
      if (add->op != nir_op_fadd)
         continue;

      assert(add->dest.dest.is_ssa);
      if (add->exact)
         continue;

      assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);

      /* This is the case a + a.  We would rather handle this with an
       * algebraic reduction than fuse it.  Also, we want to only fuse
       * things where the multiply is used only once and, in this case, it
       * would be used twice by the same instruction.
       */
      if (add->src[0].src.ssa == add->src[1].src.ssa)
         continue;

      nir_alu_instr *mul;
      uint8_t add_mul_src, swizzle[4];
      bool negate, abs;
      for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
         for (unsigned i = 0; i < 4; i++)
            swizzle[i] = i;

         negate = false;
         abs = false;

         mul = get_mul_for_src(&add->src[add_mul_src],
                               add->dest.dest.ssa.num_components,
                               swizzle, &negate, &abs);

         if (mul != NULL)
            break;
      }

      if (mul == NULL)
         continue;

      unsigned bit_size = add->dest.dest.ssa.bit_size;

      nir_ssa_def *mul_src[2];
      mul_src[0] = mul->src[0].src.ssa;
      mul_src[1] = mul->src[1].src.ssa;

      /* If any operand of the fmul and any operand of the fadd is a
       * constant, skip the fusion: keeping the two instructions separate is
       * more efficient because the constants can stay as immediate
       * operands, potentially saving two load_const instructions.
       */
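      /* Illustrative example: with "b = fmul a, 2.0" and "c = fadd b, 1.0",
       * both constants can remain immediate operands of two-source
       * instructions, whereas a fused "c = ffma a, 2.0, 1.0" would generally
       * require materializing them separately, since three-source
       * instructions generally cannot take immediate operands on this
       * hardware.
       */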
      if (any_alu_src_is_a_constant(mul->src) &&
          any_alu_src_is_a_constant(add->src)) {
         continue;
      }

      b->cursor = nir_before_instr(&add->instr);

      if (abs) {
         for (unsigned i = 0; i < 2; i++)
            mul_src[i] = nir_fabs(b, mul_src[i]);
      }

      if (negate)
         mul_src[0] = nir_fneg(b, mul_src[0]);

      nir_alu_instr *ffma = nir_alu_instr_create(b->shader, nir_op_ffma);
      ffma->dest.saturate = add->dest.saturate;
      ffma->dest.write_mask = add->dest.write_mask;

      for (unsigned i = 0; i < 2; i++) {
         ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
      nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src], ffma);

      assert(add->dest.dest.is_ssa);

      nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                        add->dest.dest.ssa.num_components,
                        bit_size,
                        add->dest.dest.ssa.name);
      nir_ssa_def_rewrite_uses(&add->dest.dest.ssa,
                               nir_src_for_ssa(&ffma->dest.dest.ssa));

      nir_builder_instr_insert(b, &ffma->instr);
      assert(list_empty(&add->dest.dest.ssa.uses));
      nir_instr_remove(&add->instr);

      progress = true;
   }

   return progress;
}

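/*
 * Runs the peephole over every block of a function implementation.  Block
 * index and dominance metadata are preserved because only instructions
 * within blocks are rewritten; the CFG itself is untouched.
 */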
static bool
brw_nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
   bool progress = false;

   nir_builder builder;
   nir_builder_init(&builder, impl);

   nir_foreach_block(block, impl) {
      progress |= brw_nir_opt_peephole_ffma_block(&builder, block);
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return progress;
}

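/*
 * Entry point of the pass: fuses fmul+fadd pairs into ffma across every
 * function in the shader.  In the i965 compiler this is typically invoked
 * late, from the NIR post-processing in brw_nir.c, after other algebraic
 * optimizations have had a chance to simplify the multiply/add chains.
 */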
bool
brw_nir_opt_peephole_ffma(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= brw_nir_opt_peephole_ffma_impl(function->impl);
   }

   return progress;
}