/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/**
 * \file nir_opt_intrinsics.c
 *
 * Replaces subgroup and fragment intrinsic patterns with cheaper equivalents:
 * a bcsel of two shuffles of the same data becomes a single shuffle, a
 * boolean reduction over a quad becomes a quad vote, an exclusive scan whose
 * result is always recombined with its source becomes an inclusive scan, and
 * comparisons of gl_SampleMaskIn with zero become gl_HelperInvocation reads.
 */

static bool
src_is_single_use_shuffle(nir_src src, nir_def **data, nir_def **index)
{
   nir_intrinsic_instr *shuffle = nir_src_as_intrinsic(src);
   if (shuffle == NULL || shuffle->intrinsic != nir_intrinsic_shuffle)
      return false;

   /* This is only called when src is part of an ALU op so requiring no if
    * uses is reasonable. If we ever want to use this from an if statement,
    * we can change it then.
    */
   if (!list_is_singular(&shuffle->def.uses))
      return false;

   if (nir_def_used_by_if(&shuffle->def))
      return false;

   *data = shuffle->src[0].ssa;
   *index = shuffle->src[1].ssa;

   return true;
}

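/* Fuse
 *
 *    bcsel(c, shuffle(data, i), shuffle(data, j))
 *
 * into
 *
 *    shuffle(data, bcsel(c, i, j))
 *
 * when both shuffles read the same data, live in the same block as the
 * bcsel, and have no other uses, trading two shuffles for one.
 */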
static nir_def *
try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
                         bool block_has_discard)
{
   assert(alu->op == nir_op_bcsel);

   /* If we've seen a discard in this block, don't do the optimization. We
    * could try to do something fancy where we check if the shuffle is on our
    * side of the discard or not but this is good enough for correctness for
    * now and subgroup ops in the presence of discard aren't common.
    */
   if (block_has_discard)
      return NULL;

   if (!nir_alu_src_is_trivial_ssa(alu, 0))
      return NULL;

   nir_def *data1, *index1;
   if (!nir_alu_src_is_trivial_ssa(alu, 1) ||
       alu->src[1].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[1].src, &data1, &index1))
      return NULL;

   nir_def *data2, *index2;
   if (!nir_alu_src_is_trivial_ssa(alu, 2) ||
       alu->src[2].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[2].src, &data2, &index2))
      return NULL;

   if (data1 != data2)
      return NULL;

   nir_def *index = nir_bcsel(b, alu->src[0].src.ssa, index1, index2);
   nir_def *shuffle = nir_shuffle(b, data1, index);

   return shuffle;
}

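/* Matches intrinsics that, within a quad, read a value from another lane:
 * quad_broadcast with a constant lane index and the quad swap/swizzle
 * variants.
 */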
static bool
src_is_quad_broadcast(nir_block *block, nir_src src, nir_intrinsic_instr **intrin)
{
   nir_intrinsic_instr *broadcast = nir_src_as_intrinsic(src);
   if (broadcast == NULL || broadcast->instr.block != block)
      return false;

   switch (broadcast->intrinsic) {
   case nir_intrinsic_quad_broadcast:
      if (!nir_src_is_const(broadcast->src[1]))
         return false;
      FALLTHROUGH;
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_quad_swizzle_amd:
      *intrin = broadcast;
      return true;
   default:
      return false;
   }
}

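/* Matches a two-source ALU instruction of the given opcode whose sources are
 * trivial SSA values and returns those two sources.
 */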
static bool
src_is_alu(nir_op op, nir_src src, nir_src srcs[2])
{
   nir_alu_instr *alu = nir_src_as_alu_instr(src);
   if (alu == NULL || alu->op != op)
      return false;

   if (!nir_alu_src_is_trivial_ssa(alu, 0) || !nir_alu_src_is_trivial_ssa(alu, 1))
      return false;

   srcs[0] = alu->src[0].src;
   srcs[1] = alu->src[1].src;

   return true;
}

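/* Recognize a boolean reduction over a whole quad, e.g.
 *
 *    (quad_broadcast(x, 0) & quad_broadcast(x, 1)) &
 *    (quad_broadcast(x, 2) & quad_broadcast(x, 3))
 *
 * and replace it with quad_vote_all (for iand) or quad_vote_any (for ior).
 * Any mix of quad broadcasts, swaps, and swizzles is accepted as long as,
 * taken together, they feed every lane's value into every lane of the quad.
 */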
static nir_def *
try_opt_quad_vote(nir_builder *b, nir_alu_instr *alu, bool block_has_discard)
{
   if (block_has_discard)
      return NULL;

   if (!nir_alu_src_is_trivial_ssa(alu, 0) || !nir_alu_src_is_trivial_ssa(alu, 1))
      return NULL;

   nir_intrinsic_instr *quad_broadcasts[4];
   nir_src srcs[2][2];
   bool found = false;

   /* Match (broadcast0 op broadcast1) op (broadcast2 op broadcast3). */
   found = src_is_alu(alu->op, alu->src[0].src, srcs[0]) &&
           src_is_alu(alu->op, alu->src[1].src, srcs[1]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[0][0], &quad_broadcasts[0]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[0][1], &quad_broadcasts[1]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[1][0], &quad_broadcasts[2]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[1][1], &quad_broadcasts[3]);

   /* Match ((broadcast2 op broadcast3) op broadcast1) op broadcast0. */
   if (!found) {
      if ((src_is_alu(alu->op, alu->src[0].src, srcs[0]) &&
           src_is_quad_broadcast(alu->instr.block, alu->src[1].src, &quad_broadcasts[0])) ||
          (src_is_alu(alu->op, alu->src[1].src, srcs[0]) &&
           src_is_quad_broadcast(alu->instr.block, alu->src[0].src, &quad_broadcasts[0]))) {
         /* (broadcast2 op broadcast3) op broadcast1 */
         if ((src_is_alu(alu->op, srcs[0][0], srcs[1]) &&
              src_is_quad_broadcast(alu->instr.block, srcs[0][1], &quad_broadcasts[1])) ||
             (src_is_alu(alu->op, srcs[0][1], srcs[1]) &&
              src_is_quad_broadcast(alu->instr.block, srcs[0][0], &quad_broadcasts[1]))) {
            /* broadcast2 op broadcast3 */
            found = src_is_quad_broadcast(alu->instr.block, srcs[1][0], &quad_broadcasts[2]) &&
                    src_is_quad_broadcast(alu->instr.block, srcs[1][1], &quad_broadcasts[3]);
         }
      }
   }

   if (!found)
      return NULL;

   /* Check that all four broadcasts read the same data and that, taken
    * together, they hand every lane in the quad the value from every other
    * lane. Bit (j * 4 + lane) of lanes_read is set when lane j of the quad
    * reads lane "lane"; the reduction is complete only if all 16 bits end
    * up set.
    */
   uint16_t lanes_read = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!nir_srcs_equal(quad_broadcasts[i]->src[0], quad_broadcasts[0]->src[0]))
         return NULL;

      for (unsigned j = 0; j < 4; j++) {
         unsigned lane;
         switch (quad_broadcasts[i]->intrinsic) {
         case nir_intrinsic_quad_broadcast:
            /* Reads the same constant lane regardless of the reading lane. */
            lane = nir_src_as_uint(quad_broadcasts[i]->src[1]) & 0x3;
            break;
         case nir_intrinsic_quad_swap_horizontal:
            /* Swaps lanes 0<->1 and 2<->3. */
            lane = j ^ 1;
            break;
         case nir_intrinsic_quad_swap_vertical:
            /* Swaps lanes 0<->2 and 1<->3. */
            lane = j ^ 2;
            break;
         case nir_intrinsic_quad_swap_diagonal:
            /* Swaps lanes 0<->3 and 1<->2. */
            lane = 3 - j;
            break;
         case nir_intrinsic_quad_swizzle_amd:
            /* Each 2-bit field of the swizzle mask selects the lane read. */
            lane = (nir_intrinsic_swizzle_mask(quad_broadcasts[i]) >> (j * 2)) & 0x3;
            break;
         default:
            unreachable("not a quad broadcast");
         }
         lanes_read |= (1 << lane) << (j * 4);
      }
   }

   if (lanes_read != 0xffff)
      return NULL;

   /* Create quad vote. */
   if (alu->op == nir_op_iand)
      return nir_quad_vote_all(b, 1, quad_broadcasts[0]->src[0].ssa);
   else
      return nir_quad_vote_any(b, 1, quad_broadcasts[0]->src[0].ssa);
}

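/* Dispatches the per-opcode rewrites above; on success the ALU result is
 * replaced by the new expression and the original instruction is removed.
 */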
static bool
opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
                   bool block_has_discard, const struct nir_shader_compiler_options *options)
{
   nir_def *replacement = NULL;

   switch (alu->op) {
   case nir_op_bcsel:
      replacement = try_opt_bcsel_of_shuffle(b, alu, block_has_discard);
      break;
   case nir_op_iand:
   case nir_op_ior:
      if (alu->def.bit_size == 1 && options->optimize_quad_vote_to_reduce)
         replacement = try_opt_quad_vote(b, alu, block_has_discard);
      break;
   default:
      break;
   }

   if (replacement) {
      nir_def_rewrite_uses(&alu->def, replacement);
      nir_instr_remove(&alu->instr);
      return true;
   } else {
      return false;
   }
}

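/* An exclusive scan followed by applying the scanned operation once more to
 * the scan source is just an inclusive scan:
 *
 *    exclusive_scan(op, x) op x  ==  inclusive_scan(op, x)
 *
 * If every use of the exclusive scan has this shape, convert the scan to an
 * inclusive one and drop the trailing ALU ops.
 */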
static bool
try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
{
   if (intrin->def.num_components != 1)
      return false;

   nir_foreach_use_including_if(src, &intrin->def) {
      if (nir_src_is_if(src) || nir_src_parent_instr(src)->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(src));

      if (alu->op != (nir_op)nir_intrinsic_reduction_op(intrin))
         return false;

      /* Don't reassociate exact float operations. */
      if (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) == nir_type_float &&
          alu->op != nir_op_fmax && alu->op != nir_op_fmin && alu->exact)
         return false;

      if (alu->def.num_components != 1)
         return false;

      nir_alu_src *alu_src = list_entry(src, nir_alu_src, src);
      unsigned src_index = alu_src - alu->src;

      assert(src_index < 2 && nir_op_infos[alu->op].num_inputs == 2);

      nir_scalar scan_scalar = nir_scalar_resolved(intrin->src[0].ssa, 0);
      nir_scalar op_scalar = nir_scalar_resolved(alu->src[!src_index].src.ssa,
                                                 alu->src[!src_index].swizzle[0]);

      if (!nir_scalar_equal(scan_scalar, op_scalar))
         return false;
   }

   /* Convert to inclusive scan. */
   intrin->intrinsic = nir_intrinsic_inclusive_scan;

   nir_foreach_use_including_if_safe(src, &intrin->def) {
      /* Remove alu. */
      nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(src));
      nir_def_rewrite_uses(&alu->def, &intrin->def);
      nir_instr_remove(&alu->instr);
   }

   return true;
}

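/* Per-intrinsic rewrites: folding comparisons of gl_SampleMaskIn against
 * zero into (possibly negated) gl_HelperInvocation reads, and the
 * exclusive-to-inclusive scan conversion above.
 */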
static bool
opt_intrinsics_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                      const struct nir_shader_compiler_options *options)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_sample_mask_in: {
      /* Transform:
       *   gl_SampleMaskIn == 0 ---> gl_HelperInvocation
       *   gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
       */
      if (!options->optimize_sample_mask_in)
         return false;

      bool progress = false;
      nir_foreach_use_safe(use_src, &intrin->def) {
         if (nir_src_parent_instr(use_src)->type == nir_instr_type_alu) {
            nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(use_src));

            if (alu->op == nir_op_ieq ||
                alu->op == nir_op_ine) {
               /* Check for 0 in either operand. */
               nir_const_value *const_val =
                  nir_src_as_const_value(alu->src[0].src);
               if (!const_val)
                  const_val = nir_src_as_const_value(alu->src[1].src);
               if (!const_val || const_val->i32 != 0)
                  continue;

               nir_def *new_expr = nir_load_helper_invocation(b, 1);

               if (alu->op == nir_op_ine)
                  new_expr = nir_inot(b, new_expr);

               nir_def_rewrite_uses(&alu->def, new_expr);
               nir_instr_remove(&alu->instr);
               progress = true;
            }
         }
      }
      return progress;
   }
   case nir_intrinsic_exclusive_scan:
      return try_opt_exclusive_scan_to_inclusive(intrin);
   default:
      return false;
   }
}

static bool
opt_intrinsics_impl(nir_function_impl *impl,
                    const struct nir_shader_compiler_options *options)
{
   nir_builder b = nir_builder_create(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
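      /* A discard/demote/terminate makes inactive lanes possible later in
       * the block, so the subgroup rewrites above are skipped once one has
       * been seen (see try_opt_bcsel_of_shuffle).
       */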
      bool block_has_discard = false;

      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);

         switch (instr->type) {
         case nir_instr_type_alu:
            if (opt_intrinsics_alu(&b, nir_instr_as_alu(instr),
                                   block_has_discard, options))
               progress = true;
            break;

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic == nir_intrinsic_discard ||
                intrin->intrinsic == nir_intrinsic_discard_if ||
                intrin->intrinsic == nir_intrinsic_demote ||
                intrin->intrinsic == nir_intrinsic_demote_if ||
                intrin->intrinsic == nir_intrinsic_terminate ||
                intrin->intrinsic == nir_intrinsic_terminate_if)
               block_has_discard = true;

            if (opt_intrinsics_intrin(&b, intrin, options))
               progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   return progress;
}

bool
nir_opt_intrinsics(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      if (opt_intrinsics_impl(impl, shader->options)) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_block_index |
                                     nir_metadata_dominance);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}