/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_search_helpers.h"

/**
 * \file nir_opt_intrinsics.c
 *
 * Optimizes a few intrinsic-centric patterns: bcsel of two shuffles,
 * quad reductions that can become quad votes, exclusive scans that can
 * become inclusive scans, and comparisons of gl_SampleMaskIn against zero.
 */

static bool
src_is_single_use_shuffle(nir_src src, nir_def **data, nir_def **index)
{
   nir_intrinsic_instr *shuffle = nir_src_as_intrinsic(src);
   if (shuffle == NULL || shuffle->intrinsic != nir_intrinsic_shuffle)
      return false;

   /* This is only called when src is part of an ALU op so requiring no if
    * uses is reasonable.  If we ever want to use this from an if statement,
    * we can change it then.
    */
   if (!list_is_singular(&shuffle->def.uses))
      return false;

   if (nir_def_used_by_if(&shuffle->def))
      return false;

   *data = shuffle->src[0].ssa;
   *index = shuffle->src[1].ssa;

   return true;
}

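/* Sketch of the rewrite below, in NIR-like pseudocode:
 *
 *    x = shuffle(data, index1)
 *    y = shuffle(data, index2)
 *    r = bcsel(cond, x, y)
 * ->
 *    index = bcsel(cond, index1, index2)
 *    r = shuffle(data, index)
 *
 * which replaces two shuffles with one at the cost of an extra bcsel.
 */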
static nir_def *
try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
                         bool block_has_discard)
{
   assert(alu->op == nir_op_bcsel);

   /* If we've seen a discard in this block, don't do the optimization.  We
    * could try to do something fancy where we check if the shuffle is on our
    * side of the discard or not but this is good enough for correctness for
    * now and subgroup ops in the presence of discard aren't common.
    */
   if (block_has_discard)
      return NULL;

   if (!nir_alu_src_is_trivial_ssa(alu, 0))
      return NULL;

   nir_def *data1, *index1;
   if (!nir_alu_src_is_trivial_ssa(alu, 1) ||
       alu->src[1].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[1].src, &data1, &index1))
      return NULL;

   nir_def *data2, *index2;
   if (!nir_alu_src_is_trivial_ssa(alu, 2) ||
       alu->src[2].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[2].src, &data2, &index2))
      return NULL;

   if (data1 != data2)
      return NULL;

   nir_def *index = nir_bcsel(b, alu->src[0].src.ssa, index1, index2);
   nir_def *shuffle = nir_shuffle(b, data1, index);

   return shuffle;
}

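/* For example, in GLSL terms, this rewrites
 *
 *    gl_FrontFacing ? x : -x
 *
 * into a multiply by a ±1.0 front-face sign, assuming the backend opted in
 * via optimize_load_front_face_fsign and can produce that sign cheaply.
 */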
/* load_front_face ? a : -a -> load_front_face_fsign * a */
static nir_def *
try_opt_front_face_fsign(nir_builder *b, nir_alu_instr *alu)
{
   /* Match bcsel(load_front_face, a, -a) where the result is only ever
    * consumed as a float.
    */
   if (alu->def.bit_size != 32 ||
       !nir_src_as_intrinsic(alu->src[0].src) ||
       nir_src_as_intrinsic(alu->src[0].src)->intrinsic != nir_intrinsic_load_front_face ||
       !is_only_used_as_float(alu) ||
       !nir_alu_srcs_negative_equal_typed(alu, alu, 1, 2, nir_type_float))
      return NULL;

   nir_def *src = nir_ssa_for_alu_src(b, alu, 1);

   return nir_fmul(b, nir_load_front_face_fsign(b), src);
}

static bool
src_is_quad_broadcast(nir_block *block, nir_src src, nir_intrinsic_instr **intrin)
{
   nir_intrinsic_instr *broadcast = nir_src_as_intrinsic(src);
   if (broadcast == NULL || broadcast->instr.block != block)
      return false;

   switch (broadcast->intrinsic) {
   case nir_intrinsic_quad_broadcast:
      if (!nir_src_is_const(broadcast->src[1]))
         return false;
      FALLTHROUGH;
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_quad_swizzle_amd:
      *intrin = broadcast;
      return true;
   default:
      return false;
   }
}

static bool
src_is_alu(nir_op op, nir_src src, nir_src srcs[2])
{
   nir_alu_instr *alu = nir_src_as_alu_instr(src);
   if (alu == NULL || alu->op != op)
      return false;

   if (!nir_alu_src_is_trivial_ssa(alu, 0) || !nir_alu_src_is_trivial_ssa(alu, 1))
      return false;

   srcs[0] = alu->src[0].src;
   srcs[1] = alu->src[1].src;

   return true;
}

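/* Illustrative sketch: in GLSL-like terms, the matcher below recognizes
 * reductions such as
 *
 *    subgroupQuadBroadcast(b, 0) && subgroupQuadBroadcast(b, 1) &&
 *    subgroupQuadBroadcast(b, 2) && subgroupQuadBroadcast(b, 3)
 *
 * (or the same shape built from quad swaps, in either association order)
 * and rewrites iand chains as quad_vote_all and ior chains as
 * quad_vote_any.  The lane bookkeeping afterwards verifies that every lane
 * of the quad really sees all four lanes' values.
 */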
static nir_def *
try_opt_quad_vote(nir_builder *b, nir_alu_instr *alu, bool block_has_discard)
{
   if (block_has_discard)
      return NULL;

   if (!nir_alu_src_is_trivial_ssa(alu, 0) || !nir_alu_src_is_trivial_ssa(alu, 1))
      return NULL;

   nir_intrinsic_instr *quad_broadcasts[4];
   nir_src srcs[2][2];
   bool found = false;

   /* Match (broadcast0 op broadcast1) op (broadcast2 op broadcast3). */
   found = src_is_alu(alu->op, alu->src[0].src, srcs[0]) &&
           src_is_alu(alu->op, alu->src[1].src, srcs[1]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[0][0], &quad_broadcasts[0]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[0][1], &quad_broadcasts[1]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[1][0], &quad_broadcasts[2]) &&
           src_is_quad_broadcast(alu->instr.block, srcs[1][1], &quad_broadcasts[3]);

   /* Match (((broadcast2 op broadcast3) op broadcast1) op broadcast0). */
   if (!found) {
      if ((src_is_alu(alu->op, alu->src[0].src, srcs[0]) &&
           src_is_quad_broadcast(alu->instr.block, alu->src[1].src, &quad_broadcasts[0])) ||
          (src_is_alu(alu->op, alu->src[1].src, srcs[0]) &&
           src_is_quad_broadcast(alu->instr.block, alu->src[0].src, &quad_broadcasts[0]))) {
         /* ((broadcast2 op broadcast3) op broadcast1) */
         if ((src_is_alu(alu->op, srcs[0][0], srcs[1]) &&
              src_is_quad_broadcast(alu->instr.block, srcs[0][1], &quad_broadcasts[1])) ||
             (src_is_alu(alu->op, srcs[0][1], srcs[1]) &&
              src_is_quad_broadcast(alu->instr.block, srcs[0][0], &quad_broadcasts[1]))) {
            /* (broadcast2 op broadcast3) */
            found = src_is_quad_broadcast(alu->instr.block, srcs[1][0], &quad_broadcasts[2]) &&
                    src_is_quad_broadcast(alu->instr.block, srcs[1][1], &quad_broadcasts[3]);
         }
      }
   }

   if (!found)
      return NULL;

   /* Check if each lane in a quad reduces all lanes in the quad, and if all
    * broadcasts read the same data.  lanes_read holds four bits per quad
    * lane j: bit (j * 4 + lane) is set if lane j reads "lane" through one
    * of the four broadcasts.  A full quad reduction sets all 16 bits.
    */
   uint16_t lanes_read = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!nir_srcs_equal(quad_broadcasts[i]->src[0], quad_broadcasts[0]->src[0]))
         return NULL;

      for (unsigned j = 0; j < 4; j++) {
         unsigned lane;
         switch (quad_broadcasts[i]->intrinsic) {
         case nir_intrinsic_quad_broadcast:
            lane = nir_src_as_uint(quad_broadcasts[i]->src[1]) & 0x3;
            break;
         case nir_intrinsic_quad_swap_horizontal:
            lane = j ^ 1;
            break;
         case nir_intrinsic_quad_swap_vertical:
            lane = j ^ 2;
            break;
         case nir_intrinsic_quad_swap_diagonal:
            lane = 3 - j;
            break;
         case nir_intrinsic_quad_swizzle_amd:
            lane = (nir_intrinsic_swizzle_mask(quad_broadcasts[i]) >> (j * 2)) & 0x3;
            break;
         default:
            unreachable("not a quad broadcast intrinsic");
         }
         lanes_read |= (1 << lane) << (j * 4);
      }
   }

   if (lanes_read != 0xffff)
      return NULL;

   /* Create quad vote. */
   if (alu->op == nir_op_iand)
      return nir_quad_vote_all(b, 1, quad_broadcasts[0]->src[0].ssa);
   else
      return nir_quad_vote_any(b, 1, quad_broadcasts[0]->src[0].ssa);
}

static bool
opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
                   bool block_has_discard, const struct nir_shader_compiler_options *options)
{
   nir_def *replacement = NULL;

   switch (alu->op) {
   case nir_op_bcsel:
      replacement = try_opt_bcsel_of_shuffle(b, alu, block_has_discard);
      if (!replacement && options->optimize_load_front_face_fsign)
         replacement = try_opt_front_face_fsign(b, alu);
      break;
   case nir_op_iand:
   case nir_op_ior:
      if (alu->def.bit_size == 1 && options->optimize_quad_vote_to_reduce)
         replacement = try_opt_quad_vote(b, alu, block_has_discard);
      break;
   default:
      break;
   }

   if (replacement) {
      nir_def_replace(&alu->def, replacement);
      return true;
   } else {
      return false;
   }
}

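/* Sketch of the rewrite below, in NIR-like pseudocode:
 *
 *    s = exclusive_scan(op, x)
 *    r = op(s, x)          (every use of s must have this shape)
 * ->
 *    r = inclusive_scan(op, x)
 *
 * which folds the trailing op into the scan itself.
 */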
static bool
try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
{
   if (intrin->def.num_components != 1)
      return false;

   nir_foreach_use_including_if(src, &intrin->def) {
      if (nir_src_is_if(src) || nir_src_parent_instr(src)->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(src));

      if (alu->op != (nir_op)nir_intrinsic_reduction_op(intrin))
         return false;

      /* Don't reassociate exact float operations. */
      if (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) == nir_type_float && alu->exact)
         return false;

      /* SPIR-V rules for fmax/fmin scans are *very* stupid.
       * The required identity is Inf instead of NaN, but if one input
       * is NaN, the other value has to be returned.
       *
       * This means for invocation 0:
       * min(subgroupExclusiveMin(NaN), NaN) -> Inf
       * subgroupInclusiveMin(NaN) -> undefined (NaN for any sane backend)
       *
       * SPIR-V [NF]Min/Max don't allow an undefined result, even with
       * standard float controls.
       */
      if (alu->op == nir_op_fmax || alu->op == nir_op_fmin)
         return false;

      if (alu->def.num_components != 1)
         return false;

      nir_alu_src *alu_src = list_entry(src, nir_alu_src, src);
      unsigned src_index = alu_src - alu->src;

      assert(src_index < 2 && nir_op_infos[alu->op].num_inputs == 2);

      nir_scalar scan_scalar = nir_scalar_resolved(intrin->src[0].ssa, 0);
      nir_scalar op_scalar = nir_scalar_resolved(alu->src[!src_index].src.ssa,
                                                 alu->src[!src_index].swizzle[0]);

      if (!nir_scalar_equal(scan_scalar, op_scalar))
         return false;
   }

   /* Convert to inclusive scan. */
   intrin->intrinsic = nir_intrinsic_inclusive_scan;

   nir_foreach_use_including_if_safe(src, &intrin->def) {
      /* Remove the trailing ALU op by rerouting its uses to the scan. */
      nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(src));
      nir_def_replace(&alu->def, &intrin->def);
   }

   return true;
}

static bool
opt_intrinsics_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                      const struct nir_shader_compiler_options *options)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_sample_mask_in: {
      /* Transform:
       *   gl_SampleMaskIn == 0 ---> gl_HelperInvocation
       *   gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
       */
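      /* The assumption (which is why this is gated behind
       * optimize_sample_mask_in) is that helper invocations see an input
       * sample mask of zero, while any non-helper fragment invocation
       * covers at least one sample.
       */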
      if (!options->optimize_sample_mask_in)
         return false;

      bool progress = false;
      nir_foreach_use_safe(use_src, &intrin->def) {
         if (nir_src_parent_instr(use_src)->type == nir_instr_type_alu) {
            nir_alu_instr *alu = nir_instr_as_alu(nir_src_parent_instr(use_src));

            if ((alu->op != nir_op_ieq && alu->op != nir_op_ine) || alu->def.num_components != 1)
               continue;

            nir_alu_src *alu_src = list_entry(use_src, nir_alu_src, src);
            unsigned src_index = alu_src - alu->src;
            nir_scalar other = nir_scalar_chase_alu_src(nir_get_scalar(&alu->def, 0), !src_index);

            if (!nir_scalar_is_const(other) || nir_scalar_as_uint(other))
               continue;

            nir_cf_node *cf_node = &intrin->instr.block->cf_node;
            while (cf_node->parent)
               cf_node = cf_node->parent;

            nir_function_impl *func_impl = nir_cf_node_as_function(cf_node);

            /* We need to insert load_helper before any demote, which is
             * only possible in the entry point function.
             */
            if (func_impl != nir_shader_get_entrypoint(b->shader))
               break;

            b->cursor = nir_before_impl(func_impl);

            nir_def *new_expr = nir_load_helper_invocation(b, 1);

            if (alu->op == nir_op_ine)
               new_expr = nir_inot(b, new_expr);

            nir_def_replace(&alu->def, new_expr);
            progress = true;
         }
      }
      return progress;
   }
   case nir_intrinsic_exclusive_scan:
      return try_opt_exclusive_scan_to_inclusive(intrin);
   default:
      return false;
   }
}

static bool
opt_intrinsics_impl(nir_function_impl *impl,
                    const struct nir_shader_compiler_options *options)
{
   nir_builder b = nir_builder_create(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      bool block_has_discard = false;

      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);

         switch (instr->type) {
         case nir_instr_type_alu:
            if (opt_intrinsics_alu(&b, nir_instr_as_alu(instr),
                                   block_has_discard, options))
               progress = true;
            break;

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic == nir_intrinsic_demote ||
                intrin->intrinsic == nir_intrinsic_demote_if ||
                intrin->intrinsic == nir_intrinsic_terminate ||
                intrin->intrinsic == nir_intrinsic_terminate_if)
               block_has_discard = true;

            if (opt_intrinsics_intrin(&b, intrin, options))
               progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   return progress;
}

bool
nir_opt_intrinsics(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      if (opt_intrinsics_impl(impl, shader->options)) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_control_flow);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}
441