/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/**
 * \file nir_opt_intrinsics.c
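 *
 * A small collection of peephole optimizations on intrinsics: folding a
 * bcsel of two shuffles of the same data into a single shuffle, and turning
 * comparisons of gl_SampleMaskIn against zero into gl_HelperInvocation.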
 */

static bool
src_is_single_use_shuffle(nir_src src, nir_ssa_def **data, nir_ssa_def **index)
{
   nir_intrinsic_instr *shuffle = nir_src_as_intrinsic(src);
   if (shuffle == NULL || shuffle->intrinsic != nir_intrinsic_shuffle)
      return false;

   /* This is only called when src is part of an ALU op so requiring no if
    * uses is reasonable.  If we ever want to use this from an if statement,
    * we can change it then.
    */
   if (!list_is_empty(&shuffle->dest.ssa.if_uses) ||
       !list_is_singular(&shuffle->dest.ssa.uses))
      return false;

   assert(shuffle->src[0].is_ssa);
   assert(shuffle->src[1].is_ssa);

   *data = shuffle->src[0].ssa;
   *index = shuffle->src[1].ssa;

   return true;
}

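/* Tries to rewrite
 *
 *    bcsel(cond, shuffle(data, index1), shuffle(data, index2))
 *
 * as
 *
 *    shuffle(data, bcsel(cond, index1, index2))
 *
 * replacing two shuffles with one.  This is only done when both shuffles
 * are single-use and live in the same block as the bcsel.  Returns the
 * replacement def on success and NULL otherwise.
 */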
static nir_ssa_def *
try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
                         bool block_has_discard)
{
   assert(alu->op == nir_op_bcsel);

   /* If we've seen a discard in this block, don't do the optimization.  We
    * could try to do something fancy where we check whether the shuffle is
    * on our side of the discard or not, but this is good enough for
    * correctness for now and subgroup ops in the presence of discard aren't
    * common.
    */
   if (block_has_discard)
      return NULL;

   if (!nir_alu_src_is_trivial_ssa(alu, 0))
      return NULL;

   nir_ssa_def *data1, *index1;
   if (!nir_alu_src_is_trivial_ssa(alu, 1) ||
       alu->src[1].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[1].src, &data1, &index1))
      return NULL;

   nir_ssa_def *data2, *index2;
   if (!nir_alu_src_is_trivial_ssa(alu, 2) ||
       alu->src[2].src.ssa->parent_instr->block != alu->instr.block ||
       !src_is_single_use_shuffle(alu->src[2].src, &data2, &index2))
      return NULL;

   /* Both shuffles must read the same data for the fold to be valid. */
   if (data1 != data2)
      return NULL;

   nir_ssa_def *index = nir_bcsel(b, alu->src[0].src.ssa, index1, index2);
   nir_ssa_def *shuffle = nir_shuffle(b, data1, index);

   return shuffle;
}

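/* Dispatches per-opcode ALU optimizations.  Currently the only one is the
 * bcsel-of-shuffle fold above; if it produces a replacement, all uses of
 * the bcsel are rewritten to it and the original instruction is removed.
 */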
static bool
opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
                   bool block_has_discard)
{
   nir_ssa_def *replacement = NULL;

   switch (alu->op) {
   case nir_op_bcsel:
      replacement = try_opt_bcsel_of_shuffle(b, alu, block_has_discard);
      break;

   default:
      break;
   }

   if (replacement) {
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, replacement);
      nir_instr_remove(&alu->instr);
      return true;
   } else {
      return false;
   }
}

static bool
opt_intrinsics_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                      const struct nir_shader_compiler_options *options)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_sample_mask_in: {
      /* Transform:
       *    gl_SampleMaskIn == 0 ---> gl_HelperInvocation
       *    gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
       */
      if (!options->optimize_sample_mask_in)
         return false;

      bool progress = false;
      nir_foreach_use_safe(use_src, &intrin->dest.ssa) {
         if (use_src->parent_instr->type == nir_instr_type_alu) {
            nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);

            if (alu->op == nir_op_ieq ||
                alu->op == nir_op_ine) {
               /* Check for 0 in either operand. */
               nir_const_value *const_val =
                  nir_src_as_const_value(alu->src[0].src);
               if (!const_val)
                  const_val = nir_src_as_const_value(alu->src[1].src);
               if (!const_val || const_val->i32 != 0)
                  continue;

               nir_ssa_def *new_expr = nir_load_helper_invocation(b, 1);

               if (alu->op == nir_op_ine)
                  new_expr = nir_inot(b, new_expr);

               nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, new_expr);
               nir_instr_remove(&alu->instr);
               progress = true;
            }
         }
      }
      return progress;
   }

   default:
      return false;
   }
}

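/* Walks every block in the function, tracking whether a discard, demote, or
 * terminate has already been seen in the current block so that the
 * bcsel-of-shuffle optimization can be suppressed after such an instruction.
 */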
static bool
opt_intrinsics_impl(nir_function_impl *impl,
                    const struct nir_shader_compiler_options *options)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      bool block_has_discard = false;

      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);

         switch (instr->type) {
         case nir_instr_type_alu:
            if (opt_intrinsics_alu(&b, nir_instr_as_alu(instr),
                                   block_has_discard))
               progress = true;
            break;

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic == nir_intrinsic_discard ||
                intrin->intrinsic == nir_intrinsic_discard_if ||
                intrin->intrinsic == nir_intrinsic_demote ||
                intrin->intrinsic == nir_intrinsic_demote_if ||
                intrin->intrinsic == nir_intrinsic_terminate ||
                intrin->intrinsic == nir_intrinsic_terminate_if)
               block_has_discard = true;

            if (opt_intrinsics_intrin(&b, intrin, options))
               progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   return progress;
}

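/* Entry point for the pass.  Runs the optimizations above on every function
 * in the shader; since no control flow is added or removed, block-index and
 * dominance metadata are preserved when progress is made.
 */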
bool
nir_opt_intrinsics(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      if (opt_intrinsics_impl(function->impl, shader->options)) {
         progress = true;
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}