/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

/*
 * Optimizes atomics (with uniform offsets) using subgroup operations to ensure
 * only one atomic operation is done per subgroup. So res = atomicAdd(addr, 1)
 * would become something like:
 *
 * uint tmp = subgroupAdd(1);
 * uint res;
 * if (subgroupElect())
 *    res = atomicAdd(addr, tmp);
 * res = subgroupBroadcastFirst(res) + subgroupExclusiveAdd(1);
 *
 * This pass requires divergence information.
 */
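
/* A minimal usage sketch (illustrative only; pass ordering is driver-specific).
 * Divergence information must be up to date before this pass runs, e.g. after
 * calling nir_divergence_analysis(), and the pass is then invoked like any
 * other NIR optimization:
 *
 *    NIR_PASS(progress, nir, nir_opt_uniform_atomics, false);
 */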

#include "nir/nir.h"
#include "nir/nir_builder.h"

static nir_op
parse_atomic_op(nir_intrinsic_instr *intr, unsigned *offset_src,
                unsigned *data_src, unsigned *offset2_src)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_ssbo_atomic:
      *offset_src = 1;
      *data_src = 2;
      *offset2_src = *offset_src;
      return nir_atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_deref_atomic:
      *offset_src = 0;
      *data_src = 1;
      *offset2_src = *offset_src;
      return nir_atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_global_atomic_amd:
      *offset_src = 0;
      *data_src = 1;
      *offset2_src = 2;
      return nir_atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_bindless_image_atomic:
      *offset_src = 1;
      *data_src = 3;
      *offset2_src = *offset_src;
      return nir_atomic_op_to_alu(nir_intrinsic_atomic_op(intr));

   default:
      return nir_num_opcodes;
   }
}

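/* Returns a bitmask describing which invocation-index dimensions a divergent
 * value is derived from: bits 0-2 stand for the x/y/z components of the
 * local/global invocation ID (a flat invocation index sets all three), and
 * bit 3 (0x8) stands for the subgroup invocation index. Returns 0 if the
 * value cannot be traced back to such sources.
 */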
static unsigned
get_dim(nir_scalar scalar)
{
   if (!scalar.def->divergent)
      return 0;

   if (nir_scalar_is_intrinsic(scalar)) {
      switch (nir_scalar_intrinsic_op(scalar)) {
      case nir_intrinsic_load_subgroup_invocation:
         return 0x8;
      case nir_intrinsic_load_global_invocation_index:
      case nir_intrinsic_load_local_invocation_index:
         return 0x7;
      case nir_intrinsic_load_global_invocation_id:
      case nir_intrinsic_load_local_invocation_id:
         return 1 << scalar.comp;
      default:
         break;
      }
   } else if (nir_scalar_is_alu(scalar)) {
      if (nir_scalar_alu_op(scalar) == nir_op_iadd ||
          nir_scalar_alu_op(scalar) == nir_op_imul) {
         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);

         unsigned src0_dim = get_dim(src0);
         if (!src0_dim && src0.def->divergent)
            return 0;
         unsigned src1_dim = get_dim(src1);
         if (!src1_dim && src1.def->divergent)
            return 0;

         return src0_dim | src1_dim;
      } else if (nir_scalar_alu_op(scalar) == nir_op_ishl) {
         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
         return src1.def->divergent ? 0 : get_dim(src0);
      }
   }

   return 0;
}

/* Returns a bitmask of invocation indices that are compared against a subgroup
 * uniform value.
 */
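/* For example, the condition of "if (gl_LocalInvocationIndex == 0)" yields
 * 0x7, and "if (subgroupElect())" yields 0x8.
 */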
static unsigned
match_invocation_comparison(nir_scalar scalar)
{
   bool is_alu = nir_scalar_is_alu(scalar);
   if (is_alu && nir_scalar_alu_op(scalar) == nir_op_iand) {
      return match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 0)) |
             match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 1));
   } else if (is_alu && nir_scalar_alu_op(scalar) == nir_op_ieq) {
      if (!nir_scalar_chase_alu_src(scalar, 0).def->divergent)
         return get_dim(nir_scalar_chase_alu_src(scalar, 1));
      if (!nir_scalar_chase_alu_src(scalar, 1).def->divergent)
         return get_dim(nir_scalar_chase_alu_src(scalar, 0));
   } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_elect) {
         return 0x8;
      } else if (intrin->intrinsic == nir_intrinsic_inverse_ballot) {
         unsigned bitcount = 0;
         for (unsigned i = 0; i < intrin->src[0].ssa->num_components; i++) {
            scalar = nir_scalar_resolved(intrin->src[0].ssa, i);
            if (!nir_scalar_is_const(scalar))
               return 0;
            bitcount += util_bitcount64(nir_scalar_as_uint(scalar));
         }
         if (bitcount <= 1)
            return 0x8;
      }
   }

   return 0;
}

/* Returns true if the intrinsic is already conditional so that at most one
 * invocation in the subgroup does the atomic.
 */
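/* For example, an atomic performed inside "if (gl_LocalInvocationIndex == 0)"
 * in a compute shader with a 64x1x1 workgroup is already executed by at most
 * one invocation per workgroup, so there is nothing to gain.
 */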
static bool
is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
{
   unsigned dims = 0;
   for (nir_cf_node *cf = &instr->instr.block->cf_node; cf; cf = cf->parent) {
      if (cf->type == nir_cf_node_if) {
         nir_block *first_then = nir_if_first_then_block(nir_cf_node_as_if(cf));
         nir_block *last_then = nir_if_last_then_block(nir_cf_node_as_if(cf));
         bool within_then = instr->instr.block->index >= first_then->index;
         within_then = within_then && instr->instr.block->index <= last_then->index;
         if (!within_then)
            continue;

         nir_scalar cond = { nir_cf_node_as_if(cf)->condition.ssa, 0 };
         dims |= match_invocation_comparison(cond);
      }
   }

   if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
      unsigned dims_needed = 0;
      for (unsigned i = 0; i < 3; i++)
         dims_needed |= (shader->info.workgroup_size_variable ||
                         shader->info.workgroup_size[i] > 1)
                        << i;
      if ((dims & dims_needed) == dims_needed)
         return true;
   }

   return dims & 0x8;
}

/* Perform a reduction and/or exclusive scan. */
static void
reduce_data(nir_builder *b, nir_op op, nir_def *data,
            nir_def **reduce, nir_def **scan)
{
   if (scan) {
      *scan = nir_exclusive_scan(b, data, .reduction_op = op);
      if (reduce) {
         /* Applying the op to the exclusive scan and the per-invocation data
          * yields the inclusive scan; its value in the last active invocation
          * is the reduction over all active invocations.
          */
         nir_def *last_lane = nir_last_invocation(b);
         nir_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
         *reduce = nir_read_invocation(b, res, last_lane);
      }
   } else {
      *reduce = nir_reduce(b, data, .reduction_op = op);
   }
}

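/* Rewrites the atomic so that a single elected invocation applies the
 * subgroup-reduced data and, if the original result is used, returns a value
 * that reconstructs each invocation's result from the broadcast atomic return
 * value and an exclusive scan of the data (see the example at the top of this
 * file).
 */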
static nir_def *
optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
{
   unsigned offset_src = 0;
   unsigned data_src = 0;
   unsigned offset2_src = 0;
   nir_op op = parse_atomic_op(intrin, &offset_src, &data_src, &offset2_src);
   nir_def *data = intrin->src[data_src].ssa;

   /* Separate uniform reduction and scan is faster than doing a combined scan+reduce */
   bool combined_scan_reduce = return_prev &&
                               nir_src_is_divergent(&intrin->src[data_src]);
   nir_def *reduce = NULL, *scan = NULL;
   reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);

   /* Feed the subgroup-wide reduction to the atomic instead of the
    * per-invocation data.
    */
   nir_src_rewrite(&intrin->src[data_src], reduce);

   nir_def *cond = nir_elect(b, 1);

   nir_if *nif = nir_push_if(b, cond);

   /* Move the atomic into the elected branch so that only one invocation per
    * subgroup executes it.
    */
   nir_instr_remove(&intrin->instr);
   nir_builder_instr_insert(b, &intrin->instr);

   if (return_prev) {
      nir_push_else(b, nif);

      nir_def *undef = nir_undef(b, 1, intrin->def.bit_size);

      nir_pop_if(b, nif);
      nir_def *result = nir_if_phi(b, &intrin->def, undef);
      result = nir_read_first_invocation(b, result);

      if (!combined_scan_reduce)
         reduce_data(b, op, data, NULL, &scan);

      /* Recover each invocation's original return value by combining the
       * broadcast atomic result with its exclusive scan of the data.
       */
      return nir_build_alu(b, op, result, scan, NULL, NULL);
   } else {
      nir_pop_if(b, nif);
      return NULL;
   }
}

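/* Wraps optimize_atomic(). In fragment shaders (unless the backend already
 * predicates atomics, i.e. fs_atomics_predicated is set), the whole
 * transformation is guarded so that helper invocations neither contribute to
 * the reduction nor get elected to perform the atomic.
 */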
static void
optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                            bool fs_atomics_predicated)
{
   nir_if *helper_nif = NULL;
   if (b->shader->info.stage == MESA_SHADER_FRAGMENT && !fs_atomics_predicated) {
      nir_def *helper = nir_is_helper_invocation(b, 1);
      helper_nif = nir_push_if(b, nir_inot(b, helper));
   }

   bool return_prev = !nir_def_is_unused(&intrin->def);

   /* Park the existing uses on a local copy of the def so that the rewritten
    * atomic gets a fresh def; the uses are redirected to the final result
    * below.
    */
   nir_def old_result = intrin->def;
   list_replace(&intrin->def.uses, &old_result.uses);
   nir_def_init(&intrin->instr, &intrin->def, 1,
                intrin->def.bit_size);

   nir_def *result = optimize_atomic(b, intrin, return_prev);

   if (helper_nif) {
      nir_push_else(b, helper_nif);
      nir_def *undef = result ? nir_undef(b, 1, result->bit_size) : NULL;
      nir_pop_if(b, helper_nif);
      if (result)
         result = nir_if_phi(b, result, undef);
   }

   if (result) {
      /* It's possible the result is used as source for another atomic,
       * so this needs to be correct.
       */
      result->divergent = old_result.divergent;
      nir_def_rewrite_uses(&old_result, result);
   }
}

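/* Finds atomics whose offsets are subgroup-uniform and that are not already
 * limited to a single invocation, and rewrites each of them in place.
 */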
static bool
opt_uniform_atomics(nir_function_impl *impl, bool fs_atomics_predicated)
{
   bool progress = false;
   nir_builder b = nir_builder_create(impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         unsigned offset_src, data_src, offset2_src;
         if (parse_atomic_op(intrin, &offset_src, &data_src, &offset2_src) ==
             nir_num_opcodes)
            continue;

         if (nir_src_is_divergent(&intrin->src[offset_src]))
            continue;
         if (nir_src_is_divergent(&intrin->src[offset2_src]))
            continue;

         if (is_atomic_already_optimized(b.shader, intrin))
            continue;

         b.cursor = nir_before_instr(instr);
         optimize_and_rewrite_atomic(&b, intrin, fs_atomics_predicated);
         progress = true;
      }
   }

   return progress;
}

bool
nir_opt_uniform_atomics(nir_shader *shader, bool fs_atomics_predicated)
{
   bool progress = false;

   /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
    * optimizing any atomics.
    */
   if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
       !shader->info.workgroup_size_variable &&
       shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
       shader->info.workgroup_size[2] == 1)
      return false;

   nir_foreach_function_impl(impl, shader) {
      nir_metadata_require(impl, nir_metadata_block_index);

      if (opt_uniform_atomics(impl, fs_atomics_predicated)) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_none);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}