/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

/*
 * Optimizes atomics (with uniform offsets) using subgroup operations to ensure
 * only one atomic operation is done per subgroup. So res = atomicAdd(addr, 1)
 * would become something like:
 *
 * uint tmp = subgroupAdd(1);
 * uint res;
 * if (subgroupElect())
 *    res = atomicAdd(addr, tmp);
 * res = subgroupBroadcastFirst(res) + subgroupExclusiveAdd(1);
 *
 * This pass requires and preserves LCSSA and divergence information.
 */

#include "nir/nir.h"
#include "nir/nir_builder.h"

static nir_op
atomic_op_to_alu(nir_atomic_op op)
{
   switch (op) {
   case nir_atomic_op_iadd:
      return nir_op_iadd;
   case nir_atomic_op_imin:
      return nir_op_imin;
   case nir_atomic_op_umin:
      return nir_op_umin;
   case nir_atomic_op_imax:
      return nir_op_imax;
   case nir_atomic_op_umax:
      return nir_op_umax;
   case nir_atomic_op_iand:
      return nir_op_iand;
   case nir_atomic_op_ior:
      return nir_op_ior;
   case nir_atomic_op_ixor:
      return nir_op_ixor;
   case nir_atomic_op_fadd:
      return nir_op_fadd;
   case nir_atomic_op_fmin:
      return nir_op_fmin;
   case nir_atomic_op_fmax:
      return nir_op_fmax;

   /* We don't handle exchanges or wraps */
   case nir_atomic_op_xchg:
   case nir_atomic_op_cmpxchg:
   case nir_atomic_op_fcmpxchg:
   case nir_atomic_op_inc_wrap:
   case nir_atomic_op_dec_wrap:
      return nir_num_opcodes;
   }

   unreachable("Unknown atomic op");
}

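/* Identify the offset and data sources of a supported atomic intrinsic and
 * return the ALU opcode used for its subgroup reduction, or nir_num_opcodes
 * if this pass cannot handle the intrinsic.
 */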
static nir_op
parse_atomic_op(nir_intrinsic_instr *intr, unsigned *offset_src,
                unsigned *data_src, unsigned *offset2_src)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_ssbo_atomic:
      *offset_src = 1;
      *data_src = 2;
      *offset2_src = *offset_src;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_deref_atomic:
      *offset_src = 0;
      *data_src = 1;
      *offset2_src = *offset_src;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_global_atomic_amd:
      *offset_src = 0;
      *data_src = 1;
      *offset2_src = 2;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_bindless_image_atomic:
      *offset_src = 1;
      *data_src = 3;
      *offset2_src = *offset_src;
      return atomic_op_to_alu(nir_intrinsic_atomic_op(intr));

   default:
      return nir_num_opcodes;
   }
}

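/* Describe a divergent value as a bitmask of invocation-ID dimensions: bits
 * 0-2 stand for the x/y/z components of the local/global invocation ID and
 * bit 3 for the subgroup invocation index. Returns 0 if the value cannot be
 * described this way.
 */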
static unsigned
get_dim(nir_scalar scalar)
{
   if (!scalar.def->divergent)
      return 0;

   if (nir_scalar_is_intrinsic(scalar)) {
      switch (nir_scalar_intrinsic_op(scalar)) {
      case nir_intrinsic_load_subgroup_invocation:
         return 0x8;
      case nir_intrinsic_load_global_invocation_index:
      case nir_intrinsic_load_local_invocation_index:
         return 0x7;
      case nir_intrinsic_load_global_invocation_id:
      case nir_intrinsic_load_local_invocation_id:
         return 1 << scalar.comp;
      default:
         break;
      }
   } else if (nir_scalar_is_alu(scalar)) {
      if (nir_scalar_alu_op(scalar) == nir_op_iadd ||
          nir_scalar_alu_op(scalar) == nir_op_imul) {
         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);

         unsigned src0_dim = get_dim(src0);
         if (!src0_dim && src0.def->divergent)
            return 0;
         unsigned src1_dim = get_dim(src1);
         if (!src1_dim && src1.def->divergent)
            return 0;

         return src0_dim | src1_dim;
      } else if (nir_scalar_alu_op(scalar) == nir_op_ishl) {
         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
         return src1.def->divergent ? 0 : get_dim(src0);
      }
   }

   return 0;
}

/* Returns a bitmask of invocation indices that are compared against a subgroup
 * uniform value.
 */
static unsigned
match_invocation_comparison(nir_scalar scalar)
{
   bool is_alu = nir_scalar_is_alu(scalar);
   if (is_alu && nir_scalar_alu_op(scalar) == nir_op_iand) {
      return match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 0)) |
             match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 1));
   } else if (is_alu && nir_scalar_alu_op(scalar) == nir_op_ieq) {
      if (!nir_scalar_chase_alu_src(scalar, 0).def->divergent)
         return get_dim(nir_scalar_chase_alu_src(scalar, 1));
      if (!nir_scalar_chase_alu_src(scalar, 1).def->divergent)
         return get_dim(nir_scalar_chase_alu_src(scalar, 0));
   } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_elect)
         return 0x8;
   }

   return 0;
}

/* Returns true if the intrinsic is already conditional so that at most one
 * invocation in the subgroup does the atomic.
 */
static bool
is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
{
   unsigned dims = 0;
   for (nir_cf_node *cf = &instr->instr.block->cf_node; cf; cf = cf->parent) {
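      /* Only conditions of enclosing if-statements whose then-branch contains
       * the atomic can restrict which invocations execute it. */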
      if (cf->type == nir_cf_node_if) {
         nir_block *first_then = nir_if_first_then_block(nir_cf_node_as_if(cf));
         nir_block *last_then = nir_if_last_then_block(nir_cf_node_as_if(cf));
         bool within_then = instr->instr.block->index >= first_then->index;
         within_then = within_then && instr->instr.block->index <= last_then->index;
         if (!within_then)
            continue;

         nir_scalar cond = { nir_cf_node_as_if(cf)->condition.ssa, 0 };
         dims |= match_invocation_comparison(cond);
      }
   }

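   /* For compute-like stages, the atomic counts as already optimized if the
    * enclosing conditions single out one invocation along every workgroup
    * dimension that can be larger than one. */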
   if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
      unsigned dims_needed = 0;
      for (unsigned i = 0; i < 3; i++)
         dims_needed |= (shader->info.workgroup_size_variable ||
                         shader->info.workgroup_size[i] > 1)
                        << i;
      if ((dims & dims_needed) == dims_needed)
         return true;
   }

   return dims & 0x8;
}

/* Perform a reduction and/or exclusive scan. */
static void
reduce_data(nir_builder *b, nir_op op, nir_def *data,
            nir_def **reduce, nir_def **scan)
{
   if (scan) {
      *scan = nir_exclusive_scan(b, data, .reduction_op = op);
      if (reduce) {
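         /* The last active invocation's inclusive scan (its exclusive scan
          * combined with its own data) equals the full subgroup reduction, so
          * compute it there and read it back from that lane. */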
         nir_def *last_lane = nir_last_invocation(b);
         nir_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
         *reduce = nir_read_invocation(b, res, last_lane);
      }
   } else {
      *reduce = nir_reduce(b, data, .reduction_op = op);
   }
}

static nir_def *
optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
{
   unsigned offset_src = 0;
   unsigned data_src = 0;
   unsigned offset2_src = 0;
   nir_op op = parse_atomic_op(intrin, &offset_src, &data_src, &offset2_src);
   nir_def *data = intrin->src[data_src].ssa;

   /* Separate uniform reduction and scan is faster than doing a combined scan+reduce */
   bool combined_scan_reduce = return_prev && data->divergent;
   nir_def *reduce = NULL, *scan = NULL;
   reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);

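   /* Feed the whole-subgroup reduction into the atomic instead of the
    * per-invocation data and recompute the instruction's divergence. */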
   nir_src_rewrite(&intrin->src[data_src], reduce);
   nir_update_instr_divergence(b->shader, &intrin->instr);

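   /* Elect a single invocation in the subgroup to perform the atomic. */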
   nir_def *cond = nir_elect(b, 1);

   nir_if *nif = nir_push_if(b, cond);

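   /* Move the atomic into the then-branch so only the elected invocation
    * executes it. */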
   nir_instr_remove(&intrin->instr);
   nir_builder_instr_insert(b, &intrin->instr);

   if (return_prev) {
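      /* Reconstruct each invocation's "previous value": broadcast the single
       * atomic's result to the whole subgroup, then combine it with each
       * invocation's exclusive scan of the data. */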
      nir_push_else(b, nif);

      nir_def *undef = nir_undef(b, 1, intrin->def.bit_size);

      nir_pop_if(b, nif);
      nir_def *result = nir_if_phi(b, &intrin->def, undef);
      result = nir_read_first_invocation(b, result);

      if (!combined_scan_reduce)
         reduce_data(b, op, data, NULL, &scan);

      return nir_build_alu(b, op, result, scan, NULL, NULL);
   } else {
      nir_pop_if(b, nif);
      return NULL;
   }
}

static void
optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin)
{
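   /* In fragment shaders, make sure helper invocations are neither elected to
    * perform the atomic nor counted in the reduction. */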
   nir_if *helper_nif = NULL;
   if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
      nir_def *helper = nir_is_helper_invocation(b, 1);
      helper_nif = nir_push_if(b, nir_inot(b, helper));
   }

   ASSERTED bool original_result_divergent = intrin->def.divergent;
   bool return_prev = !nir_def_is_unused(&intrin->def);

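   /* Move the atomic's existing uses onto a stand-in def so they can be
    * rewritten to the reconstructed result later, and give the intrinsic a
    * fresh def for the value it produces inside the elect branch. */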
   nir_def old_result = intrin->def;
   list_replace(&intrin->def.uses, &old_result.uses);
   nir_def_init(&intrin->instr, &intrin->def, 1,
                intrin->def.bit_size);

   nir_def *result = optimize_atomic(b, intrin, return_prev);

   if (helper_nif) {
      nir_push_else(b, helper_nif);
      nir_def *undef = result ? nir_undef(b, 1, result->bit_size) : NULL;
      nir_pop_if(b, helper_nif);
      if (result)
         result = nir_if_phi(b, result, undef);
   }

   if (result) {
      assert(result->divergent == original_result_divergent);
      nir_def_rewrite_uses(&old_result, result);
   }
}

static bool
opt_uniform_atomics(nir_function_impl *impl)
{
   bool progress = false;
   nir_builder b = nir_builder_create(impl);
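   /* This pass preserves divergence information, so have the builder keep it
    * up to date for newly inserted instructions. */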
   b.update_divergence = true;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         unsigned offset_src, data_src, offset2_src;
         if (parse_atomic_op(intrin, &offset_src, &data_src, &offset2_src) ==
             nir_num_opcodes)
            continue;

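         /* The optimization is only valid if every invocation in the subgroup
          * accesses the same memory location, i.e. the offsets are
          * subgroup-uniform. */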
         if (nir_src_is_divergent(intrin->src[offset_src]))
            continue;
         if (nir_src_is_divergent(intrin->src[offset2_src]))
            continue;

         if (is_atomic_already_optimized(b.shader, intrin))
            continue;

         b.cursor = nir_before_instr(instr);
         optimize_and_rewrite_atomic(&b, intrin);
         progress = true;
      }
   }

   return progress;
}

bool
nir_opt_uniform_atomics(nir_shader *shader)
{
   bool progress = false;

   /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
    * optimizing any atomics.
    */
   if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
       !shader->info.workgroup_size_variable &&
       shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
       shader->info.workgroup_size[2] == 1)
      return false;

   nir_foreach_function_impl(impl, shader) {
      if (opt_uniform_atomics(impl)) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_none);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}