/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

/*
 * Optimizes atomics (with uniform offsets) using subgroup operations to ensure
 * only one atomic operation is done per subgroup. So res = atomicAdd(addr, 1)
 * would become something like:
 *
 * uint tmp = subgroupAdd(1);
 * uint res;
 * if (subgroupElect())
 *    res = atomicAdd(addr, tmp);
 * res = subgroupBroadcastFirst(res) + subgroupExclusiveAdd(1);
 *
 * This pass requires and preserves LCSSA and divergence information.
 */

#include "nir/nir.h"
#include "nir/nir_builder.h"

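/* If "op" is a supported atomic intrinsic, return the ALU opcode used to
 * combine its data and report which sources hold the offset(s) and the data.
 * Unsupported intrinsics return nir_num_opcodes.
 */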
static nir_op
parse_atomic_op(nir_intrinsic_op op, unsigned *offset_src, unsigned *data_src,
                unsigned *offset2_src)
{
   switch (op) {
   #define OP_NOIMG(intrin, alu) \
   case nir_intrinsic_ssbo_atomic_##intrin: \
      *offset_src = 1; \
      *data_src = 2; \
      *offset2_src = *offset_src; \
      return nir_op_##alu; \
   case nir_intrinsic_shared_atomic_##intrin: \
   case nir_intrinsic_global_atomic_##intrin: \
   case nir_intrinsic_deref_atomic_##intrin: \
      *offset_src = 0; \
      *data_src = 1; \
      *offset2_src = *offset_src; \
      return nir_op_##alu; \
   case nir_intrinsic_global_atomic_##intrin##_amd: \
      *offset_src = 0; \
      *data_src = 1; \
      *offset2_src = 2; \
      return nir_op_##alu;
   #define OP(intrin, alu) \
   OP_NOIMG(intrin, alu) \
   case nir_intrinsic_image_deref_atomic_##intrin: \
   case nir_intrinsic_image_atomic_##intrin: \
   case nir_intrinsic_bindless_image_atomic_##intrin: \
      *offset_src = 1; \
      *data_src = 3; \
      *offset2_src = *offset_src; \
      return nir_op_##alu;
   OP(add, iadd)
   OP(imin, imin)
   OP(umin, umin)
   OP(imax, imax)
   OP(umax, umax)
   OP(and, iand)
   OP(or, ior)
   OP(xor, ixor)
   OP(fadd, fadd)
   OP_NOIMG(fmin, fmin)
   OP_NOIMG(fmax, fmax)
   #undef OP_NOIMG
   #undef OP
   default:
      return nir_num_opcodes;
   }
}

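/* Return a bitmask of the invocation-index dimensions a divergent value is
 * derived from: bits 0-2 for the x/y/z components of the local/global
 * invocation ID (a local/global invocation index counts as all three) and
 * bit 3 (0x8) for the subgroup invocation index. Unrecognized or
 * subgroup-uniform values return 0.
 */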
static unsigned
get_dim(nir_ssa_scalar scalar)
{
   if (!scalar.def->divergent)
      return 0;

   if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_load_subgroup_invocation)
         return 0x8;
      else if (intrin->intrinsic == nir_intrinsic_load_local_invocation_index)
         return 0x7;
      else if (intrin->intrinsic == nir_intrinsic_load_local_invocation_id)
         return 1 << scalar.comp;
      else if (intrin->intrinsic == nir_intrinsic_load_global_invocation_index)
         return 0x7;
      else if (intrin->intrinsic == nir_intrinsic_load_global_invocation_id)
         return 1 << scalar.comp;
   } else if (nir_ssa_scalar_is_alu(scalar)) {
      if (nir_ssa_scalar_alu_op(scalar) == nir_op_iadd ||
          nir_ssa_scalar_alu_op(scalar) == nir_op_imul) {
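         /* The dimensions of an add or multiply are the union of both
          * operands' dimensions, as long as any operand that contributes no
          * dimension is subgroup-uniform. */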
113          nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
114          nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
115 
116          unsigned src0_dim = get_dim(src0);
117          if (!src0_dim && src0.def->divergent)
118             return 0;
119          unsigned src1_dim = get_dim(src1);
120          if (!src1_dim && src1.def->divergent)
121             return 0;
122 
123          return src0_dim | src1_dim;
124       } else if (nir_ssa_scalar_alu_op(scalar) == nir_op_ishl) {
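         /* Shifting by a subgroup-uniform amount preserves the dimensions of
          * the shifted value. */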
         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
         return src1.def->divergent ? 0 : get_dim(src0);
      }
   }

   return 0;
}

/* Returns a bitmask of invocation indices that are compared against a subgroup
 * uniform value.
 */
static unsigned
match_invocation_comparison(nir_ssa_scalar scalar)
{
   bool is_alu = nir_ssa_scalar_is_alu(scalar);
   if (is_alu && nir_ssa_scalar_alu_op(scalar) == nir_op_iand) {
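      /* Both operands of the iand must hold for the condition to be true, so
       * accumulate the dimensions covered by each of them. */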
      return match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 0)) |
             match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 1));
   } else if (is_alu && nir_ssa_scalar_alu_op(scalar) == nir_op_ieq) {
      if (!nir_ssa_scalar_chase_alu_src(scalar, 0).def->divergent)
         return get_dim(nir_ssa_scalar_chase_alu_src(scalar, 1));
      if (!nir_ssa_scalar_chase_alu_src(scalar, 1).def->divergent)
         return get_dim(nir_ssa_scalar_chase_alu_src(scalar, 0));
   } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
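      /* elect() is true for exactly one invocation in the subgroup. */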
      if (intrin->intrinsic == nir_intrinsic_elect)
         return 0x8;
   }

   return 0;
}

/* Returns true if the intrinsic is already conditional so that at most one
 * invocation in the subgroup does the atomic.
 */
static bool
is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
{
   unsigned dims = 0;
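   /* Walk the enclosing if statements (only then-branches count) and gather
    * which invocation dimensions their conditions single out. */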
   for (nir_cf_node *cf = &instr->instr.block->cf_node; cf; cf = cf->parent) {
      if (cf->type == nir_cf_node_if) {
         nir_block *first_then = nir_if_first_then_block(nir_cf_node_as_if(cf));
         nir_block *last_then = nir_if_last_then_block(nir_cf_node_as_if(cf));
         bool within_then = instr->instr.block->index >= first_then->index;
         within_then = within_then && instr->instr.block->index <= last_then->index;
         if (!within_then)
            continue;

         nir_ssa_scalar cond = {nir_cf_node_as_if(cf)->condition.ssa, 0};
         dims |= match_invocation_comparison(cond);
      }
   }

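   /* For stages with a workgroup, the atomic is already optimized if the
    * enclosing conditions pin down one invocation in every dimension whose
    * workgroup size can exceed 1. */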
   if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
      unsigned dims_needed = 0;
      for (unsigned i = 0; i < 3; i++)
         dims_needed |= (shader->info.workgroup_size_variable ||
                         shader->info.workgroup_size[i] > 1) << i;
      if ((dims & dims_needed) == dims_needed)
         return true;
   }

   return dims & 0x8;
}

/* Perform a reduction and/or exclusive scan. */
static void
reduce_data(nir_builder *b, nir_op op, nir_ssa_def *data,
            nir_ssa_def **reduce, nir_ssa_def **scan)
{
   if (scan) {
      *scan = nir_exclusive_scan(b, data, .reduction_op=op);
      if (reduce) {
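         /* The subgroup-wide reduction is the last invocation's exclusive
          * scan result combined with its own data. */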
         nir_ssa_def *last_lane = nir_last_invocation(b);
         nir_ssa_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
         *reduce = nir_read_invocation(b, res, last_lane);
      }
   } else {
      *reduce = nir_reduce(b, data, .reduction_op=op);
   }
}

static nir_ssa_def *
optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
{
   unsigned offset_src = 0;
   unsigned data_src = 0;
   unsigned offset2_src = 0;
   nir_op op = parse_atomic_op(intrin->intrinsic, &offset_src, &data_src, &offset2_src);
   nir_ssa_def *data = intrin->src[data_src].ssa;

   /* Separate uniform reduction and scan is faster than doing a combined scan+reduce */
   bool combined_scan_reduce = return_prev && data->divergent;
   nir_ssa_def *reduce = NULL, *scan = NULL;
   reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);

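   /* Make the atomic consume the reduced value and guard it so that only a
    * single elected invocation executes it. */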
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[data_src], nir_src_for_ssa(reduce));
   nir_update_instr_divergence(b->shader, &intrin->instr);

   nir_ssa_def *cond = nir_elect(b, 1);

   nir_if *nif = nir_push_if(b, cond);

   nir_instr_remove(&intrin->instr);
   nir_builder_instr_insert(b, &intrin->instr);

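   /* If the original result is used, reconstruct each invocation's value from
    * the single atomic's result plus the exclusive scan of the data. */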
   if (return_prev) {
      nir_push_else(b, nif);

      nir_ssa_def *undef = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);

      nir_pop_if(b, nif);
      nir_ssa_def *result = nir_if_phi(b, &intrin->dest.ssa, undef);
      result = nir_read_first_invocation(b, result);

      if (!combined_scan_reduce)
         reduce_data(b, op, data, NULL, &scan);

      return nir_build_alu(b, op, result, scan, NULL, NULL);
   } else {
      nir_pop_if(b, nif);
      return NULL;
   }
}

static void
optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin)
{
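   /* In fragment shaders, keep helper invocations out of the reduction and
    * the atomic. */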
   nir_if *helper_nif = NULL;
   if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
      nir_ssa_def *helper = nir_is_helper_invocation(b, 1);
      helper_nif = nir_push_if(b, nir_inot(b, helper));
   }

   ASSERTED bool original_result_divergent = intrin->dest.ssa.divergent;
   bool return_prev = !nir_ssa_def_is_unused(&intrin->dest.ssa);

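   /* Move the existing uses off the intrinsic's destination and give it a
    * fresh def, so the optimized result can be substituted for them below. */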
   nir_ssa_def old_result = intrin->dest.ssa;
   list_replace(&intrin->dest.ssa.uses, &old_result.uses);
   list_replace(&intrin->dest.ssa.if_uses, &old_result.if_uses);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, intrin->dest.ssa.bit_size, NULL);

   nir_ssa_def *result = optimize_atomic(b, intrin, return_prev);

   if (helper_nif) {
      nir_push_else(b, helper_nif);
      nir_ssa_def *undef = result ? nir_ssa_undef(b, 1, result->bit_size) : NULL;
      nir_pop_if(b, helper_nif);
      if (result)
         result = nir_if_phi(b, result, undef);
   }

   if (result) {
      assert(result->divergent == original_result_divergent);
      nir_ssa_def_rewrite_uses(&old_result, result);
   }
}

static bool
opt_uniform_atomics(nir_function_impl *impl)
{
   bool progress = false;
   nir_builder b;
   nir_builder_init(&b, impl);
   b.update_divergence = true;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         unsigned offset_src, data_src, offset2_src;
         if (parse_atomic_op(intrin->intrinsic, &offset_src, &data_src, &offset2_src) ==
             nir_num_opcodes)
            continue;

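         /* Only atomics with subgroup-uniform offsets can be combined into a
          * single per-subgroup atomic. */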
         if (nir_src_is_divergent(intrin->src[offset_src]))
            continue;
         if (nir_src_is_divergent(intrin->src[offset2_src]))
            continue;

         if (is_atomic_already_optimized(b.shader, intrin))
            continue;

         b.cursor = nir_before_instr(instr);
         optimize_and_rewrite_atomic(&b, intrin);
         progress = true;
      }
   }

   return progress;
}

bool
nir_opt_uniform_atomics(nir_shader *shader)
{
   bool progress = false;

   /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
    * optimizing any atomics.
    */
   if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
       !shader->info.workgroup_size_variable &&
       shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
       shader->info.workgroup_size[2] == 1)
      return false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      if (opt_uniform_atomics(function->impl)) {
         progress = true;
         nir_metadata_preserve(function->impl, 0);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}