/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

/*
 * Optimizes atomics (with uniform offsets) using subgroup operations to ensure
 * only one atomic operation is done per subgroup. So res = atomicAdd(addr, 1)
 * would become something like:
 *
 * uint tmp = subgroupAdd(1);
 * uint res;
 * if (subgroupElect())
 *    res = atomicAdd(addr, tmp);
 * res = subgroupBroadcastFirst(res) + subgroupExclusiveAdd(1);
 *
 * This pass requires and preserves LCSSA and divergence information.
 */

#include "nir/nir.h"
#include "nir/nir_builder.h"

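/* Determines the ALU op performed by an atomic intrinsic and which of its
 * sources hold the offset and the data. Returns nir_num_opcodes if the
 * intrinsic isn't a supported atomic.
 */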
static nir_op
parse_atomic_op(nir_intrinsic_op op, unsigned *offset_src, unsigned *data_src)
{
   switch (op) {
   #define OP_NOIMG(intrin, alu) \
   case nir_intrinsic_ssbo_atomic_##intrin: \
      *offset_src = 1; \
      *data_src = 2; \
      return nir_op_##alu; \
   case nir_intrinsic_shared_atomic_##intrin: \
   case nir_intrinsic_global_atomic_##intrin: \
   case nir_intrinsic_deref_atomic_##intrin: \
      *offset_src = 0; \
      *data_src = 1; \
      return nir_op_##alu;
   #define OP(intrin, alu) \
   OP_NOIMG(intrin, alu) \
   case nir_intrinsic_image_deref_atomic_##intrin: \
   case nir_intrinsic_image_atomic_##intrin: \
   case nir_intrinsic_bindless_image_atomic_##intrin: \
      *offset_src = 1; \
      *data_src = 3; \
      return nir_op_##alu;
   OP(add, iadd)
   OP(imin, imin)
   OP(umin, umin)
   OP(imax, imax)
   OP(umax, umax)
   OP(and, iand)
   OP(or, ior)
   OP(xor, ixor)
   OP(fadd, fadd)
   OP_NOIMG(fmin, fmin)
   OP_NOIMG(fmax, fmax)
   #undef OP_NOIMG
   #undef OP
   default:
      return nir_num_opcodes;
   }
}

/* Returns a bitmask of invocation indices that are compared against a subgroup
 * uniform value.
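 *
 * Bits 0-2 stand for the x, y and z components of the local (or global)
 * invocation ID, while bit 3 (0x8) means a single invocation per subgroup is
 * selected outright (subgroup invocation index or elect).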
 */
static unsigned
match_invocation_comparison(nir_ssa_scalar scalar)
{
   if (!nir_ssa_scalar_is_alu(scalar))
      return 0;

   if (nir_ssa_scalar_alu_op(scalar) == nir_op_iand) {
      return match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 0)) |
             match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 1));
   } else if (nir_ssa_scalar_alu_op(scalar) == nir_op_ieq) {
      unsigned dims = 0;
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(scalar, i);
         if (src.def->parent_instr->type != nir_instr_type_intrinsic)
            continue;
         if (nir_ssa_scalar_chase_alu_src(scalar, !i).def->divergent)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.def->parent_instr);
         if (intrin->intrinsic == nir_intrinsic_load_subgroup_invocation)
            dims = 0x8;
         else if (intrin->intrinsic == nir_intrinsic_load_local_invocation_index)
            dims = 0x7;
         else if (intrin->intrinsic == nir_intrinsic_load_local_invocation_id)
            dims = 1 << src.comp;
         else if (intrin->intrinsic == nir_intrinsic_load_global_invocation_index)
            dims = 0x7;
         else if (intrin->intrinsic == nir_intrinsic_load_global_invocation_id)
            dims = 1 << src.comp;
      }

      return dims;
   } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_elect)
         return 0x8;
      return 0;
   } else {
      return 0;
   }
}

/* Returns true if the intrinsic is already conditional so that at most one
 * invocation in the subgroup does the atomic.
 */
static bool
is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
{
   unsigned dims = 0;
   for (nir_cf_node *cf = &instr->instr.block->cf_node; cf; cf = cf->parent) {
      if (cf->type == nir_cf_node_if) {
         nir_block *first_then = nir_if_first_then_block(nir_cf_node_as_if(cf));
         nir_block *last_then = nir_if_last_then_block(nir_cf_node_as_if(cf));
         bool within_then = instr->instr.block->index >= first_then->index;
         within_then = within_then && instr->instr.block->index <= last_then->index;
         if (!within_then)
            continue;

         nir_ssa_scalar cond = {nir_cf_node_as_if(cf)->condition.ssa, 0};
         dims |= match_invocation_comparison(cond);
      }
   }

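   /* The condition only needs to pin the workgroup dimensions that actually
    * span more than one invocation; a match on bit 0x8 means a single
    * invocation was already selected outright.
    */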
   unsigned dims_needed = 0;
   for (unsigned i = 0; i < 3; i++)
      dims_needed |= (shader->info.cs.local_size[i] > 1) << i;

   return (dims & dims_needed) == dims_needed || dims & 0x8;
}

static nir_ssa_def *
emit_scalar_intrinsic(nir_builder *b, nir_intrinsic_op op, unsigned bit_size)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, bit_size, NULL);
   nir_builder_instr_insert(b, &intrin->instr);
   return &intrin->dest.ssa;
}

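/* Reads "data" from the given lane, or from the first active lane if "lane" is
 * NULL.
 */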
static nir_ssa_def *
emit_read_invocation(nir_builder *b, nir_ssa_def *data, nir_ssa_def *lane)
{
   nir_intrinsic_instr *ri = nir_intrinsic_instr_create(
         b->shader, lane ? nir_intrinsic_read_invocation : nir_intrinsic_read_first_invocation);
   nir_ssa_dest_init(&ri->instr, &ri->dest, 1, data->bit_size, NULL);
   ri->num_components = 1;
   ri->src[0] = nir_src_for_ssa(data);
   if (lane)
      ri->src[1] = nir_src_for_ssa(lane);
   nir_builder_instr_insert(b, &ri->instr);
   return &ri->dest.ssa;
}

/* Perform a reduction and/or exclusive scan. */
static void
reduce_data(nir_builder *b, nir_op op, nir_ssa_def *data,
            nir_ssa_def **reduce, nir_ssa_def **scan)
{
   nir_intrinsic_op intrin_op = scan ? nir_intrinsic_exclusive_scan : nir_intrinsic_reduce;
   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrin_op);
   intrin->num_components = 1;
   intrin->src[0] = nir_src_for_ssa(data);
   nir_intrinsic_set_reduction_op(intrin, op);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, data->bit_size, NULL);
   nir_builder_instr_insert(b, &intrin->instr);

   if (scan)
      *scan = &intrin->dest.ssa;

   if (scan && reduce) {
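      /* Derive the reduction from the exclusive scan: the last active
       * invocation's scan result combined with its own data is the total for
       * the whole subgroup.
       */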
      nir_ssa_def *last_lane = emit_scalar_intrinsic(b, nir_intrinsic_last_invocation, 32);
      nir_ssa_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
      *reduce = emit_read_invocation(b, res, last_lane);
   } else if (reduce) {
      *reduce = &intrin->dest.ssa;
   }
}

static nir_ssa_def *
optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
{
   unsigned offset_src, data_src;
   nir_op op = parse_atomic_op(intrin->intrinsic, &offset_src, &data_src);
   nir_ssa_def *data = intrin->src[data_src].ssa;

   /* For subgroup-uniform data, a separate reduction and scan is faster than a
    * combined scan+reduce, so only combine them when the data is divergent.
    */
   bool combined_scan_reduce = return_prev && data->divergent;
   nir_ssa_def *reduce = NULL, *scan = NULL;
   reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);

   nir_instr_rewrite_src(&intrin->instr, &intrin->src[data_src], nir_src_for_ssa(reduce));
   nir_update_instr_divergence(b->shader, &intrin->instr);

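   /* Elect a single invocation to perform the atomic with the reduced value. */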
   nir_ssa_def *cond = emit_scalar_intrinsic(b, nir_intrinsic_elect, 1);

   nir_if *nif = nir_push_if(b, cond);

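   /* Move the atomic into the then-branch of the new if. */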
   nir_instr_remove(&intrin->instr);
   nir_builder_instr_insert(b, &intrin->instr);

   if (return_prev) {
      nir_push_else(b, nif);

      nir_ssa_def *undef = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);

      nir_pop_if(b, nif);
      nir_ssa_def *result = nir_if_phi(b, &intrin->dest.ssa, undef);
      result = emit_read_invocation(b, result, NULL);

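      /* If the exclusive scan wasn't created together with the reduction,
       * create it now. Combining each invocation's scan with the broadcast old
       * value yields that invocation's own previous value.
       */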
      if (!combined_scan_reduce)
         reduce_data(b, op, data, NULL, &scan);

      return nir_build_alu(b, op, result, scan, NULL, NULL);
   } else {
      nir_pop_if(b, nif);
      return NULL;
   }
}

static void
optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin)
{
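   /* In fragment shaders, wrap everything in an if so that helper invocations
    * don't take part in the atomic.
    */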
   nir_if *helper_nif = NULL;
   if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
      nir_ssa_def *helper = emit_scalar_intrinsic(b, nir_intrinsic_is_helper_invocation, 1);
      helper_nif = nir_push_if(b, nir_inot(b, helper));
   }

   ASSERTED bool original_result_divergent = intrin->dest.ssa.divergent;
   bool return_prev = !list_is_empty(&intrin->dest.ssa.uses) ||
                      !list_is_empty(&intrin->dest.ssa.if_uses);

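   /* Move the original result's uses onto a temporary copy so they can be
    * rewritten to the optimized result afterwards, and give the intrinsic a
    * fresh destination.
    */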
   nir_ssa_def old_result = intrin->dest.ssa;
   list_replace(&intrin->dest.ssa.uses, &old_result.uses);
   list_replace(&intrin->dest.ssa.if_uses, &old_result.if_uses);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, intrin->dest.ssa.bit_size, NULL);

   nir_ssa_def *result = optimize_atomic(b, intrin, return_prev);

   if (helper_nif) {
      nir_push_else(b, helper_nif);
      nir_ssa_def *undef = result ? nir_ssa_undef(b, 1, result->bit_size) : NULL;
      nir_pop_if(b, helper_nif);
      if (result)
         result = nir_if_phi(b, result, undef);
   }

   if (result) {
      assert(result->divergent == original_result_divergent);
      nir_ssa_def_rewrite_uses(&old_result, nir_src_for_ssa(result));
   }
}

static bool
opt_uniform_atomics(nir_function_impl *impl)
{
   bool progress = false;
   nir_builder b;
   nir_builder_init(&b, impl);
   b.update_divergence = true;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         unsigned offset_src, data_src;
         if (parse_atomic_op(intrin->intrinsic, &offset_src, &data_src) == nir_num_opcodes)
            continue;

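         /* The optimization only applies when every invocation in the subgroup
          * accesses the same location, i.e. the offset is subgroup-uniform.
          */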
         if (nir_src_is_divergent(intrin->src[offset_src]))
            continue;

         if (is_atomic_already_optimized(b.shader, intrin))
            continue;

         b.cursor = nir_before_instr(instr);
         optimize_and_rewrite_atomic(&b, intrin);
         progress = true;
      }
   }

   return progress;
}

bool
nir_opt_uniform_atomics(nir_shader *shader)
{
   bool progress = false;

   /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
    * optimizing any atomics.
    */
   if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.local_size_variable &&
       shader->info.cs.local_size[0] == 1 && shader->info.cs.local_size[1] == 1 &&
       shader->info.cs.local_size[2] == 1)
      return false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      if (opt_uniform_atomics(function->impl)) {
         progress = true;
         nir_metadata_preserve(function->impl, 0);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}