/*
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "nir.h"
#include "nir_builder.h"

/*
 * Remap atomic counters to SSBOs, starting from the shader's next SSBO slot
 * (info.num_ssbos).
 */

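/* For example, a counter declared as
 *
 *    layout(binding = 0) uniform atomic_uint counter0;
 *
 * is replaced by an unsized uint[] SSBO at index info.num_ssbos + 0, and
 * (roughly; a sketch, not exact NIR output) atomicCounterIncrement(counter0)
 * becomes ssbo_atomic_add with data +1 on that buffer.
 */
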
/* Return a deref of the hidden uniform holding the driver-provided offset
 * for the given counter binding, creating the variable on first use.  It is
 * identified by its (offset_align_state, binding) state-slot tokens.
 */
static nir_deref_instr *
deref_offset_var(nir_builder *b, unsigned binding, unsigned offset_align_state)
{
   nir_foreach_uniform_variable(var, b->shader) {
      if (var->num_state_slots != 1)
         continue;
      if (var->state_slots[0].tokens[0] == offset_align_state &&
          var->state_slots[0].tokens[1] == binding)
         return nir_build_deref_var(b, var);
   }

   nir_variable *var = nir_variable_create(b->shader, nir_var_uniform, glsl_uint_type(), "offset");
   var->state_slots = ralloc_array(var, nir_state_slot, 1);
   var->state_slots[0].tokens[0] = offset_align_state;
   var->state_slots[0].tokens[1] = binding;
   var->num_state_slots = 1;
   var->data.how_declared = nir_var_hidden;
   b->shader->num_uniforms++;
   return nir_build_deref_var(b, var);
}

/* Rewrite a single atomic_counter_* intrinsic to the equivalent SSBO
 * intrinsic.  Returns true if the instruction was lowered.
 */
static bool
lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, unsigned offset_align_state)
{
   nir_intrinsic_op op;

   b->cursor = nir_before_instr(&instr->instr);

   switch (instr->intrinsic) {
   case nir_intrinsic_memory_barrier_atomic_counter:
      /* Atomic counters are now SSBOs, so memoryBarrierAtomicCounter()
       * becomes memoryBarrierBuffer().
       */
      instr->intrinsic = nir_intrinsic_memory_barrier_buffer;
      return true;

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_add:
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_post_dec:
      /* inc and dec get remapped to add: */
      op = nir_intrinsic_ssbo_atomic_add;
      break;
   case nir_intrinsic_atomic_counter_read:
      op = nir_intrinsic_load_ssbo;
      break;
   case nir_intrinsic_atomic_counter_min:
      op = nir_intrinsic_ssbo_atomic_umin;
      break;
   case nir_intrinsic_atomic_counter_max:
      op = nir_intrinsic_ssbo_atomic_umax;
      break;
   case nir_intrinsic_atomic_counter_and:
      op = nir_intrinsic_ssbo_atomic_and;
      break;
   case nir_intrinsic_atomic_counter_or:
      op = nir_intrinsic_ssbo_atomic_or;
      break;
   case nir_intrinsic_atomic_counter_xor:
      op = nir_intrinsic_ssbo_atomic_xor;
      break;
   case nir_intrinsic_atomic_counter_exchange:
      op = nir_intrinsic_ssbo_atomic_exchange;
      break;
   case nir_intrinsic_atomic_counter_comp_swap:
      op = nir_intrinsic_ssbo_atomic_comp_swap;
      break;
   default:
      return false;
   }

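   /* The SSBO index is the first slot past the shader's existing SSBOs plus
    * the counter's binding, which was stored as the intrinsic's base index.
    */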
   nir_ssa_def *buffer = nir_imm_int(b, ssbo_offset + nir_intrinsic_base(instr));
   nir_ssa_def *temp = NULL;

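   /* If the driver exposes the counter buffer's offset as state
    * (offset_align_state != 0), load it from the hidden uniform so it can
    * be added to the intrinsic's byte offset below.
    */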
   nir_ssa_def *offset_load = NULL;
   if (offset_align_state) {
      nir_deref_instr *deref_offset = deref_offset_var(b, nir_intrinsic_base(instr), offset_align_state);
      offset_load = nir_load_deref(b, deref_offset);
   }
   nir_intrinsic_instr *new_instr =
         nir_intrinsic_instr_create(b->shader, op);

   /* A couple of instructions need special handling since they don't map
    * 1:1 onto the SSBO atomics:
    */
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_inc:
      /* remapped to ssbo_atomic_add: { buffer_idx, offset, +1 } */
      temp = nir_imm_int(b, +1);
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      new_instr->src[2] = nir_src_for_ssa(temp);
      break;
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_post_dec:
      /* remapped to ssbo_atomic_add: { buffer_idx, offset, -1 } */
      /* NOTE: the semantics differ, so we adjust the return value below */
      temp = nir_imm_int(b, -1);
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      new_instr->src[2] = nir_src_for_ssa(temp);
      break;
   case nir_intrinsic_atomic_counter_read:
      /* remapped to load_ssbo: { buffer_idx, offset } */
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      break;
   default:
      /* remapped to ssbo_atomic_x: { buffer_idx, offset, data, (compare)? } */
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      nir_src_copy(&new_instr->src[2], &instr->src[1]);
      if (op == nir_intrinsic_ssbo_atomic_comp_swap ||
          op == nir_intrinsic_ssbo_atomic_fcomp_swap)
         nir_src_copy(&new_instr->src[3], &instr->src[2]);
      break;
   }

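   /* Fold the driver-provided buffer offset into the byte-offset source. */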
   if (offset_load)
      new_instr->src[1].ssa = nir_iadd(b, new_instr->src[1].ssa, offset_load);

   if (new_instr->intrinsic == nir_intrinsic_load_ssbo) {
      nir_intrinsic_set_align(new_instr, 4, 0);

      /* We could be replacing an intrinsic with a fixed number of dest
       * components with one that has a variable number, so it is best to
       * take this from the dest:
       */
      new_instr->num_components = instr->dest.ssa.num_components;
   }

   nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                     instr->dest.ssa.num_components,
                     instr->dest.ssa.bit_size, NULL);
   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);

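   /* ssbo_atomic_add returns the value from before the add, but pre_dec
    * must return the freshly decremented value, so apply the -1 to the
    * result as well.
    */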
   if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) {
      b->cursor = nir_after_instr(&new_instr->instr);
      nir_ssa_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, result);
   } else {
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, &new_instr->dest.ssa);
   }

   return true;
}

/* Returns true if the type is an atomic_uint or an array of them. */
static bool
is_atomic_uint(const struct glsl_type *type)
{
   if (glsl_get_base_type(type) == GLSL_TYPE_ARRAY)
      return is_atomic_uint(glsl_get_array_element(type));
   return glsl_get_base_type(type) == GLSL_TYPE_ATOMIC_UINT;
}

/* Lower every atomic_counter_* intrinsic in the shader and replace the
 * atomic_uint uniforms with SSBO variables.  Returns true if any progress
 * was made.
 */
bool
nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned offset_align_state)
{
   unsigned ssbo_offset = shader->info.num_ssbos;
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type == nir_instr_type_intrinsic)
                  progress |= lower_instr(nir_instr_as_intrinsic(instr),
                                          ssbo_offset, &builder, offset_align_state);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

   if (progress) {
      /* Replace the atomic_uint uniforms with SSBOs: */
      unsigned replaced = 0;
      nir_foreach_uniform_variable_safe(var, shader) {
         if (is_atomic_uint(var->type)) {
            exec_node_remove(&var->node);

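            /* Several counters may share one binding; create only a single
             * SSBO per binding, tracked in the replaced bitmask (which
             * assumes bindings stay below 32).
             */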
            if (replaced & (1 << var->data.binding))
               continue;

            nir_variable *ssbo;
            char name[16];

            /* A length of 0 is used to denote unsized arrays. */
            const struct glsl_type *type = glsl_array_type(glsl_uint_type(), 0, 0);

            snprintf(name, sizeof(name), "counter%d", var->data.binding);

            ssbo = nir_variable_create(shader, nir_var_mem_ssbo, type, name);
            ssbo->data.binding = ssbo_offset + var->data.binding;
            ssbo->data.explicit_binding = var->data.explicit_binding;

            /* We can't use num_abos, because it only represents the number
             * of active atomic counters, and, unlike SSBOs, they currently
             * aren't compacted, so num_abos isn't a bound on the index
             * passed to nir_intrinsic_atomic_counter_*.  E.g. if we have a
             * single atomic counter declared like:
             *
             * layout(binding=1) atomic_uint counter0;
             *
             * then when we lower accesses to it, the atomic_counter_*
             * intrinsics will have 1 as the index, but num_abos will still
             * be 1.
             */
            shader->info.num_ssbos = MAX2(shader->info.num_ssbos,
                                          ssbo->data.binding + 1);

            struct glsl_struct_field field = {
                  .type = type,
                  .name = "counters",
                  .location = -1,
            };

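            /* Wrap the array in a std430 interface block so later passes
             * treat this like any other SSBO declaration.
             */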
            ssbo->interface_type =
                  glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                                      false, "counters");

            replaced |= (1 << var->data.binding);
         }
      }

      shader->info.num_abos = 0;
   }

   return progress;
}