/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "compiler/glsl/ir_uniform.h"
#include "nir.h"
#include "main/config.h"
#include <assert.h>

/*
 * Replace atomic counter intrinsics that use a variable with intrinsics
 * that directly store the buffer index and byte offset.
 */

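/*
 * As an illustrative sketch (NIR's textual form is approximated here, and
 * the SSA value numbers are made up), a read through a counter variable
 * such as
 *
 *    vec1 32 ssa_0 = intrinsic atomic_counter_read_var (counter) ()
 *
 * becomes an intrinsic carrying an explicit buffer index and byte offset:
 *
 *    vec1 32 ssa_1 = load_const (0x00000000)  /- byte offset in the buffer -/
 *    vec1 32 ssa_2 = intrinsic atomic_counter_read (ssa_1) () (base=0)
 */
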
static bool
lower_instr(nir_intrinsic_instr *instr,
            const struct gl_shader_program *shader_program,
            nir_shader *shader)
{
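   /* Map the variable-based intrinsic to its lowered counterpart; anything
    * that isn't an atomic counter intrinsic is left untouched.
    */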
   nir_intrinsic_op op;
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_read_var:
      op = nir_intrinsic_atomic_counter_read;
      break;

   case nir_intrinsic_atomic_counter_inc_var:
      op = nir_intrinsic_atomic_counter_inc;
      break;

   case nir_intrinsic_atomic_counter_dec_var:
      op = nir_intrinsic_atomic_counter_dec;
      break;

   case nir_intrinsic_atomic_counter_add_var:
      op = nir_intrinsic_atomic_counter_add;
      break;

   case nir_intrinsic_atomic_counter_min_var:
      op = nir_intrinsic_atomic_counter_min;
      break;

   case nir_intrinsic_atomic_counter_max_var:
      op = nir_intrinsic_atomic_counter_max;
      break;

   case nir_intrinsic_atomic_counter_and_var:
      op = nir_intrinsic_atomic_counter_and;
      break;

   case nir_intrinsic_atomic_counter_or_var:
      op = nir_intrinsic_atomic_counter_or;
      break;

   case nir_intrinsic_atomic_counter_xor_var:
      op = nir_intrinsic_atomic_counter_xor;
      break;

   case nir_intrinsic_atomic_counter_exchange_var:
      op = nir_intrinsic_atomic_counter_exchange;
      break;

   case nir_intrinsic_atomic_counter_comp_swap_var:
      op = nir_intrinsic_atomic_counter_comp_swap;
      break;

   default:
      return false;
   }

   if (instr->variables[0]->var->data.mode != nir_var_uniform &&
       instr->variables[0]->var->data.mode != nir_var_shader_storage &&
       instr->variables[0]->var->data.mode != nir_var_shared)
      return false; /* atomics passed as function arguments can't be lowered */

   void *mem_ctx = ralloc_parent(instr);
   unsigned uniform_loc = instr->variables[0]->var->data.location;

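   /* The base index selects the atomic counter buffer; the linker records
    * it in the uniform's per-stage opaque index.
    */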
   nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
   nir_intrinsic_set_base(new_instr,
      shader_program->data->UniformStorage[uniform_loc].opaque[shader->info.stage].index);

   nir_load_const_instr *offset_const =
      nir_load_const_instr_create(mem_ctx, 1, 32);
   offset_const->value.u32[0] = instr->variables[0]->var->data.offset;

   nir_instr_insert_before(&instr->instr, &offset_const->instr);

   nir_ssa_def *offset_def = &offset_const->def;

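   /* Walk the deref chain and fold each array index into the byte offset:
    * constant indices are accumulated directly into offset_const, while
    * indirect indices are scaled by the array stride and added at run time.
    */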
   nir_deref *tail = &instr->variables[0]->deref;
   while (tail->child != NULL) {
      nir_deref_array *deref_array = nir_deref_as_array(tail->child);
      tail = tail->child;

      unsigned child_array_elements = tail->child != NULL ?
         glsl_get_aoa_size(tail->type) : 1;

      offset_const->value.u32[0] += deref_array->base_offset *
         child_array_elements * ATOMIC_COUNTER_SIZE;

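      /* For an indirect index, emit offset_def = index * stride + offset_def
       * so that the final byte offset is computed at run time.
       */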
      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_load_const_instr *atomic_counter_size =
            nir_load_const_instr_create(mem_ctx, 1, 32);
         atomic_counter_size->value.u32[0] = child_array_elements * ATOMIC_COUNTER_SIZE;
         nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);

         nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
         nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
         mul->dest.write_mask = 0x1;
         nir_src_copy(&mul->src[0].src, &deref_array->indirect, mul);
         mul->src[1].src.is_ssa = true;
         mul->src[1].src.ssa = &atomic_counter_size->def;
         nir_instr_insert_before(&instr->instr, &mul->instr);

         nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
         nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, 32, NULL);
         add->dest.write_mask = 0x1;
         add->src[0].src.is_ssa = true;
         add->src[0].src.ssa = &mul->dest.dest.ssa;
         add->src[1].src.is_ssa = true;
         add->src[1].src.ssa = offset_def;
         nir_instr_insert_before(&instr->instr, &add->instr);

         offset_def = &add->dest.dest.ssa;
      }
   }

   new_instr->src[0].is_ssa = true;
   new_instr->src[0].ssa = offset_def;

   /* Copy the other sources, if any, from the original instruction to the
    * new instruction; they are shifted up by one because src[0] of the
    * lowered intrinsic now holds the offset.
    */
   for (unsigned i = 0; i < nir_intrinsic_infos[instr->intrinsic].num_srcs; i++)
      nir_src_copy(&new_instr->src[i + 1], &instr->src[i], new_instr);

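   /* Transfer the result: rewrite all uses of the old SSA def to the new
    * one, or copy the destination for a register-based dest.
    */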
   if (instr->dest.is_ssa) {
      nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                        instr->dest.ssa.num_components, 32, NULL);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(&new_instr->dest.ssa));
   } else {
      nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
   }

   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);

   return true;
}

bool
nir_lower_atomics(nir_shader *shader,
                  const struct gl_shader_program *shader_program)
{
   bool progress = false;

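   /* Walk every instruction in the shader; the _safe iterator is required
    * because lower_instr removes the original intrinsic while we iterate.
    */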
   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type == nir_instr_type_intrinsic)
                  progress |= lower_instr(nir_instr_as_intrinsic(instr),
                                          shader_program, shader);
            }
         }

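         /* The pass only adds and removes instructions within existing
          * blocks, so block indices and dominance information stay valid.
          */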
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

   return progress;
}