/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

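/* Stores the scalar `value` into a single component of the vector pointed
 * to by vec_deref.  The other source lanes are undef; the write mask keeps
 * the store from touching anything but the selected component.
 */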
static void
build_write_masked_store(nir_builder *b, nir_deref_instr *vec_deref,
                         nir_ssa_def *value, unsigned component)
{
   assert(value->num_components == 1);
   unsigned num_components = glsl_get_components(vec_deref->type);
   assert(num_components > 1 && num_components <= NIR_MAX_VEC_COMPONENTS);

   nir_ssa_def *u = nir_ssa_undef(b, 1, value->bit_size);
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++)
      comps[i] = (i == component) ? value : u;

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   nir_store_deref(b, vec_deref, vec, (1u << component));
}

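/* Stores `value` at a dynamically indexed component by binary-searching the
 * range [start, end) with nested if/else blocks and emitting one masked
 * store per leaf: num_components stores behind a branch cascade of depth
 * log2(num_components).
 */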
static void
build_write_masked_stores(nir_builder *b, nir_deref_instr *vec_deref,
                          nir_ssa_def *value, nir_ssa_def *index,
                          unsigned start, unsigned end)
{
   if (start == end - 1) {
      build_write_masked_store(b, vec_deref, value, start);
   } else {
      unsigned mid = start + (end - start) / 2;
      nir_push_if(b, nir_ilt(b, index, nir_imm_int(b, mid)));
      build_write_masked_stores(b, vec_deref, value, index, start, mid);
      nir_push_else(b, NULL);
      build_write_masked_stores(b, vec_deref, value, index, mid, end);
      nir_pop_if(b, NULL);
   }
}

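/* Per-function body of the pass: walks every intrinsic, filters for loads,
 * interpolations, and stores whose destination is an array deref of a
 * vector in one of the requested modes, and rewrites them according to
 * `options`.
 */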
static bool
nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
                                  nir_variable_mode modes,
                                  nir_lower_array_deref_of_vec_options options)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         assert(intrin->intrinsic != nir_intrinsic_copy_deref);

         if (intrin->intrinsic != nir_intrinsic_load_deref &&
             intrin->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
             intrin->intrinsic != nir_intrinsic_interp_deref_at_sample &&
             intrin->intrinsic != nir_intrinsic_interp_deref_at_offset &&
             intrin->intrinsic != nir_intrinsic_interp_deref_at_vertex &&
             intrin->intrinsic != nir_intrinsic_store_deref)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

         /* We choose to be conservative here.  If the deref contains any
          * modes which weren't specified, we bail and don't bother lowering.
          */
         if (!nir_deref_mode_must_be(deref, modes))
            continue;

         /* We only care about array derefs that act on vectors */
         if (deref->deref_type != nir_deref_type_array)
            continue;

         nir_deref_instr *vec_deref = nir_deref_instr_parent(deref);
         if (!glsl_type_is_vector(vec_deref->type))
            continue;

         assert(intrin->num_components == 1);
         unsigned num_components = glsl_get_components(vec_deref->type);
         assert(num_components > 1 && num_components <= NIR_MAX_VEC_COMPONENTS);

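         /* Build the replacement code right after the original intrinsic;
          * the original is then either removed (stores) or rewritten into a
          * full vector load (loads).
          */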
         b.cursor = nir_after_instr(&intrin->instr);

         if (intrin->intrinsic == nir_intrinsic_store_deref) {
            assert(intrin->src[1].is_ssa);
            nir_ssa_def *value = intrin->src[1].ssa;

            if (nir_src_is_const(deref->arr.index)) {
               if (!(options & nir_lower_direct_array_deref_of_vec_store))
                  continue;

               unsigned index = nir_src_as_uint(deref->arr.index);
               /* If index is OOB, we throw the old store away and don't
                * replace it with anything.
                */
               if (index < num_components)
                  build_write_masked_store(&b, vec_deref, value, index);
            } else {
               if (!(options & nir_lower_indirect_array_deref_of_vec_store))
                  continue;

               nir_ssa_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
               build_write_masked_stores(&b, vec_deref, value, index,
                                         0, num_components);
            }
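            /* The original scalar store is always dropped; the masked
             * store(s) built above, if any, take its place.
             */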
            nir_instr_remove(&intrin->instr);

            progress = true;
         } else {
            if (nir_src_is_const(deref->arr.index)) {
               if (!(options & nir_lower_direct_array_deref_of_vec_load))
                  continue;
            } else {
               if (!(options & nir_lower_indirect_array_deref_of_vec_load))
                  continue;
            }

            /* Turn the load into a vector load */
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&vec_deref->dest.ssa));
            intrin->dest.ssa.num_components = num_components;
            intrin->num_components = num_components;

            nir_ssa_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
            nir_ssa_def *scalar =
               nir_vector_extract(&b, &intrin->dest.ssa, index);
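            /* nir_vector_extract folds a constant out-of-bounds index to an
             * undef; in that case the load has no meaningful result and can
             * be removed outright.
             */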
            if (scalar->parent_instr->type == nir_instr_type_ssa_undef) {
               nir_ssa_def_rewrite_uses(&intrin->dest.ssa, scalar);
               nir_instr_remove(&intrin->instr);
            } else {
               nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, scalar,
                                              scalar->parent_instr);
            }
            progress = true;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

/* Lowers away array dereferences on vectors
 *
 * These are allowed on certain variable types such as SSBOs and TCS outputs.
 * However, not everyone can actually handle them everywhere.  There are also
 * cases where we want to lower them for performance reasons.
 *
 * This pass assumes that copy_deref instructions have already been lowered.
 */
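/* Example (illustrative only): a driver that cannot handle dynamically
 * indexed writes to output vectors might run
 *
 *    nir_lower_array_deref_of_vec(shader, nir_var_shader_out,
 *                                 nir_lower_indirect_array_deref_of_vec_store);
 *
 * which rewrites only indirect stores and leaves direct accesses alone.
 */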
bool
nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
                             nir_lower_array_deref_of_vec_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_array_deref_of_vec_impl(function->impl, modes, options))
         progress = true;
   }

   return progress;
}