/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

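/* Recursively store the constant c into deref: vectors and scalars become a
 * single immediate store, cooperative matrices are constructed from one
 * scalar immediate, and structs, arrays, and matrices recurse into their
 * members.
 */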
static void
build_constant_load(nir_builder *b, nir_deref_instr *deref, nir_constant *c)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      const unsigned num_components = glsl_get_vector_elements(deref->type);
      const unsigned bit_size = glsl_get_bit_size(deref->type);
      nir_def *imm = nir_build_imm(b, num_components, bit_size, c->values);
      nir_store_deref(b, deref, imm, ~0);
   } else if (glsl_type_is_struct_or_ifc(deref->type)) {
      unsigned len = glsl_get_length(deref->type);
      for (unsigned i = 0; i < len; i++) {
         build_constant_load(b, nir_build_deref_struct(b, deref, i),
                             c->elements[i]);
      }
   } else if (glsl_type_is_cmat(deref->type)) {
      const struct glsl_type *elem_type = glsl_get_cmat_element(deref->type);
      assert(glsl_type_is_scalar(elem_type));
      const unsigned bit_size = glsl_get_bit_size(elem_type);
      nir_def *elem = nir_build_imm(b, 1, bit_size, c->values);
      nir_cmat_construct(b, &deref->def, elem);
   } else {
      assert(glsl_type_is_array(deref->type) ||
             glsl_type_is_matrix(deref->type));
      unsigned len = glsl_get_length(deref->type);
      for (unsigned i = 0; i < len; i++) {
         build_constant_load(b,
                             nir_build_deref_array_imm(b, deref, i),
                             c->elements[i]);
      }
   }
}

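/* Walk var_list and, for every variable whose mode is in modes, turn its
 * constant or pointer initializer into explicit stores emitted at the top of
 * the current impl.
 */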
static bool
lower_const_initializer(struct nir_builder *b, struct exec_list *var_list,
                        nir_variable_mode modes)
{
   bool progress = false;

   b->cursor = nir_before_impl(b->impl);

   nir_foreach_variable_in_list(var, var_list) {
      if (!(var->data.mode & modes))
         continue;

      if (var->constant_initializer) {
         build_constant_load(b, nir_build_deref_var(b, var),
                             var->constant_initializer);

         progress = true;
         var->constant_initializer = NULL;
      } else if (var->pointer_initializer) {
         nir_deref_instr *src_deref = nir_build_deref_var(b, var->pointer_initializer);
         nir_deref_instr *dst_deref = nir_build_deref_var(b, var);

         /* Note that this stores a pointer to src into dst */
         nir_store_deref(b, dst_deref, &src_deref->def, ~0);

         progress = true;
         var->pointer_initializer = NULL;
      }
   }

   return progress;
}

bool
nir_lower_variable_initializers(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   /* Only some variables have initializers that we want to lower here.
    * Others, such as uniforms, have initializers that are useful later
    * during linking, so we want to skip over those.  Restrict the pass to
    * the variable modes where lowering initializers makes sense so that
    * callers can simply pass nir_var_all.
    */
   modes &= nir_var_shader_out |
            nir_var_shader_temp |
            nir_var_function_temp |
            nir_var_system_value;

   nir_foreach_function_with_impl(func, impl, shader) {
      bool impl_progress = false;
      nir_builder builder = nir_builder_create(impl);

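      /* Shader-level variables (outputs, shader temporaries, system values)
       * live in shader->variables and their initializers only need to be
       * emitted once, in the entry point.  Function temporaries live in each
       * impl's locals list and are handled per function.
       */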
      if ((modes & ~nir_var_function_temp) && func->is_entrypoint) {
         impl_progress |= lower_const_initializer(&builder,
                                                  &shader->variables,
                                                  modes);
      }

      if (modes & nir_var_function_temp) {
         impl_progress |= lower_const_initializer(&builder,
                                                  &impl->locals,
                                                  nir_var_function_temp);
      }

      if (impl_progress) {
         progress = true;
         nir_metadata_preserve(impl, nir_metadata_control_flow |
                                     nir_metadata_live_defs);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}

/* Zero-initialize shared_size bytes of shared memory by splitting the work
 * into writes of chunk_size bytes distributed among the invocations.
 *
 * Used for implementing VK_KHR_zero_initialize_workgroup_memory.
 */
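/* Rough usage sketch (not taken from any particular driver): a driver
 * implementing VK_KHR_zero_initialize_workgroup_memory might invoke this
 * pass with the shader's declared shared size and a convenient chunk size,
 * e.g.
 *
 *    if (shader->info.shared_size > 0)
 *       nir_zero_initialize_shared_memory(shader, shader->info.shared_size, 4);
 *
 * where the chunk size of 4 is only an illustrative value; it must be a
 * multiple of 4 and must evenly divide shared_size (see the asserts below).
 */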
bool
nir_zero_initialize_shared_memory(nir_shader *shader,
                                  const unsigned shared_size,
                                  const unsigned chunk_size)
{
   assert(shared_size > 0);
   assert(chunk_size > 0);
   assert(chunk_size % 4 == 0);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   nir_builder b = nir_builder_at(nir_before_impl(impl));

   assert(!shader->info.workgroup_size_variable);
   const unsigned local_count = shader->info.workgroup_size[0] *
                                shader->info.workgroup_size[1] *
                                shader->info.workgroup_size[2];
   const unsigned stride = chunk_size * local_count;

   /* The initialization logic is simplified if we can always split the memory
    * in full chunk_size units.
    */
   assert(shared_size % chunk_size == 0);

   const unsigned chunk_comps = chunk_size / 4;

   nir_def *local_index = nir_load_local_invocation_index(&b);
   nir_def *first_offset = nir_imul_imm(&b, local_index, chunk_size);

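   /* If a single pass of the whole workgroup covers the entire region, one
    * guarded store per invocation is enough.  Otherwise each invocation
    * loops, advancing its offset by the total number of bytes written per
    * pass.
    */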
   if (stride >= shared_size) {
      nir_push_if(&b, nir_ult_imm(&b, first_offset, shared_size));
      {
         nir_store_shared(&b, nir_imm_zero(&b, chunk_comps, 32), first_offset,
                          .align_mul = chunk_size,
                          .write_mask = ((1 << chunk_comps) - 1));
      }
      nir_pop_if(&b, NULL);
   } else {
      nir_variable *it = nir_local_variable_create(b.impl, glsl_uint_type(),
                                                   "zero_init_iterator");
      nir_store_var(&b, it, first_offset, 0x1);

      nir_loop *loop = nir_push_loop(&b);
      {
         nir_def *offset = nir_load_var(&b, it);

         nir_push_if(&b, nir_uge_imm(&b, offset, shared_size));
         {
            nir_jump(&b, nir_jump_break);
         }
         nir_pop_if(&b, NULL);

         nir_store_shared(&b, nir_imm_zero(&b, chunk_comps, 32), offset,
                          .align_mul = chunk_size,
                          .write_mask = ((1 << chunk_comps) - 1));

         nir_def *new_offset = nir_iadd_imm(&b, offset, stride);
         nir_store_var(&b, it, new_offset, 0x1);
      }
      nir_pop_loop(&b, loop);
   }

   nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
               nir_var_mem_shared);

   nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);

   return true;
}


/** Clears all shared memory to zero at the end of the shader
 *
 * To easily get to the end of the shader, this relies on all exits having
 * already been lowered.  It is designed to be called late in the lowering
 * process, e.g. it does not require variables to be lowered to SSA.
 */
bool
nir_clear_shared_memory(nir_shader *shader,
                        const unsigned shared_size,
                        const unsigned chunk_size)
{
   assert(chunk_size > 0);
   assert(chunk_size % 4 == 0);

   if (shared_size == 0)
      return false;

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   nir_builder b = nir_builder_at(nir_after_impl(impl));

   /* The initialization logic is simplified if we can always split the memory
    * in full chunk_size units.
    */
   assert(shared_size % chunk_size == 0);

   const unsigned chunk_comps = chunk_size / 4;

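   /* Make sure every invocation in the workgroup is done accessing shared
    * memory before we start overwriting it.
    */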
   nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
               nir_var_mem_shared);

   nir_def *local_index = nir_load_local_invocation_index(&b);
   nir_def *first_offset = nir_imul_imm(&b, local_index, chunk_size);

   unsigned iterations = UINT_MAX;
   unsigned size_per_iteration = 0;
   if (!shader->info.workgroup_size_variable) {
      size_per_iteration = nir_static_workgroup_size(shader) * chunk_size;
      iterations = DIV_ROUND_UP(shared_size, size_per_iteration);
   }

   if (iterations <= shader->options->max_unroll_iterations) {
      /* Unroll the loop manually here because (a) we may not run any
       * optimizations after this pass and (b) the loop unrolling pass does
       * not deal well with the potential partial last iteration.
       */
      for (unsigned i = 0; i < iterations; ++i) {
         const unsigned base = size_per_iteration * i;
         bool use_check = i >= shared_size / size_per_iteration;
         if (use_check)
            nir_push_if(&b, nir_ult_imm(&b, first_offset, shared_size - base));

         nir_store_shared(&b, nir_imm_zero(&b, chunk_comps, 32),
                          nir_iadd_imm(&b, first_offset, base),
                          .align_mul = chunk_size,
                          .write_mask = ((1 << chunk_comps) - 1));
         if (use_check)
            nir_pop_if(&b, NULL);
      }
   } else {
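      /* Because this pass is meant to run late and not rely on a later
       * vars-to-SSA lowering, the loop-carried offset is built as an explicit
       * phi rather than a local variable.  The phi is created up front and
       * inserted into the loop's header block once both of its sources are
       * known.
       */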
      nir_phi_instr *offset_phi = nir_phi_instr_create(shader);
      nir_def_init(&offset_phi->instr, &offset_phi->def, 1, 32);
      nir_phi_instr_add_src(offset_phi, nir_cursor_current_block(b.cursor), first_offset);

      nir_def *size_per_iteration_def = shader->info.workgroup_size_variable ?
         nir_imul_imm(&b, nir_load_workgroup_size(&b), chunk_size) :
         nir_imm_int(&b, size_per_iteration);
      nir_def *value = nir_imm_zero(&b, chunk_comps, 32);

      nir_loop *loop = nir_push_loop(&b);
      nir_block *loop_block = nir_cursor_current_block(b.cursor);
      {
         nir_def *offset = &offset_phi->def;

         nir_push_if(&b, nir_uge_imm(&b, offset, shared_size));
         {
            nir_jump(&b, nir_jump_break);
         }
         nir_pop_if(&b, NULL);
         nir_store_shared(&b, value, offset,
                          .align_mul = chunk_size,
                          .write_mask = ((1 << chunk_comps) - 1));

         nir_def *new_offset = nir_iadd(&b, offset, size_per_iteration_def);
         nir_phi_instr_add_src(offset_phi, nir_cursor_current_block(b.cursor), new_offset);
      }
      nir_pop_loop(&b, loop);

      b.cursor = nir_before_block(loop_block);
      nir_builder_instr_insert(&b, &offset_phi->instr);
   }

   nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);

   return true;
}