/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* These passes enable converting uniforms to literals when it's profitable,
 * effectively inlining uniform values in the IR. The main benefit is reduced
 * register usage, which leads to better SMT (hyperthreading). This is
 * accomplished by targeting uniforms that determine whether a conditional
 * branch is taken or a loop can be unrolled.
 *
 * Only uniforms used in these places are analyzed:
 *   1. if conditions
 *   2. loop terminators
 *   3. init and update values of induction variables used in loop terminators
 *
 * nir_find_inlinable_uniforms finds uniforms that can be inlined and stores
 * that information in shader_info.
 *
 * nir_inline_uniforms inlines uniform values.
 *
 * (uniforms must be lowered to load_ubo before calling this)
 */
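
/* A rough usage sketch (hypothetical driver code; the read_ubo0_dword()
 * helper and the exact integration point are assumptions, not part of this
 * API):
 *
 *    nir_find_inlinable_uniforms(nir);
 *
 *    // At draw time, when the contents of the bound UBO 0 are known:
 *    uint32_t values[MAX_INLINABLE_UNIFORMS];
 *    for (unsigned i = 0; i < nir->info.num_inlinable_uniforms; i++)
 *       values[i] = read_ubo0_dword(nir->info.inlinable_uniform_dw_offsets[i]);
 *
 *    nir_inline_uniforms(nir, nir->info.num_inlinable_uniforms, values,
 *                        nir->info.inlinable_uniform_dw_offsets);
 */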

#include "util/hash_table.h"
#include "nir_builder.h"
#include "nir_loop_analyze.h"

/* Maximum byte offset such that offset / 4 still fits in the uint16_t
 * entries of shader_info::inlinable_uniform_dw_offsets[].
 */
#define MAX_OFFSET (UINT16_MAX * 4)

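/* Maximum number of UBOs these passes will scan for inlinable uniforms. */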
#define MAX_NUM_BO 32

/**
 * Collect uniforms used in a source
 *
 * Recursively collects all of the UBO loads with constant UBO index and
 * constant offset (per the restrictions of \c max_num_bo and \c max_offset).
 * If any non-constant values are discovered, if any uniforms don't meet the
 * restrictions, or if more than \c MAX_INLINABLE_UNIFORMS are discovered for
 * any one UBO, false is returned.
 *
 * When false is returned, the state of \c uni_offsets and \c num_offsets is
 * undefined.
 *
 * \param max_num_bo Maximum number of uniform buffer objects
 * \param max_offset Maximum offset within a UBO
 * \param uni_offsets Array of \c max_num_bo * \c MAX_INLINABLE_UNIFORMS values
 * used to store offsets of discovered uniform loads.
 * \param num_offsets Array of \c max_num_bo values used to store the number
 * of uniforms collected from each UBO.
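 *
 * For example, given a source computed as (a hypothetical IR sketch):
 *
 *    ssa_2 = intrinsic load_ubo (const 0, const 16)
 *    ssa_3 = iadd ssa_2, load_const (8)
 *
 * the walk succeeds and records byte offset 16 for UBO 0.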
 */
bool
nir_collect_src_uniforms(const nir_src *src, int component,
                         uint32_t *uni_offsets, uint8_t *num_offsets,
                         unsigned max_num_bo, unsigned max_offset)
{
   assert(max_num_bo > 0 && max_num_bo <= MAX_NUM_BO);
   assert(component < src->ssa->num_components);

   nir_instr *instr = src->ssa->parent_instr;

   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      /* Vector ops only need to check the corresponding component. */
      if (alu->op == nir_op_mov) {
         return nir_collect_src_uniforms(&alu->src[0].src,
                                         alu->src[0].swizzle[component],
                                         uni_offsets, num_offsets,
                                         max_num_bo, max_offset);
      } else if (nir_op_is_vec(alu->op)) {
         nir_alu_src *alu_src = alu->src + component;
         return nir_collect_src_uniforms(&alu_src->src, alu_src->swizzle[0],
                                         uni_offsets, num_offsets,
                                         max_num_bo, max_offset);
      }

      /* Return true only if all sources return true. */
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         nir_alu_src *alu_src = alu->src + i;
         int input_sizes = nir_op_infos[alu->op].input_sizes[i];

         if (input_sizes == 0) {
            /* For per-component ops (input size 0, e.g. fadd), each
             * component of the destination is determined only by the same
             * component of the sources.
             */
            if (!nir_collect_src_uniforms(&alu_src->src, alu_src->swizzle[component],
                                          uni_offsets, num_offsets,
                                          max_num_bo, max_offset))
               return false;
         } else {
            /* For ops with a fixed input size (e.g. fdot3), all components
             * of the destination are determined by all components of the
             * sources (except vec ops).
             */
            for (unsigned j = 0; j < input_sizes; j++) {
               if (!nir_collect_src_uniforms(&alu_src->src, alu_src->swizzle[j],
                                             uni_offsets, num_offsets,
                                             max_num_bo, max_offset))
                  return false;
            }
         }
      }
      return true;
   }

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      /* Return true if the intrinsic is a 32-bit load from a UBO index
       * below max_num_bo with a constant offset not exceeding max_offset.
       */
      if (intr->intrinsic == nir_intrinsic_load_ubo &&
          nir_src_is_const(intr->src[0]) && nir_src_num_components(intr->src[0]) == 1 &&
          nir_src_as_uint(intr->src[0]) < max_num_bo &&
          nir_src_is_const(intr->src[1]) &&
          nir_src_as_uint(intr->src[1]) <= max_offset &&
          /* TODO: Can't handle other bit sizes for now. */
          intr->def.bit_size == 32) {
         /* num_offsets can be NULL if-and-only-if uni_offsets is NULL. */
         assert((num_offsets == NULL) == (uni_offsets == NULL));

         /* If we're just checking that it's a uniform load, don't check (or
          * add to) the table.
          */
         if (uni_offsets == NULL)
            return true;

         uint32_t offset = nir_src_as_uint(intr->src[1]) + component * 4;
         assert(offset < MAX_OFFSET);

         const unsigned ubo = nir_src_as_uint(intr->src[0]);

         /* Return true if this offset was already recorded. */
         for (int i = 0; i < num_offsets[ubo]; i++) {
            if (uni_offsets[ubo * MAX_INLINABLE_UNIFORMS + i] == offset)
               return true;
         }

         /* Give up if this UBO already holds the maximum number of
          * inlinable uniforms.
          */
         if (num_offsets[ubo] == MAX_INLINABLE_UNIFORMS)
            return false;

         /* Record the uniform offset. */
         uni_offsets[ubo * MAX_INLINABLE_UNIFORMS + num_offsets[ubo]++] = offset;
         return true;
      }
      return false;
   }

   case nir_instr_type_load_const:
      /* Always return true for constants. */
      return true;

   default:
      return false;
   }
}

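/* Check whether the source is a loop induction variable whose init and
 * update values are constants or inlinable uniforms (which get recorded
 * into uni_offsets/num_offsets).
 */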
static bool
is_induction_variable(const nir_src *src, int component, nir_loop_info *info,
                      uint32_t *uni_offsets, uint8_t *num_offsets,
                      unsigned max_num_bo, unsigned max_offset)
{
   assert(component < src->ssa->num_components);

   /* Only consider induction variables (i.e. i in a for loop). */
   struct hash_entry *entry = _mesa_hash_table_search(info->induction_vars, src->ssa);
   if (!entry)
      return false;

   nir_loop_induction_variable *var = entry->data;
   /* An induction variable should have a constant initial value (i.e. i = 0),
    * a constant update value (i.e. i++) and a constant end condition
    * (i.e. i < 10), so that we know the exact loop count for unrolling
    * the loop.
    *
    * In addition, any uniforms that the induction variable's initial and
    * update values depend on must be inlined so that those values become
    * constant, for example:
    *
    *    for (i = init; i < count; i += step)
    *
    * Here we collect the uniforms "init" and "step".
    */
   if (var->init_src) {
      if (!nir_collect_src_uniforms(var->init_src, component,
                                    uni_offsets, num_offsets,
                                    max_num_bo, max_offset))
         return false;
   }

   if (var->update_src) {
      nir_alu_src *alu_src = var->update_src;
      if (!nir_collect_src_uniforms(&alu_src->src,
                                    alu_src->swizzle[component],
                                    uni_offsets, num_offsets,
                                    max_num_bo, max_offset))
         return false;
   }

   return true;
}

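/**
 * Analyze a single if/terminator condition and, if every uniform it depends
 * on can be inlined, append the discovered offsets to \c uni_offsets /
 * \c num_offsets. \c info must be the enclosing loop's analysis when \c cond
 * is a loop terminator condition, and NULL otherwise.
 */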
void
nir_add_inlinable_uniforms(const nir_src *cond, nir_loop_info *info,
                           uint32_t *uni_offsets, uint8_t *num_offsets,
                           unsigned max_num_bo, unsigned max_offset)
{
   uint8_t new_num[MAX_NUM_BO];
   memcpy(new_num, num_offsets, sizeof(new_num));

   /* An if-condition SSA value is always scalar, so the component is 0. */
   unsigned component = 0;

   /* Allow induction variables only when info is set, i.e. when cond is a
    * loop terminator condition.
    */
   if (info) {
      nir_scalar cond_scalar = { cond->ssa, 0 };

      /* Limit the terminator condition to what loop unrolling supports:
       * a simple comparison (i.e. "i < count" is supported, but
       * "i + 1 < count" is not).
       */
      if (nir_is_supported_terminator_condition(cond_scalar)) {
         if (nir_scalar_alu_op(cond_scalar) == nir_op_inot)
            cond_scalar = nir_scalar_chase_alu_src(cond_scalar, 0);

         nir_alu_instr *alu = nir_instr_as_alu(cond_scalar.def->parent_instr);

         /* One side of the comparison must be the induction variable; the
          * other side must contain only uniforms and constants.
          */
         for (int i = 0; i < 2; i++) {
            if (is_induction_variable(&alu->src[i].src, alu->src[i].swizzle[0],
                                      info, uni_offsets, new_num,
                                      max_num_bo, max_offset)) {
               cond = &alu->src[1 - i].src;
               component = alu->src[1 - i].swizzle[0];
               break;
            }
         }
      }
   }

   /* Only update the recorded uniform counts when every uniform in the
    * expression can be inlined; partially inlined uniforms can't eliminate
    * the if/loop.
    *
    * For example, if the number of uniforms that can be inlined in a shader
    * is limited to 4 and 3 have already been recorded, then for
    *
    *    if (uniform0 + uniform1 == 10)
    *
    * only uniform0 could be inlined before hitting the limit, yet the if
    * statement can only be eliminated when both uniform0 and uniform1 are
    * inlined.
    *
    * The same applies to a loop whose induction variable's init and update
    * values also contain uniforms, like
    *
    *    for (i = uniform0; i < uniform1; i += uniform2)
    *
    * where the loop can only be unrolled when uniform0, uniform1 and
    * uniform2 are all inlined at once.
    */
   if (nir_collect_src_uniforms(cond, component, uni_offsets, new_num,
                                max_num_bo, max_offset))
      memcpy(num_offsets, new_num, sizeof(new_num[0]) * max_num_bo);
}

static void
process_node(nir_cf_node *node, nir_loop_info *info,
             uint32_t *uni_offsets, uint8_t *num_offsets)
{
   switch (node->type) {
   case nir_cf_node_if: {
      nir_if *if_node = nir_cf_node_as_if(node);
      const nir_src *cond = &if_node->condition;
      nir_add_inlinable_uniforms(cond, info, uni_offsets, num_offsets,
                                 1, MAX_OFFSET);

      /* Do not pass the loop info down, so that induction variables are
       * only allowed in a loop terminator "if":
       *
       *    for (i = 0; true; i++)
       *       if (i == count)
       *          if (i == num)
       *             <no break>
       *          break
       *
       * Here "num" won't be inlined because its "if" is not a terminator.
       */
      info = NULL;

      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->then_list)
         process_node(nested_node, info, uni_offsets, num_offsets);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->else_list)
         process_node(nested_node, info, uni_offsets, num_offsets);
      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      assert(!nir_loop_has_continue_construct(loop));

      /* Replace the loop info; nested loop info is not tracked currently:
       *
       *    for (i = 0; i < count0; i++)
       *       for (j = 0; j < count1; j++)
       *          if (i == num)
       *
       * Here "num" won't be inlined because "i" is an induction variable
       * of the outer loop, not of the loop containing the "if".
       */
      info = loop->info;

      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
         bool is_terminator = false;
         list_for_each_entry(nir_loop_terminator, terminator,
                             &info->loop_terminator_list,
                             loop_terminator_link) {
            if (nested_node == &terminator->nif->cf_node) {
               is_terminator = true;
               break;
            }
         }

         /* Allow induction variables for the terminator "if" only:
          *
          *    for (i = 0; i < count; i++)
          *       if (i == num)
          *          <no break>
          *
          * Here "num" won't be inlined because its "if" is not a
          * terminator.
          */
         nir_loop_info *use_info = is_terminator ? info : NULL;
         process_node(nested_node, use_info, uni_offsets, num_offsets);
      }
      break;
   }

   default:
      break;
   }
}

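/**
 * Find uniforms in UBO 0 that can be inlined and record their dword offsets
 * in shader_info::inlinable_uniform_dw_offsets.
 */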
void
nir_find_inlinable_uniforms(nir_shader *shader)
{
   uint32_t uni_offsets[MAX_INLINABLE_UNIFORMS];
   uint8_t num_offsets[MAX_NUM_BO] = {0};

   nir_foreach_function_impl(impl, shader) {
      nir_metadata_require(impl, nir_metadata_loop_analysis,
                           nir_var_all, false);

      foreach_list_typed(nir_cf_node, node, node, &impl->body)
         process_node(node, NULL, uni_offsets, num_offsets);
   }

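   /* Only UBO 0 is scanned here (process_node passes max_num_bo == 1), so
    * only num_offsets[0] can be non-zero.
    */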
   for (int i = 0; i < num_offsets[0]; i++)
      shader->info.inlinable_uniform_dw_offsets[i] = uni_offsets[i] / 4;
   shader->info.num_inlinable_uniforms = num_offsets[0];
}

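/**
 * Replace load_ubo intrinsics that read UBO 0 at the given dword offsets
 * with the given constant values. Vector loads are scalarized so that
 * individual components can be replaced.
 */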
void
nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
                    const uint32_t *uniform_values,
                    const uint16_t *uniform_dw_offsets)
{
   if (!num_uniforms)
      return;

   nir_foreach_function_impl(impl, shader) {
      nir_builder b = nir_builder_create(impl);
      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

            /* Only replace loads from UBO 0 with constant offsets. */
            if (intr->intrinsic == nir_intrinsic_load_ubo &&
                nir_src_is_const(intr->src[0]) &&
                nir_src_as_uint(intr->src[0]) == 0 &&
                nir_src_is_const(intr->src[1]) &&
                /* TODO: Can't handle other bit sizes for now. */
                intr->def.bit_size == 32) {
               int num_components = intr->def.num_components;
               uint32_t offset = nir_src_as_uint(intr->src[1]) / 4;

               if (num_components == 1) {
                  /* Just replace the uniform load with a constant load. */
                  for (unsigned i = 0; i < num_uniforms; i++) {
                     if (offset == uniform_dw_offsets[i]) {
                        b.cursor = nir_before_instr(&intr->instr);
                        nir_def *def = nir_imm_int(&b, uniform_values[i]);
                        nir_def_replace(&intr->def, def);
                        break;
                     }
                  }
               } else {
                  /* Lower the vector uniform load to scalars and replace
                   * each found component load with a constant load.
                   */
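                  /* E.g. for a vec4 load at byte offset 16 where only the
                   * second dword is inlinable (a hypothetical case), the
                   * result is roughly:
                   *
                   *    vec4(load_ubo(0, 16), imm(value),
                   *         load_ubo(0, 24), load_ubo(0, 28))
                   */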
                  uint32_t max_offset = offset + num_components;
                  nir_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
                  bool found = false;

                  b.cursor = nir_before_instr(&intr->instr);

                  /* Find the components to replace. */
                  for (unsigned i = 0; i < num_uniforms; i++) {
                     uint32_t uni_offset = uniform_dw_offsets[i];
                     if (uni_offset >= offset && uni_offset < max_offset) {
                        int index = uni_offset - offset;
                        components[index] = nir_imm_int(&b, uniform_values[i]);
                        found = true;
                     }
                  }

                  if (!found)
                     continue;

                  /* Create per-component uniform loads for the components
                   * that were not inlined.
                   */
                  for (unsigned i = 0; i < num_components; i++) {
                     if (!components[i]) {
                        uint32_t scalar_offset = (offset + i) * 4;
                        components[i] = nir_load_ubo(&b, 1, intr->def.bit_size,
                                                     intr->src[0].ssa,
                                                     nir_imm_int(&b, scalar_offset));
                        nir_intrinsic_instr *load =
                           nir_instr_as_intrinsic(components[i]->parent_instr);
                        nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX, scalar_offset);
                        nir_intrinsic_set_range_base(load, scalar_offset);
                        nir_intrinsic_set_range(load, 4);
                     }
                  }

                  /* Replace the original uniform load. */
                  nir_def_replace(&intr->def,
                                  nir_vec(&b, components, num_components));
               }
            }
         }
      }

      nir_metadata_preserve(impl, nir_metadata_control_flow);
   }
}