/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 * Copyright © 2022 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* Enhanced version of nir_inline_uniforms that can inline from any uniform
 * buffer. See nir_inline_uniforms.c for more details.
 */

#include "nir_builder.h"
#include "nir_loop_analyze.h"
#include "lvp_private.h"
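/* Return true if the given component of "src" is computed only from
 * constants and 32-bit UBO loads with constant offsets. When "uni_offsets"
 * and "num_offsets" are non-NULL, the offsets of the UBO loads found along
 * the way are recorded per UBO, up to MAX_INLINABLE_UNIFORMS each.
 */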
static bool
src_only_uses_uniforms(const nir_src *src, int component,
                       uint32_t *uni_offsets, uint8_t *num_offsets)
{
   if (!src->is_ssa)
      return false;

   assert(component < src->ssa->num_components);

   nir_instr *instr = src->ssa->parent_instr;

   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      /* Vector ops only need to check the corresponding component. */
      if (nir_op_is_vec(alu->op)) {
         nir_alu_src *alu_src = alu->src + component;
         return src_only_uses_uniforms(&alu_src->src, alu_src->swizzle[0],
                                       uni_offsets, num_offsets);
      }

      /* Return true if all sources return true. */
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         nir_alu_src *alu_src = alu->src + i;
         int input_sizes = nir_op_infos[alu->op].input_sizes[i];

         if (input_sizes == 0) {
            /* For ops with no fixed input size, each component of the dest
             * is determined only by the same component of the srcs.
             */
            if (!src_only_uses_uniforms(&alu_src->src, alu_src->swizzle[component],
                                        uni_offsets, num_offsets))
               return false;
         } else {
            /* For ops with a fixed input size, all components of the dest
             * are determined by all components of the srcs (except vec ops).
             */
            for (unsigned j = 0; j < input_sizes; j++) {
               if (!src_only_uses_uniforms(&alu_src->src, alu_src->swizzle[j],
                                           uni_offsets, num_offsets))
                  return false;
            }
         }
      }
      return true;
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      /* Return true if the intrinsic loads with a constant offset. */
      if (intr->intrinsic == nir_intrinsic_load_ubo &&
          nir_src_is_const(intr->src[0]) &&
          nir_src_is_const(intr->src[1]) &&
          /* TODO: Can't handle other bit sizes for now. */
          intr->dest.ssa.bit_size == 32) {
         uint32_t offset = nir_src_as_uint(intr->src[1]) + component * 4;

         /* Already recorded by another expression. */
         uint32_t ubo = nir_src_as_uint(intr->src[0]);
         for (int i = 0; uni_offsets && i < num_offsets[ubo]; i++) {
            if (uni_offsets[ubo * MAX_INLINABLE_UNIFORMS + i] == offset)
               return true;
         }

         /* Exceeded the per-UBO inlinable uniform limit. */
         if (num_offsets && num_offsets[ubo] == MAX_INLINABLE_UNIFORMS)
            return false;

         /* Record the uniform offset. */
         if (uni_offsets)
            uni_offsets[ubo * MAX_INLINABLE_UNIFORMS + num_offsets[ubo]++] = offset;
         return true;
      }
      return false;
   }

   case nir_instr_type_load_const:
      /* Always return true for constants. */
      return true;

   default:
      return false;
   }
}
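/* Return true if "src" is one of the loop's induction variables and the
 * uniforms its initial and update values depend on can all be recorded for
 * inlining.
 */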
static bool
is_induction_variable(const nir_src *src, int component, nir_loop_info *info,
                      uint32_t *uni_offsets, uint8_t *num_offsets)
{
   if (!src->is_ssa)
      return false;

   assert(component < src->ssa->num_components);

   /* Return true for induction variables (ie. i in a for loop). */
   for (int i = 0; i < info->num_induction_vars; i++) {
      nir_loop_induction_variable *var = info->induction_vars + i;
      if (var->def == src->ssa) {
         /* An induction variable should have a constant initial value
          * (ie. i = 0), a constant update value (ie. i++) and a constant end
          * condition (ie. i < 10), so that we know the exact loop count for
          * unrolling the loop.
          *
          * Additionally, uniforms need to be inlined for this induction
          * variable's initial and update values to become constant, for
          * example:
          *
          *    for (i = init; i < count; i += step)
          *
          * We collect the uniforms "init" and "step" here.
          */
         if (var->init_src) {
            if (!src_only_uses_uniforms(var->init_src, component,
                                        uni_offsets, num_offsets))
               return false;
         }

         if (var->update_src) {
            nir_alu_src *alu_src = var->update_src;
            if (!src_only_uses_uniforms(&alu_src->src,
                                        alu_src->swizzle[component],
                                        uni_offsets, num_offsets))
               return false;
         }

         return true;
      }
   }

   return false;
}
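/* Record the uniforms that need to be inlined to turn the condition "cond"
 * into a constant expression, but only if all of them fit within the
 * inlining limits.
 */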
static void
add_inlinable_uniforms(const nir_src *cond, nir_loop_info *info,
                       uint32_t *uni_offsets, uint8_t *num_offsets)
{
   uint8_t new_num[PIPE_MAX_CONSTANT_BUFFERS];
   memcpy(new_num, num_offsets, sizeof(new_num));
   /* The "if" condition SSA is always scalar, so the component is 0. */
   unsigned component = 0;

   /* Allow an induction variable on one side, which means this is a loop
    * terminator condition.
    */
   if (info) {
      nir_ssa_scalar cond_scalar = {cond->ssa, 0};

      /* Limit the terminator condition to what loop unrolling supports,
       * which is a simple comparison (ie. "i < count" is supported, but
       * "i + 1 < count" is not).
       */
      if (nir_is_supported_terminator_condition(cond_scalar)) {
         nir_alu_instr *alu = nir_instr_as_alu(cond->ssa->parent_instr);

         /* One side of the comparison is the induction variable; the other
          * side must use only uniforms.
          */
         for (int i = 0; i < 2; i++) {
            if (is_induction_variable(&alu->src[i].src, alu->src[i].swizzle[0],
                                      info, uni_offsets, new_num)) {
               cond = &alu->src[1 - i].src;
               component = alu->src[1 - i].swizzle[0];
               break;
            }
         }
      }
   }

   /* Only update the uniform count when all uniforms in the expression can
    * be inlined; partially inlined uniforms can't eliminate the if/loop.
    *
    * For example, if the number of uniforms that can be inlined for a shader
    * is limited to 4 and we have already added 3 uniforms, then when we deal
    * with
    *
    *    if (uniform0 + uniform1 == 10)
    *
    * only uniform0 can be inlined, because uniform1 would exceed the limit
    * of 4. But the "if" statement can only be eliminated when both uniform0
    * and uniform1 are inlined.
    *
    * The same applies to a loop whose induction variable init and update
    * also contain uniforms, like
    *
    *    for (i = uniform0; i < uniform1; i += uniform2)
    *
    * The loop can only be unrolled when uniform0, uniform1 and uniform2 are
    * all inlined at once.
    */
   if (src_only_uses_uniforms(cond, component, uni_offsets, new_num))
      memcpy(num_offsets, new_num, sizeof(new_num));
}
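/* Return true if the value is a non-constant 32-bit scalar computed only
 * from constants and UBO loads with constant offsets.
 */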
static bool
is_src_uniform_load(nir_src src)
{
   if (nir_src_bit_size(src) != 32 || nir_src_num_components(src) != 1 || nir_src_is_const(src))
      return false;
   return src_only_uses_uniforms(&src, 0, NULL, NULL);
}
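/* Walk a control-flow node recursively, collecting inlinable uniforms from
 * "if" and loop-terminator conditions, and gather stores whose source is a
 * pure uniform expression into "stores".
 */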
static void
process_node(nir_cf_node *node, nir_loop_info *info,
             uint32_t *uni_offsets, uint8_t *num_offsets,
             struct set *stores)
{
   switch (node->type) {
   case nir_cf_node_if: {
      nir_if *if_node = nir_cf_node_as_if(node);
      const nir_src *cond = &if_node->condition;
      add_inlinable_uniforms(cond, info, uni_offsets, num_offsets);

      /* Do not pass the loop info down, so induction variables are only
       * allowed in the loop terminator "if":
       *
       *     for (i = 0; true; i++)
       *        if (i == count)
       *           if (i == num)
       *              <no break>
       *           break
       *
       * so "num" won't be inlined because its "if" is not a terminator.
       */
      info = NULL;

      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->then_list)
         process_node(nested_node, info, uni_offsets, num_offsets, stores);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->else_list)
         process_node(nested_node, info, uni_offsets, num_offsets, stores);
      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);

      /* Replace the loop info; nested loop info is not supported currently:
       *
       *     for (i = 0; i < count0; i++)
       *        for (j = 0; j < count1; j++)
       *           if (i == num)
       *
       * so "num" won't be inlined because "i" is an induction variable of
       * the outer loop.
       */
      info = loop->info;

      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
         bool is_terminator = false;
         list_for_each_entry(nir_loop_terminator, terminator,
                             &info->loop_terminator_list,
                             loop_terminator_link) {
            if (nested_node == &terminator->nif->cf_node) {
               is_terminator = true;
               break;
            }
         }

         /* Allow induction variables for the terminator "if" only:
          *
          *     for (i = 0; i < count; i++)
          *        if (i == num)
          *           <no break>
          *
          * so "num" won't be inlined because its "if" is not a terminator.
          */
         nir_loop_info *use_info = is_terminator ? info : NULL;
         process_node(nested_node, use_info, uni_offsets, num_offsets, stores);
      }
      break;
   }

   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(node);
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic == nir_intrinsic_store_deref && is_src_uniform_load(intr->src[1]))
               _mesa_set_add(stores, &intr->src[1]);
         }
      }
      break;
   }
   default:
      break;
   }
}
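/* Scan the shader and record, per UBO, which uniform offsets are worth
 * inlining once their values are known.
 */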
bool
lvp_find_inlinable_uniforms(struct lvp_pipeline *pipeline, nir_shader *shader)
{
   bool ret = false;
   struct set *stores = _mesa_set_create(shader, _mesa_hash_pointer, _mesa_key_pointer_equal);
   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_metadata_require(function->impl, nir_metadata_loop_analysis, nir_var_all);

         foreach_list_typed(nir_cf_node, node, node, &function->impl->body)
            process_node(node, NULL, (uint32_t*)pipeline->inlines[shader->info.stage].uniform_offsets, pipeline->inlines[shader->info.stage].count, stores);
      }
   }
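   /* Heuristic: only record the uniforms feeding a stored expression when
    * the expression's result is used at least "threshold" times.
    */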
   const unsigned threshold = 5;
   set_foreach(stores, entry) {
      const nir_src *src = entry->key;
      unsigned counter = 0;
      list_for_each_entry(nir_src, rsrc, &src->ssa->uses, use_link) {
         counter++;
         if (counter >= threshold)
            break;
      }
      if (counter >= threshold) {
         uint8_t new_num[PIPE_MAX_CONSTANT_BUFFERS];
         memcpy(new_num, pipeline->inlines[shader->info.stage].count, sizeof(new_num));
         if (src_only_uses_uniforms(src, 0, (uint32_t*)pipeline->inlines[shader->info.stage].uniform_offsets, new_num)) {
            ret = true;
            memcpy(pipeline->inlines[shader->info.stage].count, new_num, sizeof(new_num));
         }
      }
   }
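   /* Mark the stage as having inlinable uniforms so lvp_inline_uniforms()
    * will process it.
    */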
   for (unsigned i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      if (pipeline->inlines[shader->info.stage].count[i]) {
         pipeline->inlines[shader->info.stage].can_inline |= BITFIELD_BIT(i);
         break;
      }
   }
   return ret;
}
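/* Rewrite the recorded UBO loads in "shader" into the constant values given
 * in "uniform_values" for the UBO with index "ubo". Vector loads are
 * scalarized so that only the recorded components become constants; the
 * remaining components are reloaded individually.
 */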
void
lvp_inline_uniforms(nir_shader *shader, const struct lvp_pipeline *pipeline, const uint32_t *uniform_values, uint32_t ubo)
{
   if (!pipeline->inlines[shader->info.stage].can_inline)
      return;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;

               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

               /* Only replace loads with constant offsets. */
               if (intr->intrinsic == nir_intrinsic_load_ubo &&
                   nir_src_is_const(intr->src[0]) &&
                   nir_src_as_uint(intr->src[0]) == ubo &&
                   nir_src_is_const(intr->src[1]) &&
                   /* TODO: Can't handle other bit sizes for now. */
                   intr->dest.ssa.bit_size == 32) {
                  int num_components = intr->dest.ssa.num_components;
                  uint32_t offset = nir_src_as_uint(intr->src[1]);
                  const unsigned num_uniforms = pipeline->inlines[shader->info.stage].count[ubo];
                  const unsigned *uniform_dw_offsets = pipeline->inlines[shader->info.stage].uniform_offsets[ubo];

                  if (num_components == 1) {
                     /* Just replace the uniform load with a constant load. */
                     for (unsigned i = 0; i < num_uniforms; i++) {
                        if (offset == uniform_dw_offsets[i]) {
                           b.cursor = nir_before_instr(&intr->instr);
                           nir_ssa_def *def = nir_imm_int(&b, uniform_values[i]);
                           nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
                           nir_instr_remove(&intr->instr);
                           break;
                        }
                     }
                  } else {
                     /* Lower the vector uniform load to scalars and replace
                      * each recorded component load with a constant load.
                      */
                     uint32_t max_offset = offset + num_components;
                     nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
                     bool found = false;

                     b.cursor = nir_before_instr(&intr->instr);

                     /* Find the components to replace. */
                     for (unsigned i = 0; i < num_uniforms; i++) {
                        uint32_t uni_offset = uniform_dw_offsets[i];
                        if (uni_offset >= offset && uni_offset < max_offset) {
                           int index = uni_offset - offset;
                           components[index] = nir_imm_int(&b, uniform_values[i]);
                           found = true;
                        }
                     }

                     if (!found)
                        continue;

                     /* Create per-component uniform loads for the components
                      * that were not inlined.
                      */
                     for (unsigned i = 0; i < num_components; i++) {
                        if (!components[i]) {
                           uint32_t scalar_offset = (offset + i) * 4;
                           components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
                                                        intr->src[0].ssa,
                                                        nir_imm_int(&b, scalar_offset));
                           nir_intrinsic_instr *load =
                              nir_instr_as_intrinsic(components[i]->parent_instr);
                           nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX, scalar_offset);
                           nir_intrinsic_set_range_base(load, scalar_offset);
                           nir_intrinsic_set_range(load, 4);
                        }
                     }

                     /* Replace the original uniform load. */
                     nir_ssa_def_rewrite_uses(&intr->dest.ssa,
                                              nir_vec(&b, components, num_components));
                     nir_instr_remove(&intr->instr);
                  }
               }
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }
}