/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
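
/* Computes the byte range of the UBO accessed by the given load, rounded
 * out to the const upload granularity (the alignment parameter is in vec4
 * units, hence the "* 16").  Returns false if the range can't be
 * determined at compile time.
 */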
static inline bool
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment, struct ir3_ubo_range *r)
{
   uint32_t offset = nir_intrinsic_range_base(instr);
   uint32_t size = nir_intrinsic_range(instr);

   /* If the offset is constant, the range is trivial (and NIR may not have
    * figured it out).
    */
   if (nir_src_is_const(instr->src[1])) {
      offset = nir_src_as_uint(instr->src[1]);
      size = nir_intrinsic_dest_components(instr) * 4;
   }

   /* If we haven't figured out the range accessed in the UBO, bail. */
   if (size == ~0)
      return false;

   r->start = ROUND_DOWN_TO(offset, alignment * 16);
   r->end = ALIGN(offset + size, alignment * 16);

   return true;
}
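
/* Extracts the UBO identity (block index, plus the descriptor set for
 * bindless) of a load_ubo, returning false if the block index isn't a
 * compile-time constant and thus can't be part of the upload plan.
 */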
static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (nir_src_is_const(instr->src[0])) {
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         return true;
      }
   }
   return false;
}

/**
 * Finds the given instruction's UBO load in the UBO upload plan, if any.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state,
                   struct ir3_ubo_range *r)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (!memcmp(&range->ubo, &ubo, sizeof(ubo)) &&
          r->start >= range->start &&
          r->end <= range->end) {
         return range;
      }
   }

   return NULL;
}

/**
 * Merges together neighboring/overlapping ranges in the range plan with a
 * newly updated range.
 */
static void
merge_neighbors(struct ir3_ubo_analysis_state *state, int index)
{
   struct ir3_ubo_range *a = &state->range[index];

   /* index is always the first slot that would have neighbored/overlapped with
    * the new range.
    */
   for (int i = index + 1; i < state->num_enabled; i++) {
      struct ir3_ubo_range *b = &state->range[i];
      if (memcmp(&a->ubo, &b->ubo, sizeof(a->ubo)))
         continue;

      if (a->start > b->end || a->end < b->start)
         continue;

      /* Merge B into A. */
      a->start = MIN2(a->start, b->start);
      a->end = MAX2(a->end, b->end);

      /* Swap the last enabled range into B's now unused slot */
      *b = state->range[--state->num_enabled];
   }
}

/**
 * During the first pass over the shader, makes the plan of which UBO upload
 * should include the range covering this UBO load.
 *
 * We are passed in an upload_remaining of how much space is left for us in
 * the const file, and we make sure our plan doesn't exceed that.
 */
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment,
                  uint32_t *upload_remaining)
{
   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   struct ir3_ubo_info ubo = {};
   if (!get_ubo_info(instr, &ubo))
      return;

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(nir, instr, alignment, &r))
      return;

   /* See if there's an existing range for this UBO we want to merge into. */
   for (int i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *plan_r = &state->range[i];
      if (memcmp(&plan_r->ubo, &ubo, sizeof(ubo)))
         continue;

      /* Don't extend existing uploads unless they're
       * neighboring/overlapping.
       */
      if (r.start > plan_r->end || r.end < plan_r->start)
         continue;

      r.start = MIN2(r.start, plan_r->start);
      r.end = MAX2(r.end, plan_r->end);

      uint32_t added = (plan_r->start - r.start) + (r.end - plan_r->end);
      if (added >= *upload_remaining)
         return;

      plan_r->start = r.start;
      plan_r->end = r.end;
      *upload_remaining -= added;

      merge_neighbors(state, i);
      return;
   }

   if (state->num_enabled == ARRAY_SIZE(state->range))
      return;

   uint32_t added = r.end - r.start;
   if (added >= *upload_remaining)
      return;

   struct ir3_ubo_range *plan_r = &state->range[state->num_enabled++];
   plan_r->ubo = ubo;
   plan_r->start = r.start;
   plan_r->end = r.end;
   *upload_remaining -= added;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}
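
/* Rewrites a load_ubo that is covered by the upload plan into a
 * load_uniform from the const file.  Roughly (SSA names and the base value
 * below are illustrative, not taken from a real shader):
 *
 *    vec4 32 ssa_2 = intrinsic load_ubo (ssa_0, ssa_1) (...)
 *
 * becomes
 *
 *    vec1 32 ssa_3 = ushr ssa_1, 2
 *    vec4 32 ssa_2 = intrinsic load_uniform (ssa_3) (base=(range->offset - range->start) / 4, 0, 0)
 *
 * Loads that aren't covered by the plan are left alone and only counted
 * via track_ubo_use().
 */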
static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(b->shader, instr, alignment, &r)) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state, &r);
   if (!range) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4. And
    * also the same for the constant part of the offset:
    */
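   /* For example (illustrative values): with a byte offset of
    * "ssa_base + 4", the constant part was peeled off above, so at this
    * point const_offset is 4 and ubo_offset is ssa_base; below we end up
    * with uniform_offset = ssa_base >> 2 and the remaining dword folded
    * into the load_uniform base.
    */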
   const int shift = -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0 ?
         nir_ishl(b, ubo_offset, nir_imm_int(b, shift)) :
         nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }

   debug_assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset. If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away)
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_intrinsic_instr *uniform =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   uniform->num_components = instr->num_components;
   uniform->src[0] = nir_src_for_ssa(uniform_offset);
   nir_intrinsic_set_base(uniform, const_offset);
   nir_ssa_dest_init(&uniform->instr, &uniform->dest,
                     uniform->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &uniform->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&uniform->dest.ssa));

   nir_instr_remove(&instr->instr);

   return true;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* nir_lower_ubo_vec4 happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_vec4);

   return op == nir_intrinsic_load_ubo;
}
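
/* First pass: scans every load_ubo in the shader and builds the upload plan
 * (const_state->ubo_state) describing which UBO ranges get pushed into the
 * const file.  The plan is consumed by ir3_nir_lower_ubo_loads below, which
 * rewrites the covered loads into load_uniform.
 */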
void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->shader->compiler;

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params. We do this UBO-to-push-constant before the real
    * allocation of the driver params' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = { };
   ir3_setup_const_state(nir, v, &worst_case_const_state);
   const uint32_t max_upload = (ir3_max_const(v) -
                                worst_case_const_state.offsets.immediate) * 16;

   memset(state, 0, sizeof(*state));

   uint32_t upload_remaining = max_upload;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr),
                                    state, compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically. Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */

   uint32_t offset = v->shader->num_reserved_user_consts * 16;
   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      offset += range_size;
   }
   state->size = offset;
}
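
/* Second pass: using the plan built above, turns load_ubo instructions whose
 * range is covered by an upload into load_uniform, and tracks how many
 * bindful UBOs are still referenced for the GL fast path.
 */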
bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->shader->compiler;
   /* For the binning pass variant, we re-use the corresponding draw-pass
    * variant's const_state and UBO state. To make this clear, they are
    * accessed as const (read-only) in this pass.
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |=
                  lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr),
                                            &builder, state, &num_ubos,
                                            compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

   /* Update the num_ubos field for GL (first_ubo_is_default_ubo). With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo)
      nir->info.num_ubos = num_ubos;

   return progress;
}