/*
 * Copyright © 2023 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ac_nir.h"
#include "nir.h"
#include "nir_builder.h"
#include "radv_constants.h"
#include "radv_nir.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_args.h"

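/* State shared by the lowering helpers below. */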
typedef struct {
   const struct radv_shader_args *args;
   const struct radv_shader_info *info;
   const struct radv_graphics_state_key *gfx_state;
   const struct radeon_info *rad_info;
} lower_vs_inputs_state;

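/* VS with a prolog: the prolog has already fetched the vertex attributes and
 * passes them to the main shader as shader arguments, one 128-bit slot per
 * attribute (64-bit inputs may spill into the next slot). Lower load_input
 * to reading those arguments.
 */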
static nir_def *
lower_load_vs_input_from_prolog(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs_state *s)
{
   nir_src *offset_src = nir_get_io_offset_src(intrin);
   assert(nir_src_is_const(*offset_src));

   const unsigned base = nir_intrinsic_base(intrin);
   const unsigned base_offset = nir_src_as_uint(*offset_src);
   const unsigned driver_location = base + base_offset - VERT_ATTRIB_GENERIC0;
   const unsigned component = nir_intrinsic_component(intrin);
   const unsigned bit_size = intrin->def.bit_size;
   const unsigned num_components = intrin->def.num_components;

   /* 64-bit inputs: they occupy twice as many 32-bit components.
    * 16-bit inputs: they occupy a 32-bit component (not packed).
    */
   const unsigned arg_bit_size = MAX2(bit_size, 32);

   unsigned num_input_args = 1;
   nir_def *input_args[2] = {ac_nir_load_arg(b, &s->args->ac, s->args->vs_inputs[driver_location]), NULL};
   if (component * 32 + arg_bit_size * num_components > 128) {
      assert(bit_size == 64);

      num_input_args++;
      input_args[1] = ac_nir_load_arg(b, &s->args->ac, s->args->vs_inputs[driver_location + 1]);
   }

   nir_def *extracted = nir_extract_bits(b, input_args, num_input_args, component * 32, num_components, arg_bit_size);

   if (bit_size < arg_bit_size) {
      assert(bit_size == 16);

      if (nir_alu_type_get_base_type(nir_intrinsic_dest_type(intrin)) == nir_type_float)
         return nir_f2f16(b, extracted);
      else
         return nir_u2u16(b, extracted);
   }

   return extracted;
}

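/* Index for per-instance attributes: base instance plus instance ID divided
 * by the attribute's instance rate divisor (or just the base instance when
 * the divisor is zero).
 */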
static nir_def *
calc_vs_input_index_instance_rate(nir_builder *b, unsigned location, lower_vs_inputs_state *s)
{
   const uint32_t divisor = s->gfx_state->vi.instance_rate_divisors[location];
   nir_def *start_instance = nir_load_base_instance(b);

   if (divisor == 0)
      return start_instance;

   nir_def *instance_id = nir_udiv_imm(b, nir_load_instance_id(b), divisor);
   return nir_iadd(b, start_instance, instance_id);
}

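/* Index into the vertex buffer for the given attribute location, using either
 * vertex rate or instance rate depending on the pipeline state.
 */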
static nir_def *
calc_vs_input_index(nir_builder *b, unsigned location, lower_vs_inputs_state *s)
{
   if (s->gfx_state->vi.instance_rate_inputs & BITFIELD_BIT(location))
      return calc_vs_input_index_instance_rate(b, location, s);

   return nir_iadd(b, nir_load_first_vertex(b), nir_load_vertex_id_zero_base(b));
}

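/* Untyped buffer loads need no format conversion, so they are only usable for
 * array formats whose channels match the load bit size and are at least
 * 32 bits wide.
 */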
static bool
can_use_untyped_load(const struct util_format_description *f, const unsigned bit_size)
{
   /* All components must have same size and type. */
   if (!f->is_array)
      return false;

   const struct util_format_channel_description *c = &f->channel[0];
   return c->size == bit_size && bit_size >= 32;
}

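/* Default value for a component that the attribute format doesn't provide:
 * zero for the first three channels, one for alpha, undef for 64-bit types
 * (which have no default).
 */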
static nir_def *
oob_input_load_value(nir_builder *b, const unsigned channel_idx, const unsigned bit_size, const bool is_float)
{
   /* 22.1.1. Attribute Location and Component Assignment of Vulkan 1.3 specification:
    * For 64-bit data types, no default attribute values are provided. Input variables
    * must not use more components than provided by the attribute.
    */
   if (bit_size == 64)
      return nir_undef(b, 1, bit_size);

   if (channel_idx == 3) {
      if (is_float)
         return nir_imm_floatN_t(b, 1.0, bit_size);
      else
         return nir_imm_intN_t(b, 1, bit_size);
   }

   return nir_imm_intN_t(b, 0, bit_size);
}

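/* Size in bytes of the given range of channels of a format. */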
static unsigned
count_format_bytes(const struct util_format_description *f, const unsigned first_channel, const unsigned num_channels)
{
   if (!num_channels)
      return 0;

   const unsigned last_channel = first_channel + num_channels - 1;
   assert(last_channel < f->nr_channels);
   unsigned bits = 0;
   for (unsigned i = first_channel; i <= last_channel; ++i) {
      bits += f->channel[i].size;
   }

   assert(bits % 8 == 0);
   return bits / 8;
}

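/* Whether the format swizzle is anything other than identity. */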
static bool
format_needs_swizzle(const struct util_format_description *f)
{
   for (unsigned i = 0; i < f->nr_channels; ++i) {
      if (f->swizzle[i] != PIPE_SWIZZLE_X + i)
         return true;
   }

   return false;
}

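/* Lowest (or, with backwards=true, highest) format channel referenced by the
 * given component mask, after applying the format swizzle.
 */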
static unsigned
first_used_swizzled_channel(const struct util_format_description *f, const unsigned mask, const bool backwards)
{
   unsigned first_used = backwards ? 0 : f->nr_channels;
   const unsigned it_mask = mask & BITFIELD_MASK(f->nr_channels);

   u_foreach_bit (b, it_mask) {
      assert(f->swizzle[b] != PIPE_SWIZZLE_0 && f->swizzle[b] != PIPE_SWIZZLE_1);
      const unsigned c = f->swizzle[b] - PIPE_SWIZZLE_X;
      first_used = backwards ? MAX2(first_used, c) : MIN2(first_used, c);
   }

   return first_used;
}

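/* Apply the sign extension / normalization of the alpha channel that the HW
 * vertex fetch doesn't perform for some formats.
 */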
static nir_def *
adjust_vertex_fetch_alpha(nir_builder *b, enum ac_vs_input_alpha_adjust alpha_adjust, nir_def *alpha)
{
   if (alpha_adjust == AC_ALPHA_ADJUST_SSCALED)
      alpha = nir_f2u32(b, alpha);

   /* For the integer-like cases, do a natural sign extension.
    *
    * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0 and happen to contain 0, 1, 2, 3 as
    * the two LSBs of the exponent.
    */
   unsigned offset = alpha_adjust == AC_ALPHA_ADJUST_SNORM ? 23u : 0u;

   alpha = nir_ibfe_imm(b, alpha, offset, 2u);

   /* Convert back to the right type. */
   if (alpha_adjust == AC_ALPHA_ADJUST_SNORM) {
      alpha = nir_i2f32(b, alpha);
      alpha = nir_fmax(b, alpha, nir_imm_float(b, -1.0f));
   } else if (alpha_adjust == AC_ALPHA_ADJUST_SSCALED) {
      alpha = nir_i2f32(b, alpha);
   }

   return alpha;
}

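/* Lower load_input to buffer loads from the vertex buffer descriptors:
 * build the descriptor address, compute the vertex/instance index, emit
 * typed or untyped buffer loads, then apply swizzle, alpha adjust and
 * default values for components not covered by the format.
 */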
static nir_def *
lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs_state *s)
{
   nir_src *offset_src = nir_get_io_offset_src(intrin);
   assert(nir_src_is_const(*offset_src));

   const unsigned base = nir_intrinsic_base(intrin);
   const unsigned base_offset = nir_src_as_uint(*offset_src);
   const unsigned location = base + base_offset - VERT_ATTRIB_GENERIC0;
   const unsigned bit_size = intrin->def.bit_size;
   const unsigned dest_num_components = intrin->def.num_components;

   /* Convert the component offset to bit_size units.
    * (Intrinsic component offset is in 32-bit units.)
    *
    * Small bitsize inputs consume the same space as 32-bit inputs,
    * but 64-bit inputs consume twice as many.
    * 64-bit variables must not have a component of 1 or 3.
    * (See VK spec 15.1.5 "Component Assignment")
    */
   const unsigned component = nir_intrinsic_component(intrin) / (MAX2(32, bit_size) / 32);

   /* Bitmask of components in bit_size units
    * of the current input load that are actually used.
    */
   const unsigned dest_use_mask = nir_def_components_read(&intrin->def) << component;

   /* If the input is entirely unused, just replace it with undef.
    * This is just in case we debug this pass without running DCE first.
    */
   if (!dest_use_mask)
      return nir_undef(b, dest_num_components, bit_size);

   const uint32_t attrib_binding = s->gfx_state->vi.vertex_attribute_bindings[location];
   const uint32_t attrib_offset = s->gfx_state->vi.vertex_attribute_offsets[location];
   const uint32_t attrib_stride = s->gfx_state->vi.vertex_attribute_strides[location];
   const enum pipe_format attrib_format = s->gfx_state->vi.vertex_attribute_formats[location];
   const struct util_format_description *f = util_format_description(attrib_format);
   const struct ac_vtx_format_info *vtx_info =
      ac_get_vtx_format_info(s->rad_info->gfx_level, s->rad_info->family, attrib_format);
   const unsigned binding_index = s->info->vs.use_per_attribute_vb_descs ? location : attrib_binding;
   const unsigned desc_index = util_bitcount(s->info->vs.vb_desc_usage_mask & u_bit_consecutive(0, binding_index));

   nir_def *vertex_buffers_arg = ac_nir_load_arg(b, &s->args->ac, s->args->ac.vertex_buffers);
   nir_def *vertex_buffers = nir_pack_64_2x32_split(b, vertex_buffers_arg, nir_imm_int(b, s->rad_info->address32_hi));
   nir_def *descriptor = nir_load_smem_amd(b, 4, vertex_buffers, nir_imm_int(b, desc_index * 16));
   nir_def *base_index = calc_vs_input_index(b, location, s);
   nir_def *zero = nir_imm_int(b, 0);

   /* We currently implement swizzling for all formats in shaders.
    * Note, it is possible to specify swizzling in the DST_SEL fields of descriptors,
    * but we don't use that because typed loads using the MTBUF instruction format
    * don't support DST_SEL, so it's simpler to just handle it all in shaders.
    */
   const bool needs_swizzle = format_needs_swizzle(f);

   /* We need to adjust the alpha channel as loaded by the HW,
    * for example sign extension and normalization may be necessary.
    */
   const enum ac_vs_input_alpha_adjust alpha_adjust = vtx_info->alpha_adjust;

   /* Try to shrink the load format by skipping unused components from the start.
    * Beneficial because the backend may be able to emit fewer HW instructions.
    * Only possible with array formats.
    */
   const unsigned first_used_channel = first_used_swizzled_channel(f, dest_use_mask, false);
   const unsigned skipped_start = f->is_array ? first_used_channel : 0;

   /* Number of channels we actually use and load.
    * Don't shrink the format here because this might allow the backend to
    * emit fewer (but larger than needed) HW instructions.
    */
   const unsigned first_trailing_unused_channel = first_used_swizzled_channel(f, dest_use_mask, true) + 1;
   const unsigned max_loaded_channels = MIN2(first_trailing_unused_channel, f->nr_channels);
   const unsigned fetch_num_channels =
      first_used_channel >= max_loaded_channels ? 0 : max_loaded_channels - skipped_start;

   /* Load VS inputs from VRAM.
    *
    * For the vast majority of cases this will only create 1x load_(typed)_buffer_amd
    * intrinsic and the backend is responsible for further splitting that
    * to as many HW instructions as needed based on alignment.
    *
    * Take care to prevent loaded components from failing the range check,
    * by emitting several load intrinsics with different index sources.
    * This is necessary because the backend can't further roll the const offset
    * into the index source of MUBUF / MTBUF instructions.
    */
   nir_def *loads[NIR_MAX_VEC_COMPONENTS] = {0};
   unsigned num_loads = 0;
   for (unsigned x = 0, channels; x < fetch_num_channels; x += channels) {
      channels = fetch_num_channels - x;
      const unsigned start = skipped_start + x;
      enum pipe_format fetch_format = attrib_format;
      nir_def *index = base_index;

      /* Add excess constant offset to the index. */
      unsigned const_off = attrib_offset + count_format_bytes(f, 0, start);
      if (attrib_stride && const_off > attrib_stride) {
         index = nir_iadd_imm(b, base_index, const_off / attrib_stride);
         const_off %= attrib_stride;
      }

      /* Reduce the number of loaded channels until we can pass the range check.
       * Only for array formats. VK spec mandates proper alignment for packed formats.
       * Note, NONE seems to occur in real use and is considered an array format.
       */
      if (f->is_array && fetch_format != PIPE_FORMAT_NONE) {
         while (channels > 1 && attrib_stride && (const_off + count_format_bytes(f, start, channels)) > attrib_stride) {
            channels--;
         }

         /* Keep the fetch format as large as possible to let the backend emit
          * larger load instructions when it deems them beneficial.
          */
         fetch_format = util_format_get_array(f->channel[0].type, f->channel[0].size, f->nr_channels - start,
                                              f->is_unorm || f->is_snorm, f->channel[0].pure_integer);
      }

      assert(f->is_array || channels == fetch_num_channels);

      /* Prefer using untyped buffer loads if possible, to avoid potential alignment issues.
       * Typed loads can cause GPU hangs when used with improper alignment.
       */
      if (can_use_untyped_load(f, bit_size)) {
         loads[num_loads++] = nir_load_buffer_amd(b, channels, bit_size, descriptor, zero, zero, index,
                                                  .base = const_off, .memory_modes = nir_var_shader_in);
      } else {
         const unsigned align_mul = MAX2(1, s->gfx_state->vi.vertex_binding_align[attrib_binding]);
         const unsigned align_offset = const_off % align_mul;

         loads[num_loads++] = nir_load_typed_buffer_amd(
            b, channels, bit_size, descriptor, zero, zero, index, .base = const_off, .format = fetch_format,
            .align_mul = align_mul, .align_offset = align_offset, .memory_modes = nir_var_shader_in);
      }
   }

   nir_def *load = loads[0];

   /* Extract the channels we actually need when we couldn't skip starting
    * components or had to emit more than one load intrinsic.
    */
   if (num_loads > 0 && (first_used_channel > skipped_start || num_loads != 1))
      load = nir_extract_bits(b, loads, num_loads, (first_used_channel - skipped_start) * bit_size,
                              max_loaded_channels - first_used_channel, bit_size);

   /* Return early if possible to avoid generating unnecessary IR. */
   if (num_loads > 0 && first_used_channel == component && load->num_components == dest_num_components &&
       !needs_swizzle && alpha_adjust == AC_ALPHA_ADJUST_NONE)
      return load;

   /* Fill unused and OOB components.
    * Apply swizzle and alpha adjust according to the format.
    */
   const nir_alu_type dst_type = nir_alu_type_get_base_type(nir_intrinsic_dest_type(intrin));
   nir_def *channels[NIR_MAX_VEC_COMPONENTS] = {0};
   for (unsigned i = 0; i < dest_num_components; ++i) {
      const unsigned c = i + component;

      if (!(dest_use_mask & BITFIELD_BIT(c))) {
         /* Fill unused channels with zero. */
         channels[i] = nir_imm_zero(b, 1, bit_size);
         continue;
      }

      const unsigned sw = f->swizzle[c];
      assert(sw >= first_used_channel);
      const unsigned loaded_channel = sw - first_used_channel;

      if (load && loaded_channel < load->num_components) {
         /* Use channels that were loaded from VRAM. */
         channels[i] = nir_channel(b, load, loaded_channel);

         if (alpha_adjust != AC_ALPHA_ADJUST_NONE && c == 3)
            channels[i] = adjust_vertex_fetch_alpha(b, alpha_adjust, channels[i]);
      } else {
         /* Handle input loads that are larger than their format. */
         channels[i] = oob_input_load_value(b, c, bit_size, dst_type == nir_type_float);
      }
   }

   return nir_vec(b, channels, dest_num_components);
}

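/* Replace each load_input either with the value passed in from the VS prolog
 * or with direct vertex buffer loads.
 */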
static bool
lower_vs_input_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
{
   if (intrin->intrinsic != nir_intrinsic_load_input)
      return false;

   lower_vs_inputs_state *s = (lower_vs_inputs_state *)state;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_def *replacement = NULL;

   if (s->info->vs.dynamic_inputs) {
      replacement = lower_load_vs_input_from_prolog(b, intrin, s);
   } else {
      replacement = lower_load_vs_input(b, intrin, s);
   }

   nir_def_rewrite_uses(&intrin->def, replacement);
   nir_instr_remove(&intrin->instr);
   nir_instr_free(&intrin->instr);

   return true;
}

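/* Lower all VS input loads. Must only be called on vertex shaders. */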
bool
radv_nir_lower_vs_inputs(nir_shader *shader, const struct radv_shader_stage *vs_stage,
                         const struct radv_graphics_state_key *gfx_state, const struct radeon_info *rad_info)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   lower_vs_inputs_state state = {
      .info = &vs_stage->info,
      .args = &vs_stage->args,
      .gfx_state = gfx_state,
      .rad_info = rad_info,
   };

   return nir_shader_intrinsics_pass(shader, lower_vs_input_instr, nir_metadata_dominance | nir_metadata_block_index,
                                     &state);
}