/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"
#include "sid.h"
#include "util/u_memory.h"
#include "ac_nir.h"

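/* Extract the low (index 0) or high (index 1) signed 16-bit half of a packed
 * i32 and sign-extend it to a full i32.
 */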
static LLVMValueRef unpack_sint16(struct si_shader_context *ctx, LLVMValueRef i32, unsigned index)
{
   assert(index <= 1);

   if (index == 1)
      return LLVMBuildAShr(ctx->ac.builder, i32, LLVMConstInt(ctx->ac.i32, 16, 0), "");

   return LLVMBuildSExt(ctx->ac.builder, LLVMBuildTrunc(ctx->ac.builder, i32, ctx->ac.i16, ""),
                        ctx->ac.i32, "");
}

static void load_input_vs(struct si_shader_context *ctx, unsigned input_index, LLVMValueRef out[4])
{
   const struct si_shader_info *info = &ctx->shader->selector->info;
   unsigned vs_blit_property = info->base.vs.blit_sgprs_amd;

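   /* Blit shaders (u_blitter) receive their inputs in user SGPRs instead of
    * vertex buffers: two i32s with packed 16-bit (x1,y1)/(x2,y2) coordinates,
    * an f32 depth, then either a color or texture coordinates, decoded below.
    */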
   if (vs_blit_property) {
      LLVMValueRef vertex_id = ctx->abi.vertex_id;
      LLVMValueRef sel_x1 =
         LLVMBuildICmp(ctx->ac.builder, LLVMIntULE, vertex_id, ctx->ac.i32_1, "");
      /* Use LLVMIntNE, because we have 3 vertices and only
       * the middle one should use y2.
       */
      LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, vertex_id, ctx->ac.i32_1, "");

      unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
      if (input_index == 0) {
         /* Position: */
         LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs);
         LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 1);

         LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
         LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
         LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
         LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

         LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1, x1, x2, "");
         LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1, y1, y2, "");

         out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->ac.f32, "");
         out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->ac.f32, "");
         out[2] = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 2);
         out[3] = ctx->ac.f32_1;
         return;
      }

      /* Color or texture coordinates: */
      assert(input_index == 1);

      if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
         for (int i = 0; i < 4; i++) {
            out[i] = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 3 + i);
         }
      } else {
         assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
         LLVMValueRef x1 = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 3);
         LLVMValueRef y1 = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 4);
         LLVMValueRef x2 = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 5);
         LLVMValueRef y2 = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 6);

         out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1, x1, x2, "");
         out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1, y1, y2, "");
         out[2] = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 7);
         out[3] = LLVMGetParam(ctx->main_fn, param_vs_blit_inputs + 8);
      }
      return;
   }

   /* Set can_speculate=false to help keep all loads grouped together
    * for better latency hiding. If it were true, LLVM could move the loads forward
    * and accidentally double memory latency by doing:
    *
    *    buffer_load_dword_xyzw
    *    s_waitcnt vmcnt(0)
    *    buffer_load_dword_xyzw
    *    s_waitcnt vmcnt(0)
    *
    * ... which is what we must prevent at all costs.
    */
   const bool can_speculate = false;
   unsigned bit_size = info->input[input_index].fp16_lo_hi_valid & 0x1 ? 16 : 32;
   LLVMTypeRef int_type = bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32;
   LLVMTypeRef float_type = bit_size == 16 ? ctx->ac.f16 : ctx->ac.f32;
   unsigned num_vbos_in_user_sgprs = ctx->shader->selector->info.num_vbos_in_user_sgprs;
   union si_vs_fix_fetch fix_fetch;
   LLVMValueRef vb_desc;
   LLVMValueRef vertex_index;
   LLVMValueRef tmp;

   if (input_index < num_vbos_in_user_sgprs) {
      vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
   } else {
      unsigned index = input_index - num_vbos_in_user_sgprs;
      vb_desc = ac_build_load_to_sgpr(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args.vertex_buffers),
                                      LLVMConstInt(ctx->ac.i32, index, 0));
   }

   vertex_index = LLVMGetParam(ctx->main_fn, ctx->vertex_index0.arg_index + input_index);

   /* Use the open-coded implementation for all loads of doubles and
    * of dword-sized data that needs fixups. We need to insert conversion
    * code anyway, and the amd/common code does it for us.
    */
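   /* fix_fetch.u.log_size is log2 of the channel size in bytes, so 2 below
    * means 32-bit channels and 3 means 64-bit (double) channels.
    */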
   bool opencode = ctx->shader->key.ge.mono.vs_fetch_opencode & (1 << input_index);
   fix_fetch.bits = ctx->shader->key.ge.mono.vs_fix_fetch[input_index].bits;
   if (opencode || (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
       (fix_fetch.u.log_size == 2)) {
      tmp = ac_build_opencoded_load_format(&ctx->ac, fix_fetch.u.log_size,
                                           fix_fetch.u.num_channels_m1 + 1, fix_fetch.u.format,
                                           fix_fetch.u.reverse, !opencode, vb_desc, vertex_index,
                                           ctx->ac.i32_0, ctx->ac.i32_0, 0, can_speculate);
      for (unsigned i = 0; i < 4; ++i)
         out[i] =
            LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->ac.i32, i, false), "");

      if (bit_size == 16) {
         if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT ||
             fix_fetch.u.format == AC_FETCH_FORMAT_SINT) {
            for (unsigned i = 0; i < 4; i++)
               out[i] = LLVMBuildTrunc(ctx->ac.builder, out[i], ctx->ac.i16, "");
         } else {
            for (unsigned i = 0; i < 4; i++) {
               out[i] = ac_to_float(&ctx->ac, out[i]);
               out[i] = LLVMBuildFPTrunc(ctx->ac.builder, out[i], ctx->ac.f16, "");
            }
         }
      }
      return;
   }

   unsigned required_channels = util_last_bit(info->input[input_index].usage_mask);
   if (required_channels == 0) {
      for (unsigned i = 0; i < 4; ++i)
         out[i] = LLVMGetUndef(ctx->ac.f32);
      return;
   }

   /* Do multiple loads for special formats. */
   LLVMValueRef fetches[4];
   unsigned num_fetches;
   unsigned fetch_stride;
   unsigned channels_per_fetch;

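   /* 3-channel formats with 8-bit or 16-bit channels have no native vertex
    * buffer format, so each channel is fetched separately; everything else
    * can be fetched with a single typed load.
    */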
   if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
      num_fetches = MIN2(required_channels, 3);
      fetch_stride = 1 << fix_fetch.u.log_size;
      channels_per_fetch = 1;
   } else {
      num_fetches = 1;
      fetch_stride = 0;
      channels_per_fetch = required_channels;
   }

   for (unsigned i = 0; i < num_fetches; ++i) {
      LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, fetch_stride * i, 0);
      fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
                                               channels_per_fetch, 0, can_speculate,
                                               bit_size == 16, false);
   }

   if (num_fetches == 1 && channels_per_fetch > 1) {
      LLVMValueRef fetch = fetches[0];
      for (unsigned i = 0; i < channels_per_fetch; ++i) {
         tmp = LLVMConstInt(ctx->ac.i32, i, false);
         fetches[i] = LLVMBuildExtractElement(ctx->ac.builder, fetch, tmp, "");
      }
      num_fetches = channels_per_fetch;
      channels_per_fetch = 1;
   }

   for (unsigned i = num_fetches; i < 4; ++i)
      fetches[i] = LLVMGetUndef(float_type);

   if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 && required_channels == 4) {
      if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
         fetches[3] = LLVMConstInt(int_type, 1, 0);
      else
         fetches[3] = LLVMConstReal(float_type, 1);
   } else if (fix_fetch.u.log_size == 3 &&
              (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
               fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
               fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
              required_channels == 4) {

      /* For 2_10_10_10, the hardware returns an unsigned value;
       * convert it to a signed one.
       */
      LLVMValueRef tmp = fetches[3];
      LLVMValueRef c30 = LLVMConstInt(int_type, 30, 0);

      /* First, recover the sign-extended signed integer value. */
      if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
         tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, int_type, "");
      else
         tmp = ac_to_integer(&ctx->ac, tmp);

      /* For the integer-like cases, do a natural sign extension.
       *
       * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
       * and happen to contain 0, 1, 2, 3 as the two LSBs of the
       * exponent.
       */
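      /* For example, shifting the 2-bit W field up to bits [31:30] and then
       * arithmetic-shifting right by 30 turns the unsigned values 0, 1, 2, 3
       * into the signed values 0, 1, -2, -1. For SNORM, the shl by 7 first
       * moves the exponent LSBs (bits [24:23]) into bits [31:30].
       */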
      tmp = LLVMBuildShl(
         ctx->ac.builder, tmp,
         fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ? LLVMConstInt(int_type, 7, 0) : c30, "");
      tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

      /* Convert back to the right type. */
      if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
         LLVMValueRef clamp;
         LLVMValueRef neg_one = LLVMConstReal(float_type, -1.0);
         tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, float_type, "");
         clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
         tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
      } else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
         tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, float_type, "");
      }

      fetches[3] = tmp;
   }

   for (unsigned i = 0; i < 4; ++i)
      out[i] = ac_to_float(&ctx->ac, fetches[i]);
}

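/* ac_shader_abi callback: load a vertex input, bitcast the components to the
 * requested type, and return "num_components" of them starting at "component".
 */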
static LLVMValueRef si_load_vs_input(struct ac_shader_abi *abi, unsigned driver_location,
                                     unsigned component, unsigned num_components,
                                     unsigned vertex_index, LLVMTypeRef type)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   LLVMValueRef values[4];

   load_input_vs(ctx, driver_location, values);

   for (unsigned i = 0; i < 4; i++)
      values[i] = LLVMBuildBitCast(ctx->ac.builder, values[i], type, "");

   return ac_build_varying_gather_values(&ctx->ac, values, num_components, component);
}

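/* Store the enabled components of one shader output to its streamout buffer
 * at the write offset computed for the current thread.
 */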
void si_llvm_streamout_store_output(struct si_shader_context *ctx, LLVMValueRef const *so_buffers,
                                    LLVMValueRef const *so_write_offsets,
                                    struct pipe_stream_output *stream_out,
                                    struct si_shader_output_values *shader_out)
{
   unsigned buf_idx = stream_out->output_buffer;
   unsigned start = stream_out->start_component;
   unsigned num_comps = stream_out->num_components;
   LLVMValueRef out[4];

   assert(num_comps && num_comps <= 4);
   if (!num_comps || num_comps > 4)
      return;

   /* Load the output as int. */
   for (int j = 0; j < num_comps; j++) {
      assert(stream_out->stream == ((shader_out->vertex_streams >> ((start + j) * 2)) & 0x3));

      out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
   }

   /* Pack the output. */
   LLVMValueRef vdata = NULL;

   switch (num_comps) {
   case 1: /* as i32 */
      vdata = out[0];
      break;
   case 2: /* as v2i32 */
   case 3: /* as v3i32 */
   case 4: /* as v4i32 */
      vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
      break;
   }

   ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx], vdata, NULL,
                               LLVMBuildAdd(ctx->ac.builder, so_write_offsets[buf_idx],
                                            LLVMConstInt(ctx->ac.i32, stream_out->dst_offset * 4, 0), ""),
                               ctx->ac.i32_0, ac_glc | ac_slc);
}

/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 */
void si_llvm_emit_streamout(struct si_shader_context *ctx, struct si_shader_output_values *outputs,
                            unsigned noutput, unsigned stream)
{
   struct pipe_stream_output_info *so = &ctx->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   int i;

   /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
   LLVMValueRef so_vtx_count = si_unpack_param(ctx, ctx->args.streamout_config, 16, 7);

   LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

   /* can_emit = tid < so_vtx_count; */
   LLVMValueRef can_emit = LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

   /* Emit the streamout code conditionally. This actually avoids
    * out-of-bounds buffer access. The hw tells us via the SGPR
    * (so_vtx_count) which threads are allowed to emit streamout data. */
   ac_build_ifcc(&ctx->ac, can_emit, 6501);
   {
      /* The buffer offset is computed as follows:
       *   ByteOffset = streamout_offset[buffer_id]*4 +
       *                (streamout_write_index + thread_id)*stride[buffer_id] +
       *                attrib_offset
       */
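      /* For example, with streamout_offset[0] = 64, a 16-byte stride on
       * buffer 0, write index 100, thread 5 and dst_offset 2, the component
       * lands at 64*4 + (100 + 5)*16 + 2*4 = 1944 bytes into the buffer.
       */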

      LLVMValueRef so_write_index = ac_get_arg(&ctx->ac, ctx->args.streamout_write_index);

      /* Compute (streamout_write_index + thread_id). */
      so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

      /* Load the descriptor and compute the write offset for each
       * enabled buffer. */
      LLVMValueRef so_write_offset[4] = {};
      LLVMValueRef so_buffers[4];
      LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->internal_bindings);

      for (i = 0; i < 4; i++) {
         if (!so->stride[i])
            continue;

         LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, SI_VS_STREAMOUT_BUF0 + i, 0);

         so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

         LLVMValueRef so_offset = ac_get_arg(&ctx->ac, ctx->args.streamout_offset[i]);
         so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->ac.i32, 4, 0), "");

         so_write_offset[i] = ac_build_imad(
            &ctx->ac, so_write_index, LLVMConstInt(ctx->ac.i32, so->stride[i] * 4, 0), so_offset);
      }

      /* Write streamout data. */
      for (i = 0; i < so->num_outputs; i++) {
         unsigned reg = so->output[i].register_index;

         if (reg >= noutput)
            continue;

         if (stream != so->output[i].stream)
            continue;

         si_llvm_streamout_store_output(ctx, so_buffers, so_write_offset, &so->output[i],
                                        &outputs[reg]);
      }
   }
   ac_build_endif(&ctx->ac, 6501);
}

void si_llvm_clipvertex_to_clipdist(struct si_shader_context *ctx,
                                    struct ac_export_args clipdist[2], LLVMValueRef clipvertex[4])
{
   unsigned reg_index;
   unsigned chan;
   unsigned const_chan;
   LLVMValueRef base_elt;
   LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->internal_bindings);
   LLVMValueRef constbuf_index = LLVMConstInt(ctx->ac.i32, SI_VS_CONST_CLIP_PLANES, 0);
   LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);
   unsigned clipdist_mask = ctx->shader->selector->info.clipdist_mask &
                            ~ctx->shader->key.ge.opt.kill_clip_distances;

   for (reg_index = 0; reg_index < 2; reg_index++) {
      struct ac_export_args *args = &clipdist[reg_index];

      if (!(clipdist_mask & BITFIELD_RANGE(reg_index * 4, 4)))
         continue;

      args->out[0] = args->out[1] = args->out[2] = args->out[3] = LLVMGetUndef(ctx->ac.f32);

      /* Compute dot products of position and user clip plane vectors */
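      /* clipdist[reg_index * 4 + chan] = dot(clipvertex, clip_plane[reg_index * 4 + chan]),
       * accumulated one multiply-add per constant-buffer component below.
       */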
      for (chan = 0; chan < 4; chan++) {
         if (!(clipdist_mask & BITFIELD_BIT(reg_index * 4 + chan)))
            continue;

         for (const_chan = 0; const_chan < 4; const_chan++) {
            LLVMValueRef addr =
               LLVMConstInt(ctx->ac.i32, ((reg_index * 4 + chan) * 4 + const_chan) * 4, 0);
            base_elt = si_buffer_load_const(ctx, const_resource, addr);
            args->out[chan] =
               ac_build_fmad(&ctx->ac, base_elt, clipvertex[const_chan],
                             const_chan == 0 ? ctx->ac.f32_0 : args->out[chan]);
         }
      }

      args->enabled_channels = 0xf;
      args->valid_mask = 0;
      args->done = 0;
      args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
      args->compr = 0;
   }
}

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_vs_export_args(struct si_shader_context *ctx, const LLVMValueRef *values,
                                        unsigned target, struct ac_export_args *args)
{
   args->enabled_channels = 0xf; /* writemask - default is 0xf */
   args->valid_mask = 0;         /* Specify whether the EXEC mask represents the valid mask */
   args->done = 0;               /* Specify whether this is the last export */
   args->target = target;        /* Specify the target we are exporting */
   args->compr = false;

   memcpy(&args->out[0], values, sizeof(values[0]) * 4);
}

/**
 * Vertex color clamping.
 *
 * This uses a state constant loaded from a user data SGPR and
 * adds an IF statement that clamps all colors if the constant
 * is true.
 */
static void si_vertex_color_clamping(struct si_shader_context *ctx,
                                     struct si_shader_output_values *outputs, unsigned noutput)
{
   LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
   bool has_colors = false;

   /* Store original colors to alloca variables. */
   for (unsigned i = 0; i < noutput; i++) {
      if (outputs[i].semantic != VARYING_SLOT_COL0 &&
          outputs[i].semantic != VARYING_SLOT_COL1 &&
          outputs[i].semantic != VARYING_SLOT_BFC0 &&
          outputs[i].semantic != VARYING_SLOT_BFC1)
         continue;

      for (unsigned j = 0; j < 4; j++)
         addr[i][j] = ac_build_alloca_init(&ctx->ac, outputs[i].values[j], "");

      has_colors = true;
   }

   if (!has_colors)
      return;

   /* The state is in the first bit of the user SGPR. */
   LLVMValueRef cond = GET_FIELD(ctx, VS_STATE_CLAMP_VERTEX_COLOR);
   cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");

   ac_build_ifcc(&ctx->ac, cond, 6502);

   /* Store clamped colors to alloca variables within the conditional block. */
   for (unsigned i = 0; i < noutput; i++) {
      if (outputs[i].semantic != VARYING_SLOT_COL0 &&
          outputs[i].semantic != VARYING_SLOT_COL1 &&
          outputs[i].semantic != VARYING_SLOT_BFC0 &&
          outputs[i].semantic != VARYING_SLOT_BFC1)
         continue;

      for (unsigned j = 0; j < 4; j++) {
         LLVMBuildStore(ctx->ac.builder, ac_build_clamp(&ctx->ac, outputs[i].values[j]),
                        addr[i][j]);
      }
   }
   ac_build_endif(&ctx->ac, 6502);

   /* Load clamped colors */
   for (unsigned i = 0; i < noutput; i++) {
      if (outputs[i].semantic != VARYING_SLOT_COL0 &&
          outputs[i].semantic != VARYING_SLOT_COL1 &&
          outputs[i].semantic != VARYING_SLOT_BFC0 &&
          outputs[i].semantic != VARYING_SLOT_BFC1)
         continue;

      for (unsigned j = 0; j < 4; j++) {
         outputs[i].values[j] = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.f32, addr[i][j], "");
      }
   }
}

/**
 * Generate export instructions for hardware VS shader stage or NGG GS stage
 * (position and parameter data only).
 *
 * \param num_export_threads The number of threads that are active for exports. Only used by gfx11.
 */
void si_llvm_build_vs_exports(struct si_shader_context *ctx, LLVMValueRef num_export_threads,
                              struct si_shader_output_values *outputs, unsigned noutput)
{
   struct si_shader *shader = ctx->shader;
   struct ac_export_args pos_args[4] = {};
   LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL,
                viewport_index_value = NULL;
   unsigned pos_idx, index;
   unsigned clipdist_mask = (shader->selector->info.clipdist_mask &
                             ~shader->key.ge.opt.kill_clip_distances) |
                            shader->selector->info.culldist_mask;
   int i;

   si_vertex_color_clamping(ctx, outputs, noutput);

   /* Build position exports. */
   for (i = 0; i < noutput; i++) {
      switch (outputs[i].semantic) {
      case VARYING_SLOT_POS:
         si_llvm_init_vs_export_args(ctx, outputs[i].values, V_008DFC_SQ_EXP_POS, &pos_args[0]);
         break;
      case VARYING_SLOT_PSIZ:
         psize_value = outputs[i].values[0];
         break;
      case VARYING_SLOT_LAYER:
         layer_value = outputs[i].values[0];
         break;
      case VARYING_SLOT_VIEWPORT:
         viewport_index_value = outputs[i].values[0];
         break;
      case VARYING_SLOT_EDGE:
         edgeflag_value = outputs[i].values[0];
         break;
      case VARYING_SLOT_CLIP_DIST0:
      case VARYING_SLOT_CLIP_DIST1:
         index = outputs[i].semantic - VARYING_SLOT_CLIP_DIST0;
         if (clipdist_mask & BITFIELD_RANGE(index * 4, 4)) {
            si_llvm_init_vs_export_args(ctx, outputs[i].values, V_008DFC_SQ_EXP_POS + 2 + index,
                                        &pos_args[2 + index]);
         }
         break;
      case VARYING_SLOT_CLIP_VERTEX:
         si_llvm_clipvertex_to_clipdist(ctx, pos_args + 2, outputs[i].values);
         break;
      }
   }

   /* We need to add the position output manually if it's missing. */
   if (!pos_args[0].out[0]) {
      pos_args[0].enabled_channels = 0xf; /* writemask */
      pos_args[0].valid_mask = 0;         /* EXEC mask */
      pos_args[0].done = 0;               /* last export? */
      pos_args[0].target = V_008DFC_SQ_EXP_POS;
      pos_args[0].compr = 0;              /* COMPR flag */
      pos_args[0].out[0] = ctx->ac.f32_0; /* X */
      pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
      pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
      pos_args[0].out[3] = ctx->ac.f32_1; /* W */
   }

   bool writes_psize = shader->selector->info.writes_psize && !shader->key.ge.opt.kill_pointsize;
   bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag && !shader->key.ge.as_ngg;
   bool writes_vrs = ctx->screen->options.vrs2x2;

   /* Write the misc vector (point size, edgeflag, layer, viewport). */
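   /* Channel layout of this export: X = point size, Y = edge flag and VRS
    * rates, Z = layer (plus viewport index on GFX9+), W = viewport index on
    * older chips.
    */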
   if (writes_psize || pos_writes_edgeflag || writes_vrs ||
       shader->selector->info.writes_viewport_index || shader->selector->info.writes_layer) {
      pos_args[1].enabled_channels = writes_psize |
                                     ((pos_writes_edgeflag | writes_vrs) << 1) |
                                     (shader->selector->info.writes_layer << 2);

      pos_args[1].valid_mask = 0;         /* EXEC mask */
      pos_args[1].done = 0;               /* last export? */
      pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
      pos_args[1].compr = 0;              /* COMPR flag */
      pos_args[1].out[0] = ctx->ac.f32_0; /* X */
      pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
      pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
      pos_args[1].out[3] = ctx->ac.f32_0; /* W */

      if (writes_psize)
         pos_args[1].out[0] = psize_value;

      if (pos_writes_edgeflag) {
         /* The output is a float, but the hw expects an integer
          * with the first bit containing the edge flag. */
         edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder, edgeflag_value, ctx->ac.i32, "");
         edgeflag_value = ac_build_umin(&ctx->ac, edgeflag_value, ctx->ac.i32_1);

         /* The LLVM intrinsic expects a float. */
         pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
      }

      if (writes_vrs) {
         LLVMValueRef rates;
         if (ctx->screen->info.gfx_level >= GFX11) {
            /* Bits [2:5] = VRS rate
             *
             * The range is [0, 15].
             *
             * If the hw doesn't support VRS 4x4, it will silently use 2x2 instead.
             */
            rates = LLVMConstInt(ctx->ac.i32, (V_0283D0_VRS_SHADING_RATE_4X4 << 2), 0);
         } else {
            /* Bits [2:3] = VRS rate X
             * Bits [4:5] = VRS rate Y
             *
             * The range is [-2, 1]. Values:
             *   1: 2x coarser shading rate in that direction.
             *   0: normal shading rate
             *   -1: 2x finer shading rate (sample shading, not directional)
             *   -2: 4x finer shading rate (sample shading, not directional)
             *
             * Sample shading can't go above 8 samples, so both numbers can't be -2
             * at the same time.
             */
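            /* X rate = 1, Y rate = 1, i.e. 2x coarser shading in both directions (2x2). */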
            rates = LLVMConstInt(ctx->ac.i32, (1 << 2) | (1 << 4), 0);
         }

         /* If Pos.W != 1 (typical for non-GUI elements), use 2x2 coarse shading. */
         rates = LLVMBuildSelect(ctx->ac.builder,
                                 LLVMBuildFCmp(ctx->ac.builder, LLVMRealUNE,
                                               pos_args[0].out[3], ctx->ac.f32_1, ""),
                                 rates, ctx->ac.i32_0, "");

         LLVMValueRef v = ac_to_integer(&ctx->ac, pos_args[1].out[1]);
         v = LLVMBuildOr(ctx->ac.builder, v, rates, "");
         pos_args[1].out[1] = ac_to_float(&ctx->ac, v);
      }

      if (ctx->screen->info.gfx_level >= GFX9) {
         /* GFX9 has the layer in out.z[10:0] and the viewport
          * index in out.z[19:16].
          */
         if (shader->selector->info.writes_layer)
            pos_args[1].out[2] = layer_value;

         if (shader->selector->info.writes_viewport_index) {
            LLVMValueRef v = viewport_index_value;

            v = ac_to_integer(&ctx->ac, v);
            v = LLVMBuildShl(ctx->ac.builder, v, LLVMConstInt(ctx->ac.i32, 16, 0), "");
            v = LLVMBuildOr(ctx->ac.builder, v, ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
            pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
            pos_args[1].enabled_channels |= 1 << 2;
         }
      } else {
         if (shader->selector->info.writes_layer)
            pos_args[1].out[2] = layer_value;

         if (shader->selector->info.writes_viewport_index) {
            pos_args[1].out[3] = viewport_index_value;
            pos_args[1].enabled_channels |= 1 << 3;
         }
      }
   }

   for (i = 0; i < 4; i++)
      if (pos_args[i].out[0])
         shader->info.nr_pos_exports++;

   /* GFX10 (Navi1x) skips POS0 exports if EXEC=0 and DONE=0, causing a hang.
    * Setting valid_mask=1 prevents it and has no other effect.
    */
   if (ctx->screen->info.gfx_level == GFX10)
      pos_args[0].valid_mask = 1;

   pos_idx = 0;
   for (i = 0; i < 4; i++) {
      if (!pos_args[i].out[0])
         continue;

      /* Specify the target we are exporting */
      pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

      if (pos_idx == shader->info.nr_pos_exports) {
         /* Specify that this is the last export */
         pos_args[i].done = 1;

         /* If a shader has no param exports, rasterization can start before
          * the shader finishes and thus memory stores might not finish before
          * the pixel shader starts.
          *
          * VLOAD is for atomics with return.
          */
         if (ctx->screen->info.gfx_level >= GFX10 &&
             !shader->info.nr_param_exports &&
             shader->selector->info.base.writes_memory)
            ac_build_waitcnt(&ctx->ac, AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      }

      ac_build_export(&ctx->ac, &pos_args[i]);
   }

   if (!shader->info.nr_param_exports)
      return;

   /* Build parameter exports. Use 2 loops to export params in ascending order.
    * 32 is the maximum number of parameter exports.
    */
   struct ac_export_args param_exports[32] = {};
   uint64_t vs_output_param_mask = shader->info.vs_output_param_mask;

   while (vs_output_param_mask) {
      unsigned i = u_bit_scan64(&vs_output_param_mask);
      unsigned offset = shader->info.vs_output_param_offset[outputs[i].semantic];

      assert(offset <= AC_EXP_PARAM_OFFSET_31);
      assert(!param_exports[offset].enabled_channels);

      si_llvm_init_vs_export_args(ctx, outputs[i].values, V_008DFC_SQ_EXP_PARAM + offset,
                                  &param_exports[offset]);
   }

   if (ctx->screen->info.gfx_level >= GFX11) {
      /* Store parameter exports to alloca variables, so that we can read them outside this branch. */
      for (unsigned i = 0; i < shader->info.nr_param_exports; i++) {
         for (unsigned chan = 0; chan < 4; chan++) {
            param_exports[i].out[chan] =
               ac_build_alloca_init(&ctx->ac, param_exports[i].out[chan], "");
         }
      }
      ac_build_endif(&ctx->ac, 0);

      if (!num_export_threads)
         num_export_threads = si_unpack_param(ctx, ctx->args.merged_wave_info, 0, 8);

      /* We should always store full vec4s in groups of 8 lanes for the best performance even if
       * some of them are garbage or have unused components, so align the number of export threads
       * to 8.
       */
      num_export_threads = LLVMBuildAdd(ctx->ac.builder, num_export_threads,
                                        LLVMConstInt(ctx->ac.i32, 7, 0), "");
      num_export_threads = LLVMBuildAnd(ctx->ac.builder, num_export_threads,
                                        LLVMConstInt(ctx->ac.i32, ~7, 0), "");
      ac_build_ifcc(&ctx->ac,
                    LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
                                  ac_get_thread_id(&ctx->ac), num_export_threads, ""), 0);

      /* Get the attribute ring address and descriptor. */
      LLVMValueRef attr_address;
      if (ctx->stage == MESA_SHADER_VERTEX && shader->selector->info.base.vs.blit_sgprs_amd) {
         LLVMValueRef ptr =
            LLVMBuildPointerCast(ctx->ac.builder,
                                 ac_get_arg(&ctx->ac, ctx->internal_bindings),
                                 LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_CONST_32BIT), "");
         attr_address = ac_build_load_to_sgpr(&ctx->ac, ptr,
                                              LLVMConstInt(ctx->ac.i32, SI_GS_ATTRIBUTE_RING * 4, 0));
      } else {
         attr_address = ac_get_arg(&ctx->ac, ctx->gs_attr_address);
      }

      unsigned stride = 16 * shader->info.nr_param_exports;
      LLVMValueRef attr_desc[4] = {
         attr_address,
         LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi) |
                                   S_008F04_STRIDE(stride) |
                                   S_008F04_SWIZZLE_ENABLE_GFX11(3) /* 16B */, 0),
         LLVMConstInt(ctx->ac.i32, 0xffffffff, 0),
         LLVMConstInt(ctx->ac.i32, S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                                   S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_32_32_32_FLOAT) |
                                   S_008F0C_INDEX_STRIDE(2) /* 32 elements */, 0),
      };
      LLVMValueRef attr_rsrc = ac_build_gather_values(&ctx->ac, attr_desc, 4);
      LLVMValueRef attr_offset = LLVMBuildShl(ctx->ac.builder,
                                              si_unpack_param(ctx, ctx->args.gs_attr_offset, 0, 15),
                                              LLVMConstInt(ctx->ac.i32, 9, 0), ""); /* 512B increments */
      LLVMValueRef vindex = gfx10_get_thread_id_in_tg(ctx);

      LLVMValueRef soffset[32];

      /* Compute scalar offsets first. */
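      /* Each attribute occupies 16 bytes per vertex and the ring is swizzled
       * in groups of 32 vertices (INDEX_STRIDE above), so consecutive
       * attribute slots are 32 * 16 bytes apart in the scalar offset.
       */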
      for (unsigned i = 0; i < shader->info.nr_param_exports; i++) {
         soffset[i] = LLVMBuildAdd(ctx->ac.builder, attr_offset,
                                   LLVMConstInt(ctx->ac.i32, 32 * i * 16, 0), "");
      }

      /* Write attributes to the attribute ring buffer. */
      for (unsigned i = 0; i < shader->info.nr_param_exports; i++) {
         for (unsigned chan = 0; chan < 4; chan++) {
            param_exports[i].out[chan] =
               LLVMBuildLoad2(ctx->ac.builder, ctx->ac.f32, param_exports[i].out[chan], "");
         }

         LLVMValueRef vdata = ac_build_gather_values_extended(&ctx->ac, param_exports[i].out,
                                                              4, 1, false);

         ac_build_buffer_store_dword(&ctx->ac, attr_rsrc, vdata, vindex,
                                     ctx->ac.i32_0, soffset[i], ac_swizzled);
      }
   } else {
      /* Export attributes using parameter exports. */
      for (unsigned i = 0; i < shader->info.nr_param_exports; i++)
         ac_build_export(&ctx->ac, &param_exports[i]);
   }
}

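/* Epilogue of the hardware VS stage: load the output values the shader
 * stored, then emit streamout and the position/parameter exports.
 */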
void si_llvm_vs_build_end(struct si_shader_context *ctx)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct si_shader_output_values *outputs = NULL;
   LLVMValueRef *addrs = ctx->abi.outputs;
   int i, j;

   assert(!ctx->shader->is_gs_copy_shader);
   assert(info->num_outputs <= AC_LLVM_MAX_OUTPUTS);

   outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

   for (i = 0; i < info->num_outputs; i++) {
      outputs[i].semantic = info->output_semantic[i];

      for (j = 0; j < 4; j++) {
         outputs[i].values[j] = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.f32, addrs[4 * i + j], "");
         outputs[i].vertex_streams = info->output_streams[i];
      }
   }

   if (!ctx->screen->use_ngg_streamout && ctx->so.num_outputs)
      si_llvm_emit_streamout(ctx, outputs, i, 0);

   /* Export PrimitiveID. */
   if (ctx->shader->key.ge.mono.u.vs_export_prim_id) {
      outputs[i].semantic = VARYING_SLOT_PRIMITIVE_ID;
      outputs[i].vertex_streams = 0;
      outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
      for (j = 1; j < 4; j++)
         outputs[i].values[j] = LLVMConstReal(ctx->ac.f32, 0);
      i++;
   }

   si_llvm_build_vs_exports(ctx, NULL, outputs, i);
   FREE(outputs);
}

/**
 * Build the vertex shader prolog function.
 *
 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
 * All inputs are returned unmodified. The vertex load indices are
 * stored after them, which will be used by the API VS for fetching inputs.
 *
 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
 *   input_v0,
 *   input_v1,
 *   input_v2,
 *   input_v3,
 *   (VertexID + BaseVertex),
 *   (InstanceID + StartInstance),
 *   (InstanceID / 2 + StartInstance)
 */
void si_llvm_build_vs_prolog(struct si_shader_context *ctx, union si_shader_part_key *key)
{
   LLVMTypeRef *returns;
   LLVMValueRef ret, func;
   int num_returns, i;
   unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
   unsigned num_input_vgprs =
      key->vs_prolog.num_merged_next_stage_vgprs + 4;
   struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
   struct ac_arg input_vgpr_param[10];
   LLVMValueRef input_vgprs[10];
   unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs + num_input_vgprs;
   unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;

   memset(&ctx->args, 0, sizeof(ctx->args));

   /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
   returns = alloca((num_all_input_regs + key->vs_prolog.num_inputs) * sizeof(LLVMTypeRef));
   num_returns = 0;

   /* Declare input and output SGPRs. */
   for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
      ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &input_sgpr_param[i]);
      returns[num_returns++] = ctx->ac.i32;
   }

   /* Preloaded VGPRs (outputs must be floats) */
   for (i = 0; i < num_input_vgprs; i++) {
      ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &input_vgpr_param[i]);
      returns[num_returns++] = ctx->ac.f32;
   }

   /* Vertex load indices. */
   for (i = 0; i < key->vs_prolog.num_inputs; i++)
      returns[num_returns++] = ctx->ac.f32;

   /* Create the function. */
   si_llvm_create_func(ctx, "vs_prolog", returns, num_returns, 0);
   func = ctx->main_fn;

   for (i = 0; i < num_input_vgprs; i++) {
      input_vgprs[i] = ac_get_arg(&ctx->ac, input_vgpr_param[i]);
   }

   if (key->vs_prolog.num_merged_next_stage_vgprs) {
      if (!key->vs_prolog.is_monolithic)
         ac_init_exec_full_mask(&ctx->ac);

      if (key->vs_prolog.as_ls && ctx->screen->info.has_ls_vgpr_init_bug) {
         /* If there are no HS threads, SPI loads the LS VGPRs
          * starting at VGPR 0. Shift them back to where they
          * belong.
          */
         LLVMValueRef has_hs_threads =
            LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
                          si_unpack_param(ctx, input_sgpr_param[3], 8, 8), ctx->ac.i32_0, "");

         for (i = 4; i > 0; --i) {
            input_vgprs[i + 1] = LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
                                                 input_vgprs[i + 1], input_vgprs[i - 1], "");
         }
      }
   }

   /* The culling code stored the LDS addresses of the VGPRs into those VGPRs. Load them. */
   if (key->vs_prolog.load_vgprs_after_culling) {
      for (i = 5; i <= 8; i++) {
         bool is_tes_rel_patch_id = i == 7;
         LLVMTypeRef t = is_tes_rel_patch_id ? ctx->ac.i8 : ctx->ac.i32;
         input_vgprs[i] = LLVMBuildIntToPtr(ctx->ac.builder, input_vgprs[i],
                                            LLVMPointerType(t, AC_ADDR_SPACE_LDS), "");
         input_vgprs[i] = LLVMBuildLoad2(ctx->ac.builder, t, input_vgprs[i], "");
         if (is_tes_rel_patch_id)
            input_vgprs[i] = LLVMBuildZExt(ctx->ac.builder, input_vgprs[i], ctx->ac.i32, "");
      }
   }

   unsigned vertex_id_vgpr = first_vs_vgpr;
   unsigned instance_id_vgpr = ctx->screen->info.gfx_level >= GFX10
                                  ? first_vs_vgpr + 3
                                  : first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);

   ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
   ctx->abi.instance_id = input_vgprs[instance_id_vgpr];

   /* Copy inputs to outputs. This should be a no-op, as the registers match,
    * but it will prevent the compiler from overwriting them unintentionally.
    */
   ret = ctx->return_value;
   for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
      LLVMValueRef p = LLVMGetParam(func, i);
      ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
   }
   for (i = 0; i < num_input_vgprs; i++) {
      LLVMValueRef p = input_vgprs[i];

      if (i == vertex_id_vgpr)
         p = ctx->abi.vertex_id;
      else if (i == instance_id_vgpr)
         p = ctx->abi.instance_id;

      p = ac_to_float(&ctx->ac, p);
      ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, key->vs_prolog.num_input_sgprs + i, "");
   }

   /* Compute vertex load indices from instance divisors. */
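   /* Per input: divisor 0 yields VertexID + BaseVertex, divisor 1 yields
    * InstanceID + StartInstance, and a fetched divisor d yields
    * InstanceID / d + StartInstance (computed with a fast unsigned division).
    */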
   LLVMValueRef instance_divisor_constbuf = NULL;

   if (key->vs_prolog.states.instance_divisor_is_fetched) {
      LLVMValueRef list = si_prolog_get_internal_bindings(ctx);
      LLVMValueRef buf_index = LLVMConstInt(ctx->ac.i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
      instance_divisor_constbuf = ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
   }

   for (i = 0; i < key->vs_prolog.num_inputs; i++) {
      bool divisor_is_one = key->vs_prolog.states.instance_divisor_is_one & (1u << i);
      bool divisor_is_fetched = key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
      LLVMValueRef index = NULL;

      if (divisor_is_one) {
         index = ctx->abi.instance_id;
      } else if (divisor_is_fetched) {
         LLVMValueRef udiv_factors[4];

         for (unsigned j = 0; j < 4; j++) {
            udiv_factors[j] = si_buffer_load_const(ctx, instance_divisor_constbuf,
                                                   LLVMConstInt(ctx->ac.i32, i * 16 + j * 4, 0));
            udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
         }
         /* The faster NUW version doesn't work when InstanceID == UINT_MAX.
          * Such InstanceID might not be achievable in a reasonable time though.
          */
         index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id, udiv_factors[0],
                                        udiv_factors[1], udiv_factors[2], udiv_factors[3]);
      }

      if (divisor_is_one || divisor_is_fetched) {
         /* Add StartInstance. */
         index =
            LLVMBuildAdd(ctx->ac.builder, index,
                         LLVMGetParam(ctx->main_fn, user_sgpr_base + SI_SGPR_START_INSTANCE), "");
      } else {
         /* VertexID + BaseVertex */
         index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
                              LLVMGetParam(func, user_sgpr_base + SI_SGPR_BASE_VERTEX), "");
      }

      index = ac_to_float(&ctx->ac, index);
      ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index, ctx->args.arg_count + i, "");
   }

   si_llvm_build_ret(ctx, ret);
}

void si_llvm_init_vs_callbacks(struct si_shader_context *ctx, bool ngg_cull_shader)
{
   ctx->abi.load_inputs = si_load_vs_input;
}