• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2016 Red Hat.
3  * Copyright © 2016 Bas Nieuwenhuizen
4  *
5  * based in part on anv driver which is:
6  * Copyright © 2015 Intel Corporation
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25  * IN THE SOFTWARE.
26  */
27 
28 #include "radv_private.h"
29 #include "radv_shader.h"
30 #include "radv_shader_helper.h"
31 #include "radv_shader_args.h"
32 #include "radv_debug.h"
33 #include "nir/nir.h"
34 
35 #include "sid.h"
36 #include "ac_binary.h"
37 #include "ac_llvm_util.h"
38 #include "ac_llvm_build.h"
39 #include "ac_shader_abi.h"
40 #include "ac_shader_util.h"
41 #include "ac_exp_param.h"
42 
43 #define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
44 
/* Per-compile state threaded through all the NIR->LLVM translation
 * callbacks below.  One instance lives for the duration of one shader
 * (or merged-shader) compilation. */
struct radv_shader_context {
	struct ac_llvm_context ac;            /* shared ac LLVM emission helpers/state */
	const struct nir_shader *shader;      /* NIR shader being translated */
	struct ac_shader_abi abi;             /* callback table handed to ac_nir_to_llvm */
	const struct radv_shader_args *args;  /* argument layout, shader_info and options */

	gl_shader_stage stage;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;           /* the LLVM entry point, set in create_function() */

	/* Descriptor set pointers, populated by load_descriptor_sets(). */
	LLVMValueRef descriptor_sets[MAX_SETS];

	/* Pointer to the driver's internal ring-descriptor table
	 * (see "llvm.amdgcn.implicit.buffer.ptr" in create_function()). */
	LLVMValueRef ring_offsets;

	LLVMValueRef rel_auto_id;

	LLVMValueRef gs_wave_id;              /* wave id used for GS sendmsg */
	LLVMValueRef gs_vtx_offset[6];        /* per-vertex ESGS offsets (dwords) */

	/* Ring buffer descriptors for the legacy (non-NGG) geometry path and
	 * for tessellation offchip/factor traffic. */
	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];

	/* Bitmask of output slots the shader actually writes. */
	uint64_t output_mask;

	/* Per-stream GS emission counters (NGG uses curprim/generated). */
	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef gs_curprim_verts[4];
	LLVMValueRef gs_generated_prims[4];
	LLVMValueRef gs_ngg_emit;
	LLVMValueRef gs_ngg_scratch;

	/* Tessellation layout constants used by the LDS/offchip addressing
	 * helpers below. */
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t tcs_tess_lvl_inner;
	uint32_t tcs_tess_lvl_outer;

	LLVMValueRef vertexptr; /* GFX10 only */
};
88 
/* One output slot's worth of values plus where it goes, used when
 * building export/param stores. */
struct radv_shader_output_values {
	LLVMValueRef values[4];  /* up to vec4 of component values */
	unsigned slot_name;      /* VARYING_SLOT_* of the output */
	unsigned slot_index;     /* index for arrayed slots (e.g. clip distances) */
	unsigned usage_mask;     /* which of the 4 components are written */
};
95 
/* Recover the enclosing radv_shader_context from the embedded
 * ac_shader_abi pointer that the abi callbacks receive. */
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
102 
get_rel_patch_id(struct radv_shader_context * ctx)103 static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
104 {
105 	switch (ctx->stage) {
106 	case MESA_SHADER_TESS_CTRL:
107 		return ac_unpack_param(&ctx->ac,
108 				       ac_get_arg(&ctx->ac, ctx->args->ac.tcs_rel_ids),
109 				       0, 8);
110 	case MESA_SHADER_TESS_EVAL:
111 		return ac_get_arg(&ctx->ac, ctx->args->tes_rel_patch_id);
112 		break;
113 	default:
114 		unreachable("Illegal stage");
115 	}
116 }
117 
118 /* Tessellation shaders pass outputs to the next shader using LDS.
119  *
120  * LS outputs = TCS inputs
121  * TCS outputs = TES inputs
122  *
123  * The LDS layout is:
124  * - TCS inputs for patch 0
125  * - TCS inputs for patch 1
126  * - TCS inputs for patch 2		= get_tcs_in_current_patch_offset (if RelPatchID==2)
127  * - ...
128  * - TCS outputs for patch 0            = get_tcs_out_patch0_offset
129  * - Per-patch TCS outputs for patch 0  = get_tcs_out_patch0_patch_data_offset
130  * - TCS outputs for patch 1
131  * - Per-patch TCS outputs for patch 1
132  * - TCS outputs for patch 2            = get_tcs_out_current_patch_offset (if RelPatchID==2)
133  * - Per-patch TCS outputs for patch 2  = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
134  * - ...
135  *
136  * All three shaders VS(LS), TCS, TES share the same LDS space.
137  */
138 static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context * ctx)139 get_tcs_in_patch_stride(struct radv_shader_context *ctx)
140 {
141 	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
142 	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
143 	uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * input_vertex_size;
144 
145 	input_patch_size /= 4;
146 	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
147 }
148 
149 static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context * ctx)150 get_tcs_out_patch_stride(struct radv_shader_context *ctx)
151 {
152 	uint32_t num_tcs_outputs = ctx->args->shader_info->tcs.num_linked_outputs;
153 	uint32_t num_tcs_patch_outputs = ctx->args->shader_info->tcs.num_linked_patch_outputs;
154 	uint32_t output_vertex_size = num_tcs_outputs * 16;
155 	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
156 	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
157 	output_patch_size /= 4;
158 	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
159 }
160 
161 static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context * ctx)162 get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
163 {
164 	uint32_t num_tcs_outputs = ctx->args->shader_info->tcs.num_linked_outputs;
165 	uint32_t output_vertex_size = num_tcs_outputs * 16;
166 	output_vertex_size /= 4;
167 	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
168 }
169 
170 static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context * ctx)171 get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
172 {
173 	assert (ctx->stage == MESA_SHADER_TESS_CTRL);
174 	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
175 	uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * input_vertex_size;
176 	uint32_t output_patch0_offset = input_patch_size;
177 	unsigned num_patches = ctx->tcs_num_patches;
178 
179 	output_patch0_offset *= num_patches;
180 	output_patch0_offset /= 4;
181 	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
182 }
183 
184 static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context * ctx)185 get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
186 {
187 	assert (ctx->stage == MESA_SHADER_TESS_CTRL);
188 	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
189 	uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * input_vertex_size;
190 	uint32_t output_patch0_offset = input_patch_size;
191 
192 	uint32_t num_tcs_outputs = ctx->args->shader_info->tcs.num_linked_outputs;
193 	uint32_t output_vertex_size = num_tcs_outputs * 16;
194 	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
195 	unsigned num_patches = ctx->tcs_num_patches;
196 
197 	output_patch0_offset *= num_patches;
198 	output_patch0_offset += pervertex_output_patch_size;
199 	output_patch0_offset /= 4;
200 	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
201 }
202 
203 static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context * ctx)204 get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
205 {
206 	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
207 	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
208 
209 	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
210 }
211 
212 static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context * ctx)213 get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
214 {
215 	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
216 	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
217 	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
218 
219 	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
220 			     patch0_offset);
221 }
222 
223 static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context * ctx)224 get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
225 {
226 	LLVMValueRef patch0_patch_data_offset =
227 		get_tcs_out_patch0_patch_data_offset(ctx);
228 	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
229 	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
230 
231 	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
232 			     patch0_patch_data_offset);
233 }
234 
235 static LLVMValueRef
create_llvm_function(struct ac_llvm_context * ctx,LLVMModuleRef module,LLVMBuilderRef builder,const struct ac_shader_args * args,enum ac_llvm_calling_convention convention,unsigned max_workgroup_size,const struct radv_nir_compiler_options * options)236 create_llvm_function(struct ac_llvm_context *ctx, LLVMModuleRef module,
237                      LLVMBuilderRef builder,
238 		     const struct ac_shader_args *args,
239 		     enum ac_llvm_calling_convention convention,
240 		     unsigned max_workgroup_size,
241 		     const struct radv_nir_compiler_options *options)
242 {
243 	LLVMValueRef main_function =
244 		ac_build_main(args, ctx, convention, "main", ctx->voidt, module);
245 
246 	if (options->address32_hi) {
247 		ac_llvm_add_target_dep_function_attr(main_function,
248 						     "amdgpu-32bit-address-high-bits",
249 						     options->address32_hi);
250 	}
251 
252 	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);
253 
254 	return main_function;
255 }
256 
257 static void
load_descriptor_sets(struct radv_shader_context * ctx)258 load_descriptor_sets(struct radv_shader_context *ctx)
259 {
260 	uint32_t mask = ctx->args->shader_info->desc_set_used_mask;
261 	if (ctx->args->shader_info->need_indirect_descriptor_sets) {
262 		LLVMValueRef desc_sets =
263 			ac_get_arg(&ctx->ac, ctx->args->descriptor_sets[0]);
264 		while (mask) {
265 			int i = u_bit_scan(&mask);
266 
267 			ctx->descriptor_sets[i] =
268 				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
269 						      LLVMConstInt(ctx->ac.i32, i, false));
270 
271 		}
272 	} else {
273 		while (mask) {
274 			int i = u_bit_scan(&mask);
275 
276 			ctx->descriptor_sets[i] =
277 				ac_get_arg(&ctx->ac, ctx->args->descriptor_sets[i]);
278 		}
279 	}
280 }
281 
282 static enum ac_llvm_calling_convention
get_llvm_calling_convention(LLVMValueRef func,gl_shader_stage stage)283 get_llvm_calling_convention(LLVMValueRef func, gl_shader_stage stage)
284 {
285 	switch (stage) {
286 	case MESA_SHADER_VERTEX:
287 	case MESA_SHADER_TESS_EVAL:
288 		return AC_LLVM_AMDGPU_VS;
289 		break;
290 	case MESA_SHADER_GEOMETRY:
291 		return AC_LLVM_AMDGPU_GS;
292 		break;
293 	case MESA_SHADER_TESS_CTRL:
294 		return AC_LLVM_AMDGPU_HS;
295 		break;
296 	case MESA_SHADER_FRAGMENT:
297 		return AC_LLVM_AMDGPU_PS;
298 		break;
299 	case MESA_SHADER_COMPUTE:
300 		return AC_LLVM_AMDGPU_CS;
301 		break;
302 	default:
303 		unreachable("Unhandle shader type");
304 	}
305 }
306 
307 /* Returns whether the stage is a stage that can be directly before the GS */
is_pre_gs_stage(gl_shader_stage stage)308 static bool is_pre_gs_stage(gl_shader_stage stage)
309 {
310 	return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
311 }
312 
/* Build the LLVM entry point for the current stage and set up the state
 * later emission relies on: the ring-descriptor table pointer, the
 * descriptor set pointers and (where needed) the LDS base pointer. */
static void create_function(struct radv_shader_context *ctx,
                            gl_shader_stage stage,
                            bool has_previous_stage)
{
	if (ctx->ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(stage) && ctx->args->options->key.vs_common_out.as_ngg) {
			/* On GFX10, VS is merged into GS for NGG. */
			stage = MESA_SHADER_GEOMETRY;
			has_previous_stage = true;
		}
	}

	/* NOTE(review): ctx->main_function is passed to
	 * get_llvm_calling_convention() before being assigned below; the
	 * callee does not read its func parameter, but confirm. */
	ctx->main_function = create_llvm_function(
	    &ctx->ac, ctx->ac.module, ctx->ac.builder, &ctx->args->ac,
	    get_llvm_calling_convention(ctx->main_function, stage),
	    ctx->max_workgroup_size,
	    ctx->args->options);

	/* The driver's ring-descriptor table is reached through the implicit
	 * buffer pointer intrinsic, then viewed as an array of v4i32 descs. */
	ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
					       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
					       NULL, 0, AC_FUNC_ATTR_READNONE);
	ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
					     ac_array_in_const_addr_space(ctx->ac.v4i32), "");

	load_descriptor_sets(ctx);

	/* Stages that communicate through LDS need the LDS base declared. */
	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->args->options->key.vs_common_out.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

}
347 
348 
/* abi callback: return a pointer to the descriptor for (set, binding),
 * optionally indexed by a dynamic array index.
 *
 * Dynamic UBO/SSBO descriptors are stored after the push constants in the
 * push-constant buffer; all other descriptors come from the set pointers
 * populated by load_descriptor_sets(). */
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->args->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		/* Dynamic descriptors: 16 bytes each, placed right after the
		 * push constants and addressed via the push-constant pointer. */
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ac_get_arg(&ctx->ac, ctx->args->ac.push_constants);
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	/* Inline uniform blocks are not arrayed; ignore the index for them. */
	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	/* Mark the pointer uniform so the descriptor can live in SGPRs. */
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	return desc_ptr;
}
382 
383 
384 /* The offchip buffer layout for TCS->TES is
385  *
386  * - attribute 0 of patch 0 vertex 0
387  * - attribute 0 of patch 0 vertex 1
388  * - attribute 0 of patch 0 vertex 2
389  *   ...
390  * - attribute 0 of patch 1 vertex 0
391  * - attribute 0 of patch 1 vertex 1
392  *   ...
393  * - attribute 1 of patch 0 vertex 0
394  * - attribute 1 of patch 0 vertex 1
395  *   ...
396  * - per patch attribute 0 of patch 0
397  * - per patch attribute 0 of patch 1
398  *   ...
399  *
400  * Note that every attribute has 4 components.
401  */
get_non_vertex_index_offset(struct radv_shader_context * ctx)402 static LLVMValueRef get_non_vertex_index_offset(struct radv_shader_context *ctx)
403 {
404 	uint32_t num_patches = ctx->tcs_num_patches;
405 	uint32_t num_tcs_outputs;
406 	if (ctx->stage == MESA_SHADER_TESS_CTRL)
407 		num_tcs_outputs = ctx->args->shader_info->tcs.num_linked_outputs;
408 	else
409 		num_tcs_outputs = ctx->args->shader_info->tes.num_linked_inputs;
410 
411 	uint32_t output_vertex_size = num_tcs_outputs * 16;
412 	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
413 
414 	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
415 }
416 
/* Distance, in vec4 slots, between two consecutive attributes in the
 * offchip buffer (see the layout comment above). */
static LLVMValueRef calc_param_stride(struct radv_shader_context *ctx,
				      LLVMValueRef vertex_index)
{
	unsigned stride = ctx->tcs_num_patches;

	/* Per-vertex attributes are additionally replicated per vertex. */
	if (vertex_index)
		stride *= ctx->shader->info.tess.tcs_vertices_out;

	return LLVMConstInt(ctx->ac.i32, stride, false);
}
427 
/* Compute the byte address in the TCS->TES offchip buffer of a given
 * (vertex, attribute) pair, following the layout documented above.
 * A NULL vertex_index selects the per-patch attribute area. */
static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
                                               LLVMValueRef vertex_index,
                                               LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		/* Per-vertex: slot = rel_patch_id * verts_per_patch + vertex. */
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		/* Per-patch: one slot per patch. */
		base_addr = rel_patch_id;
	}

	/* Step over earlier attributes; each is param_stride slots apart. */
	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
	                         LLVMBuildMul(ctx->ac.builder, param_index,
	                                      param_stride, ""), "");

	/* Scale vec4 slots to bytes. */
	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch attributes start after all per-vertex ones. */
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
		                         patch_data_offset, "");
	}
	return base_addr;
}
459 
/* Convenience wrapper: fold a constant attribute index (and an optional
 * dynamic indirect index) into one param_index and compute the offchip
 * buffer address for it. */
static LLVMValueRef get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
						      unsigned param,
						      LLVMValueRef vertex_index,
						      LLVMValueRef indir_index)
{
	LLVMValueRef param_index = LLVMConstInt(ctx->ac.i32, param, false);

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   indir_index, "");

	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
475 
476 static LLVMValueRef
get_dw_address(struct radv_shader_context * ctx,LLVMValueRef dw_addr,unsigned param,LLVMValueRef vertex_index,LLVMValueRef stride,LLVMValueRef indir_index)477 get_dw_address(struct radv_shader_context *ctx,
478 	       LLVMValueRef dw_addr,
479 	       unsigned param,
480 	       LLVMValueRef vertex_index,
481 	       LLVMValueRef stride,
482 	       LLVMValueRef indir_index)
483 
484 {
485 
486 	if (vertex_index) {
487 		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
488 				       LLVMBuildMul(ctx->ac.builder,
489 						    vertex_index,
490 						    stride, ""), "");
491 	}
492 
493 	if (indir_index)
494 		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
495 				       LLVMBuildMul(ctx->ac.builder, indir_index,
496 						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
497 
498 	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
499 			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");
500 
501 	return dw_addr;
502 }
503 
/* abi callback: read TCS varyings (inputs, per-vertex outputs or per-patch
 * outputs) from LDS and gather the requested components into a vector.
 * A NULL vertex_index means a per-patch access. */
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = driver_location;

	bool is_patch = vertex_index == NULL;

	if (load_input) {
		/* TCS inputs: written by the LS stage at the start of LDS. */
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			/* Per-vertex TCS outputs. */
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			/* Per-patch outputs have no per-vertex stride. */
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, vertex_index, stride, indir_index);

	/* Loads components [0, component + num_components); the gather below
	 * keeps only [component, component + num_components).
	 * NOTE(review): assumes component + num_components <= 4 (value[] has
	 * 4 slots) — confirm against callers. */
	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
545 
/* abi callback: store a TCS output.  The value goes to the offchip buffer
 * (for TES) and, when the TCS reads it back or it is a tess factor, also
 * to LDS. */
static void
store_tcs_output(struct ac_shader_abi *abi,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 LLVMValueRef src,
		 unsigned writemask,
		 unsigned component,
		 unsigned location,
		 unsigned driver_location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const bool is_patch = vertex_index == NULL;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	LLVMValueRef oc_lds = ac_get_arg(&ctx->ac, ctx->args->oc_lds);
	unsigned param = driver_location;
	bool store_lds = true;

	/* Skip the LDS copy when no TCS invocation reads this output back. */
	if (is_patch) {
		if (!(ctx->shader->info.patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->shader->info.outputs_read & (1ULL << location)))
			store_lds = false;
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, vertex_index, stride, param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		/* The writemask is pre-shifted by `component`; undo that to
		 * index into src. */
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		/* Tess factors always go to LDS; they are read again when the
		 * tess-factor ring is written. */
		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
				                           LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		/* Partial writes go dword by dword; a full vec4 write is done
		 * with the single 4-dword store after the loop. */
		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, oc_lds,
						    4 * chan, ac_glc);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, oc_lds, 0, ac_glc);
	}
}
613 
614 static LLVMValueRef
load_tes_input(struct ac_shader_abi * abi,LLVMTypeRef type,LLVMValueRef vertex_index,LLVMValueRef param_index,unsigned driver_location,unsigned component,unsigned num_components,bool load_input)615 load_tes_input(struct ac_shader_abi *abi,
616 	       LLVMTypeRef type,
617 	       LLVMValueRef vertex_index,
618 	       LLVMValueRef param_index,
619 	       unsigned driver_location,
620 	       unsigned component,
621 	       unsigned num_components,
622 	       bool load_input)
623 {
624 	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
625 	LLVMValueRef buf_addr;
626 	LLVMValueRef result;
627 	LLVMValueRef oc_lds = ac_get_arg(&ctx->ac, ctx->args->oc_lds);
628 	unsigned param = driver_location;
629 
630 	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, vertex_index, param_index);
631 
632 	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
633 	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");
634 
635 	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
636 				      buf_addr, oc_lds, 0, ac_glc, true, false);
637 	result = ac_trim_vector(&ctx->ac, result, num_components);
638 	return result;
639 }
640 
/* abi callback: load a GS input written by the previous (ES) stage.
 * On GFX9+ the ES outputs are read from LDS; on older chips they come
 * from the ESGS ring buffer. */
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param = driver_location;
	unsigned vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	/* Pre-GFX9 path: gs_vtx_offset is in dwords, the ring is
	 * byte-addressed, so scale by 4. */
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			/* LDS path: gs_vtx_offset is the dword base of this
			 * vertex's ES outputs. */
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			                       LLVMConstInt(ctx->ac.i32, param * 4 + i, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			/* Ring path: components are 256 bytes apart.
			 * NOTE(review): presumably 4 bytes * 64 lanes — confirm
			 * against the ESGS ring layout. */
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, ac_glc, true, false);
		}

		/* 16-bit inputs were stored widened to 32 bits; narrow back. */
		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
689 
/* Index into the sample-position table at which the entries for the given
 * sample count start; unsupported counts map to 0. */
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	switch (num_samples) {
	case 2:
		return 1;
	case 4:
		return 3;
	case 8:
		return 7;
	default:
		return 0;
	}
}
710 
/* abi callback: load the position of a given sample from the driver's
 * sample-position ring, honoring the per-sample-count table offset. */
static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
					 LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	/* Locate the sample-position entry in the ring table and view it as
	 * an array of v2f32. */
	LLVMValueRef slot = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
	LLVMValueRef table = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &slot, 1, "");

	table = LLVMBuildBitCast(ctx->ac.builder, table,
				 ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	const uint32_t base =
		radv_get_sample_pos_offset(ctx->args->options->key.fs.num_samples);

	LLVMValueRef pos_index =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, base, false), "");

	return ac_build_load_invariant(&ctx->ac, table, pos_index);
}
733 
734 
load_sample_mask_in(struct ac_shader_abi * abi)735 static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
736 {
737 	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
738 	uint8_t log2_ps_iter_samples;
739 
740 	if (ctx->args->shader_info->ps.force_persample) {
741 		log2_ps_iter_samples =
742 			util_logbase2(ctx->args->options->key.fs.num_samples);
743 	} else {
744 		log2_ps_iter_samples = ctx->args->options->key.fs.log2_ps_iter_samples;
745 	}
746 
747 	LLVMValueRef result, sample_id;
748 	if (log2_ps_iter_samples) {
749 		/* gl_SampleMaskIn[0] = (SampleCoverage & (1 << gl_SampleID)). */
750 		sample_id = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ac.ancillary), 8, 4);
751 		sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, 1, false), sample_id, "");
752 		result = LLVMBuildAnd(ctx->ac.builder, sample_id,
753 				      ac_get_arg(&ctx->ac, ctx->args->ac.sample_coverage), "");
754 	} else {
755 		result = ac_get_arg(&ctx->ac, ctx->args->ac.sample_coverage);
756 	}
757 
758 	return result;
759 }
760 
761 
762 static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
763 				     unsigned stream,
764 				     LLVMValueRef vertexidx,
765 				     LLVMValueRef *addrs);
766 
/* abi callback for emit_vertex: on the legacy GS path, copy all current
 * output values for this stream to the GSVS ring and send the EMIT
 * message; on NGG, defer to gfx10_ngg_gs_emit_vertex(). */
static void
visit_emit_vertex_with_counter(struct ac_shader_abi *abi, unsigned stream,
			       LLVMValueRef vertexidx, LLVMValueRef *addrs)
{
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->args->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_vertex(ctx, stream, vertexidx, addrs);
		return;
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->args->shader_info->gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->args->shader_info->gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		/* Skip outputs this shader never writes, or outputs bound to a
		 * different vertex stream. */
		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			/* Component-major ring layout: all vertices of dword 0,
			 * then all vertices of dword 1, etc. */
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->shader->info.gs.vertices_out, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, vertexidx, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset,
						    ac_get_arg(&ctx->ac,
							       ctx->args->gs2vs_offset),
						    0, ac_glc | ac_slc | ac_swizzled);
		}
	}

	/* Tell the GS control unit one vertex on this stream was emitted. */
	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);
}
823 
824 static void
visit_end_primitive(struct ac_shader_abi * abi,unsigned stream)825 visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
826 {
827 	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
828 
829 	if (ctx->args->options->key.vs_common_out.as_ngg) {
830 		LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
831 		return;
832 	}
833 
834 	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
835 }
836 
837 static LLVMValueRef
load_tess_coord(struct ac_shader_abi * abi)838 load_tess_coord(struct ac_shader_abi *abi)
839 {
840 	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
841 
842 	LLVMValueRef coord[4] = {
843 		ac_get_arg(&ctx->ac, ctx->args->tes_u),
844 		ac_get_arg(&ctx->ac, ctx->args->tes_v),
845 		ctx->ac.f32_0,
846 		ctx->ac.f32_0,
847 	};
848 
849 	if (ctx->shader->info.tess.primitive_mode == GL_TRIANGLES)
850 		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
851 					LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
852 
853 	return ac_build_gather_values(&ctx->ac, coord, 3);
854 }
855 
856 static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi * abi)857 load_patch_vertices_in(struct ac_shader_abi *abi)
858 {
859 	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
860 	return LLVMConstInt(ctx->ac.i32, ctx->args->options->key.tcs.input_vertices, false);
861 }
862 
863 
radv_load_base_vertex(struct ac_shader_abi * abi)864 static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
865 {
866 	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
867 	return ac_get_arg(&ctx->ac, ctx->args->ac.base_vertex);
868 }
869 
/* Load an SSBO descriptor from the given pointer.
 *
 * The pointer is tagged as uniform and the load as invariant so LLVM may
 * hoist/CSE the descriptor fetch; "write" does not change the load itself.
 */
static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
				   LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	LLVMValueRef desc = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(desc, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
	return desc;
}
883 
/* Load a UBO descriptor.
 *
 * For inline uniform blocks the "buffer" is the block data itself, so a raw
 * buffer descriptor covering it is constructed from the pointer instead of
 * loading a descriptor from memory.
 */
static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi,
				  unsigned desc_set, unsigned binding,
				  bool valid_binding, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (valid_binding) {
		struct radv_pipeline_layout *pipeline_layout = ctx->args->options->layout;
		struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;

		if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
			/* Descriptor word 3: dst swizzle plus chip-dependent
			 * format bits (GFX10+ uses FORMAT/OOB_SELECT, older
			 * chips use NUM_FORMAT/DATA_FORMAT). */
			uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
					     S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
					     S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
					     S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

			if (ctx->ac.chip_class >= GFX10) {
				desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
					     S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
					     S_008F0C_RESOURCE_LEVEL(1);
			} else {
				desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
					     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
			}

			/* Words 0/1: base address (low bits + BASE_ADDRESS_HI),
			 * word 2: range (0xffffffff), word 3: type bits above. */
			LLVMValueRef desc_components[4] = {
				LLVMBuildPtrToInt(ctx->ac.builder, buffer_ptr, ctx->ac.intptr, ""),
				LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->args->options->address32_hi), false),
				LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
				LLVMConstInt(ctx->ac.i32, desc_type, false),
			};

			return ac_build_gather_values(&ctx->ac, desc_components, 4);
		}
	}

	/* Regular UBO: load the descriptor, marked uniform/invariant so the
	 * load can be hoisted and CSE'd. */
	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
928 
/* Build an image/sampler/buffer descriptor for the given binding.
 *
 * The descriptor is loaded from the descriptor set at
 * binding->offset + constant_index * stride (+ a per-kind offset), unless an
 * immutable sampler can be returned directly as compile-time constants.
 */
static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
					  unsigned descriptor_set,
					  unsigned base_index,
					  unsigned constant_index,
					  LLVMValueRef index,
					  enum ac_descriptor_type desc_type,
					  bool image, bool write,
					  bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->args->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size; /* size in bytes of one descriptor of this kind */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	/* Pick the descriptor vector type, its byte size, and the extra
	 * offset of this kind within the binding's slot. */
	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		/* FMASK is stored right after the 32-byte image descriptor. */
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			/* In a combined image+sampler, the sampler part lives
			 * at an offset after the image part. */
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}

		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		/* Multi-plane formats store one 32-byte descriptor per plane. */
		type = ctx->ac.v8i32;
		type_size = 32;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		/* Immutable samplers are known at compile time: emit their
		 * four dwords as constants instead of loading them. */
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	/* Scale the dynamic array index to units of the descriptor type. */
	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1, image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);
		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}
1033 
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-vega HW.
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
                          unsigned adjustment,
                          LLVMValueRef alpha)
{
	if (adjustment == AC_FETCH_FORMAT_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	/* SSCALED was fetched as a float value: convert it back to the raw
	 * integer. Other cases just reinterpret the bits. */
	if (adjustment == AC_FETCH_FORMAT_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
	                     adjustment == AC_FETCH_FORMAT_SNORM ?
	                     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == AC_FETCH_FORMAT_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		/* SNORM: clamp the low end so both -2 and -1 map to -1.0. */
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == AC_FETCH_FORMAT_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	/* Callers expect the i32 bit pattern back. */
	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}
1077 
1078 static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context * ctx,LLVMValueRef value,unsigned num_channels,bool is_float)1079 radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
1080 				LLVMValueRef value,
1081 				unsigned num_channels,
1082 				bool is_float)
1083 {
1084 	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
1085 	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
1086 	LLVMValueRef chan[4];
1087 
1088 	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
1089 		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));
1090 
1091 		if (num_channels == 4 && num_channels == vec_size)
1092 			return value;
1093 
1094 		num_channels = MIN2(num_channels, vec_size);
1095 
1096 		for (unsigned i = 0; i < num_channels; i++)
1097 			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
1098 	} else {
1099 		assert(num_channels == 1);
1100 		chan[0] = value;
1101 	}
1102 
1103 	for (unsigned i = num_channels; i < 4; i++) {
1104 		chan[i] = i == 3 ? one : zero;
1105 		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
1106 	}
1107 
1108 	return ac_build_gather_values(&ctx->ac, chan, 4);
1109 }
1110 
1111 static void
handle_vs_input_decl(struct radv_shader_context * ctx,struct nir_variable * variable)1112 handle_vs_input_decl(struct radv_shader_context *ctx,
1113 		     struct nir_variable *variable)
1114 {
1115 	LLVMValueRef t_list_ptr = ac_get_arg(&ctx->ac, ctx->args->vertex_buffers);
1116 	LLVMValueRef t_offset;
1117 	LLVMValueRef t_list;
1118 	LLVMValueRef input;
1119 	LLVMValueRef buffer_index;
1120 	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
1121 
1122 
1123 	enum glsl_base_type type = glsl_get_base_type(variable->type);
1124 	for (unsigned i = 0; i < attrib_count; ++i) {
1125 		LLVMValueRef output[4];
1126 		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
1127 		unsigned attrib_format = ctx->args->options->key.vs.vertex_attribute_formats[attrib_index];
1128 		unsigned data_format = attrib_format & 0x0f;
1129 		unsigned num_format = (attrib_format >> 4) & 0x07;
1130 		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
1131 		                num_format != V_008F0C_BUF_NUM_FORMAT_SINT;
1132 		uint8_t input_usage_mask =
1133 			ctx->args->shader_info->vs.input_usage_mask[variable->data.location + i];
1134 		unsigned num_input_channels = util_last_bit(input_usage_mask);
1135 
1136 		if (num_input_channels == 0)
1137 			continue;
1138 
1139 		if (ctx->args->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
1140 			uint32_t divisor = ctx->args->options->key.vs.instance_rate_divisors[attrib_index];
1141 
1142 			if (divisor) {
1143 				buffer_index = ctx->abi.instance_id;
1144 
1145 				if (divisor != 1) {
1146 					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
1147 					                             LLVMConstInt(ctx->ac.i32, divisor, 0), "");
1148 				}
1149 			} else {
1150 				buffer_index = ctx->ac.i32_0;
1151 			}
1152 
1153 			buffer_index = LLVMBuildAdd(ctx->ac.builder,
1154 						    ac_get_arg(&ctx->ac,
1155 							       ctx->args->ac.start_instance),\
1156 						    buffer_index, "");
1157 		} else {
1158 			buffer_index = LLVMBuildAdd(ctx->ac.builder,
1159 						    ctx->abi.vertex_id,
1160 			                            ac_get_arg(&ctx->ac,
1161 							       ctx->args->ac.base_vertex), "");
1162 		}
1163 
1164 		const struct ac_data_format_info *vtx_info = ac_get_data_format_info(data_format);
1165 
1166 		/* Adjust the number of channels to load based on the vertex
1167 		 * attribute format.
1168 		 */
1169 		unsigned num_channels = MIN2(num_input_channels, vtx_info->num_channels);
1170 		unsigned attrib_binding = ctx->args->options->key.vs.vertex_attribute_bindings[attrib_index];
1171 		unsigned attrib_offset = ctx->args->options->key.vs.vertex_attribute_offsets[attrib_index];
1172 		unsigned attrib_stride = ctx->args->options->key.vs.vertex_attribute_strides[attrib_index];
1173 		unsigned alpha_adjust = ctx->args->options->key.vs.alpha_adjust[attrib_index];
1174 
1175 		if (ctx->args->options->key.vs.post_shuffle & (1 << attrib_index)) {
1176 			/* Always load, at least, 3 channels for formats that
1177 			 * need to be shuffled because X<->Z.
1178 			 */
1179 			num_channels = MAX2(num_channels, 3);
1180 		}
1181 
1182 		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
1183 		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
1184 
1185 		/* Always split typed vertex buffer loads on GFX6 and GFX10+
1186 		 * to avoid any alignment issues that triggers memory
1187 		 * violations and eventually a GPU hang. This can happen if
1188 		 * the stride (static or dynamic) is unaligned and also if the
1189 		 * VBO offset is aligned to a scalar (eg. stride is 8 and VBO
1190 		 * offset is 2 for R16G16B16A16_SNORM).
1191 		 */
1192 		if (ctx->ac.chip_class == GFX6 ||
1193 		    ctx->ac.chip_class >= GFX10) {
1194 			unsigned chan_format = vtx_info->chan_format;
1195 			LLVMValueRef values[4];
1196 
1197 			assert(ctx->ac.chip_class == GFX6 ||
1198 			       ctx->ac.chip_class >= GFX10);
1199 
1200 			for (unsigned chan  = 0; chan < num_channels; chan++) {
1201 				unsigned chan_offset = attrib_offset + chan * vtx_info->chan_byte_size;
1202 				LLVMValueRef chan_index = buffer_index;
1203 
1204 				if (attrib_stride != 0 && chan_offset > attrib_stride) {
1205 					LLVMValueRef buffer_offset =
1206 						LLVMConstInt(ctx->ac.i32,
1207 							     chan_offset / attrib_stride, false);
1208 
1209 					chan_index = LLVMBuildAdd(ctx->ac.builder,
1210 								  buffer_index,
1211 								  buffer_offset, "");
1212 
1213 					chan_offset = chan_offset % attrib_stride;
1214 				}
1215 
1216 				values[chan] = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
1217 									   chan_index,
1218 									   LLVMConstInt(ctx->ac.i32, chan_offset, false),
1219 									   ctx->ac.i32_0, ctx->ac.i32_0, 1,
1220 									   chan_format, num_format, 0, true);
1221 			}
1222 
1223 			input = ac_build_gather_values(&ctx->ac, values, num_channels);
1224 		} else {
1225 			if (attrib_stride != 0 && attrib_offset > attrib_stride) {
1226 				LLVMValueRef buffer_offset =
1227 					LLVMConstInt(ctx->ac.i32,
1228 						     attrib_offset / attrib_stride, false);
1229 
1230 				buffer_index = LLVMBuildAdd(ctx->ac.builder,
1231 							    buffer_index,
1232 							    buffer_offset, "");
1233 
1234 				attrib_offset = attrib_offset % attrib_stride;
1235 			}
1236 
1237 			input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
1238 							     buffer_index,
1239 							     LLVMConstInt(ctx->ac.i32, attrib_offset, false),
1240 							     ctx->ac.i32_0, ctx->ac.i32_0,
1241 							     num_channels,
1242 							     data_format, num_format, 0, true);
1243 		}
1244 
1245 		if (ctx->args->options->key.vs.post_shuffle & (1 << attrib_index)) {
1246 			LLVMValueRef c[4];
1247 			c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
1248 			c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
1249 			c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
1250 			c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);
1251 
1252 			input = ac_build_gather_values(&ctx->ac, c, 4);
1253 		}
1254 
1255 		input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
1256 							is_float);
1257 
1258 		for (unsigned chan = 0; chan < 4; chan++) {
1259 			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
1260 			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
1261 			if (type == GLSL_TYPE_FLOAT16) {
1262 				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
1263 				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
1264 			}
1265 		}
1266 
1267 		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);
1268 
1269 		for (unsigned chan = 0; chan < 4; chan++) {
1270 			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
1271 			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
1272 				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");
1273 
1274 			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
1275 		}
1276 	}
1277 }
1278 
1279 static void
handle_vs_inputs(struct radv_shader_context * ctx,struct nir_shader * nir)1280 handle_vs_inputs(struct radv_shader_context *ctx,
1281                  struct nir_shader *nir) {
1282 	nir_foreach_shader_in_variable(variable, nir)
1283 		handle_vs_input_decl(ctx, variable);
1284 }
1285 
1286 static void
prepare_interp_optimize(struct radv_shader_context * ctx,struct nir_shader * nir)1287 prepare_interp_optimize(struct radv_shader_context *ctx,
1288                         struct nir_shader *nir)
1289 {
1290 	bool uses_center = false;
1291 	bool uses_centroid = false;
1292 	nir_foreach_shader_in_variable(variable, nir) {
1293 		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
1294 		    variable->data.sample)
1295 			continue;
1296 
1297 		if (variable->data.centroid)
1298 			uses_centroid = true;
1299 		else
1300 			uses_center = true;
1301 	}
1302 
1303 	ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args->ac.persp_centroid);
1304 	ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args->ac.linear_centroid);
1305 
1306 	if (uses_center && uses_centroid) {
1307 		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT,
1308 						 ac_get_arg(&ctx->ac, ctx->args->ac.prim_mask),
1309 						 ctx->ac.i32_0, "");
1310 		ctx->abi.persp_centroid =
1311 			LLVMBuildSelect(ctx->ac.builder, sel,
1312 					ac_get_arg(&ctx->ac, ctx->args->ac.persp_center),
1313 					ctx->abi.persp_centroid, "");
1314 		ctx->abi.linear_centroid =
1315 			LLVMBuildSelect(ctx->ac.builder, sel,
1316 					ac_get_arg(&ctx->ac, ctx->args->ac.linear_center),
1317 					ctx->abi.linear_centroid, "");
1318 	}
1319 }
1320 
/* Record which output slots a variable occupies in ctx->output_mask
 * (and, for TCS, remember the tess-factor driver locations). */
static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.driver_location;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL) {
		/* Remember driver location of tess factors, so we can read
		 * them later, in write_tess_factors.
		 */
		if (variable->data.location == VARYING_SLOT_TESS_LEVEL_INNER) {
			ctx->tcs_tess_lvl_inner = idx;
		} else if (variable->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) {
			ctx->tcs_tess_lvl_outer = idx;
		}
		return;
	}

	if (variable->data.compact) {
		/* Compact arrays (e.g. clip/cull distances) pack four scalar
		 * components per varying slot. */
		unsigned component_count = variable->data.location_frac +
		                           glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	}

	/* One mask bit per occupied slot, starting at the driver location. */
	mask_attribs = ((1ull << attrib_count) - 1) << idx;

	ctx->output_mask |= mask_attribs;
}
1354 
1355 
1356 /* Initialize arguments for the shader export intrinsic */
1357 static void
si_llvm_init_export_args(struct radv_shader_context * ctx,LLVMValueRef * values,unsigned enabled_channels,unsigned target,struct ac_export_args * args)1358 si_llvm_init_export_args(struct radv_shader_context *ctx,
1359 			 LLVMValueRef *values,
1360 			 unsigned enabled_channels,
1361 			 unsigned target,
1362 			 struct ac_export_args *args)
1363 {
1364 	/* Specify the channels that are enabled. */
1365 	args->enabled_channels = enabled_channels;
1366 
1367 	/* Specify whether the EXEC mask represents the valid mask */
1368 	args->valid_mask = 0;
1369 
1370 	/* Specify whether this is the last export */
1371 	args->done = 0;
1372 
1373 	/* Specify the target we are exporting */
1374 	args->target = target;
1375 
1376 	args->compr = false;
1377 	args->out[0] = LLVMGetUndef(ctx->ac.f32);
1378 	args->out[1] = LLVMGetUndef(ctx->ac.f32);
1379 	args->out[2] = LLVMGetUndef(ctx->ac.f32);
1380 	args->out[3] = LLVMGetUndef(ctx->ac.f32);
1381 
1382 	if (!values)
1383 		return;
1384 
1385 	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
1386 	if (ctx->stage == MESA_SHADER_FRAGMENT) {
1387 		unsigned index = target - V_008DFC_SQ_EXP_MRT;
1388 		unsigned col_format = (ctx->args->options->key.fs.col_format >> (4 * index)) & 0xf;
1389 		bool is_int8 = (ctx->args->options->key.fs.is_int8 >> index) & 1;
1390 		bool is_int10 = (ctx->args->options->key.fs.is_int10 >> index) & 1;
1391 		unsigned chan;
1392 
1393 		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
1394 		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
1395 				      unsigned bits, bool hi) = NULL;
1396 
1397 		switch(col_format) {
1398 		case V_028714_SPI_SHADER_ZERO:
1399 			args->enabled_channels = 0; /* writemask */
1400 			args->target = V_008DFC_SQ_EXP_NULL;
1401 			break;
1402 
1403 		case V_028714_SPI_SHADER_32_R:
1404 			args->enabled_channels = 1;
1405 			args->out[0] = values[0];
1406 			break;
1407 
1408 		case V_028714_SPI_SHADER_32_GR:
1409 			args->enabled_channels = 0x3;
1410 			args->out[0] = values[0];
1411 			args->out[1] = values[1];
1412 			break;
1413 
1414 		case V_028714_SPI_SHADER_32_AR:
1415 			if (ctx->ac.chip_class >= GFX10) {
1416 				args->enabled_channels = 0x3;
1417 				args->out[0] = values[0];
1418 				args->out[1] = values[3];
1419 			} else {
1420 				args->enabled_channels = 0x9;
1421 				args->out[0] = values[0];
1422 				args->out[3] = values[3];
1423 			}
1424 			break;
1425 
1426 		case V_028714_SPI_SHADER_FP16_ABGR:
1427 			args->enabled_channels = 0x5;
1428 			packf = ac_build_cvt_pkrtz_f16;
1429 			if (is_16bit) {
1430 				for (unsigned chan = 0; chan < 4; chan++)
1431 					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
1432 								      values[chan],
1433 								      ctx->ac.f32, "");
1434 			}
1435 			break;
1436 
1437 		case V_028714_SPI_SHADER_UNORM16_ABGR:
1438 			args->enabled_channels = 0x5;
1439 			packf = ac_build_cvt_pknorm_u16;
1440 			break;
1441 
1442 		case V_028714_SPI_SHADER_SNORM16_ABGR:
1443 			args->enabled_channels = 0x5;
1444 			packf = ac_build_cvt_pknorm_i16;
1445 			break;
1446 
1447 		case V_028714_SPI_SHADER_UINT16_ABGR:
1448 			args->enabled_channels = 0x5;
1449 			packi = ac_build_cvt_pk_u16;
1450 			if (is_16bit) {
1451 				for (unsigned chan = 0; chan < 4; chan++)
1452 					values[chan] = LLVMBuildZExt(ctx->ac.builder,
1453 								      ac_to_integer(&ctx->ac, values[chan]),
1454 								      ctx->ac.i32, "");
1455 			}
1456 			break;
1457 
1458 		case V_028714_SPI_SHADER_SINT16_ABGR:
1459 			args->enabled_channels = 0x5;
1460 			packi = ac_build_cvt_pk_i16;
1461 			if (is_16bit) {
1462 				for (unsigned chan = 0; chan < 4; chan++)
1463 					values[chan] = LLVMBuildSExt(ctx->ac.builder,
1464 								      ac_to_integer(&ctx->ac, values[chan]),
1465 								      ctx->ac.i32, "");
1466 			}
1467 			break;
1468 
1469 		default:
1470 		case V_028714_SPI_SHADER_32_ABGR:
1471 			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
1472 			break;
1473 		}
1474 
1475 		/* Replace NaN by zero (only 32-bit) to fix game bugs if
1476 		 * requested.
1477 		 */
1478 		if (ctx->args->options->enable_mrt_output_nan_fixup &&
1479 		    !is_16bit &&
1480 		    (col_format == V_028714_SPI_SHADER_32_R ||
1481 		     col_format == V_028714_SPI_SHADER_32_GR ||
1482 		     col_format == V_028714_SPI_SHADER_32_AR ||
1483 		     col_format == V_028714_SPI_SHADER_32_ABGR ||
1484 		     col_format == V_028714_SPI_SHADER_FP16_ABGR)) {
1485 			for (unsigned i = 0; i < 4; i++) {
1486 				LLVMValueRef args[2] = {
1487 					values[i],
1488 					LLVMConstInt(ctx->ac.i32, S_NAN | Q_NAN, false)
1489 				};
1490 				LLVMValueRef isnan =
1491 					ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.class.f32", ctx->ac.i1,
1492 		                                           args, 2, AC_FUNC_ATTR_READNONE);
1493 				values[i] = LLVMBuildSelect(ctx->ac.builder, isnan,
1494 							    ctx->ac.f32_0,
1495 							    values[i], "");
1496 			}
1497 		}
1498 
1499 		/* Pack f16 or norm_i16/u16. */
1500 		if (packf) {
1501 			for (chan = 0; chan < 2; chan++) {
1502 				LLVMValueRef pack_args[2] = {
1503 					values[2 * chan],
1504 					values[2 * chan + 1]
1505 				};
1506 				LLVMValueRef packed;
1507 
1508 				packed = packf(&ctx->ac, pack_args);
1509 				args->out[chan] = ac_to_float(&ctx->ac, packed);
1510 			}
1511 			args->compr = 1; /* COMPR flag */
1512 		}
1513 
1514 		/* Pack i16/u16. */
1515 		if (packi) {
1516 			for (chan = 0; chan < 2; chan++) {
1517 				LLVMValueRef pack_args[2] = {
1518 					ac_to_integer(&ctx->ac, values[2 * chan]),
1519 					ac_to_integer(&ctx->ac, values[2 * chan + 1])
1520 				};
1521 				LLVMValueRef packed;
1522 
1523 				packed = packi(&ctx->ac, pack_args,
1524 					       is_int8 ? 8 : is_int10 ? 10 : 16,
1525 					       chan == 1);
1526 				args->out[chan] = ac_to_float(&ctx->ac, packed);
1527 			}
1528 			args->compr = 1; /* COMPR flag */
1529 		}
1530 		return;
1531 	}
1532 
1533 	if (is_16bit) {
1534 		for (unsigned chan = 0; chan < 4; chan++) {
1535 			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
1536 			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
1537 		}
1538 	} else
1539 		memcpy(&args->out[0], values, sizeof(values[0]) * 4);
1540 
1541 	for (unsigned i = 0; i < 4; ++i)
1542 		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
1543 }
1544 
1545 static void
radv_export_param(struct radv_shader_context * ctx,unsigned index,LLVMValueRef * values,unsigned enabled_channels)1546 radv_export_param(struct radv_shader_context *ctx, unsigned index,
1547 		  LLVMValueRef *values, unsigned enabled_channels)
1548 {
1549 	struct ac_export_args args;
1550 
1551 	si_llvm_init_export_args(ctx, values, enabled_channels,
1552 				 V_008DFC_SQ_EXP_PARAM + index, &args);
1553 	ac_build_export(&ctx->ac, &args);
1554 }
1555 
1556 static LLVMValueRef
radv_load_output(struct radv_shader_context * ctx,unsigned index,unsigned chan)1557 radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
1558 {
1559 	LLVMValueRef output = ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
1560 	return LLVMBuildLoad(ctx->ac.builder, output, "");
1561 }
1562 
/* Store the enabled components of one transform-feedback output into its
 * streamout buffer.
 *
 * so_buffers:       per-buffer descriptors.
 * so_write_offsets: per-buffer byte offsets for the current thread.
 */
static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			 LLVMValueRef const *so_buffers,
			 LLVMValueRef const *so_write_offsets,
			 const struct radv_stream_output *output,
			 struct radv_shader_output_values *shader_out)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac, shader_out->values[start + i]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		/* Chips without v3i32 support round a 3-component store up
		 * to the next power of two (4). */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       !ac_has_vec3_support(ctx->ac.chip_class, false) ?
					       util_next_power_of_two(num_comps) :
					       num_comps);
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    ac_glc | ac_slc);
}
1612 
/* Emit transform feedback (streamout) stores for one vertex stream.
 *
 * Each lane holds one vertex. The hardware supplies, via the
 * streamout_config SGPR, how many lanes are actually allowed to write,
 * and per-buffer base offsets via the streamout_offset[] SGPRs.
 */
static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->args->streamout_config.used);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac,
			     ac_get_arg(&ctx->ac, ctx->args->streamout_config),
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_build_ifcc(&ctx->ac, can_emit, 6501);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */
		LLVMValueRef so_write_index =
			ac_get_arg(&ctx->ac, ctx->args->streamout_write_idx);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index =
			LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer.
		 */
		LLVMValueRef so_write_offset[4] = {0};
		LLVMValueRef so_buffers[4] = {0};
		LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->args->streamout_buffers);

		for (i = 0; i < 4; i++) {
			uint16_t stride = ctx->args->shader_info->so.strides[i];

			/* A stride of 0 means this buffer is unused. */
			if (!stride)
				continue;

			LLVMValueRef offset =
				LLVMConstInt(ctx->ac.i32, i, false);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
							      buf_ptr, offset);

			LLVMValueRef so_offset =
				ac_get_arg(&ctx->ac, ctx->args->streamout_offset[i]);

			/* streamout_offset is in dwords (see the ByteOffset
			 * formula above), hence the *4 to get bytes.
			 */
			so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
						 LLVMConstInt(ctx->ac.i32, 4, false), "");

			/* write_offset = write_index * stride_bytes + so_offset */
			so_write_offset[i] =
				ac_build_imad(&ctx->ac, so_write_index,
					      LLVMConstInt(ctx->ac.i32,
							   stride * 4, false),
					      so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < ctx->args->shader_info->so.num_outputs; i++) {
			struct radv_shader_output_values shader_out = {0};
			struct radv_stream_output *output =
				&ctx->args->shader_info->so.outputs[i];

			/* Only emit outputs belonging to the requested stream. */
			if (stream != output->stream)
				continue;

			for (int j = 0; j < 4; j++) {
				shader_out.values[j] =
					radv_load_output(ctx, output->location, j);
			}

			radv_emit_stream_output(ctx, so_buffers,so_write_offset,
						output, &shader_out);
		}
	}
	ac_build_endif(&ctx->ac, 6501);
}
1702 
1703 static void
radv_build_param_exports(struct radv_shader_context * ctx,struct radv_shader_output_values * outputs,unsigned noutput,struct radv_vs_output_info * outinfo,bool export_clip_dists)1704 radv_build_param_exports(struct radv_shader_context *ctx,
1705 			 struct radv_shader_output_values *outputs,
1706 			 unsigned noutput,
1707 			 struct radv_vs_output_info *outinfo,
1708 			 bool export_clip_dists)
1709 {
1710 	unsigned param_count = 0;
1711 
1712 	for (unsigned i = 0; i < noutput; i++) {
1713 		unsigned slot_name = outputs[i].slot_name;
1714 		unsigned usage_mask = outputs[i].usage_mask;
1715 
1716 		if (slot_name != VARYING_SLOT_LAYER &&
1717 		    slot_name != VARYING_SLOT_PRIMITIVE_ID &&
1718 		    slot_name != VARYING_SLOT_VIEWPORT &&
1719 		    slot_name != VARYING_SLOT_CLIP_DIST0 &&
1720 		    slot_name != VARYING_SLOT_CLIP_DIST1 &&
1721 		    slot_name < VARYING_SLOT_VAR0)
1722 			continue;
1723 
1724 		if ((slot_name == VARYING_SLOT_CLIP_DIST0 ||
1725 		     slot_name == VARYING_SLOT_CLIP_DIST1) && !export_clip_dists)
1726 			continue;
1727 
1728 		radv_export_param(ctx, param_count, outputs[i].values, usage_mask);
1729 
1730 		assert(i < ARRAY_SIZE(outinfo->vs_output_param_offset));
1731 		outinfo->vs_output_param_offset[slot_name] = param_count++;
1732         }
1733 
1734 	outinfo->param_exports = param_count;
1735 }
1736 
/* Generate export instructions for hardware VS shader stage or NGG GS stage
 * (position and parameter data only).
 *
 * Position data goes to the POS0..POS3 export targets; parameter exports
 * are delegated to radv_build_param_exports() at the end.
 */
static void
radv_llvm_export_vs(struct radv_shader_context *ctx,
                    struct radv_shader_output_values *outputs,
                    unsigned noutput,
                    struct radv_vs_output_info *outinfo,
		    bool export_clip_dists)
{
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_value = NULL;
	struct ac_export_args pos_args[4] = {0};
	unsigned pos_idx, index;
	int i;

	/* Build position exports */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].slot_name) {
		case VARYING_SLOT_POS:
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case VARYING_SLOT_PSIZ:
			psize_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_VIEWPORT:
			viewport_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_CLIP_DIST0:
		case VARYING_SLOT_CLIP_DIST1:
			/* Clip distances occupy POS2/POS3; slot_index selects
			 * which vec4 of the pair this is.
			 */
			index = 2 + outputs[i].slot_index;
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS + index,
						 &pos_args[index]);
			break;
		default:
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1;  /* W */
	}

	/* Point size, layer and viewport index share the POS1 export. */
	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0;  /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->args->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	/* Count how many POS targets are actually used. */
	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			outinfo->pos_exports++;
	}

	/* GFX10 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->ac.chip_class == GFX10)
		pos_args[0].valid_mask = 1;

	/* Compact the used POS exports into consecutive targets. */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == outinfo->pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports */
	radv_build_param_exports(ctx, outputs, noutput, outinfo, export_clip_dists);
}
1862 
/* Gather all written outputs of a hardware-VS-like stage (VS, TES or the
 * GS copy shader) and emit the final position/parameter exports, plus
 * legacy streamout when enabled.
 */
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id,
		       bool export_clip_dists,
		       struct radv_vs_output_info *outinfo)
{
	struct radv_shader_output_values *outputs;
	unsigned noutput = 0;

	/* For multiview, synthesize a LAYER output holding the view index. */
	if (ctx->args->options->key.has_multiview_view_index) {
		LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if(!*tmp_out) {
			/* The shader never wrote LAYER: allocate storage for it. */
			for(unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
				            ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}

		LLVMValueRef view_index = ac_get_arg(&ctx->ac, ctx->args->ac.view_index);
		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));
	outinfo->pos_exports = 0;

	if (!ctx->args->options->use_ngg_streamout &&
	    ctx->args->shader_info->so.num_outputs &&
	    !ctx->args->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	/* Allocate a temporary array for the output values. */
	unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_prim_id;
	/* NOTE(review): malloc result is not checked; an allocation failure
	 * here would crash in the loop below. */
	outputs = malloc(num_outputs * sizeof(outputs[0]));

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		outputs[noutput].slot_name = i;
		/* slot_index is 1 for the second half of the clip-distance pair. */
		outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->args->is_gs_copy_shader) {
			outputs[noutput].usage_mask =
				ctx->args->shader_info->vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			outputs[noutput].usage_mask =
				ctx->args->shader_info->tes.output_usage_mask[i];
		} else {
			assert(ctx->args->is_gs_copy_shader);
			outputs[noutput].usage_mask =
				ctx->args->shader_info->gs.output_usage_mask[i];
		}

		for (unsigned j = 0; j < 4; j++) {
			outputs[noutput].values[j] =
				ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
		}

		noutput++;
	}

	/* Export PrimitiveID. */
	if (export_prim_id) {
		outputs[noutput].slot_name = VARYING_SLOT_PRIMITIVE_ID;
		outputs[noutput].slot_index = 0;
		outputs[noutput].usage_mask = 0x1;
		if (ctx->stage == MESA_SHADER_TESS_EVAL)
			outputs[noutput].values[0] =
				ac_get_arg(&ctx->ac, ctx->args->ac.tes_patch_id);
		else
			outputs[noutput].values[0] =
				ac_get_arg(&ctx->ac, ctx->args->vs_prim_id);
		for (unsigned j = 1; j < 4; j++)
			outputs[noutput].values[j] = ctx->ac.f32_0;
		noutput++;
	}

	radv_llvm_export_vs(ctx, outputs, noutput, outinfo, export_clip_dists);

	free(outputs);
}
1948 
/* Store all ES (VS or TES feeding a GS) outputs for consumption by the GS:
 * into LDS on GFX9+ (merged ES/GS) or via the ESGS ring buffer on older
 * chips.
 */
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	LLVMValueRef lds_base = NULL;

	if (ctx->ac.chip_class  >= GFX9) {
		/* Compute this vertex's dword base address in LDS from its
		 * index within the threadgroup (wave_idx * wave_size | lane).
		 */
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx =
			ac_unpack_param(&ctx->ac,
					ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32,
								   ctx->ac.wave_size, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->args->shader_info->vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->args->shader_info->tes.output_usage_mask[i];
		}

		if (lds_base) {
			/* Each output slot occupies 4 dwords per vertex. */
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
			                       LLVMConstInt(ctx->ac.i32, i * 4, false),
			                       "");
		}

		for (j = 0; j < 4; j++) {
			/* Only store components the GS actually reads. */
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class  >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32,
								  j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
				                            ctx->esgs_ring,
				                            out_val, 1,
				                            NULL,
							    ac_get_arg(&ctx->ac, ctx->args->es2gs_offset),
				                            (4 * i + j) * 4,
				                            ac_glc | ac_slc | ac_swizzled);
			}
		}
	}
}
2020 
2021 static void
handle_ls_outputs_post(struct radv_shader_context * ctx)2022 handle_ls_outputs_post(struct radv_shader_context *ctx)
2023 {
2024 	LLVMValueRef vertex_id = ctx->rel_auto_id;
2025 	uint32_t num_tcs_inputs = ctx->args->shader_info->vs.num_linked_outputs;
2026 	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
2027 	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2028 						 vertex_dw_stride, "");
2029 
2030 	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2031 		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2032 
2033 		if (!(ctx->output_mask & (1ull << i)))
2034 			continue;
2035 
2036 		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2037 						    LLVMConstInt(ctx->ac.i32, i * 4, false),
2038 						    "");
2039 		for (unsigned j = 0; j < 4; j++) {
2040 			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2041 			value = ac_to_integer(&ctx->ac, value);
2042 			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
2043 			ac_lds_store(&ctx->ac, dw_addr, value);
2044 			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2045 		}
2046 	}
2047 }
2048 
get_wave_id_in_tg(struct radv_shader_context * ctx)2049 static LLVMValueRef get_wave_id_in_tg(struct radv_shader_context *ctx)
2050 {
2051 	return ac_unpack_param(&ctx->ac,
2052 			       ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 24, 4);
2053 }
2054 
get_tgsize(struct radv_shader_context * ctx)2055 static LLVMValueRef get_tgsize(struct radv_shader_context *ctx)
2056 {
2057 	return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 28, 4);
2058 }
2059 
get_thread_id_in_tg(struct radv_shader_context * ctx)2060 static LLVMValueRef get_thread_id_in_tg(struct radv_shader_context *ctx)
2061 {
2062 	LLVMBuilderRef builder = ctx->ac.builder;
2063 	LLVMValueRef tmp;
2064 	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
2065 			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
2066 	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
2067 }
2068 
ngg_get_vtx_cnt(struct radv_shader_context * ctx)2069 static LLVMValueRef ngg_get_vtx_cnt(struct radv_shader_context *ctx)
2070 {
2071 	return ac_build_bfe(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_tg_info),
2072 			    LLVMConstInt(ctx->ac.i32, 12, false),
2073 			    LLVMConstInt(ctx->ac.i32, 9, false),
2074 			    false);
2075 }
2076 
ngg_get_prim_cnt(struct radv_shader_context * ctx)2077 static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
2078 {
2079 	return ac_build_bfe(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_tg_info),
2080 			    LLVMConstInt(ctx->ac.i32, 22, false),
2081 			    LLVMConstInt(ctx->ac.i32, 9, false),
2082 			    false);
2083 }
2084 
ngg_get_ordered_id(struct radv_shader_context * ctx)2085 static LLVMValueRef ngg_get_ordered_id(struct radv_shader_context *ctx)
2086 {
2087 	return ac_build_bfe(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_tg_info),
2088 			    ctx->ac.i32_0,
2089 			    LLVMConstInt(ctx->ac.i32, 12, false),
2090 			    false);
2091 }
2092 
2093 static LLVMValueRef
ngg_gs_get_vertex_storage(struct radv_shader_context * ctx)2094 ngg_gs_get_vertex_storage(struct radv_shader_context *ctx)
2095 {
2096 	unsigned num_outputs = util_bitcount64(ctx->output_mask);
2097 
2098 	if (ctx->args->options->key.has_multiview_view_index)
2099 		num_outputs++;
2100 
2101 	LLVMTypeRef elements[2] = {
2102 		LLVMArrayType(ctx->ac.i32, 4 * num_outputs),
2103 		LLVMArrayType(ctx->ac.i8, 4),
2104 	};
2105 	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
2106 	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
2107 	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
2108 }
2109 
2110 /**
2111  * Return a pointer to the LDS storage reserved for the N'th vertex, where N
2112  * is in emit order; that is:
2113  * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
2114  * - during vertex emit, i.e. while the API GS shader invocation is running,
2115  *   N = threadidx * gs_max_out_vertices + emitidx
2116  *
2117  * Goals of the LDS memory layout:
2118  * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
2119  *    in uniform control flow
2120  * 2. Eliminate bank conflicts on read for export if, additionally, there is no
2121  *    culling
2122  * 3. Agnostic to the number of waves (since we don't know it before compiling)
2123  * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
2124  * 5. Avoid wasting memory.
2125  *
2126  * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
2127  * layout, elimination of bank conflicts requires that each vertex occupy an
2128  * odd number of dwords. We use the additional dword to store the output stream
2129  * index as well as a flag to indicate whether this vertex ends a primitive
2130  * for rasterization.
2131  *
2132  * Swizzling is required to satisfy points 1 and 2 simultaneously.
2133  *
2134  * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
2135  * Indices are swizzled in groups of 32, which ensures point 1 without
2136  * disturbing point 2.
2137  *
2138  * \return an LDS pointer to type {[N x i32], [4 x i8]}
2139  */
2140 static LLVMValueRef
ngg_gs_vertex_ptr(struct radv_shader_context * ctx,LLVMValueRef vertexidx)2141 ngg_gs_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef vertexidx)
2142 {
2143 	LLVMBuilderRef builder = ctx->ac.builder;
2144 	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
2145 
2146 	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
2147 	unsigned write_stride_2exp = ffs(ctx->shader->info.gs.vertices_out) - 1;
2148 	if (write_stride_2exp) {
2149 		LLVMValueRef row =
2150 			LLVMBuildLShr(builder, vertexidx,
2151 				      LLVMConstInt(ctx->ac.i32, 5, false), "");
2152 		LLVMValueRef swizzle =
2153 			LLVMBuildAnd(builder, row,
2154 				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
2155 						  false), "");
2156 		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
2157 	}
2158 
2159 	return ac_build_gep0(&ctx->ac, storage, vertexidx);
2160 }
2161 
2162 static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct radv_shader_context * ctx,LLVMValueRef gsthread,LLVMValueRef emitidx)2163 ngg_gs_emit_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef gsthread,
2164 		       LLVMValueRef emitidx)
2165 {
2166 	LLVMBuilderRef builder = ctx->ac.builder;
2167 	LLVMValueRef tmp;
2168 
2169 	tmp = LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false);
2170 	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
2171 	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
2172 	return ngg_gs_vertex_ptr(ctx, vertexidx);
2173 }
2174 
2175 static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct radv_shader_context * ctx,LLVMValueRef vertexptr,unsigned out_idx)2176 ngg_gs_get_emit_output_ptr(struct radv_shader_context *ctx, LLVMValueRef vertexptr,
2177 			   unsigned out_idx)
2178 {
2179 	LLVMValueRef gep_idx[3] = {
2180 		ctx->ac.i32_0, /* implied C-style array */
2181 		ctx->ac.i32_0, /* first struct entry */
2182 		LLVMConstInt(ctx->ac.i32, out_idx, false),
2183 	};
2184 	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
2185 }
2186 
2187 static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct radv_shader_context * ctx,LLVMValueRef vertexptr,unsigned stream)2188 ngg_gs_get_emit_primflag_ptr(struct radv_shader_context *ctx, LLVMValueRef vertexptr,
2189 			     unsigned stream)
2190 {
2191 	LLVMValueRef gep_idx[3] = {
2192 		ctx->ac.i32_0, /* implied C-style array */
2193 		ctx->ac.i32_1, /* second struct entry */
2194 		LLVMConstInt(ctx->ac.i32, stream, false),
2195 	};
2196 	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
2197 }
2198 
2199 static struct radv_stream_output *
radv_get_stream_output_by_loc(struct radv_streamout_info * so,unsigned location)2200 radv_get_stream_output_by_loc(struct radv_streamout_info *so, unsigned location)
2201 {
2202 	for (unsigned i = 0; i < so->num_outputs; ++i) {
2203 		if (so->outputs[i].location == location)
2204 			return &so->outputs[i];
2205 	}
2206 
2207 	return NULL;
2208 }
2209 
/* Emit the streamout stores for a single vertex (NGG path).
 *
 * \param so_buffer     per-buffer streamout descriptors
 * \param wg_offset_dw  per-buffer dword offset of this workgroup's window
 *                      into the buffer; NULL entries mean the buffer is
 *                      unused
 * \param stream        vertex stream to emit
 * \param offset_vtx    index of this vertex within the workgroup's window
 * \param vertexptr     LDS pointer to the vertex's stored output values
 */
static void build_streamout_vertex(struct radv_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct radv_streamout_info *so = &ctx->args->shader_info->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {0};
	LLVMValueRef tmp;

	/* Per-buffer byte offset: (wg_offset_dw + offset_vtx * stride) * 4. */
	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->ac.i32, so->strides[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* GS vertices are stored compacted in LDS (only components in
		 * the usage mask occupy a slot, tracked by out_idx), so first
		 * unpack each output for this stream...
		 */
		struct radv_shader_output_values outputs[AC_LLVM_MAX_OUTPUTS];
		unsigned noutput = 0;
		unsigned out_idx = 0;

		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->args->shader_info->gs.output_usage_mask[i];
			uint8_t output_stream = ctx->args->shader_info->gs.output_streams[i];

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			outputs[noutput].slot_name = i;
			outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;
			outputs[noutput].usage_mask = output_usage_mask;

			int length = util_last_bit(output_usage_mask);

			for (unsigned j = 0; j < length; j++, out_idx++) {
				if (!(output_usage_mask & (1 << j)))
					continue;

				tmp = ac_build_gep0(&ctx->ac, vertexptr,
						    LLVMConstInt(ctx->ac.i32, out_idx, false));
				outputs[noutput].values[j] = LLVMBuildLoad(builder, tmp, "");
			}

			for (unsigned j = length; j < 4; j++)
				outputs[noutput].values[j] = LLVMGetUndef(ctx->ac.f32);

			noutput++;
		}

		/* ...then emit those that are captured by streamout. */
		for (unsigned i = 0; i < noutput; i++) {
			struct radv_stream_output *output =
				radv_get_stream_output_by_loc(so, outputs[i].slot_name);

			if (!output ||
			    output->stream != stream)
				continue;

			struct radv_shader_output_values out = {0};

			for (unsigned j = 0; j < 4; j++) {
				out.values[j] = outputs[i].values[j];
			}

			radv_emit_stream_output(ctx, so_buffer, offset, output, &out);
		}
	} else {
		/* VS/TES: the LDS layout is 4 dwords per streamout output
		 * (indexed 4*i + comp), so load components directly.
		 */
		for (unsigned i = 0; i < so->num_outputs; ++i) {
			struct radv_stream_output *output =
				&ctx->args->shader_info->so.outputs[i];

			if (stream != output->stream)
				continue;

			struct radv_shader_output_values out = {0};

			for (unsigned comp = 0; comp < 4; comp++) {
				if (!(output->component_mask & (1 << comp)))
					continue;

				tmp = ac_build_gep0(&ctx->ac, vertexptr,
						    LLVMConstInt(ctx->ac.i32, 4 * i + comp, false));
				out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			}

			radv_emit_stream_output(ctx, so_buffer, offset, output, &out);
		}
	}
}
2304 
/* Inputs/outputs of build_streamout() for NGG streamout emission. */
struct ngg_streamout {
	/* Presumably the number of vertices per primitive (it is multiplied
	 * with buffer strides to form per-primitive sizes in
	 * build_streamout()) — TODO(review): confirm. */
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	/* Output */
	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};
2315 
2316 /**
2317  * Build streamout logic.
2318  *
2319  * Implies a barrier.
2320  *
2321  * Writes number of emitted primitives to gs_ngg_scratch[4:7].
2322  *
2323  * Clobbers gs_ngg_scratch[8:].
2324  */
build_streamout(struct radv_shader_context * ctx,struct ngg_streamout * nggso)2325 static void build_streamout(struct radv_shader_context *ctx,
2326 			    struct ngg_streamout *nggso)
2327 {
2328 	struct radv_streamout_info *so = &ctx->args->shader_info->so;
2329 	LLVMBuilderRef builder = ctx->ac.builder;
2330 	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->args->streamout_buffers);
2331 	LLVMValueRef tid = get_thread_id_in_tg(ctx);
2332 	LLVMValueRef cond, tmp, tmp2;
2333 	LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
2334 	LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
2335 	LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
2336 	LLVMValueRef so_buffer[4] = {0};
2337 	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
2338 					(nggso->vertices[2] ? 1 : 0);
2339 	LLVMValueRef prim_stride_dw[4] = {0};
2340 	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
2341 	int stream_for_buffer[4] = { -1, -1, -1, -1 };
2342 	unsigned bufmask_for_stream[4] = {0};
2343 	bool isgs = ctx->stage == MESA_SHADER_GEOMETRY;
2344 	unsigned scratch_emit_base = isgs ? 4 : 0;
2345 	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
2346 	unsigned scratch_offset_base = isgs ? 8 : 4;
2347 	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
2348 
2349 	ac_llvm_add_target_dep_function_attr(ctx->main_function,
2350 					     "amdgpu-gds-size", 256);
2351 
2352 	/* Determine the mapping of streamout buffers to vertex streams. */
2353 	for (unsigned i = 0; i < so->num_outputs; ++i) {
2354 		unsigned buf = so->outputs[i].buffer;
2355 		unsigned stream = so->outputs[i].stream;
2356 		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
2357 		stream_for_buffer[buf] = stream;
2358 		bufmask_for_stream[stream] |= 1 << buf;
2359 	}
2360 
2361 	for (unsigned buffer = 0; buffer < 4; ++buffer) {
2362 		if (stream_for_buffer[buffer] == -1)
2363 			continue;
2364 
2365 		assert(so->strides[buffer]);
2366 
2367 		LLVMValueRef stride_for_buffer =
2368 			LLVMConstInt(ctx->ac.i32, so->strides[buffer], false);
2369 		prim_stride_dw[buffer] =
2370 			LLVMBuildMul(builder, stride_for_buffer,
2371 				     nggso->num_vertices, "");
2372 		prim_stride_dw_vgpr = ac_build_writelane(
2373 			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
2374 			LLVMConstInt(ctx->ac.i32, buffer, false));
2375 
2376 		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, buffer, false);
2377 		so_buffer[buffer] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
2378 							  offset);
2379 	}
2380 
2381 	cond = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
2382 	ac_build_ifcc(&ctx->ac, cond, 5200);
2383 	{
2384 		LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
2385 		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");
2386 
2387 		/* Advance the streamout offsets in GDS. */
2388 		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
2389 		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
2390 
2391 		cond = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
2392 		ac_build_ifcc(&ctx->ac, cond, 5210);
2393 		{
2394 			/* Fetch the number of generated primitives and store
2395 			 * it in GDS for later use.
2396 			 */
2397 			if (isgs) {
2398 				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
2399 				tmp = LLVMBuildLoad(builder, tmp, "");
2400 			} else {
2401 				tmp = ac_build_writelane(&ctx->ac, ctx->ac.i32_0,
2402 						ngg_get_prim_cnt(ctx), ctx->ac.i32_0);
2403 			}
2404 			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);
2405 
2406 			unsigned swizzle[4];
2407 			int unused_stream = -1;
2408 			for (unsigned stream = 0; stream < 4; ++stream) {
2409 				if (!ctx->args->shader_info->gs.num_stream_output_components[stream]) {
2410 					unused_stream = stream;
2411 					break;
2412 				}
2413 			}
2414 			for (unsigned buffer = 0; buffer < 4; ++buffer) {
2415 				if (stream_for_buffer[buffer] >= 0) {
2416 					swizzle[buffer] = stream_for_buffer[buffer];
2417 				} else {
2418 					assert(unused_stream >= 0);
2419 					swizzle[buffer] = unused_stream;
2420 				}
2421 			}
2422 
2423 			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
2424 				swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
2425 			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
2426 
2427 			LLVMValueRef args[] = {
2428 				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
2429 				tmp,
2430 				ctx->ac.i32_0, // ordering
2431 				ctx->ac.i32_0, // scope
2432 				ctx->ac.i1false, // isVolatile
2433 				LLVMConstInt(ctx->ac.i32, 4 << 24, false), // OA index
2434 				ctx->ac.i1true, // wave release
2435 				ctx->ac.i1true, // wave done
2436 			};
2437 
2438 			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
2439 						 ctx->ac.i32, args, ARRAY_SIZE(args), 0);
2440 
2441 			/* Keep offsets in a VGPR for quick retrieval via readlane by
2442 			 * the first wave for bounds checking, and also store in LDS
2443 			 * for retrieval by all waves later. */
2444 			LLVMBuildStore(builder, tmp, offsets_vgpr);
2445 
2446 			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
2447 					    scratch_offset_basev, "");
2448 			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
2449 			LLVMBuildStore(builder, tmp, tmp2);
2450 		}
2451 		ac_build_endif(&ctx->ac, 5210);
2452 
2453 		/* Determine the max emit per buffer. This is done via the SALU, in part
2454 		 * because LLVM can't generate divide-by-multiply if we try to do this
2455 		 * via VALU with one lane per buffer.
2456 		 */
2457 		LLVMValueRef max_emit[4] = {0};
2458 		for (unsigned buffer = 0; buffer < 4; ++buffer) {
2459 			if (stream_for_buffer[buffer] == -1)
2460 				continue;
2461 
2462 			/* Compute the streamout buffer size in DWORD. */
2463 			LLVMValueRef bufsize_dw =
2464 				LLVMBuildLShr(builder,
2465 					LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
2466 					i32_2, "");
2467 
2468 			/* Load the streamout buffer offset from GDS. */
2469 			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
2470 			LLVMValueRef offset_dw =
2471 				ac_build_readlane(&ctx->ac, tmp,
2472 						LLVMConstInt(ctx->ac.i32, buffer, false));
2473 
2474 			/* Compute the remaining size to emit. */
2475 			LLVMValueRef remaining_dw =
2476 				LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
2477 			tmp = LLVMBuildUDiv(builder, remaining_dw,
2478 					    prim_stride_dw[buffer], "");
2479 
2480 			cond = LLVMBuildICmp(builder, LLVMIntULT,
2481 					     bufsize_dw, offset_dw, "");
2482 			max_emit[buffer] = LLVMBuildSelect(builder, cond,
2483 							   ctx->ac.i32_0, tmp, "");
2484 		}
2485 
2486 		/* Determine the number of emitted primitives per stream and fixup the
2487 		 * GDS counter if necessary.
2488 		 *
2489 		 * This is complicated by the fact that a single stream can emit to
2490 		 * multiple buffers (but luckily not vice versa).
2491 		 */
2492 		LLVMValueRef emit_vgpr = ctx->ac.i32_0;
2493 
2494 		for (unsigned stream = 0; stream < 4; ++stream) {
2495 			if (!ctx->args->shader_info->gs.num_stream_output_components[stream])
2496 				continue;
2497 
2498 			/* Load the number of generated primitives from GDS and
2499 			 * determine that number for the given stream.
2500 			 */
2501 			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
2502 			LLVMValueRef generated =
2503 				ac_build_readlane(&ctx->ac, tmp,
2504 						  LLVMConstInt(ctx->ac.i32, stream, false));
2505 
2506 
2507 			/* Compute the number of emitted primitives. */
2508 			LLVMValueRef emit = generated;
2509 			for (unsigned buffer = 0; buffer < 4; ++buffer) {
2510 				if (stream_for_buffer[buffer] == stream)
2511 					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
2512 			}
2513 
2514 			/* Store the number of emitted primitives for that
2515 			 * stream.
2516 			 */
2517 			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
2518 						       LLVMConstInt(ctx->ac.i32, stream, false));
2519 
2520 			/* Fixup the offset using a plain GDS atomic if we overflowed. */
2521 			cond = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
2522 			ac_build_ifcc(&ctx->ac, cond, 5221); /* scalar branch */
2523 			tmp = LLVMBuildLShr(builder,
2524 					    LLVMConstInt(ctx->ac.i32, bufmask_for_stream[stream], false),
2525 					    ac_get_thread_id(&ctx->ac), "");
2526 			tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
2527 			ac_build_ifcc(&ctx->ac, tmp, 5222);
2528 			{
2529 				tmp = LLVMBuildSub(builder, generated, emit, "");
2530 				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
2531 				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
2532 				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
2533 						   LLVMAtomicOrderingMonotonic, false);
2534 			}
2535 			ac_build_endif(&ctx->ac, 5222);
2536 			ac_build_endif(&ctx->ac, 5221);
2537 		}
2538 
2539 		/* Store the number of emitted primitives to LDS for later use. */
2540 		cond = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
2541 		ac_build_ifcc(&ctx->ac, cond, 5225);
2542 		{
2543 			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
2544 					   scratch_emit_basev, "");
2545 			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
2546 			LLVMBuildStore(builder, emit_vgpr, tmp);
2547 		}
2548 		ac_build_endif(&ctx->ac, 5225);
2549 	}
2550 	ac_build_endif(&ctx->ac, 5200);
2551 
2552 	/* Determine the workgroup-relative per-thread / primitive offset into
2553 	 * the streamout buffers */
2554 	struct ac_wg_scan primemit_scan[4] = {0};
2555 
2556 	if (isgs) {
2557 		for (unsigned stream = 0; stream < 4; ++stream) {
2558 			if (!ctx->args->shader_info->gs.num_stream_output_components[stream])
2559 				continue;
2560 
2561 			primemit_scan[stream].enable_exclusive = true;
2562 			primemit_scan[stream].op = nir_op_iadd;
2563 			primemit_scan[stream].src = nggso->prim_enable[stream];
2564 			primemit_scan[stream].scratch =
2565 				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
2566 					LLVMConstInt(ctx->ac.i32, 12 + 8 * stream, false));
2567 			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
2568 			primemit_scan[stream].numwaves = get_tgsize(ctx);
2569 			primemit_scan[stream].maxwaves = 8;
2570 			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
2571 		}
2572 	}
2573 
2574 	ac_build_s_barrier(&ctx->ac);
2575 
2576 	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
2577 	LLVMValueRef wgoffset_dw[4] = {0};
2578 
2579 	{
2580 		LLVMValueRef scratch_vgpr;
2581 
2582 		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
2583 		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");
2584 
2585 		for (unsigned buffer = 0; buffer < 4; ++buffer) {
2586 			if (stream_for_buffer[buffer] >= 0) {
2587 				wgoffset_dw[buffer] = ac_build_readlane(
2588 					&ctx->ac, scratch_vgpr,
2589 					LLVMConstInt(ctx->ac.i32, scratch_offset_base + buffer, false));
2590 			}
2591 		}
2592 
2593 		for (unsigned stream = 0; stream < 4; ++stream) {
2594 			if (ctx->args->shader_info->gs.num_stream_output_components[stream]) {
2595 				nggso->emit[stream] = ac_build_readlane(
2596 					&ctx->ac, scratch_vgpr,
2597 					LLVMConstInt(ctx->ac.i32, scratch_emit_base + stream, false));
2598 			}
2599 		}
2600 	}
2601 
2602 	/* Write out primitive data */
2603 	for (unsigned stream = 0; stream < 4; ++stream) {
2604 		if (!ctx->args->shader_info->gs.num_stream_output_components[stream])
2605 			continue;
2606 
2607 		if (isgs) {
2608 			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
2609 		} else {
2610 			primemit_scan[stream].result_exclusive = tid;
2611 		}
2612 
2613 		cond = LLVMBuildICmp(builder, LLVMIntULT,
2614 				    primemit_scan[stream].result_exclusive,
2615 				    nggso->emit[stream], "");
2616 		cond = LLVMBuildAnd(builder, cond, nggso->prim_enable[stream], "");
2617 		ac_build_ifcc(&ctx->ac, cond, 5240);
2618 		{
2619 			LLVMValueRef offset_vtx =
2620 				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
2621 					     nggso->num_vertices, "");
2622 
2623 			for (unsigned i = 0; i < max_num_vertices; ++i) {
2624 				cond = LLVMBuildICmp(builder, LLVMIntULT,
2625 						    LLVMConstInt(ctx->ac.i32, i, false),
2626 						    nggso->num_vertices, "");
2627 				ac_build_ifcc(&ctx->ac, cond, 5241);
2628 				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
2629 						       stream, offset_vtx, nggso->vertices[i]);
2630 				ac_build_endif(&ctx->ac, 5241);
2631 				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->ac.i32_1, "");
2632 			}
2633 		}
2634 		ac_build_endif(&ctx->ac, 5240);
2635 	}
2636 }
2637 
ngg_nogs_vertex_size(struct radv_shader_context * ctx)2638 static unsigned ngg_nogs_vertex_size(struct radv_shader_context *ctx)
2639 {
2640 	unsigned lds_vertex_size = 0;
2641 
2642 	if (ctx->args->shader_info->so.num_outputs)
2643 		lds_vertex_size = 4 * ctx->args->shader_info->so.num_outputs + 1;
2644 
2645 	return lds_vertex_size;
2646 }
2647 
2648 /**
2649  * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
2650  * for the vertex outputs.
2651  */
ngg_nogs_vertex_ptr(struct radv_shader_context * ctx,LLVMValueRef vtxid)2652 static LLVMValueRef ngg_nogs_vertex_ptr(struct radv_shader_context *ctx,
2653 					LLVMValueRef vtxid)
2654 {
2655 	/* The extra dword is used to avoid LDS bank conflicts. */
2656 	unsigned vertex_size = ngg_nogs_vertex_size(ctx);
2657 	LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, vertex_size);
2658 	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
2659 	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
2660 	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
2661 }
2662 
2663 static void
handle_ngg_outputs_post_1(struct radv_shader_context * ctx)2664 handle_ngg_outputs_post_1(struct radv_shader_context *ctx)
2665 {
2666 	struct radv_streamout_info *so = &ctx->args->shader_info->so;
2667 	LLVMBuilderRef builder = ctx->ac.builder;
2668 	LLVMValueRef vertex_ptr = NULL;
2669 	LLVMValueRef tmp, tmp2;
2670 
2671 	assert((ctx->stage == MESA_SHADER_VERTEX ||
2672 	        ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->args->is_gs_copy_shader);
2673 
2674 	if (!ctx->args->shader_info->so.num_outputs)
2675 		return;
2676 
2677 	vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
2678 
2679 	for (unsigned i = 0; i < so->num_outputs; ++i) {
2680 		struct radv_stream_output *output =
2681 			&ctx->args->shader_info->so.outputs[i];
2682 
2683 		unsigned loc = output->location;
2684 
2685 		for (unsigned comp = 0; comp < 4; comp++) {
2686 			if (!(output->component_mask & (1 << comp)))
2687 				continue;
2688 
2689 			tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
2690 					    LLVMConstInt(ctx->ac.i32, 4 * i + comp, false));
2691 			tmp2 = LLVMBuildLoad(builder,
2692 					     ctx->abi.outputs[4 * loc + comp], "");
2693 			tmp2 = ac_to_integer(&ctx->ac, tmp2);
2694 			LLVMBuildStore(builder, tmp2, tmp);
2695 		}
2696 	}
2697 }
2698 
/* Second phase of NGG (non-GS) output handling for VS/TES:
 *  - runs streamout (if enabled) via build_streamout(),
 *  - propagates the primitive ID from GS threads to ES threads through LDS,
 *  - sends the GS allocation request,
 *  - exports primitive connectivity data,
 *  - exports per-vertex position/parameter data (including primitive ID).
 *
 * Emission order of the LLVM IR below is significant; do not reorder calls.
 */
static void
handle_ngg_outputs_post_2(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	assert((ctx->stage == MESA_SHADER_VERTEX ||
	        ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->args->is_gs_copy_shader);

	/* merged_wave_info packs per-wave counts: bits [0:8) = vertex count,
	 * bits [8:16) = primitive count. */
	LLVMValueRef prims_in_wave = ac_unpack_param(&ctx->ac,
						     ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 8, 8);
	LLVMValueRef vtx_in_wave = ac_unpack_param(&ctx->ac,
						   ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 0, 8);
	/* A lane is a "GS thread" if it owns a primitive, an "ES thread" if it
	 * owns a vertex; a lane can be both. */
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	/* Vertex indices of this primitive: two 16-bit halves of
	 * gs_vtx_offset[0], plus the low half of gs_vtx_offset[2]. */
	LLVMValueRef vtxindex[] = {
		ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[0]), 0, 16),
		ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[0]), 16, 16),
		ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[2]), 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->stage == MESA_SHADER_VERTEX) {
		/* outprim encodes the output primitive; +1 gives the vertex
		 * count (0 -> points=1, 1 -> lines=2, 2 -> triangles=3). */
		LLVMValueRef outprim_val =
			LLVMConstInt(ctx->ac.i32,
				     ctx->args->options->key.vs.outprim, false);
		num_vertices_val = LLVMBuildAdd(builder, outprim_val,
						ctx->ac.i32_1, "");
		num_vertices = 3; /* TODO: optimize for points & lines */
	} else {
		assert(ctx->stage == MESA_SHADER_TESS_EVAL);

		if (ctx->shader->info.tess.point_mode)
			num_vertices = 1;
		else if (ctx->shader->info.tess.primitive_mode == GL_ISOLINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->ac.i32, num_vertices, false);
	}

	/* Streamout */
	if (ctx->args->shader_info->so.num_outputs) {
		struct ngg_streamout nggso = {0};

		nggso.num_vertices = num_vertices_val;
		/* Non-GS shaders only write stream 0; GS threads enable prims. */
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
	}

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->stage == MESA_SHADER_VERTEX &&
	    ctx->args->options->key.vs_common_out.export_prim_id) {
		/* Wait until streamout is done reading the LDS vertex data
		 * before reusing the ES/GS ring for primitive IDs. */
		if (ctx->args->shader_info->so.num_outputs)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		/* NOTE(review): hard-coded to vertex 0 here — presumably the
		 * first-vertex convention; confirm against PA state setup. */
		LLVMValueRef provoking_vtx_in_prim =
			LLVMConstInt(ctx->ac.i32, 0, false);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args->ac.gs_prim_id),
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	/* TODO: primitive culling */

	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* TODO: streamout queries */
	/* Export primitive data to the index buffer.
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
	{
		struct ac_ngg_prim prim = {0};

		if (ctx->args->options->key.vs_common_out.as_ngg_passthrough) {
			/* Passthrough mode: export the packed HW-provided word
			 * directly instead of rebuilding indices/edgeflags. */
			prim.passthrough = ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[0]);
		} else {
			prim.num_vertices = num_vertices;
			prim.isnull = ctx->ac.i1false;
			memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

			/* Edge flags live in bits [8+i] of gs_invocation_id. */
			for (unsigned i = 0; i < num_vertices; ++i) {
				tmp = LLVMBuildLShr(builder,
						    ac_get_arg(&ctx->ac, ctx->args->ac.gs_invocation_id),
						    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
				prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
			}
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 6001);

	/* Export per-vertex data (positions and parameters). */
	ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
	{
		struct radv_vs_output_info *outinfo =
			ctx->stage == MESA_SHADER_TESS_EVAL ?
			&ctx->args->shader_info->tes.outinfo : &ctx->args->shader_info->vs.outinfo;

		/* Exporting the primitive ID is handled below. */
		/* TODO: use the new VS export path */
		handle_vs_outputs_post(ctx, false,
				       ctx->args->options->key.vs_common_out.export_clip_dists,
				       outinfo);

		if (ctx->args->options->key.vs_common_out.export_prim_id) {
			unsigned param_count = outinfo->param_exports;
			LLVMValueRef values[4];

			if (ctx->stage == MESA_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				/* Read back the primitive ID written above for
				 * this vertex by its GS thread. */
				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->stage == MESA_SHADER_TESS_EVAL);
				values[0] = ac_get_arg(&ctx->ac, ctx->args->ac.tes_patch_id);
			}

			values[0] = ac_to_float(&ctx->ac, values[0]);
			for (unsigned j = 1; j < 4; j++)
				values[j] = ctx->ac.f32_0;

			radv_export_param(ctx, param_count, values, 0x1);

			outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
			outinfo->param_exports = param_count;
		}
	}
	ac_build_endif(&ctx->ac, 6002);
}
2861 
gfx10_ngg_gs_emit_prologue(struct radv_shader_context * ctx)2862 static void gfx10_ngg_gs_emit_prologue(struct radv_shader_context *ctx)
2863 {
2864 	/* Zero out the part of LDS scratch that is used to accumulate the
2865 	 * per-stream generated primitive count.
2866 	 */
2867 	LLVMBuilderRef builder = ctx->ac.builder;
2868 	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
2869 	LLVMValueRef tid = get_thread_id_in_tg(ctx);
2870 	LLVMBasicBlockRef merge_block;
2871 	LLVMValueRef cond;
2872 
2873 	LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->ac.builder));
2874 	LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
2875 	merge_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
2876 
2877 	cond = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
2878 	LLVMBuildCondBr(ctx->ac.builder, cond, then_block, merge_block);
2879 	LLVMPositionBuilderAtEnd(ctx->ac.builder, then_block);
2880 
2881 	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
2882 	LLVMBuildStore(builder, ctx->ac.i32_0, ptr);
2883 
2884 	LLVMBuildBr(ctx->ac.builder, merge_block);
2885 	LLVMPositionBuilderAtEnd(ctx->ac.builder, merge_block);
2886 
2887 	ac_build_s_barrier(&ctx->ac);
2888 }
2889 
/* First NGG GS epilogue phase:
 *  1. clears the primitive flags of any vertex slots this GS thread did not
 *     emit (so later phases can treat unwritten slots as dead), and
 *  2. accumulates the per-stream generated-primitive counts of all waves
 *     into the LDS scratch counters zeroed by gfx10_ngg_gs_emit_prologue.
 */
static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		unsigned num_components;

		num_components =
			ctx->args->shader_info->gs.num_stream_output_components[stream];
		if (!num_components)
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		/* Loop from gs_next_vertex (first unwritten slot) up to
		 * vertices_out, clearing each slot's primitive flag. */
		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
			LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		/* Advance gs_next_vertex so the loop terminates. */
		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMBuildStore(builder, i8_0,
			       ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		unsigned num_components;

		num_components =
			ctx->args->shader_info->gs.num_stream_output_components[stream];
		if (!num_components)
			continue;

		/* Wave-wide reduction first, then a single atomic per wave
		 * (done by lane 0) into the per-stream LDS counter. */
		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->ac.i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}
}
2957 
/* Second NGG GS epilogue phase (runs after gfx10_ngg_gs_emit_epilogue_1):
 *  - streamout from the GS vertex data in LDS,
 *  - shader-query accumulation of generated primitives into GDS,
 *  - vertex liveness determination + workgroup exclusive scan to build a
 *    compaction permutation,
 *  - GS allocation request, primitive export, and compacted vertex export.
 *
 * Emission order of the LLVM IR below is significant; do not reorder calls.
 */
static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
{
	const unsigned verts_per_prim = si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp, tmp2;

	/* Wait for the epilogue_1 LDS writes (primflags, counters). */
	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* Streamout */
	if (ctx->args->shader_info->so.num_outputs) {
		struct ngg_streamout nggso = {0};

		nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);

		/* A primitive is enabled if its flag bit 0 is set and this
		 * thread is within the emitted-primitive range. */
		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!ctx->args->shader_info->gs.num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder,
					    ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		/* The primitive ending at thread tid uses the preceding
		 * verts_per_prim vertex slots. */
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	tmp = ac_get_arg(&ctx->ac, ctx->args->ngg_gs_state);
	tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
	ac_build_ifcc(&ctx->ac, tmp, 5109);
	/* Threads 0..3 each hold one stream's generated-prim count. */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
			    LLVMConstInt(ctx->ac.i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5110);
	{
		tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");

		ac_llvm_add_target_dep_function_attr(ctx->main_function,
						     "amdgpu-gds-size", 256);

		LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");

		const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";

		/* Use a plain GDS atomic to accumulate the number of generated
		 * primitives.
		 */
		ac_build_atomic_rmw(&ctx->ac, LLVMAtomicRMWBinOpAdd, gdsbase,
				    tmp, sync_scope);
	}
	ac_build_endif(&ctx->ac, 5110);
	ac_build_endif(&ctx->ac, 5109);

	/* TODO: culling */

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		/* A vertex is live if any of the up-to verts_per_prim
		 * primitives that reference it is live. */
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			tmp = LLVMBuildLoad(builder,
					    ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			/* OR the liveness into the accumulator. Note: the ','
			 * below is the comma operator (upstream quirk); both
			 * calls still execute in order, same as with ';'. */
			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, ""),
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {0};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->ac.i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2,
			       ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		LLVMValueRef flags;
		struct ac_ngg_prim prim = {0};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		flags = LLVMBuildLoad(builder,
				      ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
		/* Flag bit 0 set => primitive complete; otherwise export null. */
		prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->ac.i1, ""), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		/* Geometry shaders output triangle strips, but NGG expects
		 * triangles. We need to change the vertex order for odd
		 * triangles to get correct front/back facing by swapping 2
		 * vertex indices, but we also have to keep the provoking
		 * vertex in the same place.
		 */
		if (verts_per_prim == 3) {
			/* Flag bit 1 marks odd triangles (set at emit time). */
			LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
			is_odd = LLVMBuildTrunc(builder, is_odd, ctx->ac.i1, "");

			struct ac_ngg_prim in = prim;
			prim.index[0] = in.index[0];
			prim.index[1] = LLVMBuildSelect(builder, is_odd,
							in.index[2], in.index[1], "");
			prim.index[2] = LLVMBuildSelect(builder, is_odd,
							in.index[1], in.index[2], "");
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct radv_vs_output_info *outinfo = &ctx->args->shader_info->vs.outinfo;
		bool export_view_index = ctx->args->options->key.has_multiview_view_index;
		struct radv_shader_output_values *outputs;
		unsigned noutput = 0;

		/* Allocate a temporary array for the output values. */
		/* NOTE(review): calloc result is not NULL-checked, and it is
		 * released with FREE() below — presumably the util macro is
		 * plain free() here; confirm against u_memory.h conventions. */
		unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_view_index;
		outputs = calloc(num_outputs, sizeof(outputs[0]));

		memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
		       sizeof(outinfo->vs_output_param_offset));
		outinfo->pos_exports = 0;

		/* Follow the compaction permutation stored in primflag
		 * stream 1 to find the source vertex for this export slot. */
		tmp = ngg_gs_vertex_ptr(ctx, tid);
		tmp = LLVMBuildLoad(builder,
				    ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->args->shader_info->gs.output_usage_mask[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)))
				continue;

			outputs[noutput].slot_name = i;
			outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;
			outputs[noutput].usage_mask = output_usage_mask;

			for (unsigned j = 0; j < length; j++, out_idx++) {
				if (!(output_usage_mask & (1 << j)))
					continue;

				tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
				tmp = LLVMBuildLoad(builder, tmp, "");

				/* 16-bit outputs were stored zero-extended to
				 * 32 bits; truncate back before conversion. */
				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					tmp = ac_to_integer(&ctx->ac, tmp);
					tmp = LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i16, "");
				}

				outputs[noutput].values[j] = ac_to_float(&ctx->ac, tmp);
			}

			/* Pad unused components with undef. */
			for (unsigned j = length; j < 4; j++)
				outputs[noutput].values[j] = LLVMGetUndef(ctx->ac.f32);

			noutput++;
		}

		/* Export ViewIndex. */
		if (export_view_index) {
			outputs[noutput].slot_name = VARYING_SLOT_LAYER;
			outputs[noutput].slot_index = 0;
			outputs[noutput].usage_mask = 0x1;
			outputs[noutput].values[0] =
				ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ac.view_index));
			for (unsigned j = 1; j < 4; j++)
				outputs[noutput].values[j] = ctx->ac.f32_0;
			noutput++;
		}

		radv_llvm_export_vs(ctx, outputs, noutput, outinfo,
				    ctx->args->options->key.vs_common_out.export_clip_dists);
		FREE(outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}
3225 
/* NGG GS: store one emitted vertex for the given stream into the LDS
 * vertex-emit area and update the per-stream bookkeeping (emitted vertex
 * count, per-vertex primitive flags, generated-primitive counter).
 *
 * addrs points at the output temporaries, 4 channel slots per output.
 */
static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef vertexidx,
				     LLVMValueRef *addrs)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->args->shader_info->gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->args->shader_info->gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		/* Only store outputs that are written and belong to this stream. */
		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++, out_idx++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			/* Widen (zext) or bitcast the value into the 32-bit LDS slot. */
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			LLVMBuildStore(builder, out_val,
				       ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
		}
	}
	assert(out_idx * 4 <= ctx->args->shader_info->gs.gsvs_vertex_size);

	/* Store the current number of emitted vertices to zero out remaining
	 * primitive flags in case the geometry shader doesn't emit the maximum
	 * number of vertices.
	 */
	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	/* Since the geometry shader emits triangle strips, we need to
	 * track which primitive is odd and swap vertex indices to get
	 * the correct vertex order.
	 */
	LLVMValueRef is_odd = ctx->ac.i1false;
	if (stream == 0 &&
	    si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive) == 3) {
		tmp = LLVMBuildAnd(builder, curverts, ctx->ac.i32_1, "");
		is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->ac.i32_1, "");
	}

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	/* The per-vertex primitive flag encoding:
	 *   bit 0: whether this vertex finishes a primitive
	 *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
	 */
	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	tmp = LLVMBuildOr(builder, tmp,
			  LLVMBuildShl(builder,
				       LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
				       ctx->ac.i8_1, ""), "");
	LLVMBuildStore(builder, tmp,
		       ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

	/* Accumulate the number of completed primitives for this stream. */
	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
}
3308 
/* TCS epilogue: invocation 0 of each patch reads the tessellation levels
 * the patch wrote to LDS and stores them to the tess-factor ring buffer
 * (plus the offchip buffer when the TES itself reads the factors).
 */
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	LLVMValueRef tcs_rel_ids = ac_get_arg(&ctx->ac, ctx->args->ac.tcs_rel_ids);
	/* tcs_rel_ids packs the invocation id in bits [8:13) and the
	 * relative patch id in bits [0:8). */
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, tcs_rel_ids, 0, 8);
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;
	/* Make sure all invocations have finished writing tess levels to LDS. */
	ac_emit_barrier(&ctx->ac, ctx->stage);

	/* Per-domain tess factor layout; stride is the total dword count. */
	switch (ctx->args->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	/* Only invocation 0 of each patch writes the factors. */
	ac_build_ifcc(&ctx->ac,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""), 6503);

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	/* LDS addresses of the inner/outer tess level outputs (dword units). */
	if (inner_comps) {
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, ctx->tcs_tess_lvl_inner * 4, false), "");
	}

	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, ctx->tcs_tess_lvl_outer * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	// LINES reversal
	if (ctx->args->options->key.tcs.primitive_mode == GL_ISOLINES) {
		/* Isolines store the two outer factors in swapped order. */
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);


	buffer = ctx->hs_ring_tess_factor;
	tf_base = ac_get_arg(&ctx->ac, ctx->args->tess_factor_offset);
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->ac.chip_class <= GFX8) {
		/* Only the first patch of the threadgroup writes the control word. */
		ac_build_ifcc(&ctx->ac,
		                LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
		                              rel_patch_id, ctx->ac.i32_0, ""), 6504);

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, ac_glc);
		tf_offset += 4;

		ac_build_endif(&ctx->ac, 6504);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, ac_glc);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, ac_glc);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->args->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;

		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, ctx->tcs_tess_lvl_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ac_get_arg(&ctx->ac, ctx->args->oc_lds),
					    0, ac_glc);
		if (inner_comps) {
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
								     LLVMConstInt(ctx->ac.i32, ctx->tcs_tess_lvl_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
						    inner_comps, tf_inner_offset,
						    ac_get_arg(&ctx->ac, ctx->args->oc_lds),
						    0, ac_glc);
		}
	}

	ac_build_endif(&ctx->ac, 6503);
}
3449 
/* TCS epilogue: the only post-processing needed is writing the
 * tessellation factors out to the TF ring / offchip buffer. */
static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}
3455 
3456 static bool
si_export_mrt_color(struct radv_shader_context * ctx,LLVMValueRef * color,unsigned index,struct ac_export_args * args)3457 si_export_mrt_color(struct radv_shader_context *ctx,
3458 		    LLVMValueRef *color, unsigned index,
3459 		    struct ac_export_args *args)
3460 {
3461 	/* Export */
3462 	si_llvm_init_export_args(ctx, color, 0xf,
3463 				 V_008DFC_SQ_EXP_MRT + index, args);
3464 	if (!args->enabled_channels)
3465 		return false; /* unnecessary NULL export */
3466 
3467 	return true;
3468 }
3469 
/* Build and emit the MRTZ (depth/stencil/sample-mask) export.  Any of
 * the three values may be NULL when the shader does not write it
 * (see handle_fs_outputs_post). */
static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}
3481 
3482 static void
handle_fs_outputs_post(struct radv_shader_context * ctx)3483 handle_fs_outputs_post(struct radv_shader_context *ctx)
3484 {
3485 	unsigned index = 0;
3486 	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
3487 	struct ac_export_args color_args[8];
3488 
3489 	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
3490 		LLVMValueRef values[4];
3491 
3492 		if (!(ctx->output_mask & (1ull << i)))
3493 			continue;
3494 
3495 		if (i < FRAG_RESULT_DATA0)
3496 			continue;
3497 
3498 		for (unsigned j = 0; j < 4; j++)
3499 			values[j] = ac_to_float(&ctx->ac,
3500 						radv_load_output(ctx, i, j));
3501 
3502 		bool ret = si_export_mrt_color(ctx, values,
3503 					       i - FRAG_RESULT_DATA0,
3504 					       &color_args[index]);
3505 		if (ret)
3506 			index++;
3507 	}
3508 
3509 	/* Process depth, stencil, samplemask. */
3510 	if (ctx->args->shader_info->ps.writes_z) {
3511 		depth = ac_to_float(&ctx->ac,
3512 				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
3513 	}
3514 	if (ctx->args->shader_info->ps.writes_stencil) {
3515 		stencil = ac_to_float(&ctx->ac,
3516 				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
3517 	}
3518 	if (ctx->args->shader_info->ps.writes_sample_mask) {
3519 		samplemask = ac_to_float(&ctx->ac,
3520 					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
3521 	}
3522 
3523 	/* Set the DONE bit on last non-null color export only if Z isn't
3524 	 * exported.
3525 	 */
3526 	if (index > 0 &&
3527 	    !ctx->args->shader_info->ps.writes_z &&
3528 	    !ctx->args->shader_info->ps.writes_stencil &&
3529 	    !ctx->args->shader_info->ps.writes_sample_mask) {
3530 		unsigned last = index - 1;
3531 
3532                color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
3533                color_args[last].done = 1; /* DONE bit */
3534 	}
3535 
3536 	/* Export PS outputs. */
3537 	for (unsigned i = 0; i < index; i++)
3538 		ac_build_export(&ctx->ac, &color_args[i]);
3539 
3540 	if (depth || stencil || samplemask)
3541 		radv_export_mrt_z(ctx, depth, stencil, samplemask);
3542 	else if (!index)
3543 		ac_build_export_null(&ctx->ac);
3544 }
3545 
/* Geometry shader epilogue: signal that the GS has finished emitting. */
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	if (ctx->args->options->key.vs_common_out.as_ngg) {
		/* NGG path: no legacy GS_DONE message. */
		gfx10_ngg_gs_emit_epilogue_1(ctx);
		return;
	}

	/* Release fence before GS_DONE — NOTE(review): presumably makes
	 * prior ring stores visible on GFX10's legacy GS path; confirm. */
	if (ctx->ac.chip_class >= GFX10)
		LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}
3559 
/* ac_shader_abi::emit_outputs callback: emit the stage-specific output
 * code / epilogue at the end of the translated shader body.  The
 * max_outputs and addrs parameters of the ABI are unused here; outputs
 * are read back through ctx instead. */
static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		/* A VS can run as LS (before TCS), ES (before GS), NGG, or as
		 * the hardware VS doing the final exports. */
		if (ctx->args->options->key.vs_common_out.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->args->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->args->shader_info->vs.es_info);
		else if (ctx->args->options->key.vs_common_out.as_ngg)
			handle_ngg_outputs_post_1(ctx);
		else
			handle_vs_outputs_post(ctx, ctx->args->options->key.vs_common_out.export_prim_id,
					       ctx->args->options->key.vs_common_out.export_clip_dists,
					       &ctx->args->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		/* A TES can run as ES (before GS), NGG, or as the hardware VS. */
		if (ctx->args->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->args->shader_info->tes.es_info);
		else if (ctx->args->options->key.vs_common_out.as_ngg)
			handle_ngg_outputs_post_1(ctx);
		else
			handle_vs_outputs_post(ctx, ctx->args->options->key.vs_common_out.export_prim_id,
					       ctx->args->options->key.vs_common_out.export_clip_dists,
					       &ctx->args->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}
3602 
ac_llvm_finalize_module(struct radv_shader_context * ctx,LLVMPassManagerRef passmgr,const struct radv_nir_compiler_options * options)3603 static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
3604 				    LLVMPassManagerRef passmgr,
3605 				    const struct radv_nir_compiler_options *options)
3606 {
3607 	LLVMRunPassManager(passmgr, ctx->ac.module);
3608 	LLVMDisposeBuilder(ctx->ac.builder);
3609 
3610 	ac_llvm_context_dispose(&ctx->ac);
3611 }
3612 
3613 static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context * ctx)3614 ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
3615 {
3616 	struct radv_vs_output_info *outinfo;
3617 
3618 	switch (ctx->stage) {
3619 	case MESA_SHADER_FRAGMENT:
3620 	case MESA_SHADER_COMPUTE:
3621 	case MESA_SHADER_TESS_CTRL:
3622 	case MESA_SHADER_GEOMETRY:
3623 		return;
3624 	case MESA_SHADER_VERTEX:
3625 		if (ctx->args->options->key.vs_common_out.as_ls ||
3626 		    ctx->args->options->key.vs_common_out.as_es)
3627 			return;
3628 		outinfo = &ctx->args->shader_info->vs.outinfo;
3629 		break;
3630 	case MESA_SHADER_TESS_EVAL:
3631 		if (ctx->args->options->key.vs_common_out.as_es)
3632 			return;
3633 		outinfo = &ctx->args->shader_info->tes.outinfo;
3634 		break;
3635 	default:
3636 		unreachable("Unhandled shader type");
3637 	}
3638 
3639 	ac_optimize_vs_outputs(&ctx->ac,
3640 			       ctx->main_function,
3641 			       outinfo->vs_output_param_offset,
3642 			       VARYING_SLOT_MAX, 0,
3643 			       &outinfo->param_exports);
3644 }
3645 
/* Load the ring buffer descriptors needed by the current stage from the
 * ring_offsets descriptor table, and patch the GSVS ring descriptor for
 * the swizzled per-stream layout used by the legacy GS path. */
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	/* The ESGS ring descriptor is only needed here on <= GFX8. */
	if (ctx->args->options->chip_class <= GFX8 &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->args->options->key.vs_common_out.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	/* The GS copy shader reads the GSVS ring from the VS side. */
	if (ctx->args->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = ctx->ac.wave_size;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->args->shader_info->gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->shader->info.gs.vertices_out;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			/* Add this stream's offset to the 64-bit base address
			 * held in the first two dwords of the descriptor. */
			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * ctx->ac.wave_size;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			/* Patch the STRIDE field into dword1 of the descriptor. */
			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			/* dword2 = num_records (one wave's worth of elements). */
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}
3740 
3741 unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,gl_shader_stage stage,const struct nir_shader * nir)3742 radv_nir_get_max_workgroup_size(enum chip_class chip_class,
3743 				gl_shader_stage stage,
3744 				const struct nir_shader *nir)
3745 {
3746 	const unsigned backup_sizes[] = {chip_class >= GFX9 ? 128 : 64, 1, 1};
3747 	unsigned sizes[3];
3748 	for (unsigned i = 0; i < 3; i++)
3749 		sizes[i] = nir ? nir->info.cs.local_size[i] : backup_sizes[i];
3750 	return radv_get_max_workgroup_size(chip_class, stage, sizes);
3751 }
3752 
/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	/* Bits [8:16) of merged_wave_info — assumed to be the HS thread
	 * count per the workaround comment above; TODO confirm against the
	 * merged_wave_info layout.  When zero, the LS inputs were delivered
	 * in the TCS VGPR slots, so select them from there instead. */
	LLVMValueRef count =
		ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
	                                      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty,
					       ac_get_arg(&ctx->ac, ctx->args->rel_auto_id),
					       ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty,
					   ac_get_arg(&ctx->ac, ctx->args->ac.tcs_rel_ids),
					   ctx->rel_auto_id,
					   "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty,
						 ac_get_arg(&ctx->ac, ctx->args->ac.tcs_patch_id),
						 ctx->abi.vertex_id, "");
}
3771 
prepare_gs_input_vgprs(struct radv_shader_context * ctx,bool merged)3772 static void prepare_gs_input_vgprs(struct radv_shader_context *ctx, bool merged)
3773 {
3774 	if (merged) {
3775 		for(int i = 5; i >= 0; --i) {
3776 			ctx->gs_vtx_offset[i] =
3777 				ac_unpack_param(&ctx->ac,
3778 						ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[i & ~1]),
3779 							   (i & 1) * 16, 16);
3780 		}
3781 
3782 		ctx->gs_wave_id = ac_unpack_param(&ctx->ac,
3783 						  ac_get_arg(&ctx->ac, ctx->args->merged_wave_info),
3784 						  16, 8);
3785 	} else {
3786 		for (int i = 0; i < 6; i++)
3787 			ctx->gs_vtx_offset[i] = ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[i]);
3788 		ctx->gs_wave_id = ac_get_arg(&ctx->ac, ctx->args->gs_wave_id);
3789 	}
3790 }
3791 
3792 /* Ensure that the esgs ring is declared.
3793  *
3794  * We declare it with 64KB alignment as a hint that the
3795  * pointer value will always be 0.
3796  */
declare_esgs_ring(struct radv_shader_context * ctx)3797 static void declare_esgs_ring(struct radv_shader_context *ctx)
3798 {
3799 	if (ctx->esgs_ring)
3800 		return;
3801 
3802 	assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));
3803 
3804 	ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
3805 		ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
3806 		"esgs_ring",
3807 		AC_ADDR_SPACE_LDS);
3808 	LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
3809 	LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
3810 }
3811 
/* Translate shader_count NIR shaders (a single stage, or two merged
 * stages on GFX9+) into one LLVM module.  The last shader in the array
 * determines the hardware stage of the generated function.  Returns the
 * module; the caller owns compiling/disposing it. */
static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                                       struct nir_shader *const *shaders,
                                       int shader_count,
                                       const struct radv_shader_args *args)
{
	struct radv_shader_context ctx = {0};
	ctx.args = args;

	enum ac_float_mode float_mode = AC_FLOAT_MODE_DEFAULT;

	if (args->shader_info->float_controls_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) {
		float_mode = AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO;
	}

	ac_llvm_context_init(&ctx.ac, ac_llvm, args->options->chip_class,
			     args->options->family, float_mode,
			     args->shader_info->wave_size,
			     args->shader_info->ballot_bit_size);
	ctx.context = ctx.ac.context;

	/* Workgroup size of a merged shader is the maximum over its parts. */
	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
		                              radv_nir_get_max_workgroup_size(args->options->chip_class,
									      shaders[i]->info.stage,
									      shaders[i]));
	}

	if (ctx.ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(shaders[0]->info.stage) &&
		    args->options->key.vs_common_out.as_ngg) {
			ctx.max_workgroup_size = 128;
		}
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2);

	/* Hook up the radv implementations of the ABI callbacks used by the
	 * shared NIR-to-LLVM translation code. */
	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex_with_counter = visit_emit_vertex_with_counter;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.robust_buffer_access = args->options->robust_buffer_access;

	bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) &&  args->options->key.vs_common_out.as_ngg;
	if (shader_count >= 2 || is_ngg)
		ac_init_exec_full_mask(&ctx.ac);

	if (args->ac.vertex_id.used)
		ctx.abi.vertex_id = ac_get_arg(&ctx.ac, args->ac.vertex_id);
	if (args->rel_auto_id.used)
		ctx.rel_auto_id = ac_get_arg(&ctx.ac, args->rel_auto_id);
	if (args->ac.instance_id.used)
		ctx.abi.instance_id = ac_get_arg(&ctx.ac, args->ac.instance_id);

	if (args->options->has_ls_vgpr_init_bug &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	if (is_ngg) {
		/* Declare scratch space base for streamout and vertex
		 * compaction. Whether space is actually allocated is
		 * determined during linking / PM4 creation.
		 *
		 * Add an extra dword per vertex to ensure an odd stride, which
		 * avoids bank conflicts for SoA accesses.
		 */
		if (!args->options->key.vs_common_out.as_ngg_passthrough)
			declare_esgs_ring(&ctx);

		/* This is really only needed when streamout and / or vertex
		 * compaction is enabled.
		 */
		if (args->shader_info->so.num_outputs) {
			LLVMTypeRef asi32 = LLVMArrayType(ctx.ac.i32, 8);
			ctx.gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx.ac.module,
				asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
			LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(asi32));
			LLVMSetAlignment(ctx.gs_ngg_scratch, 4);
		}
	}

	/* Translate each shader part in pipeline order. */
	for(int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.shader = shaders[i];
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			/* Per-stream emitted-vertex counters.
			 * NOTE(review): the inner loop variable shadows the
			 * outer shader index 'i'; harmless but -Wshadow. */
			for (int i = 0; i < 4; i++) {
				ctx.gs_next_vertex[i] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			if (args->options->key.vs_common_out.as_ngg) {
				for (unsigned i = 0; i < 4; ++i) {
					ctx.gs_curprim_verts[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
					ctx.gs_generated_prims[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
				}

				/* LDS scratch: 8 dwords, 44 when streamout needs it. */
				unsigned scratch_size = 8;
				if (args->shader_info->so.num_outputs)
					scratch_size = 44;

				LLVMTypeRef ai32 = LLVMArrayType(ctx.ac.i32, scratch_size);
				ctx.gs_ngg_scratch =
					LLVMAddGlobalInAddressSpace(ctx.ac.module,
								    ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
				LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(ai32));
				LLVMSetAlignment(ctx.gs_ngg_scratch, 4);

				ctx.gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx.ac.module,
					LLVMArrayType(ctx.ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
				LLVMSetLinkage(ctx.gs_ngg_emit, LLVMExternalLinkage);
				LLVMSetAlignment(ctx.gs_ngg_emit, 4);
			}

			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_num_inputs = ctx.args->shader_info->tcs.num_linked_inputs;
			unsigned tcs_num_outputs = ctx.args->shader_info->tcs.num_linked_outputs;
			unsigned tcs_num_patch_outputs = ctx.args->shader_info->tcs.num_linked_patch_outputs;
			ctx.tcs_num_patches =
				get_tcs_num_patches(
					ctx.args->options->key.tcs.input_vertices,
					ctx.shader->info.tess.tcs_vertices_out,
					ctx.tcs_num_inputs,
					tcs_num_outputs,
					tcs_num_patch_outputs,
					ctx.args->options->tess_offchip_block_dw_size,
					ctx.args->options->chip_class,
					ctx.args->options->family);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_num_patches = args->options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
		}

		if (shaders[i]->info.stage == MESA_SHADER_VERTEX &&
		    args->options->key.vs_common_out.as_ngg &&
		    args->options->key.vs_common_out.export_prim_id) {
			declare_esgs_ring(&ctx);
		}

		bool nested_barrier = false;

		if (i) {
			if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			    args->options->key.vs_common_out.as_ngg) {
				gfx10_ngg_gs_emit_prologue(&ctx);
				nested_barrier = false;
			} else {
				nested_barrier = true;
			}
		}

		if (nested_barrier) {
			/* Execute a barrier before the second shader in
			 * a merged shader.
			 *
			 * Execute the barrier inside the conditional block,
			 * so that empty waves can jump directly to s_endpgm,
			 * which will also signal the barrier.
			 *
			 * This is possible in gfx9, because an empty wave
			 * for the second shader does not participate in
			 * the epilogue. With NGG, empty waves may still
			 * be required to export data (e.g. GS output vertices),
			 * so we cannot let them exit early.
			 *
			 * If the shader is TCS and the TCS epilog is present
			 * and contains a barrier, it will wait there and then
			 * reach s_endpgm.
			*/
			ac_emit_barrier(&ctx.ac, ctx.stage);
		}

		nir_foreach_shader_out_variable(variable, shaders[i])
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		ac_setup_rings(&ctx);

		/* Wrap the shader body in "if (thread_id < thread count of
		 * this part)" for merged/NGG shaders. */
		LLVMBasicBlockRef merge_block = NULL;
		if (shader_count >= 2 || is_ngg) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count =
				ac_unpack_param(&ctx.ac,
						ac_get_arg(&ctx.ac, args->merged_wave_info),
						8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
			                                  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			prepare_interp_optimize(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx, shader_count >= 2);

		/* Translate the NIR body into the current block. */
		ac_nir_translate(&ctx.ac, &ctx.abi, &args->ac, shaders[i]);

		if (shader_count >= 2 || is_ngg) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		/* This needs to be outside the if wrapping the shader body, as sometimes
		 * the HW generates waves with 0 es/vs threads. */
		if (is_pre_gs_stage(shaders[i]->info.stage) &&
		    args->options->key.vs_common_out.as_ngg &&
		    i == shader_count - 1) {
			handle_ngg_outputs_post_2(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			   args->options->key.vs_common_out.as_ngg) {
			gfx10_ngg_gs_emit_epilogue_2(&ctx);
		}

		if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			/* Record the patch count / LDS usage for PM4 setup. */
			unsigned tcs_num_outputs = ctx.args->shader_info->tcs.num_linked_outputs;
			unsigned tcs_num_patch_outputs = ctx.args->shader_info->tcs.num_linked_patch_outputs;
			args->shader_info->tcs.num_patches = ctx.tcs_num_patches;
			args->shader_info->tcs.num_lds_blocks =
				calculate_tess_lds_size(
					ctx.args->options->chip_class,
					ctx.args->options->key.tcs.input_vertices,
					ctx.shader->info.tess.tcs_vertices_out,
					ctx.tcs_num_inputs,
					ctx.tcs_num_patches,
					tcs_num_outputs,
					tcs_num_patch_outputs);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (args->options->dump_preoptir) {
		fprintf(stderr, "%s LLVM IR:\n\n",
			radv_get_shader_name(args->shader_info,
					     shaders[shader_count - 1]->info.stage));
		ac_dump_module(ctx.ac.module);
		fprintf(stderr, "\n");
	}

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, args->options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (args->options->dump_shader) {
		args->shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}
4089 
/* LLVM diagnostic callback: if LLVM reports an error-severity diagnostic,
 * echo it to stderr and flag failure through the caller-provided status
 * word ("context" points at the unsigned error flag). */
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *status = (unsigned *)context;
	char *msg = LLVMGetDiagInfoDescription(di);

	if (LLVMGetDiagInfoSeverity(di) == LLVMDSError) {
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
		        msg);
		*status = 1;
	}

	LLVMDisposeMessage(msg);
}
4104 
/* Compile LLVM module M down to an ELF image.
 *
 * Returns 0 on success, non-zero if either the backend compilation failed
 * or the diagnostic handler recorded an LLVM error.  On success
 * *pelf_buffer/*pelf_size receive the ELF binary. */
static unsigned radv_llvm_compile(LLVMModuleRef M,
                                  char **pelf_buffer, size_t *pelf_size,
                                  struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;

	/* Route LLVM diagnostics into "retval" so backend-reported errors
	 * are not silently ignored. */
	LLVMContextRef llvm_ctx = LLVMGetModuleContext(M);
	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
	                                &retval);

	/* Run the actual IR -> ELF compilation. */
	if (!radv_compile_to_elf(ac_llvm, M, pelf_buffer, pelf_size))
		retval = 1;

	return retval;
}
4123 
/* Compile an LLVM module into a radv RTLD shader binary.
 *
 * Takes ownership of "llvm_module": both the module and the LLVM context
 * it lives in are destroyed here.  On success *rbinary points at a freshly
 * allocated radv_shader_binary_rtld holding the ELF image followed (when
 * options->record_ir is set) by the NUL-terminated LLVM IR text.  On
 * allocation failure *rbinary is set to NULL. */
static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct radv_shader_binary **rbinary,
				   gl_shader_stage stage,
				   const char *name,
				   const struct radv_nir_compiler_options *options)
{
	char *elf_buffer = NULL;
	size_t elf_size = 0;
	char *llvm_ir_string = NULL;

	if (options->dump_shader) {
		fprintf(stderr, "%s LLVM IR:\n\n", name);
		ac_dump_module(llvm_module);
		fprintf(stderr, "\n");
	}

	if (options->record_ir) {
		/* Keep a private copy of the IR text; the LLVM-owned string
		 * must be released with LLVMDisposeMessage. */
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = radv_llvm_compile(llvm_module, &elf_buffer, &elf_size, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	/* The module and its context (which we own) are no longer needed. */
	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	size_t llvm_ir_size = llvm_ir_string ? strlen(llvm_ir_string) : 0;
	size_t alloc_size = sizeof(struct radv_shader_binary_rtld) + elf_size + llvm_ir_size + 1;
	struct radv_shader_binary_rtld *rbin = calloc(1, alloc_size);
	if (!rbin) {
		/* Out of memory: report failure instead of dereferencing NULL
		 * below (CERT MEM32-C). */
		fprintf(stderr, "radv: failed to allocate shader binary\n");
		*rbinary = NULL;
		free(llvm_ir_string);
		free(elf_buffer);
		return;
	}

	/* elf_buffer may still be NULL when compilation failed; memcpy from a
	 * NULL pointer is undefined behavior even with a zero size. */
	if (elf_size)
		memcpy(rbin->data, elf_buffer, elf_size);
	if (llvm_ir_string)
		memcpy(rbin->data + elf_size, llvm_ir_string, llvm_ir_size + 1);

	rbin->base.type = RADV_BINARY_TYPE_RTLD;
	rbin->base.stage = stage;
	rbin->base.total_size = alloc_size;
	rbin->elf_size = elf_size;
	rbin->llvm_ir_size = llvm_ir_size;
	*rbinary = &rbin->base;

	/* free(NULL) is a no-op, so no guards are needed. */
	free(llvm_ir_string);
	free(elf_buffer);
}
4173 
4174 static void
radv_compile_nir_shader(struct ac_llvm_compiler * ac_llvm,struct radv_shader_binary ** rbinary,const struct radv_shader_args * args,struct nir_shader * const * nir,int nir_count)4175 radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
4176 			struct radv_shader_binary **rbinary,
4177 			const struct radv_shader_args *args,
4178 			struct nir_shader *const *nir,
4179 			int nir_count)
4180 {
4181 
4182 	LLVMModuleRef llvm_module;
4183 
4184 	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, args);
4185 
4186 	ac_compile_llvm_module(ac_llvm, llvm_module, rbinary,
4187 			       nir[nir_count - 1]->info.stage,
4188 			       radv_get_shader_name(args->shader_info,
4189 						    nir[nir_count - 1]->info.stage),
4190 			       args->options);
4191 
4192 	/* Determine the ES type (VS or TES) for the GS on GFX9. */
4193 	if (args->options->chip_class >= GFX9) {
4194 		if (nir_count == 2 &&
4195 		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
4196 			args->shader_info->gs.es_type = nir[0]->info.stage;
4197 		}
4198 	}
4199 }
4200 
/* Emit the body of the GS copy shader: load each vertex's GS outputs back
 * from the GSVS ring buffer and store them into the VS output slots, then
 * run the regular VS output export path for stream 0. */
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	/* Byte offset of this vertex in the ring: vertex_id * 4. */
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->ac.vertex_id),
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (!ctx->args->options->use_ngg_streamout &&
	    ctx->args->shader_info->so.num_outputs) {
		/* Stream ID lives in a 2-bit field at bit 24 of the
		 * streamout config SGPR. */
		stream_id =
			ac_unpack_param(&ctx->ac,
					ac_get_arg(&ctx->ac,
						   ctx->args->streamout_config),
					24, 2);
	} else {
		/* Without streamout only stream 0 matters. */
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	/* One switch case per vertex stream; unmatched stream IDs jump
	 * straight to "end". */
	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);

	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->args->shader_info->gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset;

		/* Streams other than 0 are only emitted when they actually
		 * have outputs and streamout is in use. */
		if (stream > 0 && !num_components)
			continue;

		if (stream > 0 && !ctx->args->shader_info->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		/* Running component index within this stream, used to address
		 * the ring (components are laid out structure-of-arrays). */
		offset = 0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->args->shader_info->gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->args->shader_info->gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			/* Skip outputs the GS never wrote or that belong to
			 * another stream. */
			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				/* Each component slot spans
				 * vertices_out * 16 * 4 bytes in the ring. */
				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->shader->info.gs.vertices_out * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc | ac_slc, true, false);

				/* 16-bit outputs were stored widened to 32 bits
				 * in the ring; truncate them back down. */
				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		/* Legacy (non-NGG) streamout is emitted per stream. */
		if (!ctx->args->options->use_ngg_streamout &&
		    ctx->args->shader_info->so.num_outputs)
			radv_emit_streamout(ctx, stream);

		/* Only stream 0 is rasterized, so only it exports outputs. */
		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, true,
					       &ctx->args->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}
4299 
4300 static void
radv_compile_gs_copy_shader(struct ac_llvm_compiler * ac_llvm,struct nir_shader * geom_shader,struct radv_shader_binary ** rbinary,const struct radv_shader_args * args)4301 radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
4302 			    struct nir_shader *geom_shader,
4303 			    struct radv_shader_binary **rbinary,
4304 			    const struct radv_shader_args *args)
4305 {
4306 	struct radv_shader_context ctx = {0};
4307 	ctx.args = args;
4308 
4309 	assert(args->is_gs_copy_shader);
4310 
4311 	ac_llvm_context_init(&ctx.ac, ac_llvm, args->options->chip_class,
4312 			     args->options->family, AC_FLOAT_MODE_DEFAULT, 64, 64);
4313 	ctx.context = ctx.ac.context;
4314 
4315 	ctx.stage = MESA_SHADER_VERTEX;
4316 	ctx.shader = geom_shader;
4317 
4318 	create_function(&ctx, MESA_SHADER_VERTEX, false);
4319 
4320 	ac_setup_rings(&ctx);
4321 
4322 	nir_foreach_shader_out_variable(variable, geom_shader) {
4323 		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
4324 		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
4325 					     variable, MESA_SHADER_VERTEX);
4326 	}
4327 
4328 	ac_gs_copy_shader_emit(&ctx);
4329 
4330 	LLVMBuildRetVoid(ctx.ac.builder);
4331 
4332 	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, args->options);
4333 
4334 	ac_compile_llvm_module(ac_llvm, ctx.ac.module, rbinary,
4335 			       MESA_SHADER_VERTEX, "GS Copy Shader", args->options);
4336 	(*rbinary)->is_gs_copy_shader = true;
4337 
4338 }
4339 
4340 void
llvm_compile_shader(struct radv_device * device,unsigned shader_count,struct nir_shader * const * shaders,struct radv_shader_binary ** binary,struct radv_shader_args * args)4341 llvm_compile_shader(struct radv_device *device,
4342 		    unsigned shader_count,
4343 		    struct nir_shader *const *shaders,
4344 		    struct radv_shader_binary **binary,
4345 		    struct radv_shader_args *args)
4346 {
4347 	enum ac_target_machine_options tm_options = 0;
4348 	struct ac_llvm_compiler ac_llvm;
4349 	bool thread_compiler;
4350 
4351 	tm_options |= AC_TM_SUPPORTS_SPILL;
4352 	if (args->options->check_ir)
4353 		tm_options |= AC_TM_CHECK_IR;
4354 
4355 	thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
4356 
4357 	radv_init_llvm_compiler(&ac_llvm, thread_compiler,
4358 				args->options->family, tm_options,
4359 				args->shader_info->wave_size);
4360 
4361 	if (args->is_gs_copy_shader) {
4362 		radv_compile_gs_copy_shader(&ac_llvm, *shaders, binary, args);
4363 	} else {
4364 		radv_compile_nir_shader(&ac_llvm, binary, args,
4365 					shaders, shader_count);
4366 	}
4367 
4368 	radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
4369 }
4370