/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "si_pipe.h"
#include "si_shader_internal.h"
#include "sid.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_memory.h"

struct si_llvm_diagnostics {
   struct pipe_debug_callback *debug;
   unsigned retval;
};

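/* LLVM diagnostic callback: forward warnings and errors to the pipe debug
 * callback and record errors in si_llvm_diagnostics::retval so the caller
 * can fail the compilation.
 */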
static void si_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
   struct si_llvm_diagnostics *diag = (struct si_llvm_diagnostics *)context;
   LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
   const char *severity_str = NULL;

   switch (severity) {
   case LLVMDSError:
      severity_str = "error";
      break;
   case LLVMDSWarning:
      severity_str = "warning";
      break;
   case LLVMDSRemark:
   case LLVMDSNote:
   default:
      return;
   }

   char *description = LLVMGetDiagInfoDescription(di);

   pipe_debug_message(diag->debug, SHADER_INFO, "LLVM diagnostic (%s): %s", severity_str,
                      description);

   if (severity == LLVMDSError) {
      diag->retval = 1;
      fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n", description);
   }

   LLVMDisposeMessage(description);
}

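/* Run the compiler passes on the LLVM module, store the resulting ELF in
 * "binary", and read the shader config (register and resource usage) from it
 * with ac_rtld. Returns false on any compilation or linking failure.
 */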
bool si_compile_llvm(struct si_screen *sscreen, struct si_shader_binary *binary,
                     struct ac_shader_config *conf, struct ac_llvm_compiler *compiler,
                     struct ac_llvm_context *ac, struct pipe_debug_callback *debug,
                     gl_shader_stage stage, const char *name, bool less_optimized)
{
   unsigned count = p_atomic_inc_return(&sscreen->num_compilations);

   if (si_can_dump_shader(sscreen, stage)) {
      fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

      if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
         fprintf(stderr, "%s LLVM IR:\n\n", name);
         ac_dump_module(ac->module);
         fprintf(stderr, "\n");
      }
   }

   if (sscreen->record_llvm_ir) {
      char *ir = LLVMPrintModuleToString(ac->module);
      binary->llvm_ir_string = strdup(ir);
      LLVMDisposeMessage(ir);
   }

   if (!si_replace_shader(count, binary)) {
      struct ac_compiler_passes *passes = compiler->passes;

      if (ac->wave_size == 32)
         passes = compiler->passes_wave32;
      else if (less_optimized && compiler->low_opt_passes)
         passes = compiler->low_opt_passes;

      struct si_llvm_diagnostics diag = {debug};
      LLVMContextSetDiagnosticHandler(ac->context, si_diagnostic_handler, &diag);

      if (!ac_compile_module_to_elf(passes, ac->module, (char **)&binary->elf_buffer,
                                    &binary->elf_size))
         diag.retval = 1;

      if (diag.retval != 0) {
         pipe_debug_message(debug, SHADER_INFO, "LLVM compilation failed");
         return false;
      }
   }

   struct ac_rtld_binary rtld;
   if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
                               .info = &sscreen->info,
                               .shader_type = stage,
                               .wave_size = ac->wave_size,
                               .num_parts = 1,
                               .elf_ptrs = &binary->elf_buffer,
                               .elf_sizes = &binary->elf_size}))
      return false;

   bool ok = ac_rtld_read_config(&sscreen->info, &rtld, conf);
   ac_rtld_close(&rtld);
   return ok;
}

void si_llvm_context_init(struct si_shader_context *ctx, struct si_screen *sscreen,
                          struct ac_llvm_compiler *compiler, unsigned wave_size)
{
   memset(ctx, 0, sizeof(*ctx));
   ctx->screen = sscreen;
   ctx->compiler = compiler;

   ac_llvm_context_init(&ctx->ac, compiler, sscreen->info.chip_class, sscreen->info.family,
                        AC_FLOAT_MODE_DEFAULT_OPENGL, wave_size, 64);
}

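/* Create the main LLVM function for the shader: build the (packed struct)
 * return type from "return_types", pick the AMDGPU calling convention that
 * matches the hardware stage after GFX9 shader merging, and set common
 * function attributes such as the workgroup size.
 */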
void si_llvm_create_func(struct si_shader_context *ctx, const char *name, LLVMTypeRef *return_types,
                         unsigned num_return_elems, unsigned max_workgroup_size)
{
   LLVMTypeRef ret_type;
   enum ac_llvm_calling_convention call_conv;

   if (num_return_elems)
      ret_type = LLVMStructTypeInContext(ctx->ac.context, return_types, num_return_elems, true);
   else
      ret_type = ctx->ac.voidt;

   gl_shader_stage real_stage = ctx->stage;

   /* LS is merged into HS (TCS), and ES is merged into GS. */
   if (ctx->screen->info.chip_class >= GFX9) {
      if (ctx->shader->key.as_ls)
         real_stage = MESA_SHADER_TESS_CTRL;
      else if (ctx->shader->key.as_es || ctx->shader->key.as_ngg)
         real_stage = MESA_SHADER_GEOMETRY;
   }

   switch (real_stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_EVAL:
      call_conv = AC_LLVM_AMDGPU_VS;
      break;
   case MESA_SHADER_TESS_CTRL:
      call_conv = AC_LLVM_AMDGPU_HS;
      break;
   case MESA_SHADER_GEOMETRY:
      call_conv = AC_LLVM_AMDGPU_GS;
      break;
   case MESA_SHADER_FRAGMENT:
      call_conv = AC_LLVM_AMDGPU_PS;
      break;
   case MESA_SHADER_COMPUTE:
      call_conv = AC_LLVM_AMDGPU_CS;
      break;
   default:
      unreachable("Unhandled shader type");
   }

   /* Set up the function. */
   ctx->return_type = ret_type;
   ctx->main_fn = ac_build_main(&ctx->args, &ctx->ac, call_conv, name, ret_type, ctx->ac.module);
   ctx->return_value = LLVMGetUndef(ctx->return_type);

   if (ctx->screen->info.address32_hi) {
      ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-32bit-address-high-bits",
                                           ctx->screen->info.address32_hi);
   }

   ac_llvm_set_workgroup_size(ctx->main_fn, max_workgroup_size);
}

void si_llvm_optimize_module(struct si_shader_context *ctx)
{
   /* Dump LLVM IR before any optimization passes. */
   if (ctx->screen->debug_flags & DBG(PREOPT_IR) && si_can_dump_shader(ctx->screen, ctx->stage))
      LLVMDumpModule(ctx->ac.module);

   /* Run the pass manager. */
   LLVMRunPassManager(ctx->compiler->passmgr, ctx->ac.module);
   LLVMDisposeBuilder(ctx->ac.builder);
}

void si_llvm_dispose(struct si_shader_context *ctx)
{
   LLVMDisposeModule(ctx->ac.module);
   LLVMContextDispose(ctx->ac.context);
   ac_llvm_context_dispose(&ctx->ac);
}

/**
 * Load a dword from a constant buffer.
 */
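/* Illustrative use (not a call from this file): loading the first dword of a
 * constant buffer whose descriptor is already in "buf" would look roughly like
 *
 *    LLVMValueRef dw0 = si_buffer_load_const(ctx, buf, ctx->ac.i32_0);
 */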
LLVMValueRef si_buffer_load_const(struct si_shader_context *ctx, LLVMValueRef resource,
                                  LLVMValueRef offset)
{
   return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL, 0, 0, true, true);
}

void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
{
   if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
      LLVMBuildRetVoid(ctx->ac.builder);
   else
      LLVMBuildRet(ctx->ac.builder, ret);
}

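/* Helpers that copy an input argument into the aggregate return value at
 * "return_index", used to pass SGPR/VGPR state through from one shader part
 * to the next. The _float variant bitcasts to float first; the _ptr variant
 * converts the pointer to an i32 via ptrtoint.
 */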
LLVMValueRef si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index)
{
   return LLVMBuildInsertValue(ctx->ac.builder, ret, ac_get_arg(&ctx->ac, param), return_index, "");
}

LLVMValueRef si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
                                       struct ac_arg param, unsigned return_index)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef p = ac_get_arg(&ctx->ac, param);

   return LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, p), return_index, "");
}

LLVMValueRef si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef ptr = ac_get_arg(&ctx->ac, param);
   ptr = LLVMBuildPtrToInt(builder, ptr, ctx->ac.i32, "");
   return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
}

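/* Load the RW_BUFFERS descriptor-list pointer from a prolog's input SGPRs.
 * Merged shaders carry it 8 user SGPRs later than single-part shaders.
 */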
LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx)
{
   LLVMValueRef ptr[2], list;
   bool merged_shader = si_is_merged_shader(ctx->shader);

   ptr[0] = LLVMGetParam(ctx->main_fn, (merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS);
   list =
      LLVMBuildIntToPtr(ctx->ac.builder, ptr[0], ac_array_in_const32_addr_space(ctx->ac.v4i32), "");
   return list;
}

void si_llvm_emit_barrier(struct si_shader_context *ctx)
{
   /* GFX6 only (thanks to a hw bug workaround):
    * The real barrier instruction isn't needed, because an entire patch
    * always fits into a single wave.
    */
   if (ctx->screen->info.chip_class == GFX6 && ctx->stage == MESA_SHADER_TESS_CTRL) {
      ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      return;
   }

   ac_build_s_barrier(&ctx->ac);
}

/* Ensure that the esgs ring is declared.
 *
 * We declare it with 64KB alignment as a hint that the
 * pointer value will always be 0.
 */
void si_llvm_declare_esgs_ring(struct si_shader_context *ctx)
{
   if (ctx->esgs_ring)
      return;

   assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));

   ctx->esgs_ring = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
                                                "esgs_ring", AC_ADDR_SPACE_LDS);
   LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
   LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
}

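/* Initialize the EXEC mask from a thread-count bitfield stored in an input
 * SGPR: the intrinsic enables the first N lanes, where N is read starting at
 * "bitoffset" within "param". Typically used by merged shaders whose first
 * half runs with fewer active threads than the wave size.
 */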
void si_init_exec_from_input(struct si_shader_context *ctx, struct ac_arg param, unsigned bitoffset)
{
   LLVMValueRef args[] = {
      ac_get_arg(&ctx->ac, param),
      LLVMConstInt(ctx->ac.i32, bitoffset, 0),
   };
   ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.init.exec.from.input", ctx->ac.voidt, args, 2,
                      AC_FUNC_ATTR_CONVERGENT);
}

/**
 * Get the value of a shader input parameter and extract a bitfield.
 */
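/* For example, unpack_llvm_param(ctx, v, 8, 5) emits (v >> 8) & 0x1f, i.e. it
 * extracts bits [8..12] of the packed parameter. When rshift + bitwidth == 32,
 * the masking is skipped because the shift alone isolates the field.
 */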
static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx, LLVMValueRef value,
                                      unsigned rshift, unsigned bitwidth)
{
   if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
      value = ac_to_integer(&ctx->ac, value);

   if (rshift)
      value = LLVMBuildLShr(ctx->ac.builder, value, LLVMConstInt(ctx->ac.i32, rshift, 0), "");

   if (rshift + bitwidth < 32) {
      unsigned mask = (1 << bitwidth) - 1;
      value = LLVMBuildAnd(ctx->ac.builder, value, LLVMConstInt(ctx->ac.i32, mask, 0), "");
   }

   return value;
}

LLVMValueRef si_unpack_param(struct si_shader_context *ctx, struct ac_arg param, unsigned rshift,
                             unsigned bitwidth)
{
   LLVMValueRef value = ac_get_arg(&ctx->ac, param);

   return unpack_llvm_param(ctx, value, rshift, bitwidth);
}

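/* Return the primitive ID system value for the current stage. Only the
 * x component is meaningful; any other swizzle returns 0.
 */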
LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx, unsigned swizzle)
{
   if (swizzle > 0)
      return ctx->ac.i32_0;

   switch (ctx->stage) {
   case MESA_SHADER_VERTEX:
      return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
   case MESA_SHADER_TESS_CTRL:
      return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
   case MESA_SHADER_TESS_EVAL:
      return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
   case MESA_SHADER_GEOMETRY:
      return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
   default:
      assert(0);
      return ctx->ac.i32_0;
   }
}

LLVMValueRef si_llvm_get_block_size(struct ac_shader_abi *abi)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);

   assert(ctx->shader->selector->info.base.cs.local_size_variable);
   return ac_get_arg(&ctx->ac, ctx->block_size);
}

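/* Declare the compute shader's LDS block: an i8 array of
 * info.base.cs.shared_size bytes, exposed through ctx->ac.lds as an i8
 * pointer in the LDS address space.
 */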
void si_llvm_declare_compute_memory(struct si_shader_context *ctx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   unsigned lds_size = sel->info.base.cs.shared_size;

   LLVMTypeRef i8p = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
   LLVMValueRef var;

   assert(!ctx->ac.lds);

   var = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i8, lds_size),
                                     "compute_lds", AC_ADDR_SPACE_LDS);
   LLVMSetAlignment(var, 64 * 1024);

   ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}

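/* Translate a NIR shader into the current LLVM function: load stage-specific
 * inputs (VS vertex inputs, PS color inputs, CS user data and shared memory),
 * configure the common ac_shader_abi options, allocate output allocas, and
 * run ac_nir_translate.
 */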
bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
{
   if (nir->info.stage == MESA_SHADER_VERTEX) {
      si_llvm_load_vs_inputs(ctx, nir);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      unsigned colors_read = ctx->shader->selector->info.colors_read;
      LLVMValueRef main_fn = ctx->main_fn;

      LLVMValueRef undef = LLVMGetUndef(ctx->ac.f32);

      unsigned offset = SI_PARAM_POS_FIXED_PT + 1;

      if (colors_read & 0x0f) {
         unsigned mask = colors_read & 0x0f;
         LLVMValueRef values[4];
         values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
         values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
         values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
         values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
         ctx->abi.color0 = ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
      }
      if (colors_read & 0xf0) {
         unsigned mask = (colors_read & 0xf0) >> 4;
         LLVMValueRef values[4];
         values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
         values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
         values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
         values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
         ctx->abi.color1 = ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
      }

      ctx->abi.interp_at_sample_force_center =
         ctx->shader->key.mono.u.ps.interpolate_at_sample_force_center;

      ctx->abi.kill_ps_if_inf_interp =
         ctx->screen->options.no_infinite_interp &&
         (ctx->shader->selector->info.uses_persp_center ||
          ctx->shader->selector->info.uses_persp_centroid ||
          ctx->shader->selector->info.uses_persp_sample);

   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
      if (nir->info.cs.user_data_components_amd) {
         ctx->abi.user_data = ac_get_arg(&ctx->ac, ctx->cs_user_data);
         ctx->abi.user_data = ac_build_expand_to_vec4(&ctx->ac, ctx->abi.user_data,
                                                      nir->info.cs.user_data_components_amd);
      }

      if (ctx->shader->selector->info.base.cs.shared_size)
         si_llvm_declare_compute_memory(ctx);
   }

   ctx->abi.inputs = &ctx->inputs[0];
   ctx->abi.clamp_shadow_reference = true;
   ctx->abi.robust_buffer_access = true;
   ctx->abi.convert_undef_to_zero = true;
   ctx->abi.clamp_div_by_zero = ctx->screen->options.clamp_div_by_zero;

   const struct si_shader_info *info = &ctx->shader->selector->info;
   for (unsigned i = 0; i < info->num_outputs; i++) {
      LLVMTypeRef type = ctx->ac.f32;

      if (nir_alu_type_get_type_size(ctx->shader->selector->info.output_type[i]) == 16)
         type = ctx->ac.f16;

      for (unsigned j = 0; j < 4; j++)
         ctx->abi.outputs[i * 4 + j] = ac_build_alloca_undef(&ctx->ac, type, "");
   }

   ac_nir_translate(&ctx->ac, &ctx->abi, &ctx->args, nir);

   return true;
}

/**
 * Given a list of shader part functions, build a wrapper function that
 * runs them in sequence to form a monolithic shader.
 */
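/* Illustrative only (names are hypothetical, not literal callers from this
 * file): a monolithic pixel shader could be assembled from three parts as
 *
 *    LLVMValueRef parts[] = {ps_prolog, ps_main, ps_epilog};
 *    si_build_wrapper_function(ctx, parts, 3, 1, 0);
 *
 * where main_part = 1 selects the part whose argument types are reused for
 * the wrapper, and next_shader_first_part = 0 means there is no second
 * merged-shader half.
 */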
void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
                               unsigned num_parts, unsigned main_part,
                               unsigned next_shader_first_part)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   /* PS epilog has one arg per color component; gfx9 merged shader
    * prologs need to forward 40 SGPRs.
    */
   LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
   LLVMTypeRef function_type;
   unsigned num_first_params;
   unsigned num_out, initial_num_out;
   ASSERTED unsigned num_out_sgpr;         /* used in debug checks */
   ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
   unsigned num_sgprs, num_vgprs;
   unsigned gprs;

   memset(&ctx->args, 0, sizeof(ctx->args));

   for (unsigned i = 0; i < num_parts; ++i) {
      ac_add_function_attr(ctx->ac.context, parts[i], -1, AC_FUNC_ATTR_ALWAYSINLINE);
      LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
   }

   /* The parameters of the wrapper function correspond to those of the
    * first part in terms of SGPRs and VGPRs, but we use the types of the
    * main part to get the right types. This is relevant for the
    * dereferenceable attribute on descriptor table pointers.
    */
   num_sgprs = 0;
   num_vgprs = 0;

   function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
   num_first_params = LLVMCountParamTypes(function_type);

   for (unsigned i = 0; i < num_first_params; ++i) {
      LLVMValueRef param = LLVMGetParam(parts[0], i);

      if (ac_is_sgpr_param(param)) {
         assert(num_vgprs == 0);
         num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
      } else {
         num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
      }
   }

   gprs = 0;
   while (gprs < num_sgprs + num_vgprs) {
      LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
      LLVMTypeRef type = LLVMTypeOf(param);
      unsigned size = ac_get_type_size(type) / 4;

      /* This is going to get cast anyway, so we don't have to
       * have the exact same type. But we do have to preserve the
       * pointer-ness so that LLVM knows about it.
       */
      enum ac_arg_type arg_type = AC_ARG_INT;
      if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
         type = LLVMGetElementType(type);

         if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
            if (LLVMGetVectorSize(type) == 4)
               arg_type = AC_ARG_CONST_DESC_PTR;
            else if (LLVMGetVectorSize(type) == 8)
               arg_type = AC_ARG_CONST_IMAGE_PTR;
            else
               assert(0);
         } else if (type == ctx->ac.f32) {
            arg_type = AC_ARG_CONST_FLOAT_PTR;
         } else {
            assert(0);
         }
      }

      ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR, size, arg_type, NULL);

      assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
      assert(gprs + size <= num_sgprs + num_vgprs &&
             (gprs >= num_sgprs || gprs + size <= num_sgprs));

      gprs += size;
   }

   /* Prepare the return type. */
   unsigned num_returns = 0;
   LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;

   last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
   return_type = LLVMGetReturnType(last_func_type);

   switch (LLVMGetTypeKind(return_type)) {
   case LLVMStructTypeKind:
      num_returns = LLVMCountStructElementTypes(return_type);
      assert(num_returns <= ARRAY_SIZE(returns));
      LLVMGetStructElementTypes(return_type, returns);
      break;
   case LLVMVoidTypeKind:
      break;
   default:
      unreachable("unexpected type");
   }

   si_llvm_create_func(ctx, "wrapper", returns, num_returns,
                       si_get_max_workgroup_size(ctx->shader));

   if (si_is_merged_shader(ctx->shader))
      ac_init_exec_full_mask(&ctx->ac);

   /* Record the arguments of the function as if they were an output of
    * a previous part.
    */
   num_out = 0;
   num_out_sgpr = 0;

   for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
      LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
      LLVMTypeRef param_type = LLVMTypeOf(param);
      LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->ac.i32 : ctx->ac.f32;
      unsigned size = ac_get_type_size(param_type) / 4;

      if (size == 1) {
         if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
            param = LLVMBuildPtrToInt(builder, param, ctx->ac.i32, "");
            param_type = ctx->ac.i32;
         }

         if (param_type != out_type)
            param = LLVMBuildBitCast(builder, param, out_type, "");
         out[num_out++] = param;
      } else {
         LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

         if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
            param = LLVMBuildPtrToInt(builder, param, ctx->ac.i64, "");
            param_type = ctx->ac.i64;
         }

         if (param_type != vector_type)
            param = LLVMBuildBitCast(builder, param, vector_type, "");

         for (unsigned j = 0; j < size; ++j)
            out[num_out++] =
               LLVMBuildExtractElement(builder, param, LLVMConstInt(ctx->ac.i32, j, 0), "");
      }

      if (ctx->args.args[i].file == AC_ARG_SGPR)
         num_out_sgpr = num_out;
   }

   memcpy(initial, out, sizeof(out));
   initial_num_out = num_out;
   initial_num_out_sgpr = num_out_sgpr;

   /* Now chain the parts. */
   LLVMValueRef ret = NULL;
   for (unsigned part = 0; part < num_parts; ++part) {
      LLVMValueRef in[AC_MAX_ARGS];
      LLVMTypeRef ret_type;
      unsigned out_idx = 0;
      unsigned num_params = LLVMCountParams(parts[part]);

      /* Merged shaders are executed conditionally depending
       * on the number of enabled threads passed in the input SGPRs. */
      if (si_is_multi_part_shader(ctx->shader) && part == 0) {
         LLVMValueRef ena, count = initial[3];

         count = LLVMBuildAnd(builder, count, LLVMConstInt(ctx->ac.i32, 0x7f, 0), "");
         ena = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
         ac_build_ifcc(&ctx->ac, ena, 6506);
      }

      /* Derive arguments for the next part from outputs of the
       * previous one.
       */
      for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
         LLVMValueRef param;
         LLVMTypeRef param_type;
         bool is_sgpr;
         unsigned param_size;
         LLVMValueRef arg = NULL;

         param = LLVMGetParam(parts[part], param_idx);
         param_type = LLVMTypeOf(param);
         param_size = ac_get_type_size(param_type) / 4;
         is_sgpr = ac_is_sgpr_param(param);

         if (is_sgpr) {
            ac_add_function_attr(ctx->ac.context, parts[part], param_idx + 1, AC_FUNC_ATTR_INREG);
         } else if (out_idx < num_out_sgpr) {
            /* Skip returned SGPRs the current part doesn't
             * declare on the input. */
            out_idx = num_out_sgpr;
         }

         assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));

         if (param_size == 1)
            arg = out[out_idx];
         else
            arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);

         if (LLVMTypeOf(arg) != param_type) {
            if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
               if (LLVMGetPointerAddressSpace(param_type) == AC_ADDR_SPACE_CONST_32BIT) {
                  arg = LLVMBuildBitCast(builder, arg, ctx->ac.i32, "");
                  arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
               } else {
                  arg = LLVMBuildBitCast(builder, arg, ctx->ac.i64, "");
                  arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
               }
            } else {
               arg = LLVMBuildBitCast(builder, arg, param_type, "");
            }
         }

         in[param_idx] = arg;
         out_idx += param_size;
      }

      ret = ac_build_call(&ctx->ac, parts[part], in, num_params);

      if (si_is_multi_part_shader(ctx->shader) && part + 1 == next_shader_first_part) {
         ac_build_endif(&ctx->ac, 6506);

         /* The second half of the merged shader should use
          * the inputs from the toplevel (wrapper) function,
          * not the return value from the last call.
          *
          * That's because the last call was executed conditionally,
          * so we can't consume it in the main block.
          */
         memcpy(out, initial, sizeof(initial));
         num_out = initial_num_out;
         num_out_sgpr = initial_num_out_sgpr;
         continue;
      }

      /* Extract the returned GPRs. */
      ret_type = LLVMTypeOf(ret);
      num_out = 0;
      num_out_sgpr = 0;

      if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
         assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

         unsigned ret_size = LLVMCountStructElementTypes(ret_type);

         for (unsigned i = 0; i < ret_size; ++i) {
            LLVMValueRef val = LLVMBuildExtractValue(builder, ret, i, "");

            assert(num_out < ARRAY_SIZE(out));
            out[num_out++] = val;

            if (LLVMTypeOf(val) == ctx->ac.i32) {
               assert(num_out_sgpr + 1 == num_out);
               num_out_sgpr = num_out;
            }
         }
      }
   }

   /* Return the value from the last part. */
   if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
      LLVMBuildRetVoid(builder);
   else
      LLVMBuildRet(builder, ret);
}