/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * SPDX-License-Identifier: MIT
 */

#include "ac_nir_to_llvm.h"
#include "ac_gpu_info.h"
#include "ac_binary.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_nir.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "sid.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include <llvm/Config/llvm-config.h>

struct ac_nir_context {
   struct ac_llvm_context ac;
   struct ac_shader_abi *abi;
   const struct ac_shader_args *args;

   gl_shader_stage stage;
   shader_info *info;

   LLVMValueRef *ssa_defs;

   struct ac_llvm_pointer scratch;
   struct ac_llvm_pointer constant_data;

   struct hash_table *defs;
   struct hash_table *phis;
   struct hash_table *verified_interp;

   LLVMValueRef main_function;
   LLVMBasicBlockRef continue_block;
   LLVMBasicBlockRef break_block;
};

static LLVMTypeRef get_def_type(struct ac_nir_context *ctx, const nir_def *def)
{
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
   if (def->num_components > 1) {
      type = LLVMVectorType(type, def->num_components);
   }
   return type;
}

static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
   return nir->ssa_defs[src.ssa->index];
}

static LLVMValueRef get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned c_off)
{
   LLVMValueRef ptr = get_src(ctx, src);
   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, LLVMConstInt(ctx->ac.i32, c_off, 0), "");
   /* LDS is used here as an i8 pointer. */
   return LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, ctx->ac.lds.value, &ptr, 1, "");
}

static LLVMBasicBlockRef get_block(struct ac_nir_context *nir, const struct nir_block *b)
{
   struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
   return (LLVMBasicBlockRef)entry->data;
}

static LLVMValueRef get_alu_src(struct ac_nir_context *ctx, nir_alu_src src,
                                unsigned num_components)
{
   LLVMValueRef value = get_src(ctx, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = ac_get_llvm_num_components(value);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      LLVMValueRef masks[] = {LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractElement(ctx->ac.builder, value, masks[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = ac_build_gather_values(&ctx->ac, values, num_components);
      } else {
         LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
         value = LLVMBuildShuffleVector(ctx->ac.builder, value, value, swizzle, "");
      }
   }
   return value;
}

static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx, LLVMIntPredicate pred,
                                 LLVMValueRef src0, LLVMValueRef src1)
{
   src0 = ac_to_integer(ctx, src0);
   src1 = ac_to_integer(ctx, src1);
   return LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
}

static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx, LLVMRealPredicate pred,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
   src0 = ac_to_float(ctx, src0);
   src1 = ac_to_float(ctx, src1);
   return LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
}

static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 1, 0);
}

static LLVMValueRef emit_intrin_1f_param_scalar(struct ac_llvm_context *ctx, const char *intrin,
                                                LLVMTypeRef result_type, LLVMValueRef src0)
{
   if (LLVMGetTypeKind(result_type) != LLVMVectorTypeKind)
      return emit_intrin_1f_param(ctx, intrin, result_type, src0);

   LLVMTypeRef elem_type = LLVMGetElementType(result_type);
   LLVMValueRef ret = LLVMGetUndef(result_type);

   /* Scalarize the intrinsic, because vectors are not supported. */
   for (unsigned i = 0; i < LLVMGetVectorSize(result_type); i++) {
      char name[64], type[64];
      LLVMValueRef params[] = {
         ac_to_float(ctx, ac_llvm_extract_elem(ctx, src0, i)),
      };

      ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
      ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
      assert(length < sizeof(name));
      ret = LLVMBuildInsertElement(
         ctx->builder, ret,
         ac_build_intrinsic(ctx, name, elem_type, params, 1, 0),
         LLVMConstInt(ctx->i32, i, 0), "");
   }
   return ret;
}

static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0,
                                         LLVMValueRef src1)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 2, 0);
}

static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0,
                                         LLVMValueRef src1, LLVMValueRef src2)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
      ac_to_float(ctx, src2),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 3, 0);
}

static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx, LLVMValueRef src0, LLVMValueRef src1,
                               LLVMValueRef src2)
{
   LLVMTypeRef src1_type = LLVMTypeOf(src1);
   LLVMTypeRef src2_type = LLVMTypeOf(src2);

   if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
      src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
   } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
   }

   return LLVMBuildSelect(ctx->builder, src0, ac_to_integer_or_pointer(ctx, src1),
                          ac_to_integer_or_pointer(ctx, src2), "");
}

static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}

static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx, const char *intrin,
                                    LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMTypeRef ret_type;
   LLVMTypeRef types[] = {ctx->i32, ctx->i1};
   LLVMValueRef res;
   LLVMValueRef params[] = {src0, src1};
   ret_type = LLVMStructTypeInContext(ctx->context, types, 2, false);

   res = ac_build_intrinsic(ctx, intrin, ret_type, params, 2, 0);

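   /* The llvm.*.with.overflow intrinsics return an aggregate {i32 result, i1 flag};
    * keep only the carry/borrow flag, zero-extended to i32.
    */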
   res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
   res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
   return res;
}

static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
{
   assert(ac_get_elem_bits(ctx, LLVMTypeOf(src0)) == 1);

   switch (bitsize) {
   case 16:
      if (LLVMGetTypeKind(LLVMTypeOf(src0)) == LLVMVectorTypeKind) {
         assert(LLVMGetVectorSize(LLVMTypeOf(src0)) == 2);
         LLVMValueRef f[] = {
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 0),
                            ctx->f16_1, ctx->f16_0, ""),
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 1),
                            ctx->f16_1, ctx->f16_0, ""),
         };
         return ac_build_gather_values(ctx, f, 2);
      }
      return LLVMBuildSelect(ctx->builder, src0, ctx->f16_1, ctx->f16_0, "");
   case 32:
      return LLVMBuildSelect(ctx->builder, src0, ctx->f32_1, ctx->f32_0, "");
   case 64:
      return LLVMBuildSelect(ctx->builder, src0, ctx->f64_1, ctx->f64_0, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
{
   switch (bitsize) {
   case 8:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i8_1, ctx->i8_0, "");
   case 16:
      if (LLVMGetTypeKind(LLVMTypeOf(src0)) == LLVMVectorTypeKind) {
         assert(LLVMGetVectorSize(LLVMTypeOf(src0)) == 2);
         LLVMValueRef i[] = {
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 0),
                            ctx->i16_1, ctx->i16_0, ""),
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 1),
                            ctx->i16_1, ctx->i16_0, ""),
         };
         return ac_build_gather_values(ctx, i, 2);
      }
      return LLVMBuildSelect(ctx->builder, src0, ctx->i16_1, ctx->i16_0, "");
   case 32:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i32_1, ctx->i32_0, "");
   case 64:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i64_1, ctx->i64_0, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, "");
}

static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef result;
   LLVMValueRef cond = NULL;

   src0 = ac_to_float(ctx, src0);
   result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

   if (ctx->gfx_level >= GFX8) {
      LLVMValueRef args[2];
      /* Check whether the result is a denormal, and flush it to 0 if so. */
      args[0] = result;
      args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
      cond =
         ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, 0);
   }

   /* Convert back up to f32. */
   result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

   if (ctx->gfx_level >= GFX8)
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   else {
      /* For GFX6-GFX7: 0x38800000 is the smallest half-float value (2^-14)
       * expressed as a 32-bit float, so compare the result against it and
       * flush to 0 if it's smaller.
       */
      LLVMValueRef temp, cond2;
      temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
      cond = LLVMBuildFCmp(
         ctx->builder, LLVMRealOGT,
         LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
         temp, "");
      cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE, temp, ctx->f32_0, "");
      cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   }
   return result;
}

static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx, LLVMValueRef bits, LLVMValueRef offset)
{
   /* mask = ((1 << bits) - 1) << offset */
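   /* e.g. bits = 5, offset = 8: ((1 << 5) - 1) << 8 = 0x1f << 8 = 0x00001f00. */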
   return LLVMBuildShl(
      ctx->builder,
      LLVMBuildSub(ctx->builder, LLVMBuildShl(ctx->builder, ctx->i32_1, bits, ""), ctx->i32_1, ""),
      offset, "");
}

static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx, LLVMValueRef mask,
                                         LLVMValueRef insert, LLVMValueRef base)
{
   /* Calculate:
    * (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
    * Use the right-hand side, which the LLVM backend can convert to V_BFI.
    */
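   /* Per bit: where mask = 1, base ^ (insert ^ base) = insert;
    * where mask = 0, base ^ 0 = base.
    */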
   return LLVMBuildXor(
      ctx->builder, base,
      LLVMBuildAnd(ctx->builder, mask, LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}

static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
                                                        LLVMValueRef args[2]))
{
   LLVMValueRef comp[2];

   src0 = ac_to_float(ctx, src0);
   comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
   comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

   return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}

static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
   LLVMValueRef temps[2], val;
   int i;

   for (i = 0; i < 2; i++) {
      val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
      val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
      val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
      temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
   }
   return ac_build_gather_values(ctx, temps, 2);
}

static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx, nir_op op, LLVMValueRef src0)
{
   unsigned mask;
   int idx;
   LLVMValueRef result;

   if (op == nir_op_fddx_fine)
      mask = AC_TID_MASK_LEFT;
   else if (op == nir_op_fddy_fine)
      mask = AC_TID_MASK_TOP;
   else
      mask = AC_TID_MASK_TOP_LEFT;

   /* For DDX we want the next X pixel, for DDY the next Y pixel. */
   if (op == nir_op_fddx_fine || op == nir_op_fddx_coarse || op == nir_op_fddx)
      idx = 1;
   else
      idx = 2;

   result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
   return result;
}

struct waterfall_context {
   LLVMBasicBlockRef phi_bb[2];
   bool use_waterfall;
};

/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the beginning and end of the loop, leaving the caller
 * to implement the body.
 *
 * params:
 *  - ctx is the usual nir context
 *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 *  - value is the possibly divergent value for which we build the loop
 *  - divergent is whether value is actually divergent. If false we just pass
 *    things through.
 */
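/* A minimal usage sketch (names are illustrative, the body is caller-specific):
 *
 *    struct waterfall_context wctx;
 *    LLVMValueRef idx = enter_waterfall(ctx, &wctx, divergent_idx, divergent);
 *    LLVMValueRef tmp = ...emit the operation using the now-uniform idx...;
 *    LLVMValueRef res = exit_waterfall(ctx, &wctx, tmp);
 */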
static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
                                    LLVMValueRef value, bool divergent)
{
   /* If the app claims the value is divergent but it is constant, we can
    * end up with a dynamic index of NULL. */
   if (!value)
      divergent = false;

   wctx->use_waterfall = divergent;
   if (!divergent)
      return value;

   ac_build_bgnloop(&ctx->ac, 6000);

   LLVMValueRef active = ctx->ac.i1true;
   LLVMValueRef scalar_value[NIR_MAX_VEC_COMPONENTS];

   for (unsigned i = 0; i < ac_get_llvm_num_components(value); i++) {
      LLVMValueRef comp = ac_llvm_extract_elem(&ctx->ac, value, i);
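      /* A NULL lane argument reads the value from the first active lane. */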
      scalar_value[i] = ac_build_readlane(&ctx->ac, comp, NULL);
      active = LLVMBuildAnd(ctx->ac.builder, active,
                            LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, comp, scalar_value[i], ""), "");
   }

   wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
   ac_build_ifcc(&ctx->ac, active, 6001);

   return ac_build_gather_values(&ctx->ac, scalar_value, ac_get_llvm_num_components(value));
}

static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
                                   LLVMValueRef value)
{
   LLVMValueRef ret = NULL;
   LLVMValueRef phi_src[2];
   LLVMValueRef cc_phi_src[2] = {
      ctx->ac.i32_0,
      LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
   };

   if (!wctx->use_waterfall)
      return value;

   wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

   ac_build_endif(&ctx->ac, 6001);

   if (value) {
      phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
      phi_src[1] = value;

      ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
   }

   /*
    * By using the optimization barrier on the exit decision, we decouple
    * the operations from the break, and hence avoid LLVM hoisting the
    * operation into the break block.
    */
   LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
   ac_build_optimization_barrier(&ctx->ac, &cc, false);

   LLVMValueRef active =
      LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
   ac_build_ifcc(&ctx->ac, active, 6002);
   ac_build_break(&ctx->ac);
   ac_build_endif(&ctx->ac, 6002);

   ac_build_endloop(&ctx->ac, 6000);
   return ret;
}

static LLVMValueRef
ac_build_const_int_vec(struct ac_llvm_context *ctx, LLVMTypeRef type, long long val, bool sign_extend)
{
   unsigned num_components = LLVMGetTypeKind(type) == LLVMVectorTypeKind ? LLVMGetVectorSize(type) : 1;

   if (num_components == 1)
      return LLVMConstInt(type, val, sign_extend);

   assert(num_components == 2);
   assert(ac_get_elem_bits(ctx, type) == 16);

   LLVMTypeRef elem_type = LLVMGetElementType(type);

   LLVMValueRef elems[2];
   for (unsigned i = 0; i < 2; ++i)
      elems[i] = LLVMConstInt(elem_type, val, sign_extend);

   return LLVMConstVector(elems, 2);
}

static bool visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
   LLVMValueRef src[16], result = NULL;
   unsigned num_components = instr->def.num_components;
   unsigned src_components;
   LLVMTypeRef def_type = get_def_type(ctx, &instr->def);

   assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec5:
   case nir_op_vec8:
   case nir_op_vec16:
   case nir_op_unpack_32_4x8:
   case nir_op_unpack_32_2x16:
   case nir_op_unpack_64_2x32:
   case nir_op_unpack_64_4x16:
      src_components = 1;
      break;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_uint_2x16:
   case nir_op_pack_sint_2x16:
      src_components = 2;
      break;
   case nir_op_cube_amd:
      src_components = 3;
      break;
   case nir_op_pack_32_4x8:
      src_components = 4;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      src[i] = get_alu_src(ctx, instr->src[i], src_components);

   switch (instr->op) {
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_fneg:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fneg will be optimized by the backend compiler into an XOR
          * that flips the sign bit. This is probably an LLVM bug.
          */
         result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
      }
      break;
   case nir_op_inot:
      result = LLVMBuildNot(ctx->ac.builder, src[0], "");
      break;
   case nir_op_iadd:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWAdd(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWAdd(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_uadd_sat:
   case nir_op_iadd_sat: {
      char name[64], type[64];
      ac_build_type_name_for_intr(def_type, type, sizeof(type));
      snprintf(name, sizeof(name), "llvm.%cadd.sat.%s",
               instr->op == nir_op_uadd_sat ? 'u' : 's', type);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, 0);
      break;
   }
   case nir_op_usub_sat:
   case nir_op_isub_sat: {
      char name[64], type[64];
      ac_build_type_name_for_intr(def_type, type, sizeof(type));
      snprintf(name, sizeof(name), "llvm.%csub.sat.%s",
               instr->op == nir_op_usub_sat ? 'u' : 's', type);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, 0);
      break;
   }
   case nir_op_fadd:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fsub:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_isub:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWSub(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWSub(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imul:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWMul(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWMul(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmul:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmulz:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fmul.legacy", ctx->ac.f32,
                                  src, 2, 0);
      break;
   case nir_op_frcp:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rcp",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_iand:
      result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ior:
      result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ixor:
      result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr: {
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) <
          ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) >
               ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      LLVMTypeRef type = LLVMTypeOf(src[1]);
      src[1] = LLVMBuildAnd(ctx->ac.builder, src[1],
                            ac_build_const_int_vec(&ctx->ac, type, ac_get_elem_bits(&ctx->ac, type) - 1, false), "");
      switch (instr->op) {
      case nir_op_ishl:
         result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
         break;
      case nir_op_ishr:
         result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
         break;
      case nir_op_ushr:
         result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
         break;
      default:
         break;
      }
      break;
   }
   case nir_op_ilt:
      result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
      break;
   case nir_op_ine:
      result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
      break;
   case nir_op_ieq:
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
      break;
   case nir_op_ige:
      result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
      break;
   case nir_op_ult:
      result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
      break;
   case nir_op_uge:
      result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
      break;
   case nir_op_feq:
      result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
      break;
   case nir_op_fneu:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
      break;
   case nir_op_flt:
      result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
      break;
   case nir_op_fge:
      result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
      break;
   case nir_op_fabs:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.fabs", ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fabs will be optimized by the backend compiler into an AND
          * that clears the sign bit.
          */
         result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
      }
      break;
   case nir_op_fsat:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsat(&ctx->ac, src[0],
                             ac_to_float_type(&ctx->ac, def_type));
      break;
   case nir_op_iabs:
      result = emit_iabs(&ctx->ac, src[0]);
      break;
   case nir_op_imax:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imin:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umax:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umin:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_isign:
      result = ac_build_isign(&ctx->ac, src[0]);
      break;
   case nir_op_fsign:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsign(&ctx->ac, src[0]);
      break;
   case nir_op_ffloor:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.floor", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ftrunc:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.trunc", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fceil:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.ceil", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fround_even:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.rint", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ffract:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsin_amd:
   case nir_op_fcos_amd:
      /* Before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256]. */
      if (ctx->ac.gfx_level < GFX9)
         src[0] = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
                                              ac_to_float_type(&ctx->ac, def_type), src[0]);
      result =
         emit_intrin_1f_param(&ctx->ac, instr->op == nir_op_fsin_amd ? "llvm.amdgcn.sin" : "llvm.amdgcn.cos",
                              ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsqrt:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.sqrt", ac_to_float_type(&ctx->ac, def_type), src[0]);
      LLVMSetMetadata(result, ctx->ac.fpmath_md_kind, ctx->ac.three_md);
      break;
   case nir_op_fexp2:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.exp2", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_flog2:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.log2", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frsq:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rsq",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_frexp_exp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_exp(&ctx->ac, src[0], ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
         result = LLVMBuildSExt(ctx->ac.builder, result, ctx->ac.i32, "");
      break;
   case nir_op_frexp_sig:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_mant(&ctx->ac, src[0], instr->def.bit_size);
      break;
   case nir_op_fmax:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1]);
      if (ctx->ac.gfx_level < GFX9 && instr->def.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
      }
      break;
   case nir_op_fmin:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1]);
      if (ctx->ac.gfx_level < GFX9 && instr->def.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
      }
      break;
   case nir_op_ffma:
      /* FMA is slow on gfx6-8, so it shouldn't be used. */
      assert(instr->def.bit_size != 32 || ctx->ac.gfx_level >= GFX9);
      result = emit_intrin_3f_param(&ctx->ac, "llvm.fma", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1], src[2]);
      break;
   case nir_op_ffmaz:
      assert(ctx->ac.gfx_level >= GFX10_3);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      src[2] = ac_to_float(&ctx->ac, src[2]);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fma.legacy", ctx->ac.f32,
                                  src, 3, 0);
      break;
   case nir_op_ldexp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
         result = ac_build_intrinsic(&ctx->ac,
                                     LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f32.i32"
                                                              : "llvm.amdgcn.ldexp.f32",
                                     ctx->ac.f32, src, 2, 0);
      else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
         result = ac_build_intrinsic(&ctx->ac,
                                     LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f16.i32"
                                                              : "llvm.amdgcn.ldexp.f16",
                                     ctx->ac.f16, src, 2, 0);
      else
         result = ac_build_intrinsic(&ctx->ac,
                                     LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f64.i32"
                                                              : "llvm.amdgcn.ldexp.f64",
                                     ctx->ac.f64, src, 2, 0);
      break;
   case nir_op_bfm:
      result = emit_bfm(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_bitfield_select:
      result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_ubfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
      break;
   case nir_op_ibfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
      break;
   case nir_op_bitfield_reverse:
      result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
      break;
   case nir_op_bit_count:
      result = ac_build_bit_count(&ctx->ac, src[0]);
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec5:
   case nir_op_vec8:
   case nir_op_vec16:
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
         src[i] = ac_to_integer(&ctx->ac, src[i]);
      result = ac_build_gather_values(&ctx->ac, src, num_components);
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2f16:
   case nir_op_i2f32:
   case nir_op_i2f64:
      result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2f16:
   case nir_op_u2f32:
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_f2f16_rtz: {
      src[0] = ac_to_float(&ctx->ac, src[0]);

      if (LLVMTypeOf(src[0]) == ctx->ac.f64)
         src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");

      /* Fast path conversion. This only works if NIR is vectorized
       * to vec2 with a 16-bit destination.
       */
      if (LLVMTypeOf(src[0]) == ctx->ac.v2f32) {
         LLVMValueRef args[] = {
            ac_llvm_extract_elem(&ctx->ac, src[0], 0),
            ac_llvm_extract_elem(&ctx->ac, src[0], 1),
         };
         result = ac_build_cvt_pkrtz_f16(&ctx->ac, args);
         break;
      }

      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef param[2] = {src[0], LLVMGetUndef(ctx->ac.f32)};
      result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_f2f16:
   case nir_op_f2f16_rtne:
   case nir_op_f2f32:
   case nir_op_f2f64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      else
         result =
            LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
   case nir_op_u2u64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2i8:
   case nir_op_i2i16:
   case nir_op_i2i32:
   case nir_op_i2i64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_bcsel:
      result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_find_lsb:
      result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
      break;
   case nir_op_ufind_msb:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32, false);
      break;
   case nir_op_ifind_msb:
      result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_ufind_msb_rev:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32, true);
      break;
   case nir_op_ifind_msb_rev:
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.sffbh.i32", ctx->ac.i32, &src[0], 1,
                                  0);
      break;
   case nir_op_uclz: {
      LLVMValueRef params[2] = {
         src[0],
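         /* is_zero_poison = false: ctlz(0) is well-defined and returns 32. */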
         ctx->ac.i1false,
      };
      result = ac_build_intrinsic(&ctx->ac, "llvm.ctlz.i32", ctx->ac.i32, params, 2, 0);
      break;
   }
   case nir_op_uadd_carry:
      result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_usub_borrow:
      result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_b2f16:
   case nir_op_b2f32:
   case nir_op_b2f64:
      result = emit_b2f(&ctx->ac, src[0], instr->def.bit_size);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
   case nir_op_b2i64:
      result = emit_b2i(&ctx->ac, src[0], instr->def.bit_size);
      break;
   case nir_op_b2b1: /* after loads */
      result = emit_i2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2b16: /* before stores */
      result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i16, "");
      break;
   case nir_op_b2b32: /* before stores */
      result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i32, "");
      break;
   case nir_op_fquantize2f16:
      result = emit_f2f16(&ctx->ac, src[0]);
      break;
   case nir_op_umul_high:
      result = emit_umul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imul_high:
      result = emit_imul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_pack_half_2x16_rtz_split:
   case nir_op_pack_half_2x16_split:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildBitCast(ctx->ac.builder,
                                ac_build_cvt_pkrtz_f16(&ctx->ac, src),
                                ctx->ac.i32, "");
      break;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16: {
      unsigned bit_size = instr->src[0].src.ssa->bit_size;
      /* Only 16-bit and 32-bit sources are supported. */
      assert(bit_size == 16 || bit_size == 32);

      LLVMValueRef data = src[0];
      /* Workaround for pre-GFX9 GPUs, which don't have the fp16 pknorm instructions. */
      if (bit_size == 16 && ctx->ac.gfx_level < GFX9) {
         data = LLVMBuildFPExt(ctx->ac.builder, data, ctx->ac.v2f32, "");
         bit_size = 32;
      }

      LLVMValueRef (*pack)(struct ac_llvm_context *ctx, LLVMValueRef args[2]);
      if (bit_size == 32) {
         pack = instr->op == nir_op_pack_snorm_2x16 ?
                ac_build_cvt_pknorm_i16 : ac_build_cvt_pknorm_u16;
      } else {
         pack = instr->op == nir_op_pack_snorm_2x16 ?
                ac_build_cvt_pknorm_i16_f16 : ac_build_cvt_pknorm_u16_f16;
      }
      result = emit_pack_2x16(&ctx->ac, data, pack);
      break;
   }
   case nir_op_pack_uint_2x16: {
      LLVMValueRef comp[2];

      comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
      comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");

      result = ac_build_cvt_pk_u16(&ctx->ac, comp, 16, false);
      break;
   }
   case nir_op_pack_sint_2x16: {
      LLVMValueRef comp[2];

      comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
      comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");

      result = ac_build_cvt_pk_i16(&ctx->ac, comp, 16, false);
      break;
   }
   case nir_op_unpack_half_2x16_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_half_2x16_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }
   case nir_op_fddx:
   case nir_op_fddy:
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      result = emit_ddxy(ctx, instr->op, src[0]);
      break;

   case nir_op_unpack_64_4x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v4i16, "");
      break;
   }

   case nir_op_unpack_64_2x32: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                ctx->ac.v2i32, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
      break;
   }

   case nir_op_pack_32_4x8: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                ctx->ac.i32, "");
      break;
   }
   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }

   case nir_op_unpack_32_4x8:
      result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v4i8, "");
      break;
   case nir_op_unpack_32_2x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                ctx->ac.v2i16, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_y: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }

   case nir_op_cube_amd: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef results[4];
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc", ctx->ac.f32, in, 3, 0);
      results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc", ctx->ac.f32, in, 3, 0);
      results[2] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema", ctx->ac.f32, in, 3, 0);
      results[3] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid", ctx->ac.f32, in, 3, 0);
      result = ac_build_gather_values(&ctx->ac, results, 4);
      break;
   }

   case nir_op_extract_u8:
   case nir_op_extract_i8:
   case nir_op_extract_u16:
   case nir_op_extract_i16: {
      bool is_signed = instr->op == nir_op_extract_i16 || instr->op == nir_op_extract_i8;
      unsigned size = instr->op == nir_op_extract_u8 || instr->op == nir_op_extract_i8 ? 8 : 16;
      LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
      result = LLVMBuildLShr(ctx->ac.builder, src[0], offset, "");
      result = LLVMBuildTrunc(ctx->ac.builder, result, LLVMIntTypeInContext(ctx->ac.context, size), "");
      if (is_signed)
         result = LLVMBuildSExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
      else
         result = LLVMBuildZExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
      break;
   }

   case nir_op_insert_u8:
   case nir_op_insert_u16: {
      unsigned size = instr->op == nir_op_insert_u8 ? 8 : 16;
      LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
      LLVMValueRef mask = LLVMConstInt(LLVMTypeOf(src[0]), u_bit_consecutive(0, size), false);
      result = LLVMBuildShl(ctx->ac.builder, LLVMBuildAnd(ctx->ac.builder, src[0], mask, ""), offset, "");
      break;
   }

   case nir_op_sdot_4x8_iadd:
   case nir_op_sdot_4x8_iadd_sat: {
      if (ctx->ac.gfx_level >= GFX11) {
         result = ac_build_sudot_4x8(&ctx->ac, src[0], src[1], src[2],
                                     instr->op == nir_op_sdot_4x8_iadd_sat, 0x3);
      } else {
         const char *name = "llvm.amdgcn.sdot4";
         src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_4x8_iadd_sat, false);
         result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
      }
      break;
   }
   case nir_op_sudot_4x8_iadd:
   case nir_op_sudot_4x8_iadd_sat: {
      result = ac_build_sudot_4x8(&ctx->ac, src[0], src[1], src[2],
                                  instr->op == nir_op_sudot_4x8_iadd_sat, 0x1);
      break;
   }
   case nir_op_udot_4x8_uadd:
   case nir_op_udot_4x8_uadd_sat: {
      const char *name = "llvm.amdgcn.udot4";
      src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_udot_4x8_uadd_sat, false);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
      break;
   }

   case nir_op_sdot_2x16_iadd:
   case nir_op_udot_2x16_uadd:
   case nir_op_sdot_2x16_iadd_sat:
   case nir_op_udot_2x16_uadd_sat: {
      const char *name = instr->op == nir_op_sdot_2x16_iadd ||
                         instr->op == nir_op_sdot_2x16_iadd_sat
                         ? "llvm.amdgcn.sdot2" : "llvm.amdgcn.udot2";
      src[0] = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      src[1] = LLVMBuildBitCast(ctx->ac.builder, src[1], ctx->ac.v2i16, "");
      src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_2x16_iadd_sat ||
                                        instr->op == nir_op_udot_2x16_uadd_sat, false);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
      break;
   }

   case nir_op_msad_4x8:
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.msad.u8", ctx->ac.i32,
                                  (LLVMValueRef[]){src[1], src[0], src[2]}, 3, 0);
      break;

   default:
      fprintf(stderr, "Unknown NIR alu instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      return false;
   }

   if (result) {
      result = ac_to_integer_or_pointer(&ctx->ac, result);
      ctx->ssa_defs[instr->def.index] = result;
   }
   return true;
}

static bool visit_load_const(struct ac_nir_context *ctx, const nir_load_const_instr *instr)
{
   LLVMValueRef values[16], value = NULL;
   LLVMTypeRef element_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      switch (instr->def.bit_size) {
      case 1:
         values[i] = LLVMConstInt(element_type, instr->value[i].b, false);
         break;
      case 8:
         values[i] = LLVMConstInt(element_type, instr->value[i].u8, false);
         break;
      case 16:
         values[i] = LLVMConstInt(element_type, instr->value[i].u16, false);
         break;
      case 32:
         values[i] = LLVMConstInt(element_type, instr->value[i].u32, false);
         break;
      case 64:
         values[i] = LLVMConstInt(element_type, instr->value[i].u64, false);
         break;
      default:
         fprintf(stderr, "unsupported nir load_const bit_size: %d\n", instr->def.bit_size);
         return false;
      }
   }
   if (instr->def.num_components > 1) {
      value = LLVMConstVector(values, instr->def.num_components);
   } else
      value = values[0];

   ctx->ssa_defs[instr->def.index] = value;
   return true;
}

/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx, struct ac_image_args *args,
                                          const nir_tex_instr *instr)
{
   nir_alu_type stype = nir_alu_type_get_base_type(instr->dest_type);
   LLVMValueRef wa_8888 = NULL;
   LLVMValueRef half_texel[2];
   LLVMValueRef result;

   assert(stype == nir_type_int || stype == nir_type_uint);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef formats;
      LLVMValueRef data_format;
      LLVMValueRef wa_formats;

      formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

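      /* DATA_FORMAT is the 6-bit field at bits [25:20] of descriptor dword 1. */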
      data_format = LLVMBuildLShr(ctx->builder, formats, LLVMConstInt(ctx->i32, 20, false), "");
      data_format =
         LLVMBuildAnd(ctx->builder, data_format, LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
      wa_8888 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, data_format,
                              LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false), "");

      uint32_t wa_num_format = stype == nir_type_uint
                                  ? S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED)
                                  : S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
      wa_formats = LLVMBuildAnd(ctx->builder, formats,
                                LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false), "");
      wa_formats =
         LLVMBuildOr(ctx->builder, wa_formats, LLVMConstInt(ctx->i32, wa_num_format, false), "");

      formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
      args->resource =
         LLVMBuildInsertElement(ctx->builder, args->resource, formats, ctx->i32_1, "");
   }

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
      assert(!wa_8888);
      half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
   } else {
      struct ac_image_args resinfo = {0};
      LLVMBasicBlockRef bbs[2];

      LLVMValueRef unnorm = NULL;
      LLVMValueRef default_offset = ctx->f32_0;
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D && !instr->is_array) {
         /* In Vulkan, whether the sampler uses unnormalized
          * coordinates or not is a dynamic property of the
          * sampler. Hence, to figure out whether or not we
          * need to divide by the texture size, we need to test
          * the sampler at runtime. This tests the bit set by
          * radv_init_sampler().
          */
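         /* The flag lives in bit 15 of sampler dword 0. */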
         LLVMValueRef sampler0 =
            LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
         sampler0 = LLVMBuildLShr(ctx->builder, sampler0, LLVMConstInt(ctx->i32, 15, false), "");
         sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
         unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
         default_offset = LLVMConstReal(ctx->f32, -0.5);
      }

      bbs[0] = LLVMGetInsertBlock(ctx->builder);
      if (wa_8888 || unnorm) {
         assert(!(wa_8888 && unnorm));
         LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
         /* Skip the texture size query entirely if we don't need it. */
         ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
         bbs[1] = LLVMGetInsertBlock(ctx->builder);
      }

      /* Query the texture size. */
      resinfo.dim = ac_get_sampler_dim(ctx->gfx_level, instr->sampler_dim, instr->is_array);
      resinfo.opcode = ac_image_get_resinfo;
      resinfo.dmask = 0xf;
      resinfo.lod = ctx->i32_0;
      resinfo.resource = args->resource;
      resinfo.attributes = AC_ATTR_INVARIANT_LOAD;
      LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

      /* Compute -0.5 / size. */
      for (unsigned c = 0; c < 2; c++) {
         half_texel[c] =
            LLVMBuildExtractElement(ctx->builder, size, LLVMConstInt(ctx->i32, c, 0), "");
         half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
         half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
         half_texel[c] =
            LLVMBuildFMul(ctx->builder, half_texel[c], LLVMConstReal(ctx->f32, -0.5), "");
      }

      if (wa_8888 || unnorm) {
         ac_build_endif(ctx, 2000);

         for (unsigned c = 0; c < 2; c++) {
            LLVMValueRef values[2] = {default_offset, half_texel[c]};
            half_texel[c] = ac_build_phi(ctx, ctx->f32, 2, values, bbs);
         }
      }
   }

   for (unsigned c = 0; c < 2; c++) {
      LLVMValueRef tmp;
      tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
      args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
   }

   args->attributes = AC_ATTR_INVARIANT_LOAD;
   result = ac_build_image_opcode(ctx, args);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef tmp, tmp2;

      /* If the cube workaround is in place, f2i the result. */
      for (unsigned c = 0; c < 4; c++) {
         tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
         if (stype == nir_type_uint)
            tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
         else
            tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
         tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
         tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
         result =
            LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
      }
   }
   return result;
}
1454
build_tex_intrinsic(struct ac_nir_context * ctx,const nir_tex_instr * instr,struct ac_image_args * args)1455 static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_tex_instr *instr,
1456 struct ac_image_args *args)
1457 {
1458 assert((!args->tfe || !args->d16) && "unsupported");
1459
1460 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
1461 unsigned mask = nir_def_components_read(&instr->def);
1462
1463 /* Buffers don't support A16. */
1464 if (args->a16)
1465 args->coords[0] = LLVMBuildZExt(ctx->ac.builder, args->coords[0], ctx->ac.i32, "");
1466
1467 return ac_build_buffer_load_format(&ctx->ac, args->resource, args->coords[0], ctx->ac.i32_0,
1468 util_last_bit(mask), 0, true,
1469 instr->def.bit_size == 16,
1470 args->tfe);
1471 }
1472
1473 args->opcode = ac_image_sample;
1474
1475 switch (instr->op) {
1476 case nir_texop_txf:
1477 case nir_texop_txf_ms:
1478 args->opcode = args->level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS
1479 ? ac_image_load
1480 : ac_image_load_mip;
1481 args->level_zero = false;
1482 break;
1483 case nir_texop_txs:
1484 case nir_texop_query_levels:
1485 case nir_texop_texture_samples:
1486 assert(!"should have been lowered");
1487 break;
1488 case nir_texop_tex:
1489 if (ctx->stage != MESA_SHADER_FRAGMENT &&
1490 (!gl_shader_stage_is_compute(ctx->stage) ||
1491 ctx->info->cs.derivative_group == DERIVATIVE_GROUP_NONE)) {
1492 assert(!args->lod);
1493 args->level_zero = true;
1494 }
1495 break;
1496 case nir_texop_tg4:
1497 args->opcode = ac_image_gather4;
1498 if (!args->lod && !instr->is_gather_implicit_lod)
1499 args->level_zero = true;
1500 /* GFX11 supports implicit LOD, but the extension is unsupported. */
1501 assert(args->level_zero || ctx->ac.gfx_level < GFX11);
1502 break;
1503 case nir_texop_lod:
1504 args->opcode = ac_image_get_lod;
1505 break;
1506 case nir_texop_fragment_fetch_amd:
1507 case nir_texop_fragment_mask_fetch_amd:
1508 args->opcode = ac_image_load;
1509 args->level_zero = false;
1510 break;
1511 default:
1512 break;
1513 }
1514
1515 /* MI200 doesn't have image_sample_lz, but image_sample behaves like lz. */
1516 if (!ctx->ac.info->has_3d_cube_border_color_mipmap)
1517 args->level_zero = false;
1518
1519 if (instr->op == nir_texop_tg4 && ctx->ac.gfx_level <= GFX8 &&
1520 (instr->dest_type & (nir_type_int | nir_type_uint))) {
1521 return lower_gather4_integer(&ctx->ac, args, instr);
1522 }
1523
1524 args->attributes = AC_ATTR_INVARIANT_LOAD;
1525 bool cs_derivs =
1526 gl_shader_stage_is_compute(ctx->stage) && ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
1527 if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
1528 /* Prevent texture instructions with implicit derivatives from being
1529 * sunk into branches. */
1530 switch (instr->op) {
1531 case nir_texop_tex:
1532 case nir_texop_txb:
1533 case nir_texop_lod:
1534 args->attributes |= AC_ATTR_CONVERGENT;
1535 break;
1536 default:
1537 break;
1538 }
1539 }
1540
1541 return ac_build_image_opcode(&ctx->ac, args);
1542 }
1543
1544 static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1545 {
1546 LLVMValueRef ptr, addr;
1547 LLVMValueRef src0 = get_src(ctx, instr->src[0]);
1548 unsigned index = nir_intrinsic_base(instr);
1549
1550 addr = LLVMConstInt(ctx->ac.i32, index, 0);
1551 addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");
1552
1553 /* Load constant values from user SGPRs when possible, otherwise
1554 * fall back to the default path that loads directly from memory.
1555 */
1556 if (LLVMIsConstant(src0) && instr->def.bit_size >= 32) {
1557 unsigned count = instr->def.num_components;
1558 unsigned offset = index;
1559
1560 if (instr->def.bit_size == 64)
1561 count *= 2;
1562
1563 offset += LLVMConstIntGetZExtValue(src0);
1564 offset /= 4;
1565
1566 uint64_t mask = BITFIELD64_MASK(count) << offset;
1567 if ((ctx->args->inline_push_const_mask | mask) == ctx->args->inline_push_const_mask &&
1568 offset + count <= (sizeof(ctx->args->inline_push_const_mask) * 8u)) {
1569 LLVMValueRef *const push_constants = alloca(count * sizeof(LLVMValueRef));
1570 unsigned arg_index =
1571 util_bitcount64(ctx->args->inline_push_const_mask & BITFIELD64_MASK(offset));
1572 for (unsigned i = 0; i < count; i++)
1573 push_constants[i] = ac_get_arg(&ctx->ac, ctx->args->inline_push_consts[arg_index++]);
1574 LLVMValueRef res = ac_build_gather_values(&ctx->ac, push_constants, count);
1575 return instr->def.bit_size == 64
1576 ? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), "")
1577 : res;
1578 }
1579 }
1580
1581 struct ac_llvm_pointer pc = ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->push_constants);
1582 ptr = LLVMBuildGEP2(ctx->ac.builder, pc.t, pc.v, &addr, 1, "");
1583
1584 if (instr->def.bit_size == 8) {
1585 unsigned load_dwords = instr->def.num_components > 1 ? 2 : 1;
1586 LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords);
1587 ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
1588 LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
1589
1590 LLVMValueRef params[3];
1591 if (load_dwords > 1) {
1592 LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.v2i32, "");
1593 params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec,
1594 ctx->ac.i32_1, "");
1595 params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec,
1596 ctx->ac.i32_0, "");
1597 } else {
1598 res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
1599 params[0] = ctx->ac.i32_0;
1600 params[1] = res;
1601 }
1602 params[2] = addr;
1603 res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);
1604
1605 res = LLVMBuildTrunc(
1606 ctx->ac.builder, res,
1607 LLVMIntTypeInContext(ctx->ac.context, instr->def.num_components * 8), "");
1608 if (instr->def.num_components > 1)
1609 res = LLVMBuildBitCast(ctx->ac.builder, res,
1610 LLVMVectorType(ctx->ac.i8, instr->def.num_components), "");
1611 return res;
1612 } else if (instr->def.bit_size == 16) {
1613 unsigned load_dwords = instr->def.num_components / 2 + 1;
1614 LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords);
1615 ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
1616 LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
1617 res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
1618 LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
1619 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
1620 LLVMValueRef mask[] = {
1621 ctx->ac.i32_0, ctx->ac.i32_1,
1622 LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
1623 LLVMConstInt(ctx->ac.i32, 4, false)};
1624 LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->def.num_components);
1625 LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->def.num_components);
1626 LLVMValueRef shuffle_aligned =
1627 LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
1628 LLVMValueRef shuffle_unaligned =
1629 LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
1630 res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
1631 return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), "");
1632 }
1633
1634 LLVMTypeRef ptr_type = get_def_type(ctx, &instr->def);
1635 ptr = ac_cast_ptr(&ctx->ac, ptr, ptr_type);
1636
1637 return LLVMBuildLoad2(ctx->ac.builder, ptr_type, ptr, "");
1638 }
1639
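/* Worked example for the inlined push-constant path above (hypothetical
 * values): a constant load of 2 dwords at byte offset 8 with base 0 gives
 *
 *   offset = 8 / 4 = 2,  count = 2,  mask = 0b11 << 2 = 0b1100
 *
 * If inline_push_const_mask == 0b1111, the mask is fully covered, and
 * arg_index = util_bitcount64(0b1111 & BITFIELD64_MASK(2)) = 2, so the
 * result is gathered from inline_push_consts[2] and [3] instead of being
 * loaded from memory.
 */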
1640 static LLVMValueRef visit_get_ssbo_size(struct ac_nir_context *ctx,
1641 const nir_intrinsic_instr *instr)
1642 {
1643 bool non_uniform = nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM;
1644
1645 LLVMValueRef rsrc = get_src(ctx, instr->src[0]);
1646 if (ctx->abi->load_ssbo)
1647 rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc, false, non_uniform);
1648
1649 return LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, false), "");
1650 }
1651
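/* Note (assumption about the V# layout used here): extracting element 2
 * of the buffer descriptor reads the num_records dword, which for raw
 * buffers holds the size in bytes, e.g. dword 2 == 256 for a 256-byte
 * SSBO.
 */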
1652 static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
1653 unsigned start, unsigned count)
1654 {
1655 LLVMValueRef mask[] = {ctx->i32_0, ctx->i32_1, LLVMConstInt(ctx->i32, 2, false),
1656 LLVMConstInt(ctx->i32, 3, false)};
1657
1658 unsigned src_elements = ac_get_llvm_num_components(src);
1659
1660 if (count == src_elements) {
1661 assert(start == 0);
1662 return src;
1663 } else if (count == 1) {
1664 assert(start < src_elements);
1665 return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
1666 } else {
1667 assert(start + count <= src_elements);
1668 assert(count <= 4);
1669 LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
1670 return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
1671 }
1672 }
1673
1674 static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
1675 const nir_intrinsic_instr *instr, nir_src src)
1676 {
1677 return enter_waterfall(ctx, wctx, get_src(ctx, src),
1678 nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
1679 }
1680
1681 static void visit_store_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1682 {
1683 LLVMValueRef src_data = get_src(ctx, instr->src[0]);
1684 int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
1685 unsigned writemask = nir_intrinsic_write_mask(instr);
1686 enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
1687
1688 struct waterfall_context wctx;
1689 LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);
1690
1691 LLVMValueRef rsrc = ctx->abi->load_ssbo ?
1692 ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false) : rsrc_base;
1693
1694 LLVMValueRef base_data = src_data;
1695 base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
1696 LLVMValueRef base_offset = get_src(ctx, instr->src[2]);
1697
1698 while (writemask) {
1699 int start, count;
1700 LLVMValueRef data, offset;
1701 LLVMTypeRef data_type;
1702
1703 u_bit_scan_consecutive_range(&writemask, &start, &count);
1704
1705 if (count == 3 && elem_size_bytes != 4) {
1706 writemask |= 1 << (start + 2);
1707 count = 2;
1708 }
1709 int num_bytes = count * elem_size_bytes; /* count in bytes */
1710
1711 /* We can only store 4 dwords at a time;
1712 * exceeding that can only happen for 64-bit vectors. */
1713 if (num_bytes > 16) {
1714 writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
1715 count = 2;
1716 num_bytes = 16;
1717 }
1718
1719 /* check the alignment of 16-bit stores */
1720 if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
1721 writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1722 count = 1;
1723 num_bytes = 2;
1724 }
1725
1726 /* On GFX6, split stores of 8-bit/16-bit vectors due to
1727 * alignment issues.
1728 */
1729 if (ctx->ac.gfx_level == GFX6 && count > 1 && elem_size_bytes < 4) {
1730 writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1731 count = 1;
1732 num_bytes = elem_size_bytes;
1733 }
1734
1735 data = extract_vector_range(&ctx->ac, base_data, start, count);
1736
1737 offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
1738 LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");
1739
1740 if (num_bytes == 1) {
1741 ac_build_buffer_store_byte(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, access);
1742 } else if (num_bytes == 2) {
1743 ac_build_buffer_store_short(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, access);
1744 } else {
1745 switch (num_bytes) {
1746 case 16: /* v4f32 */
1747 data_type = ctx->ac.v4f32;
1748 break;
1749 case 12: /* v3f32 */
1750 data_type = ctx->ac.v3f32;
1751 break;
1752 case 8: /* v2f32 */
1753 data_type = ctx->ac.v2f32;
1754 break;
1755 case 4: /* f32 */
1756 data_type = ctx->ac.f32;
1757 break;
1758 default:
1759 unreachable("Malformed vector store.");
1760 }
1761 data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
1762
1763 ac_build_buffer_store_dword(&ctx->ac, rsrc, data, NULL, offset,
1764 ctx->ac.i32_0, access);
1765 }
1766 }
1767
1768 exit_waterfall(ctx, &wctx, NULL);
1769 }
1770
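/* Worked example of the writemask splitting above (illustrative): storing
 * a vec4 of 64-bit values with writemask 0b1111 first scans the range
 * start = 0, count = 4, i.e. num_bytes = 32. Since only 16 bytes can be
 * stored at once, count is clamped to 2, bits 0b1100 are pushed back into
 * the writemask, and the loop ends up emitting two v4f32 (16-byte)
 * stores.
 */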
1771 static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx, LLVMValueRef descriptor,
1772 LLVMValueRef offset, LLVMValueRef compare,
1773 LLVMValueRef exchange, bool image)
1774 {
1775 LLVMBasicBlockRef start_block = NULL, then_block = NULL;
1776 if (ctx->abi->robust_buffer_access || image) {
1777 LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);
1778
1779 LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
1780 start_block = LLVMGetInsertBlock(ctx->ac.builder);
1781
1782 ac_build_ifcc(&ctx->ac, cond, -1);
1783
1784 then_block = LLVMGetInsertBlock(ctx->ac.builder);
1785 }
1786
1787 if (image)
1788 offset = LLVMBuildMul(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, 8, false), "");
1789
1790 LLVMValueRef ptr_parts[2] = {
1791 ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
1792 LLVMBuildAnd(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
1793 LLVMConstInt(ctx->ac.i32, 65535, 0), "")};
1794
1795 ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
1796 ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");
1797
1798 offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");
1799
1800 LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
1801 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
1802 ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
1803 ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL),
1804 "");
1805
1806 LLVMValueRef result =
1807 ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
1808 result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
1809
1810 if (ctx->abi->robust_buffer_access || image) {
1811 ac_build_endif(&ctx->ac, -1);
1812
1813 LLVMBasicBlockRef incoming_blocks[2] = {
1814 start_block,
1815 then_block,
1816 };
1817
1818 LLVMValueRef incoming_values[2] = {
1819 ctx->ac.i64_0,
1820 result,
1821 };
1822 LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
1823 LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
1824 return ret;
1825 } else {
1826 return result;
1827 }
1828 }
1829
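/* Sketch of the address reconstruction above (explanatory, not additional
 * driver code): the buffer descriptor carries a 48-bit base address split
 * across two dwords,
 *
 *   dword0        = base[31:0]
 *   dword1[15:0]  = base[47:32]
 *
 * The low 16 bits of dword1 are truncated to i16 and sign-extended back
 * to i32 so the two dwords can be gathered and bitcast into a single i64,
 * to which the byte offset is added before the global cmpxchg.
 */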
1830 static const char *
1831 translate_atomic_op_str(nir_atomic_op op)
1832 {
1833 switch (op) {
1834 case nir_atomic_op_iadd: return "add";
1835 case nir_atomic_op_imin: return "smin";
1836 case nir_atomic_op_umin: return "umin";
1837 case nir_atomic_op_imax: return "smax";
1838 case nir_atomic_op_umax: return "umax";
1839 case nir_atomic_op_iand: return "and";
1840 case nir_atomic_op_ior: return "or";
1841 case nir_atomic_op_ixor: return "xor";
1842 case nir_atomic_op_fadd: return "fadd";
1843 case nir_atomic_op_fmin: return "fmin";
1844 case nir_atomic_op_fmax: return "fmax";
1845 case nir_atomic_op_xchg: return "swap";
1846 case nir_atomic_op_cmpxchg: return "cmpswap";
1847 case nir_atomic_op_inc_wrap: return "inc";
1848 case nir_atomic_op_dec_wrap: return "dec";
1849 default: abort();
1850 }
1851 }
1852
1853 static LLVMAtomicRMWBinOp
1854 translate_atomic_op(nir_atomic_op op)
1855 {
1856 switch (op) {
1857 case nir_atomic_op_iadd: return LLVMAtomicRMWBinOpAdd;
1858 case nir_atomic_op_xchg: return LLVMAtomicRMWBinOpXchg;
1859 case nir_atomic_op_iand: return LLVMAtomicRMWBinOpAnd;
1860 case nir_atomic_op_ior: return LLVMAtomicRMWBinOpOr;
1861 case nir_atomic_op_ixor: return LLVMAtomicRMWBinOpXor;
1862 case nir_atomic_op_umin: return LLVMAtomicRMWBinOpUMin;
1863 case nir_atomic_op_umax: return LLVMAtomicRMWBinOpUMax;
1864 case nir_atomic_op_imin: return LLVMAtomicRMWBinOpMin;
1865 case nir_atomic_op_imax: return LLVMAtomicRMWBinOpMax;
1866 case nir_atomic_op_fadd: return LLVMAtomicRMWBinOpFAdd;
1867 default: unreachable("Unexpected atomic");
1868 }
1869 }
1870
1871 static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1872 {
1873 nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
1874 const char *op = translate_atomic_op_str(nir_op);
1875 bool is_float = nir_atomic_op_type(nir_op) == nir_type_float;
1876
1877 LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
1878 char name[64], type[8];
1879 LLVMValueRef params[6], descriptor;
1880 LLVMValueRef result;
1881 int arg_count = 0;
1882
1883 struct waterfall_context wctx;
1884 LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
1885
1886 descriptor = ctx->abi->load_ssbo ?
1887 ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false) : rsrc_base;
1888
1889 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap && return_type == ctx->ac.i64) {
1890 result = emit_ssbo_comp_swap_64(ctx, descriptor, get_src(ctx, instr->src[1]),
1891 get_src(ctx, instr->src[2]), get_src(ctx, instr->src[3]), false);
1892 } else {
1893 LLVMValueRef data = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
1894
1895 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap) {
1896 params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
1897 }
1898 if (is_float) {
1899 data = ac_to_float(&ctx->ac, data);
1900 return_type = LLVMTypeOf(data);
1901 }
1902
1903 unsigned cache_flags =
1904 ac_get_hw_cache_flags(ctx->ac.info,
1905 ac_get_mem_access_flags(instr) | ACCESS_TYPE_ATOMIC).value;
1906
1907 params[arg_count++] = data;
1908 params[arg_count++] = descriptor;
1909 params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
1910 params[arg_count++] = ctx->ac.i32_0; /* soffset */
1911 params[arg_count++] = LLVMConstInt(ctx->ac.i32, cache_flags, 0);
1912
1913 ac_build_type_name_for_intr(return_type, type, sizeof(type));
1914 snprintf(name, sizeof(name), "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
1915
1916 result = ac_build_intrinsic(&ctx->ac, name, return_type, params, arg_count, 0);
1917
1918 if (is_float) {
1919 result = ac_to_integer(&ctx->ac, result);
1920 }
1921 }
1922
1923 return exit_waterfall(ctx, &wctx, result);
1924 }
1925
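/* Example of the intrinsic name built above: an ssbo_atomic with
 * nir_atomic_op_iadd on an i32 value produces
 *
 *   llvm.amdgcn.raw.buffer.atomic.add.i32
 *
 * called with (data, descriptor, voffset, soffset, cache_flags).
 */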
1926 static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1927 {
1928 struct waterfall_context wctx;
1929 LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
1930
1931 int elem_size_bytes = instr->def.bit_size / 8;
1932 int num_components = instr->num_components;
1933 enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
1934
1935 LLVMValueRef offset = get_src(ctx, instr->src[1]);
1936 LLVMValueRef rsrc = ctx->abi->load_ssbo ?
1937 ctx->abi->load_ssbo(ctx->abi, rsrc_base, false, false) : rsrc_base;
1938 LLVMValueRef vindex = ctx->ac.i32_0;
1939
1940 LLVMTypeRef def_type = get_def_type(ctx, &instr->def);
1941 LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
1942
1943 LLVMValueRef results[4];
1944 for (int i = 0; i < num_components;) {
1945 int num_elems = num_components - i;
1946 if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
1947 num_elems = 1;
1948 if (num_elems * elem_size_bytes > 16)
1949 num_elems = 16 / elem_size_bytes;
1950 int load_bytes = num_elems * elem_size_bytes;
1951
1952 LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);
1953 LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, offset, immoffset, "");
1954
1955 LLVMValueRef ret;
1956
1957 if (load_bytes == 1) {
1958 ret = ac_build_buffer_load_byte(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
1959 access);
1960 } else if (load_bytes == 2) {
1961 ret = ac_build_buffer_load_short(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
1962 access);
1963 } else {
1964 int num_channels = util_next_power_of_two(load_bytes) / 4;
1965 bool can_speculate = access & ACCESS_CAN_REORDER;
1966
1967 ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels, vindex, voffset, ctx->ac.i32_0,
1968 ctx->ac.f32, access, can_speculate, false);
1969 }
1970
1971 LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
1972 ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
1973 ret = ac_trim_vector(&ctx->ac, ret, load_bytes);
1974
1975 LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
1976 ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");
1977
1978 for (unsigned j = 0; j < num_elems; j++) {
1979 results[i + j] =
1980 LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
1981 }
1982 i += num_elems;
1983 }
1984
1985 LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
1986 return exit_waterfall(ctx, &wctx, ret);
1987 }
1988
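/* Worked example of the load splitting above (illustrative): a
 * 3-component 16-bit load with 2-byte alignment takes num_elems = 1 per
 * iteration, i.e. three 2-byte loads, while a 4-component 32-bit load
 * fetches all 16 bytes in one buffer load and then extracts the four
 * elements.
 */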
1989 static LLVMValueRef enter_waterfall_ubo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
1990 const nir_intrinsic_instr *instr)
1991 {
1992 return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
1993 nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
1994 }
1995
1996 static LLVMValueRef get_global_address(struct ac_nir_context *ctx,
1997 nir_intrinsic_instr *instr,
1998 LLVMTypeRef type)
1999 {
2000 bool is_store = instr->intrinsic == nir_intrinsic_store_global ||
2001 instr->intrinsic == nir_intrinsic_store_global_amd;
2002 LLVMValueRef addr = get_src(ctx, instr->src[is_store ? 1 : 0]);
2003
2004 LLVMTypeRef ptr_type = LLVMPointerType(type, AC_ADDR_SPACE_GLOBAL);
2005
2006 if (nir_intrinsic_has_base(instr)) {
2007 /* _amd variants */
2008 uint32_t base = nir_intrinsic_base(instr);
2009 unsigned num_src = nir_intrinsic_infos[instr->intrinsic].num_srcs;
2010 LLVMValueRef offset = get_src(ctx, instr->src[num_src - 1]);
2011 offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");
2012
2013 LLVMTypeRef i8_ptr_type = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_GLOBAL);
2014 addr = LLVMBuildIntToPtr(ctx->ac.builder, addr, i8_ptr_type, "");
2015 addr = LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "");
2016 return LLVMBuildPointerCast(ctx->ac.builder, addr, ptr_type, "");
2017 } else {
2018 return LLVMBuildIntToPtr(ctx->ac.builder, addr, ptr_type, "");
2019 }
2020 }
2021
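/* The two addressing forms handled above, in pseudo-C (sketch only):
 *
 *   load/store_global     : ptr = (T addrspace(1)*)addr;
 *   load/store_global_amd : ptr = (T addrspace(1)*)
 *                                 ((i8 addrspace(1)*)addr + offset + base);
 *
 * i.e. the _amd variants carry an extra 32-bit offset source plus a
 * constant base, added with a byte-wise GEP before the pointer cast.
 */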
2022 static LLVMValueRef visit_load_global(struct ac_nir_context *ctx,
2023 nir_intrinsic_instr *instr)
2024 {
2025 LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
2026 LLVMValueRef val;
2027 LLVMValueRef addr = get_global_address(ctx, instr, result_type);
2028
2029 val = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
2030
2031 if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
2032 LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
2033 LLVMSetAlignment(val, ac_get_type_size(result_type));
2034 }
2035
2036 return val;
2037 }
2038
2039 static void visit_store_global(struct ac_nir_context *ctx,
2040 nir_intrinsic_instr *instr)
2041 {
2042 LLVMValueRef data = get_src(ctx, instr->src[0]);
2043 LLVMTypeRef type = LLVMTypeOf(data);
2044 LLVMValueRef addr = get_global_address(ctx, instr, type);
2045 LLVMValueRef val;
2046
2047 val = LLVMBuildStore(ctx->ac.builder, data, addr);
2048
2049 if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
2050 LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
2051 LLVMSetAlignment(val, ac_get_type_size(type));
2052 }
2053 }
2054
2055 static LLVMValueRef visit_global_atomic(struct ac_nir_context *ctx,
2056 nir_intrinsic_instr *instr)
2057 {
2058 LLVMValueRef data = get_src(ctx, instr->src[1]);
2059 LLVMAtomicRMWBinOp op;
2060 LLVMValueRef result;
2061
2062 /* use "singlethread" sync scope to implement relaxed ordering */
2063 const char *sync_scope = "singlethread-one-as";
2064
2065 nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
2066 bool is_float = nir_atomic_op_type(nir_op) == nir_type_float;
2067
2068 LLVMTypeRef data_type = LLVMTypeOf(data);
2069
2070 assert(instr->src[1].ssa->num_components == 1);
2071 if (is_float) {
2072 switch (instr->src[1].ssa->bit_size) {
2073 case 32:
2074 data_type = ctx->ac.f32;
2075 break;
2076 case 64:
2077 data_type = ctx->ac.f64;
2078 break;
2079 default:
2080 unreachable("Unsupported float bit size");
2081 }
2082
2083 data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
2084 }
2085
2086 LLVMValueRef addr = get_global_address(ctx, instr, data_type);
2087
2088 if (instr->intrinsic == nir_intrinsic_global_atomic_swap ||
2089 instr->intrinsic == nir_intrinsic_global_atomic_swap_amd) {
2090 LLVMValueRef data1 = get_src(ctx, instr->src[2]);
2091 result = ac_build_atomic_cmp_xchg(&ctx->ac, addr, data, data1, sync_scope);
2092 result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
2093 } else if (is_float) {
2094 const char *op = translate_atomic_op_str(nir_op);
2095 char name[64], type[8];
2096 LLVMValueRef params[2];
2097 int arg_count = 0;
2098
2099 params[arg_count++] = addr;
2100 params[arg_count++] = data;
2101
2102 ac_build_type_name_for_intr(data_type, type, sizeof(type));
2103 snprintf(name, sizeof(name), "llvm.amdgcn.global.atomic.%s.%s.p1.%s", op, type, type);
2104
2105 result = ac_build_intrinsic(&ctx->ac, name, data_type, params, arg_count, 0);
2106 } else {
2107 op = translate_atomic_op(nir_op);
2108 result = ac_build_atomic_rmw(&ctx->ac, op, addr, ac_to_integer(&ctx->ac, data), sync_scope);
2109 }
2110
2111 result = ac_to_integer(&ctx->ac, result);
2112
2113 return result;
2114 }
2115
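/* Example of the float path above: a global fmin on a 64-bit value maps
 * to the intrinsic name
 *
 *   llvm.amdgcn.global.atomic.fmin.f64.p1.f64
 *
 * while the integer ops go through a plain LLVM atomicrmw with the
 * "singlethread-one-as" scope to get relaxed ordering.
 */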
2116 static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2117 {
2118 struct waterfall_context wctx;
2119 LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);
2120
2121 LLVMValueRef ret;
2122 LLVMValueRef rsrc = rsrc_base;
2123 LLVMValueRef offset = get_src(ctx, instr->src[1]);
2124 int num_components = instr->num_components;
2125
2126 assert(instr->def.bit_size >= 32 && instr->def.bit_size % 32 == 0);
2127
2128 if (ctx->abi->load_ubo)
2129 rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);
2130
2131 /* Convert to a 32-bit load. */
2132 if (instr->def.bit_size == 64)
2133 num_components *= 2;
2134
2135 ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset, NULL,
2136 ctx->ac.f32, 0, true, true);
2137 ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2138
2139 return exit_waterfall(ctx, &wctx, ret);
2140 }
2141
2142 static void visit_store_output(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2143 {
2144 unsigned base = nir_intrinsic_base(instr);
2145 unsigned writemask = nir_intrinsic_write_mask(instr);
2146 unsigned component = nir_intrinsic_component(instr);
2147 LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
2148 ASSERTED nir_src offset = *nir_get_io_offset_src(instr);
2149
2150 /* No indirect indexing is allowed here. */
2151 assert(nir_src_is_const(offset) && nir_src_as_uint(offset) == 0);
2152
2153 switch (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src))) {
2154 case 16:
2155 case 32:
2156 break;
2157 case 64:
2158 unreachable("64-bit IO should have been lowered to 32 bits");
2159 return;
2160 default:
2161 unreachable("unhandled store_output bit size");
2162 return;
2163 }
2164
2165 writemask <<= component;
2166
2167 for (unsigned chan = 0; chan < 8; chan++) {
2168 if (!(writemask & (1 << chan)))
2169 continue;
2170
2171 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
2172 LLVMValueRef output_addr = ctx->abi->outputs[base * 4 + chan];
2173
2174 if (!ctx->abi->is_16bit[base * 4 + chan] &&
2175 LLVMTypeOf(value) == ctx->ac.f16) {
2176 LLVMValueRef output, index;
2177
2178 /* Insert the 16-bit value into the low or high bits of the 32-bit output
2179 * using read-modify-write.
2180 */
2181 index = LLVMConstInt(ctx->ac.i32, nir_intrinsic_io_semantics(instr).high_16bits, 0);
2182
2183 output = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.v2f16, output_addr, "");
2184 output = LLVMBuildInsertElement(ctx->ac.builder, output, value, index, "");
2185 value = LLVMBuildBitCast(ctx->ac.builder, output, ctx->ac.f32, "");
2186 }
2187 LLVMBuildStore(ctx->ac.builder, value, output_addr);
2188 }
2189 }
2190
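/* Sketch of the 16-bit read-modify-write above (illustrative pseudo-C):
 * an f16 store into a 32-bit output slot loads the slot as v2f16,
 * replaces element 0 or 1 depending on io_semantics.high_16bits, and
 * writes the pair back bitcast to f32:
 *
 *   v2f16 out = *output_addr;
 *   out[high_16bits ? 1 : 0] = value;
 *   *output_addr = bitcast(out, f32);
 */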
2191 static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
2192 {
2193 switch (dim) {
2194 case GLSL_SAMPLER_DIM_BUF:
2195 return 1;
2196 case GLSL_SAMPLER_DIM_1D:
2197 return array ? 2 : 1;
2198 case GLSL_SAMPLER_DIM_2D:
2199 return array ? 3 : 2;
2200 case GLSL_SAMPLER_DIM_MS:
2201 return array ? 4 : 3;
2202 case GLSL_SAMPLER_DIM_3D:
2203 case GLSL_SAMPLER_DIM_CUBE:
2204 return 3;
2205 case GLSL_SAMPLER_DIM_RECT:
2206 case GLSL_SAMPLER_DIM_SUBPASS:
2207 return 2;
2208 case GLSL_SAMPLER_DIM_SUBPASS_MS:
2209 return 3;
2210 default:
2211 break;
2212 }
2213 return 0;
2214 }
2215
2216 static void get_image_coords(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2217 LLVMValueRef dynamic_desc_index, struct ac_image_args *args,
2218 enum glsl_sampler_dim dim, bool is_array)
2219 {
2220 LLVMValueRef src0 = get_src(ctx, instr->src[1]);
2221 LLVMValueRef masks[] = {
2222 ctx->ac.i32_0,
2223 ctx->ac.i32_1,
2224 LLVMConstInt(ctx->ac.i32, 2, false),
2225 LLVMConstInt(ctx->ac.i32, 3, false),
2226 };
2227
2228 int count;
2229 ASSERTED bool add_frag_pos =
2230 (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2231 bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2232 bool gfx9_1d = ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
2233 assert(!add_frag_pos && "Input attachments should be lowered by this point.");
2234 count = image_type_to_components_count(dim, is_array);
2235
2236 if (count == 1 && !gfx9_1d) {
2237 if (instr->src[1].ssa->num_components)
2238 args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
2239 else
2240 args->coords[0] = src0;
2241 } else {
2242 int chan;
2243 if (is_ms)
2244 count--;
2245 for (chan = 0; chan < count; ++chan) {
2246 args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
2247 }
2248
2249 if (gfx9_1d) {
2250 if (is_array) {
2251 args->coords[2] = args->coords[1];
2252 args->coords[1] = ctx->ac.i32_0;
2253 } else
2254 args->coords[1] = ctx->ac.i32_0;
2255 count++;
2256 }
2257 if (ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_2D && !is_array) {
2258 /* The hw can't bind a slice of a 3D image as a 2D
2259 * image, because it ignores BASE_ARRAY if the target
2260 * is 3D. The workaround is to read BASE_ARRAY and set
2261 * it as the 3rd address operand for all 2D images.
2262 */
2263 LLVMValueRef first_layer, const5, mask;
2264
2265 const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
2266 mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
2267 first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
2268 first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");
2269
2270 if (instr->intrinsic == nir_intrinsic_bindless_image_load ||
2271 instr->intrinsic == nir_intrinsic_bindless_image_sparse_load ||
2272 instr->intrinsic == nir_intrinsic_bindless_image_store) {
2273 int lod_index = instr->intrinsic == nir_intrinsic_bindless_image_store ? 4 : 3;
2274 bool has_lod = !nir_src_is_const(instr->src[lod_index]) ||
2275 nir_src_as_uint(instr->src[lod_index]) != 0;
2276 if (has_lod) {
2277 /* If there's a LOD parameter, it matters whether the image is 3D or 2D,
2278 * because the hw reads either the fourth or the third component as the LOD.
2279 * So detect 3D images and place the LOD at the third component otherwise.
2280 */
2281 LLVMValueRef const3, const28, const4, rword3, type3d, type, is_3d, lod;
2282 const3 = LLVMConstInt(ctx->ac.i32, 3, 0);
2283 const28 = LLVMConstInt(ctx->ac.i32, 28, 0);
2284 const4 = LLVMConstInt(ctx->ac.i32, 4, 0);
2285 type3d = LLVMConstInt(ctx->ac.i32, V_008F1C_SQ_RSRC_IMG_3D, 0);
2286 rword3 = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const3, "");
2287 type = ac_build_bfe(&ctx->ac, rword3, const28, const4, false);
2288 is_3d = emit_int_cmp(&ctx->ac, LLVMIntEQ, type, type3d);
2289 lod = get_src(ctx, instr->src[lod_index]);
2290 first_layer = emit_bcsel(&ctx->ac, is_3d, first_layer, lod);
2291 }
2292 }
2293
2294 args->coords[count] = first_layer;
2295 count++;
2296 }
2297
2298 if (is_ms) {
2299 /* sample index */
2300 args->coords[count] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
2301 count++;
2302 }
2303 }
2304 }
2305
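/* Worked example of the GFX9 fixups above (illustrative): a 1D array
 * image coordinate (x, layer) becomes (x, 0, layer) because GFX9 samples
 * 1D as 2D, and for non-array 2D images the descriptor's BASE_ARRAY field
 * (dword 5) is appended as a third coordinate so that a slice of a 3D
 * image bound as 2D still addresses the right layer.
 */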
2306 static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
2307 struct waterfall_context *wctx,
2308 const nir_intrinsic_instr *instr)
2309 {
2310 /* src0 is the descriptor when uniform, or the descriptor index when non-uniform */
2311 LLVMValueRef value = get_src(ctx, instr->src[0]);
2312
2313 return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
2314 }
2315
2316 static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2317 {
2318 LLVMValueRef res;
2319
2320 enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2321 enum gl_access_qualifier access = nir_intrinsic_access(instr);
2322 bool is_array = nir_intrinsic_image_array(instr);
2323
2324 struct waterfall_context wctx;
2325 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2326
2327 struct ac_image_args args = {0};
2328
2329 args.access = ac_get_mem_access_flags(instr);
2330 args.tfe = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
2331
2332 if (dim == GLSL_SAMPLER_DIM_BUF) {
2333 unsigned num_channels = util_last_bit(nir_def_components_read(&instr->def));
2334 if (instr->def.bit_size == 64)
2335 num_channels = num_channels < 4 ? 2 : 4;
2336 LLVMValueRef rsrc, vindex;
2337
2338 rsrc = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2339 vindex =
2340 LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2341
2342 bool can_speculate = access & ACCESS_CAN_REORDER;
2343 res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
2344 args.access, can_speculate,
2345 instr->def.bit_size == 16,
2346 args.tfe);
2347 res = ac_build_expand(&ctx->ac, res, num_channels, args.tfe ? 5 : 4);
2348
2349 res = ac_trim_vector(&ctx->ac, res, instr->def.num_components);
2350 res = ac_to_integer(&ctx->ac, res);
2351 } else if (instr->intrinsic == nir_intrinsic_bindless_image_fragment_mask_load_amd) {
2352 assert(ctx->ac.gfx_level < GFX11);
2353
2354 args.opcode = ac_image_load;
2355 args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_FMASK);
2356 get_image_coords(ctx, instr, dynamic_index, &args, GLSL_SAMPLER_DIM_2D, is_array);
2357 args.dmask = 0xf;
2358 args.dim = is_array ? ac_image_2darray : ac_image_2d;
2359 args.attributes = AC_ATTR_INVARIANT_LOAD;
2360 args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2361
2362 res = ac_build_image_opcode(&ctx->ac, &args);
2363 } else {
2364 bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
2365
2366 args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
2367 args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2368 get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2369 args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2370 if (!level_zero)
2371 args.lod = get_src(ctx, instr->src[3]);
2372 args.dmask = 15;
2373 args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
2374
2375 args.d16 = instr->def.bit_size == 16;
2376
2377 res = ac_build_image_opcode(&ctx->ac, &args);
2378 }
2379
2380 if (instr->def.bit_size == 64) {
2381 LLVMValueRef code = NULL;
2382 if (args.tfe) {
2383 code = ac_llvm_extract_elem(&ctx->ac, res, 4);
2384 res = ac_trim_vector(&ctx->ac, res, 4);
2385 }
2386
2387 res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i64, 2), "");
2388 LLVMValueRef x = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_0, "");
2389 LLVMValueRef w = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_1, "");
2390
2391 if (code)
2392 code = LLVMBuildZExt(ctx->ac.builder, code, ctx->ac.i64, "");
2393 LLVMValueRef values[5] = {x, ctx->ac.i64_0, ctx->ac.i64_0, w, code};
2394 res = ac_build_gather_values(&ctx->ac, values, 4 + args.tfe);
2395 }
2396
2397 return exit_waterfall(ctx, &wctx, res);
2398 }
2399
2400 static void visit_image_store(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2401 {
2402 enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2403 bool is_array = nir_intrinsic_image_array(instr);
2404
2405 struct waterfall_context wctx;
2406 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2407
2408 struct ac_image_args args = {0};
2409 args.access = ac_get_mem_access_flags(instr);
2410
2411 LLVMValueRef src = get_src(ctx, instr->src[3]);
2412 if (instr->src[3].ssa->bit_size == 64) {
2413 /* only R64_UINT and R64_SINT supported */
2414 src = ac_llvm_extract_elem(&ctx->ac, src, 0);
2415 src = LLVMBuildBitCast(ctx->ac.builder, src, ctx->ac.v2f32, "");
2416 } else {
2417 src = ac_to_float(&ctx->ac, src);
2418 }
2419
2420 if (dim == GLSL_SAMPLER_DIM_BUF) {
2421 LLVMValueRef rsrc = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2422 unsigned src_channels = ac_get_llvm_num_components(src);
2423 LLVMValueRef vindex;
2424
2425 if (src_channels == 3)
2426 src = ac_build_expand_to_vec4(&ctx->ac, src, 3);
2427
2428 vindex =
2429 LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2430
2431 ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex, ctx->ac.i32_0, args.access);
2432 } else {
2433 bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
2434
2435 args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
2436 args.data[0] = src;
2437 args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2438 get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2439 args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2440 if (!level_zero)
2441 args.lod = get_src(ctx, instr->src[4]);
2442 args.dmask = 15;
2443 args.d16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.data[0])) == 16;
2444
2445 ac_build_image_opcode(&ctx->ac, &args);
2446 }
2447
2448 exit_waterfall(ctx, &wctx, NULL);
2449 }
2450
2451 static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2452 {
2453 LLVMValueRef params[7];
2454 int param_count = 0;
2455
2456 nir_atomic_op op = nir_intrinsic_atomic_op(instr);
2457 bool cmpswap = op == nir_atomic_op_cmpxchg;
2458 const char *atomic_name = translate_atomic_op_str(op);
2459 char intrinsic_name[64];
2460 enum ac_atomic_op atomic_subop;
2461 ASSERTED int length;
2462
2463 enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2464 bool is_array = nir_intrinsic_image_array(instr);
2465
2466 struct waterfall_context wctx;
2467 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2468
2469 switch (op) {
2470 case nir_atomic_op_iadd:
2471 atomic_subop = ac_atomic_add;
2472 break;
2473 case nir_atomic_op_imin:
2474 atomic_subop = ac_atomic_smin;
2475 break;
2476 case nir_atomic_op_umin:
2477 atomic_subop = ac_atomic_umin;
2478 break;
2479 case nir_atomic_op_imax:
2480 atomic_subop = ac_atomic_smax;
2481 break;
2482 case nir_atomic_op_umax:
2483 atomic_subop = ac_atomic_umax;
2484 break;
2485 case nir_atomic_op_iand:
2486 atomic_subop = ac_atomic_and;
2487 break;
2488 case nir_atomic_op_ior:
2489 atomic_subop = ac_atomic_or;
2490 break;
2491 case nir_atomic_op_ixor:
2492 atomic_subop = ac_atomic_xor;
2493 break;
2494 case nir_atomic_op_xchg:
2495 atomic_subop = ac_atomic_swap;
2496 break;
2497 case nir_atomic_op_cmpxchg:
2498 atomic_subop = 0; /* not used */
2499 break;
2500 case nir_atomic_op_inc_wrap:
2501 atomic_subop = ac_atomic_inc_wrap;
2502 break;
2503 case nir_atomic_op_dec_wrap:
2504 atomic_subop = ac_atomic_dec_wrap;
2505 break;
2506 case nir_atomic_op_fadd:
2507 atomic_subop = ac_atomic_fmin; /* Non-buffer fadd atomics are not supported. */
2508 break;
2509 case nir_atomic_op_fmin:
2510 atomic_subop = ac_atomic_fmin;
2511 break;
2512 case nir_atomic_op_fmax:
2513 atomic_subop = ac_atomic_fmax;
2514 break;
2515 default:
2516 abort();
2517 }
2518
2519 if (cmpswap)
2520 params[param_count++] = get_src(ctx, instr->src[4]);
2521 params[param_count++] = get_src(ctx, instr->src[3]);
2522
2523 if (atomic_subop == ac_atomic_fmin || atomic_subop == ac_atomic_fmax)
2524 params[0] = ac_to_float(&ctx->ac, params[0]);
2525
2526 LLVMValueRef result;
2527 if (dim == GLSL_SAMPLER_DIM_BUF) {
2528 params[param_count++] = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2529 params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
2530 ctx->ac.i32_0, ""); /* vindex */
2531 params[param_count++] = ctx->ac.i32_0; /* voffset */
2532 if (cmpswap && instr->def.bit_size == 64) {
2533 result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true);
2534 } else {
2535 LLVMTypeRef data_type = LLVMTypeOf(params[0]);
2536 char type[8];
2537 unsigned cache_flags =
2538 ac_get_hw_cache_flags(ctx->ac.info,
2539 ac_get_mem_access_flags(instr) | ACCESS_TYPE_ATOMIC).value;
2540
2541 params[param_count++] = ctx->ac.i32_0; /* soffset */
2542 params[param_count++] = LLVMConstInt(ctx->ac.i32, cache_flags, 0);
2543
2544 ac_build_type_name_for_intr(data_type, type, sizeof(type));
2545 length = snprintf(intrinsic_name, sizeof(intrinsic_name),
2546 "llvm.amdgcn.struct.buffer.atomic.%s.%s",
2547 atomic_name, type);
2548
2549 assert(length < sizeof(intrinsic_name));
2550 result = ac_build_intrinsic(&ctx->ac, intrinsic_name, LLVMTypeOf(params[0]), params, param_count, 0);
2551 }
2552 } else {
2553 struct ac_image_args args = {0};
2554 args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
2555 args.atomic = atomic_subop;
2556 args.data[0] = params[0];
2557 if (cmpswap)
2558 args.data[1] = params[1];
2559 args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2560 get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2561 args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2562 args.access = ac_get_mem_access_flags(instr);
2563
2564 result = ac_build_image_opcode(&ctx->ac, &args);
2565 }
2566
2567 return exit_waterfall(ctx, &wctx, result);
2568 }
2569
2570 static void emit_discard(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2571 {
2572 LLVMValueRef cond;
2573
2574 if (instr->intrinsic == nir_intrinsic_discard_if ||
2575 instr->intrinsic == nir_intrinsic_terminate_if) {
2576 cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2577 } else {
2578 assert(instr->intrinsic == nir_intrinsic_discard ||
2579 instr->intrinsic == nir_intrinsic_terminate);
2580 cond = ctx->ac.i1false;
2581 }
2582
2583 ac_build_kill_if_false(&ctx->ac, cond);
2584 }
2585
2586 static void emit_demote(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2587 {
2588 LLVMValueRef cond;
2589
2590 if (instr->intrinsic == nir_intrinsic_demote_if) {
2591 cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2592 } else {
2593 assert(instr->intrinsic == nir_intrinsic_demote);
2594 cond = ctx->ac.i1false;
2595 }
2596
2597 /* This demotes the pixel if the condition is false. */
2598 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wqm.demote", ctx->ac.voidt, &cond, 1, 0);
2599 }
2600
2601 static LLVMValueRef visit_load_subgroup_id(struct ac_nir_context *ctx)
2602 {
2603 if (gl_shader_stage_is_compute(ctx->stage)) {
2604 if (ctx->ac.gfx_level >= GFX10_3)
2605 return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tg_size), 20, 5);
2606 else
2607 return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tg_size), 6, 6);
2608 } else if (ctx->args->tcs_wave_id.used) {
2609 return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tcs_wave_id), 0, 3);
2610 } else if (ctx->args->merged_wave_info.used) {
2611 return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 24, 4);
2612 } else {
2613 return ctx->ac.i32_0;
2614 }
2615 }
2616
2617 static LLVMValueRef visit_load_local_invocation_index(struct ac_nir_context *ctx)
2618 {
2619 if (ctx->abi->vs_rel_patch_id)
2620 return ctx->abi->vs_rel_patch_id;
2621
2622 return ac_build_imad(&ctx->ac, visit_load_subgroup_id(ctx),
2623 LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0),
2624 ac_get_thread_id(&ctx->ac));
2625 }
2626
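/* Worked example for the index computation above (hypothetical values):
 * with wave_size = 64, subgroup_id = 2 and lane id 5, the flat local
 * invocation index is 2 * 64 + 5 = 133.
 */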
2627 static LLVMValueRef visit_first_invocation(struct ac_nir_context *ctx)
2628 {
2629 LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
2630 const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";
2631
2632 /* The second argument is whether cttz(0) should be defined, but we do not care. */
2633 LLVMValueRef args[] = {active_set, ctx->ac.i1false};
2634 LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr, ctx->ac.iN_wavemask, args, 2, 0);
2635
2636 return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
2637 }
2638
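/* Example of the ballot/cttz trick above (illustrative): if only lanes 3
 * and 7 are active, the ballot mask is 0b10001000 and cttz returns 3,
 * the index of the first active invocation.
 */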
2639 static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2640 {
2641 LLVMValueRef values[16], derived_ptr, index, ret;
2642 unsigned const_off = nir_intrinsic_base(instr);
2643
2644 LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
2645 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], const_off);
2646
2647 for (int chan = 0; chan < instr->num_components; chan++) {
2648 index = LLVMConstInt(ctx->ac.i32, chan, 0);
2649 derived_ptr = LLVMBuildGEP2(ctx->ac.builder, elem_type, ptr, &index, 1, "");
2650 values[chan] = LLVMBuildLoad2(ctx->ac.builder, elem_type, derived_ptr, "");
2651 }
2652
2653 ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
2654
2655 return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2656 }
2657
2658 static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2659 {
2660 LLVMValueRef derived_ptr, data, index;
2661 LLVMBuilderRef builder = ctx->ac.builder;
2662
2663 unsigned const_off = nir_intrinsic_base(instr);
2664 LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
2665 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1], const_off);
2666 LLVMValueRef src = get_src(ctx, instr->src[0]);
2667
2668 int writemask = nir_intrinsic_write_mask(instr);
2669 for (int chan = 0; chan < 16; chan++) {
2670 if (!(writemask & (1 << chan))) {
2671 continue;
2672 }
2673 data = ac_llvm_extract_elem(&ctx->ac, src, chan);
2674 index = LLVMConstInt(ctx->ac.i32, chan, 0);
2675 derived_ptr = LLVMBuildGEP2(builder, elem_type, ptr, &index, 1, "");
2676 LLVMBuildStore(builder, data, derived_ptr);
2677 }
2678 }
2679
2680 static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx,
2681 const nir_intrinsic_instr *instr)
2682 {
2683 LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
2684 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0);
2685
2686 LLVMValueRef values[2];
2687 uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
2688 unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
2689 for (unsigned i = 0; i < 2; i++) {
2690 LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
2691 LLVMValueRef derived_ptr = LLVMBuildGEP2(ctx->ac.builder, pointee_type, ptr, &index, 1, "");
2692 values[i] = LLVMBuildLoad2(ctx->ac.builder, pointee_type, derived_ptr, "");
2693 }
2694
2695 LLVMValueRef ret = ac_build_gather_values(&ctx->ac, values, 2);
2696 return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2697 }
2698
2699 static void visit_store_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2700 {
2701 LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
2702 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1], 0);
2703 LLVMValueRef src = get_src(ctx, instr->src[0]);
2704
2705 uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
2706 unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
2707 for (unsigned i = 0; i < 2; i++) {
2708 LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
2709 LLVMValueRef derived_ptr = LLVMBuildGEP2(ctx->ac.builder, pointee_type, ptr, &index, 1, "");
2710 LLVMBuildStore(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, src, i), derived_ptr);
2711 }
2712 }
2713
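/* Worked example for the paired LDS access above (hypothetical values):
 * with offset0 = 0, offset1 = 1 and st64 set, the stride is 64 elements,
 * so the two loads/stores hit element indices 0 and 64 relative to the
 * base pointer.
 */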
2714 static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2715 LLVMValueRef ptr, int src_idx)
2716 {
2717 LLVMValueRef result;
2718 LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
2719 nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
2720
2721 const char *sync_scope = "workgroup-one-as";
2722
2723 if (nir_op == nir_atomic_op_cmpxchg) {
2724 LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
2725 result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
2726 result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
2727 } else if (nir_op == nir_atomic_op_fmin || nir_op == nir_atomic_op_fmax) {
2728 const char *op = translate_atomic_op_str(nir_op);
2729 char name[64], type[8];
2730 LLVMValueRef params[5];
2731 LLVMTypeRef src_type;
2732 int arg_count = 0;
2733
2734 src = ac_to_float(&ctx->ac, src);
2735 src_type = LLVMTypeOf(src);
2736
2737 params[arg_count++] = ptr;
2738 params[arg_count++] = src;
2739 params[arg_count++] = ctx->ac.i32_0;
2740 params[arg_count++] = ctx->ac.i32_0;
2741 params[arg_count++] = ctx->ac.i1false;
2742
2743 ac_build_type_name_for_intr(src_type, type, sizeof(type));
2744 snprintf(name, sizeof(name), "llvm.amdgcn.ds.%s.%s", op, type);
2745
2746 result = ac_build_intrinsic(&ctx->ac, name, src_type, params, arg_count, 0);
2747 result = ac_to_integer(&ctx->ac, result);
2748 } else {
2749 LLVMAtomicRMWBinOp op = translate_atomic_op(nir_op);
2750 LLVMValueRef val;
2751
2752 if (nir_op == nir_atomic_op_fadd) {
2753 val = ac_to_float(&ctx->ac, src);
2754 } else {
2755 val = ac_to_integer(&ctx->ac, src);
2756 }
2757
2758 result = ac_build_atomic_rmw(&ctx->ac, op, ptr, val, sync_scope);
2759
2760 if (nir_op == nir_atomic_op_fadd) {
2761 result = ac_to_integer(&ctx->ac, result);
2762 }
2763 }
2764
2765 return result;
2766 }
2767
2768 static LLVMValueRef load_sample_pos(struct ac_nir_context *ctx)
2769 {
2770 LLVMValueRef values[2];
2771 LLVMValueRef pos[2];
2772
2773 pos[0] = ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
2774 pos[1] = ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));
2775
2776 values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
2777 values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
2778 return ac_build_gather_values(&ctx->ac, values, 2);
2779 }
2780
2781 static LLVMValueRef lookup_interp_param(struct ac_nir_context *ctx, enum glsl_interp_mode interp,
2782 unsigned location)
2783 {
2784 switch (interp) {
2785 case INTERP_MODE_FLAT:
2786 default:
2787 return NULL;
2788 case INTERP_MODE_SMOOTH:
2789 case INTERP_MODE_NONE:
2790 if (location == INTERP_CENTER)
2791 return ac_get_arg(&ctx->ac, ctx->args->persp_center);
2792 else if (location == INTERP_CENTROID)
2793 return ac_get_arg(&ctx->ac, ctx->args->persp_centroid);
2794 else if (location == INTERP_SAMPLE)
2795 return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
2796 break;
2797 case INTERP_MODE_NOPERSPECTIVE:
2798 if (location == INTERP_CENTER)
2799 return ac_get_arg(&ctx->ac, ctx->args->linear_center);
2800 else if (location == INTERP_CENTROID)
2801 return ac_get_arg(&ctx->ac, ctx->args->linear_centroid);
2802 else if (location == INTERP_SAMPLE)
2803 return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
2804 break;
2805 }
2806 return NULL;
2807 }
2808
2809 static LLVMValueRef barycentric_center(struct ac_nir_context *ctx, unsigned mode)
2810 {
2811 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
2812 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2813 }
2814
2815 static LLVMValueRef barycentric_offset(struct ac_nir_context *ctx, unsigned mode,
2816 LLVMValueRef offset)
2817 {
2818 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
2819 LLVMValueRef src_c0 =
2820 ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
2821 LLVMValueRef src_c1 =
2822 ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));
2823
2824 LLVMValueRef ij_out[2];
2825 LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);
2826
2827 /*
2828 * take the I then J parameters, and the DDX/Y for them, and
2829 * calculate the IJ inputs for the interpolator.
2830 * temp1 = ddx * offset/sample.x + I;
2831 * interp_param.I = ddy * offset/sample.y + temp1;
2832 * temp1 = ddx * offset/sample.x + J;
2833 * interp_param.J = ddy * offset/sample.y + temp1;
2834 */
2835 for (unsigned i = 0; i < 2; i++) {
2836 LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
2837 LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
2838 LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder, ddxy_out, ix_ll, "");
2839 LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder, ddxy_out, iy_ll, "");
2840 LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder, interp_param, ix_ll, "");
2841 LLVMValueRef temp1, temp2;
2842
2843 interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el, ctx->ac.f32, "");
2844
2845 temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
2846 temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);
2847
2848 ij_out[i] = LLVMBuildBitCast(ctx->ac.builder, temp2, ctx->ac.i32, "");
2849 }
2850 interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
2851 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2852 }
2853
2854 static LLVMValueRef barycentric_centroid(struct ac_nir_context *ctx, unsigned mode)
2855 {
2856 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
2857 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2858 }
2859
2860 static LLVMValueRef barycentric_sample(struct ac_nir_context *ctx, unsigned mode)
2861 {
2862 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
2863 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
2864 }
2865
2866 static LLVMValueRef barycentric_model(struct ac_nir_context *ctx)
2867 {
2868 return LLVMBuildBitCast(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->pull_model),
2869 ctx->ac.v3i32, "");
2870 }
2871
2872 static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx, LLVMValueRef interp_param,
2873 unsigned index, unsigned comp_start,
2874 unsigned num_components, unsigned bitsize,
2875 bool high_16bits)
2876 {
2877 LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
2878 LLVMValueRef interp_param_f;
2879
2880 interp_param_f = LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2f32, "");
2881 LLVMValueRef i = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_0, "");
2882 LLVMValueRef j = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_1, "");
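   /* i and j are the per-fragment barycentric weights that feed the
    * v_interp_* instructions below. */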
2883
2884 /* Workaround for issue 2647: kill threads with infinite interpolation coeffs */
2885 if (ctx->verified_interp && !_mesa_hash_table_search(ctx->verified_interp, interp_param)) {
2886 LLVMValueRef cond = ac_build_is_inf_or_nan(&ctx->ac, i);
2887 ac_build_kill_if_false(&ctx->ac, LLVMBuildNot(ctx->ac.builder, cond, ""));
2888 _mesa_hash_table_insert(ctx->verified_interp, interp_param, interp_param);
2889 }
2890
2891 LLVMValueRef values[4];
2892 assert(bitsize == 16 || bitsize == 32);
2893 for (unsigned comp = 0; comp < num_components; comp++) {
2894 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
2895 if (bitsize == 16) {
2896 values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
2897 ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j,
2898 high_16bits);
2899 } else {
2900 values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
2901 ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
2902 }
2903 }
2904
2905 return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
2906 }
2907
2908 static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr *instr,
2909 bool is_output)
2910 {
2911 LLVMValueRef values[8];
2912 LLVMTypeRef dest_type = get_def_type(ctx, &instr->def);
2913 LLVMTypeRef component_type;
2914 unsigned base = nir_intrinsic_base(instr);
2915 unsigned component = nir_intrinsic_component(instr);
2916 unsigned count = instr->def.num_components;
2917 nir_src *vertex_index_src = nir_get_io_arrayed_index_src(instr);
2918 LLVMValueRef vertex_index = vertex_index_src ? get_src(ctx, *vertex_index_src) : NULL;
2919 nir_src offset = *nir_get_io_offset_src(instr);
2920 LLVMValueRef indir_index = NULL;
2921
2922 switch (instr->def.bit_size) {
2923 case 16:
2924 case 32:
2925 break;
2926 case 64:
2927 if (ctx->stage != MESA_SHADER_VERTEX || is_output) {
2928 unreachable("64-bit IO should have been lowered");
2929 return NULL;
2930 }
2931 break;
2932 default:
2933 unreachable("unhandled load type");
2934 return NULL;
2935 }
2936
2937 if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
2938 component_type = LLVMGetElementType(dest_type);
2939 else
2940 component_type = dest_type;
2941
2942 if (nir_src_is_const(offset))
2943 assert(nir_src_as_uint(offset) == 0);
2944 else
2945 indir_index = get_src(ctx, offset);
2946
2947 if (ctx->stage == MESA_SHADER_TESS_CTRL) {
2948 LLVMValueRef result = ctx->abi->load_tess_varyings(ctx->abi, component_type,
2949 vertex_index, indir_index,
2950 base, component,
2951 count, !is_output);
2952 if (instr->def.bit_size == 16) {
2953 result = ac_to_integer(&ctx->ac, result);
2954 result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
2955 }
2956 return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
2957 }
2958
2959 /* No indirect indexing is allowed after this point. */
2960 assert(!indir_index);
2961
2962 /* Other non-fragment cases have outputs in temporaries. */
2963 if (is_output && (ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL)) {
2964 assert(is_output);
2965
2966 for (unsigned chan = component; chan < count + component; chan++)
2967 values[chan] = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.f32,
2968 ctx->abi->outputs[base * 4 + chan], "");
2969
2970 LLVMValueRef result = ac_build_varying_gather_values(&ctx->ac, values, count, component);
2971 return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
2972 }
2973
2974 /* Fragment shader inputs. */
2975 assert(ctx->stage == MESA_SHADER_FRAGMENT);
2976 unsigned vertex_id = 0; /* P0 */
2977
2978 if (instr->intrinsic == nir_intrinsic_load_input_vertex)
2979 vertex_id = nir_src_as_uint(instr->src[0]);
2980
2981 LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, base, false);
2982
2983 for (unsigned chan = 0; chan < count; chan++) {
2984 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
2985 values[chan] = ac_build_fs_interp_mov(&ctx->ac, vertex_id, llvm_chan, attr_number,
2986 ac_get_arg(&ctx->ac, ctx->args->prim_mask));
2987 values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
2988 if (instr->def.bit_size == 16 &&
2989 nir_intrinsic_io_semantics(instr).high_16bits)
2990 values[chan] = LLVMBuildLShr(ctx->ac.builder, values[chan], LLVMConstInt(ctx->ac.i32, 16, 0), "");
2991 values[chan] =
2992 LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
2993 instr->def.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
2994 }
2995
2996 LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, count);
2997 return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
2998 }
2999
3000 static LLVMValueRef
3001 emit_load_frag_shading_rate(struct ac_nir_context *ctx)
3002 {
3003 LLVMValueRef x_rate, y_rate, cond;
3004
3005 /* VRS Rate X = Ancillary[2:3]
3006 * VRS Rate Y = Ancillary[4:5]
3007 */
3008 x_rate = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 2, 2);
3009 y_rate = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 4, 2);
3010
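   /* Combine the two axes into the packed shading-rate value: the x rate
    * goes to bits [3:2] (hence the constant 4 below), the y rate to
    * bits [1:0]. */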
3011 /* xRate = xRate == 0x1 ? Horizontal2Pixels : None. */
3012 cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, x_rate, ctx->ac.i32_1, "");
3013 x_rate = LLVMBuildSelect(ctx->ac.builder, cond,
3014 LLVMConstInt(ctx->ac.i32, 4, false), ctx->ac.i32_0, "");
3015
3016 /* yRate = yRate == 0x1 ? Vertical2Pixels : None. */
3017 cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, y_rate, ctx->ac.i32_1, "");
3018 y_rate = LLVMBuildSelect(ctx->ac.builder, cond,
3019 ctx->ac.i32_1, ctx->ac.i32_0, "");
3020
3021 return LLVMBuildOr(ctx->ac.builder, x_rate, y_rate, "");
3022 }
3023
3024 static LLVMValueRef
3025 emit_load_frag_coord(struct ac_nir_context *ctx)
3026 {
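   /* The hardware provides w in frag_pos[3], but gl_FragCoord.w is defined
    * as 1/w, hence the reciprocal on the last channel. */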
3027 LLVMValueRef values[4] = {
3028 ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]), ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]),
3029 ac_get_arg(&ctx->ac, ctx->args->frag_pos[2]),
3030 ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, ac_get_arg(&ctx->ac, ctx->args->frag_pos[3]))};
3031
3032 return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
3033 }
3034
3035 static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
3036 {
3037 LLVMValueRef result = NULL;
3038
3039 switch (instr->intrinsic) {
3040 case nir_intrinsic_ballot:
3041 case nir_intrinsic_ballot_relaxed:
3042 result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
3043 if (instr->def.bit_size > ctx->ac.wave_size) {
3044 LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
3045 result = LLVMBuildZExt(ctx->ac.builder, result, dest_type, "");
3046 }
3047 break;
3048 case nir_intrinsic_inverse_ballot: {
3049 LLVMValueRef src = get_src(ctx, instr->src[0]);
3050 if (instr->src[0].ssa->bit_size > ctx->ac.wave_size) {
3051 LLVMTypeRef src_type = LLVMIntTypeInContext(ctx->ac.context, ctx->ac.wave_size);
3052 src = LLVMBuildTrunc(ctx->ac.builder, src, src_type, "");
3053 }
3054 result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.inverse.ballot", ctx->ac.i1, &src, 1, 0);
3055 break;
3056 }
3057 case nir_intrinsic_read_invocation:
3058 result =
3059 ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
3060 break;
3061 case nir_intrinsic_read_first_invocation:
3062 case nir_intrinsic_as_uniform:
3063 result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
3064 break;
3065 case nir_intrinsic_load_subgroup_invocation:
3066 result = ac_get_thread_id(&ctx->ac);
3067 break;
3068 case nir_intrinsic_load_workgroup_id: {
3069 LLVMValueRef values[3] = {ctx->ac.i32_0, ctx->ac.i32_0, ctx->ac.i32_0};
3070
3071 for (int i = 0; i < 3; i++) {
3072 if (ctx->args->workgroup_ids[i].used)
3073 values[i] = ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]);
3074 }
3075 result = ac_build_gather_values(&ctx->ac, values, 3);
3076 break;
3077 }
3078 case nir_intrinsic_load_base_vertex:
3079 case nir_intrinsic_load_first_vertex:
3080 case nir_intrinsic_load_tess_rel_patch_id_amd:
3081 case nir_intrinsic_load_ring_attr_amd:
3082 case nir_intrinsic_load_lds_ngg_scratch_base_amd:
3083 case nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd:
3084 result = ctx->abi->intrinsic_load(ctx->abi, instr);
3085 break;
3086 case nir_intrinsic_load_vertex_id_zero_base:
3087 result = ctx->abi->vertex_id_replaced ? ctx->abi->vertex_id_replaced : ctx->abi->vertex_id;
3088 break;
3089 case nir_intrinsic_load_local_invocation_id: {
3090 LLVMValueRef ids = ac_get_arg(&ctx->ac, ctx->args->local_invocation_ids);
3091
3092 if (LLVMGetTypeKind(LLVMTypeOf(ids)) == LLVMIntegerTypeKind) {
3093 /* Thread IDs are packed in VGPR0, 10 bits per component. */
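         /* e.g. id[1] == (ids >> 10) & 0x3ff; ac_unpack_param emits the
          * equivalent bitfield extract. */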
3094 LLVMValueRef id[3];
3095
3096 for (unsigned i = 0; i < 3; i++)
3097 id[i] = ac_unpack_param(&ctx->ac, ids, i * 10, 10);
3098
3099 result = ac_build_gather_values(&ctx->ac, id, 3);
3100 } else {
3101 result = ids;
3102 }
3103 break;
3104 }
3105 case nir_intrinsic_load_base_instance:
3106 result = ac_get_arg(&ctx->ac, ctx->args->start_instance);
3107 break;
3108 case nir_intrinsic_load_draw_id:
3109 result = ac_get_arg(&ctx->ac, ctx->args->draw_id);
3110 break;
3111 case nir_intrinsic_load_view_index:
3112 result = ac_get_arg(&ctx->ac, ctx->args->view_index);
3113 break;
3114 case nir_intrinsic_load_invocation_id:
3115 assert(ctx->stage == MESA_SHADER_TESS_CTRL || ctx->stage == MESA_SHADER_GEOMETRY);
3116 if (ctx->stage == MESA_SHADER_TESS_CTRL) {
3117 result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids), 8, 5);
3118 } else if (ctx->ac.gfx_level >= GFX10) {
3119 result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id), 0, 7);
3120 } else {
3121 result = ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id);
3122 }
3123 break;
3124 case nir_intrinsic_load_primitive_id:
3125 if (ctx->stage == MESA_SHADER_GEOMETRY) {
3126 result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id);
3127 } else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
3128 result = ac_get_arg(&ctx->ac, ctx->args->tcs_patch_id);
3129 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
3130 result = ctx->abi->tes_patch_id_replaced ?
3131 ctx->abi->tes_patch_id_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_patch_id);
3132 } else if (ctx->stage == MESA_SHADER_VERTEX) {
3133 if (ctx->args->vs_prim_id.used)
3134 result = ac_get_arg(&ctx->ac, ctx->args->vs_prim_id); /* legacy */
3135 else
3136 result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id); /* NGG */
3137 } else
3138       fprintf(stderr, "Unknown primitive id intrinsic: %d\n", ctx->stage);
3139 break;
3140 case nir_intrinsic_load_sample_id:
3141 result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 8, 4);
3142 break;
3143 case nir_intrinsic_load_sample_pos:
3144 result = load_sample_pos(ctx);
3145 break;
3146 case nir_intrinsic_load_frag_coord:
3147 result = emit_load_frag_coord(ctx);
3148 break;
3149 case nir_intrinsic_load_frag_shading_rate:
3150 result = emit_load_frag_shading_rate(ctx);
3151 break;
3152 case nir_intrinsic_load_front_face:
3153 result = emit_i2b(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->front_face));
3154 break;
3155 case nir_intrinsic_load_helper_invocation:
3156 case nir_intrinsic_is_helper_invocation:
3157 result = ac_build_load_helper_invocation(&ctx->ac);
3158 break;
3159 case nir_intrinsic_load_instance_id:
3160 result = ctx->abi->instance_id_replaced ?
3161 ctx->abi->instance_id_replaced : ctx->abi->instance_id;
3162 break;
3163 case nir_intrinsic_load_num_workgroups:
3164 if (ctx->abi->load_grid_size_from_user_sgpr) {
3165 result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
3166 } else {
3167 result = ac_build_load_invariant(&ctx->ac,
3168 ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->num_work_groups), ctx->ac.i32_0);
3169 }
3170 break;
3171 case nir_intrinsic_load_local_invocation_index:
3172 result = visit_load_local_invocation_index(ctx);
3173 break;
3174 case nir_intrinsic_first_invocation:
3175 result = visit_first_invocation(ctx);
3176 break;
3177 case nir_intrinsic_load_push_constant:
3178 result = visit_load_push_constant(ctx, instr);
3179 break;
3180 case nir_intrinsic_store_ssbo:
3181 visit_store_ssbo(ctx, instr);
3182 break;
3183 case nir_intrinsic_load_ssbo:
3184 result = visit_load_buffer(ctx, instr);
3185 break;
3186 case nir_intrinsic_load_global_constant:
3187 case nir_intrinsic_load_global:
3188 case nir_intrinsic_load_global_amd:
3189 result = visit_load_global(ctx, instr);
3190 break;
3191 case nir_intrinsic_store_global:
3192 case nir_intrinsic_store_global_amd:
3193 visit_store_global(ctx, instr);
3194 break;
3195 case nir_intrinsic_global_atomic:
3196 case nir_intrinsic_global_atomic_swap:
3197 case nir_intrinsic_global_atomic_amd:
3198 case nir_intrinsic_global_atomic_swap_amd:
3199 result = visit_global_atomic(ctx, instr);
3200 break;
3201 case nir_intrinsic_ssbo_atomic:
3202 case nir_intrinsic_ssbo_atomic_swap:
3203 result = visit_atomic_ssbo(ctx, instr);
3204 break;
3205 case nir_intrinsic_load_ubo:
3206 result = visit_load_ubo_buffer(ctx, instr);
3207 break;
3208 case nir_intrinsic_get_ssbo_size:
3209 result = visit_get_ssbo_size(ctx, instr);
3210 break;
3211 case nir_intrinsic_load_input:
3212 case nir_intrinsic_load_input_vertex:
3213 case nir_intrinsic_load_per_vertex_input:
3214 result = visit_load(ctx, instr, false);
3215 break;
3216 case nir_intrinsic_load_output:
3217 case nir_intrinsic_load_per_vertex_output:
3218 result = visit_load(ctx, instr, true);
3219 break;
3220 case nir_intrinsic_store_output:
3221 case nir_intrinsic_store_per_vertex_output:
3222 visit_store_output(ctx, instr);
3223 break;
3224 case nir_intrinsic_load_shared:
3225 result = visit_load_shared(ctx, instr);
3226 break;
3227 case nir_intrinsic_store_shared:
3228 visit_store_shared(ctx, instr);
3229 break;
3230 case nir_intrinsic_load_shared2_amd:
3231 result = visit_load_shared2_amd(ctx, instr);
3232 break;
3233 case nir_intrinsic_store_shared2_amd:
3234 visit_store_shared2_amd(ctx, instr);
3235 break;
3236 case nir_intrinsic_bindless_image_load:
3237 case nir_intrinsic_bindless_image_sparse_load:
3238 case nir_intrinsic_bindless_image_fragment_mask_load_amd:
3239 result = visit_image_load(ctx, instr);
3240 break;
3241 case nir_intrinsic_bindless_image_store:
3242 visit_image_store(ctx, instr);
3243 break;
3244 case nir_intrinsic_bindless_image_atomic:
3245 case nir_intrinsic_bindless_image_atomic_swap:
3246 result = visit_image_atomic(ctx, instr);
3247 break;
3248 case nir_intrinsic_shader_clock:
3249 result = ac_build_shader_clock(&ctx->ac, nir_intrinsic_memory_scope(instr));
3250 break;
3251 case nir_intrinsic_discard:
3252 case nir_intrinsic_discard_if:
3253 case nir_intrinsic_terminate:
3254 case nir_intrinsic_terminate_if:
3255 emit_discard(ctx, instr);
3256 break;
3257 case nir_intrinsic_demote:
3258 case nir_intrinsic_demote_if:
3259 emit_demote(ctx, instr);
3260 break;
3261 case nir_intrinsic_barrier: {
3262 assert(!(nir_intrinsic_memory_semantics(instr) &
3263 (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
3264
3265 nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
3266
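      /* Map NIR memory modes onto hardware wait counters: VMEM traffic
       * (global/SSBO/image) needs the vector memory counters, LDS traffic
       * needs lgkmcnt. */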
3267 unsigned wait_flags = 0;
3268 if (modes & (nir_var_mem_global | nir_var_mem_ssbo | nir_var_image))
3269 wait_flags |= AC_WAIT_VLOAD | AC_WAIT_VSTORE;
3270 if (modes & nir_var_mem_shared)
3271 wait_flags |= AC_WAIT_LGKM;
3272
3273 if (wait_flags)
3274 ac_build_waitcnt(&ctx->ac, wait_flags);
3275
3276 if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP)
3277 ac_build_s_barrier(&ctx->ac, ctx->stage);
3278 break;
3279 }
3280 case nir_intrinsic_optimization_barrier_vgpr_amd:
3281 result = get_src(ctx, instr->src[0]);
3282 ac_build_optimization_barrier(&ctx->ac, &result, false);
3283 break;
3284 case nir_intrinsic_shared_atomic:
3285 case nir_intrinsic_shared_atomic_swap: {
3286 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0);
3287 result = visit_var_atomic(ctx, instr, ptr, 1);
3288 break;
3289 }
3290 case nir_intrinsic_load_barycentric_pixel:
3291 result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
3292 break;
3293 case nir_intrinsic_load_barycentric_centroid:
3294 result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
3295 break;
3296 case nir_intrinsic_load_barycentric_sample:
3297 result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
3298 break;
3299 case nir_intrinsic_load_barycentric_model:
3300 result = barycentric_model(ctx);
3301 break;
3302 case nir_intrinsic_load_barycentric_at_offset: {
3303 LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
3304 result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
3305 break;
3306 }
3307 case nir_intrinsic_load_interpolated_input: {
3308 /* We assume any indirect loads have been lowered away */
3309 ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
3310 assert(offset);
3311 assert(offset[0].i32 == 0);
3312
3313 LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
3314 unsigned index = nir_intrinsic_base(instr);
3315 unsigned component = nir_intrinsic_component(instr);
3316 result = load_interpolated_input(ctx, interp_param, index, component,
3317 instr->def.num_components, instr->def.bit_size,
3318 nir_intrinsic_io_semantics(instr).high_16bits);
3319 break;
3320 }
3321 case nir_intrinsic_sendmsg_amd: {
3322 unsigned imm = nir_intrinsic_base(instr);
3323 LLVMValueRef m0_content = get_src(ctx, instr->src[0]);
3324 ac_build_sendmsg(&ctx->ac, imm, m0_content);
3325 break;
3326 }
3327 case nir_intrinsic_load_gs_wave_id_amd: {
3328 if (ctx->args->merged_wave_info.used)
3329 result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 16, 8);
3330 else if (ctx->args->gs_wave_id.used)
3331 result = ac_get_arg(&ctx->ac, ctx->args->gs_wave_id);
3332 else
3333 unreachable("Shader doesn't have GS wave ID.");
3334 break;
3335 }
3336 case nir_intrinsic_load_tess_coord: {
3337 LLVMValueRef coord[] = {
3338 ctx->abi->tes_u_replaced ? ctx->abi->tes_u_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_u),
3339 ctx->abi->tes_v_replaced ? ctx->abi->tes_v_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_v),
3340 ctx->ac.f32_0,
3341 };
3342
3343 /* For triangles, the vector should be (u, v, 1-u-v). */
3344 if (ctx->info->tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES) {
3345 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
3346 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
3347 }
3348 result = ac_build_gather_values(&ctx->ac, coord, 3);
3349 break;
3350 }
3351 case nir_intrinsic_vote_all: {
3352 result = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
3353 break;
3354 }
3355 case nir_intrinsic_vote_any: {
3356 result = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
3357 break;
3358 }
3359 case nir_intrinsic_quad_vote_any: {
3360 result = ac_build_wqm_vote(&ctx->ac, get_src(ctx, instr->src[0]));
3361 break;
3362 }
3363 case nir_intrinsic_quad_vote_all: {
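      /* all(x) == !any(!x), so reuse the WQM "any" vote. */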
3364 LLVMValueRef src = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
3365 result = LLVMBuildNot(ctx->ac.builder, ac_build_wqm_vote(&ctx->ac, src), "");
3366 break;
3367 }
3368 case nir_intrinsic_shuffle:
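      /* ds_bpermute_b32 covers the whole wave on GFX8-9 and on wave32, but
       * GFX6-7 lack it and on GFX10+ wave64 it only swaps within 32-lane
       * halves, so fall back to a waterfall of readlanes there. */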
3369 if (ctx->ac.gfx_level == GFX8 || ctx->ac.gfx_level == GFX9 ||
3370 (ctx->ac.gfx_level >= GFX10 && ctx->ac.wave_size == 32)) {
3371 result =
3372 ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
3373 } else {
3374 LLVMValueRef src = get_src(ctx, instr->src[0]);
3375 LLVMValueRef index = get_src(ctx, instr->src[1]);
3376 LLVMTypeRef type = LLVMTypeOf(src);
3377 struct waterfall_context wctx;
3378 LLVMValueRef index_val;
3379
3380 index_val = enter_waterfall(ctx, &wctx, index, true);
3381
3382 src = LLVMBuildZExt(ctx->ac.builder, src, ctx->ac.i32, "");
3383
3384 result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane", ctx->ac.i32,
3385 (LLVMValueRef[]){src, index_val}, 2, 0);
3386
3387 result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");
3388
3389 result = exit_waterfall(ctx, &wctx, result);
3390 }
3391 break;
3392 case nir_intrinsic_reduce:
3393 result = ac_build_reduce(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0],
3394 instr->const_index[1]);
3395 break;
3396 case nir_intrinsic_inclusive_scan:
3397 result =
3398 ac_build_inclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
3399 break;
3400 case nir_intrinsic_exclusive_scan:
3401 result =
3402 ac_build_exclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
3403 break;
3404 case nir_intrinsic_quad_broadcast: {
3405 unsigned lane = nir_src_as_uint(instr->src[1]);
3406 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), lane, lane, lane, lane);
3407 result = ac_build_wqm(&ctx->ac, result);
3408 break;
3409 }
3410 case nir_intrinsic_quad_swap_horizontal:
3411 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
3412 result = ac_build_wqm(&ctx->ac, result);
3413 break;
3414 case nir_intrinsic_quad_swap_vertical:
3415 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
3416 result = ac_build_wqm(&ctx->ac, result);
3417 break;
3418 case nir_intrinsic_quad_swap_diagonal:
3419 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
3420 result = ac_build_wqm(&ctx->ac, result);
3421 break;
3422 case nir_intrinsic_quad_swizzle_amd: {
3423 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
3424 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask & 0x3,
3425 (mask >> 2) & 0x3, (mask >> 4) & 0x3, (mask >> 6) & 0x3);
3426 result = ac_build_wqm(&ctx->ac, result);
3427 break;
3428 }
3429 case nir_intrinsic_masked_swizzle_amd: {
3430 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
3431 result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
3432 break;
3433 }
3434 case nir_intrinsic_write_invocation_amd:
3435 result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
3436 get_src(ctx, instr->src[1]), get_src(ctx, instr->src[2]));
3437 break;
3438 case nir_intrinsic_mbcnt_amd:
3439 result = ac_build_mbcnt_add(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
3440 break;
3441 case nir_intrinsic_load_scratch: {
3442 LLVMValueRef offset = get_src(ctx, instr->src[0]);
3443 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
3444 LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
3445 LLVMTypeRef vec_type = instr->def.num_components == 1
3446 ? comp_type
3447 : LLVMVectorType(comp_type, instr->def.num_components);
3448 result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
3449 break;
3450 }
3451 case nir_intrinsic_store_scratch: {
3452 LLVMValueRef offset = get_src(ctx, instr->src[1]);
3453 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
3454 LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
3455 LLVMValueRef src = get_src(ctx, instr->src[0]);
3456 unsigned wrmask = nir_intrinsic_write_mask(instr);
3457 while (wrmask) {
3458 int start, count;
3459 u_bit_scan_consecutive_range(&wrmask, &start, &count);
3460
3461 LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
3462 LLVMValueRef offset_ptr = LLVMBuildGEP2(ctx->ac.builder, comp_type, ptr, &offset, 1, "");
3463 LLVMValueRef offset_src = ac_extract_components(&ctx->ac, src, start, count);
3464 LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
3465 }
3466 break;
3467 }
3468 case nir_intrinsic_load_constant: {
3469 unsigned base = nir_intrinsic_base(instr);
3470 unsigned range = nir_intrinsic_range(instr);
3471
3472 LLVMValueRef offset = get_src(ctx, instr->src[0]);
3473 offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");
3474
3475       /* Clamp the offset to avoid out-of-bounds accesses, which global
3476        * instructions can't handle.
3477 */
3478 LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
3479 LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
3480 offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");
3481
3482 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset);
3483 LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
3484 LLVMTypeRef vec_type = instr->def.num_components == 1
3485 ? comp_type
3486 : LLVMVectorType(comp_type, instr->def.num_components);
3487 result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
3488 break;
3489 }
3490 case nir_intrinsic_set_vertex_and_primitive_count:
3491 /* Currently ignored. */
3492 break;
3493 case nir_intrinsic_load_typed_buffer_amd:
3494 case nir_intrinsic_load_buffer_amd:
3495 case nir_intrinsic_store_buffer_amd: {
3496 unsigned src_base = instr->intrinsic == nir_intrinsic_store_buffer_amd ? 1 : 0;
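      /* Use the index VGPR (idxen) unless the index operand is a constant
       * zero, in which case it can be omitted entirely. */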
3497 bool idxen = !nir_src_is_const(instr->src[src_base + 3]) ||
3498 nir_src_as_uint(instr->src[src_base + 3]);
3499
3500 LLVMValueRef store_data = get_src(ctx, instr->src[0]);
3501 LLVMValueRef descriptor = get_src(ctx, instr->src[src_base + 0]);
3502 LLVMValueRef addr_voffset = get_src(ctx, instr->src[src_base + 1]);
3503 LLVMValueRef addr_soffset = get_src(ctx, instr->src[src_base + 2]);
3504 LLVMValueRef vidx = idxen ? get_src(ctx, instr->src[src_base + 3]) : NULL;
3505 unsigned num_components = instr->def.num_components;
3506 unsigned const_offset = nir_intrinsic_base(instr);
3507 bool reorder = nir_intrinsic_can_reorder(instr);
3508 enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
3509 bool uses_format = access & ACCESS_USES_FORMAT_AMD;
3510
3511 LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, addr_voffset,
3512 LLVMConstInt(ctx->ac.i32, const_offset, 0), "");
3513
3514 if (instr->intrinsic == nir_intrinsic_load_buffer_amd && uses_format) {
3515 assert(instr->def.bit_size == 16 || instr->def.bit_size == 32);
3516 result = ac_build_buffer_load_format(&ctx->ac, descriptor, vidx, voffset, num_components,
3517 access, reorder,
3518 instr->def.bit_size == 16, false);
3519 result = ac_to_integer(&ctx->ac, result);
3520 } else if (instr->intrinsic == nir_intrinsic_store_buffer_amd && uses_format) {
3521 assert(instr->src[0].ssa->bit_size == 16 || instr->src[0].ssa->bit_size == 32);
3522 ac_build_buffer_store_format(&ctx->ac, descriptor, store_data, vidx, voffset, access);
3523 } else if (instr->intrinsic == nir_intrinsic_load_buffer_amd ||
3524 instr->intrinsic == nir_intrinsic_load_typed_buffer_amd) {
3525          /* LLVM is unable to select instructions for channel types larger than 32 bits.
3526           * Work around this by fetching i32 and casting to the correct type later.
3527 */
3528 const unsigned fetch_num_components =
3529 num_components * MAX2(32, instr->def.bit_size) / 32;
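         /* e.g. a 2 x i64 load becomes a 4 x i32 fetch and is bitcast back
          * to 2 x i64 below. */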
3530
3531 LLVMTypeRef channel_type =
3532 LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->def.bit_size));
3533
3534 if (instr->intrinsic == nir_intrinsic_load_buffer_amd) {
3535 result = ac_build_buffer_load(&ctx->ac, descriptor, fetch_num_components, vidx, voffset,
3536 addr_soffset, channel_type, access, reorder, false);
3537 } else {
3538 const unsigned align_offset = nir_intrinsic_align_offset(instr);
3539 const unsigned align_mul = nir_intrinsic_align_mul(instr);
3540 const enum pipe_format format = nir_intrinsic_format(instr);
3541
3542 result =
3543 ac_build_safe_tbuffer_load(&ctx->ac, descriptor, vidx, addr_voffset, addr_soffset,
3544 format, MIN2(32, instr->def.bit_size), const_offset, align_offset,
3545 align_mul, fetch_num_components, access, reorder);
3546 }
3547
3548 /* Trim to needed vector components. */
3549 result = ac_trim_vector(&ctx->ac, result, fetch_num_components);
3550
3551 /* Cast to larger than 32-bit sized components if needed. */
3552 if (instr->def.bit_size > 32) {
3553 LLVMTypeRef cast_channel_type =
3554 LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
3555 LLVMTypeRef cast_type =
3556 num_components == 1 ? cast_channel_type :
3557 LLVMVectorType(cast_channel_type, num_components);
3558 result = LLVMBuildBitCast(ctx->ac.builder, result, cast_type, "");
3559 }
3560
3561 /* Cast the result to an integer (or vector of integers). */
3562 result = ac_to_integer(&ctx->ac, result);
3563 } else {
3564 unsigned writemask = nir_intrinsic_write_mask(instr);
3565 while (writemask) {
3566 int start, count;
3567 u_bit_scan_consecutive_range(&writemask, &start, &count);
3568
3569 LLVMValueRef voffset = LLVMBuildAdd(
3570 ctx->ac.builder, addr_voffset,
3571 LLVMConstInt(ctx->ac.i32, const_offset + start * 4, 0), "");
3572
3573 LLVMValueRef data = extract_vector_range(&ctx->ac, store_data, start, count);
3574 ac_build_buffer_store_dword(&ctx->ac, descriptor, data, vidx, voffset, addr_soffset,
3575 access);
3576 }
3577 }
3578 break;
3579 }
3580 case nir_intrinsic_is_subgroup_invocation_lt_amd: {
3581 LLVMValueRef count = LLVMBuildAnd(ctx->ac.builder, get_src(ctx, instr->src[0]),
3582 LLVMConstInt(ctx->ac.i32, 0xff, 0), "");
3583 result = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
3584 break;
3585 }
3586 case nir_intrinsic_overwrite_vs_arguments_amd:
3587 ctx->abi->vertex_id_replaced = get_src(ctx, instr->src[0]);
3588 ctx->abi->instance_id_replaced = get_src(ctx, instr->src[1]);
3589 break;
3590 case nir_intrinsic_overwrite_tes_arguments_amd:
3591 ctx->abi->tes_u_replaced = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
3592 ctx->abi->tes_v_replaced = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
3593 ctx->abi->tes_rel_patch_id_replaced = get_src(ctx, instr->src[3]);
3594 ctx->abi->tes_patch_id_replaced = get_src(ctx, instr->src[2]);
3595 break;
3596 case nir_intrinsic_gds_atomic_add_amd: {
3597 LLVMValueRef store_val = get_src(ctx, instr->src[0]);
3598 LLVMValueRef addr = get_src(ctx, instr->src[1]);
3599 LLVMTypeRef gds_ptr_type = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
3600 LLVMValueRef gds_base = LLVMBuildIntToPtr(ctx->ac.builder, addr, gds_ptr_type, "");
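      /* "workgroup-one-as" is the LLVM sync scope: per the AMDGPU memory
       * model, the atomic is ordered only within the workgroup and only
       * for this (GDS) address space. */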
3601 ac_build_atomic_rmw(&ctx->ac, LLVMAtomicRMWBinOpAdd, gds_base, store_val, "workgroup-one-as");
3602 break;
3603 }
3604 case nir_intrinsic_elect:
3605 result = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, visit_first_invocation(ctx),
3606 ac_get_thread_id(&ctx->ac), "");
3607 break;
3608 case nir_intrinsic_lane_permute_16_amd:
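      /* llvm.amdgcn.permlane16 operands: old value, source, two lane-select
       * words, then fi and bound_ctrl (both disabled here). */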
3609 result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.permlane16", ctx->ac.i32,
3610 (LLVMValueRef[]){get_src(ctx, instr->src[0]),
3611 get_src(ctx, instr->src[0]),
3612 get_src(ctx, instr->src[1]),
3613 get_src(ctx, instr->src[2]),
3614 ctx->ac.i1false,
3615 ctx->ac.i1false}, 6, 0);
3616 break;
3617 case nir_intrinsic_load_scalar_arg_amd:
3618 case nir_intrinsic_load_vector_arg_amd: {
3619 assert(nir_intrinsic_base(instr) < AC_MAX_ARGS);
3620 struct ac_arg arg;
3621 arg.arg_index = nir_intrinsic_base(instr);
3622 arg.used = true;
3623 result = ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, arg));
3624 if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(result)) != 32)
3625 result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->def), "");
3626 break;
3627 }
3628 case nir_intrinsic_load_smem_amd: {
3629 LLVMValueRef base = get_src(ctx, instr->src[0]);
3630 LLVMValueRef offset = get_src(ctx, instr->src[1]);
3631
3632 bool is_addr_32bit = nir_src_bit_size(instr->src[0]) == 32;
3633 int addr_space = is_addr_32bit ? AC_ADDR_SPACE_CONST_32BIT : AC_ADDR_SPACE_CONST;
3634
3635 LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
3636 LLVMTypeRef byte_ptr_type = LLVMPointerType(ctx->ac.i8, addr_space);
3637
3638 LLVMValueRef addr = LLVMBuildIntToPtr(ctx->ac.builder, base, byte_ptr_type, "");
3639 /* see ac_build_load_custom() for 32bit/64bit addr GEP difference */
3640 addr = is_addr_32bit ?
3641 LLVMBuildInBoundsGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "") :
3642 LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "");
3643
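      /* Marking the address uniform and the load invariant lets LLVM select
       * a scalar (SMEM) load instead of a vector one. */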
3644 LLVMSetMetadata(addr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
3645 result = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
3646 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
3647 break;
3648 }
3649 case nir_intrinsic_ordered_xfb_counter_add_amd: {
3650       /* Must be called from a single lane of a workgroup. */
3651 LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
3652
3653 /* Gfx11 GDS instructions only operate on the first active lane. All other lanes are
3654 * ignored. So are their EXEC bits. This uses the mutex feature of ds_ordered_count
3655 * to emulate a multi-dword atomic.
3656 *
3657 * This is the expected code:
3658 * ds_ordered_count release=0 done=0 // lock mutex
3659 * if (gfx_level >= GFX11) {
3660 * ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_0
3661 * ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_1
3662 * ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_2
3663 * ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_3
3664 * } else {
3665 * ds_add_rtn_u32 dwords_written0
3666 * ds_add_rtn_u32 dwords_written1
3667 * ds_add_rtn_u32 dwords_written2
3668 * ds_add_rtn_u32 dwords_written3
3669 * }
3670 * ds_ordered_count release=1 done=1 // unlock mutex
3671 *
3672 * GDS_STRMOUT_DWORDS_WRITTEN_n are just general-purpose global registers. We use them
3673 * because MCBP (mid-command-buffer preemption) saves and restores them, and it doesn't
3674 * save and restore GDS memory.
3675 */
3676 LLVMValueRef args[8] = {
3677 LLVMBuildIntToPtr(ctx->ac.builder, get_src(ctx, instr->src[0]), gdsptr, ""),
3678 ctx->ac.i32_0, /* value to add */
3679 ctx->ac.i32_0, /* ordering */
3680 ctx->ac.i32_0, /* scope */
3681 ctx->ac.i1false, /* isVolatile */
3682 LLVMConstInt(ctx->ac.i32, 1 << 24, false), /* OA index, bits 24+: lane count */
3683 ctx->ac.i1false, /* wave release */
3684 ctx->ac.i1false, /* wave done */
3685 };
3686
3687 /* Set release=0 to start a GDS mutex. Set done=0 because it's not the last one. */
3688 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32,
3689 args, ARRAY_SIZE(args), 0);
3690 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM);
3691
3692 LLVMValueRef global_count[4];
3693 LLVMValueRef count_vec = get_src(ctx, instr->src[1]);
3694 unsigned write_mask = nir_intrinsic_write_mask(instr);
3695 for (unsigned i = 0; i < instr->num_components; i++) {
3696 LLVMValueRef value =
3697 LLVMBuildExtractElement(ctx->ac.builder, count_vec,
3698 LLVMConstInt(ctx->ac.i32, i, false), "");
3699 if (write_mask & (1 << i)) {
3700 /* The offset is a relative offset from GDS_STRMOUT_DWORDS_WRITTEN_0. */
3701 global_count[i] =
3702 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.add.gs.reg.rtn.i32", ctx->ac.i32,
3703 (LLVMValueRef[]){value, LLVMConstInt(ctx->ac.i32, i * 4, 0)},
3704 2, 0);
3705 } else {
3706 global_count[i] = LLVMGetUndef(ctx->ac.i32);
3707 }
3708 }
3709
3710 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM);
3711
3712 /* Set release=1 to end a GDS mutex. Set done=1 because it's the last one. */
3713 args[6] = args[7] = ctx->ac.i1true;
3714 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32,
3715 args, ARRAY_SIZE(args), 0);
3716 result = ac_build_gather_values(&ctx->ac, global_count, instr->num_components);
3717 break;
3718 }
3719 case nir_intrinsic_xfb_counter_sub_amd: {
3720       /* Must be called from a single lane of a workgroup. */
3721 LLVMValueRef sub_vec = get_src(ctx, instr->src[0]);
3722 unsigned write_mask = nir_intrinsic_write_mask(instr);
3723
3724 for (unsigned i = 0; i < instr->num_components; i++) {
3725 if (write_mask & (1 << i)) {
3726 LLVMValueRef value =
3727 LLVMBuildExtractElement(ctx->ac.builder, sub_vec,
3728 LLVMConstInt(ctx->ac.i32, i, false), "");
3729 /* The offset is a relative offset from GDS_STRMOUT_DWORDS_WRITTEN_0. */
3730 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.sub.gs.reg.rtn.i32", ctx->ac.i32,
3731 (LLVMValueRef[]){value, LLVMConstInt(ctx->ac.i32, i * 4, 0)},
3732 2, 0);
3733 }
3734 }
3735 break;
3736 }
3737 case nir_intrinsic_export_amd: {
3738 unsigned flags = nir_intrinsic_flags(instr);
3739 unsigned target = nir_intrinsic_base(instr);
3740 unsigned write_mask = nir_intrinsic_write_mask(instr);
3741
3742 struct ac_export_args args = {
3743 .target = target,
3744 .enabled_channels = write_mask,
3745 .compr = flags & AC_EXP_FLAG_COMPRESSED,
3746 .done = flags & AC_EXP_FLAG_DONE,
3747 .valid_mask = flags & AC_EXP_FLAG_VALID_MASK,
3748 };
3749
3750 LLVMValueRef value = get_src(ctx, instr->src[0]);
3751 int num_components = ac_get_llvm_num_components(value);
3752 for (int i = 0; i < num_components; i++)
3753 args.out[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
3754
3755 ac_build_export(&ctx->ac, &args);
3756 break;
3757 }
3758 case nir_intrinsic_bvh64_intersect_ray_amd: {
3759 LLVMValueRef desc = get_src(ctx, instr->src[0]);
3760 LLVMValueRef node_id =
3761 LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i64, "");
3762 LLVMValueRef t_max =
3763 LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[2]), ctx->ac.f32, "");
3764 LLVMValueRef origin =
3765 LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[3]), ctx->ac.v3f32, "");
3766 LLVMValueRef dir =
3767 LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[4]), ctx->ac.v3f32, "");
3768 LLVMValueRef inv_dir =
3769 LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[5]), ctx->ac.v3f32, "");
3770
3771 LLVMValueRef args[6] = {
3772 node_id, t_max, origin, dir, inv_dir, desc,
3773 };
3774
3775 result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.image.bvh.intersect.ray.i64.v3f32",
3776 ctx->ac.v4i32, args, ARRAY_SIZE(args), 0);
3777 break;
3778 }
3779 default:
3780 fprintf(stderr, "Unknown intrinsic: ");
3781 nir_print_instr(&instr->instr, stderr);
3782 fprintf(stderr, "\n");
3783 return false;
3784 }
3785 if (result) {
3786 ctx->ssa_defs[instr->def.index] = result;
3787 }
3788 return true;
3789 }
3790
3791 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
3792 *
3793 * GFX6-GFX7:
3794 * If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
3795 * filtering manually. The driver sets img7 to a mask clearing
3796 * MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
3797 * s_and_b32 samp0, samp0, img7
3798 *
3799 * GFX8:
3800 * The ANISO_OVERRIDE sampler field enables this fix in TA.
3801 */
3802 static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx, LLVMValueRef res,
3803 LLVMValueRef samp)
3804 {
3805 LLVMBuilderRef builder = ctx->ac.builder;
3806 LLVMValueRef img7, samp0;
3807
3808 if (ctx->ac.gfx_level >= GFX8)
3809 return samp;
3810
3811 img7 = LLVMBuildExtractElement(builder, res, LLVMConstInt(ctx->ac.i32, 7, 0), "");
3812 samp0 = LLVMBuildExtractElement(builder, samp, ctx->ac.i32_0, "");
3813 samp0 = LLVMBuildAnd(builder, samp0, img7, "");
3814 return LLVMBuildInsertElement(builder, samp, samp0, ctx->ac.i32_0, "");
3815 }
3816
3817 static void tex_fetch_ptrs(struct ac_nir_context *ctx, nir_tex_instr *instr,
3818 struct waterfall_context *wctx, LLVMValueRef *res_ptr,
3819 LLVMValueRef *samp_ptr)
3820 {
3821 LLVMValueRef texture_dynamic_handle = NULL;
3822 LLVMValueRef sampler_dynamic_handle = NULL;
3823 int plane = -1;
3824
3825 *res_ptr = NULL;
3826 *samp_ptr = NULL;
3827 for (unsigned i = 0; i < instr->num_srcs; i++) {
3828 switch (instr->src[i].src_type) {
3829 case nir_tex_src_texture_handle:
3830 case nir_tex_src_sampler_handle: {
3831 LLVMValueRef val = get_src(ctx, instr->src[i].src);
3832 if (LLVMGetTypeKind(LLVMTypeOf(val)) == LLVMVectorTypeKind) {
3833 if (instr->src[i].src_type == nir_tex_src_texture_handle)
3834 *res_ptr = val;
3835 else
3836 *samp_ptr = val;
3837 } else {
3838 if (instr->src[i].src_type == nir_tex_src_texture_handle)
3839 texture_dynamic_handle = val;
3840 else
3841 sampler_dynamic_handle = val;
3842 }
3843 break;
3844 }
3845 case nir_tex_src_plane:
3846 plane = nir_src_as_int(instr->src[i].src);
3847 break;
3848 default:
3849 break;
3850 }
3851 }
3852
3853 enum ac_descriptor_type main_descriptor =
3854 instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;
3855
3856 if (plane >= 0) {
3857 assert(instr->op != nir_texop_txf_ms);
3858 assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);
3859
3860 main_descriptor = AC_DESC_PLANE_0 + plane;
3861 }
3862
3863 if (instr->op == nir_texop_fragment_mask_fetch_amd) {
3864 /* The fragment mask is fetched from the compressed
3865 * multisampled surface.
3866 */
3867 assert(ctx->ac.gfx_level < GFX11);
3868 main_descriptor = AC_DESC_FMASK;
3869 }
3870
3871 /* descriptor handles given through nir_tex_src_{texture,sampler}_handle */
3872 if (instr->texture_non_uniform)
3873 texture_dynamic_handle = enter_waterfall(ctx, &wctx[0], texture_dynamic_handle, true);
3874
3875 if (instr->sampler_non_uniform)
3876 sampler_dynamic_handle = enter_waterfall(ctx, &wctx[1], sampler_dynamic_handle, true);
3877
3878 if (texture_dynamic_handle)
3879 *res_ptr = ctx->abi->load_sampler_desc(ctx->abi, texture_dynamic_handle, main_descriptor);
3880
3881 if (sampler_dynamic_handle) {
3882 *samp_ptr = ctx->abi->load_sampler_desc(ctx->abi, sampler_dynamic_handle, AC_DESC_SAMPLER);
3883
3884 if (ctx->abi->disable_aniso_single_level && instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
3885 *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
3886 }
3887 }
3888
3889 static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
3890 {
3891 LLVMValueRef result = NULL;
3892 struct ac_image_args args = {0};
3893 LLVMValueRef sample_index = NULL;
3894 LLVMValueRef ddx = NULL, ddy = NULL;
3895 struct waterfall_context wctx[2] = {{{0}}};
3896
3897 tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler);
3898
3899 for (unsigned i = 0; i < instr->num_srcs; i++) {
3900 switch (instr->src[i].src_type) {
3901 case nir_tex_src_coord: {
3902 LLVMValueRef coord = get_src(ctx, instr->src[i].src);
3903 args.a16 = instr->src[i].src.ssa->bit_size == 16;
3904 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
3905 args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
3906 break;
3907 }
3908 case nir_tex_src_projector:
3909 break;
3910 case nir_tex_src_comparator:
3911 if (instr->is_shadow) {
3912 args.compare = get_src(ctx, instr->src[i].src);
3913 args.compare = ac_to_float(&ctx->ac, args.compare);
3914 assert(instr->src[i].src.ssa->bit_size == 32);
3915 }
3916 break;
3917 case nir_tex_src_offset:
3918 args.offset = get_src(ctx, instr->src[i].src);
3919 /* We pack it with bit shifts, so we need it to be 32-bit. */
3920 assert(ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.offset)) == 32);
3921 break;
3922 case nir_tex_src_bias:
3923 args.bias = get_src(ctx, instr->src[i].src);
3924 assert(ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.bias)) == 32);
3925 break;
3926 case nir_tex_src_lod:
3927 if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
3928 args.level_zero = true;
3929 else
3930 args.lod = get_src(ctx, instr->src[i].src);
3931 break;
3932 case nir_tex_src_ms_index:
3933 sample_index = get_src(ctx, instr->src[i].src);
3934 break;
3935 case nir_tex_src_ddx:
3936 ddx = get_src(ctx, instr->src[i].src);
3937 args.g16 = instr->src[i].src.ssa->bit_size == 16;
3938 break;
3939 case nir_tex_src_ddy:
3940 ddy = get_src(ctx, instr->src[i].src);
3941 assert(LLVMTypeOf(ddy) == LLVMTypeOf(ddx));
3942 break;
3943 case nir_tex_src_min_lod:
3944 args.min_lod = get_src(ctx, instr->src[i].src);
3945 break;
3946 case nir_tex_src_texture_offset:
3947 case nir_tex_src_sampler_offset:
3948 case nir_tex_src_plane:
3949 default:
3950 break;
3951 }
3952 }
3953
3954 if (args.offset) {
3955 /* offset for txf has been lowered in nir. */
3956 assert(instr->op != nir_texop_txf);
3957
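      /* Pack the texel offsets into the single 32-bit operand the image
       * instructions expect: 6-bit fields at bits [5:0], [13:8] and [21:16]
       * for x, y and z respectively. */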
3958 LLVMValueRef offset[3], pack;
3959 for (unsigned chan = 0; chan < 3; ++chan)
3960 offset[chan] = ctx->ac.i32_0;
3961
3962 unsigned num_components = ac_get_llvm_num_components(args.offset);
3963 for (unsigned chan = 0; chan < num_components; chan++) {
3964 offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
3965 offset[chan] =
3966 LLVMBuildAnd(ctx->ac.builder, offset[chan], LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
3967 if (chan)
3968 offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
3969 LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
3970 }
3971 pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
3972 pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
3973 args.offset = pack;
3974 }
3975
3976 /* Section 8.23.1 (Depth Texture Comparison Mode) of the
3977 * OpenGL 4.5 spec says:
3978 *
3979 * "If the texture’s internal format indicates a fixed-point
3980 * depth texture, then D_t and D_ref are clamped to the
3981 * range [0, 1]; otherwise no clamping is performed."
3982 *
3983 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
3984 * so the depth comparison value isn't clamped for Z16 and
3985 * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
3986 * an explicitly clamped 32-bit float format.
3987 */
3988 if (args.compare && ctx->ac.gfx_level >= GFX8 && ctx->ac.gfx_level <= GFX9 &&
3989 ctx->abi->clamp_shadow_reference) {
3990 LLVMValueRef upgraded, clamped;
3991
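      /* Bit 29 of sampler dword 3 is presumably set by the driver when the
       * depth format was promoted to Z32_FLOAT; clamp only in that case. */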
3992 upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
3993 LLVMConstInt(ctx->ac.i32, 3, false), "");
3994 upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded, LLVMConstInt(ctx->ac.i32, 29, false), "");
3995 upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
3996 clamped = ac_build_clamp(&ctx->ac, args.compare);
3997 args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped, args.compare, "");
3998 }
3999
4000 /* pack derivatives */
4001 if (ddx || ddy) {
4002 int num_deriv_channels;
4003 switch (instr->sampler_dim) {
4004 case GLSL_SAMPLER_DIM_3D:
4005 num_deriv_channels = 3;
4006 break;
4007 case GLSL_SAMPLER_DIM_2D:
4008 case GLSL_SAMPLER_DIM_CUBE:
4009 default:
4010 num_deriv_channels = 2;
4011 break;
4012 case GLSL_SAMPLER_DIM_1D:
4013 num_deriv_channels = 1;
4014 break;
4015 }
4016
4017 for (unsigned i = 0; i < num_deriv_channels; i++) {
4018 args.derivs[i] = ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddx, i));
4019 args.derivs[num_deriv_channels + i] =
4020 ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddy, i));
4021 }
4022 }
4023
4024 /* Pack sample index */
4025 if (sample_index && (instr->op == nir_texop_txf_ms || instr->op == nir_texop_fragment_fetch_amd))
4026 args.coords[instr->coord_components] = sample_index;
4027
4028 /* DMASK was repurposed for GATHER4. 4 components are always
4029 * returned and DMASK works like a swizzle - it selects
4030 * the component to fetch. The only valid DMASK values are
4031 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4032 * (red,red,red,red) etc.) The ISA document doesn't mention
4033 * this.
4034 */
4035 args.dmask = 0xf;
4036 if (instr->op == nir_texop_tg4) {
4037 if (instr->is_shadow)
4038 args.dmask = 1;
4039 else
4040 args.dmask = 1 << instr->component;
4041 }
4042
4043 if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
4044 args.dim = ac_get_sampler_dim(ctx->ac.gfx_level, instr->sampler_dim, instr->is_array);
4045 args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
4046 }
4047
4048 /* Adjust the number of coordinates because we only need (x,y) for 2D
4049 * multisampled images and (x,y,layer) for 2D multisampled layered
4050 * images or for multisampled input attachments.
4051 */
4052 if (instr->op == nir_texop_fragment_mask_fetch_amd) {
4053 if (args.dim == ac_image_2dmsaa) {
4054 args.dim = ac_image_2d;
4055 } else {
4056 assert(args.dim == ac_image_2darraymsaa);
4057 args.dim = ac_image_2darray;
4058 }
4059 }
4060
4061 /* Set TRUNC_COORD=0 for textureGather(). */
4062 if (instr->op == nir_texop_tg4 && !ctx->ac.info->conformant_trunc_coord) {
4063 LLVMValueRef dword0 = LLVMBuildExtractElement(ctx->ac.builder, args.sampler, ctx->ac.i32_0, "");
4064 dword0 = LLVMBuildAnd(ctx->ac.builder, dword0, LLVMConstInt(ctx->ac.i32, C_008F30_TRUNC_COORD, 0), "");
4065 args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
4066 }
4067
4068 args.d16 = instr->def.bit_size == 16;
4069 args.tfe = instr->is_sparse;
4070
4071 result = build_tex_intrinsic(ctx, instr, &args);
4072
4073 LLVMValueRef code = NULL;
4074 if (instr->is_sparse) {
4075 code = ac_llvm_extract_elem(&ctx->ac, result, 4);
4076 result = ac_trim_vector(&ctx->ac, result, 4);
4077 }
4078
4079 if (instr->is_shadow && instr->is_new_style_shadow &&
4080 instr->op != nir_texop_lod && instr->op != nir_texop_tg4)
4081 result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
4082 else if (instr->op == nir_texop_fragment_mask_fetch_amd) {
4083 /* Use 0x76543210 if the image doesn't have FMASK. */
4084 LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
4085 tmp = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
4086 tmp = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, tmp, ctx->ac.i32_0, "");
4087 result = LLVMBuildSelect(ctx->ac.builder, tmp,
4088 LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, ""),
4089 LLVMConstInt(ctx->ac.i32, 0x76543210, false), "");
4090 } else if (nir_tex_instr_result_size(instr) != 4)
4091 result = ac_trim_vector(&ctx->ac, result, instr->def.num_components);
4092
4093 if (instr->is_sparse)
4094 result = ac_build_concat(&ctx->ac, result, code);
4095
4096 if (result) {
4097 result = ac_to_integer(&ctx->ac, result);
4098
4099 for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
4100 result = exit_waterfall(ctx, wctx + i, result);
4101 }
4102
4103 ctx->ssa_defs[instr->def.index] = result;
4104 }
4105 }
4106
4107 static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
4108 {
4109 LLVMTypeRef type = get_def_type(ctx, &instr->def);
4110 LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");
4111
4112 ctx->ssa_defs[instr->def.index] = result;
4113 _mesa_hash_table_insert(ctx->phis, instr, result);
4114 }
4115
4116 static void visit_post_phi(struct ac_nir_context *ctx, nir_phi_instr *instr, LLVMValueRef llvm_phi)
4117 {
4118 nir_foreach_phi_src (src, instr) {
4119 LLVMBasicBlockRef block = get_block(ctx, src->pred);
4120 LLVMValueRef llvm_src = get_src(ctx, src->src);
4121
4122 LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
4123 }
4124 }
4125
4126 static void phi_post_pass(struct ac_nir_context *ctx)
4127 {
4128 hash_table_foreach(ctx->phis, entry)
4129 {
4130 visit_post_phi(ctx, (nir_phi_instr *)entry->key, (LLVMValueRef)entry->data);
4131 }
4132 }
4133
4134 static void visit_ssa_undef(struct ac_nir_context *ctx, const nir_undef_instr *instr)
4135 {
4136 unsigned num_components = instr->def.num_components;
4137 LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
4138
4139 LLVMValueRef undef;
4140
4141 if (num_components == 1)
4142 undef = LLVMGetUndef(type);
4143 else {
4144 undef = LLVMGetUndef(LLVMVectorType(type, num_components));
4145 }
4146 ctx->ssa_defs[instr->def.index] = undef;
4147 }
4148
4149 static bool visit_jump(struct ac_llvm_context *ctx, const nir_jump_instr *instr)
4150 {
4151 switch (instr->type) {
4152 case nir_jump_break:
4153 ac_build_break(ctx);
4154 break;
4155 case nir_jump_continue:
4156 ac_build_continue(ctx);
4157 break;
4158 default:
4159 fprintf(stderr, "Unknown NIR jump instr: ");
4160 nir_print_instr(&instr->instr, stderr);
4161 fprintf(stderr, "\n");
4162 return false;
4163 }
4164 return true;
4165 }
4166
4167 static bool visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list);
4168
4169 static bool visit_block(struct ac_nir_context *ctx, nir_block *block)
4170 {
4171 LLVMBasicBlockRef blockref = LLVMGetInsertBlock(ctx->ac.builder);
4172 LLVMValueRef first = LLVMGetFirstInstruction(blockref);
4173 if (first) {
4174 /* ac_branch_exited() might have already inserted non-phis */
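         /* LLVM requires phis to be grouped at the top of a block, so emit
          * them before anything that is already there. */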
4175 LLVMPositionBuilderBefore(ctx->ac.builder, LLVMGetFirstInstruction(blockref));
4176 }
4177
4178 nir_foreach_phi(phi, block) {
4179 visit_phi(ctx, phi);
4180 }
4181
4182 LLVMPositionBuilderAtEnd(ctx->ac.builder, blockref);
4183
4184 nir_foreach_instr (instr, block) {
4185 switch (instr->type) {
4186 case nir_instr_type_alu:
4187 if (!visit_alu(ctx, nir_instr_as_alu(instr)))
4188 return false;
4189 break;
4190 case nir_instr_type_load_const:
4191 if (!visit_load_const(ctx, nir_instr_as_load_const(instr)))
4192 return false;
4193 break;
4194 case nir_instr_type_intrinsic:
4195 if (!visit_intrinsic(ctx, nir_instr_as_intrinsic(instr)))
4196 return false;
4197 break;
4198 case nir_instr_type_tex:
4199 visit_tex(ctx, nir_instr_as_tex(instr));
4200 break;
4201 case nir_instr_type_phi:
4202 break;
4203 case nir_instr_type_undef:
4204 visit_ssa_undef(ctx, nir_instr_as_undef(instr));
4205 break;
4206 case nir_instr_type_jump:
4207 if (!visit_jump(&ctx->ac, nir_instr_as_jump(instr)))
4208 return false;
4209 break;
4210 case nir_instr_type_deref:
4211 assert (!nir_deref_mode_is_one_of(nir_instr_as_deref(instr),
4212 nir_var_mem_shared | nir_var_mem_global));
4213 break;
4214 default:
4215 fprintf(stderr, "Unknown NIR instr type: ");
4216 nir_print_instr(instr, stderr);
4217 fprintf(stderr, "\n");
4218 return false;
4219 }
4220 }
4221
4222 _mesa_hash_table_insert(ctx->defs, block, LLVMGetInsertBlock(ctx->ac.builder));
4223
4224 return true;
4225 }
4226
4227 static bool visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
4228 {
4229 LLVMValueRef value = get_src(ctx, if_stmt->condition);
4230
4231 nir_block *then_block = (nir_block *)exec_list_get_head(&if_stmt->then_list);
4232
4233 ac_build_ifcc(&ctx->ac, value, then_block->index);
4234
4235 if (!visit_cf_list(ctx, &if_stmt->then_list))
4236 return false;
4237
4238 if (!exec_list_is_empty(&if_stmt->else_list)) {
4239 nir_block *else_block = (nir_block *)exec_list_get_head(&if_stmt->else_list);
4240
4241 ac_build_else(&ctx->ac, else_block->index);
4242 if (!visit_cf_list(ctx, &if_stmt->else_list))
4243 return false;
4244 }
4245
4246 ac_build_endif(&ctx->ac, then_block->index);
4247 return true;
4248 }
4249
4250 static bool visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
4251 {
4252 assert(!nir_loop_has_continue_construct(loop));
4253 nir_block *first_loop_block = (nir_block *)exec_list_get_head(&loop->body);
4254
4255 ac_build_bgnloop(&ctx->ac, first_loop_block->index);
4256
4257 if (!visit_cf_list(ctx, &loop->body))
4258 return false;
4259
4260 ac_build_endloop(&ctx->ac, first_loop_block->index);
4261 return true;
4262 }
4263
4264 static bool visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list)
4265 {
4266 foreach_list_typed(nir_cf_node, node, node, list)
4267 {
4268 switch (node->type) {
4269 case nir_cf_node_block:
4270 if (!visit_block(ctx, nir_cf_node_as_block(node)))
4271 return false;
4272 break;
4273
4274 case nir_cf_node_if:
4275 if (!visit_if(ctx, nir_cf_node_as_if(node)))
4276 return false;
4277 break;
4278
4279 case nir_cf_node_loop:
4280 if (!visit_loop(ctx, nir_cf_node_as_loop(node)))
4281 return false;
4282 break;
4283
4284 default:
4285 return false;
4286 }
4287 }
4288 return true;
4289 }
4290
4291 static void setup_scratch(struct ac_nir_context *ctx, struct nir_shader *shader)
4292 {
4293 if (shader->scratch_size == 0)
4294 return;
4295
4296 LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->scratch_size);
4297 ctx->scratch = (struct ac_llvm_pointer) {
4298 .value = ac_build_alloca_undef(&ctx->ac, type, "scratch"),
4299 .pointee_type = type
4300 };
4301 }
4302
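/* Constant data (e.g. what nir_opt_large_constants moves out of the code)
 * becomes a hidden read-only global in the constant address space, roughly:
 *
 *    @const_data = hidden addrspace(4) constant [N x i8] c"..."
 *
 * Loads from it are emitted against ctx->constant_data.
 */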
static void setup_constant_data(struct ac_nir_context *ctx, struct nir_shader *shader)
{
   if (!shader->constant_data)
      return;

   /* true: the data is not null-terminated. */
   LLVMValueRef data = LLVMConstStringInContext(ctx->ac.context, shader->constant_data,
                                                shader->constant_data_size, true);
   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);
   LLVMValueRef global =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "const_data", AC_ADDR_SPACE_CONST);

   LLVMSetInitializer(global, data);
   LLVMSetGlobalConstant(global, true);
   LLVMSetVisibility(global, LLVMHiddenVisibility);
   ctx->constant_data = (struct ac_llvm_pointer) {
      .value = global,
      .pointee_type = type
   };
}

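/* Compute shared memory: the workgroup's whole LDS allocation is declared
 * as one i8-array global in the LDS address space (addrspace(3)) that
 * byte-offset GEPs index into. The huge alignment is presumably there to
 * force the variable to LDS offset 0 so NIR's byte offsets can be used
 * directly.
 */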
static void setup_shared(struct ac_nir_context *ctx, struct nir_shader *nir)
{
   if (ctx->ac.lds.value)
      return;

   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, nir->info.shared_size);

   LLVMValueRef lds =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "compute_lds", AC_ADDR_SPACE_LDS);
   LLVMSetAlignment(lds, 64 * 1024);

   ctx->ac.lds = (struct ac_llvm_pointer) {
      .value = lds,
      .pointee_type = type
   };
}

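/* Some GFX10+ VS/TES/GS shaders use GDS atomics
 * (nir_intrinsic_gds_atomic_add_amd, e.g. for NGG streamout counters);
 * scan for them and, if any are found, have LLVM reserve a GDS window via
 * the "amdgpu-gds-size" function attribute.
 */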
static void setup_gds(struct ac_nir_context *ctx, nir_function_impl *impl)
{
   bool has_gds_atomic = false;

   if (ctx->ac.gfx_level >= GFX10 &&
       (ctx->stage == MESA_SHADER_VERTEX ||
        ctx->stage == MESA_SHADER_TESS_EVAL ||
        ctx->stage == MESA_SHADER_GEOMETRY)) {

      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            has_gds_atomic |= intrin->intrinsic == nir_intrinsic_gds_atomic_add_amd;
         }
      }
   }

   unsigned gds_size = has_gds_atomic ? 0x100 : 0;

   if (gds_size)
      ac_llvm_add_target_dep_function_attr(ctx->main_function, "amdgpu-gds-size", gds_size);
}

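/* Entry point: translate one NIR shader into the LLVM function the caller
 * has already created (the builder must be positioned inside it; see the
 * LLVMGetBasicBlockParent(LLVMGetInsertBlock(...)) lookup below). A
 * minimal sketch of the expected calling pattern (the setup around the
 * call is an assumption, not part of this API):
 *
 *    struct ac_llvm_context ac;
 *    ac_llvm_context_init(&ac, ...);    // module, builder, gfx_level, ...
 *    // ...build the function signature from 'args' and position the
 *    // builder at its entry block...
 *    if (!ac_nir_translate(&ac, &abi, &args, nir))
 *       return false;                   // hypothetical error path
 */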
bool ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                      const struct ac_shader_args *args, struct nir_shader *nir)
{
   struct ac_nir_context ctx = {0};
   struct nir_function *func;

   ctx.ac = *ac;
   ctx.abi = abi;
   ctx.args = args;

   ctx.stage = nir->info.stage;
   ctx.info = &nir->info;

   ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

   ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   if (ctx.abi->kill_ps_if_inf_interp)
      ctx.verified_interp =
         _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_index_ssa_defs(func->impl);
   ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

   setup_scratch(&ctx, nir);
   setup_constant_data(&ctx, nir);
   setup_gds(&ctx, func->impl);

   if (gl_shader_stage_is_compute(nir->info.stage))
      setup_shared(&ctx, nir);

   if (!visit_cf_list(&ctx, &func->impl->body))
      return false;

   /* All blocks exist now; wire up the phi sources recorded during the walk. */
   phi_post_pass(&ctx);

   free(ctx.ssa_defs);
   ralloc_free(ctx.defs);
   ralloc_free(ctx.phis);
   if (ctx.abi->kill_ps_if_inf_interp)
      ralloc_free(ctx.verified_interp);

   return true;
}

/* Fix up for the HW not loading the TCS input VGPRs when there are no HS
 * threads: the remaining inputs then arrive shifted into the slots the TCS
 * values would have used, so select each value from the shifted argument
 * in that case.
 */
void ac_fixup_ls_hs_input_vgprs(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                                const struct ac_shader_args *args)
{
   LLVMValueRef count = ac_unpack_param(ac, ac_get_arg(ac, args->merged_wave_info), 8, 8);
   LLVMValueRef hs_empty = LLVMBuildICmp(ac->builder, LLVMIntEQ, count, ac->i32_0, "");

   abi->instance_id =
      LLVMBuildSelect(ac->builder, hs_empty, ac_get_arg(ac, args->vertex_id),
                      abi->instance_id, "");

   abi->vs_rel_patch_id =
      LLVMBuildSelect(ac->builder, hs_empty, ac_get_arg(ac, args->tcs_rel_ids),
                      abi->vs_rel_patch_id, "");

   abi->vertex_id =
      LLVMBuildSelect(ac->builder, hs_empty, ac_get_arg(ac, args->tcs_patch_id),
                      abi->vertex_id, "");
}