1 /*
2  * Copyright © 2016 Bas Nieuwenhuizen
3  *
4  * SPDX-License-Identifier: MIT
5  */
6 
7 #include "ac_nir_to_llvm.h"
8 #include "ac_gpu_info.h"
9 #include "ac_binary.h"
10 #include "ac_llvm_build.h"
11 #include "ac_llvm_util.h"
12 #include "ac_shader_abi.h"
13 #include "ac_shader_util.h"
14 #include "ac_nir.h"
15 #include "nir/nir.h"
16 #include "nir/nir_deref.h"
17 #include "sid.h"
18 #include "util/bitscan.h"
19 #include "util/u_math.h"
20 #include <llvm/Config/llvm-config.h>
21 
22 struct ac_nir_context {
23    struct ac_llvm_context ac;
24    struct ac_shader_abi *abi;
25    const struct ac_shader_args *args;
26 
27    gl_shader_stage stage;
28    shader_info *info;
29 
30    LLVMValueRef *ssa_defs;
31 
32    struct ac_llvm_pointer scratch;
33    struct ac_llvm_pointer constant_data;
34 
35    struct hash_table *defs;
36    struct hash_table *phis;
37    struct hash_table *verified_interp;
38 
39    LLVMValueRef main_function;
40    LLVMBasicBlockRef continue_block;
41    LLVMBasicBlockRef break_block;
42 };
43 
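/* Return the LLVM integer (or integer vector) type matching a NIR SSA def. */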
44 static LLVMTypeRef get_def_type(struct ac_nir_context *ctx, const nir_def *def)
45 {
46    LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
47    if (def->num_components > 1) {
48       type = LLVMVectorType(type, def->num_components);
49    }
50    return type;
51 }
52 
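/* Look up the LLVM value previously emitted for a NIR SSA source. */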
53 static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
54 {
55    return nir->ssa_defs[src.ssa->index];
56 }
57 
58 static LLVMValueRef get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned c_off)
59 {
60    LLVMValueRef ptr = get_src(ctx, src);
61    ptr = LLVMBuildAdd(ctx->ac.builder, ptr, LLVMConstInt(ctx->ac.i32, c_off, 0), "");
62    /* LDS is used here as an i8 pointer. */
63    return LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, ctx->ac.lds.value, &ptr, 1, "");
64 }
65 
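/* Map a NIR block to the LLVM basic block created for it. */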
66 static LLVMBasicBlockRef get_block(struct ac_nir_context *nir, const struct nir_block *b)
67 {
68    struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
69    return (LLVMBasicBlockRef)entry->data;
70 }
71 
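/* Fetch an ALU source and apply its swizzle, extracting, broadcasting or
 * shuffling components as needed.
 */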
72 static LLVMValueRef get_alu_src(struct ac_nir_context *ctx, nir_alu_src src,
73                                 unsigned num_components)
74 {
75    LLVMValueRef value = get_src(ctx, src.src);
76    bool need_swizzle = false;
77 
78    assert(value);
79    unsigned src_components = ac_get_llvm_num_components(value);
80    for (unsigned i = 0; i < num_components; ++i) {
81       assert(src.swizzle[i] < src_components);
82       if (src.swizzle[i] != i)
83          need_swizzle = true;
84    }
85 
86    if (need_swizzle || num_components != src_components) {
87       LLVMValueRef masks[] = {LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
88                               LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
89                               LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
90                               LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};
91 
92       if (src_components > 1 && num_components == 1) {
93          value = LLVMBuildExtractElement(ctx->ac.builder, value, masks[0], "");
94       } else if (src_components == 1 && num_components > 1) {
95          LLVMValueRef values[] = {value, value, value, value};
96          value = ac_build_gather_values(&ctx->ac, values, num_components);
97       } else {
98          LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
99          value = LLVMBuildShuffleVector(ctx->ac.builder, value, value, swizzle, "");
100       }
101    }
102    return value;
103 }
104 
105 static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx, LLVMIntPredicate pred,
106                                  LLVMValueRef src0, LLVMValueRef src1)
107 {
108    src0 = ac_to_integer(ctx, src0);
109    src1 = ac_to_integer(ctx, src1);
110    return LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
111 }
112 
113 static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx, LLVMRealPredicate pred,
114                                    LLVMValueRef src0, LLVMValueRef src1)
115 {
116    src0 = ac_to_float(ctx, src0);
117    src1 = ac_to_float(ctx, src1);
118    return LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
119 }
120 
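/* Emit a one-operand float intrinsic, mangling the intrinsic name with the
 * operand's type.
 */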
121 static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx, const char *intrin,
122                                          LLVMTypeRef result_type, LLVMValueRef src0)
123 {
124    char name[64], type[64];
125    LLVMValueRef params[] = {
126       ac_to_float(ctx, src0),
127    };
128 
129    ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
130    ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
131    assert(length < sizeof(name));
132    return ac_build_intrinsic(ctx, name, result_type, params, 1, 0);
133 }
134 
135 static LLVMValueRef emit_intrin_1f_param_scalar(struct ac_llvm_context *ctx, const char *intrin,
136                                                 LLVMTypeRef result_type, LLVMValueRef src0)
137 {
138    if (LLVMGetTypeKind(result_type) != LLVMVectorTypeKind)
139       return emit_intrin_1f_param(ctx, intrin, result_type, src0);
140 
141    LLVMTypeRef elem_type = LLVMGetElementType(result_type);
142    LLVMValueRef ret = LLVMGetUndef(result_type);
143 
144    /* Scalarize the intrinsic, because vectors are not supported. */
145    for (unsigned i = 0; i < LLVMGetVectorSize(result_type); i++) {
146       char name[64], type[64];
147       LLVMValueRef params[] = {
148          ac_to_float(ctx, ac_llvm_extract_elem(ctx, src0, i)),
149       };
150 
151       ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
152       ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
153       assert(length < sizeof(name));
154       ret = LLVMBuildInsertElement(
155          ctx->builder, ret,
156          ac_build_intrinsic(ctx, name, elem_type, params, 1, 0),
157          LLVMConstInt(ctx->i32, i, 0), "");
158    }
159    return ret;
160 }
161 
162 static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx, const char *intrin,
163                                          LLVMTypeRef result_type, LLVMValueRef src0,
164                                          LLVMValueRef src1)
165 {
166    char name[64], type[64];
167    LLVMValueRef params[] = {
168       ac_to_float(ctx, src0),
169       ac_to_float(ctx, src1),
170    };
171 
172    ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
173    ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
174    assert(length < sizeof(name));
175    return ac_build_intrinsic(ctx, name, result_type, params, 2, 0);
176 }
177 
178 static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx, const char *intrin,
179                                          LLVMTypeRef result_type, LLVMValueRef src0,
180                                          LLVMValueRef src1, LLVMValueRef src2)
181 {
182    char name[64], type[64];
183    LLVMValueRef params[] = {
184       ac_to_float(ctx, src0),
185       ac_to_float(ctx, src1),
186       ac_to_float(ctx, src2),
187    };
188 
189    ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
190    ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
191    assert(length < sizeof(name));
192    return ac_build_intrinsic(ctx, name, result_type, params, 3, 0);
193 }
194 
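/* NIR bcsel: select between src1 and src2, coercing an integer operand to a
 * pointer when the other operand is a pointer.
 */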
195 static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx, LLVMValueRef src0, LLVMValueRef src1,
196                                LLVMValueRef src2)
197 {
198    LLVMTypeRef src1_type = LLVMTypeOf(src1);
199    LLVMTypeRef src2_type = LLVMTypeOf(src2);
200 
201    if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
202        LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
203       src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
204    } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
205               LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
206       src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
207    }
208 
209    return LLVMBuildSelect(ctx->builder, src0, ac_to_integer_or_pointer(ctx, src1),
210                           ac_to_integer_or_pointer(ctx, src2), "");
211 }
212 
213 static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx, LLVMValueRef src0)
214 {
215    return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
216 }
217 
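/* Return the carry/borrow bit of an unsigned add/sub-with-overflow intrinsic,
 * zero-extended to i32.
 */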
218 static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx, const char *intrin,
219                                     LLVMValueRef src0, LLVMValueRef src1)
220 {
221    LLVMTypeRef ret_type;
222    LLVMTypeRef types[] = {ctx->i32, ctx->i1};
223    LLVMValueRef res;
224    LLVMValueRef params[] = {src0, src1};
225    ret_type = LLVMStructTypeInContext(ctx->context, types, 2, false);
226 
227    res = ac_build_intrinsic(ctx, intrin, ret_type, params, 2, 0);
228 
229    res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
230    res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
231    return res;
232 }
233 
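/* Convert a boolean to 0.0/1.0 in the requested float bit size. */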
234 static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
235 {
236    assert(ac_get_elem_bits(ctx, LLVMTypeOf(src0)) == 1);
237 
238    switch (bitsize) {
239    case 16:
240       if (LLVMGetTypeKind(LLVMTypeOf(src0)) == LLVMVectorTypeKind) {
241          assert(LLVMGetVectorSize(LLVMTypeOf(src0)) == 2);
242          LLVMValueRef f[] = {
243             LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 0),
244                             ctx->f16_1, ctx->f16_0, ""),
245             LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 1),
246                             ctx->f16_1, ctx->f16_0, ""),
247          };
248          return ac_build_gather_values(ctx, f, 2);
249       }
250       return LLVMBuildSelect(ctx->builder, src0, ctx->f16_1, ctx->f16_0, "");
251    case 32:
252       return LLVMBuildSelect(ctx->builder, src0, ctx->f32_1, ctx->f32_0, "");
253    case 64:
254       return LLVMBuildSelect(ctx->builder, src0, ctx->f64_1, ctx->f64_0, "");
255    default:
256       unreachable("Unsupported bit size.");
257    }
258 }
259 
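/* Convert a boolean to 0/1 in the requested integer bit size. */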
260 static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
261 {
262    switch (bitsize) {
263    case 8:
264       return LLVMBuildSelect(ctx->builder, src0, ctx->i8_1, ctx->i8_0, "");
265    case 16:
266       if (LLVMGetTypeKind(LLVMTypeOf(src0)) == LLVMVectorTypeKind) {
267          assert(LLVMGetVectorSize(LLVMTypeOf(src0)) == 2);
268          LLVMValueRef i[] = {
269             LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 0),
270                             ctx->i16_1, ctx->i16_0, ""),
271             LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 1),
272                             ctx->i16_1, ctx->i16_0, ""),
273          };
274          return ac_build_gather_values(ctx, i, 2);
275       }
276       return LLVMBuildSelect(ctx->builder, src0, ctx->i16_1, ctx->i16_0, "");
277    case 32:
278       return LLVMBuildSelect(ctx->builder, src0, ctx->i32_1, ctx->i32_0, "");
279    case 64:
280       return LLVMBuildSelect(ctx->builder, src0, ctx->i64_1, ctx->i64_0, "");
281    default:
282       unreachable("Unsupported bit size.");
283    }
284 }
285 
286 static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx, LLVMValueRef src0)
287 {
288    LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
289    return LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, "");
290 }
291 
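/* fquantize2f16: truncate to f16 and extend back to f32, flushing f16 denormals to zero. */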
292 static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx, LLVMValueRef src0)
293 {
294    LLVMValueRef result;
295    LLVMValueRef cond = NULL;
296 
297    src0 = ac_to_float(ctx, src0);
298    result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");
299 
300    if (ctx->gfx_level >= GFX8) {
301       LLVMValueRef args[2];
302       /* Check if the result is a denormal - and flush to 0 if so. */
303       args[0] = result;
304       args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
305       cond =
306          ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, 0);
307    }
308 
309    /* need to convert back up to f32 */
310    result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");
311 
312    if (ctx->gfx_level >= GFX8)
313       result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
314    else {
315       /* for GFX6-GFX7 */
316       /* 0x38800000 is the smallest normal half-float value (2^-14) as a 32-bit float,
317        * so compare the result and flush to 0 if it's smaller.
318        */
319       LLVMValueRef temp, cond2;
320       temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
321       cond = LLVMBuildFCmp(
322          ctx->builder, LLVMRealOGT,
323          LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
324          temp, "");
325       cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE, temp, ctx->f32_0, "");
326       cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
327       result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
328    }
329    return result;
330 }
331 
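/* Unsigned 32x32 multiply returning the high 32 bits of the 64-bit product. */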
332 static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
333                                    LLVMValueRef src1)
334 {
335    LLVMValueRef dst64, result;
336 
337 /* 64-bit multiplication by a constant is broken in old LLVM. Fixed in LLVM 19.1 and LLVM 20. */
338 #if LLVM_VERSION_MAJOR < 19 || (LLVM_VERSION_MAJOR == 19 && LLVM_VERSION_MINOR == 0)
339    if (LLVMIsConstant(src0))
340       ac_build_optimization_barrier(ctx, &src1, false);
341    else
342       ac_build_optimization_barrier(ctx, &src0, false);
343 #endif
344 
345    src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
346    src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");
347 
348    dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
349    dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
350    result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
351    return result;
352 }
353 
354 static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
355                                    LLVMValueRef src1)
356 {
357    LLVMValueRef dst64, result;
358    src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
359    src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");
360 
361    dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
362    dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
363    result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
364    return result;
365 }
366 
367 static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx, LLVMValueRef bits, LLVMValueRef offset)
368 {
369    /* mask = ((1 << bits) - 1) << offset */
370    return LLVMBuildShl(
371       ctx->builder,
372       LLVMBuildSub(ctx->builder, LLVMBuildShl(ctx->builder, ctx->i32_1, bits, ""), ctx->i32_1, ""),
373       offset, "");
374 }
375 
376 static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx, LLVMValueRef mask,
377                                          LLVMValueRef insert, LLVMValueRef base)
378 {
379    /* Calculate:
380     *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
381     * Use the right-hand side, which the LLVM backend can convert to V_BFI.
382     */
383    return LLVMBuildXor(
384       ctx->builder, base,
385       LLVMBuildAnd(ctx->builder, mask, LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
386 }
387 
388 static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0,
389                                    LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
390                                                         LLVMValueRef args[2]))
391 {
392    LLVMValueRef comp[2];
393 
394    src0 = ac_to_float(ctx, src0);
395    comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
396    comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");
397 
398    return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
399 }
400 
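/* Unpack two f16 values from an i32 and extend each one to f32. */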
401 static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0)
402 {
403    LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
404    LLVMValueRef temps[2], val;
405    int i;
406 
407    for (i = 0; i < 2; i++) {
408       val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
409       val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
410       val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
411       temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
412    }
413    return ac_build_gather_values(ctx, temps, 2);
414 }
415 
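/* Emit fine/coarse screen-space derivatives (ddx/ddy) via ac_build_ddxy. */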
416 static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx, nir_intrinsic_op op, LLVMValueRef src0)
417 {
418    unsigned mask;
419    int idx;
420    LLVMValueRef result;
421 
422    if (op == nir_intrinsic_ddx_fine)
423       mask = AC_TID_MASK_LEFT;
424    else if (op == nir_intrinsic_ddy_fine)
425       mask = AC_TID_MASK_TOP;
426    else
427       mask = AC_TID_MASK_TOP_LEFT;
428 
429    /* For DDX we want the next X pixel, for DDY the next Y pixel. */
430    if (op == nir_intrinsic_ddx_fine || op == nir_intrinsic_ddx_coarse || op == nir_intrinsic_ddx)
431       idx = 1;
432    else
433       idx = 2;
434 
435    result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
436    return result;
437 }
438 
439 struct waterfall_context {
440    LLVMBasicBlockRef phi_bb[2];
441    bool use_waterfall;
442 };
443 
444 /* To deal with divergent descriptors we can create a loop that handles all
445  * lanes with the same descriptor on a given iteration (henceforth a
446  * waterfall loop).
447  *
448  * These helpers create the begin and end of the loop, leaving the caller
449  * to implement the body.
450  *
451  * params:
452  *  - ctx is the usual nir context
453  *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
454  *  - value is the possibly divergent value for which we build the loop
455  *  - divergent is whether value is actually divergent. If false we just pass
456  *     things through.
457  */
458 static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
459                                     LLVMValueRef value, bool divergent)
460 {
461    /* If the app claims the value is divergent but it is constant we can
462     * end up with a dynamic index of NULL. */
463    if (!value)
464       divergent = false;
465 
466    wctx->use_waterfall = divergent;
467    if (!divergent)
468       return value;
469 
470    ac_build_bgnloop(&ctx->ac, 6000);
471 
472    LLVMValueRef active = ctx->ac.i1true;
473    LLVMValueRef scalar_value[NIR_MAX_VEC_COMPONENTS];
474 
475    for (unsigned i = 0; i < ac_get_llvm_num_components(value); i++) {
476       LLVMValueRef comp = ac_llvm_extract_elem(&ctx->ac, value, i);
477       scalar_value[i] = ac_build_readlane(&ctx->ac, comp, NULL);
478       active = LLVMBuildAnd(ctx->ac.builder, active,
479                             LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, comp, scalar_value[i], ""), "");
480    }
481 
482    wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
483    ac_build_ifcc(&ctx->ac, active, 6001);
484 
485    return ac_build_gather_values(&ctx->ac, scalar_value, ac_get_llvm_num_components(value));
486 }
487 
488 static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
489                                    LLVMValueRef value)
490 {
491    LLVMValueRef ret = NULL;
492    LLVMValueRef phi_src[2];
493    LLVMValueRef cc_phi_src[2] = {
494       ctx->ac.i32_0,
495       LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
496    };
497 
498    if (!wctx->use_waterfall)
499       return value;
500 
501    wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);
502 
503    ac_build_endif(&ctx->ac, 6001);
504 
505    if (value) {
506       phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
507       phi_src[1] = value;
508 
509       ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
510    }
511 
512    /*
513     * By using the optimization barrier on the exit decision, we decouple
514     * the operations from the break, and hence avoid LLVM hoisting the
515     * operation into the break block.
516     */
517    LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
518    ac_build_optimization_barrier(&ctx->ac, &cc, false);
519 
520    LLVMValueRef active =
521       LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
522    ac_build_ifcc(&ctx->ac, active, 6002);
523    ac_build_break(&ctx->ac);
524    ac_build_endif(&ctx->ac, 6002);
525 
526    ac_build_endloop(&ctx->ac, 6000);
527    return ret;
528 }
529 
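/* Build an integer constant of the given type, splatted across both lanes for
 * 2x16 vector types.
 */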
530 static LLVMValueRef
531 ac_build_const_int_vec(struct ac_llvm_context *ctx, LLVMTypeRef type, long long val, bool sign_extend)
532 {
533    unsigned num_components = LLVMGetTypeKind(type) == LLVMVectorTypeKind ? LLVMGetVectorSize(type) : 1;
534 
535    if (num_components == 1)
536       return LLVMConstInt(type, val, sign_extend);
537 
538    assert(num_components == 2);
539    assert(ac_get_elem_bits(ctx, type) == 16);
540 
541    LLVMTypeRef elem_type = LLVMGetElementType(type);
542 
543    LLVMValueRef elems[2];
544    for (unsigned i = 0; i < 2; ++i)
545       elems[i] = LLVMConstInt(elem_type, val, sign_extend);
546 
547    return LLVMConstVector(elems, 2);
548 }
549 
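/* Translate one NIR ALU instruction into LLVM IR. Returns false for unhandled opcodes. */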
550 static bool visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
551 {
552    LLVMValueRef src[16], result = NULL;
553    unsigned num_components = instr->def.num_components;
554    LLVMTypeRef def_type = get_def_type(ctx, &instr->def);
555 
556    assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
557    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
558       src[i] = get_alu_src(ctx, instr->src[i], nir_ssa_alu_instr_src_components(instr, i));
559 
560    switch (instr->op) {
561    case nir_op_mov:
562       result = src[0];
563       break;
564    case nir_op_fneg:
565       src[0] = ac_to_float(&ctx->ac, src[0]);
566       result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
567       break;
568    case nir_op_inot:
569       result = LLVMBuildNot(ctx->ac.builder, src[0], "");
570       break;
571    case nir_op_iadd:
572       if (instr->no_unsigned_wrap)
573          result = LLVMBuildNUWAdd(ctx->ac.builder, src[0], src[1], "");
574       else if (instr->no_signed_wrap)
575          result = LLVMBuildNSWAdd(ctx->ac.builder, src[0], src[1], "");
576       else
577          result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
578       break;
579    case nir_op_uadd_sat:
580    case nir_op_iadd_sat: {
581       char name[64], type[64];
582       ac_build_type_name_for_intr(def_type, type, sizeof(type));
583       snprintf(name, sizeof(name), "llvm.%cadd.sat.%s",
584                instr->op == nir_op_uadd_sat ? 'u' : 's', type);
585       result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, 0);
586       break;
587    }
588    case nir_op_usub_sat:
589    case nir_op_isub_sat: {
590       char name[64], type[64];
591       ac_build_type_name_for_intr(def_type, type, sizeof(type));
592       snprintf(name, sizeof(name), "llvm.%csub.sat.%s",
593                instr->op == nir_op_usub_sat ? 'u' : 's', type);
594       result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, 0);
595       break;
596    }
597    case nir_op_fadd:
598       src[0] = ac_to_float(&ctx->ac, src[0]);
599       src[1] = ac_to_float(&ctx->ac, src[1]);
600       result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
601       break;
602    case nir_op_fsub:
603       src[0] = ac_to_float(&ctx->ac, src[0]);
604       src[1] = ac_to_float(&ctx->ac, src[1]);
605       result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
606       break;
607    case nir_op_isub:
608       if (instr->no_unsigned_wrap)
609          result = LLVMBuildNUWSub(ctx->ac.builder, src[0], src[1], "");
610       else if (instr->no_signed_wrap)
611          result = LLVMBuildNSWSub(ctx->ac.builder, src[0], src[1], "");
612       else
613          result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
614       break;
615    case nir_op_imul:
616       if (instr->no_unsigned_wrap)
617          result = LLVMBuildNUWMul(ctx->ac.builder, src[0], src[1], "");
618       else if (instr->no_signed_wrap)
619          result = LLVMBuildNSWMul(ctx->ac.builder, src[0], src[1], "");
620       else
621          result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
622       break;
623    case nir_op_fmul:
624       src[0] = ac_to_float(&ctx->ac, src[0]);
625       src[1] = ac_to_float(&ctx->ac, src[1]);
626       result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
627       break;
628    case nir_op_fmulz:
629       src[0] = ac_to_float(&ctx->ac, src[0]);
630       src[1] = ac_to_float(&ctx->ac, src[1]);
631       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fmul.legacy", ctx->ac.f32,
632                                   src, 2, 0);
633       break;
634    case nir_op_frcp:
635       result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rcp",
636                                            ac_to_float_type(&ctx->ac, def_type), src[0]);
637       if (ctx->abi->clamp_div_by_zero)
638          result = ac_build_fmin(&ctx->ac, result,
639                                 LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
640       break;
641    case nir_op_iand:
642       result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
643       break;
644    case nir_op_ior:
645       result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
646       break;
647    case nir_op_ixor:
648       result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
649       break;
650    case nir_op_ishl:
651    case nir_op_ishr:
652    case nir_op_ushr: {
653       if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) <
654           ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
655          src[1] = LLVMBuildZExt(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
656       else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) >
657                ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
658          src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
659       LLVMTypeRef type = LLVMTypeOf(src[1]);
660       src[1] = LLVMBuildAnd(ctx->ac.builder, src[1],
661                             ac_build_const_int_vec(&ctx->ac, type, ac_get_elem_bits(&ctx->ac, type) - 1, false), "");
662       switch (instr->op) {
663       case nir_op_ishl:
664          result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
665          break;
666       case nir_op_ishr:
667          result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
668          break;
669       case nir_op_ushr:
670          result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
671          break;
672       default:
673          break;
674       }
675       break;
676    }
677    case nir_op_ilt:
678       result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
679       break;
680    case nir_op_ine:
681       result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
682       break;
683    case nir_op_ieq:
684       result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
685       break;
686    case nir_op_ige:
687       result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
688       break;
689    case nir_op_ult:
690       result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
691       break;
692    case nir_op_uge:
693       result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
694       break;
695    case nir_op_feq:
696       result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
697       break;
698    case nir_op_fneu:
699       result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
700       break;
701    case nir_op_fequ:
702       result = emit_float_cmp(&ctx->ac, LLVMRealUEQ, src[0], src[1]);
703       break;
704    case nir_op_fneo:
705       result = emit_float_cmp(&ctx->ac, LLVMRealONE, src[0], src[1]);
706       break;
707    case nir_op_flt:
708       result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
709       break;
710    case nir_op_fge:
711       result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
712       break;
713    case nir_op_fltu:
714       result = emit_float_cmp(&ctx->ac, LLVMRealULT, src[0], src[1]);
715       break;
716    case nir_op_fgeu:
717       result = emit_float_cmp(&ctx->ac, LLVMRealUGE, src[0], src[1]);
718       break;
719    case nir_op_funord:
720       result = emit_float_cmp(&ctx->ac, LLVMRealUNO, src[0], src[1]);
721       break;
722    case nir_op_ford:
723       result = emit_float_cmp(&ctx->ac, LLVMRealORD, src[0], src[1]);
724       break;
725    case nir_op_fabs:
726       result =
727          emit_intrin_1f_param(&ctx->ac, "llvm.fabs", ac_to_float_type(&ctx->ac, def_type), src[0]);
728       break;
729    case nir_op_fsat:
730       src[0] = ac_to_float(&ctx->ac, src[0]);
731       result = ac_build_fsat(&ctx->ac, src[0],
732                              ac_to_float_type(&ctx->ac, def_type));
733       break;
734    case nir_op_iabs:
735       result = emit_iabs(&ctx->ac, src[0]);
736       break;
737    case nir_op_imax:
738       result = ac_build_imax(&ctx->ac, src[0], src[1]);
739       break;
740    case nir_op_imin:
741       result = ac_build_imin(&ctx->ac, src[0], src[1]);
742       break;
743    case nir_op_umax:
744       result = ac_build_umax(&ctx->ac, src[0], src[1]);
745       break;
746    case nir_op_umin:
747       result = ac_build_umin(&ctx->ac, src[0], src[1]);
748       break;
749    case nir_op_isign:
750       result = ac_build_isign(&ctx->ac, src[0]);
751       break;
752    case nir_op_fsign:
753       src[0] = ac_to_float(&ctx->ac, src[0]);
754       result = ac_build_fsign(&ctx->ac, src[0]);
755       break;
756    case nir_op_ffloor:
757       result =
758          emit_intrin_1f_param(&ctx->ac, "llvm.floor", ac_to_float_type(&ctx->ac, def_type), src[0]);
759       break;
760    case nir_op_ftrunc:
761       result =
762          emit_intrin_1f_param(&ctx->ac, "llvm.trunc", ac_to_float_type(&ctx->ac, def_type), src[0]);
763       break;
764    case nir_op_fceil:
765       result =
766          emit_intrin_1f_param(&ctx->ac, "llvm.ceil", ac_to_float_type(&ctx->ac, def_type), src[0]);
767       break;
768    case nir_op_fround_even:
769       result =
770          emit_intrin_1f_param(&ctx->ac, "llvm.rint", ac_to_float_type(&ctx->ac, def_type), src[0]);
771       break;
772    case nir_op_ffract:
773       result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
774                                            ac_to_float_type(&ctx->ac, def_type), src[0]);
775       break;
776    case nir_op_fsin_amd:
777    case nir_op_fcos_amd:
778       /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
779       if (ctx->ac.gfx_level < GFX9)
780          src[0] = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
781                                               ac_to_float_type(&ctx->ac, def_type), src[0]);
782       result =
783          emit_intrin_1f_param(&ctx->ac, instr->op == nir_op_fsin_amd ? "llvm.amdgcn.sin" : "llvm.amdgcn.cos",
784                               ac_to_float_type(&ctx->ac, def_type), src[0]);
785       break;
786    case nir_op_fsqrt:
787       result =
788          emit_intrin_1f_param(&ctx->ac, "llvm.sqrt", ac_to_float_type(&ctx->ac, def_type), src[0]);
789       LLVMSetMetadata(result, ctx->ac.fpmath_md_kind, ctx->ac.three_md);
790       break;
791    case nir_op_fexp2:
792       result =
793          emit_intrin_1f_param(&ctx->ac, "llvm.exp2", ac_to_float_type(&ctx->ac, def_type), src[0]);
794       break;
795    case nir_op_flog2:
796       result =
797          emit_intrin_1f_param(&ctx->ac, "llvm.log2", ac_to_float_type(&ctx->ac, def_type), src[0]);
798       break;
799    case nir_op_frsq:
800       result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rsq",
801                                            ac_to_float_type(&ctx->ac, def_type), src[0]);
802       if (ctx->abi->clamp_div_by_zero)
803          result = ac_build_fmin(&ctx->ac, result,
804                                 LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
805       break;
806    case nir_op_frexp_exp:
807       src[0] = ac_to_float(&ctx->ac, src[0]);
808       result = ac_build_frexp_exp(&ctx->ac, src[0], ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
809       if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
810          result = LLVMBuildSExt(ctx->ac.builder, result, ctx->ac.i32, "");
811       break;
812    case nir_op_frexp_sig:
813       src[0] = ac_to_float(&ctx->ac, src[0]);
814       result = ac_build_frexp_mant(&ctx->ac, src[0], instr->def.bit_size);
815       break;
816    case nir_op_fmax:
817       result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum", ac_to_float_type(&ctx->ac, def_type),
818                                     src[0], src[1]);
819       if (ctx->ac.gfx_level < GFX9 && instr->def.bit_size == 32) {
820          /* Only pre-GFX9 chips do not flush denorms. */
821          result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
822       }
823       break;
824    case nir_op_fmin:
825       result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum", ac_to_float_type(&ctx->ac, def_type),
826                                     src[0], src[1]);
827       if (ctx->ac.gfx_level < GFX9 && instr->def.bit_size == 32) {
828          /* Only pre-GFX9 chips do not flush denorms. */
829          result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
830       }
831       break;
832    case nir_op_ffma:
833       /* FMA is slow on gfx6-8, so it shouldn't be used. */
834       assert(instr->def.bit_size != 32 || ctx->ac.gfx_level >= GFX9);
835       result = emit_intrin_3f_param(&ctx->ac, "llvm.fma", ac_to_float_type(&ctx->ac, def_type),
836                                     src[0], src[1], src[2]);
837       break;
838    case nir_op_ffmaz:
839       assert(ctx->ac.gfx_level >= GFX10_3);
840       src[0] = ac_to_float(&ctx->ac, src[0]);
841       src[1] = ac_to_float(&ctx->ac, src[1]);
842       src[2] = ac_to_float(&ctx->ac, src[2]);
843       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fma.legacy", ctx->ac.f32,
844                                   src, 3, 0);
845       break;
846    case nir_op_ldexp:
847       src[0] = ac_to_float(&ctx->ac, src[0]);
848       if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
849          result = ac_build_intrinsic(&ctx->ac,
850                                      LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f32.i32"
851                                                               : "llvm.amdgcn.ldexp.f32",
852                                      ctx->ac.f32, src, 2, 0);
853       else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
854          result = ac_build_intrinsic(&ctx->ac,
855                                      LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f16.i32"
856                                                               : "llvm.amdgcn.ldexp.f16",
857                                      ctx->ac.f16, src, 2, 0);
858       else
859          result = ac_build_intrinsic(&ctx->ac,
860                                      LLVM_VERSION_MAJOR >= 18 ? "llvm.ldexp.f64.i32"
861                                                               : "llvm.amdgcn.ldexp.f64",
862                                      ctx->ac.f64, src, 2, 0);
863       break;
864    case nir_op_bfm:
865       result = emit_bfm(&ctx->ac, src[0], src[1]);
866       break;
867    case nir_op_bitfield_select:
868       result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
869       break;
870    case nir_op_ubfe:
871       result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
872       break;
873    case nir_op_ibfe:
874       result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
875       break;
876    case nir_op_bitfield_reverse:
877       result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
878       break;
879    case nir_op_bit_count:
880       result = ac_build_bit_count(&ctx->ac, src[0]);
881       break;
882    case nir_op_vec2:
883    case nir_op_vec3:
884    case nir_op_vec4:
885    case nir_op_vec5:
886    case nir_op_vec8:
887    case nir_op_vec16:
888       for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
889          src[i] = ac_to_integer(&ctx->ac, src[i]);
890       result = ac_build_gather_values(&ctx->ac, src, num_components);
891       break;
892    case nir_op_f2i8:
893    case nir_op_f2i16:
894    case nir_op_f2i32:
895    case nir_op_f2i64:
896       src[0] = ac_to_float(&ctx->ac, src[0]);
897       result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
898       break;
899    case nir_op_f2u8:
900    case nir_op_f2u16:
901    case nir_op_f2u32:
902    case nir_op_f2u64:
903       src[0] = ac_to_float(&ctx->ac, src[0]);
904       result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
905       break;
906    case nir_op_i2f16:
907    case nir_op_i2f32:
908    case nir_op_i2f64:
909       result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
910       break;
911    case nir_op_u2f16:
912    case nir_op_u2f32:
913    case nir_op_u2f64:
914       result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
915       break;
916    case nir_op_f2f16_rtz: {
917       src[0] = ac_to_float(&ctx->ac, src[0]);
918 
919       if (LLVMTypeOf(src[0]) == ctx->ac.f64)
920          src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");
921 
922       /* Fast path conversion. This only works if NIR is vectorized
923        * to a 16-bit vec2.
924        */
925       if (LLVMTypeOf(src[0]) == ctx->ac.v2f32) {
926          LLVMValueRef args[] = {
927             ac_llvm_extract_elem(&ctx->ac, src[0], 0),
928             ac_llvm_extract_elem(&ctx->ac, src[0], 1),
929          };
930          result = ac_build_cvt_pkrtz_f16(&ctx->ac, args);
931          break;
932       }
933 
934       assert(ac_get_llvm_num_components(src[0]) == 1);
935       LLVMValueRef param[2] = {src[0], LLVMGetUndef(ctx->ac.f32)};
936       result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
937       result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
938       break;
939    }
940    case nir_op_f2f16:
941    case nir_op_f2f16_rtne:
942    case nir_op_f2f32:
943    case nir_op_f2f64:
944       src[0] = ac_to_float(&ctx->ac, src[0]);
945       if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
946          result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
947       else
948          result =
949             LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
950       break;
951    case nir_op_u2u8:
952    case nir_op_u2u16:
953    case nir_op_u2u32:
954    case nir_op_u2u64:
955       if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
956          result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
957       else
958          result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
959       break;
960    case nir_op_i2i8:
961    case nir_op_i2i16:
962    case nir_op_i2i32:
963    case nir_op_i2i64:
964       if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
965          result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
966       else
967          result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
968       break;
969    case nir_op_bcsel:
970       result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
971       break;
972    case nir_op_find_lsb:
973       result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
974       break;
975    case nir_op_ufind_msb:
976       result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32, false);
977       break;
978    case nir_op_ifind_msb:
979       result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
980       break;
981    case nir_op_ufind_msb_rev:
982       result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32, true);
983       break;
984    case nir_op_ifind_msb_rev:
985       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.sffbh.i32", ctx->ac.i32, &src[0], 1,
986                                   0);
987       break;
988    case nir_op_uclz: {
989       LLVMValueRef params[2] = {
990          src[0],
991          ctx->ac.i1false,
992       };
993       result = ac_build_intrinsic(&ctx->ac, "llvm.ctlz.i32", ctx->ac.i32, params, 2, 0);
994       break;
995    }
996    case nir_op_uadd_carry:
997       result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
998       break;
999    case nir_op_usub_borrow:
1000       result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
1001       break;
1002    case nir_op_b2f16:
1003    case nir_op_b2f32:
1004    case nir_op_b2f64:
1005       result = emit_b2f(&ctx->ac, src[0], instr->def.bit_size);
1006       break;
1007    case nir_op_b2i8:
1008    case nir_op_b2i16:
1009    case nir_op_b2i32:
1010    case nir_op_b2i64:
1011       result = emit_b2i(&ctx->ac, src[0], instr->def.bit_size);
1012       break;
1013    case nir_op_b2b1: /* after loads */
1014       result = emit_i2b(&ctx->ac, src[0]);
1015       break;
1016    case nir_op_b2b16: /* before stores */
1017       result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i16, "");
1018       break;
1019    case nir_op_b2b32: /* before stores */
1020       result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i32, "");
1021       break;
1022    case nir_op_fquantize2f16:
1023       result = emit_f2f16(&ctx->ac, src[0]);
1024       break;
1025    case nir_op_umul_high:
1026       result = emit_umul_high(&ctx->ac, src[0], src[1]);
1027       break;
1028    case nir_op_imul_high:
1029       result = emit_imul_high(&ctx->ac, src[0], src[1]);
1030       break;
1031    case nir_op_pack_half_2x16_rtz_split:
1032    case nir_op_pack_half_2x16_split:
1033       src[0] = ac_to_float(&ctx->ac, src[0]);
1034       src[1] = ac_to_float(&ctx->ac, src[1]);
1035       result = LLVMBuildBitCast(ctx->ac.builder,
1036                                 ac_build_cvt_pkrtz_f16(&ctx->ac, src),
1037                                 ctx->ac.i32, "");
1038       break;
1039    case nir_op_pack_snorm_2x16:
1040    case nir_op_pack_unorm_2x16: {
1041       unsigned bit_size = instr->src[0].src.ssa->bit_size;
1042       /* Only 16-bit and 32-bit are supported. */
1043       assert(bit_size == 16 || bit_size == 32);
1044 
1045       LLVMValueRef data = src[0];
1046       /* Workaround for pre-GFX9 GPUs, which don't have the fp16 pknorm instruction. */
1047       if (bit_size == 16 && ctx->ac.gfx_level < GFX9) {
1048          data = LLVMBuildFPExt(ctx->ac.builder, data, ctx->ac.v2f32, "");
1049          bit_size = 32;
1050       }
1051 
1052       LLVMValueRef (*pack)(struct ac_llvm_context *ctx, LLVMValueRef args[2]);
1053       if (bit_size == 32) {
1054          pack = instr->op == nir_op_pack_snorm_2x16 ?
1055             ac_build_cvt_pknorm_i16 : ac_build_cvt_pknorm_u16;
1056       } else {
1057          pack = instr->op == nir_op_pack_snorm_2x16 ?
1058             ac_build_cvt_pknorm_i16_f16 : ac_build_cvt_pknorm_u16_f16;
1059       }
1060       result = emit_pack_2x16(&ctx->ac, data, pack);
1061       break;
1062    }
1063    case nir_op_pack_uint_2x16: {
1064       LLVMValueRef comp[2];
1065 
1066       comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
1067       comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");
1068 
1069       result = ac_build_cvt_pk_u16(&ctx->ac, comp, 16, false);
1070       break;
1071    }
1072    case nir_op_pack_sint_2x16: {
1073       LLVMValueRef comp[2];
1074 
1075       comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
1076       comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");
1077 
1078       result = ac_build_cvt_pk_i16(&ctx->ac, comp, 16, false);
1079       break;
1080    }
1081    case nir_op_unpack_half_2x16_split_x: {
1082       assert(ac_get_llvm_num_components(src[0]) == 1);
1083       LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
1084       result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
1085       break;
1086    }
1087    case nir_op_unpack_half_2x16_split_y: {
1088       assert(ac_get_llvm_num_components(src[0]) == 1);
1089       LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
1090       result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
1091       break;
1092    }
1093    case nir_op_unpack_64_4x16: {
1094       result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v4i16, "");
1095       break;
1096    }
1097 
1098    case nir_op_unpack_64_2x32: {
1099       result = LLVMBuildBitCast(ctx->ac.builder, src[0],
1100             ctx->ac.v2i32, "");
1101       break;
1102    }
1103    case nir_op_unpack_64_2x32_split_x: {
1104       assert(ac_get_llvm_num_components(src[0]) == 1);
1105       LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
1106       result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
1107       break;
1108    }
1109    case nir_op_unpack_64_2x32_split_y: {
1110       assert(ac_get_llvm_num_components(src[0]) == 1);
1111       LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
1112       result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
1113       break;
1114    }
1115 
1116    case nir_op_pack_64_2x32_split: {
1117       LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
1118       result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
1119       break;
1120    }
1121 
1122    case nir_op_pack_32_4x8: {
1123       result = LLVMBuildBitCast(ctx->ac.builder, src[0],
1124             ctx->ac.i32, "");
1125       break;
1126    }
1127    case nir_op_pack_32_2x16_split: {
1128       LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
1129       result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
1130       break;
1131    }
1132 
1133    case nir_op_unpack_32_4x8:
1134       result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v4i8, "");
1135       break;
1136    case nir_op_unpack_32_2x16: {
1137       result = LLVMBuildBitCast(ctx->ac.builder, src[0],
1138             ctx->ac.v2i16, "");
1139       break;
1140    }
1141    case nir_op_unpack_32_2x16_split_x: {
1142       LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
1143       result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
1144       break;
1145    }
1146    case nir_op_unpack_32_2x16_split_y: {
1147       LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
1148       result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
1149       break;
1150    }
1151 
1152    case nir_op_cube_amd: {
1153       src[0] = ac_to_float(&ctx->ac, src[0]);
1154       LLVMValueRef results[4];
1155       LLVMValueRef in[3];
1156       for (unsigned chan = 0; chan < 3; chan++)
1157          in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
1158       results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc", ctx->ac.f32, in, 3, 0);
1159       results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc", ctx->ac.f32, in, 3, 0);
1160       results[2] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema", ctx->ac.f32, in, 3, 0);
1161       results[3] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid", ctx->ac.f32, in, 3, 0);
1162       result = ac_build_gather_values(&ctx->ac, results, 4);
1163       break;
1164    }
1165 
1166    case nir_op_extract_u8:
1167    case nir_op_extract_i8:
1168    case nir_op_extract_u16:
1169    case nir_op_extract_i16: {
1170       bool is_signed = instr->op == nir_op_extract_i16 || instr->op == nir_op_extract_i8;
1171       unsigned size = instr->op == nir_op_extract_u8 || instr->op == nir_op_extract_i8 ? 8 : 16;
1172       LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
1173       result = LLVMBuildLShr(ctx->ac.builder, src[0], offset, "");
1174       result = LLVMBuildTrunc(ctx->ac.builder, result, LLVMIntTypeInContext(ctx->ac.context, size), "");
1175       if (is_signed)
1176          result = LLVMBuildSExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
1177       else
1178          result = LLVMBuildZExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
1179       break;
1180    }
1181 
1182    case nir_op_insert_u8:
1183    case nir_op_insert_u16: {
1184       unsigned size = instr->op == nir_op_insert_u8 ? 8 : 16;
1185       LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
1186       LLVMValueRef mask = LLVMConstInt(LLVMTypeOf(src[0]), u_bit_consecutive(0, size), false);
1187       result = LLVMBuildShl(ctx->ac.builder, LLVMBuildAnd(ctx->ac.builder, src[0], mask, ""), offset, "");
1188       break;
1189    }
1190 
1191    case nir_op_sdot_4x8_iadd:
1192    case nir_op_sdot_4x8_iadd_sat: {
1193       if (ctx->ac.gfx_level >= GFX11) {
1194          result = ac_build_sudot_4x8(&ctx->ac, src[0], src[1], src[2],
1195                                      instr->op == nir_op_sdot_4x8_iadd_sat, 0x3);
1196       } else {
1197          const char *name = "llvm.amdgcn.sdot4";
1198          src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_4x8_iadd_sat, false);
1199          result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
1200       }
1201       break;
1202    }
1203    case nir_op_sudot_4x8_iadd:
1204    case nir_op_sudot_4x8_iadd_sat: {
1205       result = ac_build_sudot_4x8(&ctx->ac, src[0], src[1], src[2],
1206                                   instr->op == nir_op_sudot_4x8_iadd_sat, 0x1);
1207       break;
1208    }
1209    case nir_op_udot_4x8_uadd:
1210    case nir_op_udot_4x8_uadd_sat: {
1211       const char *name = "llvm.amdgcn.udot4";
1212       src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_udot_4x8_uadd_sat, false);
1213       result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
1214       break;
1215    }
1216 
1217    case nir_op_sdot_2x16_iadd:
1218    case nir_op_udot_2x16_uadd:
1219    case nir_op_sdot_2x16_iadd_sat:
1220    case nir_op_udot_2x16_uadd_sat: {
1221       const char *name = instr->op == nir_op_sdot_2x16_iadd ||
1222                          instr->op == nir_op_sdot_2x16_iadd_sat
1223                          ? "llvm.amdgcn.sdot2" : "llvm.amdgcn.udot2";
1224       src[0] = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
1225       src[1] = LLVMBuildBitCast(ctx->ac.builder, src[1], ctx->ac.v2i16, "");
1226       src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_2x16_iadd_sat ||
1227                                         instr->op == nir_op_udot_2x16_uadd_sat, false);
1228       result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, 0);
1229       break;
1230    }
1231 
1232    case nir_op_msad_4x8:
1233       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.msad.u8", ctx->ac.i32,
1234                                   (LLVMValueRef[]){src[1], src[0], src[2]}, 3, 0);
1235       break;
1236 
1237    case nir_op_mqsad_4x8:
1238       src[1] = LLVMBuildBitCast(ctx->ac.builder, src[1], ctx->ac.i64, "");
1239       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.mqsad.u32.u8", ctx->ac.v4i32,
1240                                   (LLVMValueRef[]){src[1], src[0], src[2]}, 3, 0);
1241       break;
1242 
1243    case nir_op_shfr:
1244       result = ac_build_intrinsic(&ctx->ac, "llvm.fshr.i32", ctx->ac.i32,
1245                                   (LLVMValueRef[]){src[0], src[1], src[2]}, 3, 0);
1246       break;
1247 
1248    case nir_op_alignbyte_amd:
1249       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32,
1250                                   (LLVMValueRef[]){src[0], src[1], src[2]}, 3, 0);
1251       break;
1252 
1253    default:
1254       fprintf(stderr, "Unknown NIR alu instr: ");
1255       nir_print_instr(&instr->instr, stderr);
1256       fprintf(stderr, "\n");
1257       return false;
1258    }
1259 
1260    if (result) {
1261       LLVMTypeKind type_kind = LLVMGetTypeKind(LLVMTypeOf(result));
1262       bool is_float = type_kind == LLVMHalfTypeKind || type_kind == LLVMFloatTypeKind || type_kind == LLVMDoubleTypeKind;
1263       if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO && is_float)
1264          result = ac_build_canonicalize(&ctx->ac, result, instr->def.bit_size);
1265 
1266       result = ac_to_integer_or_pointer(&ctx->ac, result);
1267       ctx->ssa_defs[instr->def.index] = result;
1268    }
1269    return true;
1270 }
1271 
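/* Materialize a scalar NIR load_const as an LLVM integer constant. */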
1272 static void visit_load_const(struct ac_nir_context *ctx, const nir_load_const_instr *instr)
1273 {
1274    assert(instr->def.num_components == 1);
1275 
1276    ctx->ssa_defs[instr->def.index] =
1277       LLVMConstInt(LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size),
1278                    nir_const_value_as_uint(instr->value[0], instr->def.bit_size), false);
1279 }
1280 
1281 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
1282  * incorrectly forces nearest filtering if the texture format is integer.
1283  * The only effect it has on Gather4, which always returns 4 texels for
1284  * bilinear filtering, is that the final coordinates are off by 0.5 of
1285  * the texel size.
1286  *
1287  * The workaround is to subtract 0.5 from the unnormalized coordinates,
1288  * or (0.5 / size) from the normalized coordinates.
1289  *
1290  * However, cube textures with 8_8_8_8 data formats require a different
1291  * workaround of overriding the num format to USCALED/SSCALED. This would lose
1292  * precision in 32-bit data formats, so it needs to be applied dynamically at
1293  * runtime. In this case, return an i1 value that indicates whether the
1294  * descriptor was overridden (and hence a fixup of the sampler result is needed).
1295  */
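/* As a rough worked example of the first workaround: an unnormalized gather at
 * (10.0, 20.0) becomes (9.5, 19.5), while a normalized gather on a 256x128 mip
 * level is offset by (-0.5/256, -0.5/128) ~= (-0.00195, -0.00391).
 */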
1296 static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx, struct ac_image_args *args,
1297                                           const nir_tex_instr *instr)
1298 {
1299    nir_alu_type stype = nir_alu_type_get_base_type(instr->dest_type);
1300    LLVMValueRef wa_8888 = NULL;
1301    LLVMValueRef half_texel[2];
1302    LLVMValueRef result;
1303 
1304    assert(stype == nir_type_int || stype == nir_type_uint);
1305 
1306    if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1307       LLVMValueRef formats;
1308       LLVMValueRef data_format;
1309       LLVMValueRef wa_formats;
1310 
1311       formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");
1312 
1313       data_format = LLVMBuildLShr(ctx->builder, formats, LLVMConstInt(ctx->i32, 20, false), "");
1314       data_format =
1315          LLVMBuildAnd(ctx->builder, data_format, LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
1316       wa_8888 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, data_format,
1317                               LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false), "");
1318 
1319       uint32_t wa_num_format = stype == nir_type_uint
1320                                   ? S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED)
1321                                   : S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
1322       wa_formats = LLVMBuildAnd(ctx->builder, formats,
1323                                 LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false), "");
1324       wa_formats =
1325          LLVMBuildOr(ctx->builder, wa_formats, LLVMConstInt(ctx->i32, wa_num_format, false), "");
1326 
1327       formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
1328       args->resource =
1329          LLVMBuildInsertElement(ctx->builder, args->resource, formats, ctx->i32_1, "");
1330    }
1331 
1332    if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
1333       assert(!wa_8888);
1334       half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
1335    } else {
1336       struct ac_image_args resinfo = {0};
1337       LLVMBasicBlockRef bbs[2];
1338 
1339       LLVMValueRef unnorm = NULL;
1340       LLVMValueRef default_offset = ctx->f32_0;
1341       if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D && !instr->is_array) {
1342          /* In Vulkan, whether the sampler uses unnormalized
1343           * coordinates or not is a dynamic property of the
1344           * sampler. Hence, to figure out whether or not we
1345           * need to divide by the texture size, we need to test
1346           * the sampler at runtime. This tests the bit set by
1347           * radv_init_sampler().
1348           */
1349          LLVMValueRef sampler0 =
1350             LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
1351          sampler0 = LLVMBuildLShr(ctx->builder, sampler0, LLVMConstInt(ctx->i32, 15, false), "");
1352          sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
1353          unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
1354          default_offset = LLVMConstReal(ctx->f32, -0.5);
1355       }
1356 
1357       bbs[0] = LLVMGetInsertBlock(ctx->builder);
1358       if (wa_8888 || unnorm) {
1359          assert(!(wa_8888 && unnorm));
1360          LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
1361          /* Skip the texture size query entirely if we don't need it. */
1362          ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
1363          bbs[1] = LLVMGetInsertBlock(ctx->builder);
1364       }
1365 
1366       /* Query the texture size. */
1367       resinfo.dim = ac_get_sampler_dim(ctx->gfx_level, instr->sampler_dim, instr->is_array);
1368       resinfo.opcode = ac_image_get_resinfo;
1369       resinfo.dmask = 0xf;
1370       resinfo.lod = ctx->i32_0;
1371       resinfo.resource = args->resource;
1372       resinfo.attributes = AC_ATTR_INVARIANT_LOAD;
1373       LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);
1374 
1375       /* Compute -0.5 / size. */
1376       for (unsigned c = 0; c < 2; c++) {
1377          half_texel[c] =
1378             LLVMBuildExtractElement(ctx->builder, size, LLVMConstInt(ctx->i32, c, 0), "");
1379          half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
1380          half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
1381          half_texel[c] =
1382             LLVMBuildFMul(ctx->builder, half_texel[c], LLVMConstReal(ctx->f32, -0.5), "");
1383       }
1384 
1385       if (wa_8888 || unnorm) {
1386          ac_build_endif(ctx, 2000);
1387 
1388          for (unsigned c = 0; c < 2; c++) {
1389             LLVMValueRef values[2] = {default_offset, half_texel[c]};
1390             half_texel[c] = ac_build_phi(ctx, ctx->f32, 2, values, bbs);
1391          }
1392       }
1393    }
1394 
1395    for (unsigned c = 0; c < 2; c++) {
1396       LLVMValueRef tmp;
1397       tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
1398       args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
1399    }
1400 
1401    args->attributes = AC_ATTR_INVARIANT_LOAD;
1402    result = ac_build_image_opcode(ctx, args);
1403 
1404    if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1405       LLVMValueRef tmp, tmp2;
1406 
1407       /* if the cube workaround is in place, f2i the result. */
1408       for (unsigned c = 0; c < 4; c++) {
1409          tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
1410          if (stype == nir_type_uint)
1411             tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
1412          else
1413             tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
1414          tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
1415          tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
1416          tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
1417          tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
1418          result =
1419             LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
1420       }
1421    }
1422    return result;
1423 }
1424 
1425 static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_tex_instr *instr,
1426                                         struct ac_image_args *args)
1427 {
1428    assert((!args->tfe || !args->d16) && "unsupported");
1429 
1430    if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
1431       unsigned mask = nir_def_components_read(&instr->def);
1432 
1433       /* Buffers don't support A16. */
1434       if (args->a16)
1435          args->coords[0] = LLVMBuildZExt(ctx->ac.builder, args->coords[0], ctx->ac.i32, "");
1436 
1437       return ac_build_buffer_load_format(&ctx->ac, args->resource, args->coords[0], ctx->ac.i32_0,
1438                                          util_last_bit(mask), 0, true,
1439                                          instr->def.bit_size == 16,
1440                                          args->tfe);
1441    }
1442 
1443    args->opcode = ac_image_sample;
1444 
1445    switch (instr->op) {
1446    case nir_texop_txf:
1447    case nir_texop_txf_ms:
1448       args->opcode = args->level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS
1449                         ? ac_image_load
1450                         : ac_image_load_mip;
1451       args->level_zero = false;
1452       break;
1453    case nir_texop_txs:
1454    case nir_texop_query_levels:
1455    case nir_texop_texture_samples:
1456       assert(!"should have been lowered");
1457       break;
1458    case nir_texop_tex:
1459       if (ctx->stage != MESA_SHADER_FRAGMENT &&
1460           (!gl_shader_stage_is_compute(ctx->stage) ||
1461            ctx->info->derivative_group == DERIVATIVE_GROUP_NONE)) {
1462          assert(!args->lod);
1463          args->level_zero = true;
1464       }
1465       break;
1466    case nir_texop_tg4:
1467       args->opcode = ac_image_gather4;
1468       if (!args->lod && !instr->is_gather_implicit_lod)
1469          args->level_zero = true;
1470       /* GFX11 supports implicit LOD, but the extension is unsupported. */
1471       assert(args->level_zero || ctx->ac.gfx_level < GFX11);
1472       break;
1473    case nir_texop_lod:
1474       args->opcode = ac_image_get_lod;
1475       break;
1476    case nir_texop_fragment_fetch_amd:
1477    case nir_texop_fragment_mask_fetch_amd:
1478       args->opcode = ac_image_load;
1479       args->level_zero = false;
1480       break;
1481    default:
1482       break;
1483    }
1484 
1485    /* MI200 doesn't have image_sample_lz, but image_sample behaves like lz. */
1486    if (!ctx->ac.info->has_3d_cube_border_color_mipmap)
1487       args->level_zero = false;
1488 
1489    if (instr->op == nir_texop_tg4 && ctx->ac.gfx_level <= GFX8 &&
1490        (instr->dest_type & (nir_type_int | nir_type_uint))) {
1491       return lower_gather4_integer(&ctx->ac, args, instr);
1492    }
1493 
1494    args->attributes = AC_ATTR_INVARIANT_LOAD;
1495    bool cs_derivs =
1496       gl_shader_stage_is_compute(ctx->stage) && ctx->info->derivative_group != DERIVATIVE_GROUP_NONE;
1497    if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
1498       /* Prevent texture instructions with implicit derivatives from being
1499        * sunk into branches. */
1500       switch (instr->op) {
1501       case nir_texop_tex:
1502       case nir_texop_txb:
1503       case nir_texop_lod:
1504          args->attributes |= AC_ATTR_CONVERGENT;
1505          break;
1506       default:
1507          break;
1508       }
1509    }
1510 
1511    return ac_build_image_opcode(&ctx->ac, args);
1512 }
1513 
1514 static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
1515                                          unsigned start, unsigned count)
1516 {
1517    LLVMValueRef mask[] = {ctx->i32_0, ctx->i32_1, LLVMConstInt(ctx->i32, 2, false),
1518                           LLVMConstInt(ctx->i32, 3, false)};
1519 
1520    unsigned src_elements = ac_get_llvm_num_components(src);
1521 
1522    if (count == src_elements) {
1523       assert(start == 0);
1524       return src;
1525    } else if (count == 1) {
1526       assert(start < src_elements);
1527       return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
1528    } else {
1529       assert(start + count <= src_elements);
1530       assert(count <= 4);
1531       LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
1532       return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
1533    }
1534 }
1535 
1536 static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
1537                                          const nir_intrinsic_instr *instr, nir_src src)
1538 {
1539    return enter_waterfall(ctx, wctx, get_src(ctx, src),
1540                           nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
1541 }
1542 
1543 static void visit_store_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1544 {
1545    LLVMValueRef src_data = get_src(ctx, instr->src[0]);
1546    int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
1547    unsigned writemask = nir_intrinsic_write_mask(instr);
1548    enum gl_access_qualifier access = ac_nir_get_mem_access_flags(instr);
1549 
1550    struct waterfall_context wctx;
1551    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);
1552 
1553    LLVMValueRef rsrc = ctx->abi->load_ssbo ?
1554       ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false) : rsrc_base;
1555 
1556    LLVMValueRef base_data = src_data;
1557    base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
1558    LLVMValueRef base_offset = get_src(ctx, instr->src[2]);
1559 
1560    while (writemask) {
1561       int start, count;
1562       LLVMValueRef data, offset;
1563       LLVMTypeRef data_type;
1564 
1565       u_bit_scan_consecutive_range(&writemask, &start, &count);
1566 
1567       if (count == 3 && elem_size_bytes != 4) {
1568          writemask |= 1 << (start + 2);
1569          count = 2;
1570       }
1571       int num_bytes = count * elem_size_bytes; /* count in bytes */
1572 
1573       /* We can only store 4 dwords at a time.
1574        * This case can only happen for 64-bit vectors. */
1575       if (num_bytes > 16) {
1576          writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
1577          count = 2;
1578          num_bytes = 16;
1579       }
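      /* For example, a masked store of four 64-bit components (32 bytes) emits the
       * first two components (16 bytes) now and re-queues the remaining two through
       * the writemask for the next loop iteration.
       */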
1580 
1581       /* Check the alignment of 16-bit stores. */
1582       if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
1583          writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1584          count = 1;
1585          num_bytes = 2;
1586       }
1587 
1588       /* Due to alignment issues, split stores of 8-bit/16-bit
1589        * vectors.
1590        */
1591       if (ctx->ac.gfx_level == GFX6 && count > 1 && elem_size_bytes < 4) {
1592          writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1593          count = 1;
1594          num_bytes = elem_size_bytes;
1595       }
1596 
1597       data = extract_vector_range(&ctx->ac, base_data, start, count);
1598 
1599       offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
1600                             LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");
1601 
1602       if (num_bytes == 1) {
1603          ac_build_buffer_store_byte(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, access);
1604       } else if (num_bytes == 2) {
1605          ac_build_buffer_store_short(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, access);
1606       } else {
1607          switch (num_bytes) {
1608          case 16: /* v4f32 */
1609             data_type = ctx->ac.v4f32;
1610             break;
1611          case 12: /* v3f32 */
1612             data_type = ctx->ac.v3f32;
1613             break;
1614          case 8: /* v2f32 */
1615             data_type = ctx->ac.v2f32;
1616             break;
1617          case 4: /* f32 */
1618             data_type = ctx->ac.f32;
1619             break;
1620          default:
1621             unreachable("Malformed vector store.");
1622          }
1623          data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
1624 
1625          ac_build_buffer_store_dword(&ctx->ac, rsrc, data, NULL, offset,
1626                                      ctx->ac.i32_0, access);
1627       }
1628    }
1629 
1630    exit_waterfall(ctx, &wctx, NULL);
1631 }
1632 
1633 static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx, LLVMValueRef descriptor,
1634                                            LLVMValueRef offset, LLVMValueRef compare,
1635                                            LLVMValueRef exchange, bool image)
1636 {
1637    LLVMBasicBlockRef start_block = NULL, then_block = NULL;
1638    if (ctx->abi->robust_buffer_access || image) {
1639       LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);
1640 
1641       LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
1642       start_block = LLVMGetInsertBlock(ctx->ac.builder);
1643 
1644       ac_build_ifcc(&ctx->ac, cond, -1);
1645 
1646       then_block = LLVMGetInsertBlock(ctx->ac.builder);
1647    }
1648 
1649    if (image)
1650       offset = LLVMBuildMul(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, 8, false), "");
1651 
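   /* Reconstruct the 64-bit base address from the first two descriptor dwords:
    * dword 0 holds the low 32 bits, and the low 16 bits of dword 1 are
    * sign-extended below to form the high 32 bits.
    */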
1652    LLVMValueRef ptr_parts[2] = {
1653       ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
1654       LLVMBuildAnd(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
1655                    LLVMConstInt(ctx->ac.i32, 65535, 0), "")};
1656 
1657    ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
1658    ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");
1659 
1660    offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");
1661 
1662    LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
1663    ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
1664    ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
1665    ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL),
1666                            "");
1667 
1668    LLVMValueRef result =
1669       ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
1670    result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
1671 
1672    if (ctx->abi->robust_buffer_access || image) {
1673       ac_build_endif(&ctx->ac, -1);
1674 
1675       LLVMBasicBlockRef incoming_blocks[2] = {
1676          start_block,
1677          then_block,
1678       };
1679 
1680       LLVMValueRef incoming_values[2] = {
1681          ctx->ac.i64_0,
1682          result,
1683       };
1684       LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
1685       LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
1686       return ret;
1687    } else {
1688       return result;
1689    }
1690 }
1691 
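/* Map a NIR atomic op to the operation name used in the amdgcn atomic intrinsics
 * built below, e.g. nir_atomic_op_imin becomes "smin" and ends up in a name like
 * "llvm.amdgcn.raw.buffer.atomic.smin.i32".
 */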
1692 static const char *
1693 translate_atomic_op_str(nir_atomic_op op)
1694 {
1695    switch (op) {
1696    case nir_atomic_op_iadd:     return "add";
1697    case nir_atomic_op_imin:     return "smin";
1698    case nir_atomic_op_umin:     return "umin";
1699    case nir_atomic_op_imax:     return "smax";
1700    case nir_atomic_op_umax:     return "umax";
1701    case nir_atomic_op_iand:     return "and";
1702    case nir_atomic_op_ior:      return "or";
1703    case nir_atomic_op_ixor:     return "xor";
1704    case nir_atomic_op_fadd:     return "fadd";
1705    case nir_atomic_op_fmin:     return "fmin";
1706    case nir_atomic_op_fmax:     return "fmax";
1707    case nir_atomic_op_xchg:     return "swap";
1708    case nir_atomic_op_cmpxchg:  return "cmpswap";
1709    case nir_atomic_op_inc_wrap: return "inc";
1710    case nir_atomic_op_dec_wrap: return "dec";
1711    case nir_atomic_op_ordered_add_gfx12_amd: return "ordered.add";
1712    default: abort();
1713    }
1714 }
1715 
1716 static LLVMAtomicRMWBinOp
1717 translate_atomic_op(nir_atomic_op op)
1718 {
1719    switch (op) {
1720    case nir_atomic_op_iadd: return LLVMAtomicRMWBinOpAdd;
1721    case nir_atomic_op_xchg: return LLVMAtomicRMWBinOpXchg;
1722    case nir_atomic_op_iand: return LLVMAtomicRMWBinOpAnd;
1723    case nir_atomic_op_ior:  return LLVMAtomicRMWBinOpOr;
1724    case nir_atomic_op_ixor: return LLVMAtomicRMWBinOpXor;
1725    case nir_atomic_op_umin: return LLVMAtomicRMWBinOpUMin;
1726    case nir_atomic_op_umax: return LLVMAtomicRMWBinOpUMax;
1727    case nir_atomic_op_imin: return LLVMAtomicRMWBinOpMin;
1728    case nir_atomic_op_imax: return LLVMAtomicRMWBinOpMax;
1729    case nir_atomic_op_fadd: return LLVMAtomicRMWBinOpFAdd;
1730    default: unreachable("Unexpected atomic");
1731    }
1732 }
1733 
1734 static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1735 {
1736    nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
1737    const char *op = translate_atomic_op_str(nir_op);
1738    bool is_float = nir_atomic_op_type(nir_op) == nir_type_float;
1739 
1740    LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
1741    char name[64], type[8];
1742    LLVMValueRef params[6], descriptor;
1743    LLVMValueRef result;
1744    int arg_count = 0;
1745 
1746    struct waterfall_context wctx;
1747    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
1748 
1749    descriptor = ctx->abi->load_ssbo ?
1750       ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false) : rsrc_base;
1751 
1752    if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap && return_type == ctx->ac.i64) {
1753       result = emit_ssbo_comp_swap_64(ctx, descriptor, get_src(ctx, instr->src[1]),
1754                                       get_src(ctx, instr->src[2]), get_src(ctx, instr->src[3]), false);
1755    } else {
1756       LLVMValueRef data = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
1757 
1758       if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap) {
1759          params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
1760       }
1761       if (is_float) {
1762          data = ac_to_float(&ctx->ac, data);
1763          return_type = LLVMTypeOf(data);
1764       }
1765 
1766       unsigned cache_flags =
1767          ac_get_hw_cache_flags(ctx->ac.gfx_level,
1768 			       ac_nir_get_mem_access_flags(instr) | ACCESS_TYPE_ATOMIC).value;
1769 
1770       params[arg_count++] = data;
1771       params[arg_count++] = descriptor;
1772       params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
1773       params[arg_count++] = ctx->ac.i32_0;               /* soffset */
1774       params[arg_count++] = LLVMConstInt(ctx->ac.i32, cache_flags, 0);
1775 
1776       ac_build_type_name_for_intr(return_type, type, sizeof(type));
1777       snprintf(name, sizeof(name), "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
1778 
1779       result = ac_build_intrinsic(&ctx->ac, name, return_type, params, arg_count, 0);
1780 
1781       if (is_float) {
1782          result = ac_to_integer(&ctx->ac, result);
1783       }
1784    }
1785 
1786    return exit_waterfall(ctx, &wctx, result);
1787 }
1788 
1789 static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1790 {
1791    struct waterfall_context wctx;
1792    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
1793 
1794    int elem_size_bytes = instr->def.bit_size / 8;
1795    int num_components = instr->num_components;
1796    enum gl_access_qualifier access = ac_nir_get_mem_access_flags(instr);
1797 
1798    LLVMValueRef offset = get_src(ctx, instr->src[1]);
1799    LLVMValueRef rsrc = ctx->abi->load_ssbo ?
1800       ctx->abi->load_ssbo(ctx->abi, rsrc_base, false, false) : rsrc_base;
1801 
1802    LLVMTypeRef def_type = get_def_type(ctx, &instr->def);
1803    LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
1804 
1805    LLVMValueRef results[4];
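   /* Split the load into chunks of at most 16 bytes (4 dwords). For example, a
    * 3-component 64-bit load (24 bytes) is done as a 16-byte load followed by an
    * 8-byte load, and the components are re-assembled afterwards.
    */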
1806    for (int i = 0; i < num_components;) {
1807       int num_elems = num_components - i;
1808       /* Multi-component subdword loads are lowered by ac_nir_lower_subdword_loads. */
1809       assert(elem_size_bytes >= 4 || num_elems == 1);
1810 
1811       if (num_elems * elem_size_bytes > 16)
1812          num_elems = 16 / elem_size_bytes;
1813       int load_bytes = num_elems * elem_size_bytes;
1814 
1815       LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);
1816       LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, offset, immoffset, "");
1817 
1818       LLVMValueRef ret;
1819 
1820       if (load_bytes == 1) {
1821          ret = ac_build_buffer_load_byte(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
1822                                           access);
1823       } else if (load_bytes == 2) {
1824          ret = ac_build_buffer_load_short(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
1825                                            access);
1826       } else {
1827          assert(elem_size_bytes >= 4);
1828          int num_channels = load_bytes / 4;
1829          bool can_speculate = access & ACCESS_CAN_REORDER;
1830 
1831          ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels, NULL, voffset, ctx->ac.i32_0,
1832                                     ctx->ac.f32, access, can_speculate, false);
1833       }
1834 
1835       LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
1836       ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");
1837 
1838       for (unsigned j = 0; j < num_elems; j++) {
1839          results[i + j] =
1840             LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
1841       }
1842       i += num_elems;
1843    }
1844 
1845    LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
1846    return exit_waterfall(ctx, &wctx, ret);
1847 }
1848 
1849 static LLVMValueRef enter_waterfall_ubo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
1850                                         const nir_intrinsic_instr *instr)
1851 {
1852    return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
1853                           nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
1854 }
1855 
1856 static LLVMValueRef get_global_address(struct ac_nir_context *ctx,
1857                                        nir_intrinsic_instr *instr,
1858                                        LLVMTypeRef type)
1859 {
1860    bool is_store = instr->intrinsic == nir_intrinsic_store_global_amd;
1861    LLVMValueRef addr = get_src(ctx, instr->src[is_store ? 1 : 0]);
1862 
1863    LLVMTypeRef ptr_type = LLVMPointerType(type, AC_ADDR_SPACE_GLOBAL);
1864 
1865    uint32_t base = nir_intrinsic_base(instr);
1866    unsigned num_src = nir_intrinsic_infos[instr->intrinsic].num_srcs;
1867    LLVMValueRef offset = get_src(ctx, instr->src[num_src - 1]);
1868    offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");
1869 
1870    LLVMTypeRef i8_ptr_type = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_GLOBAL);
1871    addr = LLVMBuildIntToPtr(ctx->ac.builder, addr, i8_ptr_type, "");
1872    addr = LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "");
1873    return LLVMBuildPointerCast(ctx->ac.builder, addr, ptr_type, "");
1874 }
1875 
1876 static LLVMValueRef visit_load_global(struct ac_nir_context *ctx,
1877                                       nir_intrinsic_instr *instr)
1878 {
1879    LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
1880    LLVMValueRef val;
1881    LLVMValueRef addr = get_global_address(ctx, instr, result_type);
1882 
1883    val = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
1884 
1885    if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
1886       LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
1887       LLVMSetAlignment(val, ac_get_type_size(result_type));
1888    }
1889 
1890    return val;
1891 }
1892 
1893 static void visit_store_global(struct ac_nir_context *ctx,
1894 				     nir_intrinsic_instr *instr)
1895 {
1896    LLVMValueRef data = get_src(ctx, instr->src[0]);
1897    LLVMTypeRef type = LLVMTypeOf(data);
1898    LLVMValueRef addr = get_global_address(ctx, instr, type);
1899    LLVMValueRef val;
1900    /* nir_opt_shrink_stores should be enough to simplify the writemask. Store writemasks should
1901     * have no holes.
1902     */
1903    assert(nir_intrinsic_write_mask(instr) == BITFIELD_MASK(instr->src[0].ssa->num_components));
1904 
1905    val = LLVMBuildStore(ctx->ac.builder, data, addr);
1906 
1907    if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
1908       LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
1909       LLVMSetAlignment(val, ac_get_type_size(type));
1910    }
1911 }
1912 
1913 static LLVMValueRef visit_global_atomic(struct ac_nir_context *ctx,
1914 					nir_intrinsic_instr *instr)
1915 {
1916    LLVMValueRef data = get_src(ctx, instr->src[1]);
1917    LLVMAtomicRMWBinOp op;
1918    LLVMValueRef result;
1919 
1920    /* use "singlethread" sync scope to implement relaxed ordering */
1921    const char *sync_scope = "singlethread-one-as";
1922 
1923    nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
1924    bool is_float = nir_atomic_op_type(nir_op) == nir_type_float;
1925 
1926    LLVMTypeRef data_type = LLVMTypeOf(data);
1927 
1928    assert(instr->src[1].ssa->num_components == 1);
1929    if (is_float) {
1930       switch (instr->src[1].ssa->bit_size) {
1931       case 32:
1932          data_type = ctx->ac.f32;
1933          break;
1934       case 64:
1935          data_type = ctx->ac.f64;
1936          break;
1937       default:
1938          unreachable("Unsupported float bit size");
1939       }
1940 
1941       data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
1942    }
1943 
1944    LLVMValueRef addr = get_global_address(ctx, instr, data_type);
1945 
1946    if (instr->intrinsic == nir_intrinsic_global_atomic_swap_amd) {
1947       LLVMValueRef data1 = get_src(ctx, instr->src[2]);
1948       result = ac_build_atomic_cmp_xchg(&ctx->ac, addr, data, data1, sync_scope);
1949       result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
1950    } else if (nir_op == nir_atomic_op_ordered_add_gfx12_amd) {
1951       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.global.atomic.ordered.add.b64", ctx->ac.i64,
1952                                   (LLVMValueRef[]){addr, data}, 2, 0);
1953    } else if (is_float) {
1954       const char *op = translate_atomic_op_str(nir_op);
1955       char name[64], type[8];
1956       LLVMValueRef params[2];
1957       int arg_count = 0;
1958 
1959       params[arg_count++] = addr;
1960       params[arg_count++] = data;
1961 
1962       ac_build_type_name_for_intr(data_type, type, sizeof(type));
1963       snprintf(name, sizeof(name), "llvm.amdgcn.global.atomic.%s.%s.p1.%s", op, type, type);
1964 
1965       result = ac_build_intrinsic(&ctx->ac, name, data_type, params, arg_count, 0);
1966    } else {
1967       op = translate_atomic_op(nir_op);
1968       result = ac_build_atomic_rmw(&ctx->ac, op, addr, ac_to_integer(&ctx->ac, data), sync_scope);
1969    }
1970 
1971    result = ac_to_integer(&ctx->ac, result);
1972 
1973    return result;
1974 }
1975 
1976 static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1977 {
1978    struct waterfall_context wctx;
1979    LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);
1980 
1981    LLVMValueRef ret;
1982    LLVMValueRef rsrc = rsrc_base;
1983    LLVMValueRef offset = get_src(ctx, instr->src[1]);
1984    int num_components = instr->num_components;
1985 
1986    assert(instr->def.bit_size >= 32 && instr->def.bit_size % 32 == 0);
1987 
1988    if (ctx->abi->load_ubo)
1989       rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);
1990 
1991    /* Convert to a 32-bit load. */
1992    if (instr->def.bit_size == 64)
1993       num_components *= 2;
1994 
1995    ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset, NULL,
1996                               ctx->ac.f32, 0, true, true);
1997    ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
1998 
1999    return exit_waterfall(ctx, &wctx, ret);
2000 }
2001 
2002 static void visit_store_output(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2003 {
2004    unsigned base = nir_intrinsic_base(instr);
2005    unsigned writemask = nir_intrinsic_write_mask(instr);
2006    unsigned component = nir_intrinsic_component(instr);
2007    LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
2008    ASSERTED unsigned bit_size = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src));
2009    ASSERTED nir_src offset = *nir_get_io_offset_src(instr);
2010 
2011    /* Non-monolithic PS and also LS before TCS in radeonsi use this to forward outputs to
2012     * registers.
2013     */
2014    assert(bit_size == 16 || bit_size == 32);
2015    /* No indirect indexing is allowed here. */
2016    assert(nir_src_is_const(offset) && nir_src_as_uint(offset) == 0);
2017 
2018    writemask <<= component;
2019 
2020    for (unsigned chan = 0; chan < 8; chan++) {
2021       if (!(writemask & (1 << chan)))
2022          continue;
2023 
2024       LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
2025       LLVMValueRef output_addr = ctx->abi->outputs[base * 4 + chan];
2026 
2027       if (!ctx->abi->is_16bit[base * 4 + chan] &&
2028           LLVMTypeOf(value) == ctx->ac.f16) {
2029          LLVMValueRef output, index;
2030 
2031          /* Insert the 16-bit value into the low or high bits of the 32-bit output
2032           * using read-modify-write.
2033           */
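         /* high_16bits selects element 1 of the v2f16 vector (the upper half of the
          * 32-bit slot); element 0 is used otherwise.
          */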
2034          index = LLVMConstInt(ctx->ac.i32, nir_intrinsic_io_semantics(instr).high_16bits, 0);
2035 
2036          output = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.v2f16, output_addr, "");
2037          output = LLVMBuildInsertElement(ctx->ac.builder, output, value, index, "");
2038          value = LLVMBuildBitCast(ctx->ac.builder, output, ctx->ac.f32, "");
2039       }
2040       LLVMBuildStore(ctx->ac.builder, value, output_addr);
2041    }
2042 }
2043 
2044 static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
2045 {
2046    switch (dim) {
2047    case GLSL_SAMPLER_DIM_BUF:
2048       return 1;
2049    case GLSL_SAMPLER_DIM_1D:
2050       return array ? 2 : 1;
2051    case GLSL_SAMPLER_DIM_2D:
2052       return array ? 3 : 2;
2053    case GLSL_SAMPLER_DIM_MS:
2054       return array ? 4 : 3;
2055    case GLSL_SAMPLER_DIM_3D:
2056    case GLSL_SAMPLER_DIM_CUBE:
2057       return 3;
2058    case GLSL_SAMPLER_DIM_RECT:
2059    case GLSL_SAMPLER_DIM_SUBPASS:
2060       return 2;
2061    case GLSL_SAMPLER_DIM_SUBPASS_MS:
2062       return 3;
2063    default:
2064       break;
2065    }
2066    return 0;
2067 }
2068 
2069 static void get_image_coords(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2070                              LLVMValueRef dynamic_desc_index, struct ac_image_args *args,
2071                              enum glsl_sampler_dim dim, bool is_array)
2072 {
2073    LLVMValueRef src0 = get_src(ctx, instr->src[1]);
2074    int count;
2075    ASSERTED bool add_frag_pos =
2076       (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2077    bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2078    bool gfx9_1d = ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
2079    assert(!add_frag_pos && "Input attachments should be lowered by this point.");
2080    count = image_type_to_components_count(dim, is_array);
2081 
2082    if (count == 1 && !gfx9_1d) {
2083       if (instr->src[1].ssa->num_components)
2084          args->coords[0] = ac_llvm_extract_elem(&ctx->ac, src0, 0);
2085       else
2086          args->coords[0] = src0;
2087    } else {
2088       int chan;
2089       if (is_ms)
2090          count--;
2091       for (chan = 0; chan < count; ++chan) {
2092          args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
2093       }
2094 
2095       if (gfx9_1d) {
2096          if (is_array)
2097             args->coords[2] = args->coords[1];
2098          args->coords[1] = LLVMConstInt(LLVMTypeOf(args->coords[0]), 0, 0);
2099          count++;
2100       }
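      /* On GFX9, 1D images are addressed as 2D with y forced to 0, so the array
       * slice (if any) moves from the second to the third coordinate.
       */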
2101       if (ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_2D && !is_array) {
2102          /* The hw can't bind a slice of a 3D image as a 2D
2103           * image, because it ignores BASE_ARRAY if the target
2104           * is 3D. The workaround is to read BASE_ARRAY and set
2105           * it as the 3rd address operand for all 2D images.
2106           */
2107          LLVMValueRef first_layer, const5, mask;
2108 
2109          const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
2110          mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
2111          first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
2112          first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");
2113 
2114          if (instr->intrinsic == nir_intrinsic_bindless_image_load ||
2115              instr->intrinsic == nir_intrinsic_bindless_image_sparse_load ||
2116              instr->intrinsic == nir_intrinsic_bindless_image_store) {
2117             int lod_index = instr->intrinsic == nir_intrinsic_bindless_image_store ? 4 : 3;
2118             bool has_lod = !nir_src_is_const(instr->src[lod_index]) ||
2119                            nir_src_as_uint(instr->src[lod_index]) != 0;
2120             if (has_lod) {
2121                /* If there's a lod parameter, it matters whether the image is 3D or 2D,
2122                 * because the hw reads either the fourth or the third component as the lod.
2123                 * So detect 3D images and place the lod in the third component otherwise.
2124                 */
2125                LLVMValueRef const3, const28, const4, rword3, type3d, type, is_3d, lod;
2126                const3 = LLVMConstInt(ctx->ac.i32, 3, 0);
2127                const28 = LLVMConstInt(ctx->ac.i32, 28, 0);
2128                const4 = LLVMConstInt(ctx->ac.i32, 4, 0);
2129                type3d = LLVMConstInt(ctx->ac.i32, V_008F1C_SQ_RSRC_IMG_3D, 0);
2130                rword3 = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const3, "");
2131                type = ac_build_bfe(&ctx->ac, rword3, const28, const4, false);
2132                is_3d = emit_int_cmp(&ctx->ac, LLVMIntEQ, type, type3d);
2133                lod = get_src(ctx, instr->src[lod_index]);
2134                first_layer = emit_bcsel(&ctx->ac, is_3d, first_layer, lod);
2135             }
2136          }
2137 
2138          args->coords[count] = LLVMBuildTrunc(ctx->ac.builder, first_layer,
2139                                               LLVMTypeOf(args->coords[0]), "");
2140          count++;
2141       }
2142 
2143       if (is_ms) {
2144          /* sample index */
2145          args->coords[count] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
2146          count++;
2147       }
2148    }
2149 }
2150 
2151 static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
2152                                           struct waterfall_context *wctx,
2153                                           const nir_intrinsic_instr *instr)
2154 {
2155    /* src0 is the descriptor when uniform, or the descriptor index when non-uniform. */
2156    LLVMValueRef value = get_src(ctx, instr->src[0]);
2157 
2158    return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
2159 }
2160 
2161 static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2162 {
2163    LLVMValueRef res;
2164 
2165    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2166    enum gl_access_qualifier access = nir_intrinsic_access(instr);
2167    bool is_array = nir_intrinsic_image_array(instr);
2168 
2169    struct waterfall_context wctx;
2170    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2171 
2172    struct ac_image_args args = {0};
2173 
2174    args.access = ac_nir_get_mem_access_flags(instr);
2175    args.tfe = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
2176 
2177    if (dim == GLSL_SAMPLER_DIM_BUF) {
2178       unsigned num_channels = util_last_bit(nir_def_components_read(&instr->def));
2179       if (instr->def.bit_size == 64)
2180          num_channels = num_channels < 4 ? 2 : 4;
2181       LLVMValueRef rsrc, vindex;
2182 
2183       rsrc = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2184       vindex =
2185          LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2186 
2187       bool can_speculate = access & ACCESS_CAN_REORDER;
2188       res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
2189                                         args.access, can_speculate,
2190                                         instr->def.bit_size == 16,
2191                                         args.tfe);
2192       res = ac_build_expand(&ctx->ac, res, num_channels, args.tfe ? 5 : 4);
2193 
2194       res = ac_trim_vector(&ctx->ac, res, instr->def.num_components);
2195       res = ac_to_integer(&ctx->ac, res);
2196    } else if (instr->intrinsic == nir_intrinsic_bindless_image_fragment_mask_load_amd) {
2197       assert(ctx->ac.gfx_level < GFX11);
2198 
2199       args.opcode = ac_image_load;
2200       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_FMASK);
2201       get_image_coords(ctx, instr, dynamic_index, &args, GLSL_SAMPLER_DIM_2D, is_array);
2202       args.dmask = 0x1;
2203       args.dim = is_array ? ac_image_2darray : ac_image_2d;
2204       args.attributes = AC_ATTR_INVARIANT_LOAD;
2205       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2206 
2207       res = ac_build_image_opcode(&ctx->ac, &args);
2208    } else {
2209       bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
2210 
2211       args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
2212       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2213       get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2214       args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2215       if (!level_zero)
2216          args.lod = get_src(ctx, instr->src[3]);
2217       /* TODO: Fix in LLVM. LLVM doesn't reduce DMASK for D16 if optimization barriers are
2218        * present, even if the vector is trimmed before the optimization barriers.
2219        */
2220       args.dmask = BITFIELD_MASK(instr->def.num_components);
2221       args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
2222       args.d16 = instr->def.bit_size == 16;
2223       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2224 
2225       res = ac_build_image_opcode(&ctx->ac, &args);
2226    }
2227 
2228    if (instr->def.bit_size == 64) {
2229       LLVMValueRef code = NULL;
2230       if (args.tfe) {
2231          code = ac_llvm_extract_elem(&ctx->ac, res, 4);
2232          res = ac_trim_vector(&ctx->ac, res, 4);
2233       }
2234 
2235       res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i64, 2), "");
2236       LLVMValueRef x = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_0, "");
2237       LLVMValueRef w = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_1, "");
2238 
2239       if (code)
2240          code = LLVMBuildZExt(ctx->ac.builder, code, ctx->ac.i64, "");
2241       LLVMValueRef values[5] = {x, ctx->ac.i64_0, ctx->ac.i64_0, w, code};
2242       res = ac_build_gather_values(&ctx->ac, values, 4 + args.tfe);
2243    }
2244 
2245    if (instr->def.num_components < 4)
2246       res = ac_trim_vector(&ctx->ac, res, instr->def.num_components);
2247 
2248    return exit_waterfall(ctx, &wctx, res);
2249 }
2250 
2251 static void visit_image_store(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2252 {
2253    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2254    bool is_array = nir_intrinsic_image_array(instr);
2255 
2256    struct waterfall_context wctx;
2257    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2258 
2259    struct ac_image_args args = {0};
2260    args.access = ac_nir_get_mem_access_flags(instr);
2261 
2262    LLVMValueRef src = get_src(ctx, instr->src[3]);
2263    if (instr->src[3].ssa->bit_size == 64) {
2264       /* only R64_UINT and R64_SINT supported */
2265       src = ac_llvm_extract_elem(&ctx->ac, src, 0);
2266       src = LLVMBuildBitCast(ctx->ac.builder, src, ctx->ac.v2f32, "");
2267    } else {
2268       src = ac_to_float(&ctx->ac, src);
2269    }
2270 
2271    if (dim == GLSL_SAMPLER_DIM_BUF) {
2272       LLVMValueRef rsrc = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2273       unsigned src_channels = ac_get_llvm_num_components(src);
2274       LLVMValueRef vindex;
2275 
2276       if (src_channels == 3)
2277          src = ac_build_expand_to_vec4(&ctx->ac, src, 3);
2278 
2279       vindex =
2280          LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2281 
2282       ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex, ctx->ac.i32_0, args.access);
2283    } else {
2284       bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
2285 
2286       args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
2287       args.data[0] = src;
2288       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2289       get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2290       args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2291       if (!level_zero)
2292          args.lod = get_src(ctx, instr->src[4]);
2293       args.dmask = 15;
2294       args.d16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.data[0])) == 16;
2295       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2296 
2297       ac_build_image_opcode(&ctx->ac, &args);
2298    }
2299 
2300    exit_waterfall(ctx, &wctx, NULL);
2301 }
2302 
2303 static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2304 {
2305    LLVMValueRef params[7];
2306    int param_count = 0;
2307 
2308    nir_atomic_op op = nir_intrinsic_atomic_op(instr);
2309    bool cmpswap = op == nir_atomic_op_cmpxchg;
2310    const char *atomic_name = translate_atomic_op_str(op);
2311    char intrinsic_name[64];
2312    enum ac_atomic_op atomic_subop;
2313    ASSERTED int length;
2314 
2315    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
2316    bool is_array = nir_intrinsic_image_array(instr);
2317 
2318    struct waterfall_context wctx;
2319    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2320 
2321    switch (op) {
2322    case nir_atomic_op_iadd:
2323       atomic_subop = ac_atomic_add;
2324       break;
2325    case nir_atomic_op_imin:
2326       atomic_subop = ac_atomic_smin;
2327       break;
2328    case nir_atomic_op_umin:
2329       atomic_subop = ac_atomic_umin;
2330       break;
2331    case nir_atomic_op_imax:
2332       atomic_subop = ac_atomic_smax;
2333       break;
2334    case nir_atomic_op_umax:
2335       atomic_subop = ac_atomic_umax;
2336       break;
2337    case nir_atomic_op_iand:
2338       atomic_subop = ac_atomic_and;
2339       break;
2340    case nir_atomic_op_ior:
2341       atomic_subop = ac_atomic_or;
2342       break;
2343    case nir_atomic_op_ixor:
2344       atomic_subop = ac_atomic_xor;
2345       break;
2346    case nir_atomic_op_xchg:
2347       atomic_subop = ac_atomic_swap;
2348       break;
2349    case nir_atomic_op_cmpxchg:
2350       atomic_subop = 0; /* not used */
2351       break;
2352    case nir_atomic_op_inc_wrap:
2353       atomic_subop = ac_atomic_inc_wrap;
2354       break;
2355    case nir_atomic_op_dec_wrap:
2356       atomic_subop = ac_atomic_dec_wrap;
2357       break;
2358    case nir_atomic_op_fadd:
2359       atomic_subop = ac_atomic_fmin; /* Non-buffer fadd atomics are not supported. */
2360       break;
2361    case nir_atomic_op_fmin:
2362       atomic_subop = ac_atomic_fmin;
2363       break;
2364    case nir_atomic_op_fmax:
2365       atomic_subop = ac_atomic_fmax;
2366       break;
2367    default:
2368       abort();
2369    }
2370 
2371    if (cmpswap)
2372       params[param_count++] = get_src(ctx, instr->src[4]);
2373    params[param_count++] = get_src(ctx, instr->src[3]);
2374 
2375    if (atomic_subop == ac_atomic_fmin || atomic_subop == ac_atomic_fmax)
2376       params[0] = ac_to_float(&ctx->ac, params[0]);
2377 
2378    LLVMValueRef result;
2379    if (dim == GLSL_SAMPLER_DIM_BUF) {
2380       params[param_count++] = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_BUFFER);
2381       params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
2382                                                       ctx->ac.i32_0, ""); /* vindex */
2383       params[param_count++] = ctx->ac.i32_0;                              /* voffset */
2384       if (cmpswap && instr->def.bit_size == 64) {
2385          result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true);
2386       } else {
2387          LLVMTypeRef data_type = LLVMTypeOf(params[0]);
2388          char type[8];
2389          unsigned cache_flags =
2390             ac_get_hw_cache_flags(ctx->ac.gfx_level,
2391 				  ac_nir_get_mem_access_flags(instr) | ACCESS_TYPE_ATOMIC).value;
2392 
2393          params[param_count++] = ctx->ac.i32_0; /* soffset */
2394          params[param_count++] = LLVMConstInt(ctx->ac.i32, cache_flags, 0);
2395 
2396          ac_build_type_name_for_intr(data_type, type, sizeof(type));
2397          length = snprintf(intrinsic_name, sizeof(intrinsic_name),
2398                            "llvm.amdgcn.struct.buffer.atomic.%s.%s",
2399                            atomic_name, type);
2400 
2401          assert(length < sizeof(intrinsic_name));
2402          result = ac_build_intrinsic(&ctx->ac, intrinsic_name, LLVMTypeOf(params[0]), params, param_count, 0);
2403       }
2404    } else {
2405       struct ac_image_args args = {0};
2406       args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
2407       args.atomic = atomic_subop;
2408       args.data[0] = params[0];
2409       if (cmpswap)
2410          args.data[1] = params[1];
2411       args.resource = ctx->abi->load_sampler_desc(ctx->abi, dynamic_index, AC_DESC_IMAGE);
2412       get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2413       args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2414       args.a16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.coords[0])) == 16;
2415       args.access = ac_nir_get_mem_access_flags(instr);
2416 
2417       result = ac_build_image_opcode(&ctx->ac, &args);
2418    }
2419 
2420    return exit_waterfall(ctx, &wctx, result);
2421 }
2422 
2423 static void emit_discard(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2424 {
2425    LLVMValueRef cond;
2426 
2427    if (instr->intrinsic == nir_intrinsic_terminate_if) {
2428       cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2429    } else {
2430       assert(instr->intrinsic == nir_intrinsic_terminate);
2431       cond = ctx->ac.i1false;
2432    }
2433 
2434    ac_build_kill_if_false(&ctx->ac, cond);
2435 }
2436 
2437 static void emit_demote(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2438 {
2439    LLVMValueRef cond;
2440 
2441    if (instr->intrinsic == nir_intrinsic_demote_if) {
2442       cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2443    } else {
2444       assert(instr->intrinsic == nir_intrinsic_demote);
2445       cond = ctx->ac.i1false;
2446    }
2447 
2448    /* This demotes the pixel if the condition is false. */
2449    ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wqm.demote", ctx->ac.voidt, &cond, 1, 0);
2450 }
2451 
2452 static LLVMValueRef visit_first_invocation(struct ac_nir_context *ctx)
2453 {
2454    LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
2455    const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";
2456 
2457    /* The second argument is whether cttz(0) should be defined, but we do not care. */
2458    LLVMValueRef args[] = {active_set, ctx->ac.i1false};
2459    LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr, ctx->ac.iN_wavemask, args, 2, 0);
2460 
2461    return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
2462 }
2463 
2464 static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2465 {
2466    LLVMValueRef values[16], derived_ptr, index, ret;
2467    unsigned const_off = nir_intrinsic_base(instr);
2468 
2469    LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
2470    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], const_off);
2471 
2472    for (int chan = 0; chan < instr->num_components; chan++) {
2473       index = LLVMConstInt(ctx->ac.i32, chan, 0);
2474       derived_ptr = LLVMBuildGEP2(ctx->ac.builder, elem_type, ptr, &index, 1, "");
2475       values[chan] = LLVMBuildLoad2(ctx->ac.builder, elem_type, derived_ptr, "");
2476    }
2477 
2478    ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
2479 
2480    return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2481 }
2482 
2483 static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2484 {
2485    LLVMValueRef derived_ptr, data, index;
2486    LLVMBuilderRef builder = ctx->ac.builder;
2487 
2488    unsigned const_off = nir_intrinsic_base(instr);
2489    LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
2490    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1], const_off);
2491    LLVMValueRef src = get_src(ctx, instr->src[0]);
2492 
2493    int writemask = nir_intrinsic_write_mask(instr);
2494    for (int chan = 0; chan < 16; chan++) {
2495       if (!(writemask & (1 << chan))) {
2496          continue;
2497       }
2498       data = ac_llvm_extract_elem(&ctx->ac, src, chan);
2499       index = LLVMConstInt(ctx->ac.i32, chan, 0);
2500       derived_ptr = LLVMBuildGEP2(builder, elem_type, ptr, &index, 1, "");
2501       LLVMBuildStore(builder, data, derived_ptr);
2502    }
2503 }
2504 
2505 static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx,
2506                                            const nir_intrinsic_instr *instr)
2507 {
2508    LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
2509    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0);
2510 
2511    LLVMValueRef values[2];
2512    uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
2513    unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
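   /* With the st64 bit set the two offsets are in units of 64 elements, so e.g.
    * offset1 == 1 addresses element 64 relative to the base pointer.
    */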
2514    for (unsigned i = 0; i < 2; i++) {
2515       LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
2516       LLVMValueRef derived_ptr = LLVMBuildGEP2(ctx->ac.builder, pointee_type, ptr, &index, 1, "");
2517       values[i] = LLVMBuildLoad2(ctx->ac.builder, pointee_type, derived_ptr, "");
2518    }
2519 
2520    LLVMValueRef ret = ac_build_gather_values(&ctx->ac, values, 2);
2521    return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
2522 }
2523 
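/* Store counterpart of visit_load_shared2_amd: write the two source channels
 * to element offsets offset0 and offset1, scaled the same way.
 */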
static void visit_store_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
{
   LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1], 0);
   LLVMValueRef src = get_src(ctx, instr->src[0]);

   uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
   unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
   for (unsigned i = 0; i < 2; i++) {
      LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
      LLVMValueRef derived_ptr = LLVMBuildGEP2(ctx->ac.builder, pointee_type, ptr, &index, 1, "");
      LLVMBuildStore(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, src, i), derived_ptr);
   }
}

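/* Lower a NIR shared-memory atomic. cmpxchg extracts the old value from the
 * {value, success} result pair, float min/max go through the llvm.amdgcn.ds.*
 * intrinsics, and everything else becomes a plain LLVM atomicrmw using the
 * workgroup-one-as sync scope.
 */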
static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
                                     LLVMValueRef ptr, int src_idx)
{
   LLVMValueRef result;
   LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
   nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);

   const char *sync_scope = "workgroup-one-as";

   if (nir_op == nir_atomic_op_cmpxchg) {
      LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
      result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
      result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
   } else if (nir_op == nir_atomic_op_fmin || nir_op == nir_atomic_op_fmax) {
      const char *op = translate_atomic_op_str(nir_op);
      char name[64], type[8];
      LLVMValueRef params[5];
      LLVMTypeRef src_type;
      int arg_count = 0;

      src = ac_to_float(&ctx->ac, src);
      src_type = LLVMTypeOf(src);

      params[arg_count++] = ptr;
      params[arg_count++] = src;
      params[arg_count++] = ctx->ac.i32_0;
      params[arg_count++] = ctx->ac.i32_0;
      params[arg_count++] = ctx->ac.i1false;

      ac_build_type_name_for_intr(src_type, type, sizeof(type));
      snprintf(name, sizeof(name), "llvm.amdgcn.ds.%s.%s", op, type);

      result = ac_build_intrinsic(&ctx->ac, name, src_type, params, arg_count, 0);
      result = ac_to_integer(&ctx->ac, result);
   } else {
      LLVMAtomicRMWBinOp op = translate_atomic_op(nir_op);
      LLVMValueRef val;

      if (nir_op == nir_atomic_op_fadd) {
         val = ac_to_float(&ctx->ac, src);
      } else {
         val = ac_to_integer(&ctx->ac, src);
      }

      result = ac_build_atomic_rmw(&ctx->ac, op, ptr, val, sync_scope);

      if (nir_op == nir_atomic_op_fadd) {
         result = ac_to_integer(&ctx->ac, result);
      }
   }

   return result;
}

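/* Interpolate a fragment shader input at the (i, j) barycentrics packed in
 * interp_param. 16-bit attributes take the f16 interp path and can select the
 * high half of the attribute dword.
 */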
static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx, LLVMValueRef interp_param,
                                            unsigned index, unsigned comp_start,
                                            unsigned num_components, unsigned bitsize,
                                            bool high_16bits)
{
   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
   LLVMValueRef interp_param_f;

   interp_param_f = LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2f32, "");
   LLVMValueRef i = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_0, "");
   LLVMValueRef j = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_1, "");

   /* Workaround for issue 2647: kill threads with infinite interpolation coeffs */
   if (ctx->verified_interp && !_mesa_hash_table_search(ctx->verified_interp, interp_param)) {
      LLVMValueRef cond = ac_build_is_inf_or_nan(&ctx->ac, i);
      ac_build_kill_if_false(&ctx->ac, LLVMBuildNot(ctx->ac.builder, cond, ""));
      _mesa_hash_table_insert(ctx->verified_interp, interp_param, interp_param);
   }

   LLVMValueRef values[4];
   assert(bitsize == 16 || bitsize == 32);
   for (unsigned comp = 0; comp < num_components; comp++) {
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
      if (bitsize == 16) {
         values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
                                               ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j,
                                               high_16bits);
      } else {
         values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
                                           ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      }
   }

   return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
}

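/* Load an input without interpolation. TCS inputs come from VGPRs via the
 * ABI callback; FS inputs are fetched with ac_build_fs_interp_mov, from
 * vertex P0 by default or from the vertex selected by load_input_vertex.
 */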
static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
{
   LLVMValueRef values[8];
   LLVMTypeRef dest_type = get_def_type(ctx, &instr->def);
   unsigned base = nir_intrinsic_base(instr);
   unsigned component = nir_intrinsic_component(instr);
   unsigned count = instr->def.num_components;
   nir_src offset = *nir_get_io_offset_src(instr);

   assert(instr->def.bit_size == 16 || instr->def.bit_size == 32);
   /* No indirect indexing allowed. */
   assert(nir_src_is_const(offset) && nir_src_as_uint(offset) == 0);

   /* This is used to load TCS inputs from VGPRs in radeonsi. */
   if (ctx->stage == MESA_SHADER_TESS_CTRL) {
      LLVMTypeRef component_type = LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind ?
                                      LLVMGetElementType(dest_type) : dest_type;

      LLVMValueRef result = ctx->abi->load_tess_varyings(ctx->abi, component_type,
                                                         base, component, count);
      if (instr->def.bit_size == 16) {
         result = ac_to_integer(&ctx->ac, result);
         result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
      }
      return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
   }

   assert(ctx->stage == MESA_SHADER_FRAGMENT);
   unsigned vertex_id = 0; /* P0 */

   if (instr->intrinsic == nir_intrinsic_load_input_vertex)
      vertex_id = nir_src_as_uint(instr->src[0]);

   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, base, false);

   for (unsigned chan = 0; chan < count; chan++) {
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
      values[chan] = ac_build_fs_interp_mov(&ctx->ac, vertex_id, llvm_chan, attr_number,
                                            ac_get_arg(&ctx->ac, ctx->args->prim_mask));
      values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
      if (instr->def.bit_size == 16 &&
          nir_intrinsic_io_semantics(instr).high_16bits)
         values[chan] = LLVMBuildLShr(ctx->ac.builder, values[chan], LLVMConstInt(ctx->ac.i32, 16, 0), "");
      values[chan] =
         LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
                                 instr->def.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
   }

   LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, count);
   return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}

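/* Translate one NIR intrinsic to LLVM IR. The result, if any, is recorded in
 * ctx->ssa_defs. Returns false on an unknown intrinsic so the caller can
 * report the failure.
 */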
static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
{
   LLVMValueRef result = NULL;

   switch (instr->intrinsic) {
   case nir_intrinsic_ddx:
   case nir_intrinsic_ddy:
   case nir_intrinsic_ddx_fine:
   case nir_intrinsic_ddy_fine:
   case nir_intrinsic_ddx_coarse:
   case nir_intrinsic_ddy_coarse:
      result = emit_ddxy(ctx, instr->intrinsic, get_src(ctx, instr->src[0]));
      result = ac_to_integer(&ctx->ac, result);
      break;
   case nir_intrinsic_ballot:
   case nir_intrinsic_ballot_relaxed:
      result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
      if (instr->def.bit_size > ctx->ac.wave_size) {
         LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
         result = LLVMBuildZExt(ctx->ac.builder, result, dest_type, "");
      }
      break;
   case nir_intrinsic_inverse_ballot: {
      LLVMValueRef src = get_src(ctx, instr->src[0]);
      if (instr->src[0].ssa->bit_size > ctx->ac.wave_size) {
         LLVMTypeRef src_type = LLVMIntTypeInContext(ctx->ac.context, ctx->ac.wave_size);
         src = LLVMBuildTrunc(ctx->ac.builder, src, src_type, "");
      }
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.inverse.ballot", ctx->ac.i1, &src, 1, 0);
      break;
   }
   case nir_intrinsic_read_invocation:
      result =
         ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
      break;
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_as_uniform:
      result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
      break;
   case nir_intrinsic_load_workgroup_id: {
      LLVMValueRef values[3] = {ctx->ac.i32_0, ctx->ac.i32_0, ctx->ac.i32_0};

      for (int i = 0; i < 3; i++) {
         if (ctx->args->workgroup_ids[i].used) {
            if (ctx->ac.gfx_level >= GFX12) {
               char intr_name[256];
               snprintf(intr_name, sizeof(intr_name), "llvm.amdgcn.workgroup.id.%c", "xyz"[i]);
               values[i] = ac_build_intrinsic(&ctx->ac, intr_name, ctx->ac.i32, NULL, 0, 0);
            } else {
               values[i] = ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]);
            }
         }
      }
      result = ac_build_gather_values(&ctx->ac, values, 3);
      break;
   }
   case nir_intrinsic_load_lds_ngg_scratch_base_amd:
   case nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd:
      result = ctx->abi->intrinsic_load(ctx->abi, instr);
      break;
   case nir_intrinsic_load_helper_invocation:
   case nir_intrinsic_is_helper_invocation:
      result = ac_build_load_helper_invocation(&ctx->ac);
      break;
   case nir_intrinsic_load_num_workgroups:
      if (ctx->abi->load_grid_size_from_user_sgpr) {
         result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
      } else {
         result = ac_build_load_invariant(&ctx->ac,
            ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->num_work_groups), ctx->ac.i32_0);
      }
      break;
   case nir_intrinsic_load_subgroup_id:
      assert(gl_shader_stage_is_compute(ctx->stage) && ctx->ac.gfx_level >= GFX12);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wave.id", ctx->ac.i32, NULL, 0, 0);
      break;
   case nir_intrinsic_first_invocation:
      result = visit_first_invocation(ctx);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(ctx, instr);
      break;
   case nir_intrinsic_load_ssbo:
      result = visit_load_buffer(ctx, instr);
      break;
   case nir_intrinsic_load_global_amd:
      result = visit_load_global(ctx, instr);
      break;
   case nir_intrinsic_store_global_amd:
      visit_store_global(ctx, instr);
      break;
   case nir_intrinsic_global_atomic_amd:
   case nir_intrinsic_global_atomic_swap_amd:
      result = visit_global_atomic(ctx, instr);
      break;
   case nir_intrinsic_ssbo_atomic:
   case nir_intrinsic_ssbo_atomic_swap:
      result = visit_atomic_ssbo(ctx, instr);
      break;
   case nir_intrinsic_load_ubo:
      result = visit_load_ubo_buffer(ctx, instr);
      break;
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_primitive_input:
   case nir_intrinsic_load_input_vertex:
   case nir_intrinsic_load_per_vertex_input:
      result = visit_load(ctx, instr);
      break;
   case nir_intrinsic_store_output:
      visit_store_output(ctx, instr);
      break;
   case nir_intrinsic_load_shared:
      result = visit_load_shared(ctx, instr);
      break;
   case nir_intrinsic_store_shared:
      visit_store_shared(ctx, instr);
      break;
   case nir_intrinsic_load_shared2_amd:
      result = visit_load_shared2_amd(ctx, instr);
      break;
   case nir_intrinsic_store_shared2_amd:
      visit_store_shared2_amd(ctx, instr);
      break;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_bindless_image_sparse_load:
   case nir_intrinsic_bindless_image_fragment_mask_load_amd:
      result = visit_image_load(ctx, instr);
      break;
   case nir_intrinsic_bindless_image_store:
      visit_image_store(ctx, instr);
      break;
   case nir_intrinsic_bindless_image_atomic:
   case nir_intrinsic_bindless_image_atomic_swap:
      result = visit_image_atomic(ctx, instr);
      break;
   case nir_intrinsic_shader_clock:
      result = ac_build_shader_clock(&ctx->ac, nir_intrinsic_memory_scope(instr));
      break;
   case nir_intrinsic_terminate:
   case nir_intrinsic_terminate_if:
      emit_discard(ctx, instr);
      break;
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      emit_demote(ctx, instr);
      break;
   case nir_intrinsic_barrier: {
      assert(!(nir_intrinsic_memory_semantics(instr) &
               (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));

      nir_variable_mode modes = nir_intrinsic_memory_modes(instr);

      unsigned wait_flags = 0;
      if (modes & (nir_var_mem_global | nir_var_mem_ssbo | nir_var_image))
         wait_flags |= AC_WAIT_LOAD | AC_WAIT_STORE;
      if (modes & nir_var_mem_shared)
         wait_flags |= AC_WAIT_DS;

      if (wait_flags)
         ac_build_waitcnt(&ctx->ac, wait_flags);

      if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP)
         ac_build_s_barrier(&ctx->ac, ctx->stage);
      break;
   }
   case nir_intrinsic_optimization_barrier_vgpr_amd:
      result = get_src(ctx, instr->src[0]);
      ac_build_optimization_barrier(&ctx->ac, &result, false);
      break;
   case nir_intrinsic_optimization_barrier_sgpr_amd:
      result = get_src(ctx, instr->src[0]);
      ac_build_optimization_barrier(&ctx->ac, &result, true);
      break;
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_shared_atomic_swap: {
      LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], nir_intrinsic_base(instr));
      result = visit_var_atomic(ctx, instr, ptr, 1);
      break;
   }
   case nir_intrinsic_load_interpolated_input: {
      /* We assume any indirect loads have been lowered away */
      ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
      assert(offset);
      assert(offset[0].i32 == 0);

      LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
      unsigned index = nir_intrinsic_base(instr);
      unsigned component = nir_intrinsic_component(instr);
      result = load_interpolated_input(ctx, interp_param, index, component,
                                       instr->def.num_components, instr->def.bit_size,
                                       nir_intrinsic_io_semantics(instr).high_16bits);
      break;
   }
   case nir_intrinsic_sendmsg_amd: {
      unsigned imm = nir_intrinsic_base(instr);
      LLVMValueRef m0_content = get_src(ctx, instr->src[0]);
      ac_build_sendmsg(&ctx->ac, imm, m0_content);
      break;
   }
   case nir_intrinsic_vote_all: {
      result = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
      if (ctx->info->stage == MESA_SHADER_FRAGMENT)
         result = ac_build_wqm(&ctx->ac, result);
      break;
   }
   case nir_intrinsic_vote_any: {
      result = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
      if (ctx->info->stage == MESA_SHADER_FRAGMENT)
         result = ac_build_wqm(&ctx->ac, result);
      break;
   }
   case nir_intrinsic_quad_vote_any: {
      result = ac_build_wqm_vote(&ctx->ac, get_src(ctx, instr->src[0]));
      break;
   }
   case nir_intrinsic_quad_vote_all: {
      LLVMValueRef src = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
      result = LLVMBuildNot(ctx->ac.builder, ac_build_wqm_vote(&ctx->ac, src), "");
      break;
   }
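   /* GFX8-9 and wave32 on GFX10+ can use ds_bpermute (ac_build_shuffle); other
    * configurations emulate the shuffle with a waterfall loop that broadcasts
    * one index at a time via readlane.
    */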
   case nir_intrinsic_shuffle:
      if (ctx->ac.gfx_level == GFX8 || ctx->ac.gfx_level == GFX9 ||
          (ctx->ac.gfx_level >= GFX10 && ctx->ac.wave_size == 32)) {
         result =
            ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
      } else {
         LLVMValueRef src = get_src(ctx, instr->src[0]);
         LLVMValueRef index = get_src(ctx, instr->src[1]);
         LLVMTypeRef type = LLVMTypeOf(src);
         struct waterfall_context wctx;
         LLVMValueRef index_val;

         index_val = enter_waterfall(ctx, &wctx, index, true);

         src = LLVMBuildZExt(ctx->ac.builder, src, ctx->ac.i32, "");

         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane", ctx->ac.i32,
                                     (LLVMValueRef[]){src, index_val}, 2, 0);

         result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");

         result = exit_waterfall(ctx, &wctx, result);
      }
      break;
   case nir_intrinsic_reduce:
      result = ac_build_reduce(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0],
                               instr->const_index[1]);
      break;
   case nir_intrinsic_inclusive_scan:
      result =
         ac_build_inclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
      break;
   case nir_intrinsic_exclusive_scan:
      result =
         ac_build_exclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
      break;
   case nir_intrinsic_quad_broadcast: {
      unsigned lane = nir_src_as_uint(instr->src[1]);
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), lane, lane, lane, lane, false);
      if (ctx->info->stage == MESA_SHADER_FRAGMENT)
         result = ac_build_wqm(&ctx->ac, result);
      break;
   }
   case nir_intrinsic_quad_swap_horizontal:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2, false);
      if (ctx->info->stage == MESA_SHADER_FRAGMENT)
         result = ac_build_wqm(&ctx->ac, result);
      break;
   case nir_intrinsic_quad_swap_vertical:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1, false);
      if (ctx->info->stage == MESA_SHADER_FRAGMENT)
         result = ac_build_wqm(&ctx->ac, result);
      break;
   case nir_intrinsic_quad_swap_diagonal:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0, false);
      if (ctx->info->stage == MESA_SHADER_FRAGMENT)
         result = ac_build_wqm(&ctx->ac, result);
      break;
   case nir_intrinsic_quad_swizzle_amd: {
      uint32_t mask = nir_intrinsic_swizzle_mask(instr);
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask & 0x3,
                                     (mask >> 2) & 0x3, (mask >> 4) & 0x3, (mask >> 6) & 0x3, false);
      if (ctx->info->stage == MESA_SHADER_FRAGMENT)
         result = ac_build_wqm(&ctx->ac, result);
      break;
   }
   case nir_intrinsic_masked_swizzle_amd: {
      uint32_t mask = nir_intrinsic_swizzle_mask(instr);
      result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
      break;
   }
   case nir_intrinsic_write_invocation_amd:
      result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
                                  get_src(ctx, instr->src[1]), get_src(ctx, instr->src[2]));
      break;
   case nir_intrinsic_mbcnt_amd:
      result = ac_build_mbcnt_add(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
      break;
   case nir_intrinsic_load_scratch: {
      LLVMValueRef offset = get_src(ctx, instr->src[0]);
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
      LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
      LLVMTypeRef vec_type = instr->def.num_components == 1
                                ? comp_type
                                : LLVMVectorType(comp_type, instr->def.num_components);
      result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
      break;
   }
   case nir_intrinsic_store_scratch: {
      LLVMValueRef offset = get_src(ctx, instr->src[1]);
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
      LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
      LLVMValueRef src = get_src(ctx, instr->src[0]);
      unsigned wrmask = nir_intrinsic_write_mask(instr);
      while (wrmask) {
         int start, count;
         u_bit_scan_consecutive_range(&wrmask, &start, &count);

         LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
         LLVMValueRef offset_ptr = LLVMBuildGEP2(ctx->ac.builder, comp_type, ptr, &offset, 1, "");
         LLVMValueRef offset_src = ac_extract_components(&ctx->ac, src, start, count);
         LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
      }
      break;
   }
   case nir_intrinsic_load_constant: {
      unsigned base = nir_intrinsic_base(instr);
      unsigned range = nir_intrinsic_range(instr);

      LLVMValueRef offset = get_src(ctx, instr->src[0]);
      offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");

      /* Clamp the offset to avoid out-of-bounds accesses because global
       * instructions can't handle them.
       */
      LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
      offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");

      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset);

      /* TODO: LLVM doesn't sign-extend the result of s_getpc_b64 correctly, causing hangs.
       * Do it manually here.
       */
      if (ctx->ac.gfx_level == GFX12) {
         LLVMValueRef addr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.i64, "");
         addr = LLVMBuildOr(ctx->ac.builder, addr,
                            LLVMConstInt(ctx->ac.i64, 0xffff000000000000ull, 0), "");
         ptr = LLVMBuildIntToPtr(ctx->ac.builder, addr, LLVMTypeOf(ptr), "");
      }

      LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
      LLVMTypeRef vec_type = instr->def.num_components == 1
                                ? comp_type
                                : LLVMVectorType(comp_type, instr->def.num_components);
      result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
      break;
   }
   case nir_intrinsic_set_vertex_and_primitive_count:
      /* Currently ignored. */
      break;
   case nir_intrinsic_load_typed_buffer_amd:
   case nir_intrinsic_load_buffer_amd:
   case nir_intrinsic_store_buffer_amd: {
      unsigned src_base = instr->intrinsic == nir_intrinsic_store_buffer_amd ? 1 : 0;
      bool idxen = !nir_src_is_const(instr->src[src_base + 3]) ||
                   nir_src_as_uint(instr->src[src_base + 3]);

      LLVMValueRef store_data = get_src(ctx, instr->src[0]);
      LLVMValueRef descriptor = get_src(ctx, instr->src[src_base + 0]);
      LLVMValueRef addr_voffset = get_src(ctx, instr->src[src_base + 1]);
      LLVMValueRef addr_soffset = get_src(ctx, instr->src[src_base + 2]);
      LLVMValueRef vidx = idxen ? get_src(ctx, instr->src[src_base + 3]) : NULL;
      unsigned num_components = instr->def.num_components;
      unsigned const_offset = nir_intrinsic_base(instr);
      bool reorder = nir_intrinsic_can_reorder(instr);
      enum gl_access_qualifier access = ac_nir_get_mem_access_flags(instr);
      bool uses_format = access & ACCESS_USES_FORMAT_AMD;

      LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, addr_voffset,
                                          LLVMConstInt(ctx->ac.i32, const_offset, 0), "");

      if (instr->intrinsic == nir_intrinsic_load_buffer_amd && uses_format) {
         assert(instr->def.bit_size == 16 || instr->def.bit_size == 32);
         result = ac_build_buffer_load_format(&ctx->ac, descriptor, vidx, voffset, num_components,
                                              access, reorder,
                                              instr->def.bit_size == 16, false);
         result = ac_to_integer(&ctx->ac, result);
      } else if (instr->intrinsic == nir_intrinsic_store_buffer_amd && uses_format) {
         assert(instr->src[0].ssa->bit_size == 16 || instr->src[0].ssa->bit_size == 32);
         ac_build_buffer_store_format(&ctx->ac, descriptor, store_data, vidx, voffset, access);
      } else if (instr->intrinsic == nir_intrinsic_load_buffer_amd ||
                 instr->intrinsic == nir_intrinsic_load_typed_buffer_amd) {
         /* LLVM is unable to select instructions for channel types larger than 32 bits.
          * Work around it by using i32 and casting to the correct type later.
          */
         const unsigned fetch_num_components =
            num_components * MAX2(32, instr->def.bit_size) / 32;

         LLVMTypeRef channel_type =
            LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->def.bit_size));

         if (instr->intrinsic == nir_intrinsic_load_buffer_amd) {
            result = ac_build_buffer_load(&ctx->ac, descriptor, fetch_num_components, vidx, voffset,
                                          addr_soffset, channel_type, access, reorder, false);
         } else {
            const unsigned align_offset = nir_intrinsic_align_offset(instr);
            const unsigned align_mul = nir_intrinsic_align_mul(instr);
            const enum pipe_format format = nir_intrinsic_format(instr);

            result =
               ac_build_safe_tbuffer_load(&ctx->ac, descriptor, vidx, addr_voffset, addr_soffset,
                                          format, MIN2(32, instr->def.bit_size), const_offset, align_offset,
                                          align_mul, fetch_num_components, access, reorder);
         }

         /* Trim to the needed vector components. */
         result = ac_trim_vector(&ctx->ac, result, fetch_num_components);

         /* Cast to components larger than 32 bits if needed. */
         if (instr->def.bit_size > 32) {
            LLVMTypeRef cast_channel_type =
               LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
            LLVMTypeRef cast_type =
               num_components == 1 ? cast_channel_type :
               LLVMVectorType(cast_channel_type, num_components);
            result = LLVMBuildBitCast(ctx->ac.builder, result, cast_type, "");
         }

         /* Cast the result to an integer (or vector of integers). */
         result = ac_to_integer(&ctx->ac, result);
      } else {
         unsigned writemask = nir_intrinsic_write_mask(instr);
         while (writemask) {
            int start, count;
            u_bit_scan_consecutive_range(&writemask, &start, &count);

            LLVMValueRef voffset = LLVMBuildAdd(
               ctx->ac.builder, addr_voffset,
               LLVMConstInt(ctx->ac.i32, const_offset + start * 4, 0), "");

            LLVMValueRef data = extract_vector_range(&ctx->ac, store_data, start, count);
            ac_build_buffer_store_dword(&ctx->ac, descriptor, data, vidx, voffset, addr_soffset,
                                        access);
         }
      }
      break;
   }
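   /* src0 holds a (possibly packed) invocation count; nir_intrinsic_base() is
    * the bit offset of the 8-bit count within it.
    */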
   case nir_intrinsic_is_subgroup_invocation_lt_amd: {
      unsigned offset = nir_intrinsic_base(instr);
      LLVMValueRef count = get_src(ctx, instr->src[0]);
      if (offset)
         count = LLVMBuildLShr(ctx->ac.builder, count, LLVMConstInt(ctx->ac.i32, offset, 0), "");
      count = LLVMBuildAnd(ctx->ac.builder, count, LLVMConstInt(ctx->ac.i32, 0xff, 0), "");
      result = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
      break;
   }
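   /* Atomic add in GDS. src1 is a byte offset that is turned into a pointer
    * in the GDS address space for the atomicrmw.
    */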
   case nir_intrinsic_gds_atomic_add_amd: {
      LLVMValueRef store_val = get_src(ctx, instr->src[0]);
      LLVMValueRef addr = get_src(ctx, instr->src[1]);
      LLVMTypeRef gds_ptr_type = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
      LLVMValueRef gds_base = LLVMBuildIntToPtr(ctx->ac.builder, addr, gds_ptr_type, "");
      ac_build_atomic_rmw(&ctx->ac, LLVMAtomicRMWBinOpAdd, gds_base, store_val, "workgroup-one-as");
      break;
   }
   case nir_intrinsic_elect:
      result = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, visit_first_invocation(ctx),
                             ac_get_thread_id(&ctx->ac), "");
      break;
   case nir_intrinsic_lane_permute_16_amd:
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.permlane16", ctx->ac.i32,
                                  (LLVMValueRef[]){get_src(ctx, instr->src[0]),
                                                   get_src(ctx, instr->src[0]),
                                                   get_src(ctx, instr->src[1]),
                                                   get_src(ctx, instr->src[2]),
                                                   ctx->ac.i1false,
                                                   ctx->ac.i1false}, 6, 0);
      break;
   case nir_intrinsic_load_scalar_arg_amd:
   case nir_intrinsic_load_vector_arg_amd: {
      assert(nir_intrinsic_base(instr) < AC_MAX_ARGS);
      struct ac_arg arg;
      arg.arg_index = nir_intrinsic_base(instr);
      arg.used = true;
      result = ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, arg));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(result)) != 32)
         result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->def), "");
      break;
   }
   case nir_intrinsic_load_smem_amd: {
      LLVMValueRef base = get_src(ctx, instr->src[0]);
      LLVMValueRef offset = get_src(ctx, instr->src[1]);

      bool is_addr_32bit = nir_src_bit_size(instr->src[0]) == 32;
      int addr_space = is_addr_32bit ? AC_ADDR_SPACE_CONST_32BIT : AC_ADDR_SPACE_CONST;

      LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
      LLVMTypeRef byte_ptr_type = LLVMPointerType(ctx->ac.i8, addr_space);

      LLVMValueRef addr = LLVMBuildIntToPtr(ctx->ac.builder, base, byte_ptr_type, "");
      /* see ac_build_load_custom() for 32bit/64bit addr GEP difference */
      addr = is_addr_32bit ?
         LLVMBuildInBoundsGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "") :
         LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "");

      LLVMSetMetadata(addr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
      result = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
      LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
      break;
   }
   case nir_intrinsic_ordered_xfb_counter_add_gfx11_amd: {
      /* Gfx11 GDS instructions only operate on the first active lane. All other lanes are
       * ignored. So are their EXEC bits. This uses the mutex feature of ds_ordered_count
       * to emulate a multi-dword atomic.
       *
       * This is the expected code:
       *    ds_ordered_count release=0 done=0   // lock mutex
       *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_0
       *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_1
       *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_2
       *    ds_add_gs_reg_rtn GDS_STRMOUT_DWORDS_WRITTEN_3
       *    ds_ordered_count release=1 done=1   // unlock mutex
       *
       * GDS_STRMOUT_DWORDS_WRITTEN_n are just general-purpose global registers. We use them
       * because MCBP (mid-command-buffer preemption) saves and restores them, and it doesn't
       * save and restore GDS memory.
       */
      LLVMValueRef args[8] = {
         LLVMBuildIntToPtr(ctx->ac.builder, get_src(ctx, instr->src[0]),
                           LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS), ""),
         ctx->ac.i32_0,                             /* value to add */
         ctx->ac.i32_0,                             /* ordering */
         ctx->ac.i32_0,                             /* scope */
         ctx->ac.i1false,                           /* isVolatile */
         LLVMConstInt(ctx->ac.i32, 1 << 24, false), /* OA index, bits 24+: lane count */
         ctx->ac.i1false,                           /* wave release */
         ctx->ac.i1false,                           /* wave done */
      };

      /* Set release=0 to start a GDS mutex. Set done=0 because it's not the last one. */
      ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32,
                         args, ARRAY_SIZE(args), 0);
      ac_build_waitcnt(&ctx->ac, AC_WAIT_DS);

      LLVMValueRef global_count[4];
      LLVMValueRef count_vec = get_src(ctx, instr->src[1]);
      unsigned write_mask = nir_intrinsic_write_mask(instr);
      for (unsigned i = 0; i < instr->num_components; i++) {
         LLVMValueRef value =
            LLVMBuildExtractElement(ctx->ac.builder, count_vec,
                                    LLVMConstInt(ctx->ac.i32, i, false), "");
         if (write_mask & (1 << i)) {
            /* The offset is a relative offset from GDS_STRMOUT_DWORDS_WRITTEN_0. */
            global_count[i] =
               ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.add.gs.reg.rtn.i32", ctx->ac.i32,
                                  (LLVMValueRef[]){value, LLVMConstInt(ctx->ac.i32, i * 4, 0)},
                                  2, 0);
         } else {
            global_count[i] = LLVMGetUndef(ctx->ac.i32);
         }
      }

      ac_build_waitcnt(&ctx->ac, AC_WAIT_DS);

      /* Set release=1 to end a GDS mutex. Set done=1 because it's the last one. */
      args[6] = args[7] = ctx->ac.i1true;
      ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32,
                         args, ARRAY_SIZE(args), 0);
      result = ac_build_gather_values(&ctx->ac, global_count, instr->num_components);
      break;
   }
   case nir_intrinsic_xfb_counter_sub_gfx11_amd: {
      /* Must be called from a single lane of a workgroup. */
      LLVMValueRef sub_vec = get_src(ctx, instr->src[0]);
      unsigned write_mask = nir_intrinsic_write_mask(instr);

      for (unsigned i = 0; i < instr->num_components; i++) {
         if (write_mask & (1 << i)) {
            LLVMValueRef value =
               LLVMBuildExtractElement(ctx->ac.builder, sub_vec,
                                       LLVMConstInt(ctx->ac.i32, i, false), "");
            /* The offset is a relative offset from GDS_STRMOUT_DWORDS_WRITTEN_0. */
            ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.sub.gs.reg.rtn.i32", ctx->ac.i32,
                               (LLVMValueRef[]){value, LLVMConstInt(ctx->ac.i32, i * 4, 0)},
                               2, 0);
         }
      }
      break;
   }
   case nir_intrinsic_ordered_add_loop_gfx12_amd: {
      const unsigned num_atomics = 6;
      char code[2048];
      char *ptr = code;

      /* Assembly outputs:
       *    i32 VGPR $0 = previous value in memory
       *
       * Assembly inputs:
       *    EXEC = one lane per counter (use nir_push_if, streamout should always enable 4 lanes)
       *    i64 SGPR $1 = atomic base address
       *    i32 VGPR $2 = 32-bit VGPR voffset (streamout should set local_invocation_index * 8)
       *    i32 SGPR $3 = orderedID
       *    i64 VGPR $4 = 64-bit VGPR atomic src (streamout should set {orderedID, numDwords})
       */

      /* Issue (num_atomics - 1) atomics to initialize the results.
       * There are no s_sleeps here because the atomics must be pipelined.
       */
      for (int i = 0; i < num_atomics - 1; i++) {
         /* global_atomic_ordered_add_b64 dst, offset, data, address */
         ptr += sprintf(ptr,
                        "global_atomic_ordered_add_b64 v[%u:%u], $2, $4, $1 th:TH_ATOMIC_RETURN\n"
                        "s_nop 15\n"
                        "s_nop 7\n",
                        3 + i * 2,
                        3 + i * 2 + 1);
      }

      /* This is an infinite while loop with breaks. The loop body executes "num_atomics"
       * atomics and the same number of conditional breaks.
       *
       * It's pipelined such that we only wait for the oldest atomic, so there is always
       * "num_atomics" atomics in flight while the shader is waiting.
       */
      unsigned inst_block_size = 3 + 1 + 3; /* size of the next sprintf in dwords */

      for (unsigned i = 0; i < num_atomics; i++) {
         unsigned issue_index = (num_atomics - 1 + i) % num_atomics;
         unsigned read_index = i;

         ptr += sprintf(ptr,
                        /* Issue (or repeat) the attempt. */
                        "global_atomic_ordered_add_b64 v[%u:%u], $2, $4, $1 th:TH_ATOMIC_RETURN\n"
                        "s_wait_loadcnt 0x%x\n"
                        /* if (result[check_index].ordered_id == ordered_id) {
                         *    return_value = result[check_index].value;
                         *    break;
                         * }
                         */
                        "v_cmp_eq_u32 %s, $3, v%u\n"
                        "v_mov_b32 $0, v%u\n"
                        "s_cbranch_vccnz 0x%x\n",
                        3 + issue_index * 2,
                        3 + issue_index * 2 + 1,
                        num_atomics - 1, /* wait count */
                        ctx->ac.wave_size == 32 ? "vcc_lo" : "vcc",
                        3 + read_index * 2, /* v_cmp_eq: src1 */
                        3 + read_index * 2 + 1, /* output */
                        inst_block_size * (num_atomics - i - 1) + 1); /* forward s_cbranch as loop break */
      }

      /* Jump to the beginning of the loop. */
      ptr += sprintf(ptr,
                     "s_branch 0x%x\n"
                     "s_wait_alu 0xfffe\n"
                     "s_wait_loadcnt 0x0\n",
                     (inst_block_size * -(int)num_atomics - 1) & 0xffff);

      LLVMTypeRef param_types[] = {ctx->ac.i64, ctx->ac.i32, ctx->ac.i32, ctx->ac.i64};
      LLVMTypeRef calltype = LLVMFunctionType(ctx->ac.i32, param_types, 4, false);

      /* =v means a VGPR output, =& means the dst register must be different from src registers,
       * s means an SGPR input, v means a VGPR input, ~{reg} means that the register is clobbered
       *
       * We need to list the registers manually because the clobber constraint doesn't prevent
       * input and output registers from being assigned the same registers as the ones that are
       * clobbered.
       *
       * Since registers in the clobber constraints are ignored by LLVM during computation of
       * register usage, we have to set the input register to the highest used register because
       * that one is included in the register usage computation.
       */
      char constraint[128];
      snprintf(constraint, sizeof(constraint), "=&{v0},{s[8:9]},{v%u},{s12},{v[1:2]},~{%s},~{v[3:%u]}",
               3 + num_atomics * 2,
               ctx->ac.wave_size == 32 ? "vcc_lo" : "vcc",
               3 + num_atomics * 2 - 1);

      LLVMValueRef inlineasm = LLVMConstInlineAsm(calltype, code, constraint, true, false);

      LLVMValueRef args[] = {
         get_src(ctx, instr->src[0]),
         get_src(ctx, instr->src[1]),
         get_src(ctx, instr->src[2]),
         get_src(ctx, instr->src[3]),
      };
      result = LLVMBuildCall2(ctx->ac.builder, calltype, inlineasm, args, 4, "");

      assert(ptr < code + sizeof(code));
      break;
   }
   case nir_intrinsic_export_amd: {
      unsigned flags = nir_intrinsic_flags(instr);
      unsigned target = nir_intrinsic_base(instr);
      unsigned write_mask = nir_intrinsic_write_mask(instr);

      struct ac_export_args args = {
         .target = target,
         .enabled_channels = write_mask,
         .compr = flags & AC_EXP_FLAG_COMPRESSED,
         .done = flags & AC_EXP_FLAG_DONE,
         .valid_mask = flags & AC_EXP_FLAG_VALID_MASK,
      };

      LLVMValueRef value = get_src(ctx, instr->src[0]);
      int num_components = ac_get_llvm_num_components(value);
      for (int i = 0; i < num_components; i++)
         args.out[i] = ac_llvm_extract_elem(&ctx->ac, value, i);

      ac_build_export(&ctx->ac, &args);
      break;
   }
   case nir_intrinsic_bvh64_intersect_ray_amd: {
      LLVMValueRef desc = get_src(ctx, instr->src[0]);
      LLVMValueRef node_id =
         LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i64, "");
      LLVMValueRef t_max =
         LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[2]), ctx->ac.f32, "");
      LLVMValueRef origin =
         LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[3]), ctx->ac.v3f32, "");
      LLVMValueRef dir =
         LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[4]), ctx->ac.v3f32, "");
      LLVMValueRef inv_dir =
         LLVMBuildBitCast(ctx->ac.builder, get_src(ctx, instr->src[5]), ctx->ac.v3f32, "");

      LLVMValueRef args[6] = {
         node_id, t_max, origin, dir, inv_dir, desc,
      };

      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.image.bvh.intersect.ray.i64.v3f32",
                                  ctx->ac.v4i32, args, ARRAY_SIZE(args), 0);
      break;
   }
   case nir_intrinsic_sleep_amd: {
      LLVMValueRef arg = LLVMConstInt(ctx->ac.i32, nir_intrinsic_base(instr), 0);
      ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.sleep", ctx->ac.voidt, &arg, 1, 0);
      break;
   }
   case nir_intrinsic_nop_amd: {
      LLVMValueRef arg = LLVMConstInt(ctx->ac.i16, nir_intrinsic_base(instr), 0);
      ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.nop", ctx->ac.voidt, &arg, 1, 0);
      break;
   }
   default:
      fprintf(stderr, "Unknown intrinsic: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      return false;
   }
   if (result) {
      ctx->ssa_defs[instr->def.index] = result;
   }
   return true;
}

/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx, LLVMValueRef res,
                                           LLVMValueRef samp)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef img7, samp0;

   if (ctx->ac.gfx_level >= GFX8)
      return samp;

   img7 = LLVMBuildExtractElement(builder, res, LLVMConstInt(ctx->ac.i32, 7, 0), "");
   samp0 = LLVMBuildExtractElement(builder, samp, ctx->ac.i32_0, "");
   samp0 = LLVMBuildAnd(builder, samp0, img7, "");
   return LLVMBuildInsertElement(builder, samp, samp0, ctx->ac.i32_0, "");
}

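/* Resolve the texture and sampler descriptors for a tex instruction.
 * Vector-typed handle sources are already descriptors and are used as-is;
 * scalar handles are loaded through the ABI callback, wrapped in waterfall
 * loops when the handle is non-uniform.
 */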
static void tex_fetch_ptrs(struct ac_nir_context *ctx, nir_tex_instr *instr,
                           struct waterfall_context *wctx, LLVMValueRef *res_ptr,
                           LLVMValueRef *samp_ptr)
{
   LLVMValueRef texture_dynamic_handle = NULL;
   LLVMValueRef sampler_dynamic_handle = NULL;
   int plane = -1;

   *res_ptr = NULL;
   *samp_ptr = NULL;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_handle:
      case nir_tex_src_sampler_handle: {
         LLVMValueRef val = get_src(ctx, instr->src[i].src);
         if (LLVMGetTypeKind(LLVMTypeOf(val)) == LLVMVectorTypeKind) {
            if (instr->src[i].src_type == nir_tex_src_texture_handle)
               *res_ptr = val;
            else
               *samp_ptr = val;
         } else {
            if (instr->src[i].src_type == nir_tex_src_texture_handle)
               texture_dynamic_handle = val;
            else
               sampler_dynamic_handle = val;
         }
         break;
      }
      case nir_tex_src_plane:
         plane = nir_src_as_int(instr->src[i].src);
         break;
      default:
         break;
      }
   }

   enum ac_descriptor_type main_descriptor =
      instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;

   if (plane >= 0) {
      assert(instr->op != nir_texop_txf_ms);
      assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);

      main_descriptor = AC_DESC_PLANE_0 + plane;
   }

   if (instr->op == nir_texop_fragment_mask_fetch_amd) {
      /* The fragment mask is fetched from the compressed
       * multisampled surface.
       */
      assert(ctx->ac.gfx_level < GFX11);
      main_descriptor = AC_DESC_FMASK;
   }

   /* descriptor handles given through nir_tex_src_{texture,sampler}_handle */
   if (instr->texture_non_uniform)
      texture_dynamic_handle = enter_waterfall(ctx, &wctx[0], texture_dynamic_handle, true);

   if (instr->sampler_non_uniform)
      sampler_dynamic_handle = enter_waterfall(ctx, &wctx[1], sampler_dynamic_handle, true);

   if (texture_dynamic_handle)
      *res_ptr = ctx->abi->load_sampler_desc(ctx->abi, texture_dynamic_handle, main_descriptor);

   if (sampler_dynamic_handle) {
      *samp_ptr = ctx->abi->load_sampler_desc(ctx->abi, sampler_dynamic_handle, AC_DESC_SAMPLER);

      if (ctx->abi->disable_aniso_single_level && instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
         *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
   }
}

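/* Translate a NIR tex instruction: fetch the descriptors, gather the sources
 * into ac_image_args (coordinates, comparator, offsets, derivatives, LOD,
 * sample index), apply the hardware workarounds below, and emit the image
 * intrinsic.
 */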
static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
{
   LLVMValueRef result = NULL;
   struct ac_image_args args = {0};
   LLVMValueRef sample_index = NULL;
   LLVMValueRef ddx = NULL, ddy = NULL;
   struct waterfall_context wctx[2] = {{{0}}};

   tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler);

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(ctx, instr->src[i].src);
         args.a16 = instr->src[i].src.ssa->bit_size == 16;
         for (unsigned chan = 0; chan < instr->coord_components; ++chan)
            args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
         break;
      }
      case nir_tex_src_projector:
         break;
      case nir_tex_src_comparator:
         if (instr->is_shadow) {
            args.compare = get_src(ctx, instr->src[i].src);
            args.compare = ac_to_float(&ctx->ac, args.compare);
            assert(instr->src[i].src.ssa->bit_size == 32);
         }
         break;
      case nir_tex_src_offset:
         args.offset = get_src(ctx, instr->src[i].src);
         /* We pack it with bit shifts, so we need it to be 32-bit. */
         assert(ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.offset)) == 32);
         break;
      case nir_tex_src_bias:
         args.bias = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_lod:
         if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
            args.level_zero = true;
         else
            args.lod = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_ms_index:
         sample_index = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_ddx:
         ddx = get_src(ctx, instr->src[i].src);
         args.g16 = instr->src[i].src.ssa->bit_size == 16;
         break;
      case nir_tex_src_ddy:
         ddy = get_src(ctx, instr->src[i].src);
         assert(LLVMTypeOf(ddy) == LLVMTypeOf(ddx));
         break;
      case nir_tex_src_min_lod:
         args.min_lod = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_plane:
      default:
         break;
      }
   }

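   /* Pack up to three 6-bit texel offsets into one dword, one per byte:
    * (x & 0x3f) | (y & 0x3f) << 8 | (z & 0x3f) << 16.
    */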
3598    if (args.offset) {
3599       /* offset for txf has been lowered in nir. */
3600       assert(instr->op != nir_texop_txf);
3601 
3602       LLVMValueRef offset[3], pack;
3603       for (unsigned chan = 0; chan < 3; ++chan)
3604          offset[chan] = ctx->ac.i32_0;
3605 
3606       unsigned num_components = ac_get_llvm_num_components(args.offset);
3607       for (unsigned chan = 0; chan < num_components; chan++) {
3608          offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
3609          offset[chan] =
3610             LLVMBuildAnd(ctx->ac.builder, offset[chan], LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
3611          if (chan)
3612             offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
3613                                         LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
3614       }
3615       pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
3616       pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
3617       args.offset = pack;
3618    }
3619 
3620    /* Section 8.23.1 (Depth Texture Comparison Mode) of the
3621     * OpenGL 4.5 spec says:
3622     *
3623     *    "If the texture’s internal format indicates a fixed-point
3624     *     depth texture, then D_t and D_ref are clamped to the
3625     *     range [0, 1]; otherwise no clamping is performed."
3626     *
3627     * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
3628     * so the depth comparison value isn't clamped for Z16 and
3629     * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
3630     * an explicitly clamped 32-bit float format.
3631     */
3632    if (args.compare && ctx->ac.gfx_level >= GFX8 && ctx->ac.gfx_level <= GFX9 &&
3633        ctx->abi->clamp_shadow_reference) {
3634       LLVMValueRef upgraded, clamped;
3635 
3636       upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
3637                                          LLVMConstInt(ctx->ac.i32, 3, false), "");
3638       upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded, LLVMConstInt(ctx->ac.i32, 29, false), "");
3639       upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
3640       clamped = ac_build_clamp(&ctx->ac, args.compare);
3641       args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped, args.compare, "");
3642    }
3643 
3644    /* pack derivatives */
3645    if (ddx || ddy) {
3646       int num_deriv_channels;
3647       switch (instr->sampler_dim) {
3648       case GLSL_SAMPLER_DIM_3D:
3649          num_deriv_channels = 3;
3650          break;
3651       case GLSL_SAMPLER_DIM_2D:
3652       case GLSL_SAMPLER_DIM_CUBE:
3653       default:
3654          num_deriv_channels = 2;
3655          break;
3656       case GLSL_SAMPLER_DIM_1D:
3657          num_deriv_channels = 1;
3658          break;
3659       }
3660 
3661       for (unsigned i = 0; i < num_deriv_channels; i++) {
3662          args.derivs[i] = ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddx, i));
3663          args.derivs[num_deriv_channels + i] =
3664             ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddy, i));
3665       }
3666    }
3667 
3668    /* Pack sample index */
   if (sample_index && (instr->op == nir_texop_txf_ms || instr->op == nir_texop_fragment_fetch_amd))
      args.coords[instr->coord_components] = sample_index;

   bool is_new_style_shadow = instr->is_shadow && instr->is_new_style_shadow &&
                              instr->op != nir_texop_lod && instr->op != nir_texop_tg4;
   unsigned num_components = util_last_bit(nir_def_components_read(&instr->def));

   /* DMASK was repurposed for GATHER4. 4 components are always
    * returned and DMASK works like a swizzle - it selects
    * the component to fetch. The only valid DMASK values are
    * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
    * (red,red,red,red) etc.) The ISA document doesn't mention
    * this.
    */
   if (instr->op == nir_texop_tg4) {
      if (instr->is_shadow)
         args.dmask = 1;
      else
         args.dmask = 1 << instr->component;
   } else if (is_new_style_shadow || instr->op == nir_texop_fragment_mask_fetch_amd) {
      args.dmask = 1;
   } else {
      args.dmask = BITFIELD_MASK(num_components);
   }

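   /* RECT samplers use unnormalized coordinates. */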
   if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
      args.dim = ac_get_sampler_dim(ctx->ac.gfx_level, instr->sampler_dim, instr->is_array);
      args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
   }

   /* Adjust the number of coordinates because we only need (x,y) for 2D
    * multisampled images and (x,y,layer) for 2D multisampled layered
    * images or for multisampled input attachments.
    */
   if (instr->op == nir_texop_fragment_mask_fetch_amd) {
      if (args.dim == ac_image_2dmsaa) {
         args.dim = ac_image_2d;
      } else {
         assert(args.dim == ac_image_2darraymsaa);
         args.dim = ac_image_2darray;
      }
   }

   /* Set TRUNC_COORD=0 for textureGather(). */
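   /* C_008F30_TRUNC_COORD is the inverted field mask from sid.h, so the
    * AND below clears the TRUNC_COORD bit in sampler dword 0.
    */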
   if (instr->op == nir_texop_tg4 && !ctx->ac.info->conformant_trunc_coord) {
      LLVMValueRef dword0 = LLVMBuildExtractElement(ctx->ac.builder, args.sampler, ctx->ac.i32_0, "");
      dword0 = LLVMBuildAnd(ctx->ac.builder, dword0, LLVMConstInt(ctx->ac.i32, C_008F30_TRUNC_COORD, 0), "");
      args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
   }

   args.d16 = instr->def.bit_size == 16;
   args.tfe = instr->is_sparse;

   result = build_tex_intrinsic(ctx, instr, &args);

   LLVMValueRef code = NULL;
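   /* With TFE, the hardware appends the sparse residency code as an extra
    * result component; split it off here so only color components are
    * processed below, and concatenate it back at the end.
    */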
   if (instr->is_sparse) {
      unsigned num_color_components = num_components - 1;
      code = ac_llvm_extract_elem(&ctx->ac, result, num_color_components);
      result = ac_trim_vector(&ctx->ac, result, num_color_components);
   }

   if (is_new_style_shadow)
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
   else if (instr->op == nir_texop_fragment_mask_fetch_amd) {
      /* Use 0x76543210 if the image doesn't have FMASK. */
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
      tmp = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      tmp = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, tmp, ctx->ac.i32_0, "");
      result = LLVMBuildSelect(ctx->ac.builder, tmp,
                               LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, ""),
                               LLVMConstInt(ctx->ac.i32, 0x76543210, false), "");
   } else {
      unsigned num_color_components = num_components - (instr->is_sparse ? 1 : 0);
      result = ac_trim_vector(&ctx->ac, result, num_color_components);
   }

   if (instr->is_sparse)
      result = ac_build_concat(&ctx->ac, result, code);

   if (result) {
      result = ac_to_integer(&ctx->ac, result);

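      /* Close any waterfall loops that were opened for non-uniform
       * descriptor indexing, innermost first.
       */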
      for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
         result = exit_waterfall(ctx, wctx + i, result);
      }

      ctx->ssa_defs[instr->def.index] = result;
   }
}

static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
   LLVMTypeRef type = get_def_type(ctx, &instr->def);
   LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

   ctx->ssa_defs[instr->def.index] = result;
   _mesa_hash_table_insert(ctx->phis, instr, result);
}

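/* Second pass over phis: once every block has been emitted, the incoming
 * values and their predecessor blocks can be attached to the LLVM phi
 * nodes created by visit_phi().
 */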
static void visit_post_phi(struct ac_nir_context *ctx, nir_phi_instr *instr, LLVMValueRef llvm_phi)
{
   nir_foreach_phi_src (src, instr) {
      LLVMBasicBlockRef block = get_block(ctx, src->pred);
      LLVMValueRef llvm_src = get_src(ctx, src->src);

      LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
   }
}

static void phi_post_pass(struct ac_nir_context *ctx)
{
   hash_table_foreach(ctx->phis, entry)
   {
      visit_post_phi(ctx, (nir_phi_instr *)entry->key, (LLVMValueRef)entry->data);
   }
}

static void visit_ssa_undef(struct ac_nir_context *ctx, const nir_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   LLVMValueRef undef;

   if (num_components == 1)
      undef = LLVMGetUndef(type);
   else
      undef = LLVMGetUndef(LLVMVectorType(type, num_components));

   ctx->ssa_defs[instr->def.index] = undef;
}

static bool visit_jump(struct ac_llvm_context *ctx, const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      ac_build_break(ctx);
      break;
   case nir_jump_continue:
      ac_build_continue(ctx);
      break;
   default:
      fprintf(stderr, "Unknown NIR jump instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      return false;
   }
   return true;
}

static bool visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list);

static bool visit_block(struct ac_nir_context *ctx, nir_block *block)
{
   LLVMBasicBlockRef blockref = LLVMGetInsertBlock(ctx->ac.builder);
   LLVMValueRef first = LLVMGetFirstInstruction(blockref);
   if (first) {
      /* ac_branch_exited() might have already inserted non-phis */
      LLVMPositionBuilderBefore(ctx->ac.builder, LLVMGetFirstInstruction(blockref));
   }

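   /* LLVM requires phi nodes to be grouped at the top of a basic block,
    * so emit them first, then continue appending at the end of the block.
    */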
   nir_foreach_phi(phi, block) {
      visit_phi(ctx, phi);
   }

   LLVMPositionBuilderAtEnd(ctx->ac.builder, blockref);

   nir_foreach_instr (instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         if (!visit_alu(ctx, nir_instr_as_alu(instr)))
            return false;
         break;
      case nir_instr_type_load_const:
         visit_load_const(ctx, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         if (!visit_intrinsic(ctx, nir_instr_as_intrinsic(instr)))
            return false;
         break;
      case nir_instr_type_tex:
         visit_tex(ctx, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         break;
      case nir_instr_type_undef:
         visit_ssa_undef(ctx, nir_instr_as_undef(instr));
         break;
      case nir_instr_type_jump:
         if (!visit_jump(&ctx->ac, nir_instr_as_jump(instr)))
            return false;
         break;
      case nir_instr_type_deref:
         assert(!nir_deref_mode_is_one_of(nir_instr_as_deref(instr),
                                          nir_var_mem_shared | nir_var_mem_global));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         return false;
      }
   }

   _mesa_hash_table_insert(ctx->defs, block, LLVMGetInsertBlock(ctx->ac.builder));

   return true;
}

static bool visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
   LLVMValueRef value = get_src(ctx, if_stmt->condition);

   nir_block *then_block = (nir_block *)exec_list_get_head(&if_stmt->then_list);

   ac_build_ifcc(&ctx->ac, value, then_block->index);

   if (!visit_cf_list(ctx, &if_stmt->then_list))
      return false;

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      nir_block *else_block = (nir_block *)exec_list_get_head(&if_stmt->else_list);

      ac_build_else(&ctx->ac, else_block->index);
      if (!visit_cf_list(ctx, &if_stmt->else_list))
         return false;
   }

   ac_build_endif(&ctx->ac, then_block->index);
   return true;
}

static bool visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
   assert(!nir_loop_has_continue_construct(loop));
   nir_block *first_loop_block = (nir_block *)exec_list_get_head(&loop->body);

   ac_build_bgnloop(&ctx->ac, first_loop_block->index);

   if (!visit_cf_list(ctx, &loop->body))
      return false;

   ac_build_endloop(&ctx->ac, first_loop_block->index);
   return true;
}

static bool visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         if (!visit_block(ctx, nir_cf_node_as_block(node)))
            return false;
         break;

      case nir_cf_node_if:
         if (!visit_if(ctx, nir_cf_node_as_if(node)))
            return false;
         break;

      case nir_cf_node_loop:
         if (!visit_loop(ctx, nir_cf_node_as_loop(node)))
            return false;
         break;

      default:
         return false;
      }
   }
   return true;
}

static void setup_scratch(struct ac_nir_context *ctx, struct nir_shader *shader)
{
   if (shader->scratch_size == 0)
      return;

   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->scratch_size);
   ctx->scratch = (struct ac_llvm_pointer) {
      .value = ac_build_alloca_undef(&ctx->ac, type, "scratch"),
      .pointee_type = type
   };
}

static void setup_constant_data(struct ac_nir_context *ctx, struct nir_shader *shader)
{
   if (!shader->constant_data)
      return;

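   /* Embed the shader's constant data as a hidden, read-only i8 array in
    * the constant address space.
    */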
   LLVMValueRef data = LLVMConstStringInContext(ctx->ac.context, shader->constant_data,
                                                shader->constant_data_size, true);
   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);
   LLVMValueRef global =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "const_data", AC_ADDR_SPACE_CONST);

   LLVMSetInitializer(global, data);
   LLVMSetGlobalConstant(global, true);
   LLVMSetVisibility(global, LLVMHiddenVisibility);
   ctx->constant_data = (struct ac_llvm_pointer) {
      .value = global,
      .pointee_type = type
   };
}

static void setup_shared(struct ac_nir_context *ctx, struct nir_shader *nir)
{
   if (ctx->ac.lds.value)
      return;

   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, nir->info.shared_size);

   LLVMValueRef lds =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "compute_lds", AC_ADDR_SPACE_LDS);
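   /* Aligning to the maximum LDS size (64KB) presumably forces this
    * allocation to start at LDS offset 0, which is what NIR's zero-based
    * shared-memory offsets assume.
    */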
   LLVMSetAlignment(lds, 64 * 1024);

   ctx->ac.lds = (struct ac_llvm_pointer) {
      .value = lds,
      .pointee_type = type
   };
}

static void setup_gds(struct ac_nir_context *ctx, nir_function_impl *impl)
{
   bool has_gds_atomic = false;

   if (ctx->ac.gfx_level >= GFX10 &&
       (ctx->stage == MESA_SHADER_VERTEX ||
        ctx->stage == MESA_SHADER_TESS_EVAL ||
        ctx->stage == MESA_SHADER_GEOMETRY)) {

      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            has_gds_atomic |= intrin->intrinsic == nir_intrinsic_gds_atomic_add_amd;
         }
      }
   }

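   /* Reserve 256 bytes of GDS for shaders that perform GDS atomics
    * (likely the GFX10+ streamout/NGG counters), communicated to the
    * backend via a target-dependent function attribute.
    */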
   unsigned gds_size = has_gds_atomic ? 0x100 : 0;

   if (gds_size)
      ac_llvm_add_target_dep_function_attr(ctx->main_function, "amdgpu-gds-size", gds_size);
}

bool ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                      const struct ac_shader_args *args, struct nir_shader *nir)
{
   struct ac_nir_context ctx = {0};
   struct nir_function *func;
   bool ret;

   ctx.ac = *ac;
   ctx.abi = abi;
   ctx.args = args;

   ctx.stage = nir->info.stage;
   ctx.info = &nir->info;

   ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

   ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   if (ctx.abi->kill_ps_if_inf_interp)
      ctx.verified_interp =
         _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_index_ssa_defs(func->impl);
   ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

   setup_scratch(&ctx, nir);
   setup_constant_data(&ctx, nir);
   setup_gds(&ctx, func->impl);

   if (gl_shader_stage_is_compute(nir->info.stage))
      setup_shared(&ctx, nir);

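   /* Emit all control flow first; phi incoming values are attached in a
    * second pass once every predecessor block has been created.
    */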
   if ((ret = visit_cf_list(&ctx, &func->impl->body)))
      phi_post_pass(&ctx);

   free(ctx.ssa_defs);
   ralloc_free(ctx.defs);
   ralloc_free(ctx.phis);
   if (ctx.abi->kill_ps_if_inf_interp)
      ralloc_free(ctx.verified_interp);

   return ret;
}