/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_conv.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_intr.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir.h"
#include "nir_deref.h"
#include "nir_search_helpers.h"


// Doing AOS (and linear) codegen?
static bool
is_aos(const struct lp_build_nir_context *bld_base)
{
   // AOS is used for vectors of uint8[16]
   return bld_base->base.type.length == 16 && bld_base->base.type.width == 8;
}


static void
visit_cf_list(struct lp_build_nir_context *bld_base,
              struct exec_list *list);

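/* Bitcast an SSA value to the SoA vector type matching the given NIR ALU
 * type and bit size.  1-bit booleans share the 32-bit uint build context;
 * types with no matching build context are returned unchanged.
 */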
static LLVMValueRef
cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
          nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->half_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 1:
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}


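/* Map a GLSL sampler dimension (plus arrayness) to the pipe texture target. */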
static unsigned
glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_SUBPASS:
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      pipe_target = PIPE_TEXTURE_2D_ARRAY;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_MS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}


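/* Look up / record the LLVM values backing NIR SSA defs. */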
static LLVMValueRef
get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   return bld_base->ssa_defs[src.ssa->index];
}


static void
assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}


static void
assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_def *ssa,
                LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (ssa->num_components == 1 || is_aos(bld_base)) {
      assign_ssa(bld_base, ssa->index, vals[0]);
   } else {
      assign_ssa(bld_base, ssa->index,
             lp_nir_array_build_gather_values(bld_base->base.gallivm->builder,
                                              vals, ssa->num_components));
   }
}


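/* Float comparison yielding a 32-bit boolean mask per channel.  Everything
 * except "not equal" uses an ordered compare so a NaN operand yields false;
 * "not equal" must yield true on NaN.
 */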
static LLVMValueRef
fcmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 16)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


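/* Integer comparison yielding a 32-bit boolean mask per channel. */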
static LLVMValueRef
icmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       bool is_unsigned,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld =
      get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


/**
 * Get a source register value for an ALU instruction.
 * This is where swizzles are handled.  There should be no negation
 * or absolute value modifiers.
 * num_components indicates the number of components needed in the
 * returned array or vector.
 */
static LLVMValueRef
get_alu_src(struct lp_build_nir_context *bld_base,
            nir_alu_src src,
            unsigned num_components)
{
   assert(num_components >= 1);
   assert(num_components <= 4);

   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   const unsigned src_components = nir_src_num_components(src.src);
   assert(src_components > 0);
   LLVMValueRef value = get_src(bld_base, src.src);
   assert(value);

   /* check if swizzling needed for the src vector */
   bool need_swizzle = false;
   for (unsigned i = 0; i < src_components; ++i) {
      if (src.swizzle[i] != i) {
         need_swizzle = true;
         break;
      }
   }

   if (is_aos(bld_base) && !need_swizzle) {
      return value;
   }

   if (need_swizzle || num_components != src_components) {
      if (is_aos(bld_base) && need_swizzle) {
         // Handle swizzle for AOS
         assert(LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind);

         // swizzle vector of ((r,g,b,a), (r,g,b,a), (r,g,b,a), (r,g,b,a))
         assert(bld_base->base.type.width == 8);
         assert(bld_base->base.type.length == 16);

         // Do our own swizzle here since lp_build_swizzle_aos_n() does
         // not do what we want.
         // Ex: value = {r0,g0,b0,a0, r1,g1,b1,a1, r2,g2,b2,a2, r3,g3,b3,a3}.
         // aos swizzle = {2,1,0,3}  // swap red/blue
         // shuffles = {2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15}
         // result = {b0,g0,r0,a0, b1,g1,r1,a1, b2,g2,r2,a2, b3,g3,r3,a3}.
         LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH];
         for (unsigned i = 0; i < 16; i++) {
            unsigned chan = i % 4;
            /* apply src register swizzle */
            if (chan < num_components) {
               chan = src.swizzle[chan];
            } else {
               chan = src.swizzle[0];
            }
            /* apply aos swizzle */
            chan = lp_nir_aos_swizzle(bld_base, chan);
            shuffles[i] = lp_build_const_int32(gallivm, (i & ~3) + chan);
         }
         value = LLVMBuildShuffleVector(builder, value,
                                        LLVMGetUndef(LLVMTypeOf(value)),
                                        LLVMConstVector(shuffles, 16), "");
      } else if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }

   return value;
}


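/* Convert a boolean mask (~0 / 0) to float: AND with the bit pattern of
 * 1.0f so true lanes become exactly 1.0, then widen or narrow the result
 * to the requested bit size.
 */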
static LLVMValueRef
emit_b2f(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result =
      LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                   LLVMBuildBitCast(builder,
                                    lp_build_const_vec(bld_base->base.gallivm,
                                                       bld_base->base.type,
                                                       1.0),
                                    bld_base->int_bld.vec_type, ""),
                   "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 16:
      result = LLVMBuildFPTrunc(builder, result,
                                bld_base->half_bld.vec_type, "");
      break;
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result,
                              bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}


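/* Convert a boolean mask (~0 / 0) to an integer 1 / 0 of the requested size. */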
static LLVMValueRef
emit_b2i(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder,
                          cast_type(bld_base, src0, nir_type_int, 32),
                          lp_build_const_int_vec(bld_base->base.gallivm,
                                                 bld_base->base.type, 1), "");
   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(builder, result, bld_base->int8_bld.vec_type, "");
   case 16:
      return LLVMBuildTrunc(builder, result, bld_base->int16_bld.vec_type, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}


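/* 32-bit boolean select: rebuild a full lane mask from src[0] != 0, then
 * blend src[1]/src[2] with it. */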
static LLVMValueRef
emit_b32csel(struct lp_build_nir_context *bld_base,
             unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
             LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}


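/* In SoA mode a 64-bit vector can be viewed as a twice-as-long i32 vector
 * of interleaved lo/hi halves.  split_64bit extracts the low (hi=false) or
 * high (hi=true) 32-bit halves, honoring endianness; merge_64bit is the
 * inverse interleave.
 */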
static LLVMValueRef
split_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src,
           LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}


static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}


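/* 16-bit analogues of split_64bit/merge_64bit: a 32-bit vector viewed as
 * interleaved 16-bit lo/hi halves. */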
static LLVMValueRef
split_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt16TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}


static LLVMValueRef
merge_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->int16_bld.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->int_bld.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}


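/* Replace the divisor by 1 in lanes where a signed division would overflow
 * (numerator == INT_MIN, divisor == -1), since on common hardware that
 * traps just like dividing by zero. */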
static LLVMValueRef
get_signed_divisor(struct gallivm_state *gallivm,
                   struct lp_build_context *int_bld,
                   struct lp_build_context *mask_bld,
                   int src_bit_size,
                   LLVMValueRef src, LLVMValueRef divisor)
{
   LLVMBuilderRef builder = gallivm->builder;
   /* However for signed divides SIGFPE can occur if the numerator is INT_MIN
      and the divisor is -1. */
   /* set mask if numerator == INT_MIN */
   long long min_val;
   switch (src_bit_size) {
   case 8:
      min_val = INT8_MIN;
      break;
   case 16:
      min_val = INT16_MIN;
      break;
   default:
   case 32:
      min_val = INT_MIN;
      break;
   case 64:
      min_val = INT64_MIN;
      break;
   }
   LLVMValueRef div_mask2 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src,
                                         lp_build_const_int_vec(gallivm, int_bld->type, min_val));
   /* set another mask if divisor is -1 */
   LLVMValueRef div_mask3 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, divisor,
                                         lp_build_const_int_vec(gallivm, int_bld->type, -1));
   div_mask2 = LLVMBuildAnd(builder, div_mask2, div_mask3, "");

   divisor = lp_build_select(mask_bld, div_mask2, int_bld->one, divisor);
   return divisor;
}


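/* Integer division with defined divide-by-zero behavior: lanes with a zero
 * divisor divide by -1 instead, and the result is then forced to ~0 for
 * unsigned (d3d10 behavior) or 0 for signed. */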
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);

   /* avoid divide by 0: convert a zero divisor to -1 (all ones) */
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   LLVMValueRef divisor = LLVMBuildOr(builder, div_mask, src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff, at least with
       * d3d10; may as well do the same for idiv. */
      return LLVMBuildOr(builder, div_mask, result, "");
}


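/* Integer modulo with the same divide-by-zero guard as do_int_divide;
 * lanes with a zero divisor produce ~0. */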
static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}

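/* Emit LLVM IR for a single NIR ALU opcode.  Sources arrive already split
 * per channel and cast to the opcode's input types by visit_alu(). */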
static LLVMValueRef
do_alu_action(struct lp_build_nir_context *bld_base,
              const nir_alu_instr *instr,
              unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
              LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;

   switch (instr->op) {
   case nir_op_b2f16:
      result = emit_b2f(bld_base, src[0], 16);
      break;
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i8:
      result = emit_b2i(bld_base, src[0], 8);
      break;
   case nir_op_b2i16:
      result = emit_b2i(bld_base, src[0], 16);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->int_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2f16:
      if (src_bit_size[0] == 64)
         src[0] = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->half_bld.vec_type, "");
      break;
   case nir_op_f2f32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildFPExt(builder, src[0],
                                 bld_base->base.vec_type, "");
      else
         result = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
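   /* Float-to-integer casts use LLVM's saturating llvm.fpto{s,u}i.sat
    * intrinsics so out-of-range values clamp to the integer range and NaN
    * becomes 0, rather than producing poison. */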
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64: {
      nir_alu_type dst_type = nir_op_infos[instr->op].output_type;
      bool is_unsigned = nir_alu_type_get_base_type(dst_type) == nir_type_uint;
      LLVMTypeRef int_type = get_int_bld(bld_base, is_unsigned, nir_alu_type_get_type_size(dst_type))->vec_type;

      char name[64];
      char tmp[64];
      char intrinsic[64];
      snprintf(name, sizeof(name), "llvm.fpto%ci.sat", is_unsigned ? 'u' : 's');
      lp_format_intrinsic(tmp, 64, name, int_type);
      lp_format_intrinsic(intrinsic, 64, tmp, LLVMTypeOf(src[0]));
      result = lp_build_intrinsic_unary(builder, intrinsic, int_type, src[0]);
      break;
   }
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      result = lp_build_ddx(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      result = lp_build_ddy(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge:
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb: {
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      result = lp_build_cttz(int_bld, src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_fisfinite32:
      unreachable("Should have been lowered in nir_opt_algebraic_late.");
   case nir_op_flog2:
      result = lp_build_log2_safe(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_flt:
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmax:
   case nir_op_fmin: {
      enum gallivm_nan_behavior minmax_nan;
      int first = 0;

      /* If one of the sources is known to be a number (i.e., not NaN), then
       * better code can be generated by passing that information along.
       */
      if (is_a_number(bld_base->range_ht, instr, 1,
                      0 /* unused num_components */,
                      NULL /* unused swizzle */)) {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else if (is_a_number(bld_base->range_ht, instr, 0,
                             0 /* unused num_components */,
                             NULL /* unused swizzle */)) {
         first = 1;
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER;
      }

      if (instr->op == nir_op_fmin) {
         result = lp_build_min_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      } else {
         result = lp_build_max_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      }
      break;
   }
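   /* NIR's fmod is the floor-based modulo,
    * src0 - src1 * floor(src0 / src1), so the result takes src1's sign. */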
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      if (src_bit_size[0] == 16) {
         struct lp_build_context *bld = get_flt_bld(bld_base, 16);
         char intrinsic[64];
         lp_format_intrinsic(intrinsic, 64, "llvm.roundeven", bld->vec_type);
         result = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, src[0]);
      } else {
         result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      }
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2f16:
      result = LLVMBuildSIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_imod:
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
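   /* Shift counts are first brought to the operand's width and masked to
    * bit_size - 1, giving the wrap-around semantics NIR specifies instead
    * of LLVM's undefined out-of-range shifts. */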
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = merge_16bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->base.vec_type, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x:
      result = split_16bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_32_2x16_split_y:
      result = split_16bit(bld_base, src[0], true);
      break;
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint64_bld.vec_type, "");
      break;
   }
   case nir_op_pack_32_4x8_split: {
      LLVMValueRef tmp1 = merge_16bit(bld_base, src[0], src[1]);
      LLVMValueRef tmp2 = merge_16bit(bld_base, src[2], src[3]);
      tmp1 = LLVMBuildBitCast(builder, tmp1, bld_base->uint16_bld.vec_type, "");
      tmp2 = LLVMBuildBitCast(builder, tmp2, bld_base->uint16_bld.vec_type, "");
      LLVMValueRef tmp = merge_16bit(bld_base, tmp1, tmp2);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_u2f16:
      result = LLVMBuildUIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
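   /* ufind_msb: count leading zeros, then flip that into a bit index from
    * the LSB (bit_size - 1 - clz). */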
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
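   /* bcsel operands may mix pointer and integer values; coerce both sources
    * to a common representation before emitting the select. */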
   case nir_op_bcsel: {
      LLVMTypeRef src1_type = LLVMTypeOf(src[1]);
      LLVMTypeRef src2_type = LLVMTypeOf(src[2]);

      if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
          LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
         src[2] = LLVMBuildIntToPtr(builder, src[2], src1_type, "");
      } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
                 LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
         src[1] = LLVMBuildIntToPtr(builder, src[1], src2_type, "");
      }

      for (int i = 1; i <= 2; i++) {
         LLVMTypeRef type = LLVMTypeOf(src[i]);
         if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
            break;
         src[i] = LLVMBuildBitCast(builder, src[i], get_int_bld(bld_base, true, src_bit_size[i])->vec_type, "");
      }
      return LLVMBuildSelect(builder, src[0], src[1], src[2], "");
   }
   default:
      assert(0);
      break;
   }
   return result;
}


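/* Translate one NIR ALU instruction.  vecN and fsum are handled as special
 * cases; everything else runs do_alu_action once per destination channel in
 * SoA mode, or once overall in AOS mode. */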
static void
visit_alu(struct lp_build_nir_context *bld_base,
          const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   const unsigned num_components = instr->def.num_components;
   unsigned src_components;

   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_amd:
      src_components = 3;
      break;
   case nir_op_fsum2:
   case nir_op_fsum3:
   case nir_op_fsum4:
      src_components = nir_op_infos[instr->op].input_sizes[0];
      break;
   default:
      src_components = num_components;
      break;
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 ||
       instr->op == nir_op_vec3 ||
       instr->op == nir_op_vec2 ||
       instr->op == nir_op_vec8 ||
       instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i],
                               nir_op_infos[instr->op].input_types[i],
                               src_bit_size[i]);
      }
   } else if (instr->op == nir_op_fsum4 ||
              instr->op == nir_op_fsum3 ||
              instr->op == nir_op_fsum2) {
      for (unsigned c = 0; c < nir_op_infos[instr->op].input_sizes[0]; c++) {
         LLVMValueRef temp_chan = LLVMBuildExtractValue(gallivm->builder,
                                                          src[0], c, "");
         temp_chan = cast_type(bld_base, temp_chan,
                               nir_op_infos[instr->op].input_types[0],
                               src_bit_size[0]);
         result[0] = (c == 0) ? temp_chan
            : lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                           result[0], temp_chan);
      }
   } else if (is_aos(bld_base)) {
      result[0] = do_alu_action(bld_base, instr, src_bit_size, src);
   } else {
      /* Loop for R,G,B,A channels */
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         /* Loop over instruction operands */
         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                     src[i], c, "");
            } else {
               src_chan[i] = src[i];
            }
            src_chan[i] = cast_type(bld_base, src_chan[i],
                                    nir_op_infos[instr->op].input_types[i],
                                    src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c],
                               nir_op_infos[instr->op].output_type,
                               instr->def.bit_size);
      }
   }
   assign_ssa_dest(bld_base, &instr->def, result);
}


static void
visit_load_const(struct lp_build_nir_context *bld_base,
                 const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   bld_base->load_const(bld_base, instr, result);
   assign_ssa_dest(bld_base, &instr->def, result);
}


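/* Walk a deref chain and split the IO offset it denotes into a constant
 * part (counted in attribute slots) and an optional per-lane indirect part.
 * For arrayed IO the leading vertex index is returned separately. */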
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact && nir_src_is_const(instr->arr.index)) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}


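/* Lower nir_intrinsic_load_input: reconstruct a stand-in nir_variable from
 * the IO semantics and forward to the per-backend load_var hook. */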
static void
visit_load_input(struct lp_build_nir_context *bld_base,
                 nir_intrinsic_instr *instr,
                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_variable var = {0};
   var.data.location = nir_intrinsic_io_semantics(instr).location;
   var.data.driver_location = nir_intrinsic_base(instr);
   var.data.location_frac = nir_intrinsic_component(instr);

   unsigned nc = instr->def.num_components;
   unsigned bit_size = instr->def.bit_size;

   nir_src offset = *nir_get_io_offset_src(instr);
   bool indirect = !nir_src_is_const(offset);
   if (!indirect)
      assert(nir_src_as_uint(offset) == 0);
   LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;

   bld_base->load_var(bld_base, nir_var_shader_in, nc, bit_size, &var, 0, NULL, 0, indir_index, result);
}


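/* Counterpart of visit_load_input for nir_intrinsic_store_output. */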
static void
visit_store_output(struct lp_build_nir_context *bld_base,
                   nir_intrinsic_instr *instr)
{
   nir_variable var = {0};
   var.data.location = nir_intrinsic_io_semantics(instr).location;
   var.data.driver_location = nir_intrinsic_base(instr);
   var.data.location_frac = nir_intrinsic_component(instr);

   unsigned mask = nir_intrinsic_write_mask(instr);

   unsigned bit_size = nir_src_bit_size(instr->src[0]);
   LLVMValueRef src = get_src(bld_base, instr->src[0]);

   nir_src offset = *nir_get_io_offset_src(instr);
   bool indirect = !nir_src_is_const(offset);
   if (!indirect)
      assert(nir_src_as_uint(offset) == 0);
   LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;

   if (mask == 0x1 && LLVMGetTypeKind(LLVMTypeOf(src)) == LLVMArrayTypeKind) {
      src = LLVMBuildExtractValue(bld_base->base.gallivm->builder,
                                  src, 0, "");
   }

   bld_base->store_var(bld_base, nir_var_shader_out, util_last_bit(mask),
                       bit_size, &var, mask, NULL, 0, indir_index, src);
}


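/* Load from a NIR register: look up the LLVM storage recorded for the
 * register declaration, let the backend read it, and split multi-component
 * results into the per-channel array form used for SSA values. */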
1299 static void
visit_load_reg(struct lp_build_nir_context * bld_base,nir_intrinsic_instr * instr,LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])1300 visit_load_reg(struct lp_build_nir_context *bld_base,
1301                nir_intrinsic_instr *instr,
1302                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1303 {
1304    struct gallivm_state *gallivm = bld_base->base.gallivm;
1305    LLVMBuilderRef builder = gallivm->builder;
1306 
1307    nir_intrinsic_instr *decl = nir_reg_get_decl(instr->src[0].ssa);
1308    unsigned base = nir_intrinsic_base(instr);
1309 
1310    struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, decl);
1311    LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
1312 
1313    unsigned bit_size = nir_intrinsic_bit_size(decl);
1314    struct lp_build_context *reg_bld = get_int_bld(bld_base, true, bit_size);
1315 
1316    LLVMValueRef indir_src = NULL;
1317    if (instr->intrinsic == nir_intrinsic_load_reg_indirect) {
1318       indir_src = cast_type(bld_base, get_src(bld_base, instr->src[1]),
1319                             nir_type_uint, 32);
1320    }
1321 
1322    LLVMValueRef val = bld_base->load_reg(bld_base, reg_bld, decl, base, indir_src, reg_storage);
1323 
1324    if (!is_aos(bld_base) && instr->def.num_components > 1) {
1325       for (unsigned i = 0; i < instr->def.num_components; i++)
1326          result[i] = LLVMBuildExtractValue(builder, val, i, "");
1327    } else {
1328       result[0] = val;
1329    }
1330 }
1331 
1332 
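/* Store to a NIR register. Multi-component SOA values are split into
 * per-channel vectors first; AOS values are passed through whole. The
 * intrinsic's write_mask (asserted non-zero) selects which channels the
 * store_reg callback actually updates.
 */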
1333 static void
1334 visit_store_reg(struct lp_build_nir_context *bld_base,
1335                 nir_intrinsic_instr *instr)
1336 {
1337    struct gallivm_state *gallivm = bld_base->base.gallivm;
1338    LLVMBuilderRef builder = gallivm->builder;
1339 
1340    nir_intrinsic_instr *decl = nir_reg_get_decl(instr->src[1].ssa);
1341    unsigned base = nir_intrinsic_base(instr);
1342    unsigned write_mask = nir_intrinsic_write_mask(instr);
1343    assert(write_mask != 0x0);
1344 
1345    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1346    LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS] = { NULL };
1347    if (!is_aos(bld_base) && nir_src_num_components(instr->src[0]) > 1) {
1348       for (unsigned i = 0; i < nir_src_num_components(instr->src[0]); i++)
1349          vals[i] = LLVMBuildExtractValue(builder, val, i, "");
1350    } else {
1351       vals[0] = val;
1352    }
1353 
1354    struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, decl);
1355    LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
1356 
1357    unsigned bit_size = nir_intrinsic_bit_size(decl);
1358    struct lp_build_context *reg_bld = get_int_bld(bld_base, true, bit_size);
1359 
1360    LLVMValueRef indir_src = NULL;
1361    if (instr->intrinsic == nir_intrinsic_store_reg_indirect) {
1362       indir_src = cast_type(bld_base, get_src(bld_base, instr->src[2]),
1363                             nir_type_uint, 32);
1364    }
1365 
1366    bld_base->store_reg(bld_base, reg_bld, decl, write_mask, base,
1367                        indir_src, reg_storage, vals);
1368 }
1369 
1370 
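/* Check whether a constant index into a compact array variable (such as the
 * tessellation level arrays) is out of bounds. For arrayed I/O the
 * per-vertex outer array is stripped first so the bound comes from the
 * element type.
 */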
1371 static bool
1372 compact_array_index_oob(struct lp_build_nir_context *bld_base, nir_variable *var, const uint32_t index)
1373 {
1374    const struct glsl_type *type = var->type;
1375    if (nir_is_arrayed_io(var, bld_base->shader->info.stage)) {
1376       assert(glsl_type_is_array(type));
1377       type = glsl_get_array_element(type);
1378    }
1379    return index >= glsl_get_length(type);
1380 }
1381 
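/* Handle nir_intrinsic_load_deref. The deref chain is folded into a
 * constant offset plus an optional indirect index; depending on the stage,
 * a vertex index (GS inputs) or an indirect vertex index (TCS/TES
 * per-vertex I/O) is extracted as well. Constant indices proven out of
 * bounds for compact arrays return undef rather than emitting a load.
 */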
1382 static void
1383 visit_load_var(struct lp_build_nir_context *bld_base,
1384                nir_intrinsic_instr *instr,
1385                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1386 {
1387    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1388    nir_variable *var = nir_deref_instr_get_variable(deref);
1389    assert(util_bitcount(deref->modes) == 1);
1390    nir_variable_mode mode = deref->modes;
1391    unsigned const_index = 0;
1392    LLVMValueRef indir_index = NULL;
1393    LLVMValueRef indir_vertex_index = NULL;
1394    unsigned vertex_index = 0;
1395    unsigned nc = instr->def.num_components;
1396    unsigned bit_size = instr->def.bit_size;
1397    if (var) {
1398       bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
1399          var->data.mode == nir_var_shader_in;
1400       bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
1401          var->data.mode == nir_var_shader_in;
1402       bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1403          var->data.mode == nir_var_shader_in;
1404       bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1405          var->data.mode == nir_var_shader_out && !var->data.patch;
1406       bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
1407          var->data.mode == nir_var_shader_in && !var->data.patch;
1408 
1409       mode = var->data.mode;
1410 
1411       get_deref_offset(bld_base, deref, vs_in,
1412                    gs_in ? &vertex_index : NULL,
1413                    (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
1414                    &const_index, &indir_index);
1415 
1416       /* Return undef for loads definitely outside of the array bounds
1417        * (tcs-tes-levels-out-of-bounds-read.shader_test).
1418        */
1419       if (var->data.compact && compact_array_index_oob(bld_base, var, const_index)) {
1420          struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
1421                                                           instr->def.bit_size);
1422          for (int i = 0; i < instr->def.num_components; i++)
1423             result[i] = LLVMGetUndef(undef_bld->vec_type);
1424          return;
1425       }
1426    }
1427    bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index,
1428                       indir_vertex_index, const_index, indir_index, result);
1429 }
1430 
1431 
1432 static void
1433 visit_store_var(struct lp_build_nir_context *bld_base,
1434                 nir_intrinsic_instr *instr)
1435 {
1436    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1437    nir_variable *var = nir_deref_instr_get_variable(deref);
1438    assert(util_bitcount(deref->modes) == 1);
1439    nir_variable_mode mode = deref->modes;
1440    int writemask = instr->const_index[0];
1441    unsigned bit_size = nir_src_bit_size(instr->src[1]);
1442    LLVMValueRef src = get_src(bld_base, instr->src[1]);
1443    unsigned const_index = 0;
1444    LLVMValueRef indir_index = NULL, indir_vertex_index = NULL;
1445    if (var) {
1446       bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1447          var->data.mode == nir_var_shader_out && !var->data.patch;
1448       bool mesh_out = bld_base->shader->info.stage == MESA_SHADER_MESH &&
1449          var->data.mode == nir_var_shader_out;
1450       get_deref_offset(bld_base, deref, false, NULL,
1451                        (tcs_out || mesh_out) ? &indir_vertex_index : NULL,
1452                        &const_index, &indir_index);
1453 
1454       /* Skip stores definitely outside of the array bounds
1455        * (tcs-tes-levels-out-of-bounds-write.shader_test).
1456        */
1457       if (var->data.compact && compact_array_index_oob(bld_base, var, const_index))
1458          return;
1459    }
1460    bld_base->store_var(bld_base, mode, instr->num_components, bit_size,
1461                        var, writemask, indir_vertex_index, const_index,
1462                        indir_index, src);
1463 }
1464 
1465 
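/* Handle nir_intrinsic_load_ubo: src[0] is the buffer index and src[1] the
 * byte offset. A single-component index vector is collapsed to its first
 * element, and src[1]'s uniformity is forwarded so the backend can emit a
 * scalar load when all lanes agree. Illustrative NIR shape (syntax varies
 * by Mesa version):
 *
 *    vec4 32 ssa_3 = intrinsic load_ubo (ssa_1, ssa_2) (...)
 */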
1466 static void
1467 visit_load_ubo(struct lp_build_nir_context *bld_base,
1468                nir_intrinsic_instr *instr,
1469                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1470 {
1471    struct gallivm_state *gallivm = bld_base->base.gallivm;
1472    LLVMBuilderRef builder = gallivm->builder;
1473    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1474    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1475 
1476    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
1477 
1478    if (nir_src_num_components(instr->src[0]) == 1)
1479       idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
1480 
1481    bld_base->load_ubo(bld_base, instr->def.num_components,
1482                       instr->def.bit_size,
1483                       offset_is_uniform, idx, offset, result);
1484 }
1485 
1486 
1487 static void
1488 visit_load_push_constant(struct lp_build_nir_context *bld_base,
1489                          nir_intrinsic_instr *instr,
1490                          LLVMValueRef result[4])
1491 {
1492    struct gallivm_state *gallivm = bld_base->base.gallivm;
1493    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1494    LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
1495    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1496 
1497    bld_base->load_ubo(bld_base, instr->def.num_components,
1498                       instr->def.bit_size,
1499                       offset_is_uniform, idx, offset, result);
1500 }
1501 
1502 
1503 static void
1504 visit_load_ssbo(struct lp_build_nir_context *bld_base,
1505                 nir_intrinsic_instr *instr,
1506                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1507 {
1508    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1509    if (nir_src_num_components(instr->src[0]) == 1)
1510       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1511 
1512    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1513    bool index_and_offset_are_uniform =
1514       nir_src_is_always_uniform(instr->src[0]) &&
1515       nir_src_is_always_uniform(instr->src[1]);
1516    bld_base->load_mem(bld_base, instr->def.num_components,
1517                       instr->def.bit_size,
1518                       index_and_offset_are_uniform, false, idx, offset, result);
1519 }
1520 
1521 
1522 static void
1523 visit_store_ssbo(struct lp_build_nir_context *bld_base,
1524                  nir_intrinsic_instr *instr)
1525 {
1526    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1527 
1528    LLVMValueRef idx = get_src(bld_base, instr->src[1]);
1529    if (nir_src_num_components(instr->src[1]) == 1)
1530       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1531 
1532    LLVMValueRef offset = get_src(bld_base, instr->src[2]);
1533    bool index_and_offset_are_uniform =
1534       nir_src_is_always_uniform(instr->src[1]) &&
1535       nir_src_is_always_uniform(instr->src[2]);
1536    int writemask = instr->const_index[0];
1537    int nc = nir_src_num_components(instr->src[0]);
1538    int bitsize = nir_src_bit_size(instr->src[0]);
1539    bld_base->store_mem(bld_base, writemask, nc, bitsize,
1540                        index_and_offset_are_uniform, false, idx, offset, val);
1541 }
1542 
1543 
1544 static void
1545 visit_get_ssbo_size(struct lp_build_nir_context *bld_base,
1546                     nir_intrinsic_instr *instr,
1547                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1548 {
1549    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1550    if (nir_src_num_components(instr->src[0]) == 1)
1551       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1552 
1553    result[0] = bld_base->get_ssbo_size(bld_base, idx);
1554 }
1555 
1556 
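/* Handle nir_intrinsic_ssbo_atomic[_swap]: src[0] is the buffer index,
 * src[1] the offset, and src[2] (plus src[3] for the swap form) the data
 * operands. The operation itself is taken from the intrinsic's atomic_op
 * index rather than from a per-operation intrinsic.
 */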
1557 static void
1558 visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
1559                   nir_intrinsic_instr *instr,
1560                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1561 {
1562    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1563    if (nir_src_num_components(instr->src[0]) == 1)
1564       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1565 
1566    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1567    LLVMValueRef val = get_src(bld_base, instr->src[2]);
1568    LLVMValueRef val2 = NULL;
1569    int bitsize = nir_src_bit_size(instr->src[2]);
1570    if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap)
1571       val2 = get_src(bld_base, instr->src[3]);
1572 
1573    bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, false, idx,
1574                         offset, val, val2, &result[0]);
1575 }
1576 
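/* Fill in the image binding for lp_img_params; shared by the image load,
 * store and atomic paths. A one-component source is a binding-table index
 * (constant if possible, otherwise a dynamic index offset); anything wider
 * is treated as a bindless resource handle.
 */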
1577 static void
1578 img_params_init_resource(struct lp_build_nir_context *bld_base, struct lp_img_params *params, nir_src src)
1579 {
1580    if (nir_src_num_components(src) == 1) {
1581       if (nir_src_is_const(src))
1582          params->image_index = nir_src_as_int(src);
1583       else
1584          params->image_index_offset = get_src(bld_base, src);
1585 
1586       return;
1587    }
1588 
1589    params->resource = get_src(bld_base, src);
1590 }
1591 
1592 static void
1593 sampler_size_params_init_resource(struct lp_build_nir_context *bld_base, struct lp_sampler_size_query_params *params, nir_src src)
1594 {
1595    if (nir_src_num_components(src) == 1) {
1596       if (nir_src_is_const(src))
1597          params->texture_unit = nir_src_as_int(src);
1598       else
1599          params->texture_unit_offset = get_src(bld_base, src);
1600 
1601       return;
1602    }
1603 
1604    params->resource = get_src(bld_base, src);
1605 }
1606 
1607 static void
1608 visit_load_image(struct lp_build_nir_context *bld_base,
1609                  nir_intrinsic_instr *instr,
1610                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1611 {
1612    struct gallivm_state *gallivm = bld_base->base.gallivm;
1613    LLVMBuilderRef builder = gallivm->builder;
1614    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1615    LLVMValueRef coords[5];
1616    struct lp_img_params params = { 0 };
1617 
1618    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1619                                         nir_intrinsic_image_array(instr));
1620    for (unsigned i = 0; i < 4; i++)
1621       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1622    if (params.target == PIPE_TEXTURE_1D_ARRAY)
1623       coords[2] = coords[1];
1624 
1625    params.coords = coords;
1626    params.outdata = result;
1627    params.img_op = LP_IMG_LOAD;
1628    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
1629        nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS)
1630       params.ms_index = cast_type(bld_base, get_src(bld_base, instr->src[2]),
1631                                   nir_type_uint, 32);
1632 
1633    img_params_init_resource(bld_base, &params, instr->src[0]);
1634    params.format = nir_intrinsic_format(instr);
1635 
1636    bld_base->image_op(bld_base, &params);
1637 }
1638 
1639 
1640 static void
1641 visit_store_image(struct lp_build_nir_context *bld_base,
1642                   nir_intrinsic_instr *instr)
1643 {
1644    struct gallivm_state *gallivm = bld_base->base.gallivm;
1645    LLVMBuilderRef builder = gallivm->builder;
1646    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1647    LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1648    LLVMValueRef coords[5];
1649    struct lp_img_params params = { 0 };
1650 
1651    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr), nir_intrinsic_image_array(instr));
1652    for (unsigned i = 0; i < 4; i++)
1653       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1654    if (params.target == PIPE_TEXTURE_1D_ARRAY)
1655       coords[2] = coords[1];
1656    params.coords = coords;
1657 
1658    params.format = nir_intrinsic_format(instr);
1659 
1660    const struct util_format_description *desc = util_format_description(params.format);
1661    bool integer = desc->channel[util_format_get_first_non_void_channel(params.format)].pure_integer;
1662 
1663    for (unsigned i = 0; i < 4; i++) {
1664       params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
1665 
1666       if (integer)
1667          params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->int_bld.vec_type, "");
1668       else
1669          params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
1670    }
1671    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
1672       params.ms_index = get_src(bld_base, instr->src[2]);
1673    params.img_op = LP_IMG_STORE;
1674 
1675    img_params_init_resource(bld_base, &params, instr->src[0]);
1676 
1677    if (params.target == PIPE_TEXTURE_1D_ARRAY)
1678       coords[2] = coords[1];
1679    bld_base->image_op(bld_base, &params);
1680 }
1681 
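/* Map a NIR atomic opcode onto the matching LLVMAtomicRMWBinOp. The float
 * min/max enumerators only exist in the LLVM 15+ C API, hence the guard.
 * Illustrative use:
 *
 *    LLVMAtomicRMWBinOp op = lp_translate_atomic_op(nir_atomic_op_iadd);
 *    // op == LLVMAtomicRMWBinOpAdd
 */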
1682 LLVMAtomicRMWBinOp
1683 lp_translate_atomic_op(nir_atomic_op op)
1684 {
1685    switch (op) {
1686    case nir_atomic_op_iadd: return LLVMAtomicRMWBinOpAdd;
1687    case nir_atomic_op_xchg: return LLVMAtomicRMWBinOpXchg;
1688    case nir_atomic_op_iand: return LLVMAtomicRMWBinOpAnd;
1689    case nir_atomic_op_ior:  return LLVMAtomicRMWBinOpOr;
1690    case nir_atomic_op_ixor: return LLVMAtomicRMWBinOpXor;
1691    case nir_atomic_op_umin: return LLVMAtomicRMWBinOpUMin;
1692    case nir_atomic_op_umax: return LLVMAtomicRMWBinOpUMax;
1693    case nir_atomic_op_imin: return LLVMAtomicRMWBinOpMin;
1694    case nir_atomic_op_imax: return LLVMAtomicRMWBinOpMax;
1695    case nir_atomic_op_fadd: return LLVMAtomicRMWBinOpFAdd;
1696 #if LLVM_VERSION_MAJOR >= 15
1697    case nir_atomic_op_fmin: return LLVMAtomicRMWBinOpFMin;
1698    case nir_atomic_op_fmax: return LLVMAtomicRMWBinOpFMax;
1699 #endif
1700    default:          unreachable("Unexpected atomic");
1701    }
1702 }
1703 
1704 void
1705 lp_img_op_from_intrinsic(struct lp_img_params *params, nir_intrinsic_instr *instr)
1706 {
1707    if (instr->intrinsic == nir_intrinsic_image_load ||
1708        instr->intrinsic == nir_intrinsic_bindless_image_load) {
1709       params->img_op = LP_IMG_LOAD;
1710       return;
1711    }
1712 
1713    if (instr->intrinsic == nir_intrinsic_image_store ||
1714        instr->intrinsic == nir_intrinsic_bindless_image_store) {
1715       params->img_op = LP_IMG_STORE;
1716       return;
1717    }
1718 
1719    if (instr->intrinsic == nir_intrinsic_image_atomic_swap ||
1720        instr->intrinsic == nir_intrinsic_bindless_image_atomic_swap) {
1721       params->img_op = LP_IMG_ATOMIC_CAS;
1722       return;
1723    }
1724 
1725    if (instr->intrinsic == nir_intrinsic_image_atomic ||
1726        instr->intrinsic == nir_intrinsic_bindless_image_atomic) {
1727       params->img_op = LP_IMG_ATOMIC;
1728       params->op = lp_translate_atomic_op(nir_intrinsic_atomic_op(instr));
1729    } else {
1730       params->img_op = -1;
1731    }
1732 }
1733 
1734 
1735 static void
1736 visit_atomic_image(struct lp_build_nir_context *bld_base,
1737                    nir_intrinsic_instr *instr,
1738                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1739 {
1740    struct gallivm_state *gallivm = bld_base->base.gallivm;
1741    LLVMBuilderRef builder = gallivm->builder;
1742    struct lp_img_params params = { 0 };
1743    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1744    LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1745    LLVMValueRef coords[5];
1746 
1747    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1748                                         nir_intrinsic_image_array(instr));
1749    for (unsigned i = 0; i < 4; i++) {
1750       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1751    }
1752    if (params.target == PIPE_TEXTURE_1D_ARRAY) {
1753       coords[2] = coords[1];
1754    }
1755 
1756    params.coords = coords;
1757 
1758    params.format = nir_intrinsic_format(instr);
1759 
1760    const struct util_format_description *desc = util_format_description(params.format);
1761    bool integer = desc->channel[util_format_get_first_non_void_channel(params.format)].pure_integer;
1762 
1763    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
1764       params.ms_index = get_src(bld_base, instr->src[2]);
1765 
1766    if (instr->intrinsic == nir_intrinsic_image_atomic_swap ||
1767        instr->intrinsic == nir_intrinsic_bindless_image_atomic_swap) {
1768       LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
1769       params.indata[0] = in_val;
1770       params.indata2[0] = cas_val;
1771 
1772       if (integer)
1773          params.indata2[0] = LLVMBuildBitCast(builder, params.indata2[0], bld_base->int_bld.vec_type, "");
1774       else
1775          params.indata2[0] = LLVMBuildBitCast(builder, params.indata2[0], bld_base->base.vec_type, "");
1776    } else {
1777       params.indata[0] = in_val;
1778    }
1779 
1780    if (integer)
1781       params.indata[0] = LLVMBuildBitCast(builder, params.indata[0], bld_base->int_bld.vec_type, "");
1782    else
1783       params.indata[0] = LLVMBuildBitCast(builder, params.indata[0], bld_base->base.vec_type, "");
1784 
1785    params.outdata = result;
1786 
1787    lp_img_op_from_intrinsic(&params, instr);
1788 
1789    img_params_init_resource(bld_base, &params, instr->src[0]);
1790 
1791    bld_base->image_op(bld_base, &params);
1792 }
1793 
1794 
1795 static void
1796 visit_image_size(struct lp_build_nir_context *bld_base,
1797                  nir_intrinsic_instr *instr,
1798                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1799 {
1800    struct lp_sampler_size_query_params params = { 0 };
1801 
1802    sampler_size_params_init_resource(bld_base, &params, instr->src[0]);
1803 
1804    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1805                                         nir_intrinsic_image_array(instr));
1806    params.sizes_out = result;
1807    params.ms = nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
1808       nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS;
1809    params.format = nir_intrinsic_format(instr);
1810 
1811    bld_base->image_size(bld_base, &params);
1812 }
1813 
1814 
1815 static void
1816 visit_image_samples(struct lp_build_nir_context *bld_base,
1817                     nir_intrinsic_instr *instr,
1818                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1819 {
1820    struct lp_sampler_size_query_params params = { 0 };
1821 
1822    sampler_size_params_init_resource(bld_base, &params, instr->src[0]);
1823 
1824    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1825                                         nir_intrinsic_image_array(instr));
1826    params.sizes_out = result;
1827    params.ms = nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
1828       nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS;
1829    params.samples_only = true;
1830 
1831    params.format = nir_intrinsic_format(instr);
1832 
1833    bld_base->image_size(bld_base, &params);
1834 }
1835 
1836 
1837 static void
1838 visit_shared_load(struct lp_build_nir_context *bld_base,
1839                   nir_intrinsic_instr *instr,
1840                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1841 {
1842    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1843    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1844    bld_base->load_mem(bld_base, instr->def.num_components,
1845                       instr->def.bit_size,
1846                       offset_is_uniform, false, NULL, offset, result);
1847 }
1848 
1849 
1850 static void
1851 visit_shared_store(struct lp_build_nir_context *bld_base,
1852                    nir_intrinsic_instr *instr)
1853 {
1854    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1855    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1856    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
1857    int writemask = instr->const_index[1];
1858    int nc = nir_src_num_components(instr->src[0]);
1859    int bitsize = nir_src_bit_size(instr->src[0]);
1860    bld_base->store_mem(bld_base, writemask, nc, bitsize,
1861                        offset_is_uniform, false, NULL, offset, val);
1862 }
1863 
1864 
1865 static void
1866 visit_shared_atomic(struct lp_build_nir_context *bld_base,
1867                     nir_intrinsic_instr *instr,
1868                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1869 {
1870    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1871    LLVMValueRef val = get_src(bld_base, instr->src[1]);
1872    LLVMValueRef val2 = NULL;
1873    int bitsize = nir_src_bit_size(instr->src[1]);
1874    if (instr->intrinsic == nir_intrinsic_shared_atomic_swap)
1875       val2 = get_src(bld_base, instr->src[2]);
1876 
1877    bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, false, NULL,
1878                         offset, val, val2, &result[0]);
1879 }
1880 
1881 
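/* Handle nir_intrinsic_barrier. Any memory semantics are honoured with a
 * sequentially consistent LLVM fence; an execution scope other than
 * SCOPE_NONE additionally emits the backend's control barrier.
 */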
1882 static void
1883 visit_barrier(struct lp_build_nir_context *bld_base,
1884               nir_intrinsic_instr *instr)
1885 {
1886    LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1887    mesa_scope exec_scope = nir_intrinsic_execution_scope(instr);
1888    unsigned nir_semantics = nir_intrinsic_memory_semantics(instr);
1889 
1890    if (nir_semantics) {
1891       LLVMAtomicOrdering ordering = LLVMAtomicOrderingSequentiallyConsistent;
1892       LLVMBuildFence(builder, ordering, false, "");
1893    }
1894    if (exec_scope != SCOPE_NONE)
1895       bld_base->barrier(bld_base);
1896 }
1897 
1898 
1899 static void
1900 visit_discard(struct lp_build_nir_context *bld_base,
1901               nir_intrinsic_instr *instr)
1902 {
1903    LLVMValueRef cond = NULL;
1904    if (instr->intrinsic == nir_intrinsic_discard_if) {
1905       cond = get_src(bld_base, instr->src[0]);
1906       cond = cast_type(bld_base, cond, nir_type_int, 32);
1907    }
1908    bld_base->discard(bld_base, cond);
1909 }
1910 
1911 
1912 static void
1913 visit_load_kernel_input(struct lp_build_nir_context *bld_base,
1914                         nir_intrinsic_instr *instr,
1915                         LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1916 {
1917    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1918 
1919    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1920    bld_base->load_kernel_arg(bld_base, instr->def.num_components,
1921                              instr->def.bit_size,
1922                              nir_src_bit_size(instr->src[0]),
1923                              offset_is_uniform, offset, result);
1924 }
1925 
1926 
1927 static void
1928 visit_load_global(struct lp_build_nir_context *bld_base,
1929                   nir_intrinsic_instr *instr,
1930                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1931 {
1932    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1933    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1934    bld_base->load_global(bld_base, instr->def.num_components,
1935                          instr->def.bit_size,
1936                          nir_src_bit_size(instr->src[0]),
1937                          offset_is_uniform, addr, result);
1938 }
1939 
1940 
1941 static void
1942 visit_store_global(struct lp_build_nir_context *bld_base,
1943                    nir_intrinsic_instr *instr)
1944 {
1945    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1946    int nc = nir_src_num_components(instr->src[0]);
1947    int bitsize = nir_src_bit_size(instr->src[0]);
1948    LLVMValueRef addr = get_src(bld_base, instr->src[1]);
1949    int addr_bitsize = nir_src_bit_size(instr->src[1]);
1950    int writemask = instr->const_index[0];
1951    bld_base->store_global(bld_base, writemask, nc, bitsize,
1952                           addr_bitsize, addr, val);
1953 }
1954 
1955 
1956 static void
1957 visit_global_atomic(struct lp_build_nir_context *bld_base,
1958                     nir_intrinsic_instr *instr,
1959                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1960 {
1961    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1962    LLVMValueRef val = get_src(bld_base, instr->src[1]);
1963    LLVMValueRef val2 = NULL;
1964    int addr_bitsize = nir_src_bit_size(instr->src[0]);
1965    int val_bitsize = nir_src_bit_size(instr->src[1]);
1966    if (instr->intrinsic == nir_intrinsic_global_atomic_swap)
1967       val2 = get_src(bld_base, instr->src[2]);
1968 
1969    bld_base->atomic_global(bld_base, nir_intrinsic_atomic_op(instr),
1970                            addr_bitsize, val_bitsize, addr, val, val2,
1971                            &result[0]);
1972 }
1973 
1974 #if LLVM_VERSION_MAJOR >= 10
1975 static void visit_shuffle(struct lp_build_nir_context *bld_base,
1976                           nir_intrinsic_instr *instr,
1977                           LLVMValueRef dst[4])
1978 {
1979    LLVMValueRef src = get_src(bld_base, instr->src[0]);
1980    src = cast_type(bld_base, src, nir_type_int,
1981                    nir_src_bit_size(instr->src[0]));
1982    LLVMValueRef index = get_src(bld_base, instr->src[1]);
1983    index = cast_type(bld_base, index, nir_type_uint,
1984                      nir_src_bit_size(instr->src[1]));
1985 
1986    bld_base->shuffle(bld_base, src, index, instr, dst);
1987 }
1988 #endif
1989 
1990 
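/* Handle the interp_deref_at_* intrinsics. at_offset supplies two float
 * offsets within the pixel, at_sample an integer sample index (passed in
 * offsets[0] with the sample flag set), and at_centroid needs no extra
 * operand; the deref is resolved to const/indirect indices as for other
 * variable accesses.
 */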
1991 static void
1992 visit_interp(struct lp_build_nir_context *bld_base,
1993              nir_intrinsic_instr *instr,
1994              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1995 {
1996    struct gallivm_state *gallivm = bld_base->base.gallivm;
1997    LLVMBuilderRef builder = gallivm->builder;
1998    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1999    unsigned num_components = instr->def.num_components;
2000    nir_variable *var = nir_deref_instr_get_variable(deref);
2001    unsigned const_index;
2002    LLVMValueRef indir_index;
2003    LLVMValueRef offsets[2] = { NULL, NULL };
2004    get_deref_offset(bld_base, deref, false, NULL, NULL,
2005                     &const_index, &indir_index);
2006    bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
2007    bool sample = false;
2008    if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
2009       for (unsigned i = 0; i < 2; i++) {
2010          offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
2011          offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
2012       }
2013    } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
2014       offsets[0] = get_src(bld_base, instr->src[1]);
2015       offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
2016       sample = true;
2017    }
2018    bld_base->interp_at(bld_base, num_components, var, centroid, sample,
2019                        const_index, indir_index, offsets, result);
2020 }
2021 
2022 
2023 static void
2024 visit_load_scratch(struct lp_build_nir_context *bld_base,
2025                    nir_intrinsic_instr *instr,
2026                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2027 {
2028    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
2029 
2030    bld_base->load_scratch(bld_base, instr->def.num_components,
2031                           instr->def.bit_size, offset, result);
2032 }
2033 
2034 
2035 static void
2036 visit_store_scratch(struct lp_build_nir_context *bld_base,
2037                     nir_intrinsic_instr *instr)
2038 {
2039    LLVMValueRef val = get_src(bld_base, instr->src[0]);
2040    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
2041    int writemask = instr->const_index[2];
2042    int nc = nir_src_num_components(instr->src[0]);
2043    int bitsize = nir_src_bit_size(instr->src[0]);
2044    bld_base->store_scratch(bld_base, writemask, nc, bitsize, offset, val);
2045 }
2046 
2047 static void
2048 visit_payload_load(struct lp_build_nir_context *bld_base,
2049                   nir_intrinsic_instr *instr,
2050                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2051 {
2052    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
2053    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
2054    bld_base->load_mem(bld_base, instr->def.num_components,
2055                       instr->def.bit_size,
2056                       offset_is_uniform, true, NULL, offset, result);
2057 }
2058 
2059 static void
2060 visit_payload_store(struct lp_build_nir_context *bld_base,
2061                     nir_intrinsic_instr *instr)
2062 {
2063    LLVMValueRef val = get_src(bld_base, instr->src[0]);
2064    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
2065    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
2066    int writemask = instr->const_index[1];
2067    int nc = nir_src_num_components(instr->src[0]);
2068    int bitsize = nir_src_bit_size(instr->src[0]);
2069    bld_base->store_mem(bld_base, writemask, nc, bitsize,
2070                        offset_is_uniform, true, NULL, offset, val);
2071 }
2072 
2073 static void
2074 visit_payload_atomic(struct lp_build_nir_context *bld_base,
2075                      nir_intrinsic_instr *instr,
2076                      LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2077 {
2078    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
2079    LLVMValueRef val = get_src(bld_base, instr->src[1]);
2080    LLVMValueRef val2 = NULL;
2081    int bitsize = nir_src_bit_size(instr->src[1]);
2082    if (instr->intrinsic == nir_intrinsic_task_payload_atomic_swap)
2083       val2 = get_src(bld_base, instr->src[2]);
2084 
2085    bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, true, NULL,
2086                         offset, val, val2, &result[0]);
2087 }
2088 
2089 static void visit_load_param(struct lp_build_nir_context *bld_base,
2090                              nir_intrinsic_instr *instr,
2091                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2092 {
2093    LLVMValueRef param = LLVMGetParam(bld_base->func, nir_intrinsic_param_idx(instr) + LP_RESV_FUNC_ARGS);
2094    struct gallivm_state *gallivm = bld_base->base.gallivm;
2095    if (instr->num_components == 1)
2096       result[0] = param;
2097    else {
2098       for (unsigned i = 0; i < instr->num_components; i++)
2099          result[i] = LLVMBuildExtractValue(gallivm->builder, param, i, "");
2100    }
2101 }
2102 
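/* Central dispatch for NIR intrinsics. Each handler writes its value into
 * result[]; if result[0] is non-NULL afterwards it is assigned to the
 * instruction's SSA def. Unsupported intrinsics are printed to stderr and
 * assert.
 */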
2103 static void
2104 visit_intrinsic(struct lp_build_nir_context *bld_base,
2105                 nir_intrinsic_instr *instr)
2106 {
2107    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
2108    switch (instr->intrinsic) {
2109    case nir_intrinsic_decl_reg:
2110       /* already handled */
2111       break;
2112    case nir_intrinsic_load_reg:
2113    case nir_intrinsic_load_reg_indirect:
2114       visit_load_reg(bld_base, instr, result);
2115       break;
2116    case nir_intrinsic_store_reg:
2117    case nir_intrinsic_store_reg_indirect:
2118       visit_store_reg(bld_base, instr);
2119       break;
2120    case nir_intrinsic_load_input:
2121       visit_load_input(bld_base, instr, result);
2122       break;
2123    case nir_intrinsic_store_output:
2124       visit_store_output(bld_base, instr);
2125       break;
2126    case nir_intrinsic_load_deref:
2127       visit_load_var(bld_base, instr, result);
2128       break;
2129    case nir_intrinsic_store_deref:
2130       visit_store_var(bld_base, instr);
2131       break;
2132    case nir_intrinsic_load_ubo:
2133       visit_load_ubo(bld_base, instr, result);
2134       break;
2135    case nir_intrinsic_load_push_constant:
2136       visit_load_push_constant(bld_base, instr, result);
2137       break;
2138    case nir_intrinsic_load_ssbo:
2139       visit_load_ssbo(bld_base, instr, result);
2140       break;
2141    case nir_intrinsic_store_ssbo:
2142       visit_store_ssbo(bld_base, instr);
2143       break;
2144    case nir_intrinsic_get_ssbo_size:
2145       visit_get_ssbo_size(bld_base, instr, result);
2146       break;
2147    case nir_intrinsic_load_vertex_id:
2148    case nir_intrinsic_load_primitive_id:
2149    case nir_intrinsic_load_instance_id:
2150    case nir_intrinsic_load_base_instance:
2151    case nir_intrinsic_load_base_vertex:
2152    case nir_intrinsic_load_first_vertex:
2153    case nir_intrinsic_load_workgroup_id:
2154    case nir_intrinsic_load_local_invocation_id:
2155    case nir_intrinsic_load_local_invocation_index:
2156    case nir_intrinsic_load_num_workgroups:
2157    case nir_intrinsic_load_invocation_id:
2158    case nir_intrinsic_load_front_face:
2159    case nir_intrinsic_load_draw_id:
2160    case nir_intrinsic_load_workgroup_size:
2161    case nir_intrinsic_load_work_dim:
2162    case nir_intrinsic_load_tess_coord:
2163    case nir_intrinsic_load_tess_level_outer:
2164    case nir_intrinsic_load_tess_level_inner:
2165    case nir_intrinsic_load_patch_vertices_in:
2166    case nir_intrinsic_load_sample_id:
2167    case nir_intrinsic_load_sample_pos:
2168    case nir_intrinsic_load_sample_mask_in:
2169    case nir_intrinsic_load_view_index:
2170    case nir_intrinsic_load_subgroup_invocation:
2171    case nir_intrinsic_load_subgroup_id:
2172    case nir_intrinsic_load_num_subgroups:
2173       bld_base->sysval_intrin(bld_base, instr, result);
2174       break;
2175    case nir_intrinsic_load_helper_invocation:
2176       bld_base->helper_invocation(bld_base, &result[0]);
2177       break;
2178    case nir_intrinsic_discard_if:
2179    case nir_intrinsic_discard:
2180       visit_discard(bld_base, instr);
2181       break;
2182    case nir_intrinsic_emit_vertex:
2183       bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
2184       break;
2185    case nir_intrinsic_end_primitive:
2186       bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
2187       break;
2188    case nir_intrinsic_ssbo_atomic:
2189    case nir_intrinsic_ssbo_atomic_swap:
2190       visit_ssbo_atomic(bld_base, instr, result);
2191       break;
2192    case nir_intrinsic_image_load:
2193    case nir_intrinsic_bindless_image_load:
2194       visit_load_image(bld_base, instr, result);
2195       break;
2196    case nir_intrinsic_image_store:
2197    case nir_intrinsic_bindless_image_store:
2198       visit_store_image(bld_base, instr);
2199       break;
2200    case nir_intrinsic_image_atomic:
2201    case nir_intrinsic_image_atomic_swap:
2202    case nir_intrinsic_bindless_image_atomic:
2203    case nir_intrinsic_bindless_image_atomic_swap:
2204       visit_atomic_image(bld_base, instr, result);
2205       break;
2206    case nir_intrinsic_image_size:
2207    case nir_intrinsic_bindless_image_size:
2208       visit_image_size(bld_base, instr, result);
2209       break;
2210    case nir_intrinsic_image_samples:
2211    case nir_intrinsic_bindless_image_samples:
2212       visit_image_samples(bld_base, instr, result);
2213       break;
2214    case nir_intrinsic_load_shared:
2215       visit_shared_load(bld_base, instr, result);
2216       break;
2217    case nir_intrinsic_store_shared:
2218       visit_shared_store(bld_base, instr);
2219       break;
2220    case nir_intrinsic_shared_atomic:
2221    case nir_intrinsic_shared_atomic_swap:
2222       visit_shared_atomic(bld_base, instr, result);
2223       break;
2224    case nir_intrinsic_barrier:
2225       visit_barrier(bld_base, instr);
2226       break;
2227    case nir_intrinsic_load_kernel_input:
2228       visit_load_kernel_input(bld_base, instr, result);
2229       break;
2230    case nir_intrinsic_load_global:
2231    case nir_intrinsic_load_global_constant:
2232       visit_load_global(bld_base, instr, result);
2233       break;
2234    case nir_intrinsic_store_global:
2235       visit_store_global(bld_base, instr);
2236       break;
2237    case nir_intrinsic_global_atomic:
2238    case nir_intrinsic_global_atomic_swap:
2239       visit_global_atomic(bld_base, instr, result);
2240       break;
2241    case nir_intrinsic_vote_all:
2242    case nir_intrinsic_vote_any:
2243    case nir_intrinsic_vote_ieq:
2244    case nir_intrinsic_vote_feq:
2245       bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
2246       break;
2247    case nir_intrinsic_elect:
2248       bld_base->elect(bld_base, result);
2249       break;
2250    case nir_intrinsic_reduce:
2251    case nir_intrinsic_inclusive_scan:
2252    case nir_intrinsic_exclusive_scan:
2253       bld_base->reduce(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
2254       break;
2255    case nir_intrinsic_ballot:
2256       bld_base->ballot(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
2257       break;
2258 #if LLVM_VERSION_MAJOR >= 10
2259    case nir_intrinsic_shuffle:
2260       visit_shuffle(bld_base, instr, result);
2261       break;
2262 #endif
2263    case nir_intrinsic_read_invocation:
2264    case nir_intrinsic_read_first_invocation: {
2265       LLVMValueRef src0 = get_src(bld_base, instr->src[0]);
2266       src0 = cast_type(bld_base, src0, nir_type_int, nir_src_bit_size(instr->src[0]));
2267 
2268       LLVMValueRef src1 = NULL;
2269       if (instr->intrinsic == nir_intrinsic_read_invocation)
2270          src1 = cast_type(bld_base, get_src(bld_base, instr->src[1]), nir_type_int, 32);
2271 
2272       bld_base->read_invocation(bld_base, src0, nir_src_bit_size(instr->src[0]), src1, result);
2273       break;
2274    }
2275    case nir_intrinsic_interp_deref_at_offset:
2276    case nir_intrinsic_interp_deref_at_centroid:
2277    case nir_intrinsic_interp_deref_at_sample:
2278       visit_interp(bld_base, instr, result);
2279       break;
2280    case nir_intrinsic_load_scratch:
2281       visit_load_scratch(bld_base, instr, result);
2282       break;
2283    case nir_intrinsic_store_scratch:
2284       visit_store_scratch(bld_base, instr);
2285       break;
2286    case nir_intrinsic_shader_clock:
2287       bld_base->clock(bld_base, result);
2288       break;
2289    case nir_intrinsic_launch_mesh_workgroups:
2290       bld_base->launch_mesh_workgroups(bld_base,
2291                                        get_src(bld_base, instr->src[0]));
2292       break;
2293    case nir_intrinsic_load_task_payload:
2294       visit_payload_load(bld_base, instr, result);
2295       break;
2296    case nir_intrinsic_store_task_payload:
2297       visit_payload_store(bld_base, instr);
2298       break;
2299    case nir_intrinsic_task_payload_atomic:
2300    case nir_intrinsic_task_payload_atomic_swap:
2301       visit_payload_atomic(bld_base, instr, result);
2302       break;
2303    case nir_intrinsic_set_vertex_and_primitive_count:
2304       bld_base->set_vertex_and_primitive_count(bld_base,
2305                                                get_src(bld_base, instr->src[0]),
2306                                                get_src(bld_base, instr->src[1]));
2307       break;
2308    case nir_intrinsic_load_param:
2309       visit_load_param(bld_base, instr, result);
2310       break;
2311    default:
2312       fprintf(stderr, "Unsupported intrinsic: ");
2313       nir_print_instr(&instr->instr, stderr);
2314       fprintf(stderr, "\n");
2315       assert(0);
2316       break;
2317    }
2318    if (result[0]) {
2319       assign_ssa_dest(bld_base, &instr->def, result);
2320    }
2321 }
2322 
2323 
2324 static void
2325 visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
2326 {
2327    struct lp_sampler_size_query_params params = { 0 };
2328    LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
2329    LLVMValueRef explicit_lod = NULL;
2330    LLVMValueRef texture_unit_offset = NULL;
2331    LLVMValueRef resource = NULL;
2332 
2333    for (unsigned i = 0; i < instr->num_srcs; i++) {
2334       switch (instr->src[i].src_type) {
2335       case nir_tex_src_lod:
2336          explicit_lod = cast_type(bld_base,
2337                                   get_src(bld_base, instr->src[i].src),
2338                                   nir_type_int, 32);
2339          break;
2340       case nir_tex_src_texture_offset:
2341          texture_unit_offset = get_src(bld_base, instr->src[i].src);
2342          break;
2343       case nir_tex_src_texture_handle:
2344          resource = get_src(bld_base, instr->src[i].src);
2345          break;
2346       default:
2347          break;
2348       }
2349    }
2350 
2351    params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
2352    params.texture_unit = instr->texture_index;
2353    params.explicit_lod = explicit_lod;
2354    params.is_sviewinfo = true;
2355    params.sizes_out = sizes_out;
2356    params.samples_only = (instr->op == nir_texop_texture_samples);
2357    params.texture_unit_offset = texture_unit_offset;
2358    params.ms = instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
2359       instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS;
2360 
2361    if (instr->op == nir_texop_query_levels)
2362       params.explicit_lod = bld_base->uint_bld.zero;
2363 
2364    params.resource = resource;
2365 
2366    bld_base->tex_size(bld_base, &params);
2367    assign_ssa_dest(bld_base, &instr->def,
2368                    &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
2369 }
2370 
2371 
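/* Classify how an explicit LOD or bias source varies across the SIMD
 * vector: always-uniform sources use a scalar LOD, fragment shaders default
 * to per-quad LOD (unless GALLIVM_PERF_NO_QUAD_LOD forces per-element),
 * and all other stages compute the LOD per element.
 */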
2372 static enum lp_sampler_lod_property
2373 lp_build_nir_lod_property(gl_shader_stage stage, nir_src lod_src)
2374 {
2375    enum lp_sampler_lod_property lod_property;
2376 
2377    if (nir_src_is_always_uniform(lod_src)) {
2378       lod_property = LP_SAMPLER_LOD_SCALAR;
2379    } else if (stage == MESA_SHADER_FRAGMENT) {
2380       if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
2381          lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2382       else
2383          lod_property = LP_SAMPLER_LOD_PER_QUAD;
2384    } else {
2385       lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2386    }
2387    return lod_property;
2388 }
2389 
2390 
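/* Build the lp sampler sample_key bitfield for a texture instruction:
 * operation class (fetch/gather/lodq), shadow comparator, offsets and
 * multisample-fetch flags, plus the LOD control and property bits derived
 * from the bias/lod sources. txd always forces derivative-based LOD.
 */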
2391 uint32_t
2392 lp_build_nir_sample_key(gl_shader_stage stage, nir_tex_instr *instr)
2393 {
2394    uint32_t sample_key = 0;
2395 
2396    if (instr->op == nir_texop_txf ||
2397        instr->op == nir_texop_txf_ms) {
2398       sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
2399    } else if (instr->op == nir_texop_tg4) {
2400       sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
2401       sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
2402    } else if (instr->op == nir_texop_lod) {
2403       sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
2404    }
2405 
2406    bool explicit_lod = false;
2407    uint32_t lod_src = 0;
2408 
2409    for (unsigned i = 0; i < instr->num_srcs; i++) {
2410       switch (instr->src[i].src_type) {
2411       case nir_tex_src_comparator:
2412          sample_key |= LP_SAMPLER_SHADOW;
2413          break;
2414       case nir_tex_src_bias:
2415          sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
2416          explicit_lod = true;
2417          lod_src = i;
2418          break;
2419       case nir_tex_src_lod:
2420          sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
2421          explicit_lod = true;
2422          lod_src = i;
2423          break;
2424       case nir_tex_src_offset:
2425          sample_key |= LP_SAMPLER_OFFSETS;
2426          break;
2427       case nir_tex_src_ms_index:
2428          sample_key |= LP_SAMPLER_FETCH_MS;
2429          break;
2430       default:
2431          break;
2432       }
2433    }
2434 
2435    enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
2436    if (explicit_lod)
2437       lod_property = lp_build_nir_lod_property(stage, instr->src[lod_src].src);
2438 
2439    if (instr->op == nir_texop_txd) {
2440       sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
2441 
2442       if (stage == MESA_SHADER_FRAGMENT) {
2443          if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
2444             lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2445          else
2446             lod_property = LP_SAMPLER_LOD_PER_QUAD;
2447       } else
2448          lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2449    }
2450 
2451    sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
2452 
2453    return sample_key;
2454 }
2455 
2456 
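/* Translate a NIR texture instruction into an lp_sampler_params backend
 * call. Size-style queries (txs, query_levels, texture_samples) are routed
 * to visit_txs; everything else gathers coordinates, derivatives, offsets
 * and LOD operands from the sources, casting coordinates to float for
 * sampling ops and to int for txf/txf_ms, before invoking bld_base->tex.
 */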
2457 static void
2458 visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
2459 {
2460    if (instr->op == nir_texop_txs ||
2461        instr->op == nir_texop_query_levels ||
2462        instr->op == nir_texop_texture_samples) {
2463       visit_txs(bld_base, instr);
2464       return;
2465    }
2466 
2467    struct gallivm_state *gallivm = bld_base->base.gallivm;
2468    LLVMBuilderRef builder = gallivm->builder;
2469    LLVMValueRef coords[5];
2470    LLVMValueRef offsets[3] = { NULL };
2471    LLVMValueRef explicit_lod = NULL, ms_index = NULL;
2472    struct lp_sampler_params params = { 0 };
2473    struct lp_derivatives derivs;
2474    nir_deref_instr *texture_deref_instr = NULL;
2475    nir_deref_instr *sampler_deref_instr = NULL;
2476    LLVMValueRef texture_unit_offset = NULL;
2477    LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
2478    LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.vec_type);
2479    unsigned coord_vals = is_aos(bld_base) ? 1 : instr->coord_components;
2480 
2481    LLVMValueRef texture_resource = NULL;
2482    LLVMValueRef sampler_resource = NULL;
2483 
2484    for (unsigned i = 0; i < instr->num_srcs; i++) {
2485       switch (instr->src[i].src_type) {
2486       case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (coord_vals == 1) {
            coords[0] = coord;
         } else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         for (unsigned chan = coord_vals; chan < 5; chan++) {
            coords[chan] = coord_undef;
         }
         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_comparator:
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         if (offset_cnt == 1)
            offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan) {
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
               offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
            }
         }
         break;
      }
      case nir_tex_src_ms_index:
         ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;

      case nir_tex_src_texture_offset:
         texture_unit_offset = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_sampler_offset:
         break;
      case nir_tex_src_texture_handle:
         texture_resource = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_sampler_handle:
         sampler_resource = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_plane:
         assert(nir_src_is_const(instr->src[i].src) && !nir_src_as_uint(instr->src[i].src));
         break;
      default:
         assert(0);
         break;
      }
   }
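
   /* If no separate sampler was specified, fall back to the texture's
    * deref/handle (combined texture-sampler, GL style).
    */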
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (!sampler_resource)
      sampler_resource = texture_resource;

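   /* Sampling ops consume float coordinates, while fetch ops (txf/txf_ms)
    * take integer texel coordinates; cast the gathered coords accordingly.
    */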
   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_tg4:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_lod:
      for (unsigned chan = 0; chan < coord_vals; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
      break;
   default:
      break;
   }

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* move layer coord for 1d arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   uint32_t samp_base_index = 0, tex_base_index = 0;
   if (!sampler_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         samp_base_index = instr->sampler_index;
      }
   }
   if (!texture_deref_instr) {
      int tex_src_index = nir_tex_instr_src_index(instr, nir_tex_src_texture_handle);
      if (tex_src_index == -1) {
         tex_base_index = instr->texture_index;
      }
   }

   if (instr->op == nir_texop_txd)
      params.derivs = &derivs;

   params.sample_key = lp_build_nir_sample_key(bld_base->shader->info.stage, instr);
   params.offsets = offsets;
   params.texture_index = tex_base_index;
   params.texture_index_offset = texture_unit_offset;
   params.sampler_index = samp_base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   params.ms_index = ms_index;
   params.aniso_filter_table = bld_base->aniso_filter_table;
   params.texture_resource = texture_resource;
   params.sampler_resource = sampler_resource;
   bld_base->tex(bld_base, &params);

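   /* The sampling code always produces 32-bit texels; when the destination
    * is 16-bit, narrow each channel here (floats via float->half, integers
    * via truncation).
    */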
   if (instr->def.bit_size != 32) {
      assert(instr->def.bit_size == 16);
      LLVMTypeRef vec_type = NULL;
      bool is_float = false;
      switch (nir_alu_type_get_base_type(instr->dest_type)) {
      case nir_type_float:
         is_float = true;
         break;
      case nir_type_int:
         vec_type = bld_base->int16_bld.vec_type;
         break;
      case nir_type_uint:
         vec_type = bld_base->uint16_bld.vec_type;
         break;
      default:
         unreachable("unexpected alu type");
      }
      for (int i = 0; i < instr->def.num_components; ++i) {
         if (is_float) {
            texel[i] = lp_build_float_to_half(gallivm, texel[i]);
         } else {
            texel[i] = LLVMBuildBitCast(builder, texel[i], bld_base->int_bld.vec_type, "");
            texel[i] = LLVMBuildTrunc(builder, texel[i], vec_type, "");
         }
      }
   }

   assign_ssa_dest(bld_base, &instr->def, texel);
}


static void
visit_ssa_undef(struct lp_build_nir_context *bld_base,
                const nir_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
                                                    instr->def.bit_size);
   for (unsigned i = 0; i < num_components; i++)
      undef[i] = LLVMGetUndef(undef_bld->vec_type);
   /* Zero the unused tail; memset takes a size in bytes, not elements. */
   memset(&undef[num_components], 0,
          (NIR_MAX_VEC_COMPONENTS - num_components) * sizeof(LLVMValueRef));
   assign_ssa_dest(bld_base, &instr->def, undef);
}


static void
visit_jump(struct lp_build_nir_context *bld_base,
           const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld_base->break_stmt(bld_base);
      break;
   case nir_jump_continue:
      bld_base->continue_stmt(bld_base);
      break;
   default:
      unreachable("Unknown jump instr\n");
   }
}


static void
visit_deref(struct lp_build_nir_context *bld_base,
            nir_deref_instr *instr)
{
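   /* Only shared and global memory derefs are given an LLVM value here;
    * derefs in other modes are consumed directly by the code handling
    * their load/store intrinsics.
    */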
   if (!nir_deref_mode_is_one_of(instr, nir_var_mem_shared |
                                        nir_var_mem_global)) {
      return;
   }

   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry =
         _mesa_hash_table_search(bld_base->vars, instr->var);
      result = entry->data;
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   assign_ssa(bld_base, instr->def.index, result);
}

static void
visit_call(struct lp_build_nir_context *bld_base,
           nir_call_instr *instr)
{
   LLVMValueRef *args;
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->fns, instr->callee);
   struct lp_build_fn *fn = entry->data;
   args = calloc(instr->num_params + LP_RESV_FUNC_ARGS, sizeof(LLVMValueRef));

   assert(args);

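   /* The first LP_RESV_FUNC_ARGS argument slots are reserved (calloc left
    * them zeroed); the NIR call's own parameters start after them.
    */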
   args[0] = 0;
   for (unsigned i = 0; i < instr->num_params; i++) {
      LLVMValueRef arg = get_src(bld_base, instr->params[i]);

      if (nir_src_bit_size(instr->params[i]) == 32 && LLVMTypeOf(arg) == bld_base->base.vec_type)
         arg = cast_type(bld_base, arg, nir_type_int, 32);
      args[i + LP_RESV_FUNC_ARGS] = arg;
   }

   bld_base->call(bld_base, fn, instr->num_params + LP_RESV_FUNC_ARGS, args);
   free(args);
}

static void
visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(bld_base, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(bld_base, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(bld_base, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         assert(0);
         break;
      case nir_instr_type_undef:
         visit_ssa_undef(bld_base, nir_instr_as_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(bld_base, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(bld_base, nir_instr_as_deref(instr));
         break;
      case nir_instr_type_call:
         visit_call(bld_base, nir_instr_as_call(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }
}


static void
visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
{
   LLVMValueRef cond = get_src(bld_base, if_stmt->condition);

   bld_base->if_cond(bld_base, cond);
   visit_cf_list(bld_base, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      bld_base->else_stmt(bld_base);
      visit_cf_list(bld_base, &if_stmt->else_list);
   }
   bld_base->endif_stmt(bld_base);
}


static void
visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
{
   assert(!nir_loop_has_continue_construct(loop));
   bld_base->bgnloop(bld_base);
   visit_cf_list(bld_base, &loop->body);
   bld_base->endloop(bld_base);
}


static void
visit_cf_list(struct lp_build_nir_context *bld_base,
              struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(bld_base, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         visit_if(bld_base, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         visit_loop(bld_base, nir_cf_node_as_loop(node));
         break;
      default:
         assert(0);
      }
   }
}


static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}


/* Vector registers are stored as arrays on the LLVM side, so we can GEP
 * into them; exec-mask stores have to operate on a single component at a
 * time. The arrays are laid out as:
 *    0.x, 1.x, 2.x, 3.x
 *    0.y, 1.y, 2.y, 3.y
 *    ...
 */
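/* For example, assuming an 8-wide 32-bit SIMD configuration, a vec4
 * register becomes [4 x <8 x i32>], and an array of three such registers
 * becomes [3 x [4 x <8 x i32>]] (see get_register_type() below).
 */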
static LLVMTypeRef
get_register_type(struct lp_build_nir_context *bld_base,
                  nir_intrinsic_instr *reg)
{
   if (is_aos(bld_base))
      return bld_base->base.int_vec_type;

   unsigned num_array_elems = nir_intrinsic_num_array_elems(reg);
   unsigned bit_size = nir_intrinsic_bit_size(reg);
   unsigned num_components = nir_intrinsic_num_components(reg);

   struct lp_build_context *int_bld =
      get_int_bld(bld_base, true, bit_size == 1 ? 32 : bit_size);

   LLVMTypeRef type = int_bld->vec_type;
   if (num_components > 1)
      type = LLVMArrayType(type, num_components);
   if (num_array_elems)
      type = LLVMArrayType(type, num_array_elems);

   return type;
}

void
lp_build_nir_prepasses(struct nir_shader *nir)
{
   NIR_PASS_V(nir, nir_convert_to_lcssa, true, true);
   NIR_PASS_V(nir, nir_convert_from_ssa, true);
   NIR_PASS_V(nir, nir_lower_locals_to_regs, 32);
   NIR_PASS_V(nir, nir_remove_dead_derefs);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
}
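
/* A rough usage sketch (hypothetical caller; real drivers wire this up in
 * their own shader-compile paths). Optimize while the shader is still in
 * SSA form, then run the prepasses to leave SSA before translation:
 *
 *    lp_build_opt_nir(nir);
 *    lp_build_nir_prepasses(nir);
 *    lp_build_nir_llvm(&bld.bld_base, nir, nir_shader_get_entrypoint(nir));
 */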

bool lp_build_nir_llvm(struct lp_build_nir_context *bld_base,
                       struct nir_shader *nir,
                       nir_function_impl *impl)
{
   nir_foreach_shader_out_variable(variable, nir)
      handle_shader_output_decl(bld_base, nir, variable);

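   /* With lowered IO there are no output variables left on the shader;
    * synthesize a vec4 variable for each written location so the backend
    * still gets a declaration for it.
    */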
   if (nir->info.io_lowered) {
      uint64_t outputs_written = nir->info.outputs_written;

      while (outputs_written) {
         unsigned location = u_bit_scan64(&outputs_written);
         nir_variable var = {0};

         var.type = glsl_vec4_type();
         var.data.mode = nir_var_shader_out;
         var.data.location = location;
         var.data.driver_location = util_bitcount64(nir->info.outputs_written &
                                                    BITFIELD64_MASK(location));
         bld_base->emit_var_decl(bld_base, &var);
      }
   }

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->range_ht = _mesa_pointer_hash_table_create(NULL);

   nir_foreach_reg_decl(reg, impl) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca(bld_base->base.gallivm,
                                               type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
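   /* One LLVMValueRef slot per SSA def index; assign_ssa()/assign_ssa_dest()
    * fill these in as the instructions are visited.
    */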
   nir_index_ssa_defs(impl);
   bld_base->ssa_defs = calloc(impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &impl->body);

   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   ralloc_free(bld_base->range_ht);
   return true;
}


/* Run some basic optimizations and lowerings to get rid of constructs the
 * backend doesn't want to see.
 */
void
lp_build_opt_nir(struct nir_shader *nir)
{
   bool progress;

   static const struct nir_lower_tex_options lower_tex_options = {
      .lower_tg4_offsets = true,
      .lower_txp = ~0u,
      .lower_invalid_implicit_lod = true,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (nir->info.stage == MESA_SHADER_TASK) {
      nir_lower_task_shader_options ts_opts = { 0 };
      NIR_PASS_V(nir, nir_lower_task_shader, ts_opts);
   }

   NIR_PASS_V(nir, nir_lower_flrp, 16|32|64, true);
   NIR_PASS_V(nir, nir_lower_fp16_casts, nir_lower_fp16_all | nir_lower_fp16_split_fp64);
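
   /* Iterate the core folding/lowering passes to a fixed point: each
    * lowering can expose new constant-folding and algebraic opportunities.
    */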
   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_lower_pack);

      nir_lower_tex_options options = { .lower_invalid_implicit_lod = true, };
      NIR_PASS_V(nir, nir_lower_tex, &options);

      const nir_lower_subgroups_options subgroups_options = {
         .subgroup_size = lp_native_vector_width / 32,
         .ballot_bit_size = 32,
         .ballot_components = 1,
         .lower_to_scalar = true,
         .lower_subgroup_masks = true,
         .lower_relative_shuffle = true,
         .lower_inverse_ballot = true,
      };
      NIR_PASS(progress, nir, nir_lower_subgroups, &subgroups_options);
   } while (progress);

   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_algebraic_late);
      if (progress) {
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_cse);
      }
   } while (progress);

   if (nir_lower_bool_to_int32(nir)) {
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
   }
}