/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_conv.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_intr.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir.h"
#include "nir_deref.h"
#include "nir_search_helpers.h"


// Doing AOS (and linear) codegen?
static bool
is_aos(const struct lp_build_nir_context *bld_base)
{
   // AOS is used for vectors of uint8[16]
   return bld_base->base.type.length == 16 && bld_base->base.type.width == 8;
}


static void
visit_cf_list(struct lp_build_nir_context *bld_base,
              struct exec_list *list);


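/*
 * Bitcast a value to the vector type of the build context matching the
 * given NIR alu type and bit size; values of types we have no dedicated
 * build context for are returned unchanged.
 */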
static LLVMValueRef
cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
          nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->half_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 1:
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}


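/*
 * Translate a GLSL sampler dimension (plus arrayness) to the
 * corresponding pipe texture target.
 */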
static unsigned
glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_SUBPASS:
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      pipe_target = PIPE_TEXTURE_2D_ARRAY;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_MS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}


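/* Fetch the LLVM value previously assigned to the given NIR SSA source. */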
static LLVMValueRef
get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   return bld_base->ssa_defs[src.ssa->index];
}


static void
assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}


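/*
 * Record the values for an SSA destination, gathering multi-component
 * results into an array value (except in AOS mode, where all channels
 * already live in vals[0]).
 */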
static void
assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_def *ssa,
                LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (ssa->num_components == 1 || is_aos(bld_base)) {
      assign_ssa(bld_base, ssa->index, vals[0]);
   } else {
      assign_ssa(bld_base, ssa->index,
                 lp_nir_array_build_gather_values(bld_base->base.gallivm->builder,
                                                  vals, ssa->num_components));
   }
}


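/*
 * Float comparison returning a 32-bit boolean mask.  Ordered compares
 * are used for everything except not-equal (NIR's fneu is unordered);
 * 16- and 64-bit results are normalized to 32-bit masks.
 */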
static LLVMValueRef
fcmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 16)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


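/*
 * Integer comparison returning a 32-bit boolean mask; masks from
 * narrower or wider sources are sign-extended or truncated to 32 bits.
 */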
static LLVMValueRef
icmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       bool is_unsigned,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld =
      get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


/**
 * Get a source register value for an ALU instruction.
 * This is where swizzles are handled.  There should be no negation
 * or absolute value modifiers.
 * num_components indicates the number of components needed in the
 * returned array or vector.
 */
static LLVMValueRef
get_alu_src(struct lp_build_nir_context *bld_base,
            nir_alu_src src,
            unsigned num_components)
{
   assert(num_components >= 1);
   assert(num_components <= 4);

   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   const unsigned src_components = nir_src_num_components(src.src);
   assert(src_components > 0);
   LLVMValueRef value = get_src(bld_base, src.src);
   assert(value);

   /* check if swizzling needed for the src vector */
   bool need_swizzle = false;
   for (unsigned i = 0; i < src_components; ++i) {
      if (src.swizzle[i] != i) {
         need_swizzle = true;
         break;
      }
   }

   if (is_aos(bld_base) && !need_swizzle) {
      return value;
   }

   if (need_swizzle || num_components != src_components) {
      if (is_aos(bld_base) && need_swizzle) {
         // Handle swizzle for AOS
         assert(LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind);

         // swizzle vector of ((r,g,b,a), (r,g,b,a), (r,g,b,a), (r,g,b,a))
         assert(bld_base->base.type.width == 8);
         assert(bld_base->base.type.length == 16);

         // Do our own swizzle here since lp_build_swizzle_aos_n() does
         // not do what we want.
         // Ex: value = {r0,g0,b0,a0, r1,g1,b1,a1, r2,g2,b2,a2, r3,g3,b3,a3}.
         // aos swizzle = {2,1,0,3}  // swap red/blue
         // shuffles = {2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15}
         // result = {b0,g0,r0,a0, b1,g1,r1,a1, b2,g2,r2,a2, b3,g3,r3,a3}.
         LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH];
         for (unsigned i = 0; i < 16; i++) {
            unsigned chan = i % 4;
            /* apply src register swizzle */
            if (chan < num_components) {
               chan = src.swizzle[chan];
            } else {
               chan = src.swizzle[0];
            }
            /* apply aos swizzle */
            chan = lp_nir_aos_swizzle(bld_base, chan);
            shuffles[i] = lp_build_const_int32(gallivm, (i & ~3) + chan);
         }
         value = LLVMBuildShuffleVector(builder, value,
                                        LLVMGetUndef(LLVMTypeOf(value)),
                                        LLVMConstVector(shuffles, 16), "");
      } else if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(
            LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")),
                          num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr,
                                       LLVMBuildExtractValue(builder, value,
                                                             src.swizzle[i], ""),
                                       i, "");
         value = arr;
      }
   }

   return value;
}


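/*
 * Convert a boolean mask to 1.0/0.0 by masking the bit pattern of 1.0f,
 * then widen or narrow to the requested float bit size.
 */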
static LLVMValueRef
emit_b2f(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result =
      LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                   LLVMBuildBitCast(builder,
                                    lp_build_const_vec(bld_base->base.gallivm,
                                                       bld_base->base.type,
                                                       1.0),
                                    bld_base->int_bld.vec_type, ""),
                   "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 16:
      result = LLVMBuildFPTrunc(builder, result,
                                bld_base->half_bld.vec_type, "");
      break;
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result,
                              bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}


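/*
 * Convert a boolean mask to an integer 1/0 of the requested bit size.
 */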
static LLVMValueRef
emit_b2i(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder,
                          cast_type(bld_base, src0, nir_type_int, 32),
                          lp_build_const_int_vec(bld_base->base.gallivm,
                                                 bld_base->base.type, 1), "");
   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(builder, result, bld_base->int8_bld.vec_type, "");
   case 16:
      return LLVMBuildTrunc(builder, result, bld_base->int16_bld.vec_type, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}


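/*
 * 32-bit boolean select: pick src[1] where the condition mask is
 * non-zero, src[2] elsewhere.
 */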
static LLVMValueRef
emit_b32csel(struct lp_build_nir_context *bld_base,
             unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
             LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type,
                                     PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}


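/*
 * Extract the low (hi == false) or high (hi == true) 32-bit halves of a
 * vector of 64-bit values, honoring endianness.
 */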
static LLVMValueRef
split_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src,
           LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}


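/*
 * Interleave two vectors of 32-bit lanes into the lo/hi halves of a
 * vector of 64-bit values, honoring endianness.
 */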
static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2,
                                 LLVMConstVector(shuffles, len), "");
}


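/*
 * Extract the low or high 16-bit halves of a vector of 32-bit values,
 * honoring endianness.
 */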
static LLVMValueRef
split_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src,
           LLVMVectorType(LLVMInt16TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}


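/*
 * Interleave two vectors of 16-bit lanes into the lo/hi halves of a
 * vector of 32-bit values, honoring endianness.
 */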
static LLVMValueRef
merge_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->int16_bld.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->int_bld.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2,
                                 LLVMConstVector(shuffles, len), "");
}


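/*
 * Replace divisors that would trigger SIGFPE in a signed division
 * (numerator == INT_MIN with divisor == -1) by 1.
 */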
static LLVMValueRef
get_signed_divisor(struct gallivm_state *gallivm,
                   struct lp_build_context *int_bld,
                   struct lp_build_context *mask_bld,
                   int src_bit_size,
                   LLVMValueRef src, LLVMValueRef divisor)
{
   LLVMBuilderRef builder = gallivm->builder;
   /* For signed divides, SIGFPE can occur if the numerator is INT_MIN
      and the divisor is -1. */
   /* set mask if numerator == INT_MIN */
   long long min_val;
   switch (src_bit_size) {
   case 8:
      min_val = INT8_MIN;
      break;
   case 16:
      min_val = INT16_MIN;
      break;
   default:
   case 32:
      min_val = INT_MIN;
      break;
   case 64:
      min_val = INT64_MIN;
      break;
   }
   LLVMValueRef div_mask2 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src,
                                         lp_build_const_int_vec(gallivm, int_bld->type, min_val));
   /* set another mask if divisor == -1 */
   LLVMValueRef div_mask3 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, divisor,
                                         lp_build_const_int_vec(gallivm, int_bld->type, -1));
   div_mask2 = LLVMBuildAnd(builder, div_mask2, div_mask3, "");

   divisor = lp_build_select(mask_bld, div_mask2, int_bld->one, divisor);
   return divisor;
}


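/*
 * Integer division with defined divide-by-zero behavior: division by
 * zero yields all ones for unsigned and zero for signed, and signed
 * INT_MIN / -1 is made safe via get_signed_divisor().
 */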
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);

   /* avoid divide by 0: OR-ing in the mask converts a zero divisor to -1 */
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   LLVMValueRef divisor = LLVMBuildOr(builder, div_mask, src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff, at least with
       * d3d10; may as well do the same for idiv. */
      return LLVMBuildOr(builder, div_mask, result, "");
}


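/*
 * Integer modulo with defined divide-by-zero behavior: a zero divisor
 * yields an all-ones result.
 */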
static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}


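/*
 * Emit the LLVM IR for a single NIR ALU operation, given sources that
 * have already been swizzled and cast to the operation's input types.
 */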
static LLVMValueRef
do_alu_action(struct lp_build_nir_context *bld_base,
              const nir_alu_instr *instr,
              unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
              LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;

   switch (instr->op) {
   case nir_op_b2f16:
      result = emit_b2f(bld_base, src[0], 16);
      break;
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i8:
      result = emit_b2i(bld_base, src[0], 8);
      break;
   case nir_op_b2i16:
      result = emit_b2i(bld_base, src[0], 16);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->int_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2],
                            lp_build_and(&bld_base->uint_bld, src[0],
                                         lp_build_xor(&bld_base->uint_bld,
                                                      src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2f16:
      if (src_bit_size[0] == 64)
         src[0] = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->half_bld.vec_type, "");
      break;
   case nir_op_f2f32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildFPExt(builder, src[0],
                                 bld_base->base.vec_type, "");
      else
         result = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64: {
      nir_alu_type dst_type = nir_op_infos[instr->op].output_type;
      bool is_unsigned = nir_alu_type_get_base_type(dst_type) == nir_type_uint;
      LLVMTypeRef int_type =
         get_int_bld(bld_base, is_unsigned,
                     nir_alu_type_get_type_size(dst_type))->vec_type;

      char name[64];
      char tmp[64];
      char intrinsic[64];
      snprintf(name, sizeof(name), "llvm.fpto%ci.sat", is_unsigned ? 'u' : 's');
      lp_format_intrinsic(tmp, 64, name, int_type);
      lp_format_intrinsic(intrinsic, 64, tmp, LLVMTypeOf(src[0]));
      result = lp_build_intrinsic_unary(builder, intrinsic, int_type, src[0]);
      break;
   }
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge:
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb: {
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      result = lp_build_cttz(int_bld, src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_fisfinite32:
      unreachable("Should have been lowered in nir_opt_algebraic_late.");
   case nir_op_flog2:
      result = lp_build_log2_safe(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_flt:
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmax:
   case nir_op_fmin: {
      enum gallivm_nan_behavior minmax_nan;
      int first = 0;

      /* If one of the sources is known to be a number (i.e., not NaN), then
       * better code can be generated by passing that information along.
       */
      if (is_a_number(bld_base->range_ht, instr, 1,
                      0 /* unused num_components */,
                      NULL /* unused swizzle */)) {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else if (is_a_number(bld_base->range_ht, instr, 0,
                             0 /* unused num_components */,
                             NULL /* unused swizzle */)) {
         first = 1;
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER;
      }

      if (instr->op == nir_op_fmin) {
         result = lp_build_min_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      } else {
         result = lp_build_max_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      }
      break;
   }
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      if (src_bit_size[0] == 16) {
         struct lp_build_context *bld = get_flt_bld(bld_base, 16);
         char intrinsic[64];
         lp_format_intrinsic(intrinsic, 64, "llvm.roundeven", bld->vec_type);
         result = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, src[0]);
      } else {
         result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      }
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2f16:
      result = LLVMBuildSIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_imod:
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1],
                            lp_build_const_int_vec(gallivm, uint_bld->type,
                                                   (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1],
                            lp_build_const_int_vec(gallivm, uint_bld->type,
                                                   (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = merge_16bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->base.vec_type, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x:
      result = split_16bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_32_2x16_split_y:
      result = split_16bit(bld_base, src[0], true);
      break;
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint64_bld.vec_type, "");
      break;
   }
   case nir_op_pack_32_4x8_split: {
      LLVMValueRef tmp1 = merge_16bit(bld_base, src[0], src[1]);
      LLVMValueRef tmp2 = merge_16bit(bld_base, src[2], src[3]);
      tmp1 = LLVMBuildBitCast(builder, tmp1, bld_base->uint16_bld.vec_type, "");
      tmp2 = LLVMBuildBitCast(builder, tmp2, bld_base->uint16_bld.vec_type, "");
      LLVMValueRef tmp = merge_16bit(bld_base, tmp1, tmp2);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_u2f16:
      result = LLVMBuildUIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld,
                            lp_build_const_int_vec(gallivm, uint_bld->type,
                                                   src_bit_size[0] - 1),
                            result);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, true, src_bit_size[0]),
                           src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1],
                            lp_build_const_int_vec(gallivm, uint_bld->type,
                                                   (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   case nir_op_bcsel: {
      LLVMTypeRef src1_type = LLVMTypeOf(src[1]);
      LLVMTypeRef src2_type = LLVMTypeOf(src[2]);

      if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
          LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
         src[2] = LLVMBuildIntToPtr(builder, src[2], src1_type, "");
      } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
                 LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
         src[1] = LLVMBuildIntToPtr(builder, src[1], src2_type, "");
      }

      for (int i = 1; i <= 2; i++) {
         LLVMTypeRef type = LLVMTypeOf(src[i]);
         if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
            break;
         src[i] = LLVMBuildBitCast(builder, src[i],
                                   get_int_bld(bld_base, true, src_bit_size[i])->vec_type, "");
      }
      return LLVMBuildSelect(builder, src[0], src[1], src[2], "");
   }
   default:
      assert(0);
      break;
   }
   return result;
}


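/*
 * Visit a NIR ALU instruction: gather the sources, apply the
 * per-instruction float controls, emit the operation per channel (or
 * once in AOS mode), and assign the result to the SSA destination.
 */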
static void
visit_alu(struct lp_build_nir_context *bld_base,
          const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   const unsigned num_components = instr->def.num_components;
   unsigned src_components;

   struct lp_type half_type = bld_base->half_bld.type;
   struct lp_type float_type = bld_base->base.type;
   struct lp_type double_type = bld_base->dbl_bld.type;

   /* Set the per-instruction float controls. */
   bld_base->half_bld.type.signed_zero_preserve |=
      !!(instr->fp_fast_math & FLOAT_CONTROLS_SIGNED_ZERO_PRESERVE_FP16);
   bld_base->half_bld.type.nan_preserve |=
      !!(instr->fp_fast_math & FLOAT_CONTROLS_NAN_PRESERVE_FP16);

   bld_base->base.type.signed_zero_preserve |=
      !!(instr->fp_fast_math & FLOAT_CONTROLS_SIGNED_ZERO_PRESERVE_FP32);
   bld_base->base.type.nan_preserve |=
      !!(instr->fp_fast_math & FLOAT_CONTROLS_NAN_PRESERVE_FP32);

   bld_base->dbl_bld.type.signed_zero_preserve |=
      !!(instr->fp_fast_math & FLOAT_CONTROLS_SIGNED_ZERO_PRESERVE_FP64);
   bld_base->dbl_bld.type.nan_preserve |=
      !!(instr->fp_fast_math & FLOAT_CONTROLS_NAN_PRESERVE_FP64);

   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_amd:
      src_components = 3;
      break;
   case nir_op_fsum2:
   case nir_op_fsum3:
   case nir_op_fsum4:
      src_components = nir_op_infos[instr->op].input_sizes[0];
      break;
   default:
      src_components = num_components;
      break;
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 ||
       instr->op == nir_op_vec3 ||
       instr->op == nir_op_vec2 ||
       instr->op == nir_op_vec8 ||
       instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i],
                               nir_op_infos[instr->op].input_types[i],
                               src_bit_size[i]);
      }
   } else if (instr->op == nir_op_fsum4 ||
              instr->op == nir_op_fsum3 ||
              instr->op == nir_op_fsum2) {
      for (unsigned c = 0; c < nir_op_infos[instr->op].input_sizes[0]; c++) {
         LLVMValueRef temp_chan = LLVMBuildExtractValue(gallivm->builder,
                                                        src[0], c, "");
         temp_chan = cast_type(bld_base, temp_chan,
                               nir_op_infos[instr->op].input_types[0],
                               src_bit_size[0]);
         result[0] = (c == 0) ? temp_chan
            : lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                           result[0], temp_chan);
      }
   } else if (is_aos(bld_base)) {
      result[0] = do_alu_action(bld_base, instr, src_bit_size, src);
   } else {
      /* Loop for R,G,B,A channels */
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         /* Loop over instruction operands */
         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else {
               src_chan[i] = src[i];
            }
            src_chan[i] = cast_type(bld_base, src_chan[i],
                                    nir_op_infos[instr->op].input_types[i],
                                    src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c],
                               nir_op_infos[instr->op].output_type,
                               instr->def.bit_size);
      }
   }
   assign_ssa_dest(bld_base, &instr->def, result);

   /* Restore the global float controls. */
   bld_base->half_bld.type = half_type;
   bld_base->base.type = float_type;
   bld_base->dbl_bld.type = double_type;
}


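/* Visit a NIR load_const instruction and assign its value to the SSA def. */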
static void
visit_load_const(struct lp_build_nir_context *bld_base,
                 const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   bld_base->load_const(bld_base, instr, result);
   assign_ssa_dest(bld_base, &instr->def, result);
}


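/*
 * Walk a variable deref chain and split the resulting offset into a
 * constant part (counted in attribute slots) and an optional indirect
 * LLVM value; optionally also returns the vertex index for per-vertex IO.
 */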
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact && nir_src_is_const(instr->arr.index)) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off =
               lp_build_mul(&bld_base->uint_bld,
                            lp_build_const_int_vec(bld_base->base.gallivm,
                                                   bld_base->base.type, size),
                            idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm,
                                                   bld_base->uint_bld.type,
                                                   const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}


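/*
 * Load a shader input referenced by a load_input intrinsic, forwarding
 * any indirect offset to the backend's load_var hook.
 */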
static void
visit_load_input(struct lp_build_nir_context *bld_base,
                 nir_intrinsic_instr *instr,
                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_variable var = {0};
   var.data.location = nir_intrinsic_io_semantics(instr).location;
   var.data.driver_location = nir_intrinsic_base(instr);
   var.data.location_frac = nir_intrinsic_component(instr);

   unsigned nc = instr->def.num_components;
   unsigned bit_size = instr->def.bit_size;

   nir_src offset = *nir_get_io_offset_src(instr);
   bool indirect = !nir_src_is_const(offset);
   if (!indirect)
      assert(nir_src_as_uint(offset) == 0);
   LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;

   bld_base->load_var(bld_base, nir_var_shader_in, nc, bit_size, &var,
                      0, NULL, 0, indir_index, result);
}


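/*
 * Store a shader output referenced by a store_output intrinsic,
 * honoring the write mask.
 */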
1284 static void
visit_store_output(struct lp_build_nir_context * bld_base,nir_intrinsic_instr * instr)1285 visit_store_output(struct lp_build_nir_context *bld_base,
1286                    nir_intrinsic_instr *instr)
1287 {
1288    nir_variable var = {0};
1289    var.data.location = nir_intrinsic_io_semantics(instr).location;
1290    var.data.driver_location = nir_intrinsic_base(instr);
1291    var.data.location_frac = nir_intrinsic_component(instr);
1292 
1293    unsigned mask = nir_intrinsic_write_mask(instr);
1294 
1295    unsigned bit_size = nir_src_bit_size(instr->src[0]);
1296    LLVMValueRef src = get_src(bld_base, instr->src[0]);
1297 
1298    nir_src offset = *nir_get_io_offset_src(instr);
1299    bool indirect = !nir_src_is_const(offset);
1300    if (!indirect)
1301       assert(nir_src_as_uint(offset) == 0);
1302    LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;
1303 
1304    if (mask == 0x1 && LLVMGetTypeKind(LLVMTypeOf(src)) == LLVMArrayTypeKind) {
1305       src = LLVMBuildExtractValue(bld_base->base.gallivm->builder,
1306                                   src, 0, "");
1307    }
1308 
1309    bld_base->store_var(bld_base, nir_var_shader_out, util_last_bit(mask),
1310                        bit_size, &var, mask, NULL, 0, indir_index, src);
1311 }
1312 
1313 
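/*
 * Load from a NIR register (nir_intrinsic_load_reg / load_reg_indirect).
 * Storage was allocated when the matching decl_reg was visited and is found
 * through the bld_base->regs hash table.  Multi-component loads come back as
 * one LLVM aggregate and are split into per-channel vectors here, except in
 * AOS mode where the whole vector lives in a single value.
 */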
1314 static void
1315 visit_load_reg(struct lp_build_nir_context *bld_base,
1316                nir_intrinsic_instr *instr,
1317                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1318 {
1319    struct gallivm_state *gallivm = bld_base->base.gallivm;
1320    LLVMBuilderRef builder = gallivm->builder;
1321 
1322    nir_intrinsic_instr *decl = nir_reg_get_decl(instr->src[0].ssa);
1323    unsigned base = nir_intrinsic_base(instr);
1324 
1325    struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, decl);
1326    LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
1327 
1328    unsigned bit_size = nir_intrinsic_bit_size(decl);
1329    struct lp_build_context *reg_bld = get_int_bld(bld_base, true, bit_size);
1330 
1331    LLVMValueRef indir_src = NULL;
1332    if (instr->intrinsic == nir_intrinsic_load_reg_indirect) {
1333       indir_src = cast_type(bld_base, get_src(bld_base, instr->src[1]),
1334                             nir_type_uint, 32);
1335    }
1336 
1337    LLVMValueRef val = bld_base->load_reg(bld_base, reg_bld, decl, base, indir_src, reg_storage);
1338 
1339    if (!is_aos(bld_base) && instr->def.num_components > 1) {
1340       for (unsigned i = 0; i < instr->def.num_components; i++)
1341          result[i] = LLVMBuildExtractValue(builder, val, i, "");
1342    } else {
1343       result[0] = val;
1344    }
1345 }
1346 
1347 
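/*
 * Store to a NIR register.  src[0] is the value, src[1] leads to the
 * register declaration, and src[2] is the optional indirect offset
 * (store_reg_indirect only).  Channels are split out exactly as in
 * visit_load_reg so store_reg can honor the write mask per component.
 */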
1348 static void
1349 visit_store_reg(struct lp_build_nir_context *bld_base,
1350                 nir_intrinsic_instr *instr)
1351 {
1352    struct gallivm_state *gallivm = bld_base->base.gallivm;
1353    LLVMBuilderRef builder = gallivm->builder;
1354 
1355    nir_intrinsic_instr *decl = nir_reg_get_decl(instr->src[1].ssa);
1356    unsigned base = nir_intrinsic_base(instr);
1357    unsigned write_mask = nir_intrinsic_write_mask(instr);
1358    assert(write_mask != 0x0);
1359 
1360    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1361    LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS] = { NULL };
1362    if (!is_aos(bld_base) && nir_src_num_components(instr->src[0]) > 1) {
1363       for (unsigned i = 0; i < nir_src_num_components(instr->src[0]); i++)
1364          vals[i] = LLVMBuildExtractValue(builder, val, i, "");
1365    } else {
1366       vals[0] = val;
1367    }
1368 
1369    struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, decl);
1370    LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
1371 
1372    unsigned bit_size = nir_intrinsic_bit_size(decl);
1373    struct lp_build_context *reg_bld = get_int_bld(bld_base, true, bit_size);
1374 
1375    LLVMValueRef indir_src = NULL;
1376    if (instr->intrinsic == nir_intrinsic_store_reg_indirect) {
1377       indir_src = cast_type(bld_base, get_src(bld_base, instr->src[2]),
1378                             nir_type_uint, 32);
1379    }
1380 
1381    bld_base->store_reg(bld_base, reg_bld, decl, write_mask, base,
1382                        indir_src, reg_storage, vals);
1383 }
1384 
1385 
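/*
 * For compact arrays (e.g. gl_TessLevelOuter lowered to float[4]) decide
 * whether a constant index lies past the end of the array.  For arrayed IO
 * (per-vertex TCS/TES slots) the outer per-vertex dimension is peeled off
 * first so the bound check applies to the innermost compact array.
 */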
1386 static bool
1387 compact_array_index_oob(struct lp_build_nir_context *bld_base, nir_variable *var, const uint32_t index)
1388 {
1389    const struct glsl_type *type = var->type;
1390    if (nir_is_arrayed_io(var, bld_base->shader->info.stage)) {
1391       assert(glsl_type_is_array(type));
1392       type = glsl_get_array_element(type);
1393    }
1394    return index >= glsl_get_length(type);
1395 }
1396 
1397 static void
1398 visit_load_var(struct lp_build_nir_context *bld_base,
1399                nir_intrinsic_instr *instr,
1400                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1401 {
1402    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1403    nir_variable *var = nir_deref_instr_get_variable(deref);
1404    assert(util_bitcount(deref->modes) == 1);
1405    nir_variable_mode mode = deref->modes;
1406    unsigned const_index = 0;
1407    LLVMValueRef indir_index = NULL;
1408    LLVMValueRef indir_vertex_index = NULL;
1409    unsigned vertex_index = 0;
1410    unsigned nc = instr->def.num_components;
1411    unsigned bit_size = instr->def.bit_size;
1412    if (var) {
1413       bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
1414          var->data.mode == nir_var_shader_in;
1415       bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
1416          var->data.mode == nir_var_shader_in;
1417       bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1418          var->data.mode == nir_var_shader_in;
1419       bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1420          var->data.mode == nir_var_shader_out && !var->data.patch;
1421       bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
1422          var->data.mode == nir_var_shader_in && !var->data.patch;
1423 
1424       mode = var->data.mode;
1425 
1426       get_deref_offset(bld_base, deref, vs_in,
1427                        gs_in ? &vertex_index : NULL,
1428                        (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
1429                        &const_index, &indir_index);
1430 
1431       /* Return undef for loads definitely outside of the array bounds
1432        * (tcs-tes-levels-out-of-bounds-read.shader_test).
1433        */
1434       if (var->data.compact && compact_array_index_oob(bld_base, var, const_index)) {
1435          struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
1436                                                           instr->def.bit_size);
1437          for (int i = 0; i < instr->def.num_components; i++)
1438             result[i] = LLVMGetUndef(undef_bld->vec_type);
1439          return;
1440       }
1441    }
1442    bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index,
1443                       indir_vertex_index, const_index, indir_index, result);
1444 }
1445 
1446 
1447 static void
1448 visit_store_var(struct lp_build_nir_context *bld_base,
1449                 nir_intrinsic_instr *instr)
1450 {
1451    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1452    nir_variable *var = nir_deref_instr_get_variable(deref);
1453    assert(util_bitcount(deref->modes) == 1);
1454    nir_variable_mode mode = deref->modes;
1455    int writemask = instr->const_index[0];
1456    unsigned bit_size = nir_src_bit_size(instr->src[1]);
1457    LLVMValueRef src = get_src(bld_base, instr->src[1]);
1458    unsigned const_index = 0;
1459    LLVMValueRef indir_index = NULL, indir_vertex_index = NULL;
1460    if (var) {
1461       bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1462          var->data.mode == nir_var_shader_out && !var->data.patch;
1463       bool mesh_out = bld_base->shader->info.stage == MESA_SHADER_MESH &&
1464          var->data.mode == nir_var_shader_out;
1465       get_deref_offset(bld_base, deref, false, NULL,
1466                        (tcs_out || mesh_out) ? &indir_vertex_index : NULL,
1467                        &const_index, &indir_index);
1468 
1469       /* Skip stores definitely outside of the array bounds
1470        * (tcs-tes-levels-out-of-bounds-write.shader_test).
1471        */
1472       if (var->data.compact && compact_array_index_oob(bld_base, var, const_index))
1473          return;
1474    }
1475    bld_base->store_var(bld_base, mode, instr->num_components, bit_size,
1476                        var, writemask, indir_vertex_index, const_index,
1477                        indir_index, src);
1478 }
1479 
1480 
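/*
 * Handle nir_intrinsic_load_ubo.  src[0] is the buffer index and src[1] the
 * byte offset.  When NIR can prove the offset is the same in every SIMD lane
 * (nir_src_is_always_uniform), offset_is_uniform lets the backend emit one
 * scalar load and broadcast it instead of a per-lane gather.
 */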
1481 static void
1482 visit_load_ubo(struct lp_build_nir_context *bld_base,
1483                nir_intrinsic_instr *instr,
1484                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1485 {
1486    struct gallivm_state *gallivm = bld_base->base.gallivm;
1487    LLVMBuilderRef builder = gallivm->builder;
1488    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1489    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1490 
1491    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
1492 
1493    if (nir_src_num_components(instr->src[0]) == 1)
1494       idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
1495 
1496    bld_base->load_ubo(bld_base, instr->def.num_components,
1497                       instr->def.bit_size,
1498                       offset_is_uniform, idx, offset, result);
1499 }
1500 
1501 
1502 static void
1503 visit_load_push_constant(struct lp_build_nir_context *bld_base,
1504                          nir_intrinsic_instr *instr,
1505                          LLVMValueRef result[4])
1506 {
1507    struct gallivm_state *gallivm = bld_base->base.gallivm;
1508    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1509    LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
1510    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1511 
1512    bld_base->load_ubo(bld_base, instr->def.num_components,
1513                       instr->def.bit_size,
1514                       offset_is_uniform, idx, offset, result);
1515 }
1516 
1517 
1518 static void
1519 visit_load_ssbo(struct lp_build_nir_context *bld_base,
1520                 nir_intrinsic_instr *instr,
1521                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1522 {
1523    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1524    if (nir_src_num_components(instr->src[0]) == 1)
1525       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1526 
1527    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1528    bool index_and_offset_are_uniform =
1529       nir_src_is_always_uniform(instr->src[0]) &&
1530       nir_src_is_always_uniform(instr->src[1]);
1531    bld_base->load_mem(bld_base, instr->def.num_components,
1532                       instr->def.bit_size,
1533                       index_and_offset_are_uniform, false, idx, offset, result);
1534 }
1535 
1536 
1537 static void
1538 visit_store_ssbo(struct lp_build_nir_context *bld_base,
1539                  nir_intrinsic_instr *instr)
1540 {
1541    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1542 
1543    LLVMValueRef idx = get_src(bld_base, instr->src[1]);
1544    if (nir_src_num_components(instr->src[1]) == 1)
1545       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1546 
1547    LLVMValueRef offset = get_src(bld_base, instr->src[2]);
1548    bool index_and_offset_are_uniform =
1549       nir_src_is_always_uniform(instr->src[1]) &&
1550       nir_src_is_always_uniform(instr->src[2]);
1551    int writemask = instr->const_index[0];
1552    int nc = nir_src_num_components(instr->src[0]);
1553    int bitsize = nir_src_bit_size(instr->src[0]);
1554    bld_base->store_mem(bld_base, writemask, nc, bitsize,
1555                        index_and_offset_are_uniform, false, idx, offset, val);
1556 }
1557 
1558 
1559 static void
1560 visit_get_ssbo_size(struct lp_build_nir_context *bld_base,
1561                     nir_intrinsic_instr *instr,
1562                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1563 {
1564    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1565    if (nir_src_num_components(instr->src[0]) == 1)
1566       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1567 
1568    result[0] = bld_base->get_ssbo_size(bld_base, idx);
1569 }
1570 
1571 
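/*
 * Handle nir_intrinsic_ssbo_atomic and ssbo_atomic_swap.  The swap form is
 * the compare-exchange family and carries two data operands (src[2] and
 * src[3]); plain atomics use only src[2].  Source-level sketch of what this
 * lowers (assumed GLSL, purely for illustration):
 *
 *    uint old = atomicAdd(ssbo_counter, 1u);
 */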
1572 static void
1573 visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
1574                   nir_intrinsic_instr *instr,
1575                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1576 {
1577    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1578    if (nir_src_num_components(instr->src[0]) == 1)
1579       idx = cast_type(bld_base, idx, nir_type_uint, 32);
1580 
1581    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1582    LLVMValueRef val = get_src(bld_base, instr->src[2]);
1583    LLVMValueRef val2 = NULL;
1584    int bitsize = nir_src_bit_size(instr->src[2]);
1585    if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap)
1586       val2 = get_src(bld_base, instr->src[3]);
1587 
1588    bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, false, idx,
1589                         offset, val, val2, &result[0]);
1590 }
1591 
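/*
 * Image and sampler operands arrive in two shapes: a one-component source is
 * a binding index (constant or not) into the context's descriptor tables,
 * while a multi-component source is a bindless handle passed straight
 * through as params->resource.  The two helpers below normalize both shapes.
 */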
1592 static void
1593 img_params_init_resource(struct lp_build_nir_context *bld_base, struct lp_img_params *params, nir_src src)
1594 {
1595    if (nir_src_num_components(src) == 1) {
1596       if (nir_src_is_const(src))
1597          params->image_index = nir_src_as_int(src);
1598       else
1599          params->image_index_offset = get_src(bld_base, src);
1600 
1601       return;
1602    }
1603 
1604    params->resource = get_src(bld_base, src);
1605 }
1606 
1607 static void
1608 sampler_size_params_init_resource(struct lp_build_nir_context *bld_base, struct lp_sampler_size_query_params *params, nir_src src)
1609 {
1610    if (nir_src_num_components(src) == 1) {
1611       if (nir_src_is_const(src))
1612          params->texture_unit = nir_src_as_int(src);
1613       else
1614          params->texture_unit_offset = get_src(bld_base, src);
1615 
1616       return;
1617    }
1618 
1619    params->resource = get_src(bld_base, src);
1620 }
1621 
1622 static void
1623 visit_load_image(struct lp_build_nir_context *bld_base,
1624                  nir_intrinsic_instr *instr,
1625                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1626 {
1627    struct gallivm_state *gallivm = bld_base->base.gallivm;
1628    LLVMBuilderRef builder = gallivm->builder;
1629    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1630    LLVMValueRef coords[5];
1631    struct lp_img_params params = { 0 };
1632 
1633    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1634                                         nir_intrinsic_image_array(instr));
1635    for (unsigned i = 0; i < 4; i++)
1636       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1637    if (params.target == PIPE_TEXTURE_1D_ARRAY)
1638       coords[2] = coords[1];
1639 
1640    params.coords = coords;
1641    params.outdata = result;
1642    lp_img_op_from_intrinsic(&params, instr);
1643    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
1644        nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS)
1645       params.ms_index = cast_type(bld_base, get_src(bld_base, instr->src[2]),
1646                                   nir_type_uint, 32);
1647 
1648    img_params_init_resource(bld_base, &params, instr->src[0]);
1649    params.format = nir_intrinsic_format(instr);
1650 
1651    bld_base->image_op(bld_base, &params);
1652 }
1653 
1654 
1655 static void
1656 visit_store_image(struct lp_build_nir_context *bld_base,
1657                   nir_intrinsic_instr *instr)
1658 {
1659    struct gallivm_state *gallivm = bld_base->base.gallivm;
1660    LLVMBuilderRef builder = gallivm->builder;
1661    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1662    LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1663    LLVMValueRef coords[5];
1664    struct lp_img_params params = { 0 };
1665 
1666    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr), nir_intrinsic_image_array(instr));
1667    for (unsigned i = 0; i < 4; i++)
1668       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1669    if (params.target == PIPE_TEXTURE_1D_ARRAY)
1670       coords[2] = coords[1];
1671    params.coords = coords;
1672 
1673    params.format = nir_intrinsic_format(instr);
1674 
1675    const struct util_format_description *desc = util_format_description(params.format);
1676    bool integer = desc->channel[util_format_get_first_non_void_channel(params.format)].pure_integer;
1677 
1678    for (unsigned i = 0; i < 4; i++) {
1679       params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
1680 
1681       if (integer)
1682          params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->int_bld.vec_type, "");
1683       else
1684          params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
1685    }
1686    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
1687       params.ms_index = get_src(bld_base, instr->src[2]);
1688    params.img_op = LP_IMG_STORE;
1689 
1690    img_params_init_resource(bld_base, &params, instr->src[0]);
1691 
1694    bld_base->image_op(bld_base, &params);
1695 }
1696 
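/*
 * Map a NIR atomic opcode onto the matching LLVM atomicrmw binop, e.g.
 * nir_atomic_op_iadd -> LLVMAtomicRMWBinOpAdd.  The float min/max RMW ops
 * are only exposed by the LLVM-C API from LLVM 15 on, hence the guard.
 */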
1697 LLVMAtomicRMWBinOp
1698 lp_translate_atomic_op(nir_atomic_op op)
1699 {
1700    switch (op) {
1701    case nir_atomic_op_iadd: return LLVMAtomicRMWBinOpAdd;
1702    case nir_atomic_op_xchg: return LLVMAtomicRMWBinOpXchg;
1703    case nir_atomic_op_iand: return LLVMAtomicRMWBinOpAnd;
1704    case nir_atomic_op_ior:  return LLVMAtomicRMWBinOpOr;
1705    case nir_atomic_op_ixor: return LLVMAtomicRMWBinOpXor;
1706    case nir_atomic_op_umin: return LLVMAtomicRMWBinOpUMin;
1707    case nir_atomic_op_umax: return LLVMAtomicRMWBinOpUMax;
1708    case nir_atomic_op_imin: return LLVMAtomicRMWBinOpMin;
1709    case nir_atomic_op_imax: return LLVMAtomicRMWBinOpMax;
1710    case nir_atomic_op_fadd: return LLVMAtomicRMWBinOpFAdd;
1711 #if LLVM_VERSION_MAJOR >= 15
1712    case nir_atomic_op_fmin: return LLVMAtomicRMWBinOpFMin;
1713    case nir_atomic_op_fmax: return LLVMAtomicRMWBinOpFMax;
1714 #endif
1715    default:          unreachable("Unexpected atomic");
1716    }
1717 }
1718 
1719 void
1720 lp_img_op_from_intrinsic(struct lp_img_params *params, nir_intrinsic_instr *instr)
1721 {
1722    if (instr->intrinsic == nir_intrinsic_image_load ||
1723        instr->intrinsic == nir_intrinsic_bindless_image_load) {
1724       params->img_op = LP_IMG_LOAD;
1725       return;
1726    }
1727 
1728    if (instr->intrinsic == nir_intrinsic_bindless_image_sparse_load) {
1729       params->img_op = LP_IMG_LOAD_SPARSE;
1730       return;
1731    }
1732 
1733    if (instr->intrinsic == nir_intrinsic_image_store ||
1734        instr->intrinsic == nir_intrinsic_bindless_image_store) {
1735       params->img_op = LP_IMG_STORE;
1736       return;
1737    }
1738 
1739    if (instr->intrinsic == nir_intrinsic_image_atomic_swap ||
1740        instr->intrinsic == nir_intrinsic_bindless_image_atomic_swap) {
1741       params->img_op = LP_IMG_ATOMIC_CAS;
1742       return;
1743    }
1744 
1745    if (instr->intrinsic == nir_intrinsic_image_atomic ||
1746        instr->intrinsic == nir_intrinsic_bindless_image_atomic) {
1747       params->img_op = LP_IMG_ATOMIC;
1748       params->op = lp_translate_atomic_op(nir_intrinsic_atomic_op(instr));
1749    } else {
1750       params->img_op = -1;
1751    }
1752 }
1753 
1754 
1755 static void
1756 visit_atomic_image(struct lp_build_nir_context *bld_base,
1757                    nir_intrinsic_instr *instr,
1758                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1759 {
1760    struct gallivm_state *gallivm = bld_base->base.gallivm;
1761    LLVMBuilderRef builder = gallivm->builder;
1762    struct lp_img_params params = { 0 };
1763    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1764    LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1765    LLVMValueRef coords[5];
1766 
1767    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1768                                         nir_intrinsic_image_array(instr));
1769    for (unsigned i = 0; i < 4; i++) {
1770       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1771    }
1772    if (params.target == PIPE_TEXTURE_1D_ARRAY) {
1773       coords[2] = coords[1];
1774    }
1775 
1776    params.coords = coords;
1777 
1778    params.format = nir_intrinsic_format(instr);
1779 
1780    const struct util_format_description *desc = util_format_description(params.format);
1781    bool integer = desc->channel[util_format_get_first_non_void_channel(params.format)].pure_integer;
1782 
1783    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
1784       params.ms_index = get_src(bld_base, instr->src[2]);
1785 
1786    if (instr->intrinsic == nir_intrinsic_image_atomic_swap ||
1787        instr->intrinsic == nir_intrinsic_bindless_image_atomic_swap) {
1788       LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
1789       params.indata[0] = in_val;
1790       params.indata2[0] = cas_val;
1791 
1792       if (integer)
1793          params.indata2[0] = LLVMBuildBitCast(builder, params.indata2[0], bld_base->int_bld.vec_type, "");
1794       else
1795          params.indata2[0] = LLVMBuildBitCast(builder, params.indata2[0], bld_base->base.vec_type, "");
1796    } else {
1797       params.indata[0] = in_val;
1798    }
1799 
1800    if (integer)
1801       params.indata[0] = LLVMBuildBitCast(builder, params.indata[0], bld_base->int_bld.vec_type, "");
1802    else
1803       params.indata[0] = LLVMBuildBitCast(builder, params.indata[0], bld_base->base.vec_type, "");
1804 
1805    params.outdata = result;
1806 
1807    lp_img_op_from_intrinsic(&params, instr);
1808 
1809    img_params_init_resource(bld_base, &params, instr->src[0]);
1810 
1811    bld_base->image_op(bld_base, &params);
1812 }
1813 
1814 
1815 static void
1816 visit_image_size(struct lp_build_nir_context *bld_base,
1817                  nir_intrinsic_instr *instr,
1818                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1819 {
1820    struct lp_sampler_size_query_params params = { 0 };
1821 
1822    sampler_size_params_init_resource(bld_base, &params, instr->src[0]);
1823 
1824    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1825                                         nir_intrinsic_image_array(instr));
1826    params.sizes_out = result;
1827    params.ms = nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
1828       nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS;
1829    params.format = nir_intrinsic_format(instr);
1830 
1831    bld_base->image_size(bld_base, &params);
1832 }
1833 
1834 
1835 static void
1836 visit_image_samples(struct lp_build_nir_context *bld_base,
1837                     nir_intrinsic_instr *instr,
1838                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1839 {
1840    struct lp_sampler_size_query_params params = { 0 };
1841 
1842    sampler_size_params_init_resource(bld_base, &params, instr->src[0]);
1843 
1844    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1845                                         nir_intrinsic_image_array(instr));
1846    params.sizes_out = result;
1847    params.ms = nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
1848       nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS;
1849    params.samples_only = true;
1850 
1851    params.format = nir_intrinsic_format(instr);
1852 
1853    bld_base->image_size(bld_base, &params);
1854 }
1855 
1856 
1857 static void
1858 visit_shared_load(struct lp_build_nir_context *bld_base,
1859                   nir_intrinsic_instr *instr,
1860                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1861 {
1862    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1863    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1864    bld_base->load_mem(bld_base, instr->def.num_components,
1865                       instr->def.bit_size,
1866                       offset_is_uniform, false, NULL, offset, result);
1867 }
1868 
1869 
1870 static void
1871 visit_shared_store(struct lp_build_nir_context *bld_base,
1872                    nir_intrinsic_instr *instr)
1873 {
1874    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1875    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1876    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
1877    int writemask = instr->const_index[1];
1878    int nc = nir_src_num_components(instr->src[0]);
1879    int bitsize = nir_src_bit_size(instr->src[0]);
1880    bld_base->store_mem(bld_base, writemask, nc, bitsize,
1881                        offset_is_uniform, false, NULL, offset, val);
1882 }
1883 
1884 
1885 static void
1886 visit_shared_atomic(struct lp_build_nir_context *bld_base,
1887                     nir_intrinsic_instr *instr,
1888                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1889 {
1890    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1891    LLVMValueRef val = get_src(bld_base, instr->src[1]);
1892    LLVMValueRef val2 = NULL;
1893    int bitsize = nir_src_bit_size(instr->src[1]);
1894    if (instr->intrinsic == nir_intrinsic_shared_atomic_swap)
1895       val2 = get_src(bld_base, instr->src[2]);
1896 
1897    bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, false, NULL,
1898                         offset, val, val2, &result[0]);
1899 }
1900 
1901 
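/*
 * Handle nir_intrinsic_barrier.  Memory semantics become a single
 * sequentially consistent LLVM fence, ordering this shader's accesses
 * against other worker threads, and a non-SCOPE_NONE execution scope
 * additionally invokes the backend's workgroup barrier.
 */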
1902 static void
1903 visit_barrier(struct lp_build_nir_context *bld_base,
1904               nir_intrinsic_instr *instr)
1905 {
1906    LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1907    mesa_scope exec_scope = nir_intrinsic_execution_scope(instr);
1908    unsigned nir_semantics = nir_intrinsic_memory_semantics(instr);
1909 
1910    if (nir_semantics) {
1911       LLVMAtomicOrdering ordering = LLVMAtomicOrderingSequentiallyConsistent;
1912       LLVMBuildFence(builder, ordering, false, "");
1913    }
1914    if (exec_scope != SCOPE_NONE)
1915       bld_base->barrier(bld_base);
1916 }
1917 
1918 
1919 static void
1920 visit_discard(struct lp_build_nir_context *bld_base,
1921               nir_intrinsic_instr *instr)
1922 {
1923    LLVMValueRef cond = NULL;
1924    if (instr->intrinsic == nir_intrinsic_terminate_if) {
1925       cond = get_src(bld_base, instr->src[0]);
1926       cond = cast_type(bld_base, cond, nir_type_int, 32);
1927    }
1928    bld_base->discard(bld_base, cond);
1929 }
1930 
1931 
1932 static void
1933 visit_load_kernel_input(struct lp_build_nir_context *bld_base,
1934                         nir_intrinsic_instr *instr,
1935                         LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1936 {
1937    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1938 
1939    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1940    bld_base->load_kernel_arg(bld_base, instr->def.num_components,
1941                              instr->def.bit_size,
1942                              nir_src_bit_size(instr->src[0]),
1943                              offset_is_uniform, offset, result);
1944 }
1945 
1946 
1947 static void
1948 visit_load_global(struct lp_build_nir_context *bld_base,
1949                   nir_intrinsic_instr *instr,
1950                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1951 {
1952    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1953    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1954    bld_base->load_global(bld_base, instr->def.num_components,
1955                          instr->def.bit_size,
1956                          nir_src_bit_size(instr->src[0]),
1957                          offset_is_uniform, addr, result);
1958 }
1959 
1960 
1961 static void
1962 visit_store_global(struct lp_build_nir_context *bld_base,
1963                    nir_intrinsic_instr *instr)
1964 {
1965    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1966    int nc = nir_src_num_components(instr->src[0]);
1967    int bitsize = nir_src_bit_size(instr->src[0]);
1968    LLVMValueRef addr = get_src(bld_base, instr->src[1]);
1969    int addr_bitsize = nir_src_bit_size(instr->src[1]);
1970    int writemask = instr->const_index[0];
1971    bld_base->store_global(bld_base, writemask, nc, bitsize,
1972                           addr_bitsize, addr, val);
1973 }
1974 
1975 
1976 static void
1977 visit_global_atomic(struct lp_build_nir_context *bld_base,
1978                     nir_intrinsic_instr *instr,
1979                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1980 {
1981    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1982    LLVMValueRef val = get_src(bld_base, instr->src[1]);
1983    LLVMValueRef val2 = NULL;
1984    int addr_bitsize = nir_src_bit_size(instr->src[0]);
1985    int val_bitsize = nir_src_bit_size(instr->src[1]);
1986    if (instr->intrinsic == nir_intrinsic_global_atomic_swap)
1987       val2 = get_src(bld_base, instr->src[2]);
1988 
1989    bld_base->atomic_global(bld_base, nir_intrinsic_atomic_op(instr),
1990                            addr_bitsize, val_bitsize, addr, val, val2,
1991                            &result[0]);
1992 }
1993 
1994 #if LLVM_VERSION_MAJOR >= 10
1995 static void visit_shuffle(struct lp_build_nir_context *bld_base,
1996                           nir_intrinsic_instr *instr,
1997                           LLVMValueRef dst[4])
1998 {
1999    LLVMValueRef src = get_src(bld_base, instr->src[0]);
2000    src = cast_type(bld_base, src, nir_type_int,
2001                    nir_src_bit_size(instr->src[0]));
2002    LLVMValueRef index = get_src(bld_base, instr->src[1]);
2003    index = cast_type(bld_base, index, nir_type_uint,
2004                      nir_src_bit_size(instr->src[1]));
2005 
2006    bld_base->shuffle(bld_base, src, index, instr, dst);
2007 }
2008 #endif
2009 
2010 
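/*
 * Handle the interpolateAt*() family (interp_deref_at_offset, _at_centroid,
 * _at_sample).  The two float offsets (at_offset) or the integer sample
 * index (at_sample, carried in offsets[0]) are extracted here and forwarded
 * to the backend's interp_at hook along with the variable's location.
 */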
2011 static void
2012 visit_interp(struct lp_build_nir_context *bld_base,
2013              nir_intrinsic_instr *instr,
2014              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2015 {
2016    struct gallivm_state *gallivm = bld_base->base.gallivm;
2017    LLVMBuilderRef builder = gallivm->builder;
2018    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
2019    unsigned num_components = instr->def.num_components;
2020    nir_variable *var = nir_deref_instr_get_variable(deref);
2021    unsigned const_index;
2022    LLVMValueRef indir_index;
2023    LLVMValueRef offsets[2] = { NULL, NULL };
2024    get_deref_offset(bld_base, deref, false, NULL, NULL,
2025                     &const_index, &indir_index);
2026    bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
2027    bool sample = false;
2028    if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
2029       for (unsigned i = 0; i < 2; i++) {
2030          offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
2031          offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
2032       }
2033    } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
2034       offsets[0] = get_src(bld_base, instr->src[1]);
2035       offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
2036       sample = true;
2037    }
2038    bld_base->interp_at(bld_base, num_components, var, centroid, sample,
2039                        const_index, indir_index, offsets, result);
2040 }
2041 
2042 
2043 static void
2044 visit_load_scratch(struct lp_build_nir_context *bld_base,
2045                    nir_intrinsic_instr *instr,
2046                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2047 {
2048    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
2049 
2050    bld_base->load_scratch(bld_base, instr->def.num_components,
2051                           instr->def.bit_size, offset, result);
2052 }
2053 
2054 
2055 static void
2056 visit_store_scratch(struct lp_build_nir_context *bld_base,
2057                     nir_intrinsic_instr *instr)
2058 {
2059    LLVMValueRef val = get_src(bld_base, instr->src[0]);
2060    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
2061    int writemask = instr->const_index[2];
2062    int nc = nir_src_num_components(instr->src[0]);
2063    int bitsize = nir_src_bit_size(instr->src[0]);
2064    bld_base->store_scratch(bld_base, writemask, nc, bitsize, offset, val);
2065 }
2066 
2067 static void
2068 visit_payload_load(struct lp_build_nir_context *bld_base,
2069                   nir_intrinsic_instr *instr,
2070                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2071 {
2072    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
2073    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
2074    bld_base->load_mem(bld_base, instr->def.num_components,
2075                       instr->def.bit_size,
2076                       offset_is_uniform, true, NULL, offset, result);
2077 }
2078 
2079 static void
2080 visit_payload_store(struct lp_build_nir_context *bld_base,
2081                     nir_intrinsic_instr *instr)
2082 {
2083    LLVMValueRef val = get_src(bld_base, instr->src[0]);
2084    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
2085    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
2086    int writemask = instr->const_index[1];
2087    int nc = nir_src_num_components(instr->src[0]);
2088    int bitsize = nir_src_bit_size(instr->src[0]);
2089    bld_base->store_mem(bld_base, writemask, nc, bitsize,
2090                        offset_is_uniform, true, NULL, offset, val);
2091 }
2092 
2093 static void
2094 visit_payload_atomic(struct lp_build_nir_context *bld_base,
2095                      nir_intrinsic_instr *instr,
2096                      LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2097 {
2098    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
2099    LLVMValueRef val = get_src(bld_base, instr->src[1]);
2100    LLVMValueRef val2 = NULL;
2101    int bitsize = nir_src_bit_size(instr->src[1]);
2102    if (instr->intrinsic == nir_intrinsic_task_payload_atomic_swap)
2103       val2 = get_src(bld_base, instr->src[2]);
2104 
2105    bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, true, NULL,
2106                         offset, val, val2, &result[0]);
2107 }
2108 
2109 static void visit_load_param(struct lp_build_nir_context *bld_base,
2110                              nir_intrinsic_instr *instr,
2111                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
2112 {
2113    LLVMValueRef param = LLVMGetParam(bld_base->func, nir_intrinsic_param_idx(instr) + LP_RESV_FUNC_ARGS);
2114    struct gallivm_state *gallivm = bld_base->base.gallivm;
2115    if (instr->num_components == 1)
2116       result[0] = param;
2117    else {
2118       for (unsigned i = 0; i < instr->num_components; i++)
2119          result[i] = LLVMBuildExtractValue(gallivm->builder, param, i, "");
2120    }
2121 }
2122 
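/*
 * Central intrinsic dispatcher.  Each handler that produces a value leaves
 * it in result[]; assign_ssa_dest at the bottom then binds it to the
 * instruction's SSA def.  Unknown intrinsics are printed and assert.
 */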
2123 static void
2124 visit_intrinsic(struct lp_build_nir_context *bld_base,
2125                 nir_intrinsic_instr *instr)
2126 {
2127    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
2128    switch (instr->intrinsic) {
2129    case nir_intrinsic_decl_reg:
2130       /* already handled */
2131       break;
2132    case nir_intrinsic_load_reg:
2133    case nir_intrinsic_load_reg_indirect:
2134       visit_load_reg(bld_base, instr, result);
2135       break;
2136    case nir_intrinsic_store_reg:
2137    case nir_intrinsic_store_reg_indirect:
2138       visit_store_reg(bld_base, instr);
2139       break;
2140    case nir_intrinsic_load_input:
2141    case nir_intrinsic_load_per_primitive_input:
2142       visit_load_input(bld_base, instr, result);
2143       break;
2144    case nir_intrinsic_store_output:
2145       visit_store_output(bld_base, instr);
2146       break;
2147    case nir_intrinsic_load_deref:
2148       visit_load_var(bld_base, instr, result);
2149       break;
2150    case nir_intrinsic_store_deref:
2151       visit_store_var(bld_base, instr);
2152       break;
2153    case nir_intrinsic_load_ubo:
2154       visit_load_ubo(bld_base, instr, result);
2155       break;
2156    case nir_intrinsic_load_push_constant:
2157       visit_load_push_constant(bld_base, instr, result);
2158       break;
2159    case nir_intrinsic_load_ssbo:
2160       visit_load_ssbo(bld_base, instr, result);
2161       break;
2162    case nir_intrinsic_store_ssbo:
2163       visit_store_ssbo(bld_base, instr);
2164       break;
2165    case nir_intrinsic_get_ssbo_size:
2166       visit_get_ssbo_size(bld_base, instr, result);
2167       break;
2168    case nir_intrinsic_load_vertex_id:
2169    case nir_intrinsic_load_primitive_id:
2170    case nir_intrinsic_load_instance_id:
2171    case nir_intrinsic_load_base_instance:
2172    case nir_intrinsic_load_base_vertex:
2173    case nir_intrinsic_load_first_vertex:
2174    case nir_intrinsic_load_workgroup_id:
2175    case nir_intrinsic_load_local_invocation_id:
2176    case nir_intrinsic_load_local_invocation_index:
2177    case nir_intrinsic_load_num_workgroups:
2178    case nir_intrinsic_load_invocation_id:
2179    case nir_intrinsic_load_front_face:
2180    case nir_intrinsic_load_draw_id:
2181    case nir_intrinsic_load_workgroup_size:
2182    case nir_intrinsic_load_work_dim:
2183    case nir_intrinsic_load_tess_coord:
2184    case nir_intrinsic_load_tess_level_outer:
2185    case nir_intrinsic_load_tess_level_inner:
2186    case nir_intrinsic_load_patch_vertices_in:
2187    case nir_intrinsic_load_sample_id:
2188    case nir_intrinsic_load_sample_pos:
2189    case nir_intrinsic_load_sample_mask_in:
2190    case nir_intrinsic_load_view_index:
2191    case nir_intrinsic_load_subgroup_invocation:
2192    case nir_intrinsic_load_subgroup_id:
2193    case nir_intrinsic_load_num_subgroups:
2194       bld_base->sysval_intrin(bld_base, instr, result);
2195       break;
2196    case nir_intrinsic_load_helper_invocation:
2197       bld_base->helper_invocation(bld_base, &result[0]);
2198       break;
2199    case nir_intrinsic_terminate_if:
2200    case nir_intrinsic_terminate:
2201       visit_discard(bld_base, instr);
2202       break;
2203    case nir_intrinsic_emit_vertex:
2204       bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
2205       break;
2206    case nir_intrinsic_end_primitive:
2207       bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
2208       break;
2209    case nir_intrinsic_ssbo_atomic:
2210    case nir_intrinsic_ssbo_atomic_swap:
2211       visit_ssbo_atomic(bld_base, instr, result);
2212       break;
2213    case nir_intrinsic_image_load:
2214    case nir_intrinsic_bindless_image_load:
2215    case nir_intrinsic_bindless_image_sparse_load:
2216       visit_load_image(bld_base, instr, result);
2217       break;
2218    case nir_intrinsic_image_store:
2219    case nir_intrinsic_bindless_image_store:
2220       visit_store_image(bld_base, instr);
2221       break;
2222    case nir_intrinsic_image_atomic:
2223    case nir_intrinsic_image_atomic_swap:
2224    case nir_intrinsic_bindless_image_atomic:
2225    case nir_intrinsic_bindless_image_atomic_swap:
2226       visit_atomic_image(bld_base, instr, result);
2227       break;
2228    case nir_intrinsic_image_size:
2229    case nir_intrinsic_bindless_image_size:
2230       visit_image_size(bld_base, instr, result);
2231       break;
2232    case nir_intrinsic_image_samples:
2233    case nir_intrinsic_bindless_image_samples:
2234       visit_image_samples(bld_base, instr, result);
2235       break;
2236    case nir_intrinsic_load_shared:
2237       visit_shared_load(bld_base, instr, result);
2238       break;
2239    case nir_intrinsic_store_shared:
2240       visit_shared_store(bld_base, instr);
2241       break;
2242    case nir_intrinsic_shared_atomic:
2243    case nir_intrinsic_shared_atomic_swap:
2244       visit_shared_atomic(bld_base, instr, result);
2245       break;
2246    case nir_intrinsic_barrier:
2247       visit_barrier(bld_base, instr);
2248       break;
2249    case nir_intrinsic_load_kernel_input:
2250       visit_load_kernel_input(bld_base, instr, result);
2251       break;
2252    case nir_intrinsic_load_global:
2253    case nir_intrinsic_load_global_constant:
2254       visit_load_global(bld_base, instr, result);
2255       break;
2256    case nir_intrinsic_store_global:
2257       visit_store_global(bld_base, instr);
2258       break;
2259    case nir_intrinsic_global_atomic:
2260    case nir_intrinsic_global_atomic_swap:
2261       visit_global_atomic(bld_base, instr, result);
2262       break;
2263    case nir_intrinsic_vote_all:
2264    case nir_intrinsic_vote_any:
2265    case nir_intrinsic_vote_ieq:
2266    case nir_intrinsic_vote_feq:
2267       bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
2268       break;
2269    case nir_intrinsic_elect:
2270       bld_base->elect(bld_base, result);
2271       break;
2272    case nir_intrinsic_reduce:
2273    case nir_intrinsic_inclusive_scan:
2274    case nir_intrinsic_exclusive_scan:
2275       bld_base->reduce(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
2276       break;
2277    case nir_intrinsic_ballot:
2278       bld_base->ballot(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
2279       break;
2280 #if LLVM_VERSION_MAJOR >= 10
2281    case nir_intrinsic_shuffle:
2282       visit_shuffle(bld_base, instr, result);
2283       break;
2284 #endif
2285    case nir_intrinsic_read_invocation:
2286    case nir_intrinsic_read_first_invocation: {
2287       LLVMValueRef src0 = get_src(bld_base, instr->src[0]);
2288       src0 = cast_type(bld_base, src0, nir_type_int, nir_src_bit_size(instr->src[0]));
2289 
2290       LLVMValueRef src1 = NULL;
2291       if (instr->intrinsic == nir_intrinsic_read_invocation)
2292          src1 = cast_type(bld_base, get_src(bld_base, instr->src[1]), nir_type_int, 32);
2293 
2294       bld_base->read_invocation(bld_base, src0, nir_src_bit_size(instr->src[0]), src1, result);
2295       break;
2296    }
2297    case nir_intrinsic_interp_deref_at_offset:
2298    case nir_intrinsic_interp_deref_at_centroid:
2299    case nir_intrinsic_interp_deref_at_sample:
2300       visit_interp(bld_base, instr, result);
2301       break;
2302    case nir_intrinsic_load_scratch:
2303       visit_load_scratch(bld_base, instr, result);
2304       break;
2305    case nir_intrinsic_store_scratch:
2306       visit_store_scratch(bld_base, instr);
2307       break;
2308    case nir_intrinsic_shader_clock:
2309       bld_base->clock(bld_base, result);
2310       break;
2311    case nir_intrinsic_launch_mesh_workgroups:
2312       bld_base->launch_mesh_workgroups(bld_base,
2313                                        get_src(bld_base, instr->src[0]));
2314       break;
2315    case nir_intrinsic_load_task_payload:
2316       visit_payload_load(bld_base, instr, result);
2317       break;
2318    case nir_intrinsic_store_task_payload:
2319       visit_payload_store(bld_base, instr);
2320       break;
2321    case nir_intrinsic_task_payload_atomic:
2322    case nir_intrinsic_task_payload_atomic_swap:
2323       visit_payload_atomic(bld_base, instr, result);
2324       break;
2325    case nir_intrinsic_set_vertex_and_primitive_count:
2326       bld_base->set_vertex_and_primitive_count(bld_base,
2327                                                get_src(bld_base, instr->src[0]),
2328                                                get_src(bld_base, instr->src[1]));
2329       break;
2330    case nir_intrinsic_load_param:
2331       visit_load_param(bld_base, instr, result);
2332       break;
2333    case nir_intrinsic_ddx:
2334    case nir_intrinsic_ddy:
2335    case nir_intrinsic_ddx_coarse:
2336    case nir_intrinsic_ddy_coarse:
2337    case nir_intrinsic_ddx_fine:
2338    case nir_intrinsic_ddy_fine: {
2339       LLVMValueRef src = get_src(bld_base, instr->src[0]);
2340       src = cast_type(bld_base, src, nir_type_float, nir_src_bit_size(instr->src[0]));
2341 
2342       struct lp_build_context *bld = get_flt_bld(bld_base, nir_src_bit_size(instr->src[0]));
2343 
2344       if (instr->intrinsic == nir_intrinsic_ddx ||
2345           instr->intrinsic == nir_intrinsic_ddx_coarse ||
2346           instr->intrinsic == nir_intrinsic_ddx_fine)
2347          result[0] = lp_build_ddx(bld, src);
2348       else
2349          result[0] = lp_build_ddy(bld, src);
2350 
2351       break;
2352    }
2353    default:
2354       fprintf(stderr, "Unsupported intrinsic: ");
2355       nir_print_instr(&instr->instr, stderr);
2356       fprintf(stderr, "\n");
2357       assert(0);
2358       break;
2359    }
2360    if (result[0]) {
2361       assign_ssa_dest(bld_base, &instr->def, result);
2362    }
2363 }
2364 
2365 
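/*
 * Handle the texture query opcodes (txs, query_levels, texture_samples) via
 * the backend's size query.  query_levels reuses the size query with an
 * explicit LOD of 0 and reads the level count from component 3 of the
 * result, which is what the final assign_ssa_dest index selects.
 */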
2366 static void
2367 visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
2368 {
2369    struct lp_sampler_size_query_params params = { 0 };
2370    LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
2371    LLVMValueRef explicit_lod = NULL;
2372    LLVMValueRef texture_unit_offset = NULL;
2373    LLVMValueRef resource = NULL;
2374 
2375    for (unsigned i = 0; i < instr->num_srcs; i++) {
2376       switch (instr->src[i].src_type) {
2377       case nir_tex_src_lod:
2378          explicit_lod = cast_type(bld_base,
2379                                   get_src(bld_base, instr->src[i].src),
2380                                   nir_type_int, 32);
2381          break;
2382       case nir_tex_src_texture_offset:
2383          texture_unit_offset = get_src(bld_base, instr->src[i].src);
2384          break;
2385       case nir_tex_src_texture_handle:
2386          resource = get_src(bld_base, instr->src[i].src);
2387          break;
2388       default:
2389          break;
2390       }
2391    }
2392 
2393    params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
2394    params.texture_unit = instr->texture_index;
2395    params.explicit_lod = explicit_lod;
2396    params.is_sviewinfo = true;
2397    params.sizes_out = sizes_out;
2398    params.samples_only = (instr->op == nir_texop_texture_samples);
2399    params.texture_unit_offset = texture_unit_offset;
2400    params.ms = instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
2401       instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS;
2402 
2403    if (instr->op == nir_texop_query_levels)
2404       params.explicit_lod = bld_base->uint_bld.zero;
2405 
2406    params.resource = resource;
2407 
2408    bld_base->tex_size(bld_base, &params);
2409    assign_ssa_dest(bld_base, &instr->def,
2410                    &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
2411 }
2412 
2413 
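/*
 * Classify how an explicit LOD or bias varies across the SIMD vector:
 * scalar when NIR proves it uniform, per-quad for fragment shaders (implicit
 * derivatives are quad-based), or per-element everywhere else and when
 * GALLIVM_PERF_NO_QUAD_LOD requests exact per-pixel LOD.
 */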
2414 static enum lp_sampler_lod_property
2415 lp_build_nir_lod_property(gl_shader_stage stage, nir_src lod_src)
2416 {
2417    enum lp_sampler_lod_property lod_property;
2418 
2419    if (nir_src_is_always_uniform(lod_src)) {
2420       lod_property = LP_SAMPLER_LOD_SCALAR;
2421    } else if (stage == MESA_SHADER_FRAGMENT) {
2422       if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
2423          lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2424       else
2425          lod_property = LP_SAMPLER_LOD_PER_QUAD;
2426    } else {
2427       lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2428    }
2429    return lod_property;
2430 }
2431 
2432 
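/*
 * Pack everything lp_bld_sample needs to know about a texture operation into
 * one uint32_t key: op type (sample/fetch/gather/lodq), shadow compare,
 * offsets, multisample fetch, LOD control and LOD property.  Illustrative
 * composition (assumed operand mix, for exposition only): a texelFetch with
 * a non-uniform explicit LOD in a vertex shader yields
 *
 *    (LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT) |
 *    (LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT) |
 *    (LP_SAMPLER_LOD_PER_ELEMENT << LP_SAMPLER_LOD_PROPERTY_SHIFT)
 */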
2433 uint32_t
2434 lp_build_nir_sample_key(gl_shader_stage stage, nir_tex_instr *instr)
2435 {
2436    uint32_t sample_key = 0;
2437 
2438    if (instr->op == nir_texop_txf ||
2439        instr->op == nir_texop_txf_ms) {
2440       sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
2441    } else if (instr->op == nir_texop_tg4) {
2442       sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
2443       sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
2444    } else if (instr->op == nir_texop_lod) {
2445       sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
2446    }
2447 
2448    bool explicit_lod = false;
2449    uint32_t lod_src = 0;
2450 
2451    for (unsigned i = 0; i < instr->num_srcs; i++) {
2452       switch (instr->src[i].src_type) {
2453       case nir_tex_src_comparator:
2454          sample_key |= LP_SAMPLER_SHADOW;
2455          break;
2456       case nir_tex_src_bias:
2457          sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
2458          explicit_lod = true;
2459          lod_src = i;
2460          break;
2461       case nir_tex_src_lod:
2462          sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
2463          explicit_lod = true;
2464          lod_src = i;
2465          break;
2466       case nir_tex_src_offset:
2467          sample_key |= LP_SAMPLER_OFFSETS;
2468          break;
2469       case nir_tex_src_ms_index:
2470          sample_key |= LP_SAMPLER_FETCH_MS;
2471          break;
2472       default:
2473          break;
2474       }
2475    }
2476 
2477    enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
2478    if (explicit_lod)
2479       lod_property = lp_build_nir_lod_property(stage, instr->src[lod_src].src);
2480 
2481    if (instr->op == nir_texop_txd) {
2482       sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
2483 
2484       if (stage == MESA_SHADER_FRAGMENT) {
2485          if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
2486             lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2487          else
2488             lod_property = LP_SAMPLER_LOD_PER_QUAD;
2489       } else
2490          lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2491    }
2492 
2493    sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
2494 
2495    if (instr->is_sparse)
2496       sample_key |= LP_SAMPLER_RESIDENCY;
2497 
2498    return sample_key;
2499 }
2500 
2501 
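/*
 * Translate a nir_tex_instr into a gallivm sampler invocation.  Size and
 * sample-count queries are routed to visit_txs above; everything else walks
 * the variable-length source list to collect coords, comparator, LOD/bias,
 * derivatives, offsets, ms index and bindless handles before building the
 * lp_sampler_params.  In AOS mode a single packed coordinate value is used.
 */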
2502 static void
2503 visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
2504 {
2505    if (instr->op == nir_texop_txs ||
2506        instr->op == nir_texop_query_levels ||
2507        instr->op == nir_texop_texture_samples) {
2508       visit_txs(bld_base, instr);
2509       return;
2510    }
2511 
2512    struct gallivm_state *gallivm = bld_base->base.gallivm;
2513    LLVMBuilderRef builder = gallivm->builder;
2514    LLVMValueRef coords[5];
2515    LLVMValueRef offsets[3] = { NULL };
2516    LLVMValueRef explicit_lod = NULL, ms_index = NULL;
2517    struct lp_sampler_params params = { 0 };
2518    struct lp_derivatives derivs;
2519    nir_deref_instr *texture_deref_instr = NULL;
2520    nir_deref_instr *sampler_deref_instr = NULL;
2521    LLVMValueRef texture_unit_offset = NULL;
2522    LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
2523    LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.vec_type);
2524    unsigned coord_vals = is_aos(bld_base) ? 1 : instr->coord_components;
2525 
2526    LLVMValueRef texture_resource = NULL;
2527    LLVMValueRef sampler_resource = NULL;
2528 
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (coord_vals == 1) {
            coords[0] = coord;
         } else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         for (unsigned chan = coord_vals; chan < 5; chan++) {
            coords[chan] = coord_undef;
         }
         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_comparator:
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         if (offset_cnt == 1)
            offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan) {
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
               offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
            }
         }
         break;
      }
      case nir_tex_src_ms_index:
         ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;

      case nir_tex_src_texture_offset:
         texture_unit_offset = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_sampler_offset:
         break;
      case nir_tex_src_texture_handle:
         texture_resource = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_sampler_handle:
         sampler_resource = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_plane:
         assert(nir_src_is_const(instr->src[i].src) && !nir_src_as_uint(instr->src[i].src));
         break;
      default:
         assert(0);
         break;
      }
   }
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (!sampler_resource)
      sampler_resource = texture_resource;

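   /* Sampling ops consume floating-point coordinates; texel fetches
    * (txf/txf_ms) consume integer texel addresses.
    */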
   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_tg4:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_lod:
      for (unsigned chan = 0; chan < coord_vals; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
      break;
   default:
      break;
   }

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* Move the layer coord into place for 1D arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   uint32_t samp_base_index = 0, tex_base_index = 0;
   if (!sampler_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         samp_base_index = instr->sampler_index;
      }
   }
   if (!texture_deref_instr) {
      int tex_src_index = nir_tex_instr_src_index(instr, nir_tex_src_texture_handle);
      if (tex_src_index == -1) {
         tex_base_index = instr->texture_index;
      }
   }

   if (instr->op == nir_texop_txd)
      params.derivs = &derivs;

   params.sample_key = lp_build_nir_sample_key(bld_base->shader->info.stage, instr);
   params.offsets = offsets;
   params.texture_index = tex_base_index;
   params.texture_index_offset = texture_unit_offset;
   params.sampler_index = samp_base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   params.ms_index = ms_index;
   params.texture_resource = texture_resource;
   params.sampler_resource = sampler_resource;
   bld_base->tex(bld_base, &params);

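   /* The sampler always returns 32-bit texels; narrow them when the
    * NIR def is 16-bit: floats via float->half conversion, integers
    * by truncation.
    */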
   if (instr->def.bit_size != 32) {
      assert(instr->def.bit_size == 16);
      LLVMTypeRef vec_type = NULL;
      bool is_float = false;
      switch (nir_alu_type_get_base_type(instr->dest_type)) {
      case nir_type_float:
         is_float = true;
         break;
      case nir_type_int:
         vec_type = bld_base->int16_bld.vec_type;
         break;
      case nir_type_uint:
         vec_type = bld_base->uint16_bld.vec_type;
         break;
      default:
         unreachable("unexpected alu type");
      }
      for (int i = 0; i < instr->def.num_components; ++i) {
         if (is_float) {
            texel[i] = lp_build_float_to_half(gallivm, texel[i]);
         } else {
            texel[i] = LLVMBuildBitCast(builder, texel[i], bld_base->int_bld.vec_type, "");
            texel[i] = LLVMBuildTrunc(builder, texel[i], vec_type, "");
         }
      }
   }

   assign_ssa_dest(bld_base, &instr->def, texel);
}


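/* Materialize a NIR undef: one LLVM undef vector per component, with
 * the unused tail of the array cleared.
 */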
static void
visit_ssa_undef(struct lp_build_nir_context *bld_base,
                const nir_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
                                                    instr->def.bit_size);
   for (unsigned i = 0; i < num_components; i++)
      undef[i] = LLVMGetUndef(undef_bld->vec_type);
   /* Clear the unused tail; memset sizes are in bytes, not elements. */
   memset(&undef[num_components], 0,
          (NIR_MAX_VEC_COMPONENTS - num_components) * sizeof(LLVMValueRef));
   assign_ssa_dest(bld_base, &instr->def, undef);
}


static void
visit_jump(struct lp_build_nir_context *bld_base,
           const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld_base->break_stmt(bld_base);
      break;
   case nir_jump_continue:
      bld_base->continue_stmt(bld_base);
      break;
   default:
      unreachable("Unknown jump instr");
   }
}


static void
visit_deref(struct lp_build_nir_context *bld_base,
            nir_deref_instr *instr)
{
   if (!nir_deref_mode_is_one_of(instr, nir_var_mem_shared |
                                        nir_var_mem_global)) {
      return;
   }

   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry =
         _mesa_hash_table_search(bld_base->vars, instr->var);
      result = entry->data;
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   assign_ssa(bld_base, instr->def.index, result);
}

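/* Emit a call to a previously-built shader function.  The leading
 * LP_RESV_FUNC_ARGS argument slots are reserved for the backend's
 * call hook; the NIR parameters follow them.
 */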
static void
visit_call(struct lp_build_nir_context *bld_base,
           nir_call_instr *instr)
{
   LLVMValueRef *args;
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->fns, instr->callee);
   struct lp_build_fn *fn = entry->data;
   args = calloc(instr->num_params + LP_RESV_FUNC_ARGS, sizeof(LLVMValueRef));

   assert(args);

   args[0] = 0;
   for (unsigned i = 0; i < instr->num_params; i++) {
      LLVMValueRef arg = get_src(bld_base, instr->params[i]);

      if (nir_src_bit_size(instr->params[i]) == 32 && LLVMTypeOf(arg) == bld_base->base.vec_type)
         arg = cast_type(bld_base, arg, nir_type_int, 32);
      args[i + LP_RESV_FUNC_ARGS] = arg;
   }

   bld_base->call(bld_base, fn, instr->num_params + LP_RESV_FUNC_ARGS, args);
   free(args);
}

static void
visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(bld_base, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(bld_base, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(bld_base, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         /* Phis are eliminated by nir_convert_from_ssa() in
          * lp_build_nir_prepasses(), so none should reach here. */
         assert(0);
         break;
      case nir_instr_type_undef:
         visit_ssa_undef(bld_base, nir_instr_as_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(bld_base, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(bld_base, nir_instr_as_deref(instr));
         break;
      case nir_instr_type_call:
         visit_call(bld_base, nir_instr_as_call(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }
}

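/* Heuristic: an empty or single short block is cheaper to emit
 * flattened (executed unconditionally under the exec mask) than as
 * real branches.
 */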
static bool
lp_should_flatten_cf_list(struct exec_list *cf_list)
{
   if (exec_list_is_empty(cf_list))
      return true;
   if (!exec_list_is_singular(cf_list))
      return false;

   struct exec_node *head = exec_list_get_head(cf_list);
   nir_block *block = nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
   return exec_list_length(&block->instr_list) < 8;
}

static void
visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
{
   LLVMValueRef cond = get_src(bld_base, if_stmt->condition);

   bool flatten_then = lp_should_flatten_cf_list(&if_stmt->then_list);

   bld_base->if_cond(bld_base, cond, flatten_then);
   visit_cf_list(bld_base, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      bool flatten_else = lp_should_flatten_cf_list(&if_stmt->else_list);
      bld_base->else_stmt(bld_base, flatten_then, flatten_else);
      visit_cf_list(bld_base, &if_stmt->else_list);
      bld_base->endif_stmt(bld_base, flatten_else);
   } else {
      bld_base->endif_stmt(bld_base, flatten_then);
   }
}


static void
visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
{
   assert(!nir_loop_has_continue_construct(loop));
   bld_base->bgnloop(bld_base);
   visit_cf_list(bld_base, &loop->body);
   bld_base->endloop(bld_base);
}


static void
visit_cf_list(struct lp_build_nir_context *bld_base,
              struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(bld_base, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         visit_if(bld_base, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         visit_loop(bld_base, nir_cf_node_as_loop(node));
         break;
      default:
         assert(0);
      }
   }
}


static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}


/* Vector registers are stored as arrays on the LLVM side, so that we
 * can use GEP on them: exec-mask stores need to operate on a single
 * component at a time.  The arrays are laid out as:
 *    0.x, 1.x, 2.x, 3.x
 *    0.y, 1.y, 2.y, 3.y
 *    ...
 */
static LLVMTypeRef
get_register_type(struct lp_build_nir_context *bld_base,
                  nir_intrinsic_instr *reg)
{
   if (is_aos(bld_base))
      return bld_base->base.int_vec_type;

   unsigned num_array_elems = nir_intrinsic_num_array_elems(reg);
   unsigned bit_size = nir_intrinsic_bit_size(reg);
   unsigned num_components = nir_intrinsic_num_components(reg);

   struct lp_build_context *int_bld =
      get_int_bld(bld_base, true, bit_size == 1 ? 32 : bit_size);

   LLVMTypeRef type = int_bld->vec_type;
   if (num_components > 1)
      type = LLVMArrayType(type, num_components);
   if (num_array_elems)
      type = LLVMArrayType(type, num_array_elems);

   return type;
}
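
/* For example (illustrative, assuming an 8-wide SIMD configuration):
 * a vec4 register of 32-bit values becomes [4 x <8 x i32>], and an
 * array of three such registers becomes [3 x [4 x <8 x i32>]].
 */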

void
lp_build_nir_prepasses(struct nir_shader *nir)
{
   NIR_PASS_V(nir, nir_convert_to_lcssa, true, true);
   NIR_PASS_V(nir, nir_convert_from_ssa, true);
   NIR_PASS_V(nir, nir_lower_locals_to_regs, 32);
   NIR_PASS_V(nir, nir_remove_dead_derefs);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
}
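
/* A rough sketch of the intended call order (hypothetical caller, not
 * part of this file): run the generic optimizations first, then the
 * prepasses that take the shader out of SSA, then the translation
 * proper:
 *
 *    lp_build_opt_nir(nir);
 *    lp_build_nir_prepasses(nir);
 *    lp_build_nir_llvm(bld_base, nir, nir_shader_get_entrypoint(nir));
 */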

bool lp_build_nir_llvm(struct lp_build_nir_context *bld_base,
                       struct nir_shader *nir,
                       nir_function_impl *impl)
{
   nir_foreach_shader_out_variable(variable, nir)
      handle_shader_output_decl(bld_base, nir, variable);

   if (nir->info.io_lowered) {
      uint64_t outputs_written = nir->info.outputs_written;

      while (outputs_written) {
         unsigned location = u_bit_scan64(&outputs_written);
         nir_variable var = {0};

         var.type = glsl_vec4_type();
         var.data.mode = nir_var_shader_out;
         var.data.location = location;
         var.data.driver_location = util_bitcount64(nir->info.outputs_written &
                                                    BITFIELD64_MASK(location));
         bld_base->emit_var_decl(bld_base, &var);
      }
   }

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->range_ht = _mesa_pointer_hash_table_create(NULL);

   nir_foreach_reg_decl(reg, impl) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca(bld_base->base.gallivm,
                                               type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
   nir_index_ssa_defs(impl);
   bld_base->ssa_defs = calloc(impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &impl->body);

   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   ralloc_free(bld_base->range_ht);
   return true;
}


/* Run some basic NIR optimizations to remove constructs the LLVM
 * backend doesn't want to see. */
void
lp_build_opt_nir(struct nir_shader *nir)
{
   bool progress;

   static const struct nir_lower_tex_options lower_tex_options = {
      .lower_tg4_offsets = true,
      .lower_txp = ~0u,
      .lower_invalid_implicit_lod = true,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (nir->info.stage == MESA_SHADER_TASK) {
      nir_lower_task_shader_options ts_opts = { 0 };
      NIR_PASS_V(nir, nir_lower_task_shader, ts_opts);
   }

   NIR_PASS_V(nir, nir_lower_flrp, 16|32|64, true);
   NIR_PASS_V(nir, nir_lower_fp16_casts, nir_lower_fp16_all | nir_lower_fp16_split_fp64);

   NIR_PASS(_, nir, nir_lower_alu);

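   /* Iterate these lowering/cleanup passes until they reach a fixed
    * point. */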
   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_lower_pack);

      nir_lower_tex_options options = { .lower_invalid_implicit_lod = true, };
      NIR_PASS_V(nir, nir_lower_tex, &options);

      const nir_lower_subgroups_options subgroups_options = {
         .subgroup_size = lp_native_vector_width / 32,
         .ballot_bit_size = 32,
         .ballot_components = 1,
         .lower_to_scalar = true,
         .lower_subgroup_masks = true,
         .lower_relative_shuffle = true,
         .lower_inverse_ballot = true,
      };
      NIR_PASS(progress, nir, nir_lower_subgroups, &subgroups_options);
   } while (progress);

   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_algebraic_late);
      if (progress) {
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_cse);
      }
   } while (progress);

   if (nir_lower_bool_to_int32(nir)) {
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
   }
}