1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "../intel_nir.h"
25 #include "elk_nir.h"
26 #include "elk_nir_private.h"
27 #include "elk_shader.h"
28 #include "dev/intel_debug.h"
29 #include "compiler/glsl_types.h"
30 #include "compiler/nir/nir_builder.h"
31 #include "util/u_math.h"
32 
33 static bool
34 remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
35                   enum tess_primitive_mode _primitive_mode)
36 {
37    const int location = nir_intrinsic_base(intr);
38    const unsigned component = nir_intrinsic_component(intr);
39    bool out_of_bounds = false;
40    bool write = !nir_intrinsic_infos[intr->intrinsic].has_dest;
41    unsigned mask = write ? nir_intrinsic_write_mask(intr) : 0;
42    nir_def *src = NULL, *dest = NULL;
43 
44    if (write) {
45       assert(intr->num_components == intr->src[0].ssa->num_components);
46    } else {
47       assert(intr->num_components == intr->def.num_components);
48    }
49 
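   /* The tessellation levels live in the first two vec4 slots of the patch
    * URB entry (the patch header).  Rewrite the base, component, and swizzle
    * of each access to match the DWord layout described in the per-domain
    * comments below.
    */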
50    if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
51       b->cursor = write ? nir_before_instr(&intr->instr)
52                         : nir_after_instr(&intr->instr);
53 
54       switch (_primitive_mode) {
55       case TESS_PRIMITIVE_QUADS:
56          /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
57          nir_intrinsic_set_base(intr, 0);
58 
59          if (write) {
60             assert(intr->src[0].ssa->num_components == 2);
61 
62             intr->num_components = 4;
63 
64             nir_def *undef = nir_undef(b, 1, 32);
65             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
66             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
67             src = nir_vec4(b, undef, undef, y, x);
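            /* The write mask has to follow the same swizzle: X now targets
             * the w channel (DWord 3) and Y the z channel (DWord 2).
             */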
68             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
69          } else if (intr->def.num_components > 1) {
70             assert(intr->def.num_components == 2);
71 
72             intr->num_components = 4;
73             intr->def.num_components = 4;
74 
75             unsigned wz[2] = { 3, 2 };
76             dest = nir_swizzle(b, &intr->def, wz, 2);
77          } else {
78             nir_intrinsic_set_component(intr, 3 - component);
79          }
80          break;
81       case TESS_PRIMITIVE_TRIANGLES:
82          /* gl_TessLevelInner[0] lives at DWord 4. */
83          nir_intrinsic_set_base(intr, 1);
84          mask &= WRITEMASK_X;
85          out_of_bounds = component > 0;
86          break;
87       case TESS_PRIMITIVE_ISOLINES:
88          out_of_bounds = true;
89          break;
90       default:
91          unreachable("Bogus tessellation domain");
92       }
93    } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
94       b->cursor = write ? nir_before_instr(&intr->instr)
95                         : nir_after_instr(&intr->instr);
96 
97       nir_intrinsic_set_base(intr, 1);
98 
99       switch (_primitive_mode) {
100       case TESS_PRIMITIVE_QUADS:
101       case TESS_PRIMITIVE_TRIANGLES:
102          /* Quads:     gl_TessLevelOuter[0..3] lives at DWords 7-4 (reversed).
103           * Triangles: gl_TessLevelOuter[0..2] lives at DWords 7-5 (reversed).
104           */
105          if (write) {
106             assert(intr->src[0].ssa->num_components == 4);
107 
108             unsigned wzyx[4] = { 3, 2, 1, 0 };
109             src = nir_swizzle(b, intr->src[0].ssa, wzyx, 4);
110             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2 |
111                    !!(mask & WRITEMASK_Z) << 1 | !!(mask & WRITEMASK_W) << 0;
112 
113             /* Don't overwrite the inner factor at DWord 4 for triangles */
114             if (_primitive_mode == TESS_PRIMITIVE_TRIANGLES)
115                mask &= ~WRITEMASK_X;
116          } else if (intr->def.num_components > 1) {
117             assert(intr->def.num_components == 4);
118 
119             unsigned wzyx[4] = { 3, 2, 1, 0 };
120             dest = nir_swizzle(b, &intr->def, wzyx, 4);
121          } else {
122             nir_intrinsic_set_component(intr, 3 - component);
123             out_of_bounds = component == 3 &&
124                             _primitive_mode == TESS_PRIMITIVE_TRIANGLES;
125          }
126          break;
127       case TESS_PRIMITIVE_ISOLINES:
128          /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
129          if (write) {
130             assert(intr->src[0].ssa->num_components == 4);
131 
132             nir_def *undef = nir_undef(b, 1, 32);
133             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
134             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
135             src = nir_vec4(b, undef, undef, x, y);
136             mask = !!(mask & WRITEMASK_X) << 2 | !!(mask & WRITEMASK_Y) << 3;
137          } else {
138             nir_intrinsic_set_component(intr, 2 + component);
139             out_of_bounds = component > 1;
140          }
141          break;
142       default:
143          unreachable("Bogus tessellation domain");
144       }
145    } else {
146       return false;
147    }
148 
149    if (out_of_bounds) {
150       if (!write)
151          nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, 32));
152       nir_instr_remove(&intr->instr);
153    } else if (write) {
154       nir_intrinsic_set_write_mask(intr, mask);
155 
156       if (src) {
157          nir_src_rewrite(&intr->src[0], src);
158       }
159    } else if (dest) {
160       nir_def_rewrite_uses_after(&intr->def, dest,
161                                      dest->parent_instr);
162    }
163 
164    return true;
165 }
166 
167 static bool
168 is_input(nir_intrinsic_instr *intrin)
169 {
170    return intrin->intrinsic == nir_intrinsic_load_input ||
171           intrin->intrinsic == nir_intrinsic_load_per_primitive_input ||
172           intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
173           intrin->intrinsic == nir_intrinsic_load_interpolated_input;
174 }
175 
176 static bool
177 is_output(nir_intrinsic_instr *intrin)
178 {
179    return intrin->intrinsic == nir_intrinsic_load_output ||
180           intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
181           intrin->intrinsic == nir_intrinsic_store_output ||
182           intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
183 }
184 
185 
186 static bool
187 remap_patch_urb_offsets(nir_block *block, nir_builder *b,
188                         const struct intel_vue_map *vue_map,
189                         enum tess_primitive_mode tes_primitive_mode)
190 {
191    nir_foreach_instr_safe(instr, block) {
192       if (instr->type != nir_instr_type_intrinsic)
193          continue;
194 
195       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
196 
197       gl_shader_stage stage = b->shader->info.stage;
198 
199       if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
200           (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
201 
202          if (remap_tess_levels(b, intrin, tes_primitive_mode))
203             continue;
204 
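         /* Not a tessellation level: remap the varying to its VUE map slot
          * and, for per-vertex accesses, fold the vertex index into the
          * offset below.
          */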
205          int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
206          assert(vue_slot != -1);
207          intrin->const_index[0] = vue_slot;
208 
209          nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
210          if (vertex) {
211             if (nir_src_is_const(*vertex)) {
212                intrin->const_index[0] += nir_src_as_uint(*vertex) *
213                                          vue_map->num_per_vertex_slots;
214             } else {
215                b->cursor = nir_before_instr(&intrin->instr);
216 
217                /* Multiply by the number of per-vertex slots. */
218                nir_def *vertex_offset =
219                   nir_imul(b,
220                            vertex->ssa,
221                            nir_imm_int(b,
222                                        vue_map->num_per_vertex_slots));
223 
224                /* Add it to the existing offset */
225                nir_src *offset = nir_get_io_offset_src(intrin);
226                nir_def *total_offset =
227                   nir_iadd(b, vertex_offset,
228                            offset->ssa);
229 
230                nir_src_rewrite(offset, total_offset);
231             }
232          }
233       }
234    }
235    return true;
236 }
237 
238 void
239 elk_nir_lower_vs_inputs(nir_shader *nir,
240                         bool edgeflag_is_last,
241                         const uint8_t *vs_attrib_wa_flags)
242 {
243    /* Start with the location of the variable's base. */
244    nir_foreach_shader_in_variable(var, nir)
245       var->data.driver_location = var->data.location;
246 
247    /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
248     * loaded as one vec4 or dvec4 per element (or matrix column), depending on
249     * whether it is a double-precision type or not.
250     */
251    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
252                 nir_lower_io_lower_64bit_to_32);
253 
254    /* This pass needs actual constants */
255    nir_opt_constant_folding(nir);
256 
257    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
258 
259    elk_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
260 
261    /* The last step is to remap VERT_ATTRIB_* to actual registers */
262 
263    /* Whether or not we have any system generated values.  gl_DrawID is not
264     * included here as it lives in its own vec4.
265     */
266    const bool has_sgvs =
267       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
268       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
269       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
270       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
271 
272    const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
273 
274    nir_foreach_function_impl(impl, nir) {
275       nir_builder b = nir_builder_create(impl);
276 
277       nir_foreach_block(block, impl) {
278          nir_foreach_instr_safe(instr, block) {
279             if (instr->type != nir_instr_type_intrinsic)
280                continue;
281 
282             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
283 
284             switch (intrin->intrinsic) {
285             case nir_intrinsic_load_first_vertex:
286             case nir_intrinsic_load_base_instance:
287             case nir_intrinsic_load_vertex_id_zero_base:
288             case nir_intrinsic_load_instance_id:
289             case nir_intrinsic_load_is_indexed_draw:
290             case nir_intrinsic_load_draw_id: {
291                b.cursor = nir_after_instr(&intrin->instr);
292 
293                /* gl_VertexID and friends are stored by the VF as the last
294                 * vertex element.  We convert them to load_input intrinsics at
295                 * the right location.
296                 */
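               /* Layout of the SGV vertex element (see the switch below):
                * .x = FirstVertex, .y = BaseInstance, .z = VertexID
                * (zero-based), .w = InstanceID.  gl_DrawID and IsIndexedDraw
                * go in .x/.y of the following element.
                */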
297                nir_intrinsic_instr *load =
298                   nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
299                load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
300 
301                nir_intrinsic_set_base(load, num_inputs);
302                switch (intrin->intrinsic) {
303                case nir_intrinsic_load_first_vertex:
304                   nir_intrinsic_set_component(load, 0);
305                   break;
306                case nir_intrinsic_load_base_instance:
307                   nir_intrinsic_set_component(load, 1);
308                   break;
309                case nir_intrinsic_load_vertex_id_zero_base:
310                   nir_intrinsic_set_component(load, 2);
311                   break;
312                case nir_intrinsic_load_instance_id:
313                   nir_intrinsic_set_component(load, 3);
314                   break;
315                case nir_intrinsic_load_draw_id:
316                case nir_intrinsic_load_is_indexed_draw:
317                   /* gl_DrawID and IsIndexedDraw are stored right after
318                    * gl_VertexID and friends if any of them exist.
319                    */
320                   nir_intrinsic_set_base(load, num_inputs + has_sgvs);
321                   if (intrin->intrinsic == nir_intrinsic_load_draw_id)
322                      nir_intrinsic_set_component(load, 0);
323                   else
324                      nir_intrinsic_set_component(load, 1);
325                   break;
326                default:
327                   unreachable("Invalid system value intrinsic");
328                }
329 
330                load->num_components = 1;
331                nir_def_init(&load->instr, &load->def, 1, 32);
332                nir_builder_instr_insert(&b, &load->instr);
333 
334                nir_def_replace(&intrin->def, &load->def);
335                break;
336             }
337 
338             case nir_intrinsic_load_input: {
339                /* Attributes come in a contiguous block, ordered by their
340                 * gl_vert_attrib value.  That means we can compute the slot
341                 * number for an attribute by masking out the enabled attributes
342                 * before it and counting the bits.
343                 */
344                int attr = nir_intrinsic_base(intrin);
345                uint64_t inputs_read = nir->info.inputs_read;
346                int slot = -1;
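               /* When the edge flag is passed as the last vertex element, it
                * always takes the final slot and is excluded from the bit
                * count below.
                */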
347                if (edgeflag_is_last) {
348                   inputs_read &= ~BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG);
349                   if (attr == VERT_ATTRIB_EDGEFLAG)
350                      slot = num_inputs - 1;
351                }
352                if (slot == -1)
353                   slot = util_bitcount64(inputs_read &
354                                          BITFIELD64_MASK(attr));
355                nir_intrinsic_set_base(intrin, slot);
356                break;
357             }
358 
359             default:
360                break; /* Nothing to do */
361             }
362          }
363       }
364    }
365 }
366 
367 void
368 elk_nir_lower_vue_inputs(nir_shader *nir,
369                          const struct intel_vue_map *vue_map)
370 {
371    nir_foreach_shader_in_variable(var, nir)
372       var->data.driver_location = var->data.location;
373 
374    /* Inputs are stored in vec4 slots, so use elk_type_size_vec4(). */
375    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
376                 nir_lower_io_lower_64bit_to_32);
377 
378    /* This pass needs actual constants */
379    nir_opt_constant_folding(nir);
380 
381    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
382 
383    nir_foreach_function_impl(impl, nir) {
384       nir_foreach_block(block, impl) {
385          nir_foreach_instr(instr, block) {
386             if (instr->type != nir_instr_type_intrinsic)
387                continue;
388 
389             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
390 
391             if (intrin->intrinsic == nir_intrinsic_load_input ||
392                 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
393                /* Offset 0 is the VUE header, which contains
394                 * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
395                 * VARYING_SLOT_PSIZ [.w].
396                 */
397                int varying = nir_intrinsic_base(intrin);
398                int vue_slot;
399                switch (varying) {
400                case VARYING_SLOT_PSIZ:
401                   nir_intrinsic_set_base(intrin, 0);
402                   nir_intrinsic_set_component(intrin, 3);
403                   break;
404 
405                default:
406                   vue_slot = vue_map->varying_to_slot[varying];
407                   assert(vue_slot != -1);
408                   nir_intrinsic_set_base(intrin, vue_slot);
409                   break;
410                }
411             }
412          }
413       }
414    }
415 }
416 
417 void
418 elk_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue_map)
419 {
420    nir_foreach_shader_in_variable(var, nir)
421       var->data.driver_location = var->data.location;
422 
423    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
424                 nir_lower_io_lower_64bit_to_32);
425 
426    /* This pass needs actual constants */
427    nir_opt_constant_folding(nir);
428 
429    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
430 
431    nir_foreach_function_impl(impl, nir) {
432       nir_builder b = nir_builder_create(impl);
433       nir_foreach_block(block, impl) {
434          remap_patch_urb_offsets(block, &b, vue_map,
435                                  nir->info.tess._primitive_mode);
436       }
437    }
438 }
439 
440 static bool
441 lower_barycentric_per_sample(nir_builder *b,
442                              nir_intrinsic_instr *intrin,
443                              UNUSED void *cb_data)
444 {
445    if (intrin->intrinsic != nir_intrinsic_load_barycentric_pixel &&
446        intrin->intrinsic != nir_intrinsic_load_barycentric_centroid)
447       return false;
448 
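   /* Replace the pixel/centroid barycentric with a per-sample one that uses
    * the same interpolation mode, forcing per-sample interpolation.
    */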
449    b->cursor = nir_before_instr(&intrin->instr);
450    nir_def *centroid =
451       nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
452                            nir_intrinsic_interp_mode(intrin));
453    nir_def_replace(&intrin->def, centroid);
454    return true;
455 }
456 
457 /**
458  * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
459  * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
460  *
461  * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
462  * representable in a S0.4 value; a naive conversion would give us -8/16,
463  * which is the opposite of what was intended.
464  *
465  * This is allowed by GL_ARB_gpu_shader5's quantization rules:
466  *
467  *    "Not all values of <offset> may be supported; x and y offsets may
468  *     be rounded to fixed-point values with the number of fraction bits
469  *     given by the implementation-dependent constant
470  *     FRAGMENT_INTERPOLATION_OFFSET_BITS."
471  */
472 static bool
473 lower_barycentric_at_offset(nir_builder *b, nir_intrinsic_instr *intrin,
474                             void *data)
475 {
476    if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
477       return false;
478 
479    b->cursor = nir_before_instr(&intrin->instr);
480 
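   /* Convert to S0.4 fixed point: scale by 16, truncate to integer, and clamp
    * to +7 so that +0.5 does not wrap around to -8 (see the comment above).
    */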
481    assert(intrin->src[0].ssa);
482    nir_def *offset =
483       nir_imin(b, nir_imm_int(b, 7),
484                nir_f2i32(b, nir_fmul_imm(b, intrin->src[0].ssa, 16)));
485 
486    nir_src_rewrite(&intrin->src[0], offset);
487 
488    return true;
489 }
490 
491 void
492 elk_nir_lower_fs_inputs(nir_shader *nir,
493                         const struct intel_device_info *devinfo,
494                         const struct elk_wm_prog_key *key)
495 {
496    nir_foreach_shader_in_variable(var, nir) {
497       var->data.driver_location = var->data.location;
498 
499       /* Apply default interpolation mode.
500        *
501        * Everything defaults to smooth except for the legacy GL color
502        * built-in variables, which might be flat depending on API state.
503        */
504       if (var->data.interpolation == INTERP_MODE_NONE) {
505          const bool flat = key->flat_shade &&
506             (var->data.location == VARYING_SLOT_COL0 ||
507              var->data.location == VARYING_SLOT_COL1);
508 
509          var->data.interpolation = flat ? INTERP_MODE_FLAT
510                                         : INTERP_MODE_SMOOTH;
511       }
512 
513       /* On Ironlake and below, there is only one interpolation mode.
514        * Centroid interpolation doesn't mean anything on this hardware --
515        * there is no multisampling.
516        */
517       if (devinfo->ver < 6) {
518          var->data.centroid = false;
519          var->data.sample = false;
520       }
521    }
522 
523    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
524                 nir_lower_io_lower_64bit_to_32 |
525                 nir_lower_io_use_interpolated_input_intrinsics);
526 
527    if (key->multisample_fbo == ELK_NEVER) {
528       nir_lower_single_sampled(nir);
529    } else if (key->persample_interp == ELK_ALWAYS) {
530       nir_shader_intrinsics_pass(nir, lower_barycentric_per_sample,
531                                    nir_metadata_control_flow,
532                                    NULL);
533    }
534 
535    nir_shader_intrinsics_pass(nir, lower_barycentric_at_offset,
536                                 nir_metadata_control_flow,
537                                 NULL);
538 
539    /* This pass needs actual constants */
540    nir_opt_constant_folding(nir);
541 
542    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
543 }
544 
545 void
546 elk_nir_lower_vue_outputs(nir_shader *nir)
547 {
548    nir_foreach_shader_out_variable(var, nir) {
549       var->data.driver_location = var->data.location;
550    }
551 
552    nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
553                 nir_lower_io_lower_64bit_to_32);
554 }
555 
556 void
557 elk_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue_map,
558                           enum tess_primitive_mode tes_primitive_mode)
559 {
560    nir_foreach_shader_out_variable(var, nir) {
561       var->data.driver_location = var->data.location;
562    }
563 
564    nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
565                 nir_lower_io_lower_64bit_to_32);
566 
567    /* This pass needs actual constants */
568    nir_opt_constant_folding(nir);
569 
570    nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
571 
572    nir_foreach_function_impl(impl, nir) {
573       nir_builder b = nir_builder_create(impl);
574       nir_foreach_block(block, impl) {
575          remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
576       }
577    }
578 }
579 
580 void
581 elk_nir_lower_fs_outputs(nir_shader *nir)
582 {
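   /* Pack both the dual-source blend index and the location into
    * driver_location so that both can be recovered later through the
    * ELK_NIR_FRAG_OUTPUT_* field macros.
    */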
583    nir_foreach_shader_out_variable(var, nir) {
584       var->data.driver_location =
585          SET_FIELD(var->data.index, ELK_NIR_FRAG_OUTPUT_INDEX) |
586          SET_FIELD(var->data.location, ELK_NIR_FRAG_OUTPUT_LOCATION);
587    }
588 
589    nir_lower_io(nir, nir_var_shader_out, elk_type_size_dvec4, 0);
590 }
591 
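/* Run a NIR pass and accumulate whether it made progress into the local
 * `progress` flag declared by the caller.
 */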
592 #define OPT(pass, ...) ({                                  \
593    bool this_progress = false;                             \
594    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
595    if (this_progress)                                      \
596       progress = true;                                     \
597    this_progress;                                          \
598 })
599 
600 void
601 elk_nir_optimize(nir_shader *nir, bool is_scalar,
602                  const struct intel_device_info *devinfo)
603 {
604    bool progress;
605    unsigned lower_flrp =
606       (nir->options->lower_flrp16 ? 16 : 0) |
607       (nir->options->lower_flrp32 ? 32 : 0) |
608       (nir->options->lower_flrp64 ? 64 : 0);
609 
610    do {
611       progress = false;
612       OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
613       OPT(nir_opt_deref);
614       if (OPT(nir_opt_memcpy))
615          OPT(nir_split_var_copies);
616       OPT(nir_lower_vars_to_ssa);
617       if (!nir->info.var_copies_lowered) {
618          /* Only run this pass if nir_lower_var_copies was not called
619           * yet. That would lower away any copy_deref instructions and we
620           * don't want to introduce any more.
621           */
622          OPT(nir_opt_find_array_copies);
623       }
624       OPT(nir_opt_copy_prop_vars);
625       OPT(nir_opt_dead_write_vars);
626       OPT(nir_opt_combine_stores, nir_var_all);
627 
628       if (is_scalar) {
629          OPT(nir_lower_alu_to_scalar, NULL, NULL);
630       } else {
631          OPT(nir_opt_shrink_stores, true);
632          OPT(nir_opt_shrink_vectors, false);
633       }
634 
635       OPT(nir_copy_prop);
636 
637       if (is_scalar) {
638          OPT(nir_lower_phis_to_scalar, false);
639       }
640 
641       OPT(nir_copy_prop);
642       OPT(nir_opt_dce);
643       OPT(nir_opt_cse);
644       OPT(nir_opt_combine_stores, nir_var_all);
645 
646       /* Passing 0 to the peephole select pass causes it to convert
647        * if-statements that contain only move instructions in the branches
648        * regardless of the count.
649        *
650        * Passing 1 to the peephole select pass causes it to convert
651        * if-statements that contain at most a single ALU instruction (total)
652        * in both branches.  Before Gfx6, some math instructions were
653        * prohibitively expensive and the results of compare operations needed an
654        * extra resolve step.  For these reasons, this pass is more harmful
655        * than good on those platforms.
656        *
657        * For indirect loads of uniforms (push constants), we assume that array
658        * indices will nearly always be in bounds and the cost of the load is
659        * low.  Therefore there shouldn't be a performance benefit to avoid it.
660        * However, in vec4 tessellation shaders, these loads operate by
661        * actually pulling from memory.
662        */
663       const bool is_vec4_tessellation = !is_scalar &&
664          (nir->info.stage == MESA_SHADER_TESS_CTRL ||
665           nir->info.stage == MESA_SHADER_TESS_EVAL);
666       OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
667       OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
668           devinfo->ver >= 6);
669 
670       OPT(nir_opt_intrinsics);
671       OPT(nir_opt_idiv_const, 32);
672       OPT(nir_opt_algebraic);
673 
674       /* BFI2 did not exist until Gfx7, so there's no point in trying to
675        * optimize an instruction that should not get generated.
676        */
677       if (devinfo->ver >= 7)
678          OPT(nir_opt_reassociate_bfi);
679 
680       OPT(nir_lower_constant_convert_alu_types);
681       OPT(nir_opt_constant_folding);
682 
683       if (lower_flrp != 0) {
684          if (OPT(nir_lower_flrp,
685                  lower_flrp,
686                  false /* always_precise */)) {
687             OPT(nir_opt_constant_folding);
688          }
689 
690          /* Nothing should rematerialize any flrps, so we only need to do this
691           * lowering once.
692           */
693          lower_flrp = 0;
694       }
695 
696       OPT(nir_opt_dead_cf);
697       if (OPT(nir_opt_loop)) {
698          /* If nir_opt_loop makes progress, then we need to clean
699           * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
700           * to make progress.
701           */
702          OPT(nir_copy_prop);
703          OPT(nir_opt_dce);
704       }
705       OPT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
706       OPT(nir_opt_conditional_discard);
707       if (nir->options->max_unroll_iterations != 0) {
708          OPT(nir_opt_loop_unroll);
709       }
710       OPT(nir_opt_remove_phis);
711       OPT(nir_opt_gcm, false);
712       OPT(nir_opt_undef);
713       OPT(nir_lower_pack);
714    } while (progress);
715 
716    /* Work around GfxBench's unused local sampler variable, which would trigger
717     * an assert in the opt_large_constants pass.
718     */
719    OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
720 }
721 
722 static unsigned
723 lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
724 {
725    switch (instr->type) {
726    case nir_instr_type_alu: {
727       nir_alu_instr *alu = nir_instr_as_alu(instr);
728       switch (alu->op) {
729       case nir_op_bit_count:
730       case nir_op_ufind_msb:
731       case nir_op_ifind_msb:
732       case nir_op_find_lsb:
733          /* These are handled specially because the destination is always
734           * 32-bit and so the bit size of the instruction is given by the
735           * source.
736           */
737          return alu->src[0].src.ssa->bit_size >= 32 ? 0 : 32;
738       default:
739          break;
740       }
741 
742       if (alu->def.bit_size >= 32)
743          return 0;
744 
745       /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
746        * 8-bit ABS or NEG instruction should eventually get copy propagated
747        * into the MOV that does the type conversion.  This results in far
748        * fewer MOV instructions.
749        */
750       switch (alu->op) {
751       case nir_op_idiv:
752       case nir_op_imod:
753       case nir_op_irem:
754       case nir_op_udiv:
755       case nir_op_umod:
756       case nir_op_fceil:
757       case nir_op_ffloor:
758       case nir_op_ffract:
759       case nir_op_fround_even:
760       case nir_op_ftrunc:
761          return 32;
762       case nir_op_frcp:
763       case nir_op_frsq:
764       case nir_op_fsqrt:
765       case nir_op_fpow:
766       case nir_op_fexp2:
767       case nir_op_flog2:
768       case nir_op_fsin:
769       case nir_op_fcos:
770          return 32;
771       case nir_op_isign:
772          assert(!"Should have been lowered by nir_opt_algebraic.");
773          return 0;
774       default:
775          if (nir_op_infos[alu->op].num_inputs >= 2 &&
776              alu->def.bit_size == 8)
777             return 16;
778 
779          if (nir_alu_instr_is_comparison(alu) &&
780              alu->src[0].src.ssa->bit_size == 8)
781             return 16;
782 
783          return 0;
784       }
785       break;
786    }
787 
788    case nir_instr_type_intrinsic: {
789       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
790       switch (intrin->intrinsic) {
791       case nir_intrinsic_read_invocation:
792       case nir_intrinsic_read_first_invocation:
793       case nir_intrinsic_vote_feq:
794       case nir_intrinsic_vote_ieq:
795       case nir_intrinsic_shuffle:
796       case nir_intrinsic_shuffle_xor:
797       case nir_intrinsic_shuffle_up:
798       case nir_intrinsic_shuffle_down:
799       case nir_intrinsic_quad_broadcast:
800       case nir_intrinsic_quad_swap_horizontal:
801       case nir_intrinsic_quad_swap_vertical:
802       case nir_intrinsic_quad_swap_diagonal:
803          if (intrin->src[0].ssa->bit_size == 8)
804             return 16;
805          return 0;
806 
807       case nir_intrinsic_reduce:
808       case nir_intrinsic_inclusive_scan:
809       case nir_intrinsic_exclusive_scan:
810          /* There are a couple of register region issues that make things
811           * complicated for 8-bit types:
812           *
813           *    1. Only raw moves are allowed to write to a packed 8-bit
814           *       destination.
815           *    2. If we use a strided destination, the efficient way to do
816           *       scan operations ends up using strides that are too big to
817           *       encode in an instruction.
818           *
819           * To get around these issues, we just do all 8-bit scan operations
820           * in 16 bits.  It's actually fewer instructions than what we'd have
821           * to do if we were trying to do it in native 8-bit types and the
822           * results are the same once we truncate to 8 bits at the end.
823           */
824          if (intrin->def.bit_size == 8)
825             return 16;
826          return 0;
827 
828       default:
829          return 0;
830       }
831       break;
832    }
833 
834    case nir_instr_type_phi: {
835       nir_phi_instr *phi = nir_instr_as_phi(instr);
836       if (phi->def.bit_size == 8)
837          return 16;
838       return 0;
839    }
840 
841    default:
842       return 0;
843    }
844 }
845 
846 /* On gfx12.5+, if the offsets are not both constant and in the [-8, 7] range,
847  * we will have nir_lower_tex() lower the source offset by returning true from
848  * this filter function.
849  */
850 static bool
851 lower_xehp_tg4_offset_filter(const nir_instr *instr, UNUSED const void *data)
852 {
853    if (instr->type != nir_instr_type_tex)
854       return false;
855 
856    nir_tex_instr *tex = nir_instr_as_tex(instr);
857 
858    if (tex->op != nir_texop_tg4)
859       return false;
860 
861    int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
862    if (offset_index < 0)
863       return false;
864 
865    if (!nir_src_is_const(tex->src[offset_index].src))
866       return true;
867 
868    int64_t offset_x = nir_src_comp_as_int(tex->src[offset_index].src, 0);
869    int64_t offset_y = nir_src_comp_as_int(tex->src[offset_index].src, 1);
870 
871    return offset_x < -8 || offset_x > 7 || offset_y < -8 || offset_y > 7;
872 }
873 
874 /* Does some simple lowering and runs the standard suite of optimizations
875  *
876  * This is intended to be called more-or-less directly after you get the
877  * shader out of GLSL or some other source.  While it is geared towards i965,
878  * it is not at all generator-specific.
879  */
880 void
881 elk_preprocess_nir(const struct elk_compiler *compiler, nir_shader *nir,
882                    const struct elk_nir_compiler_opts *opts)
883 {
884    const struct intel_device_info *devinfo = compiler->devinfo;
885    UNUSED bool progress; /* Written by OPT */
886 
887    const bool is_scalar = compiler->scalar_stage[nir->info.stage];
888 
889    nir_validate_ssa_dominance(nir, "before elk_preprocess_nir");
890 
891    OPT(nir_lower_frexp);
892 
893    if (is_scalar) {
894       OPT(nir_lower_alu_to_scalar, NULL, NULL);
895    }
896 
897    if (nir->info.stage == MESA_SHADER_GEOMETRY)
898       OPT(nir_lower_gs_intrinsics, 0);
899 
900    /* See also elk_nir_trig_workarounds.py */
901    if (compiler->precise_trig)
902       OPT(elk_nir_apply_trig_workarounds);
903 
904    /* This workaround exists for performance reasons. Since it requires not
905     * setting RENDER_SURFACE_STATE::SurfaceArray when the array length is 1,
906     * we're losing the HW robustness feature in that case.
907     *
908     * So when robust image access is enabled, just avoid the workaround.
909     */
910    if (intel_needs_workaround(devinfo, 1806565034) && !opts->robust_image_access)
911       OPT(intel_nir_clamp_image_1d_2d_array_sizes);
912 
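   /* Texture lowering the back-end relies on: lower projectors, fold txf/rect
    * offsets, lower cube-map derivatives, shadow/offset clamps, and tg4
    * offsets.
    */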
913    const nir_lower_tex_options tex_options = {
914       .lower_txp = ~0,
915       .lower_txf_offset = true,
916       .lower_rect_offset = true,
917       .lower_txd_cube_map = true,
918       .lower_txb_shadow_clamp = true,
919       .lower_txd_shadow_clamp = true,
920       .lower_txd_offset_clamp = true,
921       .lower_tg4_offsets = true,
922       .lower_txs_lod = true, /* Wa_14012320009 */
923       .lower_invalid_implicit_lod = true,
924    };
925 
926    OPT(nir_lower_tex, &tex_options);
927    OPT(nir_normalize_cubemap_coords);
928 
929    OPT(nir_lower_global_vars_to_local);
930 
931    OPT(nir_split_var_copies);
932    OPT(nir_split_struct_vars, nir_var_function_temp);
933 
934    elk_nir_optimize(nir, is_scalar, devinfo);
935 
936    OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
937    if (OPT(nir_lower_int64_float_conversions)) {
938       OPT(nir_opt_algebraic);
939       OPT(nir_lower_doubles, opts->softfp64,
940           nir->options->lower_doubles_options);
941    }
942 
943    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
944 
945    /* Lower a bunch of stuff */
946    OPT(nir_lower_var_copies);
947 
948    /* This needs to be run after the first optimization pass but before we
949     * lower indirect derefs away
950     */
951    if (compiler->supports_shader_constants) {
952       OPT(nir_opt_large_constants, NULL, 32);
953    }
954 
955    if (is_scalar) {
956       OPT(nir_lower_load_const_to_scalar);
957    }
958 
959    OPT(nir_lower_system_values);
960    nir_lower_compute_system_values_options lower_csv_options = {
961       .has_base_workgroup_id = nir->info.stage == MESA_SHADER_COMPUTE,
962    };
963    OPT(nir_lower_compute_system_values, &lower_csv_options);
964 
965    const nir_lower_subgroups_options subgroups_options = {
966       .ballot_bit_size = 32,
967       .ballot_components = 1,
968       .lower_to_scalar = true,
969       .lower_vote_trivial = !is_scalar,
970       .lower_relative_shuffle = true,
971       .lower_quad_broadcast_dynamic = true,
972       .lower_elect = true,
973       .lower_inverse_ballot = true,
974       .lower_rotate_to_shuffle = true,
975    };
976    OPT(nir_lower_subgroups, &subgroups_options);
977 
978    nir_variable_mode indirect_mask =
979       elk_nir_no_indirect_mask(compiler, nir->info.stage);
980    OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
981 
982    /* Even in cases where we can handle indirect temporaries via scratch,
983     * it can still be expensive.  Lower indirects on small arrays to
984     * conditional load/stores.
985     *
986     * The threshold of 16 was chosen semi-arbitrarily.  The idea is that an
987     * indirect on an array of 16 elements is about 30 instructions at which
988     * point, you may be better off doing a send.  With a SIMD8 program, 16
989     * floats is 1/8 of the entire register file.  Any array larger than that
990     * is likely to cause pressure issues.  Also, this value is sufficiently
991     * high that the benchmarks known to suffer from large temporary array
992     * issues are helped but nothing else in shader-db is hurt except for maybe
993     * that one kerbal space program shader.
994     */
995    if (is_scalar && !(indirect_mask & nir_var_function_temp))
996       OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
997 
998    /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
999     * SSBOs, our back-end is capable of loading an entire vec4 at a time and
1000     * we would like to take advantage of that whenever possible regardless of
1001     * whether or not the app gives us full loads.  This should allow the
1002     * optimizer to combine UBO and SSBO load operations and save us some send
1003     * messages.
1004     */
1005    OPT(nir_lower_array_deref_of_vec,
1006        nir_var_mem_ubo | nir_var_mem_ssbo, NULL,
1007        nir_lower_direct_array_deref_of_vec_load);
1008 
1009    /* Get rid of split copies */
1010    elk_nir_optimize(nir, is_scalar, devinfo);
1011 }
1012 
1013 static bool
1014 elk_nir_zero_inputs_instr(struct nir_builder *b, nir_intrinsic_instr *intrin,
1015                           void *data)
1016 {
1017    if (intrin->intrinsic != nir_intrinsic_load_deref)
1018       return false;
1019 
1020    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1021    if (!nir_deref_mode_is(deref, nir_var_shader_in))
1022       return false;
1023 
1024    if (deref->deref_type != nir_deref_type_var)
1025       return false;
1026 
1027    nir_variable *var = deref->var;
1028 
1029    uint64_t zero_inputs = *(uint64_t *)data;
1030    if (!(BITFIELD64_BIT(var->data.location) & zero_inputs))
1031       return false;
1032 
1033    b->cursor = nir_before_instr(&intrin->instr);
1034 
1035    nir_def *zero = nir_imm_zero(b, 1, 32);
1036 
1037    nir_def_replace(&intrin->def, zero);
1038 
1039    return true;
1040 }
1041 
1042 static bool
1043 elk_nir_zero_inputs(nir_shader *shader, uint64_t *zero_inputs)
1044 {
1045    return nir_shader_intrinsics_pass(shader, elk_nir_zero_inputs_instr,
1046                                      nir_metadata_control_flow,
1047                                      zero_inputs);
1048 }
1049 
1050 void
1051 elk_nir_link_shaders(const struct elk_compiler *compiler,
1052                      nir_shader *producer, nir_shader *consumer)
1053 {
1054    const struct intel_device_info *devinfo = compiler->devinfo;
1055 
1056    nir_lower_io_arrays_to_elements(producer, consumer);
1057    nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
1058    nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
1059 
1060    const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
1061    const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
1062 
1063    if (p_is_scalar && c_is_scalar) {
1064       NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
1065       NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
1066       elk_nir_optimize(producer, p_is_scalar, devinfo);
1067       elk_nir_optimize(consumer, c_is_scalar, devinfo);
1068    }
1069 
1070    if (nir_link_opt_varyings(producer, consumer))
1071       elk_nir_optimize(consumer, c_is_scalar, devinfo);
1072 
1073    NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
1074    NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
1075 
1076    if (nir_remove_unused_varyings(producer, consumer)) {
1077       if (should_print_nir(producer)) {
1078          printf("nir_remove_unused_varyings\n");
1079          nir_print_shader(producer, stdout);
1080       }
1081       if (should_print_nir(consumer)) {
1082          printf("nir_remove_unused_varyings\n");
1083          nir_print_shader(consumer, stdout);
1084       }
1085 
1086       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1087       NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
1088 
1089       /* The backend might not be able to handle indirects on
1090        * temporaries so we need to lower indirects on any of the
1091        * varyings we have demoted here.
1092        */
1093       NIR_PASS(_, producer, nir_lower_indirect_derefs,
1094                   elk_nir_no_indirect_mask(compiler, producer->info.stage),
1095                   UINT32_MAX);
1096       NIR_PASS(_, consumer, nir_lower_indirect_derefs,
1097                   elk_nir_no_indirect_mask(compiler, consumer->info.stage),
1098                   UINT32_MAX);
1099 
1100       elk_nir_optimize(producer, p_is_scalar, devinfo);
1101       elk_nir_optimize(consumer, c_is_scalar, devinfo);
1102    }
1103 
1104    NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
1105 
1106    if (producer->info.stage == MESA_SHADER_TESS_CTRL &&
1107        producer->options->vectorize_tess_levels)
1108       NIR_PASS_V(producer, nir_vectorize_tess_levels);
1109 
1110    NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
1111    NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);
1112 
1113    if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
1114       /* Calling lower_io_to_vector creates output variable writes with
1115        * write-masks.  On non-TCS outputs, the back-end can't handle it and we
1116        * need to call nir_lower_io_to_temporaries to get rid of them.  This,
1117        * in turn, creates temporary variables and extra copy_deref intrinsics
1118        * that we need to clean up.
1119        */
1120       NIR_PASS_V(producer, nir_lower_io_to_temporaries,
1121                  nir_shader_get_entrypoint(producer), true, false);
1122       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1123       NIR_PASS(_, producer, nir_split_var_copies);
1124       NIR_PASS(_, producer, nir_lower_var_copies);
1125    }
1126 }
1127 
1128 static bool
1129 elk_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
1130                              unsigned bit_size,
1131                              unsigned num_components,
1132                              int64_t hole_size,
1133                              nir_intrinsic_instr *low,
1134                              nir_intrinsic_instr *high,
1135                              void *data)
1136 {
1137    /* Don't combine things to generate 64-bit loads/stores.  We have to split
1138     * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
1139     * we don't want to make a mess for the back-end.
1140     */
1141    if (bit_size > 32 || hole_size > 0 || !nir_num_components_valid(num_components))
1142       return false;
1143 
1144    if (low->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
1145        low->intrinsic == nir_intrinsic_load_ssbo_uniform_block_intel ||
1146        low->intrinsic == nir_intrinsic_load_shared_uniform_block_intel ||
1147        low->intrinsic == nir_intrinsic_load_global_constant_uniform_block_intel) {
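      /* Block-uniform loads may go wider than a vec4, but only as
       * power-of-two, 32-bit vectors with at most 32 components.
       */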
1148       if (num_components > 4) {
1149          if (!util_is_power_of_two_nonzero(num_components))
1150             return false;
1151 
1152          if (bit_size != 32)
1153             return false;
1154 
1155          if (num_components > 32)
1156             return false;
1157       }
1158    } else {
1159       /* We can handle at most a vec4 right now.  Anything bigger would get
1160        * immediately split by elk_nir_lower_mem_access_bit_sizes anyway.
1161        */
1162       if (num_components > 4)
1163          return false;
1164    }
1165 
1166 
1167    uint32_t align;
1168    if (align_offset)
1169       align = 1 << (ffs(align_offset) - 1);
1170    else
1171       align = align_mul;
1172 
1173    if (align < bit_size / 8)
1174       return false;
1175 
1176    return true;
1177 }
1178 
1179 static
1180 bool combine_all_memory_barriers(nir_intrinsic_instr *a,
1181                                  nir_intrinsic_instr *b,
1182                                  void *data)
1183 {
1184    /* Combine control barriers with identical memory semantics. This prevents
1185     * the second barrier from generating a spurious fence message identical to
1186     * the one from the first barrier.
1187     */
1188    if (nir_intrinsic_memory_modes(a) == nir_intrinsic_memory_modes(b) &&
1189        nir_intrinsic_memory_semantics(a) == nir_intrinsic_memory_semantics(b) &&
1190        nir_intrinsic_memory_scope(a) == nir_intrinsic_memory_scope(b)) {
1191       nir_intrinsic_set_execution_scope(a, MAX2(nir_intrinsic_execution_scope(a),
1192                                                 nir_intrinsic_execution_scope(b)));
1193       return true;
1194    }
1195 
1196    /* Only combine pure memory barriers */
1197    if ((nir_intrinsic_execution_scope(a) != SCOPE_NONE) ||
1198        (nir_intrinsic_execution_scope(b) != SCOPE_NONE))
1199       return false;
1200 
1201    /* Translation to backend IR will get rid of modes we don't care about, so
1202     * no harm in always combining them.
1203     *
1204     * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
1205     * scheduling so that it can take advantage of the different semantics.
1206     */
1207    nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
1208                                      nir_intrinsic_memory_modes(b));
1209    nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
1210                                          nir_intrinsic_memory_semantics(b));
1211    nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
1212                                           nir_intrinsic_memory_scope(b)));
1213    return true;
1214 }
1215 
1216 static nir_mem_access_size_align
1217 get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
1218                           uint8_t bit_size, uint32_t align_mul, uint32_t align_offset,
1219                           bool offset_is_const, enum gl_access_qualifier access,
1220                           const void *cb_data)
1221 {
1222    const uint32_t align = nir_combined_align(align_mul, align_offset);
1223 
1224    switch (intrin) {
1225    case nir_intrinsic_load_ssbo:
1226    case nir_intrinsic_load_shared:
1227    case nir_intrinsic_load_scratch:
1228       /* The offset is constant so we can use a 32-bit load and just shift it
1229        * around as needed.
1230        */
1231       if (align < 4 && offset_is_const) {
1232          assert(util_is_power_of_two_nonzero(align_mul) && align_mul >= 4);
1233          const unsigned pad = align_offset % 4;
1234          const unsigned comps32 = MIN2(DIV_ROUND_UP(bytes + pad, 4), 4);
1235          return (nir_mem_access_size_align) {
1236             .bit_size = 32,
1237             .num_components = comps32,
1238             .align = 4,
1239             .shift = nir_mem_access_shift_method_scalar,
1240          };
1241       }
1242       break;
1243 
1244    default:
1245       break;
1246    }
1247 
1248    const bool is_load = nir_intrinsic_infos[intrin].has_dest;
1249    const bool is_scratch = intrin == nir_intrinsic_load_scratch ||
1250                            intrin == nir_intrinsic_store_scratch;
1251 
1252    if (align < 4 || bytes < 4) {
1253       /* Choose a byte, word, or dword */
1254       bytes = MIN2(bytes, 4);
1255       if (bytes == 3)
1256          bytes = is_load ? 4 : 2;
1257 
1258       if (is_scratch) {
1259          /* The way scratch address swizzling works in the back-end, it
1260           * happens at a DWORD granularity so we can't have a single load
1261           * or store cross a DWORD boundary.
1262           */
1263          if ((align_offset % 4) + bytes > MIN2(align_mul, 4))
1264             bytes = MIN2(align_mul, 4) - (align_offset % 4);
1265 
1266          /* Must be a power of two */
1267          if (bytes == 3)
1268             bytes = 2;
1269       }
1270 
1271       return (nir_mem_access_size_align) {
1272          .bit_size = bytes * 8,
1273          .num_components = 1,
1274          .align = 1,
1275          .shift = nir_mem_access_shift_method_scalar,
1276       };
1277    } else {
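      /* Dword-aligned access: use 32-bit components, up to a vec4.  Scratch
       * messages are limited to a single component.
       */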
1278       bytes = MIN2(bytes, 16);
1279       return (nir_mem_access_size_align) {
1280          .bit_size = 32,
1281          .num_components = is_scratch ? 1 :
1282                            is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
1283          .align = 4,
1284          .shift = nir_mem_access_shift_method_scalar,
1285       };
1286    }
1287 }
1288 
1289 static void
1290 elk_vectorize_lower_mem_access(nir_shader *nir,
1291                                const struct elk_compiler *compiler,
1292                                enum elk_robustness_flags robust_flags)
1293 {
1294    bool progress = false;
1295    const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1296 
1297    if (is_scalar) {
1298       nir_load_store_vectorize_options options = {
1299          .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
1300                   nir_var_mem_global | nir_var_mem_shared,
1301          .callback = elk_nir_should_vectorize_mem,
1302          .robust_modes = (nir_variable_mode)0,
1303       };
1304 
1305       if (robust_flags & ELK_ROBUSTNESS_UBO)
1306          options.robust_modes |= nir_var_mem_ubo | nir_var_mem_global;
1307       if (robust_flags & ELK_ROBUSTNESS_SSBO)
1308          options.robust_modes |= nir_var_mem_ssbo | nir_var_mem_global;
1309 
1310       OPT(nir_opt_load_store_vectorize, &options);
1311    }
1312 
1313    nir_lower_mem_access_bit_sizes_options mem_access_options = {
1314       .modes = nir_var_mem_ssbo |
1315                nir_var_mem_constant |
1316                nir_var_shader_temp |
1317                nir_var_function_temp |
1318                nir_var_mem_global |
1319                nir_var_mem_shared,
1320       .callback = get_mem_access_size_align,
1321    };
1322    OPT(nir_lower_mem_access_bit_sizes, &mem_access_options);
1323 
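   /* If vectorization or bit-size lowering changed anything, clean up the
    * newly generated code.
    */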
1324    while (progress) {
1325       progress = false;
1326 
1327       OPT(nir_lower_pack);
1328       OPT(nir_copy_prop);
1329       OPT(nir_opt_dce);
1330       OPT(nir_opt_cse);
1331       OPT(nir_opt_algebraic);
1332       OPT(nir_opt_constant_folding);
1333    }
1334 }
1335 
1336 static bool
1337 nir_shader_has_local_variables(const nir_shader *nir)
1338 {
1339    nir_foreach_function_impl(impl, nir) {
1340       if (!exec_list_is_empty(&impl->locals))
1341          return true;
1342    }
1343 
1344    return false;
1345 }
1346 
1347 /* Prepare the given shader for codegen
1348  *
1349  * This function is intended to be called right before going into the actual
1350  * backend and is highly backend-specific.  Also, once this function has been
1351  * called on a shader, it will no longer be in SSA form so most optimizations
1352  * will not work.
1353  */
1354 void
1355 elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
1356                     bool debug_enabled,
1357                     enum elk_robustness_flags robust_flags)
1358 {
1359    const struct intel_device_info *devinfo = compiler->devinfo;
1360    const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1361 
1362    UNUSED bool progress; /* Written by OPT */
1363 
1364    OPT(intel_nir_lower_sparse_intrinsics);
1365 
1366    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1367 
1368    OPT(nir_opt_combine_barriers, combine_all_memory_barriers, NULL);
1369 
1370    do {
1371       progress = false;
1372       OPT(nir_opt_algebraic_before_ffma);
1373    } while (progress);
1374 
1375    elk_nir_optimize(nir, is_scalar, devinfo);
1376 
1377    if (is_scalar && nir_shader_has_local_variables(nir)) {
1378       OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
1379           glsl_get_natural_size_align_bytes);
1380       OPT(nir_lower_explicit_io, nir_var_function_temp,
1381           nir_address_format_32bit_offset);
1382       elk_nir_optimize(nir, is_scalar, devinfo);
1383    }
1384 
1385    elk_vectorize_lower_mem_access(nir, compiler, robust_flags);
1386 
1387    if (OPT(nir_lower_int64))
1388       elk_nir_optimize(nir, is_scalar, devinfo);
1389 
   if (devinfo->ver >= 6) {
      /* Try to fuse multiply-adds.  If that succeeds, run shrink_vectors so
       * that peephole_ffma doesn't leave things like this behind:
       *    vec16 ssa_0 = ...
       *    vec16 ssa_1 = fneg ssa_0
       *    vec1  ssa_2 = ffma ssa_1, ...
       *
       * We want this instead:
       *    vec16 ssa_0 = ...
       *    vec1  ssa_1 = fneg ssa_0.x
       *    vec1  ssa_2 = ffma ssa_1, ...
       */
      if (OPT(intel_nir_opt_peephole_ffma))
         OPT(nir_opt_shrink_vectors, false);
   }

   if (is_scalar)
      OPT(intel_nir_opt_peephole_imul32x16);

   if (OPT(nir_opt_comparison_pre)) {
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);

      /* Do the select peephole again.  nir_opt_comparison_pre (combined with
       * the other optimization passes) will have removed at least one
       * instruction from one of the branches of the if-statement, so now it
       * might be under the threshold for conversion to bcsel.
       *
       * See elk_nir_optimize for the explanation of is_vec4_tessellation.
       */
      const bool is_vec4_tessellation = !is_scalar &&
         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
          nir->info.stage == MESA_SHADER_TESS_EVAL);
      OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
      OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
          compiler->devinfo->ver >= 6);
   }

   do {
      progress = false;
      if (OPT(nir_opt_algebraic_late)) {
         /* At this late stage, anything that makes more constants will wreak
          * havoc on the vec4 backend, whose handling of constants is poor.
          */
         if (is_scalar)
            OPT(nir_opt_constant_folding);

         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
         OPT(nir_opt_cse);
      }
   } while (progress);

   if (OPT(nir_lower_fp16_casts, nir_lower_fp16_split_fp64)) {
      if (OPT(nir_lower_int64)) {
         elk_nir_optimize(nir, is_scalar, devinfo);
      }
   }

   OPT(intel_nir_lower_conversions);

   if (is_scalar)
      OPT(nir_lower_alu_to_scalar, NULL, NULL);

   while (OPT(nir_opt_algebraic_distribute_src_mods)) {
      if (is_scalar)
         OPT(nir_opt_constant_folding);

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
   }

   OPT(nir_copy_prop);
   OPT(nir_opt_dce);
   OPT(nir_opt_move, nir_move_comparisons);
   OPT(nir_opt_dead_cf);

   bool divergence_analysis_dirty = false;
   NIR_PASS_V(nir, nir_divergence_analysis);

   /* TODO: Enable nir_opt_uniform_atomics on Gfx7.x too.
    * It currently fails Vulkan tests on Haswell for an unknown reason.
    */
   bool opt_uniform_atomic_stage_allowed = devinfo->ver >= 8;

   if (opt_uniform_atomic_stage_allowed && OPT(nir_opt_uniform_atomics, false)) {
      const nir_lower_subgroups_options subgroups_options = {
         .ballot_bit_size = 32,
         .ballot_components = 1,
         .lower_elect = true,
      };
      OPT(nir_lower_subgroups, &subgroups_options);

      if (OPT(nir_lower_int64))
         elk_nir_optimize(nir, is_scalar, devinfo);

      divergence_analysis_dirty = true;
   }

   /* Do this only after the last opt_gcm. GCM will undo this lowering. */
   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      if (divergence_analysis_dirty) {
         NIR_PASS_V(nir, nir_divergence_analysis);
      }

      OPT(intel_nir_lower_non_uniform_barycentric_at_sample);
   }

   OPT(nir_lower_bool_to_int32);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   OPT(nir_lower_locals_to_regs, 32);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function_impl(impl, nir) {
         nir_index_ssa_defs(impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");

   /* Rerun the divergence analysis before convert_from_ssa, as that pass
    * asserts that divergence flags are consistent.
    */
   NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
   NIR_PASS_V(nir, nir_divergence_analysis);

   OPT(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT(nir_move_vec_src_uses_to_dest, true);
      OPT(nir_lower_vec_to_regs, NULL, NULL);
   }

   OPT(nir_opt_dce);

   if (OPT(nir_opt_rematerialize_compares))
      OPT(nir_opt_dce);

   nir_trivialize_registers(nir);

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->ver <= 5)
      elk_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }
}

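/* Lower texture operations according to the sampler-related parts of the
 * program key, applying the workarounds older hardware needs (rectangle
 * texture lowering, GL_CLAMP saturation, txd-with-shadow lowering).
 * Returns true if any instruction was changed.
 */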
static bool
elk_nir_apply_sampler_key(nir_shader *nir,
                          const struct elk_compiler *compiler,
                          const struct elk_sampler_prog_key_data *key_tex)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = {
      .lower_txd_clamp_bindless_sampler = true,
      .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
      .lower_invalid_implicit_lod = true,
      .lower_index_to_offset = true,
   };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->ver < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->ver < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->verx10 <= 70;

   return nir_lower_tex(nir, &tex_options);
}

static unsigned
get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
{
   switch (info->subgroup_size) {
   case SUBGROUP_SIZE_API_CONSTANT:
      /* We have to use the global constant size. */
      return ELK_SUBGROUP_SIZE;

   case SUBGROUP_SIZE_UNIFORM:
      /* It has to be uniform across all invocations but can vary per stage
       * if we want.  This gives us a bit more freedom.
       *
       * For compute, elk_nir_apply_key is called per-dispatch-width so this
       * is the actual subgroup size and not a maximum.  However, we only
       * invoke one size of any given compute shader so it's still guaranteed
       * to be uniform across invocations.
       */
      return max_subgroup_size;

   case SUBGROUP_SIZE_VARYING:
      /* The subgroup size is allowed to be fully varying.  For geometry
       * stages, we know it's always 8, which is max_subgroup_size, so we can
       * return that.  For compute, elk_nir_apply_key is called once per
       * dispatch-width so max_subgroup_size is the real subgroup size.
       *
       * For fragment, we return 0 and let it fall through to the back-end
       * compiler.  This means we can't optimize based on subgroup size, but
       * that's a risk the client took when it asked for a varying subgroup
       * size.
       */
      return info->stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;

   case SUBGROUP_SIZE_REQUIRE_4:
      unreachable("Unsupported subgroup size type");

   case SUBGROUP_SIZE_REQUIRE_8:
   case SUBGROUP_SIZE_REQUIRE_16:
   case SUBGROUP_SIZE_REQUIRE_32:
      assert(gl_shader_stage_uses_workgroup(info->stage) ||
             (info->stage >= MESA_SHADER_RAYGEN && info->stage <= MESA_SHADER_CALLABLE));
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
       */
      return info->subgroup_size;

   case SUBGROUP_SIZE_FULL_SUBGROUPS:
   case SUBGROUP_SIZE_REQUIRE_64:
   case SUBGROUP_SIZE_REQUIRE_128:
      break;
   }

   unreachable("Invalid subgroup size type");
}

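/* Resolve the shader's shader_info::subgroup_size setting against the
 * hardware subgroup size; thin wrapper around get_subgroup_size().
 */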
unsigned
elk_nir_api_subgroup_size(const nir_shader *nir,
                          unsigned hw_subgroup_size)
{
   return get_subgroup_size(&nir->info, hw_subgroup_size);
}

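/* Apply the key-dependent lowering that has to happen before compilation:
 * sampler workarounds, Intel texture lowering, subgroup lowering for the
 * resolved subgroup size, and the trig input range workaround.  Re-runs
 * elk_nir_optimize if any of these made progress.
 */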
void
elk_nir_apply_key(nir_shader *nir,
                  const struct elk_compiler *compiler,
                  const struct elk_base_prog_key *key,
                  unsigned max_subgroup_size)
{
   bool progress = false;

   OPT(elk_nir_apply_sampler_key, compiler, &key->tex);

   const struct intel_nir_lower_texture_opts tex_opts = {0};
   OPT(intel_nir_lower_texture, &tex_opts);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size),
      .ballot_bit_size = 32,
      .ballot_components = 1,
      .lower_subgroup_masks = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   if (key->limit_trig_input_range)
      OPT(elk_nir_limit_trig_input_range_workaround);

   if (progress) {
      const bool is_scalar = compiler->scalar_stage[nir->info.stage];
      elk_nir_optimize(nir, is_scalar, compiler->devinfo);
   }
}

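/* Translate a NIR comparison opcode into the equivalent ELK conditional
 * modifier.
 */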
enum elk_conditional_mod
elk_cmod_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_flt32:
   case nir_op_ilt:
   case nir_op_ilt32:
   case nir_op_ult:
   case nir_op_ult32:
      return ELK_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_fge32:
   case nir_op_ige:
   case nir_op_ige32:
   case nir_op_uge:
   case nir_op_uge32:
      return ELK_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_feq32:
   case nir_op_ieq:
   case nir_op_ieq32:
   case nir_op_b32all_fequal2:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32all_iequal4:
      return ELK_CONDITIONAL_Z;

   case nir_op_fneu:
   case nir_op_fneu32:
   case nir_op_ine:
   case nir_op_ine32:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32any_inequal4:
      return ELK_CONDITIONAL_NZ;

   default:
      unreachable("Unsupported NIR comparison op");
   }
}

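/* Map a NIR atomic intrinsic to the corresponding LSC atomic opcode.  Integer
 * adds of the constants 1 and -1 are turned into LSC_OP_ATOMIC_INC and
 * LSC_OP_ATOMIC_DEC respectively.
 */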
enum elk_lsc_opcode
elk_lsc_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
{
   switch (nir_intrinsic_atomic_op(atomic)) {
   case nir_atomic_op_iadd: {
      unsigned src_idx;
      switch (atomic->intrinsic) {
      case nir_intrinsic_image_atomic:
      case nir_intrinsic_bindless_image_atomic:
         src_idx = 3;
         break;
      case nir_intrinsic_ssbo_atomic:
         src_idx = 2;
         break;
      case nir_intrinsic_shared_atomic:
      case nir_intrinsic_global_atomic:
         src_idx = 1;
         break;
      default:
         unreachable("Invalid add atomic opcode");
      }

      if (nir_src_is_const(atomic->src[src_idx])) {
         int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
         if (add_val == 1)
            return LSC_OP_ATOMIC_INC;
         else if (add_val == -1)
            return LSC_OP_ATOMIC_DEC;
      }
      return LSC_OP_ATOMIC_ADD;
   }

   case nir_atomic_op_imin: return LSC_OP_ATOMIC_MIN;
   case nir_atomic_op_umin: return LSC_OP_ATOMIC_UMIN;
   case nir_atomic_op_imax: return LSC_OP_ATOMIC_MAX;
   case nir_atomic_op_umax: return LSC_OP_ATOMIC_UMAX;
   case nir_atomic_op_iand: return LSC_OP_ATOMIC_AND;
   case nir_atomic_op_ior:  return LSC_OP_ATOMIC_OR;
   case nir_atomic_op_ixor: return LSC_OP_ATOMIC_XOR;
   case nir_atomic_op_xchg: return LSC_OP_ATOMIC_STORE;
   case nir_atomic_op_cmpxchg: return LSC_OP_ATOMIC_CMPXCHG;

   case nir_atomic_op_fmin: return LSC_OP_ATOMIC_FMIN;
   case nir_atomic_op_fmax: return LSC_OP_ATOMIC_FMAX;
   case nir_atomic_op_fcmpxchg: return LSC_OP_ATOMIC_FCMPXCHG;
   case nir_atomic_op_fadd: return LSC_OP_ATOMIC_FADD;

   default:
      unreachable("Unsupported NIR atomic intrinsic");
   }
}

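/* Return the ELK register type corresponding to a NIR ALU type.  64-bit
 * integer types map to DF on platforms before Gfx8, where native Q/UQ
 * register types are not available.
 */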
enum elk_reg_type
elk_type_for_nir_type(const struct intel_device_info *devinfo,
                      nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return ELK_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return ELK_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return ELK_REGISTER_TYPE_F;
   case nir_type_float16:
      return ELK_REGISTER_TYPE_HF;
   case nir_type_float64:
      return ELK_REGISTER_TYPE_DF;
   case nir_type_int64:
      return devinfo->ver < 8 ? ELK_REGISTER_TYPE_DF : ELK_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return devinfo->ver < 8 ? ELK_REGISTER_TYPE_DF : ELK_REGISTER_TYPE_UQ;
   case nir_type_int16:
      return ELK_REGISTER_TYPE_W;
   case nir_type_uint16:
      return ELK_REGISTER_TYPE_UW;
   case nir_type_int8:
      return ELK_REGISTER_TYPE_B;
   case nir_type_uint8:
      return ELK_REGISTER_TYPE_UB;
   default:
      unreachable("unknown type");
   }

   return ELK_REGISTER_TYPE_F;
}

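/* Create a passthrough tessellation control shader for the given key using
 * the common NIR helper, covering every varying in key->outputs_written
 * except the tess levels, then run elk_preprocess_nir on it so it is ready
 * for the rest of the compile pipeline.
 */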
nir_shader *
elk_nir_create_passthrough_tcs(void *mem_ctx, const struct elk_compiler *compiler,
                               const struct elk_tcs_prog_key *key)
{
   assert(key->input_vertices > 0);

   const nir_shader_compiler_options *options =
      compiler->nir_options[MESA_SHADER_TESS_CTRL];

   uint64_t inputs_read = key->outputs_written &
      ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);

   unsigned locations[64];
   unsigned num_locations = 0;

   u_foreach_bit64(varying, inputs_read)
      locations[num_locations++] = varying;

   nir_shader *nir =
      nir_create_passthrough_tcs_impl(options, locations, num_locations,
                                      key->input_vertices);

   ralloc_steal(mem_ctx, nir);

   nir->info.inputs_read = inputs_read;
   nir->info.tess._primitive_mode = key->_tes_primitive_mode;
   nir_validate_shader(nir, "in elk_nir_create_passthrough_tcs");

   struct elk_nir_compiler_opts opts = {};
   elk_preprocess_nir(compiler, nir, &opts);

   return nir;
}

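/* Emit code that loads the value of a load_uniform intrinsic from global
 * memory at base_addr + off.  Constant offsets use 64-byte block loads (two
 * blocks, in case the value straddles a 64B boundary); variable offsets fall
 * back to an ordinary global constant load.
 */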
nir_def *
elk_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
      nir_def *base_addr, unsigned off)
{
   assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);

   unsigned bit_size = load_uniform->def.bit_size;
   assert(bit_size >= 8 && bit_size % 8 == 0);
   unsigned byte_size = bit_size / 8;
   nir_def *sysval;

   if (nir_src_is_const(load_uniform->src[0])) {
      uint64_t offset = off +
                        nir_intrinsic_base(load_uniform) +
                        nir_src_as_uint(load_uniform->src[0]);

      /* Things should be component-aligned. */
      assert(offset % byte_size == 0);

      unsigned suboffset = offset % 64;
      uint64_t aligned_offset = offset - suboffset;

      /* Load two just in case we go over a 64B boundary */
      nir_def *data[2];
      for (unsigned i = 0; i < 2; i++) {
         nir_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
         data[i] = nir_load_global_constant_uniform_block_intel(b, 16, 32, addr);
      }

      sysval = nir_extract_bits(b, data, 2, suboffset * 8,
                                load_uniform->num_components, bit_size);
   } else {
      nir_def *offset32 =
         nir_iadd_imm(b, load_uniform->src[0].ssa,
                         off + nir_intrinsic_base(load_uniform));
      nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
      sysval = nir_load_global_constant(b, addr, byte_size,
                                        load_uniform->num_components, bit_size);
   }

   return sysval;
}

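/* Return the glsl_type to use for an I/O variable: the interface (block)
 * type if there is one, otherwise the variable's own type with the outer
 * per-vertex array stripped for arrayed I/O stages.
 */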
const struct glsl_type *
elk_nir_get_var_type(const struct nir_shader *nir, nir_variable *var)
{
   const struct glsl_type *type = var->interface_type;
   if (!type) {
      type = var->type;
      if (nir_is_arrayed_io(var, nir->info.stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }
   }

   return type;
}