1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "intel_nir.h"
25 #include "brw_nir.h"
26 #include "brw_shader.h"
27 #include "compiler/glsl_types.h"
28 #include "compiler/nir/nir_builder.h"
29 
30 /*
31  * Returns the minimum number of vec4 (as_vec4 == true) or dvec4 (as_vec4 ==
32  * false) elements needed to pack a type.
33  */
34 static int
35 type_size_xvec4(const struct glsl_type *type, bool as_vec4, bool bindless)
36 {
37    unsigned int i;
38    int size;
39 
40    switch (type->base_type) {
41    case GLSL_TYPE_UINT:
42    case GLSL_TYPE_INT:
43    case GLSL_TYPE_FLOAT:
44    case GLSL_TYPE_FLOAT16:
45    case GLSL_TYPE_BOOL:
46    case GLSL_TYPE_DOUBLE:
47    case GLSL_TYPE_UINT16:
48    case GLSL_TYPE_INT16:
49    case GLSL_TYPE_UINT8:
50    case GLSL_TYPE_INT8:
51    case GLSL_TYPE_UINT64:
52    case GLSL_TYPE_INT64:
53       if (glsl_type_is_matrix(type)) {
54          const glsl_type *col_type = glsl_get_column_type(type);
55          unsigned col_slots =
56             (as_vec4 && glsl_type_is_dual_slot(col_type)) ? 2 : 1;
57          return type->matrix_columns * col_slots;
58       } else {
59          /* Regardless of the size of the vector, it gets a vec4. This is bad
60           * packing for things like floats, but otherwise arrays become a
61           * mess.  Hopefully a later pass over the code can pack scalars
62           * down if appropriate.
63           */
64          return (as_vec4 && glsl_type_is_dual_slot(type)) ? 2 : 1;
65       }
66    case GLSL_TYPE_ARRAY:
67       assert(type->length > 0);
68       return type_size_xvec4(type->fields.array, as_vec4, bindless) *
69              type->length;
70    case GLSL_TYPE_STRUCT:
71    case GLSL_TYPE_INTERFACE:
72       size = 0;
73       for (i = 0; i < type->length; i++) {
74 	 size += type_size_xvec4(type->fields.structure[i].type, as_vec4,
75                                  bindless);
76       }
77       return size;
78    case GLSL_TYPE_SUBROUTINE:
79       return 1;
80 
81    case GLSL_TYPE_SAMPLER:
82    case GLSL_TYPE_TEXTURE:
83       /* Samplers and textures take up no register space, since they're baked
84        * in at link time.
85        */
86       return bindless ? 1 : 0;
87    case GLSL_TYPE_ATOMIC_UINT:
88       return 0;
89    case GLSL_TYPE_IMAGE:
90       return bindless ? 1 : DIV_ROUND_UP(ISL_IMAGE_PARAM_SIZE, 4);
91    case GLSL_TYPE_VOID:
92    case GLSL_TYPE_ERROR:
93    case GLSL_TYPE_COOPERATIVE_MATRIX:
94       unreachable("not reached");
95    }
96 
97    return 0;
98 }
99 
100 /**
101  * Returns the minimum number of vec4 elements needed to pack a type.
102  *
103  * For simple types, it will return 1 (a single vec4); for matrices, the
104  * number of columns; for array and struct, the sum of the vec4_size of
105  * each of its elements; and for sampler and atomic, zero.
106  *
107  * This method is useful to calculate how much register space is needed to
108  * store a particular type.
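 *
 * For example (derived from type_size_xvec4() above): a float, vec3, or
 * ivec4 counts as 1 slot; a mat4 as 4 (one per column); a dvec4 as 2 (it is
 * dual-slot in vec4 terms); and float[3] as 3.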
109  */
110 int
111 type_size_vec4(const struct glsl_type *type, bool bindless)
112 {
113    return type_size_xvec4(type, true, bindless);
114 }
115 
116 /**
117  * Returns the minimum number of dvec4 elements needed to pack a type.
118  *
119  * For simple types, it will return 1 (a single dvec4); for matrices, the
120  * number of columns; for array and struct, the sum of the dvec4_size of
121  * each of its elements; and for sampler and atomic, zero.
122  *
123  * This method is useful to calculate how much register space is needed to
124  * store a particular type.
125  *
126  * Measuring double-precision vertex inputs as dvec4 is required because
127  * ARB_vertex_attrib_64bit states that these use the same number of locations
128  * as the single-precision version. That is, two consecutive dvec4s would be
129  * located in location "x" and location "x+1", not "x+2".
130  *
131  * In order to map vec4/dvec4 vertex inputs to the proper ATTRs,
132  * remap_vs_attrs() will take into account both the location and whether the
133  * type fits in one or two vec4 slots.
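 *
 * For example: under dvec4 counting a dvec4 input occupies a single slot,
 * whereas type_size_vec4() above would report 2; a dmat2 still takes one
 * slot per column, i.e. 2.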
134  */
135 int
136 type_size_dvec4(const struct glsl_type *type, bool bindless)
137 {
138    return type_size_xvec4(type, false, bindless);
139 }
140 
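/* Remap reads/writes of gl_TessLevelInner/Outer to their packed DWord
 * locations in the patch URB header.  Roughly (see the per-case comments
 * below):
 *
 *    quads:     inner[0..1] -> DWords 3-2 (reversed), outer[0..3] -> 7-4 (reversed)
 *    triangles: inner[0]    -> DWord  4,              outer[0..2] -> 7-5 (reversed)
 *    isolines:  no inner levels,                      outer[0..1] -> 6-7 (in order)
 *
 * Accesses that do not exist in the current domain's layout are deleted as
 * out of bounds.
 */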
141 static bool
142 remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
143                   enum tess_primitive_mode _primitive_mode)
144 {
145    const int location = nir_intrinsic_base(intr);
146    const unsigned component = nir_intrinsic_component(intr);
147    bool out_of_bounds = false;
148    bool write = !nir_intrinsic_infos[intr->intrinsic].has_dest;
149    unsigned mask = write ? nir_intrinsic_write_mask(intr) : 0;
150    nir_def *src = NULL, *dest = NULL;
151 
152    if (write) {
153       assert(intr->num_components == intr->src[0].ssa->num_components);
154    } else {
155       assert(intr->num_components == intr->def.num_components);
156    }
157 
158    if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
159       b->cursor = write ? nir_before_instr(&intr->instr)
160                         : nir_after_instr(&intr->instr);
161 
162       switch (_primitive_mode) {
163       case TESS_PRIMITIVE_QUADS:
164          /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
165          nir_intrinsic_set_base(intr, 0);
166 
167          if (write) {
168             assert(intr->src[0].ssa->num_components == 2);
169 
170             intr->num_components = 4;
171 
172             nir_def *undef = nir_undef(b, 1, 32);
173             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
174             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
175             src = nir_vec4(b, undef, undef, y, x);
176             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
177          } else if (intr->def.num_components > 1) {
178             assert(intr->def.num_components == 2);
179 
180             intr->num_components = 4;
181             intr->def.num_components = 4;
182 
183             unsigned wz[2] = { 3, 2 };
184             dest = nir_swizzle(b, &intr->def, wz, 2);
185          } else {
186             nir_intrinsic_set_component(intr, 3 - component);
187          }
188          break;
189       case TESS_PRIMITIVE_TRIANGLES:
190          /* gl_TessLevelInner[0] lives at DWord 4. */
191          nir_intrinsic_set_base(intr, 1);
192          mask &= WRITEMASK_X;
193          out_of_bounds = component > 0;
194          break;
195       case TESS_PRIMITIVE_ISOLINES:
196          out_of_bounds = true;
197          break;
198       default:
199          unreachable("Bogus tessellation domain");
200       }
201    } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
202       b->cursor = write ? nir_before_instr(&intr->instr)
203                         : nir_after_instr(&intr->instr);
204 
205       nir_intrinsic_set_base(intr, 1);
206 
207       switch (_primitive_mode) {
208       case TESS_PRIMITIVE_QUADS:
209       case TESS_PRIMITIVE_TRIANGLES:
210          /* Quads:     gl_TessLevelOuter[0..3] lives at DWords 7-4 (reversed).
211           * Triangles: gl_TessLevelOuter[0..2] lives at DWords 7-5 (reversed).
212           */
213          if (write) {
214             assert(intr->src[0].ssa->num_components == 4);
215 
216             unsigned wzyx[4] = { 3, 2, 1, 0 };
217             src = nir_swizzle(b, intr->src[0].ssa, wzyx, 4);
218             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2 |
219                    !!(mask & WRITEMASK_Z) << 1 | !!(mask & WRITEMASK_W) << 0;
220 
221             /* Don't overwrite the inner factor at DWord 4 for triangles */
222             if (_primitive_mode == TESS_PRIMITIVE_TRIANGLES)
223                mask &= ~WRITEMASK_X;
224          } else if (intr->def.num_components > 1) {
225             assert(intr->def.num_components == 4);
226 
227             unsigned wzyx[4] = { 3, 2, 1, 0 };
228             dest = nir_swizzle(b, &intr->def, wzyx, 4);
229          } else {
230             nir_intrinsic_set_component(intr, 3 - component);
231             out_of_bounds = component == 3 &&
232                             _primitive_mode == TESS_PRIMITIVE_TRIANGLES;
233          }
234          break;
235       case TESS_PRIMITIVE_ISOLINES:
236          /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
237          if (write) {
238             assert(intr->src[0].ssa->num_components == 4);
239 
240             nir_def *undef = nir_undef(b, 1, 32);
241             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
242             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
243             src = nir_vec4(b, undef, undef, x, y);
244             mask = !!(mask & WRITEMASK_X) << 2 | !!(mask & WRITEMASK_Y) << 3;
245          } else {
246             nir_intrinsic_set_component(intr, 2 + component);
247             out_of_bounds = component > 1;
248          }
249          break;
250       default:
251          unreachable("Bogus tessellation domain");
252       }
253    } else {
254       return false;
255    }
256 
257    if (out_of_bounds) {
258       if (!write)
259          nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, 32));
260       nir_instr_remove(&intr->instr);
261    } else if (write) {
262       nir_intrinsic_set_write_mask(intr, mask);
263 
264       if (src) {
265          nir_src_rewrite(&intr->src[0], src);
266       }
267    } else if (dest) {
268       nir_def_rewrite_uses_after(&intr->def, dest,
269                                      dest->parent_instr);
270    }
271 
272    return true;
273 }
274 
275 static bool
276 is_input(nir_intrinsic_instr *intrin)
277 {
278    return intrin->intrinsic == nir_intrinsic_load_input ||
279           intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
280           intrin->intrinsic == nir_intrinsic_load_interpolated_input;
281 }
282 
283 static bool
284 is_output(nir_intrinsic_instr *intrin)
285 {
286    return intrin->intrinsic == nir_intrinsic_load_output ||
287           intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
288           intrin->intrinsic == nir_intrinsic_store_output ||
289           intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
290 }
291 
292 
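/* Rewrite TCS output and TES input intrinsics so that their base refers to a
 * slot in the VUE map instead of a varying location, folding any per-vertex
 * index into the indirect offset (vertex * num_per_vertex_slots).
 */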
293 static bool
294 remap_patch_urb_offsets(nir_block *block, nir_builder *b,
295                         const struct intel_vue_map *vue_map,
296                         enum tess_primitive_mode tes_primitive_mode)
297 {
298    nir_foreach_instr_safe(instr, block) {
299       if (instr->type != nir_instr_type_intrinsic)
300          continue;
301 
302       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
303 
304       gl_shader_stage stage = b->shader->info.stage;
305 
306       if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
307           (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
308 
309          if (remap_tess_levels(b, intrin, tes_primitive_mode))
310             continue;
311 
312          int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
313          assert(vue_slot != -1);
314          intrin->const_index[0] = vue_slot;
315 
316          nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
317          if (vertex) {
318             if (nir_src_is_const(*vertex)) {
319                intrin->const_index[0] += nir_src_as_uint(*vertex) *
320                                          vue_map->num_per_vertex_slots;
321             } else {
322                b->cursor = nir_before_instr(&intrin->instr);
323 
324                /* Multiply by the number of per-vertex slots. */
325                nir_def *vertex_offset =
326                   nir_imul(b,
327                            vertex->ssa,
328                            nir_imm_int(b,
329                                        vue_map->num_per_vertex_slots));
330 
331                /* Add it to the existing offset */
332                nir_src *offset = nir_get_io_offset_src(intrin);
333                nir_def *total_offset =
334                   nir_iadd(b, vertex_offset,
335                            offset->ssa);
336 
337                nir_src_rewrite(offset, total_offset);
338             }
339          }
340       }
341    }
342    return true;
343 }
344 
345 void
346 brw_nir_lower_vs_inputs(nir_shader *nir)
347 {
348    /* Start with the location of the variable's base. */
349    nir_foreach_shader_in_variable(var, nir)
350       var->data.driver_location = var->data.location;
351 
352    /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
353     * loaded as one vec4 or dvec4 per element (or matrix column), depending on
354     * whether it is a double-precision type or not.
355     */
356    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
357                 nir_lower_io_lower_64bit_to_32);
358 
359    /* This pass needs actual constants */
360    nir_opt_constant_folding(nir);
361 
362    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
363 
364    /* The last step is to remap VERT_ATTRIB_* to actual registers */
365 
366    /* Whether or not we have any system generated values.  gl_DrawID is not
367     * included here as it lives in its own vec4.
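    * As laid out by the switch below, that SGV vec4 holds FirstVertex,
    * BaseInstance, VertexID and InstanceID in components 0-3, while
    * gl_DrawID/IsIndexedDraw go in components 0-1 of the following element.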
368     */
369    const bool has_sgvs =
370       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
371       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
372       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
373       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
374 
375    const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
376 
377    nir_foreach_function_impl(impl, nir) {
378       nir_builder b = nir_builder_create(impl);
379 
380       nir_foreach_block(block, impl) {
381          nir_foreach_instr_safe(instr, block) {
382             if (instr->type != nir_instr_type_intrinsic)
383                continue;
384 
385             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
386 
387             switch (intrin->intrinsic) {
388             case nir_intrinsic_load_first_vertex:
389             case nir_intrinsic_load_base_instance:
390             case nir_intrinsic_load_vertex_id_zero_base:
391             case nir_intrinsic_load_instance_id:
392             case nir_intrinsic_load_is_indexed_draw:
393             case nir_intrinsic_load_draw_id: {
394                b.cursor = nir_after_instr(&intrin->instr);
395 
396                /* gl_VertexID and friends are stored by the VF as the last
397                 * vertex element.  We convert them to load_input intrinsics at
398                 * the right location.
399                 */
400                nir_intrinsic_instr *load =
401                   nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
402                load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
403 
404                nir_intrinsic_set_base(load, num_inputs);
405                switch (intrin->intrinsic) {
406                case nir_intrinsic_load_first_vertex:
407                   nir_intrinsic_set_component(load, 0);
408                   break;
409                case nir_intrinsic_load_base_instance:
410                   nir_intrinsic_set_component(load, 1);
411                   break;
412                case nir_intrinsic_load_vertex_id_zero_base:
413                   nir_intrinsic_set_component(load, 2);
414                   break;
415                case nir_intrinsic_load_instance_id:
416                   nir_intrinsic_set_component(load, 3);
417                   break;
418                case nir_intrinsic_load_draw_id:
419                case nir_intrinsic_load_is_indexed_draw:
420                   /* gl_DrawID and IsIndexedDraw are stored right after
421                    * gl_VertexID and friends if any of them exist.
422                    */
423                   nir_intrinsic_set_base(load, num_inputs + has_sgvs);
424                   if (intrin->intrinsic == nir_intrinsic_load_draw_id)
425                      nir_intrinsic_set_component(load, 0);
426                   else
427                      nir_intrinsic_set_component(load, 1);
428                   break;
429                default:
430                   unreachable("Invalid system value intrinsic");
431                }
432 
433                load->num_components = 1;
434                nir_def_init(&load->instr, &load->def, 1, 32);
435                nir_builder_instr_insert(&b, &load->instr);
436 
437                nir_def_rewrite_uses(&intrin->def,
438                                         &load->def);
439                nir_instr_remove(&intrin->instr);
440                break;
441             }
442 
443             case nir_intrinsic_load_input: {
444                /* Attributes come in a contiguous block, ordered by their
445                 * gl_vert_attrib value.  That means we can compute the slot
446                 * number for an attribute by masking out the enabled attributes
447                 * before it and counting the bits.
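                * For example, if inputs_read has bits 0, 3 and 5 set, the
                * attribute at location 5 masks down to bits {0, 3} and so
                * lands in slot 2.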
448                 */
449                int attr = nir_intrinsic_base(intrin);
450                int slot = util_bitcount64(nir->info.inputs_read &
451                                           BITFIELD64_MASK(attr));
452                nir_intrinsic_set_base(intrin, slot);
453                break;
454             }
455 
456             default:
457                break; /* Nothing to do */
458             }
459          }
460       }
461    }
462 }
463 
464 void
465 brw_nir_lower_vue_inputs(nir_shader *nir,
466                          const struct intel_vue_map *vue_map)
467 {
468    nir_foreach_shader_in_variable(var, nir)
469       var->data.driver_location = var->data.location;
470 
471    /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
472    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
473                 nir_lower_io_lower_64bit_to_32);
474 
475    /* This pass needs actual constants */
476    nir_opt_constant_folding(nir);
477 
478    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
479 
480    nir_foreach_function_impl(impl, nir) {
481       nir_foreach_block(block, impl) {
482          nir_foreach_instr(instr, block) {
483             if (instr->type != nir_instr_type_intrinsic)
484                continue;
485 
486             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
487 
488             if (intrin->intrinsic == nir_intrinsic_load_input ||
489                 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
490                /* Offset 0 is the VUE header, which contains
491                 * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
492                 * VARYING_SLOT_PSIZ [.w].
493                 */
494                int varying = nir_intrinsic_base(intrin);
495                int vue_slot;
496                switch (varying) {
497                case VARYING_SLOT_PSIZ:
498                   nir_intrinsic_set_base(intrin, 0);
499                   nir_intrinsic_set_component(intrin, 3);
500                   break;
501 
502                default:
503                   vue_slot = vue_map->varying_to_slot[varying];
504                   assert(vue_slot != -1);
505                   nir_intrinsic_set_base(intrin, vue_slot);
506                   break;
507                }
508             }
509          }
510       }
511    }
512 }
513 
514 void
515 brw_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue_map)
516 {
517    nir_foreach_shader_in_variable(var, nir)
518       var->data.driver_location = var->data.location;
519 
520    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
521                 nir_lower_io_lower_64bit_to_32);
522 
523    /* This pass needs actual constants */
524    nir_opt_constant_folding(nir);
525 
526    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
527 
528    nir_foreach_function_impl(impl, nir) {
529       nir_builder b = nir_builder_create(impl);
530       nir_foreach_block(block, impl) {
531          remap_patch_urb_offsets(block, &b, vue_map,
532                                  nir->info.tess._primitive_mode);
533       }
534    }
535 }
536 
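/* Used when per-sample interpolation is forced on: rewrites pixel and
 * centroid barycentric loads into per-sample barycentric loads with the same
 * interpolation mode.
 */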
537 static bool
538 lower_barycentric_per_sample(nir_builder *b,
539                              nir_intrinsic_instr *intrin,
540                              UNUSED void *cb_data)
541 {
542    if (intrin->intrinsic != nir_intrinsic_load_barycentric_pixel &&
543        intrin->intrinsic != nir_intrinsic_load_barycentric_centroid)
544       return false;
545 
546    b->cursor = nir_before_instr(&intrin->instr);
547    nir_def *centroid =
548       nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
549                            nir_intrinsic_interp_mode(intrin));
550    nir_def_rewrite_uses(&intrin->def, centroid);
551    nir_instr_remove(&intrin->instr);
552    return true;
553 }
554 
555 /**
556  * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
557  * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
558  *
559  * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
560  * representable in a S0.4 value; a naive conversion would give us -8/16,
561  * which is the opposite of what was intended.
562  *
563  * This is allowed by GL_ARB_gpu_shader5's quantization rules:
564  *
565  *    "Not all values of <offset> may be supported; x and y offsets may
566  *     be rounded to fixed-point values with the number of fraction bits
567  *     given by the implementation-dependent constant
568  *     FRAGMENT_INTERPOLATION_OFFSET_BITS."
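 *
 * For example, an offset of 0.25 becomes trunc(0.25 * 16) = 4 (i.e. 4/16),
 * while 0.5 becomes 8 and is then clamped to 7 (+7/16) by the nir_imin()
 * below.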
569  */
570 static bool
571 lower_barycentric_at_offset(nir_builder *b, nir_intrinsic_instr *intrin,
572                             void *data)
573 {
574    if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
575       return false;
576 
577    b->cursor = nir_before_instr(&intrin->instr);
578 
579    assert(intrin->src[0].ssa);
580    nir_def *offset =
581       nir_imin(b, nir_imm_int(b, 7),
582                nir_f2i32(b, nir_fmul_imm(b, intrin->src[0].ssa, 16)));
583 
584    nir_src_rewrite(&intrin->src[0], offset);
585 
586    return true;
587 }
588 
589 void
590 brw_nir_lower_fs_inputs(nir_shader *nir,
591                         const struct intel_device_info *devinfo,
592                         const struct brw_wm_prog_key *key)
593 {
594    nir_foreach_shader_in_variable(var, nir) {
595       var->data.driver_location = var->data.location;
596 
597       /* Apply default interpolation mode.
598        *
599        * Everything defaults to smooth except for the legacy GL color
600        * built-in variables, which might be flat depending on API state.
601        */
602       if (var->data.interpolation == INTERP_MODE_NONE) {
603          const bool flat = key->flat_shade &&
604             (var->data.location == VARYING_SLOT_COL0 ||
605              var->data.location == VARYING_SLOT_COL1);
606 
607          var->data.interpolation = flat ? INTERP_MODE_FLAT
608                                         : INTERP_MODE_SMOOTH;
609       }
610    }
611 
612    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
613                 nir_lower_io_lower_64bit_to_32);
614    if (devinfo->ver >= 11)
615       nir_lower_interpolation(nir, ~0);
616 
617    if (key->multisample_fbo == BRW_NEVER) {
618       nir_lower_single_sampled(nir);
619    } else if (key->persample_interp == BRW_ALWAYS) {
620       nir_shader_intrinsics_pass(nir, lower_barycentric_per_sample,
621                                    nir_metadata_block_index |
622                                    nir_metadata_dominance,
623                                    NULL);
624    }
625 
626    nir_shader_intrinsics_pass(nir, lower_barycentric_at_offset,
627                                 nir_metadata_block_index |
628                                 nir_metadata_dominance,
629                                 NULL);
630 
631    /* This pass needs actual constants */
632    nir_opt_constant_folding(nir);
633 
634    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
635 }
636 
637 void
638 brw_nir_lower_vue_outputs(nir_shader *nir)
639 {
640    nir_foreach_shader_out_variable(var, nir) {
641       var->data.driver_location = var->data.location;
642    }
643 
644    nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
645                 nir_lower_io_lower_64bit_to_32);
646 }
647 
648 void
649 brw_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue_map,
650                           enum tess_primitive_mode tes_primitive_mode)
651 {
652    nir_foreach_shader_out_variable(var, nir) {
653       var->data.driver_location = var->data.location;
654    }
655 
656    nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
657                 nir_lower_io_lower_64bit_to_32);
658 
659    /* This pass needs actual constants */
660    nir_opt_constant_folding(nir);
661 
662    nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
663 
664    nir_foreach_function_impl(impl, nir) {
665       nir_builder b = nir_builder_create(impl);
666       nir_foreach_block(block, impl) {
667          remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
668       }
669    }
670 }
671 
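/* Fragment shader outputs pack both the location and the dual-source blend
 * index (var->data.index) into driver_location using SET_FIELD, so that both
 * can later be recovered from the intrinsic base.
 */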
672 void
673 brw_nir_lower_fs_outputs(nir_shader *nir)
674 {
675    nir_foreach_shader_out_variable(var, nir) {
676       var->data.driver_location =
677          SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
678          SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
679    }
680 
681    nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
682 }
683 
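/* Run a NIR pass on "nir", set the enclosing function's local "progress" flag
 * if the pass changed anything, and evaluate to whether this particular
 * invocation made progress.
 */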
684 #define OPT(pass, ...) ({                                  \
685    bool this_progress = false;                             \
686    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
687    if (this_progress)                                      \
688       progress = true;                                     \
689    this_progress;                                          \
690 })
691 
692 void
693 brw_nir_optimize(nir_shader *nir,
694                  const struct intel_device_info *devinfo)
695 {
696    bool progress;
697    unsigned lower_flrp =
698       (nir->options->lower_flrp16 ? 16 : 0) |
699       (nir->options->lower_flrp32 ? 32 : 0) |
700       (nir->options->lower_flrp64 ? 64 : 0);
701 
702    do {
703       progress = false;
704       /* This pass is causing problems with types used by OpenCL :
705        *    https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13955
706        *
707        * Running with it disabled made no difference in the resulting assembly
708        * code.
709        */
710       if (nir->info.stage != MESA_SHADER_KERNEL)
711          OPT(nir_split_array_vars, nir_var_function_temp);
712       OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
713       OPT(nir_opt_deref);
714       if (OPT(nir_opt_memcpy))
715          OPT(nir_split_var_copies);
716       OPT(nir_lower_vars_to_ssa);
717       if (!nir->info.var_copies_lowered) {
718          /* Only run this pass if nir_lower_var_copies was not called
719           * yet. That would lower away any copy_deref instructions and we
720           * don't want to introduce any more.
721           */
722          OPT(nir_opt_find_array_copies);
723       }
724       OPT(nir_opt_copy_prop_vars);
725       OPT(nir_opt_dead_write_vars);
726       OPT(nir_opt_combine_stores, nir_var_all);
727 
728       OPT(nir_opt_ray_queries);
729       OPT(nir_opt_ray_query_ranges);
730 
731       OPT(nir_lower_alu_to_scalar, NULL, NULL);
732 
733       OPT(nir_copy_prop);
734 
735       OPT(nir_lower_phis_to_scalar, false);
736 
737       OPT(nir_copy_prop);
738       OPT(nir_opt_dce);
739       OPT(nir_opt_cse);
740       OPT(nir_opt_combine_stores, nir_var_all);
741 
742       /* Passing 0 to the peephole select pass causes it to convert
743        * if-statements that contain only move instructions in the branches
744        * regardless of the count.
745        *
746        * Passing 1 to the peephole select pass causes it to convert
747        * if-statements that contain at most a single ALU instruction (total)
748        * in both branches.  Before Gfx6, some math instructions were
749        * prohibitively expensive and the results of compare operations need an
750        * extra resolve step.  For these reasons, this pass is more harmful
751        * than good on those platforms.
752        *
753        * For indirect loads of uniforms (push constants), we assume that array
754        * indices will nearly always be in bounds and the cost of the load is
755        * low.  Therefore there shouldn't be a performance benefit to avoid it.
756        */
757       OPT(nir_opt_peephole_select, 0, true, false);
758       OPT(nir_opt_peephole_select, 8, true, true);
759 
760       OPT(nir_opt_intrinsics);
761       OPT(nir_opt_idiv_const, 32);
762       OPT(nir_opt_algebraic);
763 
764       OPT(nir_opt_reassociate_bfi);
765 
766       OPT(nir_lower_constant_convert_alu_types);
767       OPT(nir_opt_constant_folding);
768 
769       if (lower_flrp != 0) {
770          if (OPT(nir_lower_flrp,
771                  lower_flrp,
772                  false /* always_precise */)) {
773             OPT(nir_opt_constant_folding);
774          }
775 
776          /* Nothing should rematerialize any flrps, so we only need to do this
777           * lowering once.
778           */
779          lower_flrp = 0;
780       }
781 
782       OPT(nir_opt_dead_cf);
783       if (OPT(nir_opt_loop)) {
784          /* If nir_opt_loop makes progress, then we need to clean
785           * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
786           * to make progress.
787           */
788          OPT(nir_copy_prop);
789          OPT(nir_opt_dce);
790       }
791       OPT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
792       OPT(nir_opt_conditional_discard);
793       if (nir->options->max_unroll_iterations != 0) {
794          OPT(nir_opt_loop_unroll);
795       }
796       OPT(nir_opt_remove_phis);
797       OPT(nir_opt_gcm, false);
798       OPT(nir_opt_undef);
799       OPT(nir_lower_pack);
800    } while (progress);
801 
802    /* Work around an unused local sampler variable in Gfxbench which would trigger an
803     * assert in the opt_large_constants pass.
804     */
805    OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
806 }
807 
808 static unsigned
809 lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
810 {
811    switch (instr->type) {
812    case nir_instr_type_alu: {
813       nir_alu_instr *alu = nir_instr_as_alu(instr);
814       switch (alu->op) {
815       case nir_op_bit_count:
816       case nir_op_ufind_msb:
817       case nir_op_ifind_msb:
818       case nir_op_find_lsb:
819          /* These are handled specially because the destination is always
820           * 32-bit and so the bit size of the instruction is given by the
821           * source.
822           */
823          return alu->src[0].src.ssa->bit_size >= 32 ? 0 : 32;
824       default:
825          break;
826       }
827 
828       if (alu->def.bit_size >= 32)
829          return 0;
830 
831       /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
832        * 8-bit ABS or NEG instruction should eventually get copy propagated
833        * into the MOV that does the type conversion.  This results in far
834        * fewer MOV instructions.
835        */
836       switch (alu->op) {
837       case nir_op_idiv:
838       case nir_op_imod:
839       case nir_op_irem:
840       case nir_op_udiv:
841       case nir_op_umod:
842       case nir_op_fceil:
843       case nir_op_ffloor:
844       case nir_op_ffract:
845       case nir_op_fround_even:
846       case nir_op_ftrunc:
847          return 32;
848       case nir_op_frcp:
849       case nir_op_frsq:
850       case nir_op_fsqrt:
851       case nir_op_fpow:
852       case nir_op_fexp2:
853       case nir_op_flog2:
854       case nir_op_fsin:
855       case nir_op_fcos:
856          return 0;
857       case nir_op_isign:
858          assert(!"Should have been lowered by nir_opt_algebraic.");
859          return 0;
860       default:
861          if (nir_op_infos[alu->op].num_inputs >= 2 &&
862              alu->def.bit_size == 8)
863             return 16;
864 
865          if (nir_alu_instr_is_comparison(alu) &&
866              alu->src[0].src.ssa->bit_size == 8)
867             return 16;
868 
869          return 0;
870       }
871       break;
872    }
873 
874    case nir_instr_type_intrinsic: {
875       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
876       switch (intrin->intrinsic) {
877       case nir_intrinsic_read_invocation:
878       case nir_intrinsic_read_first_invocation:
879       case nir_intrinsic_vote_feq:
880       case nir_intrinsic_vote_ieq:
881       case nir_intrinsic_shuffle:
882       case nir_intrinsic_shuffle_xor:
883       case nir_intrinsic_shuffle_up:
884       case nir_intrinsic_shuffle_down:
885       case nir_intrinsic_quad_broadcast:
886       case nir_intrinsic_quad_swap_horizontal:
887       case nir_intrinsic_quad_swap_vertical:
888       case nir_intrinsic_quad_swap_diagonal:
889          if (intrin->src[0].ssa->bit_size == 8)
890             return 16;
891          return 0;
892 
893       case nir_intrinsic_reduce:
894       case nir_intrinsic_inclusive_scan:
895       case nir_intrinsic_exclusive_scan:
896          /* There are a couple of register region issues that make things
897           * complicated for 8-bit types:
898           *
899           *    1. Only raw moves are allowed to write to a packed 8-bit
900           *       destination.
901           *    2. If we use a strided destination, the efficient way to do
902           *       scan operations ends up using strides that are too big to
903           *       encode in an instruction.
904           *
905           * To get around these issues, we just do all 8-bit scan operations
906           * in 16 bits.  It's actually fewer instructions than what we'd have
907           * to do if we were trying to do it in native 8-bit types and the
908           * results are the same once we truncate to 8 bits at the end.
909           */
910          if (intrin->def.bit_size == 8)
911             return 16;
912          return 0;
913 
914       default:
915          return 0;
916       }
917       break;
918    }
919 
920    case nir_instr_type_phi: {
921       nir_phi_instr *phi = nir_instr_as_phi(instr);
922       if (phi->def.bit_size == 8)
923          return 16;
924       return 0;
925    }
926 
927    default:
928       return 0;
929    }
930 }
931 
932 /* On gfx12.5+, if the offsets are not both constant and in the [-8, 7] range,
933  * we will have nir_lower_tex() lower the source offset by returning true from
934  * this filter function.
935  */
936 static bool
937 lower_xehp_tg4_offset_filter(const nir_instr *instr, UNUSED const void *data)
938 {
939    if (instr->type != nir_instr_type_tex)
940       return false;
941 
942    nir_tex_instr *tex = nir_instr_as_tex(instr);
943 
944    if (tex->op != nir_texop_tg4)
945       return false;
946 
947    int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
948    if (offset_index < 0)
949       return false;
950 
951    if (!nir_src_is_const(tex->src[offset_index].src))
952       return true;
953 
954    int64_t offset_x = nir_src_comp_as_int(tex->src[offset_index].src, 0);
955    int64_t offset_y = nir_src_comp_as_int(tex->src[offset_index].src, 1);
956 
957    return offset_x < -8 || offset_x > 7 || offset_y < -8 || offset_y > 7;
958 }
959 
960 /* Does some simple lowering and runs the standard suite of optimizations
961  *
962  * This is intended to be called more-or-less directly after you get the
963  * shader out of GLSL or some other source.  While it is geared towards i965,
964  * it is not at all generator-specific.
965  */
966 void
967 brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
968                    const struct brw_nir_compiler_opts *opts)
969 {
970    const struct intel_device_info *devinfo = compiler->devinfo;
971    UNUSED bool progress; /* Written by OPT */
972 
973    nir_validate_ssa_dominance(nir, "before brw_preprocess_nir");
974 
975    OPT(nir_lower_frexp);
976 
977    OPT(nir_lower_alu_to_scalar, NULL, NULL);
978 
979    if (nir->info.stage == MESA_SHADER_GEOMETRY)
980       OPT(nir_lower_gs_intrinsics, 0);
981 
982    /* See also brw_nir_trig_workarounds.py */
983    if (compiler->precise_trig &&
984        !(devinfo->ver >= 10 || devinfo->platform == INTEL_PLATFORM_KBL))
985       OPT(brw_nir_apply_trig_workarounds);
986 
987    /* This workaround exists for performance reasons. Since it requires not
988     * setting RENDER_SURFACE_STATE::SurfaceArray when the array length is 1,
989     * we lose the HW robustness feature in that case.
990     *
991     * So when robust image access is enabled, just avoid the workaround.
992     */
993    if (intel_needs_workaround(devinfo, 1806565034) && !opts->robust_image_access)
994       OPT(intel_nir_clamp_image_1d_2d_array_sizes);
995 
996    const struct intel_nir_lower_texture_opts intel_tex_options = {
997       .combined_lod_or_bias_and_offset = compiler->devinfo->ver >= 20,
998    };
999    OPT(intel_nir_lower_texture, &intel_tex_options);
1000 
1001    const nir_lower_tex_options tex_options = {
1002       .lower_txp = ~0,
1003       .lower_txf_offset = true,
1004       .lower_rect_offset = true,
1005       .lower_txd_cube_map = true,
1006       /* For below, see bspec 45942, "Enable new message layout for cube array" */
1007       .lower_txd_3d = devinfo->verx10 >= 125,
1008       .lower_txd_array = devinfo->verx10 >= 125,
1009       .lower_txb_shadow_clamp = true,
1010       .lower_txd_shadow_clamp = true,
1011       .lower_txd_offset_clamp = true,
1012       .lower_tg4_offsets = true,
1013       .lower_txs_lod = true, /* Wa_14012320009 */
1014       .lower_offset_filter =
1015          devinfo->verx10 >= 125 ? lower_xehp_tg4_offset_filter : NULL,
1016       .lower_invalid_implicit_lod = true,
1017    };
1018 
1019    /* In the case where TG4 coords are lowered to offsets and we have a
1020     * lower_xehp_tg4_offset_filter lowering those offsets further, we need to
1021     * rerun the pass because the instructions inserted by the first lowering
1022     * are not visible during that first pass.
1023     */
1024    if (OPT(nir_lower_tex, &tex_options)) {
1025       OPT(intel_nir_lower_texture, &intel_tex_options);
1026       OPT(nir_lower_tex, &tex_options);
1027    }
1028 
1029    OPT(nir_normalize_cubemap_coords);
1030 
1031    OPT(nir_lower_global_vars_to_local);
1032 
1033    OPT(nir_split_var_copies);
1034    OPT(nir_split_struct_vars, nir_var_function_temp);
1035 
1036    brw_nir_optimize(nir, devinfo);
1037 
1038    OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
1039    if (OPT(nir_lower_int64_float_conversions)) {
1040       OPT(nir_opt_algebraic);
1041       OPT(nir_lower_doubles, opts->softfp64,
1042           nir->options->lower_doubles_options);
1043    }
1044 
1045    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1046 
1047    /* Lower a bunch of stuff */
1048    OPT(nir_lower_var_copies);
1049 
1050    /* This needs to be run after the first optimization pass but before we
1051     * lower indirect derefs away
1052     */
1053    if (compiler->supports_shader_constants) {
1054       OPT(nir_opt_large_constants, NULL, 32);
1055    }
1056 
1057    OPT(nir_lower_load_const_to_scalar);
1058 
1059    OPT(nir_lower_system_values);
1060    nir_lower_compute_system_values_options lower_csv_options = {
1061       .has_base_workgroup_id = nir->info.stage == MESA_SHADER_COMPUTE,
1062    };
1063    OPT(nir_lower_compute_system_values, &lower_csv_options);
1064 
1065    const nir_lower_subgroups_options subgroups_options = {
1066       .ballot_bit_size = 32,
1067       .ballot_components = 1,
1068       .lower_to_scalar = true,
1069       .lower_relative_shuffle = true,
1070       .lower_quad_broadcast_dynamic = true,
1071       .lower_elect = true,
1072       .lower_inverse_ballot = true,
1073       .lower_rotate_to_shuffle = true,
1074    };
1075    OPT(nir_lower_subgroups, &subgroups_options);
1076 
1077    nir_variable_mode indirect_mask =
1078       brw_nir_no_indirect_mask(compiler, nir->info.stage);
1079    OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
1080 
1081    /* Even in cases where we can handle indirect temporaries via scratch,
1082     * it can still be expensive.  Lower indirects on small arrays to
1083     * conditional load/stores.
1084     *
1085     * The threshold of 16 was chosen semi-arbitrarily.  The idea is that an
1086     * indirect on an array of 16 elements is about 30 instructions at which
1087     * point, you may be better off doing a send.  With a SIMD8 program, 16
1088     * floats is 1/8 of the entire register file.  Any array larger than that
1089     * is likely to cause pressure issues.  Also, this value is sufficiently
1090     * high that the benchmarks known to suffer from large temporary array
1091     * issues are helped but nothing else in shader-db is hurt except for maybe
1092     * that one kerbal space program shader.
1093     */
1094    if (!(indirect_mask & nir_var_function_temp))
1095       OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
1096 
1097    /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
1098     * SSBOs, our back-end is capable of loading an entire vec4 at a time and
1099     * we would like to take advantage of that whenever possible regardless of
1100     * whether or not the app gives us full loads.  This should allow the
1101     * optimizer to combine UBO and SSBO load operations and save us some send
1102     * messages.
1103     */
1104    OPT(nir_lower_array_deref_of_vec,
1105        nir_var_mem_ubo | nir_var_mem_ssbo,
1106        nir_lower_direct_array_deref_of_vec_load);
1107 
1108    /* Clamp load_per_vertex_input of the TCS stage so that we do not generate
1109     * loads reading out of bounds. We can do this here because we called
1110     * nir_lower_system_values above.
1111     */
1112    if (nir->info.stage == MESA_SHADER_TESS_CTRL &&
1113        compiler->use_tcs_multi_patch)
1114       OPT(intel_nir_clamp_per_vertex_loads);
1115 
1116    /* Get rid of split copies */
1117    brw_nir_optimize(nir, devinfo);
1118 }
1119 
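/* Replace loads of the shader input variables selected by the zero_inputs
 * bitmask with a constant zero.  Used when linking mesh and fragment shaders
 * to zero gl_Layer/gl_ViewportIndex reads that the mesh shader never writes.
 */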
1120 static bool
1121 brw_nir_zero_inputs_instr(struct nir_builder *b, nir_intrinsic_instr *intrin,
1122                           void *data)
1123 {
1124    if (intrin->intrinsic != nir_intrinsic_load_deref)
1125       return false;
1126 
1127    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1128    if (!nir_deref_mode_is(deref, nir_var_shader_in))
1129       return false;
1130 
1131    if (deref->deref_type != nir_deref_type_var)
1132       return false;
1133 
1134    nir_variable *var = deref->var;
1135 
1136    uint64_t zero_inputs = *(uint64_t *)data;
1137    if (!(BITFIELD64_BIT(var->data.location) & zero_inputs))
1138       return false;
1139 
1140    b->cursor = nir_before_instr(&intrin->instr);
1141 
1142    nir_def *zero = nir_imm_zero(b, 1, 32);
1143 
1144    nir_def_rewrite_uses(&intrin->def, zero);
1145 
1146    nir_instr_remove(&intrin->instr);
1147 
1148    return true;
1149 }
1150 
1151 static bool
1152 brw_nir_zero_inputs(nir_shader *shader, uint64_t *zero_inputs)
1153 {
1154    return nir_shader_intrinsics_pass(shader, brw_nir_zero_inputs_instr,
1155                                      nir_metadata_block_index | nir_metadata_dominance,
1156                                      zero_inputs);
1157 }
1158 
1159 /* Code for Wa_18019110168 may have created input/output variables beyond
1160  * VARYING_SLOT_MAX and removed uses of variables below VARYING_SLOT_MAX.
1161  * Clean it up, so they all stay below VARYING_SLOT_MAX.
1162  */
1163 static void
1164 brw_mesh_compact_io(nir_shader *mesh, nir_shader *frag)
1165 {
1166    gl_varying_slot mapping[VARYING_SLOT_MAX] = {0, };
1167    gl_varying_slot cur = VARYING_SLOT_VAR0;
1168    bool compact = false;
1169 
1170    nir_foreach_shader_out_variable(var, mesh) {
1171       gl_varying_slot location = var->data.location;
1172       if (location < VARYING_SLOT_VAR0)
1173          continue;
1174       assert(location < ARRAY_SIZE(mapping));
1175 
1176       const struct glsl_type *type = var->type;
1177       if (nir_is_arrayed_io(var, MESA_SHADER_MESH) || var->data.per_view) {
1178          assert(glsl_type_is_array(type));
1179          type = glsl_get_array_element(type);
1180       }
1181 
1182       if (mapping[location])
1183          continue;
1184 
1185       unsigned num_slots = glsl_count_attribute_slots(type, false);
1186 
1187       compact |= location + num_slots > VARYING_SLOT_MAX;
1188 
1189       mapping[location] = cur;
1190       cur += num_slots;
1191    }
1192 
1193    if (!compact)
1194       return;
1195 
1196    /* The rest of this function should be hit only for Wa_18019110168. */
1197 
1198    nir_foreach_shader_out_variable(var, mesh) {
1199       gl_varying_slot location = var->data.location;
1200       if (location < VARYING_SLOT_VAR0)
1201          continue;
1202       location = mapping[location];
1203       if (location == 0)
1204          continue;
1205       var->data.location = location;
1206    }
1207 
1208    nir_foreach_shader_in_variable(var, frag) {
1209       gl_varying_slot location = var->data.location;
1210       if (location < VARYING_SLOT_VAR0)
1211          continue;
1212       location = mapping[location];
1213       if (location == 0)
1214          continue;
1215       var->data.location = location;
1216    }
1217 
1218    nir_shader_gather_info(mesh, nir_shader_get_entrypoint(mesh));
1219    nir_shader_gather_info(frag, nir_shader_get_entrypoint(frag));
1220 
1221    if (should_print_nir(mesh)) {
1222       printf("%s\n", __func__);
1223       nir_print_shader(mesh, stdout);
1224    }
1225    if (should_print_nir(frag)) {
1226       printf("%s\n", __func__);
1227       nir_print_shader(frag, stdout);
1228    }
1229 }
1230 
1231 void
1232 brw_nir_link_shaders(const struct brw_compiler *compiler,
1233                      nir_shader *producer, nir_shader *consumer)
1234 {
1235    const struct intel_device_info *devinfo = compiler->devinfo;
1236 
1237    if (producer->info.stage == MESA_SHADER_MESH &&
1238        consumer->info.stage == MESA_SHADER_FRAGMENT) {
1239       uint64_t fs_inputs = 0, ms_outputs = 0;
1240       /* gl_MeshPerPrimitiveEXT[].gl_ViewportIndex, gl_PrimitiveID and gl_Layer
1241        * are per primitive, but the fragment shader does not have them marked as
1242        * such. Add the annotation here.
1243        */
1244       nir_foreach_shader_in_variable(var, consumer) {
1245          fs_inputs |= BITFIELD64_BIT(var->data.location);
1246 
1247          switch (var->data.location) {
1248             case VARYING_SLOT_LAYER:
1249             case VARYING_SLOT_PRIMITIVE_ID:
1250             case VARYING_SLOT_VIEWPORT:
1251                var->data.per_primitive = 1;
1252                break;
1253             default:
1254                continue;
1255          }
1256       }
1257 
1258       nir_foreach_shader_out_variable(var, producer)
1259          ms_outputs |= BITFIELD64_BIT(var->data.location);
1260 
1261       uint64_t zero_inputs = ~ms_outputs & fs_inputs;
1262       zero_inputs &= BITFIELD64_BIT(VARYING_SLOT_LAYER) |
1263                      BITFIELD64_BIT(VARYING_SLOT_VIEWPORT);
1264 
1265       if (zero_inputs)
1266          NIR_PASS(_, consumer, brw_nir_zero_inputs, &zero_inputs);
1267    }
1268 
1269    nir_lower_io_arrays_to_elements(producer, consumer);
1270    nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
1271    nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
1272 
1273    NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
1274    NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
1275    brw_nir_optimize(producer, devinfo);
1276    brw_nir_optimize(consumer, devinfo);
1277 
1278    if (nir_link_opt_varyings(producer, consumer))
1279       brw_nir_optimize(consumer, devinfo);
1280 
1281    NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
1282    NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
1283 
1284    if (nir_remove_unused_varyings(producer, consumer)) {
1285       if (should_print_nir(producer)) {
1286          printf("nir_remove_unused_varyings\n");
1287          nir_print_shader(producer, stdout);
1288       }
1289       if (should_print_nir(consumer)) {
1290          printf("nir_remove_unused_varyings\n");
1291          nir_print_shader(consumer, stdout);
1292       }
1293 
1294       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1295       NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
1296 
1297       /* The backend might not be able to handle indirects on
1298        * temporaries so we need to lower indirects on any of the
1299        * varyings we have demoted here.
1300        */
1301       NIR_PASS(_, producer, nir_lower_indirect_derefs,
1302                   brw_nir_no_indirect_mask(compiler, producer->info.stage),
1303                   UINT32_MAX);
1304       NIR_PASS(_, consumer, nir_lower_indirect_derefs,
1305                   brw_nir_no_indirect_mask(compiler, consumer->info.stage),
1306                   UINT32_MAX);
1307 
1308       brw_nir_optimize(producer, devinfo);
1309       brw_nir_optimize(consumer, devinfo);
1310 
1311       if (producer->info.stage == MESA_SHADER_MESH &&
1312             consumer->info.stage == MESA_SHADER_FRAGMENT) {
1313          brw_mesh_compact_io(producer, consumer);
1314       }
1315    }
1316 
1317    NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
1318 
1319    if (producer->info.stage == MESA_SHADER_TESS_CTRL &&
1320        producer->options->vectorize_tess_levels)
1321       NIR_PASS_V(producer, nir_vectorize_tess_levels);
1322 
1323    NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
1324    NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);
1325 
1326    if (producer->info.stage != MESA_SHADER_TESS_CTRL &&
1327        producer->info.stage != MESA_SHADER_MESH &&
1328        producer->info.stage != MESA_SHADER_TASK) {
1329       /* Calling lower_io_to_vector creates output variable writes with
1330        * write-masks.  On non-TCS outputs, the back-end can't handle it and we
1331        * need to call nir_lower_io_to_temporaries to get rid of them.  This,
1332        * in turn, creates temporary variables and extra copy_deref intrinsics
1333        * that we need to clean up.
1334        *
1335        * Note that Mesh/Task don't support I/O as temporaries (I/O is shared
1336        * by the whole workgroup, possibly using multiple HW threads). For
1337        * those stages, output write-masks are handled by I/O lowering.
1338        */
1339       NIR_PASS_V(producer, nir_lower_io_to_temporaries,
1340                  nir_shader_get_entrypoint(producer), true, false);
1341       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1342       NIR_PASS(_, producer, nir_split_var_copies);
1343       NIR_PASS(_, producer, nir_lower_var_copies);
1344    }
1345 
1346    if (producer->info.stage == MESA_SHADER_TASK &&
1347          consumer->info.stage == MESA_SHADER_MESH) {
1348 
1349       for (unsigned i = 0; i < 3; ++i)
1350          assert(producer->info.mesh.ts_mesh_dispatch_dimensions[i] <= UINT16_MAX);
1351 
1352       nir_lower_compute_system_values_options options = {
1353             .lower_workgroup_id_to_index = true,
1354             .num_workgroups[0] = producer->info.mesh.ts_mesh_dispatch_dimensions[0],
1355             .num_workgroups[1] = producer->info.mesh.ts_mesh_dispatch_dimensions[1],
1356             .num_workgroups[2] = producer->info.mesh.ts_mesh_dispatch_dimensions[2],
1357             /* nir_lower_idiv generates expensive code */
1358             .shortcut_1d_workgroup_id = compiler->devinfo->verx10 >= 125,
1359       };
1360 
1361       NIR_PASS(_, consumer, nir_lower_compute_system_values, &options);
1362    }
1363 }
1364 
1365 bool
1366 brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
1367                              unsigned bit_size,
1368                              unsigned num_components,
1369                              nir_intrinsic_instr *low,
1370                              nir_intrinsic_instr *high,
1371                              void *data)
1372 {
1373    /* Don't combine things to generate 64-bit loads/stores.  We have to split
1374     * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
1375     * we don't want to make a mess for the back-end.
1376     */
1377    if (bit_size > 32)
1378       return false;
1379 
1380    if (low->intrinsic == nir_intrinsic_load_global_const_block_intel ||
1381        low->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
1382        low->intrinsic == nir_intrinsic_load_ssbo_uniform_block_intel ||
1383        low->intrinsic == nir_intrinsic_load_shared_uniform_block_intel ||
1384        low->intrinsic == nir_intrinsic_load_global_constant_uniform_block_intel) {
1385       if (num_components > 4) {
1386          if (!util_is_power_of_two_nonzero(num_components))
1387             return false;
1388 
1389          if (bit_size != 32)
1390             return false;
1391 
1392          if (num_components > 32)
1393             return false;
1394       }
1395    } else {
1396       /* We can handle at most a vec4 right now.  Anything bigger would get
1397        * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
1398        */
1399       if (num_components > 4)
1400          return false;
1401    }
1402 
1403 
1404    uint32_t align;
1405    if (align_offset)
1406       align = 1 << (ffs(align_offset) - 1);
1407    else
1408       align = align_mul;
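   /* Worked example (illustration only): align_mul = 16 with
    * align_offset = 12 gives align = 1 << (ffs(12) - 1) = 4, which is enough
    * for a combined 32-bit access (4 >= 32 / 8).  With align_offset = 2 the
    * combined alignment would only be 2 and the merge would be rejected.
    */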
1409 
1410    if (align < bit_size / 8)
1411       return false;
1412 
1413    return true;
1414 }
1415 
1416 static
1417 bool combine_all_memory_barriers(nir_intrinsic_instr *a,
1418                                  nir_intrinsic_instr *b,
1419                                  void *data)
1420 {
1421    /* Combine control barriers with identical memory semantics.  This
1422     * prevents the second barrier from generating a spurious fence message
1423     * identical to the first barrier's.
1424     */
1425    if (nir_intrinsic_memory_modes(a) == nir_intrinsic_memory_modes(b) &&
1426        nir_intrinsic_memory_semantics(a) == nir_intrinsic_memory_semantics(b) &&
1427        nir_intrinsic_memory_scope(a) == nir_intrinsic_memory_scope(b)) {
1428       nir_intrinsic_set_execution_scope(a, MAX2(nir_intrinsic_execution_scope(a),
1429                                                 nir_intrinsic_execution_scope(b)));
1430       return true;
1431    }
1432 
1433    /* Only combine pure memory barriers */
1434    if ((nir_intrinsic_execution_scope(a) != SCOPE_NONE) ||
1435        (nir_intrinsic_execution_scope(b) != SCOPE_NONE))
1436       return false;
1437 
1438    /* Translation to backend IR will get rid of modes we don't care about, so
1439     * no harm in always combining them.
1440     *
1441     * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
1442     * scheduling so that it can take advantage of the different semantics.
1443     */
1444    nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
1445                                      nir_intrinsic_memory_modes(b));
1446    nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
1447                                          nir_intrinsic_memory_semantics(b));
1448    nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
1449                                           nir_intrinsic_memory_scope(b)));
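   /* For example (schematic, not real NIR output): an SSBO-only barrier
    * followed by a shared-memory-only barrier, neither with an execution
    * scope, becomes a single barrier with modes ssbo|shared and the larger
    * of the two memory scopes, so the back-end emits one fence message
    * instead of two.
    */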
1450    return true;
1451 }
1452 
1453 static nir_mem_access_size_align
1454 get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
1455                           uint8_t bit_size, uint32_t align_mul, uint32_t align_offset,
1456                           bool offset_is_const, const void *cb_data)
1457 {
1458    const uint32_t align = nir_combined_align(align_mul, align_offset);
1459 
1460    switch (intrin) {
1461    case nir_intrinsic_load_ssbo:
1462    case nir_intrinsic_load_shared:
1463    case nir_intrinsic_load_scratch:
1464       /* When the offset is constant, we can use a 32-bit load and just
1465        * shift the result around as needed.
1466        */
1467       if (align < 4 && offset_is_const) {
1468          assert(util_is_power_of_two_nonzero(align_mul) && align_mul >= 4);
1469          const unsigned pad = align_offset % 4;
1470          const unsigned comps32 = MIN2(DIV_ROUND_UP(bytes + pad, 4), 4);
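         /* Worked example (illustration only): a 6-byte load with
          * align_mul = 8 and align_offset = 2 gives pad = 2 and
          * comps32 = DIV_ROUND_UP(6 + 2, 4) = 2, i.e. one aligned
          * 2-component 32-bit load that covers the requested bytes once
          * shifted.
          */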
1471          return (nir_mem_access_size_align) {
1472             .bit_size = 32,
1473             .num_components = comps32,
1474             .align = 4,
1475          };
1476       }
1477       break;
1478 
1479    case nir_intrinsic_load_task_payload:
1480       if (bytes < 4 || align < 4) {
1481          return (nir_mem_access_size_align) {
1482             .bit_size = 32,
1483             .num_components = 1,
1484             .align = 4,
1485          };
1486       }
1487       break;
1488 
1489    default:
1490       break;
1491    }
1492 
1493    const bool is_load = nir_intrinsic_infos[intrin].has_dest;
1494    const bool is_scratch = intrin == nir_intrinsic_load_scratch ||
1495                            intrin == nir_intrinsic_store_scratch;
1496 
1497    if (align < 4 || bytes < 4) {
1498       /* Choose a byte, word, or dword */
1499       bytes = MIN2(bytes, 4);
1500       if (bytes == 3)
1501          bytes = is_load ? 4 : 2;
1502 
1503       if (is_scratch) {
1504          /* Scratch address swizzling in the back-end happens at DWORD
1505           * granularity, so a single load or store can't cross a DWORD
1506           * boundary.
1507           */
1508          if ((align_offset % 4) + bytes > MIN2(align_mul, 4))
1509             bytes = MIN2(align_mul, 4) - (align_offset % 4);
1510 
1511          /* Must be a power of two */
1512          if (bytes == 3)
1513             bytes = 2;
1514       }
1515 
1516       return (nir_mem_access_size_align) {
1517          .bit_size = bytes * 8,
1518          .num_components = 1,
1519          .align = 1,
1520       };
1521    } else {
1522       bytes = MIN2(bytes, 16);
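      /* For instance (illustration): a 13-byte access becomes a 4-component
       * 32-bit load, with the extra bytes discarded, but only a 3-component
       * 32-bit store; the lowering then finishes the store with narrower
       * accesses for the remaining byte.
       */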
1523       return (nir_mem_access_size_align) {
1524          .bit_size = 32,
1525          .num_components = is_scratch ? 1 :
1526                            is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
1527          .align = 4,
1528       };
1529    }
1530 }
1531 
1532 static void
1533 brw_vectorize_lower_mem_access(nir_shader *nir,
1534                                const struct brw_compiler *compiler,
1535                                enum brw_robustness_flags robust_flags)
1536 {
1537    bool progress = false;
1538 
1539    nir_load_store_vectorize_options options = {
1540       .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
1541                nir_var_mem_global | nir_var_mem_shared |
1542                nir_var_mem_task_payload,
1543       .callback = brw_nir_should_vectorize_mem,
1544       .robust_modes = (nir_variable_mode)0,
1545    };
1546 
1547    if (robust_flags & BRW_ROBUSTNESS_UBO)
1548       options.robust_modes |= nir_var_mem_ubo | nir_var_mem_global;
1549    if (robust_flags & BRW_ROBUSTNESS_SSBO)
1550       options.robust_modes |= nir_var_mem_ssbo | nir_var_mem_global;
1551 
1552    OPT(nir_opt_load_store_vectorize, &options);
1553 
1554    /* Required for nir_divergence_analysis() */
1555    OPT(nir_convert_to_lcssa, true, true);
1556 
1557    /* When HW supports block loads, using the divergence analysis, try
1558     * to find uniform SSBO loads and turn them into block loads.
1559     *
1560     * Rerun the vectorizer after that to make the largest possible block
1561     * loads.
1562     *
1563     * This is a win on two fronts:
1564     *   - fewer send messages
1565     *   - reduced register pressure
1566     */
1567    nir_divergence_analysis(nir);
1568    if (OPT(intel_nir_blockify_uniform_loads, compiler->devinfo))
1569       OPT(nir_opt_load_store_vectorize, &options);
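   /* As a sketch of what this buys: if the divergence analysis proves that
    * the offset of a load_ssbo is uniform across the subgroup, the load can
    * be turned into a single load_ssbo_uniform_block_intel, and re-running
    * the vectorizer lets adjacent blockified loads merge into one larger
    * block load (fewer sends, less register pressure).
    */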
1570    OPT(nir_opt_remove_phis);
1571 
1572    nir_lower_mem_access_bit_sizes_options mem_access_options = {
1573       .modes = nir_var_mem_ssbo |
1574                nir_var_mem_constant |
1575                nir_var_mem_task_payload |
1576                nir_var_shader_temp |
1577                nir_var_function_temp |
1578                nir_var_mem_global |
1579                nir_var_mem_shared,
1580       .callback = get_mem_access_size_align,
1581    };
1582    OPT(nir_lower_mem_access_bit_sizes, &mem_access_options);
1583 
1584    while (progress) {
1585       progress = false;
1586 
1587       OPT(nir_lower_pack);
1588       OPT(nir_copy_prop);
1589       OPT(nir_opt_dce);
1590       OPT(nir_opt_cse);
1591       OPT(nir_opt_algebraic);
1592       OPT(nir_opt_constant_folding);
1593    }
1594 }
1595 
1596 static bool
1597 nir_shader_has_local_variables(const nir_shader *nir)
1598 {
1599    nir_foreach_function_impl(impl, nir) {
1600       if (!exec_list_is_empty(&impl->locals))
1601          return true;
1602    }
1603 
1604    return false;
1605 }
1606 
1607 /* Prepare the given shader for codegen
1608  *
1609  * This function is intended to be called right before going into the actual
1610  * backend and is highly backend-specific.  Also, once this function has been
1611  * called on a shader, it will no longer be in SSA form so most optimizations
1612  * will not work.
1613  */
1614 void
1615 brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
1616                     bool debug_enabled,
1617                     enum brw_robustness_flags robust_flags)
1618 {
1619    const struct intel_device_info *devinfo = compiler->devinfo;
1620 
1621    UNUSED bool progress; /* Written by OPT */
1622 
1623    OPT(intel_nir_lower_sparse_intrinsics);
1624 
1625    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1626 
1627    OPT(nir_opt_combine_barriers, combine_all_memory_barriers, NULL);
1628 
1629    do {
1630       progress = false;
1631       OPT(nir_opt_algebraic_before_ffma);
1632    } while (progress);
1633 
1634    if (devinfo->verx10 >= 125) {
1635       /* Lower integer division by constants before nir_lower_idiv. */
1636       OPT(nir_opt_idiv_const, 32);
1637       const nir_lower_idiv_options options = {
1638          .allow_fp16 = false
1639       };
1640       OPT(nir_lower_idiv, &options);
1641    }
1642 
1643    if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage))
1644       NIR_PASS(_, nir, intel_nir_lower_shading_rate_output);
1645 
1646    brw_nir_optimize(nir, devinfo);
1647 
1648    if (nir_shader_has_local_variables(nir)) {
1649       OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
1650           glsl_get_natural_size_align_bytes);
1651       OPT(nir_lower_explicit_io, nir_var_function_temp,
1652           nir_address_format_32bit_offset);
1653       brw_nir_optimize(nir, devinfo);
1654    }
1655 
1656    brw_vectorize_lower_mem_access(nir, compiler, robust_flags);
1657 
1658    if (OPT(nir_lower_int64))
1659       brw_nir_optimize(nir, devinfo);
1660 
1661    /* Try to fuse multiply-adds; if successful, run shrink_vectors to keep
1662     * peephole_ffma from generating things like this:
1663     *    vec16 ssa_0 = ...
1664     *    vec16 ssa_1 = fneg ssa_0
1665     *    vec1  ssa_2 = ffma ssa_1, ...
1666     *
1667     * We want this instead:
1668     *    vec16 ssa_0 = ...
1669     *    vec1  ssa_1 = fneg ssa_0.x
1670     *    vec1  ssa_2 = ffma ssa_1, ...
1671     */
1672    if (OPT(intel_nir_opt_peephole_ffma))
1673       OPT(nir_opt_shrink_vectors);
1674 
1675    OPT(intel_nir_opt_peephole_imul32x16);
1676 
1677    if (OPT(nir_opt_comparison_pre)) {
1678       OPT(nir_copy_prop);
1679       OPT(nir_opt_dce);
1680       OPT(nir_opt_cse);
1681 
1682       /* Do the select peephole again.  nir_opt_comparison_pre (combined with
1683        * the other optimization passes) will have removed at least one
1684        * instruction from one of the branches of the if-statement, so now it
1685        * might be under the threshold of conversion to bcsel.
1686        */
1687       OPT(nir_opt_peephole_select, 0, false, false);
1688       OPT(nir_opt_peephole_select, 1, false, true);
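      /* Schematically (illustration only), the peephole turns
       *
       *    if (cond) { x = a; } else { x = b; }
       *
       * into x = bcsel(cond, a, b) when each branch stays below the given
       * instruction-count threshold (0 here, then 1 while also allowing
       * more expensive ALU instructions).
       */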
1689    }
1690 
1691    do {
1692       progress = false;
1693       if (OPT(nir_opt_algebraic_late)) {
1694          OPT(nir_opt_constant_folding);
1695          OPT(nir_copy_prop);
1696          OPT(nir_opt_dce);
1697          OPT(nir_opt_cse);
1698       }
1699    } while (progress);
1700 
1701 
1702    if (OPT(nir_lower_fp16_casts, nir_lower_fp16_split_fp64)) {
1703       if (OPT(nir_lower_int64)) {
1704          brw_nir_optimize(nir, devinfo);
1705       }
1706    }
1707 
1708    OPT(intel_nir_lower_conversions);
1709 
1710    OPT(nir_lower_alu_to_scalar, NULL, NULL);
1711 
1712    while (OPT(nir_opt_algebraic_distribute_src_mods)) {
1713       OPT(nir_opt_constant_folding);
1714       OPT(nir_copy_prop);
1715       OPT(nir_opt_dce);
1716       OPT(nir_opt_cse);
1717    }
1718 
1719    OPT(nir_copy_prop);
1720    OPT(nir_opt_dce);
1721    OPT(nir_opt_move, nir_move_comparisons);
1722    OPT(nir_opt_dead_cf);
1723 
1724    bool divergence_analysis_dirty = false;
1725    NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1726    NIR_PASS_V(nir, nir_divergence_analysis);
1727 
1728    static const nir_lower_subgroups_options subgroups_options = {
1729       .ballot_bit_size = 32,
1730       .ballot_components = 1,
1731       .lower_elect = true,
1732       .lower_subgroup_masks = true,
1733    };
1734 
1735    if (OPT(nir_opt_uniform_atomics)) {
1736       OPT(nir_lower_subgroups, &subgroups_options);
1737 
1738       if (OPT(nir_lower_int64))
1739          brw_nir_optimize(nir, devinfo);
1740 
1741       divergence_analysis_dirty = true;
1742    }
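   /* Sketch of the transformation: a subgroup in which every invocation does
    * atomic_add(counter, 1) can instead perform one atomic_add(counter, N)
    * from a single invocation, with each invocation's return value
    * reconstructed from a subgroup scan; that is why the extra subgroup
    * lowering and int64 cleanup run right after the pass.
    */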
1743 
1744    /* nir_opt_uniform_subgroup can create some operations (e.g.,
1745     * load_subgroup_lt_mask) that need to be lowered again.
1746     */
1747    if (OPT(nir_opt_uniform_subgroup, &subgroups_options)) {
1748       /* Some of the optimizations can generate 64-bit integer multiplication
1749        * that must be lowered.
1750        */
1751       if (OPT(nir_lower_int64))
1752          brw_nir_optimize(nir, devinfo);
1753 
1754       OPT(nir_lower_subgroups, &subgroups_options);
1755    }
1756 
1757    /* Do this only after the last opt_gcm. GCM will undo this lowering. */
1758    if (nir->info.stage == MESA_SHADER_FRAGMENT) {
1759       if (divergence_analysis_dirty) {
1760          NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1761          NIR_PASS_V(nir, nir_divergence_analysis);
1762       }
1763 
1764       OPT(intel_nir_lower_non_uniform_barycentric_at_sample);
1765    }
1766 
1767    /* Clean up LCSSA phis */
1768    OPT(nir_opt_remove_phis);
1769 
1770    OPT(nir_lower_bool_to_int32);
1771    OPT(nir_copy_prop);
1772    OPT(nir_opt_dce);
1773 
1774    OPT(nir_lower_locals_to_regs, 32);
1775 
1776    if (unlikely(debug_enabled)) {
1777       /* Re-index SSA defs so we print more sensible numbers. */
1778       nir_foreach_function_impl(impl, nir) {
1779          nir_index_ssa_defs(impl);
1780       }
1781 
1782       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
1783               _mesa_shader_stage_to_string(nir->info.stage));
1784       nir_print_shader(nir, stderr);
1785    }
1786 
1787    nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");
1788 
1789    /* Rerun the divergence analysis before convert_from_ssa, as that pass
1790     * asserts that the divergence information is consistent.
1791     */
1792    NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1793    NIR_PASS_V(nir, nir_divergence_analysis);
1794    OPT(nir_opt_remove_phis);
1795 
1796    OPT(nir_convert_from_ssa, true);
1797 
1798    OPT(nir_opt_dce);
1799 
1800    if (OPT(nir_opt_rematerialize_compares))
1801       OPT(nir_opt_dce);
1802 
1803    OPT(nir_opt_dce);
1804 
1805    /* The mesh stages require this pass to run as late as possible, but if
1806     * it does anything it will also constant fold, which would undo the work
1807     * done by nir_trivialize_registers, so call it right before that pass
1808     * instead.
1809     */
1810    if (nir->info.stage == MESA_SHADER_MESH ||
1811        nir->info.stage == MESA_SHADER_TASK)
1812       brw_nir_adjust_payload(nir);
1813 
1814    nir_trivialize_registers(nir);
1815 
1816    nir_sweep(nir);
1817 
1818    if (unlikely(debug_enabled)) {
1819       fprintf(stderr, "NIR (final form) for %s shader:\n",
1820               _mesa_shader_stage_to_string(nir->info.stage));
1821       nir_print_shader(nir, stderr);
1822    }
1823 }
1824 
1825 static unsigned
1826 get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
1827 {
1828    switch (info->subgroup_size) {
1829    case SUBGROUP_SIZE_API_CONSTANT:
1830       /* We have to use the global constant size. */
1831       return BRW_SUBGROUP_SIZE;
1832 
1833    case SUBGROUP_SIZE_UNIFORM:
1834       /* It has to be uniform across all invocations but can vary per stage
1835        * if we want.  This gives us a bit more freedom.
1836        *
1837        * For compute, brw_nir_apply_key is called per-dispatch-width so this
1838        * is the actual subgroup size and not a maximum.  However, we only
1839        * invoke one size of any given compute shader so it's still guaranteed
1840        * to be uniform across invocations.
1841        */
1842       return max_subgroup_size;
1843 
1844    case SUBGROUP_SIZE_VARYING:
1845       /* The subgroup size is allowed to be fully varying.  For geometry
1846        * stages, we know it's always 8 which is max_subgroup_size so we can
1847        * return that.  For compute, brw_nir_apply_key is called once per
1848        * dispatch-width so max_subgroup_size is the real subgroup size.
1849        *
1850        * For fragment, we return 0 and let it fall through to the back-end
1851        * compiler.  This means we can't optimize based on subgroup size but
1852        * that's a risk the client took when it asked for a varying subgroup
1853        * size.
1854        */
1855       return info->stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;
1856 
1857    case SUBGROUP_SIZE_REQUIRE_8:
1858    case SUBGROUP_SIZE_REQUIRE_16:
1859    case SUBGROUP_SIZE_REQUIRE_32:
1860       assert(gl_shader_stage_uses_workgroup(info->stage) ||
1861              (info->stage >= MESA_SHADER_RAYGEN && info->stage <= MESA_SHADER_CALLABLE));
1862       /* These enum values are expressly chosen to be equal to the subgroup
1863        * size that they require.
1864        */
1865       return info->subgroup_size;
1866 
1867    case SUBGROUP_SIZE_FULL_SUBGROUPS:
1868    case SUBGROUP_SIZE_REQUIRE_64:
1869    case SUBGROUP_SIZE_REQUIRE_128:
1870       break;
1871    }
1872 
1873    unreachable("Invalid subgroup size type");
1874 }
1875 
1876 unsigned
1877 brw_nir_api_subgroup_size(const nir_shader *nir,
1878                           unsigned hw_subgroup_size)
1879 {
1880    return get_subgroup_size(&nir->info, hw_subgroup_size);
1881 }
1882 
1883 void
1884 brw_nir_apply_key(nir_shader *nir,
1885                   const struct brw_compiler *compiler,
1886                   const struct brw_base_prog_key *key,
1887                   unsigned max_subgroup_size)
1888 {
1889    bool progress = false;
1890 
1891    nir_lower_tex_options nir_tex_opts = {
1892       .lower_txd_clamp_bindless_sampler = true,
1893       .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
1894       .lower_invalid_implicit_lod = true,
1895       .lower_index_to_offset = true,
1896    };
1897    OPT(nir_lower_tex, &nir_tex_opts);
1898 
1899    const struct intel_nir_lower_texture_opts tex_opts = {
1900       .combined_lod_and_array_index = compiler->devinfo->ver >= 20,
1901    };
1902    OPT(intel_nir_lower_texture, &tex_opts);
1903 
1904    const nir_lower_subgroups_options subgroups_options = {
1905       .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size),
1906       .ballot_bit_size = 32,
1907       .ballot_components = 1,
1908       .lower_subgroup_masks = true,
1909    };
1910    OPT(nir_lower_subgroups, &subgroups_options);
1911 
1912    if (key->limit_trig_input_range)
1913       OPT(brw_nir_limit_trig_input_range_workaround);
1914 
1915    if (progress) {
1916       brw_nir_optimize(nir, compiler->devinfo);
1917    }
1918 }
1919 
1920 enum brw_conditional_mod
1921 brw_cmod_for_nir_comparison(nir_op op)
1922 {
1923    switch (op) {
1924    case nir_op_flt:
1925    case nir_op_flt32:
1926    case nir_op_ilt:
1927    case nir_op_ilt32:
1928    case nir_op_ult:
1929    case nir_op_ult32:
1930       return BRW_CONDITIONAL_L;
1931 
1932    case nir_op_fge:
1933    case nir_op_fge32:
1934    case nir_op_ige:
1935    case nir_op_ige32:
1936    case nir_op_uge:
1937    case nir_op_uge32:
1938       return BRW_CONDITIONAL_GE;
1939 
1940    case nir_op_feq:
1941    case nir_op_feq32:
1942    case nir_op_ieq:
1943    case nir_op_ieq32:
1944    case nir_op_b32all_fequal2:
1945    case nir_op_b32all_iequal2:
1946    case nir_op_b32all_fequal3:
1947    case nir_op_b32all_iequal3:
1948    case nir_op_b32all_fequal4:
1949    case nir_op_b32all_iequal4:
1950       return BRW_CONDITIONAL_Z;
1951 
1952    case nir_op_fneu:
1953    case nir_op_fneu32:
1954    case nir_op_ine:
1955    case nir_op_ine32:
1956    case nir_op_b32any_fnequal2:
1957    case nir_op_b32any_inequal2:
1958    case nir_op_b32any_fnequal3:
1959    case nir_op_b32any_inequal3:
1960    case nir_op_b32any_fnequal4:
1961    case nir_op_b32any_inequal4:
1962       return BRW_CONDITIONAL_NZ;
1963 
1964    default:
1965       unreachable("Unsupported NIR comparison op");
1966    }
1967 }
1968 
1969 enum lsc_opcode
1970 lsc_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
1971 {
1972    switch (nir_intrinsic_atomic_op(atomic)) {
1973    case nir_atomic_op_iadd: {
1974       unsigned src_idx;
1975       switch (atomic->intrinsic) {
1976       case nir_intrinsic_image_atomic:
1977       case nir_intrinsic_bindless_image_atomic:
1978          src_idx = 3;
1979          break;
1980       case nir_intrinsic_ssbo_atomic:
1981          src_idx = 2;
1982          break;
1983       case nir_intrinsic_shared_atomic:
1984       case nir_intrinsic_global_atomic:
1985          src_idx = 1;
1986          break;
1987       default:
1988          unreachable("Invalid add atomic opcode");
1989       }
1990 
1991       if (nir_src_is_const(atomic->src[src_idx])) {
1992          int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
1993          if (add_val == 1)
1994             return LSC_OP_ATOMIC_INC;
1995          else if (add_val == -1)
1996             return LSC_OP_ATOMIC_DEC;
1997       }
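      /* e.g. (illustration) an ssbo_atomic add of constant +1 maps to
       * LSC_OP_ATOMIC_INC and of constant -1 to LSC_OP_ATOMIC_DEC, which
       * should spare the message the explicit data operand of a full ADD.
       */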
1998       return LSC_OP_ATOMIC_ADD;
1999    }
2000 
2001    case nir_atomic_op_imin: return LSC_OP_ATOMIC_MIN;
2002    case nir_atomic_op_umin: return LSC_OP_ATOMIC_UMIN;
2003    case nir_atomic_op_imax: return LSC_OP_ATOMIC_MAX;
2004    case nir_atomic_op_umax: return LSC_OP_ATOMIC_UMAX;
2005    case nir_atomic_op_iand: return LSC_OP_ATOMIC_AND;
2006    case nir_atomic_op_ior:  return LSC_OP_ATOMIC_OR;
2007    case nir_atomic_op_ixor: return LSC_OP_ATOMIC_XOR;
2008    case nir_atomic_op_xchg: return LSC_OP_ATOMIC_STORE;
2009    case nir_atomic_op_cmpxchg: return LSC_OP_ATOMIC_CMPXCHG;
2010 
2011    case nir_atomic_op_fmin: return LSC_OP_ATOMIC_FMIN;
2012    case nir_atomic_op_fmax: return LSC_OP_ATOMIC_FMAX;
2013    case nir_atomic_op_fcmpxchg: return LSC_OP_ATOMIC_FCMPXCHG;
2014    case nir_atomic_op_fadd: return LSC_OP_ATOMIC_FADD;
2015 
2016    default:
2017       unreachable("Unsupported NIR atomic intrinsic");
2018    }
2019 }
2020 
2021 enum brw_reg_type
2022 brw_type_for_nir_type(const struct intel_device_info *devinfo,
2023                       nir_alu_type type)
2024 {
2025    switch (type) {
2026    case nir_type_uint:
2027    case nir_type_uint32:
2028       return BRW_REGISTER_TYPE_UD;
2029    case nir_type_bool:
2030    case nir_type_int:
2031    case nir_type_bool32:
2032    case nir_type_int32:
2033       return BRW_REGISTER_TYPE_D;
2034    case nir_type_float:
2035    case nir_type_float32:
2036       return BRW_REGISTER_TYPE_F;
2037    case nir_type_float16:
2038       return BRW_REGISTER_TYPE_HF;
2039    case nir_type_float64:
2040       return BRW_REGISTER_TYPE_DF;
2041    case nir_type_int64:
2042       return BRW_REGISTER_TYPE_Q;
2043    case nir_type_uint64:
2044       return BRW_REGISTER_TYPE_UQ;
2045    case nir_type_int16:
2046       return BRW_REGISTER_TYPE_W;
2047    case nir_type_uint16:
2048       return BRW_REGISTER_TYPE_UW;
2049    case nir_type_int8:
2050       return BRW_REGISTER_TYPE_B;
2051    case nir_type_uint8:
2052       return BRW_REGISTER_TYPE_UB;
2053    default:
2054       unreachable("unknown type");
2055    }
2056 
2057    return BRW_REGISTER_TYPE_F;
2058 }
2059 
2060 nir_shader *
2061 brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
2062                                const struct brw_tcs_prog_key *key)
2063 {
2064    assert(key->input_vertices > 0);
2065 
2066    const nir_shader_compiler_options *options =
2067       compiler->nir_options[MESA_SHADER_TESS_CTRL];
2068 
2069    uint64_t inputs_read = key->outputs_written &
2070       ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
2071 
2072    unsigned locations[64];
2073    unsigned num_locations = 0;
2074 
2075    u_foreach_bit64(varying, inputs_read)
2076       locations[num_locations++] = varying;
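   /* For example (illustration): outputs_written = POS | PSIZ |
    * TESS_LEVEL_OUTER yields inputs_read = POS | PSIZ and
    * locations[] = { VARYING_SLOT_POS, VARYING_SLOT_PSIZ }, i.e. only the
    * per-vertex varyings are passed through.
    */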
2077 
2078    nir_shader *nir =
2079       nir_create_passthrough_tcs_impl(options, locations, num_locations,
2080                                       key->input_vertices);
2081 
2082    ralloc_steal(mem_ctx, nir);
2083 
2084    nir->info.inputs_read = inputs_read;
2085    nir->info.tess._primitive_mode = key->_tes_primitive_mode;
2086    nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
2087 
2088    struct brw_nir_compiler_opts opts = {};
2089    brw_preprocess_nir(compiler, nir, &opts);
2090 
2091    return nir;
2092 }
2093 
2094 nir_def *
2095 brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
2096       nir_def *base_addr, unsigned off)
2097 {
2098    assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
2099 
2100    unsigned bit_size = load_uniform->def.bit_size;
2101    assert(bit_size >= 8 && bit_size % 8 == 0);
2102    unsigned byte_size = bit_size / 8;
2103    nir_def *sysval;
2104 
2105    if (nir_src_is_const(load_uniform->src[0])) {
2106       uint64_t offset = off +
2107                         nir_intrinsic_base(load_uniform) +
2108                         nir_src_as_uint(load_uniform->src[0]);
2109 
2110       /* Things should be component-aligned. */
2111       assert(offset % byte_size == 0);
2112 
2113       unsigned suboffset = offset % 64;
2114       uint64_t aligned_offset = offset - suboffset;
2115 
2116       /* Load two just in case we go over a 64B boundary */
2117       nir_def *data[2];
2118       for (unsigned i = 0; i < 2; i++) {
2119          nir_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
2120          data[i] = nir_load_global_const_block_intel(b, 16, addr,
2121                                                      nir_imm_true(b));
2122       }
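      /* Worked example (illustration): offset = 120 for a vec4 of 32-bit
       * values covers bytes 120..135, which straddles the 128-byte line, so
       * the second 64-byte block (starting at aligned_offset + 64 = 128) is
       * really needed; nir_extract_bits then pulls the 16 bytes starting at
       * suboffset = 56 out of the 128 loaded bytes.
       */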
2123 
2124       sysval = nir_extract_bits(b, data, 2, suboffset * 8,
2125                                 load_uniform->num_components, bit_size);
2126    } else {
2127       nir_def *offset32 =
2128          nir_iadd_imm(b, load_uniform->src[0].ssa,
2129                          off + nir_intrinsic_base(load_uniform));
2130       nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
2131       sysval = nir_load_global_constant(b, addr, byte_size,
2132                                         load_uniform->num_components, bit_size);
2133    }
2134 
2135    return sysval;
2136 }
2137 
2138 const struct glsl_type *
2139 brw_nir_get_var_type(const struct nir_shader *nir, nir_variable *var)
2140 {
2141    const struct glsl_type *type = var->interface_type;
2142    if (!type) {
2143       type = var->type;
2144       if (nir_is_arrayed_io(var, nir->info.stage) || var->data.per_view) {
2145          assert(glsl_type_is_array(type));
2146          type = glsl_get_array_element(type);
2147       }
2148    }
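   /* For instance (illustration): a TCS per-vertex input declared as an
    * array over vertices, e.g. "in vec4 foo[]", shows up here as an array of
    * vec4; stripping the vertex dimension returns the vec4 element type the
    * callers actually care about.
    */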
2149 
2150    return type;
2151 }
2152 
2153