1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_nir.h"
25 #include "brw_nir_rt.h"
26 #include "brw_shader.h"
27 #include "dev/intel_debug.h"
28 #include "compiler/glsl_types.h"
29 #include "compiler/nir/nir_builder.h"
30 #include "util/u_math.h"
31
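/* Remap tessellation level stores/loads (gl_TessLevelInner/Outer) from their
 * GL varying slots onto the DWords of the Intel patch URB header, whose
 * layout depends on the tessellation domain. Returns true if the intrinsic
 * was a tess level access (removing it when the access is out of bounds).
 */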
32 static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  enum tess_primitive_mode _primitive_mode)
35 {
36 const int location = nir_intrinsic_base(intr);
37 const unsigned component = nir_intrinsic_component(intr);
38 bool out_of_bounds;
39
40 if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
41 switch (_primitive_mode) {
42 case TESS_PRIMITIVE_QUADS:
43 /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
44 nir_intrinsic_set_base(intr, 0);
45 nir_intrinsic_set_component(intr, 3 - component);
46 out_of_bounds = false;
47 break;
48 case TESS_PRIMITIVE_TRIANGLES:
49 /* gl_TessLevelInner[0] lives at DWord 4. */
50 nir_intrinsic_set_base(intr, 1);
51 out_of_bounds = component > 0;
52 break;
53 case TESS_PRIMITIVE_ISOLINES:
54 out_of_bounds = true;
55 break;
56 default:
57 unreachable("Bogus tessellation domain");
58 }
59 } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
60 if (_primitive_mode == TESS_PRIMITIVE_ISOLINES) {
61 /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
62 nir_intrinsic_set_base(intr, 1);
63 nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
64 out_of_bounds = component > 1;
65 } else {
66 /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
67 nir_intrinsic_set_base(intr, 1);
68 nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
69 out_of_bounds = component == 3 && _primitive_mode == TESS_PRIMITIVE_TRIANGLES;
70 }
71 } else {
72 return false;
73 }
74
75 if (out_of_bounds) {
76 if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
77 b->cursor = nir_before_instr(&intr->instr);
78 nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
79 nir_ssa_def_rewrite_uses(&intr->dest.ssa, undef);
80 }
81 nir_instr_remove(&intr->instr);
82 }
83
84 return true;
85 }
86
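/* Identify the intrinsics that access shader inputs and outputs; these are
 * the ones remap_patch_urb_offsets() needs to rewrite.
 */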
87 static bool
is_input(nir_intrinsic_instr *intrin)
89 {
90 return intrin->intrinsic == nir_intrinsic_load_input ||
91 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
92 intrin->intrinsic == nir_intrinsic_load_interpolated_input;
93 }
94
95 static bool
is_output(nir_intrinsic_instr *intrin)
97 {
98 return intrin->intrinsic == nir_intrinsic_load_output ||
99 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
100 intrin->intrinsic == nir_intrinsic_store_output ||
101 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
102 }
103
104
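/* Remap TCS output and TES input intrinsics from GL varying locations to
 * VUE map slots, folding any per-vertex index into the URB offset.
 * Tessellation levels get special treatment via remap_tess_levels(), except
 * in the passthrough TCS.
 */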
105 static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        enum tess_primitive_mode tes_primitive_mode)
109 {
110 const bool is_passthrough_tcs = b->shader->info.name &&
111 strcmp(b->shader->info.name, "passthrough TCS") == 0;
112
113 nir_foreach_instr_safe(instr, block) {
114 if (instr->type != nir_instr_type_intrinsic)
115 continue;
116
117 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
118
119 gl_shader_stage stage = b->shader->info.stage;
120
121 if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
122 (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
123
124 if (!is_passthrough_tcs &&
125 remap_tess_levels(b, intrin, tes_primitive_mode))
126 continue;
127
128 int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
129 assert(vue_slot != -1);
130 intrin->const_index[0] = vue_slot;
131
132 nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
133 if (vertex) {
134 if (nir_src_is_const(*vertex)) {
135 intrin->const_index[0] += nir_src_as_uint(*vertex) *
136 vue_map->num_per_vertex_slots;
137 } else {
138 b->cursor = nir_before_instr(&intrin->instr);
139
140 /* Multiply by the number of per-vertex slots. */
141 nir_ssa_def *vertex_offset =
142 nir_imul(b,
143 nir_ssa_for_src(b, *vertex, 1),
144 nir_imm_int(b,
145 vue_map->num_per_vertex_slots));
146
147 /* Add it to the existing offset */
148 nir_src *offset = nir_get_io_offset_src(intrin);
149 nir_ssa_def *total_offset =
150 nir_iadd(b, vertex_offset,
151 nir_ssa_for_src(b, *offset, 1));
152
153 nir_instr_rewrite_src(&intrin->instr, offset,
154 nir_src_for_ssa(total_offset));
155 }
156 }
157 }
158 }
159 return true;
160 }
161
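/* Lower VS inputs: assign driver locations, lower I/O to offsets, apply
 * attribute workarounds, and remap VERT_ATTRIB_* slots (plus gl_VertexID
 * and friends, which the VF stores as extra vertex elements) onto the
 * packed set of enabled attributes.
 */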
162 void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        bool edgeflag_is_last,
                        const uint8_t *vs_attrib_wa_flags)
166 {
167 /* Start with the location of the variable's base. */
168 nir_foreach_shader_in_variable(var, nir)
169 var->data.driver_location = var->data.location;
170
171 /* Now use nir_lower_io to walk dereference chains. Attribute arrays are
172 * loaded as one vec4 or dvec4 per element (or matrix column), depending on
173 * whether it is a double-precision type or not.
174 */
175 nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
176 nir_lower_io_lower_64bit_to_32);
177
178 /* This pass needs actual constants */
179 nir_opt_constant_folding(nir);
180
181 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
182
183 brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
184
185 /* The last step is to remap VERT_ATTRIB_* to actual registers */
186
187 /* Whether or not we have any system generated values. gl_DrawID is not
188 * included here as it lives in its own vec4.
189 */
190 const bool has_sgvs =
191 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
192 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
193 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
194 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
195
196 const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
197
198 nir_foreach_function(function, nir) {
199 if (!function->impl)
200 continue;
201
202 nir_builder b;
203 nir_builder_init(&b, function->impl);
204
205 nir_foreach_block(block, function->impl) {
206 nir_foreach_instr_safe(instr, block) {
207 if (instr->type != nir_instr_type_intrinsic)
208 continue;
209
210 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
211
212 switch (intrin->intrinsic) {
213 case nir_intrinsic_load_first_vertex:
214 case nir_intrinsic_load_base_instance:
215 case nir_intrinsic_load_vertex_id_zero_base:
216 case nir_intrinsic_load_instance_id:
217 case nir_intrinsic_load_is_indexed_draw:
218 case nir_intrinsic_load_draw_id: {
219 b.cursor = nir_after_instr(&intrin->instr);
220
221 /* gl_VertexID and friends are stored by the VF as the last
222 * vertex element. We convert them to load_input intrinsics at
223 * the right location.
224 */
225 nir_intrinsic_instr *load =
226 nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
227 load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
228
229 nir_intrinsic_set_base(load, num_inputs);
230 switch (intrin->intrinsic) {
231 case nir_intrinsic_load_first_vertex:
232 nir_intrinsic_set_component(load, 0);
233 break;
234 case nir_intrinsic_load_base_instance:
235 nir_intrinsic_set_component(load, 1);
236 break;
237 case nir_intrinsic_load_vertex_id_zero_base:
238 nir_intrinsic_set_component(load, 2);
239 break;
240 case nir_intrinsic_load_instance_id:
241 nir_intrinsic_set_component(load, 3);
242 break;
243 case nir_intrinsic_load_draw_id:
244 case nir_intrinsic_load_is_indexed_draw:
245 /* gl_DrawID and IsIndexedDraw are stored right after
246 * gl_VertexID and friends if any of them exist.
247 */
248 nir_intrinsic_set_base(load, num_inputs + has_sgvs);
249 if (intrin->intrinsic == nir_intrinsic_load_draw_id)
250 nir_intrinsic_set_component(load, 0);
251 else
252 nir_intrinsic_set_component(load, 1);
253 break;
254 default:
255 unreachable("Invalid system value intrinsic");
256 }
257
258 load->num_components = 1;
259 nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
260 nir_builder_instr_insert(&b, &load->instr);
261
262 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
263 &load->dest.ssa);
264 nir_instr_remove(&intrin->instr);
265 break;
266 }
267
268 case nir_intrinsic_load_input: {
269 /* Attributes come in a contiguous block, ordered by their
270 * gl_vert_attrib value. That means we can compute the slot
271 * number for an attribute by masking out the enabled attributes
272 * before it and counting the bits.
273 */
274 int attr = nir_intrinsic_base(intrin);
275 uint64_t inputs_read = nir->info.inputs_read;
276 int slot = -1;
277 if (edgeflag_is_last) {
278 inputs_read &= ~BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG);
279 if (attr == VERT_ATTRIB_EDGEFLAG)
280 slot = num_inputs - 1;
281 }
282 if (slot == -1)
283 slot = util_bitcount64(inputs_read &
284 BITFIELD64_MASK(attr));
285 nir_intrinsic_set_base(intrin, slot);
286 break;
287 }
288
289 default:
290 break; /* Nothing to do */
291 }
292 }
293 }
294 }
295 }
296
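/* Lower per-vertex VUE inputs to VUE map slot offsets; gl_PointSize is read
 * from the .w component of the VUE header (slot 0).
 */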
297 void
brw_nir_lower_vue_inputs(nir_shader *nir,
                         const struct brw_vue_map *vue_map)
300 {
301 nir_foreach_shader_in_variable(var, nir)
302 var->data.driver_location = var->data.location;
303
304 /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
305 nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
306 nir_lower_io_lower_64bit_to_32);
307
308 /* This pass needs actual constants */
309 nir_opt_constant_folding(nir);
310
311 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
312
313 nir_foreach_function(function, nir) {
314 if (!function->impl)
315 continue;
316
317 nir_foreach_block(block, function->impl) {
318 nir_foreach_instr(instr, block) {
319 if (instr->type != nir_instr_type_intrinsic)
320 continue;
321
322 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
323
324 if (intrin->intrinsic == nir_intrinsic_load_input ||
325 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
326 /* Offset 0 is the VUE header, which contains
327 * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
328 * VARYING_SLOT_PSIZ [.w].
329 */
330 int varying = nir_intrinsic_base(intrin);
331 int vue_slot;
332 switch (varying) {
333 case VARYING_SLOT_PSIZ:
334 nir_intrinsic_set_base(intrin, 0);
335 nir_intrinsic_set_component(intrin, 3);
336 break;
337
338 default:
339 vue_slot = vue_map->varying_to_slot[varying];
340 assert(vue_slot != -1);
341 nir_intrinsic_set_base(intrin, vue_slot);
342 break;
343 }
344 }
345 }
346 }
347 }
348 }
349
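/* Lower TES inputs to URB offsets within the patch, remapping varyings
 * through the VUE map and the tessellation level header.
 */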
350 void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
352 {
353 nir_foreach_shader_in_variable(var, nir)
354 var->data.driver_location = var->data.location;
355
356 nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
357 nir_lower_io_lower_64bit_to_32);
358
359 /* This pass needs actual constants */
360 nir_opt_constant_folding(nir);
361
362 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
363
364 nir_foreach_function(function, nir) {
365 if (function->impl) {
366 nir_builder b;
367 nir_builder_init(&b, function->impl);
368 nir_foreach_block(block, function->impl) {
369 remap_patch_urb_offsets(block, &b, vue_map,
370 nir->info.tess._primitive_mode);
371 }
372 }
373 }
374 }
375
376 /**
377 * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
378 * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
379 *
380 * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
381 * representable in a S0.4 value; a naive conversion would give us -8/16,
382 * which is the opposite of what was intended.
383 *
384 * This is allowed by GL_ARB_gpu_shader5's quantization rules:
385 *
386 * "Not all values of <offset> may be supported; x and y offsets may
387 * be rounded to fixed-point values with the number of fraction bits
388 * given by the implementation-dependent constant
389 * FRAGMENT_INTERPOLATION_OFFSET_BITS."
390 */
391 static bool
lower_barycentric_at_offset(nir_builder *b, nir_instr *instr, void *data)
393 {
394 if (instr->type != nir_instr_type_intrinsic)
395 return false;
396
397 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
398
399 if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
400 return false;
401
402 b->cursor = nir_before_instr(instr);
403
404 assert(intrin->src[0].ssa);
405 nir_ssa_def *offset =
406 nir_imin(b, nir_imm_int(b, 7),
407 nir_f2i32(b, nir_fmul(b, nir_imm_float(b, 16),
408 intrin->src[0].ssa)));
409
410 nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(offset));
411
412 return true;
413 }
414
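/* Lower FS inputs: pick default interpolation modes based on API state,
 * lower I/O and interpolation intrinsics, and convert interpolateAtOffset()
 * offsets to the hardware's fixed-point representation.
 */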
415 void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct intel_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
419 {
420 nir_foreach_shader_in_variable(var, nir) {
421 var->data.driver_location = var->data.location;
422
423 /* Apply default interpolation mode.
424 *
425 * Everything defaults to smooth except for the legacy GL color
426 * built-in variables, which might be flat depending on API state.
427 */
428 if (var->data.interpolation == INTERP_MODE_NONE) {
429 const bool flat = key->flat_shade &&
430 (var->data.location == VARYING_SLOT_COL0 ||
431 var->data.location == VARYING_SLOT_COL1);
432
433 var->data.interpolation = flat ? INTERP_MODE_FLAT
434 : INTERP_MODE_SMOOTH;
435 }
436
437 /* On Ironlake and below, there is only one interpolation mode.
438 * Centroid interpolation doesn't mean anything on this hardware --
439 * there is no multisampling.
440 */
441 if (devinfo->ver < 6) {
442 var->data.centroid = false;
443 var->data.sample = false;
444 }
445 }
446
447 nir_lower_io_options lower_io_options = nir_lower_io_lower_64bit_to_32;
448 if (key->persample_interp)
449 lower_io_options |= nir_lower_io_force_sample_interpolation;
450
451 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
452 if (devinfo->ver >= 11)
453 nir_lower_interpolation(nir, ~0);
454
455 if (!key->multisample_fbo)
456 nir_lower_single_sampled(nir);
457
458 nir_shader_instructions_pass(nir, lower_barycentric_at_offset,
459 nir_metadata_block_index |
460 nir_metadata_dominance,
461 NULL);
462
463 /* This pass needs actual constants */
464 nir_opt_constant_folding(nir);
465
466 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
467 }
468
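/* Lower VUE-based stage outputs to vec4 slot offsets. */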
469 void
brw_nir_lower_vue_outputs(nir_shader *nir)
471 {
472 nir_foreach_shader_out_variable(var, nir) {
473 var->data.driver_location = var->data.location;
474 }
475
476 nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
477 nir_lower_io_lower_64bit_to_32);
478 }
479
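/* Lower TCS outputs to patch URB offsets, remapping varyings through the
 * VUE map and placing tessellation levels in the patch header.
 */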
480 void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          enum tess_primitive_mode tes_primitive_mode)
483 {
484 nir_foreach_shader_out_variable(var, nir) {
485 var->data.driver_location = var->data.location;
486 }
487
488 nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
489 nir_lower_io_lower_64bit_to_32);
490
491 /* This pass needs actual constants */
492 nir_opt_constant_folding(nir);
493
494 nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
495
496 nir_foreach_function(function, nir) {
497 if (function->impl) {
498 nir_builder b;
499 nir_builder_init(&b, function->impl);
500 nir_foreach_block(block, function->impl) {
501 remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
502 }
503 }
504 }
505 }
506
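/* Lower FS outputs, packing the render target location and the dual-source
 * blend index into driver_location for the back-end.
 */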
507 void
brw_nir_lower_fs_outputs(nir_shader *nir)
509 {
510 nir_foreach_shader_out_variable(var, nir) {
511 var->data.driver_location =
512 SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
513 SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
514 }
515
516 nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
517 }
518
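/* Run a NIR pass, accumulate into the enclosing function's `progress`
 * variable, and evaluate to whether this particular pass made progress.
 */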
519 #define OPT(pass, ...) ({ \
520 bool this_progress = false; \
521 NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
522 if (this_progress) \
523 progress = true; \
524 this_progress; \
525 })
526
527 void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                 bool is_scalar, bool allow_copies)
530 {
531 bool progress;
532 unsigned lower_flrp =
533 (nir->options->lower_flrp16 ? 16 : 0) |
534 (nir->options->lower_flrp32 ? 32 : 0) |
535 (nir->options->lower_flrp64 ? 64 : 0);
536
537 do {
538 progress = false;
539 OPT(nir_split_array_vars, nir_var_function_temp);
540 OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
541 OPT(nir_opt_deref);
542 if (OPT(nir_opt_memcpy))
543 OPT(nir_split_var_copies);
544 OPT(nir_lower_vars_to_ssa);
545 if (allow_copies) {
546 /* Only run this pass in the first call to brw_nir_optimize. Later
547 * calls assume that we've lowered away any copy_deref instructions
548 * and we don't want to introduce any more.
549 */
550 OPT(nir_opt_find_array_copies);
551 }
552 OPT(nir_opt_copy_prop_vars);
553 OPT(nir_opt_dead_write_vars);
554 OPT(nir_opt_combine_stores, nir_var_all);
555
556 OPT(nir_opt_ray_queries);
557
558 if (is_scalar) {
559 OPT(nir_lower_alu_to_scalar, NULL, NULL);
560 } else {
561 OPT(nir_opt_shrink_stores, true);
562 OPT(nir_opt_shrink_vectors);
563 }
564
565 OPT(nir_copy_prop);
566
567 if (is_scalar) {
568 OPT(nir_lower_phis_to_scalar, false);
569 }
570
571 OPT(nir_copy_prop);
572 OPT(nir_opt_dce);
573 OPT(nir_opt_cse);
574 OPT(nir_opt_combine_stores, nir_var_all);
575
576 /* Passing 0 to the peephole select pass causes it to convert
577 * if-statements that contain only move instructions in the branches
578 * regardless of the count.
579 *
580 * Passing 1 to the peephole select pass causes it to convert
581 * if-statements that contain at most a single ALU instruction (total)
582 * in both branches. Before Gfx6, some math instructions were
583 * prohibitively expensive and the results of compare operations need an
584 * extra resolve step. For these reasons, this pass is more harmful
585 * than good on those platforms.
586 *
587 * For indirect loads of uniforms (push constants), we assume that array
588 * indices will nearly always be in bounds and the cost of the load is
589 * low. Therefore there shouldn't be a performance benefit to avoid it.
590 * However, in vec4 tessellation shaders, these loads operate by
591 * actually pulling from memory.
592 */
593 const bool is_vec4_tessellation = !is_scalar &&
594 (nir->info.stage == MESA_SHADER_TESS_CTRL ||
595 nir->info.stage == MESA_SHADER_TESS_EVAL);
596 OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
597 OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
598 compiler->devinfo->ver >= 6);
599
600 OPT(nir_opt_intrinsics);
601 OPT(nir_opt_idiv_const, 32);
602 OPT(nir_opt_algebraic);
603 OPT(nir_lower_constant_convert_alu_types);
604 OPT(nir_opt_constant_folding);
605
606 if (lower_flrp != 0) {
607 if (OPT(nir_lower_flrp,
608 lower_flrp,
609 false /* always_precise */)) {
610 OPT(nir_opt_constant_folding);
611 }
612
613 /* Nothing should rematerialize any flrps, so we only need to do this
614 * lowering once.
615 */
616 lower_flrp = 0;
617 }
618
619 OPT(nir_opt_dead_cf);
620 if (OPT(nir_opt_trivial_continues)) {
621 /* If nir_opt_trivial_continues makes progress, then we need to clean
622 * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
623 * to make progress.
624 */
625 OPT(nir_copy_prop);
626 OPT(nir_opt_dce);
627 }
628 OPT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
629 OPT(nir_opt_conditional_discard);
630 if (nir->options->max_unroll_iterations != 0) {
631 OPT(nir_opt_loop_unroll);
632 }
633 OPT(nir_opt_remove_phis);
634 OPT(nir_opt_gcm, false);
635 OPT(nir_opt_undef);
636 OPT(nir_lower_pack);
637 } while (progress);
638
   /* Work around a Gfxbench unused local sampler variable which will trigger an
640 * assert in the opt_large_constants pass.
641 */
642 OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
643 }
644
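/* Callback for nir_lower_bit_size(): return the bit size an instruction
 * should be lowered to (0 leaves it alone). Promotes 8-bit ALU, subgroup,
 * and phi operations to 16 bits and certain math ops to 32 bits to match
 * hardware restrictions.
 */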
645 static unsigned
lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
647 {
648 const struct brw_compiler *compiler = (const struct brw_compiler *) data;
649 const struct intel_device_info *devinfo = compiler->devinfo;
650
651 switch (instr->type) {
652 case nir_instr_type_alu: {
653 nir_alu_instr *alu = nir_instr_as_alu(instr);
654 switch (alu->op) {
655 case nir_op_bit_count:
656 case nir_op_ufind_msb:
657 case nir_op_ifind_msb:
658 case nir_op_find_lsb:
659 /* These are handled specially because the destination is always
660 * 32-bit and so the bit size of the instruction is given by the
661 * source.
662 */
663 assert(alu->src[0].src.is_ssa);
664 return alu->src[0].src.ssa->bit_size == 32 ? 0 : 32;
665 default:
666 break;
667 }
668
669 assert(alu->dest.dest.is_ssa);
670 if (alu->dest.dest.ssa.bit_size >= 32)
671 return 0;
672
673 /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
674 * 8-bit ABS or NEG instruction should eventually get copy propagated
675 * into the MOV that does the type conversion. This results in far
676 * fewer MOV instructions.
677 */
678 switch (alu->op) {
679 case nir_op_idiv:
680 case nir_op_imod:
681 case nir_op_irem:
682 case nir_op_udiv:
683 case nir_op_umod:
684 case nir_op_fceil:
685 case nir_op_ffloor:
686 case nir_op_ffract:
687 case nir_op_fround_even:
688 case nir_op_ftrunc:
689 return 32;
690 case nir_op_frcp:
691 case nir_op_frsq:
692 case nir_op_fsqrt:
693 case nir_op_fpow:
694 case nir_op_fexp2:
695 case nir_op_flog2:
696 case nir_op_fsin:
697 case nir_op_fcos:
698 return devinfo->ver < 9 ? 32 : 0;
699 case nir_op_isign:
700 assert(!"Should have been lowered by nir_opt_algebraic.");
701 return 0;
702 default:
703 if (nir_op_infos[alu->op].num_inputs >= 2 &&
704 alu->dest.dest.ssa.bit_size == 8)
705 return 16;
706
707 if (nir_alu_instr_is_comparison(alu) &&
708 alu->src[0].src.ssa->bit_size == 8)
709 return 16;
710
711 return 0;
712 }
713 break;
714 }
715
716 case nir_instr_type_intrinsic: {
717 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
718 switch (intrin->intrinsic) {
719 case nir_intrinsic_read_invocation:
720 case nir_intrinsic_read_first_invocation:
721 case nir_intrinsic_vote_feq:
722 case nir_intrinsic_vote_ieq:
723 case nir_intrinsic_shuffle:
724 case nir_intrinsic_shuffle_xor:
725 case nir_intrinsic_shuffle_up:
726 case nir_intrinsic_shuffle_down:
727 case nir_intrinsic_quad_broadcast:
728 case nir_intrinsic_quad_swap_horizontal:
729 case nir_intrinsic_quad_swap_vertical:
730 case nir_intrinsic_quad_swap_diagonal:
731 if (intrin->src[0].ssa->bit_size == 8)
732 return 16;
733 return 0;
734
735 case nir_intrinsic_reduce:
736 case nir_intrinsic_inclusive_scan:
737 case nir_intrinsic_exclusive_scan:
738 /* There are a couple of register region issues that make things
739 * complicated for 8-bit types:
740 *
741 * 1. Only raw moves are allowed to write to a packed 8-bit
742 * destination.
743 * 2. If we use a strided destination, the efficient way to do
744 * scan operations ends up using strides that are too big to
745 * encode in an instruction.
746 *
747 * To get around these issues, we just do all 8-bit scan operations
748 * in 16 bits. It's actually fewer instructions than what we'd have
749 * to do if we were trying to do it in native 8-bit types and the
750 * results are the same once we truncate to 8 bits at the end.
751 */
752 if (intrin->dest.ssa.bit_size == 8)
753 return 16;
754 return 0;
755
756 default:
757 return 0;
758 }
759 break;
760 }
761
762 case nir_instr_type_phi: {
763 nir_phi_instr *phi = nir_instr_as_phi(instr);
764 if (phi->dest.ssa.bit_size == 8)
765 return 16;
766 return 0;
767 }
768
769 default:
770 return 0;
771 }
772 }
773
/* On gfx12.5+, if the offsets are not both constant and in the [-8, 7] range,
 * we will have nir_lower_tex() lower the source offset by returning true from
776 * this filter function.
777 */
778 static bool
lower_xehp_tg4_offset_filter(const nir_instr *instr, UNUSED const void *data)
780 {
781 if (instr->type != nir_instr_type_tex)
782 return false;
783
784 nir_tex_instr *tex = nir_instr_as_tex(instr);
785
786 if (tex->op != nir_texop_tg4)
787 return false;
788
789 int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
790 if (offset_index < 0)
791 return false;
792
793 if (!nir_src_is_const(tex->src[offset_index].src))
794 return true;
795
796 int64_t offset_x = nir_src_comp_as_int(tex->src[offset_index].src, 0);
797 int64_t offset_y = nir_src_comp_as_int(tex->src[offset_index].src, 1);
798
799 return offset_x < -8 || offset_x > 7 || offset_y < -8 || offset_y > 7;
800 }
801
802 /* Does some simple lowering and runs the standard suite of optimizations
803 *
804 * This is intended to be called more-or-less directly after you get the
805 * shader out of GLSL or some other source. While it is geared towards i965,
806 * it is not at all generator-specific except for the is_scalar flag. Even
807 * there, it is safe to call with is_scalar = false for a shader that is
808 * intended for the FS backend as long as nir_optimize is called again with
809 * is_scalar = true to scalarize everything prior to code gen.
810 */
811 void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
                   const nir_shader *softfp64)
814 {
815 const struct intel_device_info *devinfo = compiler->devinfo;
816 UNUSED bool progress; /* Written by OPT */
817
818 const bool is_scalar = compiler->scalar_stage[nir->info.stage];
819
820 nir_validate_ssa_dominance(nir, "before brw_preprocess_nir");
821
822 if (is_scalar) {
823 OPT(nir_lower_alu_to_scalar, NULL, NULL);
824 }
825
826 if (nir->info.stage == MESA_SHADER_GEOMETRY)
827 OPT(nir_lower_gs_intrinsics, 0);
828
829 /* See also brw_nir_trig_workarounds.py */
830 if (compiler->precise_trig &&
831 !(devinfo->ver >= 10 || devinfo->platform == INTEL_PLATFORM_KBL))
832 OPT(brw_nir_apply_trig_workarounds);
833
834 if (devinfo->ver >= 12)
835 OPT(brw_nir_clamp_image_1d_2d_array_sizes);
836
837 const nir_lower_tex_options tex_options = {
838 .lower_txp = ~0,
839 .lower_txf_offset = true,
840 .lower_rect_offset = true,
841 .lower_txd_cube_map = true,
842 .lower_txd_3d = devinfo->verx10 >= 125, /* Wa_1209978020 */
843 .lower_txd_array = devinfo->verx10 >= 125, /* Wa_1209978020 */
844 .lower_txb_shadow_clamp = true,
845 .lower_txd_shadow_clamp = true,
846 .lower_txd_offset_clamp = true,
847 .lower_tg4_offsets = true,
848 .lower_txs_lod = true, /* Wa_14012320009 */
849 .lower_offset_filter =
850 devinfo->verx10 >= 125 ? lower_xehp_tg4_offset_filter : NULL,
851 .lower_invalid_implicit_lod = true,
852 };
853
854 OPT(nir_lower_tex, &tex_options);
855 OPT(nir_normalize_cubemap_coords);
856
857 OPT(nir_lower_global_vars_to_local);
858
859 OPT(nir_split_var_copies);
860 OPT(nir_split_struct_vars, nir_var_function_temp);
861
862 brw_nir_optimize(nir, compiler, is_scalar, true);
863
864 OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
865 OPT(nir_lower_int64);
866
867 OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
868
869 if (is_scalar) {
870 OPT(nir_lower_load_const_to_scalar);
871 }
872
873 /* Lower a bunch of stuff */
874 OPT(nir_lower_var_copies);
875
876 /* This needs to be run after the first optimization pass but before we
877 * lower indirect derefs away
878 */
879 if (compiler->supports_shader_constants) {
880 OPT(nir_opt_large_constants, NULL, 32);
881 }
882
883 OPT(nir_lower_system_values);
884 OPT(nir_lower_compute_system_values, NULL);
885
886 const nir_lower_subgroups_options subgroups_options = {
887 .ballot_bit_size = 32,
888 .ballot_components = 1,
889 .lower_to_scalar = true,
890 .lower_vote_trivial = !is_scalar,
891 .lower_relative_shuffle = true,
892 .lower_quad_broadcast_dynamic = true,
893 .lower_elect = true,
894 };
895 OPT(nir_lower_subgroups, &subgroups_options);
896
897 nir_variable_mode indirect_mask =
898 brw_nir_no_indirect_mask(compiler, nir->info.stage);
899 OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
900
   /* Even in cases where we can handle indirect temporaries via scratch,
    * it can still be expensive. Lower indirects on small arrays to
903 * conditional load/stores.
904 *
905 * The threshold of 16 was chosen semi-arbitrarily. The idea is that an
906 * indirect on an array of 16 elements is about 30 instructions at which
907 * point, you may be better off doing a send. With a SIMD8 program, 16
908 * floats is 1/8 of the entire register file. Any array larger than that
909 * is likely to cause pressure issues. Also, this value is sufficiently
910 * high that the benchmarks known to suffer from large temporary array
911 * issues are helped but nothing else in shader-db is hurt except for maybe
912 * that one kerbal space program shader.
913 */
914 if (is_scalar && !(indirect_mask & nir_var_function_temp))
915 OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
916
917 /* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
918 * SSBOs, our back-end is capable of loading an entire vec4 at a time and
919 * we would like to take advantage of that whenever possible regardless of
920 * whether or not the app gives us full loads. This should allow the
921 * optimizer to combine UBO and SSBO load operations and save us some send
922 * messages.
923 */
924 OPT(nir_lower_array_deref_of_vec,
925 nir_var_mem_ubo | nir_var_mem_ssbo,
926 nir_lower_direct_array_deref_of_vec_load);
927
928 /* Get rid of split copies */
929 brw_nir_optimize(nir, compiler, is_scalar, false);
930 }
931
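/* Link a producer/consumer shader pair: mark per-primitive FS inputs for
 * mesh pipelines, remove and compact unused varyings, vectorize I/O, and
 * re-optimize both shaders as needed.
 */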
932 void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer)
935 {
936 if (producer->info.stage == MESA_SHADER_MESH &&
937 consumer->info.stage == MESA_SHADER_FRAGMENT) {
938 /* gl_MeshPerPrimitiveNV[].gl_ViewportIndex, gl_PrimitiveID and gl_Layer
939 * are per primitive, but fragment shader does not have them marked as
940 * such. Add the annotation here.
941 */
942 nir_foreach_shader_in_variable(var, consumer) {
943 switch (var->data.location) {
944 case VARYING_SLOT_LAYER:
945 case VARYING_SLOT_PRIMITIVE_ID:
946 case VARYING_SLOT_VIEWPORT:
947 var->data.per_primitive = 1;
948 break;
949 default:
950 continue;
951 }
952 }
953 }
954
955 nir_lower_io_arrays_to_elements(producer, consumer);
956 nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
957 nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
958
959 const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
960 const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
961
962 if (p_is_scalar && c_is_scalar) {
963 NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
964 NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
965 brw_nir_optimize(producer, compiler, p_is_scalar, false);
966 brw_nir_optimize(consumer, compiler, c_is_scalar, false);
967 }
968
969 if (nir_link_opt_varyings(producer, consumer))
970 brw_nir_optimize(consumer, compiler, c_is_scalar, false);
971
972 NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
973 NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
974
975 if (nir_remove_unused_varyings(producer, consumer)) {
976 if (should_print_nir(producer)) {
977 printf("nir_remove_unused_varyings\n");
978 nir_print_shader(producer, stdout);
979 }
980 if (should_print_nir(consumer)) {
981 printf("nir_remove_unused_varyings\n");
982 nir_print_shader(consumer, stdout);
983 }
984
985 NIR_PASS(_, producer, nir_lower_global_vars_to_local);
986 NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
987
988 /* The backend might not be able to handle indirects on
989 * temporaries so we need to lower indirects on any of the
990 * varyings we have demoted here.
991 */
992 NIR_PASS(_, producer, nir_lower_indirect_derefs,
993 brw_nir_no_indirect_mask(compiler, producer->info.stage),
994 UINT32_MAX);
995 NIR_PASS(_, consumer, nir_lower_indirect_derefs,
996 brw_nir_no_indirect_mask(compiler, consumer->info.stage),
997 UINT32_MAX);
998
999 brw_nir_optimize(producer, compiler, p_is_scalar, false);
1000 brw_nir_optimize(consumer, compiler, c_is_scalar, false);
1001 }
1002
1003 NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
1004 NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
1005 NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);
1006
1007 if (producer->info.stage != MESA_SHADER_TESS_CTRL &&
1008 producer->info.stage != MESA_SHADER_MESH &&
1009 producer->info.stage != MESA_SHADER_TASK) {
1010 /* Calling lower_io_to_vector creates output variable writes with
1011 * write-masks. On non-TCS outputs, the back-end can't handle it and we
1012 * need to call nir_lower_io_to_temporaries to get rid of them. This,
1013 * in turn, creates temporary variables and extra copy_deref intrinsics
1014 * that we need to clean up.
1015 *
      * Note Mesh/Task don't support I/O as temporaries (I/O is shared by
      * the whole workgroup, possibly using multiple HW threads). For those,
      * the write-mask in outputs is handled by the I/O lowering.
1019 */
1020 NIR_PASS_V(producer, nir_lower_io_to_temporaries,
1021 nir_shader_get_entrypoint(producer), true, false);
1022 NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1023 NIR_PASS(_, producer, nir_split_var_copies);
1024 NIR_PASS(_, producer, nir_lower_var_copies);
1025 }
1026 }
1027
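/* Callback for nir_opt_load_store_vectorize(): only allow combined accesses
 * of at most a vec4 of 32-bit (or smaller) components with sufficient
 * alignment.
 */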
1028 static bool
brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                             unsigned bit_size,
                             unsigned num_components,
                             nir_intrinsic_instr *low,
                             nir_intrinsic_instr *high,
                             void *data)
1035 {
1036 /* Don't combine things to generate 64-bit loads/stores. We have to split
1037 * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
1038 * we don't want to make a mess for the back-end.
1039 */
1040 if (bit_size > 32)
1041 return false;
1042
1043 /* We can handle at most a vec4 right now. Anything bigger would get
1044 * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
1045 */
1046 if (num_components > 4)
1047 return false;
1048
1049
1050 uint32_t align;
1051 if (align_offset)
1052 align = 1 << (ffs(align_offset) - 1);
1053 else
1054 align = align_mul;
1055
1056 if (align < bit_size / 8)
1057 return false;
1058
1059 return true;
1060 }
1061
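/* Callback for nir_opt_combine_memory_barriers(): merge any two barriers by
 * unioning their modes and semantics and taking the wider scope.
 */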
1062 static
bool combine_all_barriers(nir_intrinsic_instr *a,
                          nir_intrinsic_instr *b,
                          void *data)
1066 {
1067 /* Translation to backend IR will get rid of modes we don't care about, so
1068 * no harm in always combining them.
1069 *
1070 * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
1071 * scheduling so that it can take advantage of the different semantics.
1072 */
1073 nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
1074 nir_intrinsic_memory_modes(b));
1075 nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
1076 nir_intrinsic_memory_semantics(b));
1077 nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
1078 nir_intrinsic_memory_scope(b)));
1079 return true;
1080 }
1081
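/* Vectorize UBO/SSBO/shared/global access where profitable (scalar back-end
 * only), then lower access sizes the hardware can't do directly and clean up
 * the result.
 */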
1082 static void
brw_vectorize_lower_mem_access(nir_shader *nir,
                               const struct brw_compiler *compiler,
                               bool is_scalar,
                               bool robust_buffer_access)
1087 {
1088 const struct intel_device_info *devinfo = compiler->devinfo;
1089 bool progress = false;
1090
1091 if (is_scalar) {
1092 nir_load_store_vectorize_options options = {
1093 .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
1094 nir_var_mem_global | nir_var_mem_shared |
1095 nir_var_mem_task_payload,
1096 .callback = brw_nir_should_vectorize_mem,
1097 .robust_modes = (nir_variable_mode)0,
1098 };
1099
1100 if (robust_buffer_access) {
1101 options.robust_modes = nir_var_mem_ubo | nir_var_mem_ssbo |
1102 nir_var_mem_global;
1103 }
1104
1105 OPT(nir_opt_load_store_vectorize, &options);
1106 }
1107
1108 OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
1109
1110 while (progress) {
1111 progress = false;
1112
1113 OPT(nir_lower_pack);
1114 OPT(nir_copy_prop);
1115 OPT(nir_opt_dce);
1116 OPT(nir_opt_cse);
1117 OPT(nir_opt_algebraic);
1118 OPT(nir_opt_constant_folding);
1119 }
1120 }
1121
1122 static bool
nir_shader_has_local_variables(const nir_shader *nir)
1124 {
1125 nir_foreach_function(func, nir) {
1126 if (func->impl && !exec_list_is_empty(&func->impl->locals))
1127 return true;
1128 }
1129
1130 return false;
1131 }
1132
1133 /* Prepare the given shader for codegen
1134 *
1135 * This function is intended to be called right before going into the actual
1136 * backend and is highly backend-specific. Also, once this function has been
1137 * called on a shader, it will no longer be in SSA form so most optimizations
1138 * will not work.
1139 */
1140 void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar, bool debug_enabled,
                    bool robust_buffer_access)
1144 {
1145 const struct intel_device_info *devinfo = compiler->devinfo;
1146
1147 UNUSED bool progress; /* Written by OPT */
1148
1149 OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1150
1151 OPT(brw_nir_lower_scoped_barriers);
1152 OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);
1153
1154 do {
1155 progress = false;
1156 OPT(nir_opt_algebraic_before_ffma);
1157 } while (progress);
1158
1159 if (devinfo->verx10 >= 125) {
1160 const nir_lower_idiv_options options = {
1161 .imprecise_32bit_lowering = false,
1162 .allow_fp16 = false
1163 };
1164 OPT(nir_lower_idiv, &options);
1165 }
1166
1167 if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage))
1168 brw_nir_lower_shading_rate_output(nir);
1169
1170 brw_nir_optimize(nir, compiler, is_scalar, false);
1171
1172 if (is_scalar && nir_shader_has_local_variables(nir)) {
1173 OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
1174 glsl_get_natural_size_align_bytes);
1175 OPT(nir_lower_explicit_io, nir_var_function_temp,
1176 nir_address_format_32bit_offset);
1177 brw_nir_optimize(nir, compiler, is_scalar, false);
1178 }
1179
1180 brw_vectorize_lower_mem_access(nir, compiler, is_scalar,
1181 robust_buffer_access);
1182
1183 if (OPT(nir_lower_int64))
1184 brw_nir_optimize(nir, compiler, is_scalar, false);
1185
1186 if (devinfo->ver >= 6) {
1187 /* Try and fuse multiply-adds */
1188 OPT(brw_nir_opt_peephole_ffma);
1189 }
1190
1191 if (OPT(nir_opt_comparison_pre)) {
1192 OPT(nir_copy_prop);
1193 OPT(nir_opt_dce);
1194 OPT(nir_opt_cse);
1195
      /* Do the select peephole again. nir_opt_comparison_pre (combined with
1197 * the other optimization passes) will have removed at least one
1198 * instruction from one of the branches of the if-statement, so now it
1199 * might be under the threshold of conversion to bcsel.
1200 *
1201 * See brw_nir_optimize for the explanation of is_vec4_tessellation.
1202 */
1203 const bool is_vec4_tessellation = !is_scalar &&
1204 (nir->info.stage == MESA_SHADER_TESS_CTRL ||
1205 nir->info.stage == MESA_SHADER_TESS_EVAL);
1206 OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
1207 OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
1208 compiler->devinfo->ver >= 6);
1209 }
1210
1211 do {
1212 progress = false;
1213 if (OPT(nir_opt_algebraic_late)) {
      /* At this late stage, anything that makes more constants will wreak
       * havoc on the vec4 backend. The handling of constants in the vec4
1216 * backend is not good.
1217 */
1218 if (is_scalar)
1219 OPT(nir_opt_constant_folding);
1220
1221 OPT(nir_copy_prop);
1222 OPT(nir_opt_dce);
1223 OPT(nir_opt_cse);
1224 }
1225 } while (progress);
1226
1227
1228 OPT(brw_nir_lower_conversions);
1229
1230 if (is_scalar)
1231 OPT(nir_lower_alu_to_scalar, NULL, NULL);
1232
1233 while (OPT(nir_opt_algebraic_distribute_src_mods)) {
1234 OPT(nir_copy_prop);
1235 OPT(nir_opt_dce);
1236 OPT(nir_opt_cse);
1237 }
1238
1239 OPT(nir_copy_prop);
1240 OPT(nir_opt_dce);
1241 OPT(nir_opt_move, nir_move_comparisons);
1242 OPT(nir_opt_dead_cf);
1243
1244 NIR_PASS_V(nir, nir_convert_to_lcssa, true, true);
1245 NIR_PASS_V(nir, nir_divergence_analysis);
1246
1247 /* TODO: Enable nir_opt_uniform_atomics on Gfx7.x too.
1248 * It currently fails Vulkan tests on Haswell for an unknown reason.
1249 *
1250 * TODO: Using this optimization on RT/OpenCL kernels also seems to cause
1251 * issues. Until we can understand those issues, disable it.
1252 */
1253 bool opt_uniform_atomic_stage_allowed =
1254 devinfo->ver >= 8 &&
1255 nir->info.stage != MESA_SHADER_KERNEL &&
1256 nir->info.stage != MESA_SHADER_RAYGEN &&
1257 !gl_shader_stage_is_callable(nir->info.stage);
1258
1259 if (opt_uniform_atomic_stage_allowed && OPT(nir_opt_uniform_atomics)) {
1260 const nir_lower_subgroups_options subgroups_options = {
1261 .ballot_bit_size = 32,
1262 .ballot_components = 1,
1263 .lower_elect = true,
1264 };
1265 OPT(nir_lower_subgroups, &subgroups_options);
1266
1267 if (OPT(nir_lower_int64))
1268 brw_nir_optimize(nir, compiler, is_scalar, false);
1269 }
1270
1271 /* Clean up LCSSA phis */
1272 OPT(nir_opt_remove_phis);
1273
1274 OPT(nir_lower_bool_to_int32);
1275 OPT(nir_copy_prop);
1276 OPT(nir_opt_dce);
1277
1278 OPT(nir_lower_locals_to_regs);
1279
1280 if (unlikely(debug_enabled)) {
1281 /* Re-index SSA defs so we print more sensible numbers. */
1282 nir_foreach_function(function, nir) {
1283 if (function->impl)
1284 nir_index_ssa_defs(function->impl);
1285 }
1286
1287 fprintf(stderr, "NIR (SSA form) for %s shader:\n",
1288 _mesa_shader_stage_to_string(nir->info.stage));
1289 nir_print_shader(nir, stderr);
1290 }
1291
1292 nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");
1293
1294 OPT(nir_convert_from_ssa, true);
1295
1296 if (!is_scalar) {
1297 OPT(nir_move_vec_src_uses_to_dest);
1298 OPT(nir_lower_vec_to_movs, NULL, NULL);
1299 }
1300
1301 OPT(nir_opt_dce);
1302
1303 if (OPT(nir_opt_rematerialize_compares))
1304 OPT(nir_opt_dce);
1305
1306 /* This is the last pass we run before we start emitting stuff. It
1307 * determines when we need to insert boolean resolves on Gen <= 5. We
1308 * run it last because it stashes data in instr->pass_flags and we don't
1309 * want that to be squashed by other NIR passes.
1310 */
1311 if (devinfo->ver <= 5)
1312 brw_nir_analyze_boolean_resolves(nir);
1313
1314 nir_sweep(nir);
1315
1316 if (unlikely(debug_enabled)) {
1317 fprintf(stderr, "NIR (final form) for %s shader:\n",
1318 _mesa_shader_stage_to_string(nir->info.stage));
1319 nir_print_shader(nir, stderr);
1320 }
1321 }
1322
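/* Lower texturing operations according to the sampler portion of the program
 * key: rectangle textures, GL_CLAMP emulation, shadow gradients, and YUV
 * external image formats, depending on the hardware generation.
 */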
1323 static bool
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex)
1327 {
1328 const struct intel_device_info *devinfo = compiler->devinfo;
1329 nir_lower_tex_options tex_options = {
1330 .lower_txd_clamp_bindless_sampler = true,
1331 .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
1332 .lower_invalid_implicit_lod = true,
1333 };
1334
1335 /* Iron Lake and prior require lowering of all rectangle textures */
1336 if (devinfo->ver < 6)
1337 tex_options.lower_rect = true;
1338
1339 /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
1340 if (devinfo->ver < 8) {
1341 tex_options.saturate_s = key_tex->gl_clamp_mask[0];
1342 tex_options.saturate_t = key_tex->gl_clamp_mask[1];
1343 tex_options.saturate_r = key_tex->gl_clamp_mask[2];
1344 }
1345
1346 /* Prior to Haswell, we have to lower gradients on shadow samplers */
1347 tex_options.lower_txd_shadow = devinfo->verx10 <= 70;
1348
1349 tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
1350 tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
1351 tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
1352 tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
1353 tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
1354 tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
1355 tex_options.bt709_external = key_tex->bt709_mask;
1356 tex_options.bt2020_external = key_tex->bt2020_mask;
1357
1358 /* Setup array of scaling factors for each texture. */
1359 memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
1360 sizeof(tex_options.scale_factors));
1361
1362 return nir_lower_tex(nir, &tex_options);
1363 }
1364
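/* Translate the API subgroup size requirement into a concrete size the
 * back-end should use, or 0 to let the back-end pick per-shader.
 */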
1365 static unsigned
get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
1367 {
1368 switch (info->subgroup_size) {
1369 case SUBGROUP_SIZE_API_CONSTANT:
1370 /* We have to use the global constant size. */
1371 return BRW_SUBGROUP_SIZE;
1372
1373 case SUBGROUP_SIZE_UNIFORM:
1374 /* It has to be uniform across all invocations but can vary per stage
1375 * if we want. This gives us a bit more freedom.
1376 *
1377 * For compute, brw_nir_apply_key is called per-dispatch-width so this
1378 * is the actual subgroup size and not a maximum. However, we only
1379 * invoke one size of any given compute shader so it's still guaranteed
1380 * to be uniform across invocations.
1381 */
1382 return max_subgroup_size;
1383
1384 case SUBGROUP_SIZE_VARYING:
1385 /* The subgroup size is allowed to be fully varying. For geometry
1386 * stages, we know it's always 8 which is max_subgroup_size so we can
1387 * return that. For compute, brw_nir_apply_key is called once per
1388 * dispatch-width so max_subgroup_size is the real subgroup size.
1389 *
1390 * For fragment, we return 0 and let it fall through to the back-end
1391 * compiler. This means we can't optimize based on subgroup size but
1392 * that's a risk the client took when it asked for a varying subgroup
1393 * size.
1394 */
1395 return info->stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;
1396
1397 case SUBGROUP_SIZE_REQUIRE_8:
1398 case SUBGROUP_SIZE_REQUIRE_16:
1399 case SUBGROUP_SIZE_REQUIRE_32:
1400 assert(gl_shader_stage_uses_workgroup(info->stage));
1401 /* These enum values are expressly chosen to be equal to the subgroup
1402 * size that they require.
1403 */
1404 return info->subgroup_size;
1405
1406 case SUBGROUP_SIZE_FULL_SUBGROUPS:
1407 case SUBGROUP_SIZE_REQUIRE_64:
1408 case SUBGROUP_SIZE_REQUIRE_128:
1409 break;
1410 }
1411
1412 unreachable("Invalid subgroup size type");
1413 }
1414
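/* Apply program-key-dependent lowering (sampler key, subgroup size, trig
 * input range workaround) and re-optimize if anything changed.
 */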
1415 void
brw_nir_apply_key(nir_shader *nir,
                  const struct brw_compiler *compiler,
                  const struct brw_base_prog_key *key,
                  unsigned max_subgroup_size,
                  bool is_scalar)
1421 {
1422 bool progress = false;
1423
1424 OPT(brw_nir_apply_sampler_key, compiler, &key->tex);
1425
1426 const nir_lower_subgroups_options subgroups_options = {
1427 .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size),
1428 .ballot_bit_size = 32,
1429 .ballot_components = 1,
1430 .lower_subgroup_masks = true,
1431 };
1432 OPT(nir_lower_subgroups, &subgroups_options);
1433
1434 if (key->limit_trig_input_range)
1435 OPT(brw_nir_limit_trig_input_range_workaround);
1436
1437 if (progress)
1438 brw_nir_optimize(nir, compiler, is_scalar, false);
1439 }
1440
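/* Map a NIR comparison opcode to the equivalent EU conditional modifier. */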
1441 enum brw_conditional_mod
brw_cmod_for_nir_comparison(nir_op op)
1443 {
1444 switch (op) {
1445 case nir_op_flt:
1446 case nir_op_flt32:
1447 case nir_op_ilt:
1448 case nir_op_ilt32:
1449 case nir_op_ult:
1450 case nir_op_ult32:
1451 return BRW_CONDITIONAL_L;
1452
1453 case nir_op_fge:
1454 case nir_op_fge32:
1455 case nir_op_ige:
1456 case nir_op_ige32:
1457 case nir_op_uge:
1458 case nir_op_uge32:
1459 return BRW_CONDITIONAL_GE;
1460
1461 case nir_op_feq:
1462 case nir_op_feq32:
1463 case nir_op_ieq:
1464 case nir_op_ieq32:
1465 case nir_op_b32all_fequal2:
1466 case nir_op_b32all_iequal2:
1467 case nir_op_b32all_fequal3:
1468 case nir_op_b32all_iequal3:
1469 case nir_op_b32all_fequal4:
1470 case nir_op_b32all_iequal4:
1471 return BRW_CONDITIONAL_Z;
1472
1473 case nir_op_fneu:
1474 case nir_op_fneu32:
1475 case nir_op_ine:
1476 case nir_op_ine32:
1477 case nir_op_b32any_fnequal2:
1478 case nir_op_b32any_inequal2:
1479 case nir_op_b32any_fnequal3:
1480 case nir_op_b32any_inequal3:
1481 case nir_op_b32any_fnequal4:
1482 case nir_op_b32any_inequal4:
1483 return BRW_CONDITIONAL_NZ;
1484
1485 default:
1486 unreachable("Unsupported NIR comparison op");
1487 }
1488 }
1489
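/* Map a NIR atomic intrinsic to the corresponding BRW_AOP_* atomic opcode,
 * turning constant +/-1 adds into INC/DEC.
 */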
1490 uint32_t
brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
1492 {
1493 switch (atomic->intrinsic) {
1494 #define AOP_CASE(atom) \
1495 case nir_intrinsic_image_atomic_##atom: \
1496 case nir_intrinsic_bindless_image_atomic_##atom: \
1497 case nir_intrinsic_ssbo_atomic_##atom: \
1498 case nir_intrinsic_shared_atomic_##atom: \
1499 case nir_intrinsic_global_atomic_##atom
1500
1501 AOP_CASE(add): {
1502 unsigned src_idx;
1503 switch (atomic->intrinsic) {
1504 case nir_intrinsic_image_atomic_add:
1505 case nir_intrinsic_bindless_image_atomic_add:
1506 src_idx = 3;
1507 break;
1508 case nir_intrinsic_ssbo_atomic_add:
1509 src_idx = 2;
1510 break;
1511 case nir_intrinsic_shared_atomic_add:
1512 case nir_intrinsic_global_atomic_add:
1513 src_idx = 1;
1514 break;
1515 default:
1516 unreachable("Invalid add atomic opcode");
1517 }
1518
1519 if (nir_src_is_const(atomic->src[src_idx])) {
1520 int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
1521 if (add_val == 1)
1522 return BRW_AOP_INC;
1523 else if (add_val == -1)
1524 return BRW_AOP_DEC;
1525 }
1526 return BRW_AOP_ADD;
1527 }
1528
1529 AOP_CASE(imin): return BRW_AOP_IMIN;
1530 AOP_CASE(umin): return BRW_AOP_UMIN;
1531 AOP_CASE(imax): return BRW_AOP_IMAX;
1532 AOP_CASE(umax): return BRW_AOP_UMAX;
1533 AOP_CASE(and): return BRW_AOP_AND;
1534 AOP_CASE(or): return BRW_AOP_OR;
1535 AOP_CASE(xor): return BRW_AOP_XOR;
1536 AOP_CASE(exchange): return BRW_AOP_MOV;
1537 AOP_CASE(comp_swap): return BRW_AOP_CMPWR;
1538
1539 #undef AOP_CASE
1540 #define AOP_CASE(atom) \
1541 case nir_intrinsic_ssbo_atomic_##atom: \
1542 case nir_intrinsic_shared_atomic_##atom: \
1543 case nir_intrinsic_global_atomic_##atom
1544
1545 AOP_CASE(fmin): return BRW_AOP_FMIN;
1546 AOP_CASE(fmax): return BRW_AOP_FMAX;
1547 AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;
1548 AOP_CASE(fadd): return BRW_AOP_FADD;
1549
1550 #undef AOP_CASE
1551
1552 default:
1553 unreachable("Unsupported NIR atomic intrinsic");
1554 }
1555 }
1556
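/* Map a NIR ALU type to the corresponding BRW register type. */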
1557 enum brw_reg_type
brw_type_for_nir_type(const struct intel_device_info *devinfo,
                      nir_alu_type type)
1560 {
1561 switch (type) {
1562 case nir_type_uint:
1563 case nir_type_uint32:
1564 return BRW_REGISTER_TYPE_UD;
1565 case nir_type_bool:
1566 case nir_type_int:
1567 case nir_type_bool32:
1568 case nir_type_int32:
1569 return BRW_REGISTER_TYPE_D;
1570 case nir_type_float:
1571 case nir_type_float32:
1572 return BRW_REGISTER_TYPE_F;
1573 case nir_type_float16:
1574 return BRW_REGISTER_TYPE_HF;
1575 case nir_type_float64:
1576 return BRW_REGISTER_TYPE_DF;
1577 case nir_type_int64:
1578 return devinfo->ver < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
1579 case nir_type_uint64:
1580 return devinfo->ver < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
1581 case nir_type_int16:
1582 return BRW_REGISTER_TYPE_W;
1583 case nir_type_uint16:
1584 return BRW_REGISTER_TYPE_UW;
1585 case nir_type_int8:
1586 return BRW_REGISTER_TYPE_B;
1587 case nir_type_uint8:
1588 return BRW_REGISTER_TYPE_UB;
1589 default:
1590 unreachable("unknown type");
1591 }
1592
1593 return BRW_REGISTER_TYPE_F;
1594 }
1595
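/* Build a passthrough TCS that writes the patch URB header (tessellation
 * levels) from push constants and copies each enabled per-vertex input
 * straight to the corresponding output.
 */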
1596 nir_shader *
brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
                               const nir_shader_compiler_options *options,
                               const struct brw_tcs_prog_key *key)
1600 {
1601 nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_TESS_CTRL,
1602 options, "passthrough TCS");
1603 ralloc_steal(mem_ctx, b.shader);
1604 nir_shader *nir = b.shader;
1605 nir_variable *var;
1606 nir_ssa_def *load;
1607 nir_ssa_def *zero = nir_imm_int(&b, 0);
1608 nir_ssa_def *invoc_id = nir_load_invocation_id(&b);
1609
1610 nir->info.inputs_read = key->outputs_written &
1611 ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
1612 nir->info.outputs_written = key->outputs_written;
1613 nir->info.tess.tcs_vertices_out = key->input_vertices;
1614 nir->num_uniforms = 8 * sizeof(uint32_t);
1615
1616 var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
1617 var->data.location = 0;
1618 var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
1619 var->data.location = 1;
1620
1621 /* Write the patch URB header. */
1622 for (int i = 0; i <= 1; i++) {
1623 load = nir_load_uniform(&b, 4, 32, zero, .base = i * 4 * sizeof(uint32_t));
1624
1625 nir_store_output(&b, load, zero,
1626 .base = VARYING_SLOT_TESS_LEVEL_INNER - i,
1627 .write_mask = WRITEMASK_XYZW);
1628 }
1629
1630 /* Copy inputs to outputs. */
1631 uint64_t varyings = nir->info.inputs_read;
1632
1633 while (varyings != 0) {
1634 const int varying = ffsll(varyings) - 1;
1635
1636 load = nir_load_per_vertex_input(&b, 4, 32, invoc_id, zero, .base = varying);
1637
1638 nir_store_per_vertex_output(&b, load, invoc_id, zero,
1639 .base = varying,
1640 .write_mask = WRITEMASK_XYZW);
1641
1642 varyings &= ~BITFIELD64_BIT(varying);
1643 }
1644
1645 nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
1646
1647 brw_preprocess_nir(compiler, nir, NULL);
1648
1649 return nir;
1650 }
1651
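/* Replace a load_uniform with loads from a global constant buffer at
 * base_addr + off. Constant offsets use 64B block loads and extract the
 * requested bits; indirect offsets fall back to a regular global constant
 * load.
 */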
1652 nir_ssa_def *
brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
                          nir_ssa_def *base_addr, unsigned off)
1655 {
1656 assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
1657 assert(load_uniform->dest.is_ssa);
1658 assert(load_uniform->src[0].is_ssa);
1659
1660 unsigned bit_size = load_uniform->dest.ssa.bit_size;
1661 assert(bit_size >= 8 && bit_size % 8 == 0);
1662 unsigned byte_size = bit_size / 8;
1663 nir_ssa_def *sysval;
1664
1665 if (nir_src_is_const(load_uniform->src[0])) {
1666 uint64_t offset = off +
1667 nir_intrinsic_base(load_uniform) +
1668 nir_src_as_uint(load_uniform->src[0]);
1669
1670 /* Things should be component-aligned. */
1671 assert(offset % byte_size == 0);
1672
1673 unsigned suboffset = offset % 64;
1674 uint64_t aligned_offset = offset - suboffset;
1675
1676 /* Load two just in case we go over a 64B boundary */
1677 nir_ssa_def *data[2];
1678 for (unsigned i = 0; i < 2; i++) {
1679 nir_ssa_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
1680 data[i] = nir_load_global_const_block_intel(b, 16, addr,
1681 nir_imm_true(b));
1682 }
1683
1684 sysval = nir_extract_bits(b, data, 2, suboffset * 8,
1685 load_uniform->num_components, bit_size);
1686 } else {
1687 nir_ssa_def *offset32 =
1688 nir_iadd_imm(b, load_uniform->src[0].ssa,
1689 off + nir_intrinsic_base(load_uniform));
1690 nir_ssa_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
1691 sysval = nir_load_global_constant(b, addr, byte_size,
1692 load_uniform->num_components, bit_size);
1693 }
1694
1695 return sysval;
1696 }
1697