1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "../intel_nir.h"
25 #include "elk_nir.h"
26 #include "elk_nir_private.h"
27 #include "elk_shader.h"
28 #include "dev/intel_debug.h"
29 #include "compiler/glsl_types.h"
30 #include "compiler/nir/nir_builder.h"
31 #include "util/u_math.h"
32
33 static bool
34 remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
35 enum tess_primitive_mode _primitive_mode)
36 {
37 const int location = nir_intrinsic_base(intr);
38 const unsigned component = nir_intrinsic_component(intr);
39 bool out_of_bounds = false;
40 bool write = !nir_intrinsic_infos[intr->intrinsic].has_dest;
41 unsigned mask = write ? nir_intrinsic_write_mask(intr) : 0;
42 nir_def *src = NULL, *dest = NULL;
43
44 if (write) {
45 assert(intr->num_components == intr->src[0].ssa->num_components);
46 } else {
47 assert(intr->num_components == intr->def.num_components);
48 }
49
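/* The tessellation levels live in the patch URB header, and the per-case
 * comments below spell out which DWord each component lands in: quads keep
 * the inner levels at DWords 3-2 and the outer levels at DWords 7-4 (both
 * reversed), triangles keep the single inner level at DWord 4 and the outer
 * levels at DWords 7-5, and isolines keep the two outer levels at DWords 6-7
 * in order. Everything below just rewrites bases, components, swizzles, and
 * write masks to match that layout.
 */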
50 if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
51 b->cursor = write ? nir_before_instr(&intr->instr)
52 : nir_after_instr(&intr->instr);
53
54 switch (_primitive_mode) {
55 case TESS_PRIMITIVE_QUADS:
56 /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
57 nir_intrinsic_set_base(intr, 0);
58
59 if (write) {
60 assert(intr->src[0].ssa->num_components == 2);
61
62 intr->num_components = 4;
63
64 nir_def *undef = nir_undef(b, 1, 32);
65 nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
66 nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
67 src = nir_vec4(b, undef, undef, y, x);
68 mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
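/* Worked example: an incoming write mask of xy (0b0011) becomes 0b1100
 * here, matching the vec4(undef, undef, y, x) source built above, so
 * inner[0] lands in DWord 3 and inner[1] in DWord 2.
 */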
69 } else if (intr->def.num_components > 1) {
70 assert(intr->def.num_components == 2);
71
72 intr->num_components = 4;
73 intr->def.num_components = 4;
74
75 unsigned wz[2] = { 3, 2 };
76 dest = nir_swizzle(b, &intr->def, wz, 2);
77 } else {
78 nir_intrinsic_set_component(intr, 3 - component);
79 }
80 break;
81 case TESS_PRIMITIVE_TRIANGLES:
82 /* gl_TessLevelInner[0] lives at DWord 4. */
83 nir_intrinsic_set_base(intr, 1);
84 mask &= WRITEMASK_X;
85 out_of_bounds = component > 0;
86 break;
87 case TESS_PRIMITIVE_ISOLINES:
88 out_of_bounds = true;
89 break;
90 default:
91 unreachable("Bogus tessellation domain");
92 }
93 } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
94 b->cursor = write ? nir_before_instr(&intr->instr)
95 : nir_after_instr(&intr->instr);
96
97 nir_intrinsic_set_base(intr, 1);
98
99 switch (_primitive_mode) {
100 case TESS_PRIMITIVE_QUADS:
101 case TESS_PRIMITIVE_TRIANGLES:
102 /* Quads: gl_TessLevelOuter[0..3] lives at DWords 7-4 (reversed).
103 * Triangles: gl_TessLevelOuter[0..2] lives at DWords 7-5 (reversed).
104 */
105 if (write) {
106 assert(intr->src[0].ssa->num_components == 4);
107
108 unsigned wzyx[4] = { 3, 2, 1, 0 };
109 src = nir_swizzle(b, intr->src[0].ssa, wzyx, 4);
110 mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2 |
111 !!(mask & WRITEMASK_Z) << 1 | !!(mask & WRITEMASK_W) << 0;
112
113 /* Don't overwrite the inner factor at DWord 4 for triangles */
114 if (_primitive_mode == TESS_PRIMITIVE_TRIANGLES)
115 mask &= ~WRITEMASK_X;
116 } else if (intr->def.num_components > 1) {
117 assert(intr->def.num_components == 4);
118
119 unsigned wzyx[4] = { 3, 2, 1, 0 };
120 dest = nir_swizzle(b, &intr->def, wzyx, 4);
121 } else {
122 nir_intrinsic_set_component(intr, 3 - component);
123 out_of_bounds = component == 3 &&
124 _primitive_mode == TESS_PRIMITIVE_TRIANGLES;
125 }
126 break;
127 case TESS_PRIMITIVE_ISOLINES:
128 /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
129 if (write) {
130 assert(intr->src[0].ssa->num_components == 4);
131
132 nir_def *undef = nir_undef(b, 1, 32);
133 nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
134 nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
135 src = nir_vec4(b, undef, undef, x, y);
136 mask = !!(mask & WRITEMASK_X) << 2 | !!(mask & WRITEMASK_Y) << 3;
137 } else {
138 nir_intrinsic_set_component(intr, 2 + component);
139 out_of_bounds = component > 1;
140 }
141 break;
142 default:
143 unreachable("Bogus tessellation domain");
144 }
145 } else {
146 return false;
147 }
148
149 if (out_of_bounds) {
150 if (!write)
151 nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, 32));
152 nir_instr_remove(&intr->instr);
153 } else if (write) {
154 nir_intrinsic_set_write_mask(intr, mask);
155
156 if (src) {
157 nir_src_rewrite(&intr->src[0], src);
158 }
159 } else if (dest) {
160 nir_def_rewrite_uses_after(&intr->def, dest,
161 dest->parent_instr);
162 }
163
164 return true;
165 }
166
167 static bool
168 is_input(nir_intrinsic_instr *intrin)
169 {
170 return intrin->intrinsic == nir_intrinsic_load_input ||
171 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
172 intrin->intrinsic == nir_intrinsic_load_interpolated_input;
173 }
174
175 static bool
176 is_output(nir_intrinsic_instr *intrin)
177 {
178 return intrin->intrinsic == nir_intrinsic_load_output ||
179 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
180 intrin->intrinsic == nir_intrinsic_store_output ||
181 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
182 }
183
184
185 static bool
186 remap_patch_urb_offsets(nir_block *block, nir_builder *b,
187 const struct intel_vue_map *vue_map,
188 enum tess_primitive_mode tes_primitive_mode)
189 {
190 nir_foreach_instr_safe(instr, block) {
191 if (instr->type != nir_instr_type_intrinsic)
192 continue;
193
194 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
195
196 gl_shader_stage stage = b->shader->info.stage;
197
198 if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
199 (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
200
201 if (remap_tess_levels(b, intrin, tes_primitive_mode))
202 continue;
203
204 int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
205 assert(vue_slot != -1);
206 intrin->const_index[0] = vue_slot;
207
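/* After this remap the final URB location is effectively
 * varying_to_slot[location] + vertex_index * num_per_vertex_slots +
 * indirect_offset: the constant-vertex case folds the product into the
 * base below, while the dynamic case emits an imul/iadd on the offset
 * source instead.
 */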
208 nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
209 if (vertex) {
210 if (nir_src_is_const(*vertex)) {
211 intrin->const_index[0] += nir_src_as_uint(*vertex) *
212 vue_map->num_per_vertex_slots;
213 } else {
214 b->cursor = nir_before_instr(&intrin->instr);
215
216 /* Multiply by the number of per-vertex slots. */
217 nir_def *vertex_offset =
218 nir_imul(b,
219 vertex->ssa,
220 nir_imm_int(b,
221 vue_map->num_per_vertex_slots));
222
223 /* Add it to the existing offset */
224 nir_src *offset = nir_get_io_offset_src(intrin);
225 nir_def *total_offset =
226 nir_iadd(b, vertex_offset,
227 offset->ssa);
228
229 nir_src_rewrite(offset, total_offset);
230 }
231 }
232 }
233 }
234 return true;
235 }
236
237 void
238 elk_nir_lower_vs_inputs(nir_shader *nir,
239 bool edgeflag_is_last,
240 const uint8_t *vs_attrib_wa_flags)
241 {
242 /* Start with the location of the variable's base. */
243 nir_foreach_shader_in_variable(var, nir)
244 var->data.driver_location = var->data.location;
245
246 /* Now use nir_lower_io to walk dereference chains. Attribute arrays are
247 * loaded as one vec4 or dvec4 per element (or matrix column), depending on
248 * whether it is a double-precision type or not.
249 */
250 nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
251 nir_lower_io_lower_64bit_to_32);
252
253 /* This pass needs actual constants */
254 nir_opt_constant_folding(nir);
255
256 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
257
258 elk_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
259
260 /* The last step is to remap VERT_ATTRIB_* to actual registers */
261
262 /* Whether or not we have any system generated values. gl_DrawID is not
263 * included here as it lives in its own vec4.
264 */
265 const bool has_sgvs =
266 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
267 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
268 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
269 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
270
271 const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
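/* Layout produced by the VF, as the rewrites below assume: user attributes
 * occupy slots [0, num_inputs); an optional SGV vec4 follows with
 * first_vertex in .x, base_instance in .y, vertex_id_zero_base in .z, and
 * instance_id in .w; gl_DrawID and IsIndexedDraw live in .x/.y of the vec4
 * after that. The switch below rewrites the system-value intrinsics to
 * load_input at those locations.
 */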
272
273 nir_foreach_function_impl(impl, nir) {
274 nir_builder b = nir_builder_create(impl);
275
276 nir_foreach_block(block, impl) {
277 nir_foreach_instr_safe(instr, block) {
278 if (instr->type != nir_instr_type_intrinsic)
279 continue;
280
281 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
282
283 switch (intrin->intrinsic) {
284 case nir_intrinsic_load_first_vertex:
285 case nir_intrinsic_load_base_instance:
286 case nir_intrinsic_load_vertex_id_zero_base:
287 case nir_intrinsic_load_instance_id:
288 case nir_intrinsic_load_is_indexed_draw:
289 case nir_intrinsic_load_draw_id: {
290 b.cursor = nir_after_instr(&intrin->instr);
291
292 /* gl_VertexID and friends are stored by the VF as the last
293 * vertex element. We convert them to load_input intrinsics at
294 * the right location.
295 */
296 nir_intrinsic_instr *load =
297 nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
298 load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
299
300 nir_intrinsic_set_base(load, num_inputs);
301 switch (intrin->intrinsic) {
302 case nir_intrinsic_load_first_vertex:
303 nir_intrinsic_set_component(load, 0);
304 break;
305 case nir_intrinsic_load_base_instance:
306 nir_intrinsic_set_component(load, 1);
307 break;
308 case nir_intrinsic_load_vertex_id_zero_base:
309 nir_intrinsic_set_component(load, 2);
310 break;
311 case nir_intrinsic_load_instance_id:
312 nir_intrinsic_set_component(load, 3);
313 break;
314 case nir_intrinsic_load_draw_id:
315 case nir_intrinsic_load_is_indexed_draw:
316 /* gl_DrawID and IsIndexedDraw are stored right after
317 * gl_VertexID and friends if any of them exist.
318 */
319 nir_intrinsic_set_base(load, num_inputs + has_sgvs);
320 if (intrin->intrinsic == nir_intrinsic_load_draw_id)
321 nir_intrinsic_set_component(load, 0);
322 else
323 nir_intrinsic_set_component(load, 1);
324 break;
325 default:
326 unreachable("Invalid system value intrinsic");
327 }
328
329 load->num_components = 1;
330 nir_def_init(&load->instr, &load->def, 1, 32);
331 nir_builder_instr_insert(&b, &load->instr);
332
333 nir_def_rewrite_uses(&intrin->def,
334 &load->def);
335 nir_instr_remove(&intrin->instr);
336 break;
337 }
338
339 case nir_intrinsic_load_input: {
340 /* Attributes come in a contiguous block, ordered by their
341 * gl_vert_attrib value. That means we can compute the slot
342 * number for an attribute by masking out the enabled attributes
343 * before it and counting the bits.
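* For example, with inputs_read covering attributes {0, 3, 5}, an
* attribute base of 5 masks down to {0, 3} and popcounts to slot 2.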
344 */
345 int attr = nir_intrinsic_base(intrin);
346 uint64_t inputs_read = nir->info.inputs_read;
347 int slot = -1;
348 if (edgeflag_is_last) {
349 inputs_read &= ~BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG);
350 if (attr == VERT_ATTRIB_EDGEFLAG)
351 slot = num_inputs - 1;
352 }
353 if (slot == -1)
354 slot = util_bitcount64(inputs_read &
355 BITFIELD64_MASK(attr));
356 nir_intrinsic_set_base(intrin, slot);
357 break;
358 }
359
360 default:
361 break; /* Nothing to do */
362 }
363 }
364 }
365 }
366 }
367
368 void
369 elk_nir_lower_vue_inputs(nir_shader *nir,
370 const struct intel_vue_map *vue_map)
371 {
372 nir_foreach_shader_in_variable(var, nir)
373 var->data.driver_location = var->data.location;
374
375 /* Inputs are stored in vec4 slots, so use elk_type_size_vec4(). */
376 nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
377 nir_lower_io_lower_64bit_to_32);
378
379 /* This pass needs actual constants */
380 nir_opt_constant_folding(nir);
381
382 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
383
384 nir_foreach_function_impl(impl, nir) {
385 nir_foreach_block(block, impl) {
386 nir_foreach_instr(instr, block) {
387 if (instr->type != nir_instr_type_intrinsic)
388 continue;
389
390 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
391
392 if (intrin->intrinsic == nir_intrinsic_load_input ||
393 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
394 /* Offset 0 is the VUE header, which contains
395 * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
396 * VARYING_SLOT_PSIZ [.w].
397 */
398 int varying = nir_intrinsic_base(intrin);
399 int vue_slot;
400 switch (varying) {
401 case VARYING_SLOT_PSIZ:
402 nir_intrinsic_set_base(intrin, 0);
403 nir_intrinsic_set_component(intrin, 3);
404 break;
405
406 default:
407 vue_slot = vue_map->varying_to_slot[varying];
408 assert(vue_slot != -1);
409 nir_intrinsic_set_base(intrin, vue_slot);
410 break;
411 }
412 }
413 }
414 }
415 }
416 }
417
418 void
419 elk_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue_map)
420 {
421 nir_foreach_shader_in_variable(var, nir)
422 var->data.driver_location = var->data.location;
423
424 nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
425 nir_lower_io_lower_64bit_to_32);
426
427 /* This pass needs actual constants */
428 nir_opt_constant_folding(nir);
429
430 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
431
432 nir_foreach_function_impl(impl, nir) {
433 nir_builder b = nir_builder_create(impl);
434 nir_foreach_block(block, impl) {
435 remap_patch_urb_offsets(block, &b, vue_map,
436 nir->info.tess._primitive_mode);
437 }
438 }
439 }
440
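/* Replace pixel/centroid barycentrics with their per-sample equivalent.
 * This runs when the key forces per-sample interpolation
 * (persample_interp == ELK_ALWAYS in elk_nir_lower_fs_inputs below), so
 * every interpolated input is evaluated at the sample position.
 */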
441 static bool
442 lower_barycentric_per_sample(nir_builder *b,
443 nir_intrinsic_instr *intrin,
444 UNUSED void *cb_data)
445 {
446 if (intrin->intrinsic != nir_intrinsic_load_barycentric_pixel &&
447 intrin->intrinsic != nir_intrinsic_load_barycentric_centroid)
448 return false;
449
450 b->cursor = nir_before_instr(&intrin->instr);
451 nir_def *centroid =
452 nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
453 nir_intrinsic_interp_mode(intrin));
454 nir_def_rewrite_uses(&intrin->def, centroid);
455 nir_instr_remove(&intrin->instr);
456 return true;
457 }
458
459 /**
460 * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
461 * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
462 *
463 * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
464 * representable in a S0.4 value; a naive conversion would give us -8/16,
465 * which is the opposite of what was intended.
466 *
467 * This is allowed by GL_ARB_gpu_shader5's quantization rules:
468 *
469 * "Not all values of <offset> may be supported; x and y offsets may
470 * be rounded to fixed-point values with the number of fraction bits
471 * given by the implementation-dependent constant
472 * FRAGMENT_INTERPOLATION_OFFSET_BITS."
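* For example, an offset of +0.5 maps to min(7, (int)(0.5 * 16)) = 7,
* i.e. 7/16 of a pixel, while -0.5 maps to -8/16 as expected.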
473 */
474 static bool
475 lower_barycentric_at_offset(nir_builder *b, nir_intrinsic_instr *intrin,
476 void *data)
477 {
478 if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
479 return false;
480
481 b->cursor = nir_before_instr(&intrin->instr);
482
483 assert(intrin->src[0].ssa);
484 nir_def *offset =
485 nir_imin(b, nir_imm_int(b, 7),
486 nir_f2i32(b, nir_fmul_imm(b, intrin->src[0].ssa, 16)));
487
488 nir_src_rewrite(&intrin->src[0], offset);
489
490 return true;
491 }
492
493 void
494 elk_nir_lower_fs_inputs(nir_shader *nir,
495 const struct intel_device_info *devinfo,
496 const struct elk_wm_prog_key *key)
497 {
498 nir_foreach_shader_in_variable(var, nir) {
499 var->data.driver_location = var->data.location;
500
501 /* Apply default interpolation mode.
502 *
503 * Everything defaults to smooth except for the legacy GL color
504 * built-in variables, which might be flat depending on API state.
505 */
506 if (var->data.interpolation == INTERP_MODE_NONE) {
507 const bool flat = key->flat_shade &&
508 (var->data.location == VARYING_SLOT_COL0 ||
509 var->data.location == VARYING_SLOT_COL1);
510
511 var->data.interpolation = flat ? INTERP_MODE_FLAT
512 : INTERP_MODE_SMOOTH;
513 }
514
515 /* On Ironlake and below, there is only one interpolation mode.
516 * Centroid interpolation doesn't mean anything on this hardware --
517 * there is no multisampling.
518 */
519 if (devinfo->ver < 6) {
520 var->data.centroid = false;
521 var->data.sample = false;
522 }
523 }
524
525 nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
526 nir_lower_io_lower_64bit_to_32);
527 if (devinfo->ver >= 11)
528 nir_lower_interpolation(nir, ~0);
529
530 if (key->multisample_fbo == ELK_NEVER) {
531 nir_lower_single_sampled(nir);
532 } else if (key->persample_interp == ELK_ALWAYS) {
533 nir_shader_intrinsics_pass(nir, lower_barycentric_per_sample,
534 nir_metadata_block_index |
535 nir_metadata_dominance,
536 NULL);
537 }
538
539 nir_shader_intrinsics_pass(nir, lower_barycentric_at_offset,
540 nir_metadata_block_index |
541 nir_metadata_dominance,
542 NULL);
543
544 /* This pass needs actual constants */
545 nir_opt_constant_folding(nir);
546
547 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
548 }
549
550 void
551 elk_nir_lower_vue_outputs(nir_shader *nir)
552 {
553 nir_foreach_shader_out_variable(var, nir) {
554 var->data.driver_location = var->data.location;
555 }
556
557 nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
558 nir_lower_io_lower_64bit_to_32);
559 }
560
561 void
562 elk_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue_map,
563 enum tess_primitive_mode tes_primitive_mode)
564 {
565 nir_foreach_shader_out_variable(var, nir) {
566 var->data.driver_location = var->data.location;
567 }
568
569 nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
570 nir_lower_io_lower_64bit_to_32);
571
572 /* This pass needs actual constants */
573 nir_opt_constant_folding(nir);
574
575 nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
576
577 nir_foreach_function_impl(impl, nir) {
578 nir_builder b = nir_builder_create(impl);
579 nir_foreach_block(block, impl) {
580 remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
581 }
582 }
583 }
584
585 void
586 elk_nir_lower_fs_outputs(nir_shader *nir)
587 {
588 nir_foreach_shader_out_variable(var, nir) {
589 var->data.driver_location =
590 SET_FIELD(var->data.index, ELK_NIR_FRAG_OUTPUT_INDEX) |
591 SET_FIELD(var->data.location, ELK_NIR_FRAG_OUTPUT_LOCATION);
592 }
593
594 nir_lower_io(nir, nir_var_shader_out, elk_type_size_dvec4, 0);
595 }
596
597 #define OPT(pass, ...) ({ \
598 bool this_progress = false; \
599 NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
600 if (this_progress) \
601 progress = true; \
602 this_progress; \
603 })
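/* OPT() runs a NIR pass via NIR_PASS, folds its result into a local
 * `progress` flag, and evaluates to whether that particular pass made
 * progress, so callers can chain passes, e.g.:
 *
 *    if (OPT(nir_opt_memcpy))
 *       OPT(nir_split_var_copies);
 *
 * It relies on local `nir` and `progress` variables (possibly UNUSED)
 * being in scope at each use site below.
 */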
604
605 void
606 elk_nir_optimize(nir_shader *nir, bool is_scalar,
607 const struct intel_device_info *devinfo)
608 {
609 bool progress;
610 unsigned lower_flrp =
611 (nir->options->lower_flrp16 ? 16 : 0) |
612 (nir->options->lower_flrp32 ? 32 : 0) |
613 (nir->options->lower_flrp64 ? 64 : 0);
614
615 do {
616 progress = false;
617 OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
618 OPT(nir_opt_deref);
619 if (OPT(nir_opt_memcpy))
620 OPT(nir_split_var_copies);
621 OPT(nir_lower_vars_to_ssa);
622 if (!nir->info.var_copies_lowered) {
623 /* Only run this pass if nir_lower_var_copies was not called
624 * yet. That would lower away any copy_deref instructions and we
625 * don't want to introduce any more.
626 */
627 OPT(nir_opt_find_array_copies);
628 }
629 OPT(nir_opt_copy_prop_vars);
630 OPT(nir_opt_dead_write_vars);
631 OPT(nir_opt_combine_stores, nir_var_all);
632
633 OPT(nir_opt_ray_queries);
634 OPT(nir_opt_ray_query_ranges);
635
636 if (is_scalar) {
637 OPT(nir_lower_alu_to_scalar, NULL, NULL);
638 } else {
639 OPT(nir_opt_shrink_stores, true);
640 OPT(nir_opt_shrink_vectors);
641 }
642
643 OPT(nir_copy_prop);
644
645 if (is_scalar) {
646 OPT(nir_lower_phis_to_scalar, false);
647 }
648
649 OPT(nir_copy_prop);
650 OPT(nir_opt_dce);
651 OPT(nir_opt_cse);
652 OPT(nir_opt_combine_stores, nir_var_all);
653
654 /* Passing 0 to the peephole select pass causes it to convert
655 * if-statements that contain only move instructions in the branches
656 * regardless of the count.
657 *
658 * Passing 1 to the peephole select pass causes it to convert
659 * if-statements that contain at most a single ALU instruction (total)
660 * in both branches. Before Gfx6, some math instructions were
661 * prohibitively expensive and the results of compare operations needed an
662 * extra resolve step. For these reasons, this pass is more harmful
663 * than good on those platforms.
664 *
665 * For indirect loads of uniforms (push constants), we assume that array
666 * indices will nearly always be in bounds and the cost of the load is
667 * low. Therefore there shouldn't be a performance benefit to avoid it.
668 * However, in vec4 tessellation shaders, these loads operate by
669 * actually pulling from memory.
670 */
671 const bool is_vec4_tessellation = !is_scalar &&
672 (nir->info.stage == MESA_SHADER_TESS_CTRL ||
673 nir->info.stage == MESA_SHADER_TESS_EVAL);
674 OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
675 OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
676 devinfo->ver >= 6);
677
678 OPT(nir_opt_intrinsics);
679 OPT(nir_opt_idiv_const, 32);
680 OPT(nir_opt_algebraic);
681
682 /* BFI2 did not exist until Gfx7, so there's no point in trying to
683 * optimize an instruction that should not get generated.
684 */
685 if (devinfo->ver >= 7)
686 OPT(nir_opt_reassociate_bfi);
687
688 OPT(nir_lower_constant_convert_alu_types);
689 OPT(nir_opt_constant_folding);
690
691 if (lower_flrp != 0) {
692 if (OPT(nir_lower_flrp,
693 lower_flrp,
694 false /* always_precise */)) {
695 OPT(nir_opt_constant_folding);
696 }
697
698 /* Nothing should rematerialize any flrps, so we only need to do this
699 * lowering once.
700 */
701 lower_flrp = 0;
702 }
703
704 OPT(nir_opt_dead_cf);
705 if (OPT(nir_opt_loop)) {
706 /* If nir_opt_loop makes progress, then we need to clean
707 * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
708 * to make progress.
709 */
710 OPT(nir_copy_prop);
711 OPT(nir_opt_dce);
712 }
713 OPT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
714 OPT(nir_opt_conditional_discard);
715 if (nir->options->max_unroll_iterations != 0) {
716 OPT(nir_opt_loop_unroll);
717 }
718 OPT(nir_opt_remove_phis);
719 OPT(nir_opt_gcm, false);
720 OPT(nir_opt_undef);
721 OPT(nir_lower_pack);
722 } while (progress);
723
724 /* Work around a Gfxbench unused local sampler variable which would trigger
725 * an assert in the opt_large_constants pass.
726 */
727 OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
728 }
729
730 static unsigned
731 lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
732 {
733 const struct elk_compiler *compiler = (const struct elk_compiler *) data;
734 const struct intel_device_info *devinfo = compiler->devinfo;
735
736 switch (instr->type) {
737 case nir_instr_type_alu: {
738 nir_alu_instr *alu = nir_instr_as_alu(instr);
739 switch (alu->op) {
740 case nir_op_bit_count:
741 case nir_op_ufind_msb:
742 case nir_op_ifind_msb:
743 case nir_op_find_lsb:
744 /* These are handled specially because the destination is always
745 * 32-bit and so the bit size of the instruction is given by the
746 * source.
747 */
748 return alu->src[0].src.ssa->bit_size >= 32 ? 0 : 32;
749 default:
750 break;
751 }
752
753 if (alu->def.bit_size >= 32)
754 return 0;
755
756 /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
757 * 8-bit ABS or NEG instruction should eventually get copy propagated
758 * into the MOV that does the type conversion. This results in far
759 * fewer MOV instructions.
760 */
761 switch (alu->op) {
762 case nir_op_idiv:
763 case nir_op_imod:
764 case nir_op_irem:
765 case nir_op_udiv:
766 case nir_op_umod:
767 case nir_op_fceil:
768 case nir_op_ffloor:
769 case nir_op_ffract:
770 case nir_op_fround_even:
771 case nir_op_ftrunc:
772 return 32;
773 case nir_op_frcp:
774 case nir_op_frsq:
775 case nir_op_fsqrt:
776 case nir_op_fpow:
777 case nir_op_fexp2:
778 case nir_op_flog2:
779 case nir_op_fsin:
780 case nir_op_fcos:
781 return devinfo->ver < 9 ? 32 : 0;
782 case nir_op_isign:
783 assert(!"Should have been lowered by nir_opt_algebraic.");
784 return 0;
785 default:
786 if (nir_op_infos[alu->op].num_inputs >= 2 &&
787 alu->def.bit_size == 8)
788 return 16;
789
790 if (nir_alu_instr_is_comparison(alu) &&
791 alu->src[0].src.ssa->bit_size == 8)
792 return 16;
793
794 return 0;
795 }
796 break;
797 }
798
799 case nir_instr_type_intrinsic: {
800 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
801 switch (intrin->intrinsic) {
802 case nir_intrinsic_read_invocation:
803 case nir_intrinsic_read_first_invocation:
804 case nir_intrinsic_vote_feq:
805 case nir_intrinsic_vote_ieq:
806 case nir_intrinsic_shuffle:
807 case nir_intrinsic_shuffle_xor:
808 case nir_intrinsic_shuffle_up:
809 case nir_intrinsic_shuffle_down:
810 case nir_intrinsic_quad_broadcast:
811 case nir_intrinsic_quad_swap_horizontal:
812 case nir_intrinsic_quad_swap_vertical:
813 case nir_intrinsic_quad_swap_diagonal:
814 if (intrin->src[0].ssa->bit_size == 8)
815 return 16;
816 return 0;
817
818 case nir_intrinsic_reduce:
819 case nir_intrinsic_inclusive_scan:
820 case nir_intrinsic_exclusive_scan:
821 /* There are a couple of register region issues that make things
822 * complicated for 8-bit types:
823 *
824 * 1. Only raw moves are allowed to write to a packed 8-bit
825 * destination.
826 * 2. If we use a strided destination, the efficient way to do
827 * scan operations ends up using strides that are too big to
828 * encode in an instruction.
829 *
830 * To get around these issues, we just do all 8-bit scan operations
831 * in 16 bits. It's actually fewer instructions than what we'd have
832 * to do if we were trying to do it in native 8-bit types and the
833 * results are the same once we truncate to 8 bits at the end.
834 */
835 if (intrin->def.bit_size == 8)
836 return 16;
837 return 0;
838
839 default:
840 return 0;
841 }
842 break;
843 }
844
845 case nir_instr_type_phi: {
846 nir_phi_instr *phi = nir_instr_as_phi(instr);
847 if (phi->def.bit_size == 8)
848 return 16;
849 return 0;
850 }
851
852 default:
853 return 0;
854 }
855 }
856
857 /* On gfx12.5+, if the offsets are not both constant and in the [-8, 7] range,
858 * we will have nir_lower_tex() lower the source offset by returning true from
859 * this filter function.
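* For example, a constant offset of (4, -9) is out of range on y and gets
* lowered, as does any non-constant offset.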
860 */
861 static bool
862 lower_xehp_tg4_offset_filter(const nir_instr *instr, UNUSED const void *data)
863 {
864 if (instr->type != nir_instr_type_tex)
865 return false;
866
867 nir_tex_instr *tex = nir_instr_as_tex(instr);
868
869 if (tex->op != nir_texop_tg4)
870 return false;
871
872 int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
873 if (offset_index < 0)
874 return false;
875
876 if (!nir_src_is_const(tex->src[offset_index].src))
877 return true;
878
879 int64_t offset_x = nir_src_comp_as_int(tex->src[offset_index].src, 0);
880 int64_t offset_y = nir_src_comp_as_int(tex->src[offset_index].src, 1);
881
882 return offset_x < -8 || offset_x > 7 || offset_y < -8 || offset_y > 7;
883 }
884
885 /* Does some simple lowering and runs the standard suite of optimizations
886 *
887 * This is intended to be called more-or-less directly after you get the
888 * shader out of GLSL or some other source. While it is geared towards i965,
889 * it is not at all generator-specific.
890 */
891 void
892 elk_preprocess_nir(const struct elk_compiler *compiler, nir_shader *nir,
893 const struct elk_nir_compiler_opts *opts)
894 {
895 const struct intel_device_info *devinfo = compiler->devinfo;
896 UNUSED bool progress; /* Written by OPT */
897
898 const bool is_scalar = compiler->scalar_stage[nir->info.stage];
899
900 nir_validate_ssa_dominance(nir, "before elk_preprocess_nir");
901
902 OPT(nir_lower_frexp);
903
904 if (is_scalar) {
905 OPT(nir_lower_alu_to_scalar, NULL, NULL);
906 }
907
908 if (nir->info.stage == MESA_SHADER_GEOMETRY)
909 OPT(nir_lower_gs_intrinsics, 0);
910
911 /* See also elk_nir_trig_workarounds.py */
912 if (compiler->precise_trig &&
913 !(devinfo->ver >= 10 || devinfo->platform == INTEL_PLATFORM_KBL))
914 OPT(elk_nir_apply_trig_workarounds);
915
916 /* This workaround exists for performance reasons. Since it requires not
917 * setting RENDER_SURFACE_STATE::SurfaceArray when the array length is 1,
918 * we lose the HW robustness feature in that case.
919 *
920 * So when robust image access is enabled, just avoid the workaround.
921 */
922 if (intel_needs_workaround(devinfo, 1806565034) && !opts->robust_image_access)
923 OPT(intel_nir_clamp_image_1d_2d_array_sizes);
924
925 const nir_lower_tex_options tex_options = {
926 .lower_txp = ~0,
927 .lower_txf_offset = true,
928 .lower_rect_offset = true,
929 .lower_txd_cube_map = true,
930 /* For below, see bspec 45942, "Enable new message layout for cube array" */
931 .lower_txd_3d = devinfo->verx10 >= 125,
932 .lower_txd_array = devinfo->verx10 >= 125,
933 .lower_txb_shadow_clamp = true,
934 .lower_txd_shadow_clamp = true,
935 .lower_txd_offset_clamp = true,
936 .lower_tg4_offsets = true,
937 .lower_txs_lod = true, /* Wa_14012320009 */
938 .lower_offset_filter =
939 devinfo->verx10 >= 125 ? lower_xehp_tg4_offset_filter : NULL,
940 .lower_invalid_implicit_lod = true,
941 };
942
943 /* In the case where TG4 coords are lowered to offsets and we have a
944 * lower_xehp_tg4_offset_filter lowering those offsets further, we need to
945 * rerun the pass because the instructions inserted by the first lowering
946 * are not visible during that first pass.
947 */
948 if (OPT(nir_lower_tex, &tex_options))
949 OPT(nir_lower_tex, &tex_options);
950 OPT(nir_normalize_cubemap_coords);
951
952 OPT(nir_lower_global_vars_to_local);
953
954 OPT(nir_split_var_copies);
955 OPT(nir_split_struct_vars, nir_var_function_temp);
956
957 elk_nir_optimize(nir, is_scalar, devinfo);
958
959 OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
960 if (OPT(nir_lower_int64_float_conversions)) {
961 OPT(nir_opt_algebraic);
962 OPT(nir_lower_doubles, opts->softfp64,
963 nir->options->lower_doubles_options);
964 }
965
966 OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
967
968 /* Lower a bunch of stuff */
969 OPT(nir_lower_var_copies);
970
971 /* This needs to be run after the first optimization pass but before we
972 * lower indirect derefs away
973 */
974 if (compiler->supports_shader_constants) {
975 OPT(nir_opt_large_constants, NULL, 32);
976 }
977
978 if (is_scalar) {
979 OPT(nir_lower_load_const_to_scalar);
980 }
981
982 OPT(nir_lower_system_values);
983 nir_lower_compute_system_values_options lower_csv_options = {
984 .has_base_workgroup_id = nir->info.stage == MESA_SHADER_COMPUTE,
985 };
986 OPT(nir_lower_compute_system_values, &lower_csv_options);
987
988 const nir_lower_subgroups_options subgroups_options = {
989 .ballot_bit_size = 32,
990 .ballot_components = 1,
991 .lower_to_scalar = true,
992 .lower_vote_trivial = !is_scalar,
993 .lower_relative_shuffle = true,
994 .lower_quad_broadcast_dynamic = true,
995 .lower_elect = true,
996 .lower_inverse_ballot = true,
997 .lower_rotate_to_shuffle = true,
998 };
999 OPT(nir_lower_subgroups, &subgroups_options);
1000
1001 nir_variable_mode indirect_mask =
1002 elk_nir_no_indirect_mask(compiler, nir->info.stage);
1003 OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
1004
1005 /* Even in cases where we can handle indirect temporaries via scratch,
1006 * it can still be expensive. Lower indirects on small arrays to
1007 * conditional load/stores.
1008 *
1009 * The threshold of 16 was chosen semi-arbitrarily. The idea is that an
1010 * indirect on an array of 16 elements is about 30 instructions at which
1011 * point, you may be better off doing a send. With a SIMD8 program, 16
1012 * floats is 1/8 of the entire register file. Any array larger than that
1013 * is likely to cause pressure issues. Also, this value is sufficiently
1014 * high that the benchmarks known to suffer from large temporary array
1015 * issues are helped but nothing else in shader-db is hurt except for maybe
1016 * that one kerbal space program shader.
1017 */
1018 if (is_scalar && !(indirect_mask & nir_var_function_temp))
1019 OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
1020
1021 /* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
1022 * SSBOs, our back-end is capable of loading an entire vec4 at a time and
1023 * we would like to take advantage of that whenever possible regardless of
1024 * whether or not the app gives us full loads. This should allow the
1025 * optimizer to combine UBO and SSBO load operations and save us some send
1026 * messages.
1027 */
1028 OPT(nir_lower_array_deref_of_vec,
1029 nir_var_mem_ubo | nir_var_mem_ssbo,
1030 nir_lower_direct_array_deref_of_vec_load);
1031
1032 /* Clamp load_per_vertex_input of the TCS stage so that we do not generate
1033 * loads reading out of bounds. We can do this here because we called
1034 * nir_lower_system_values above.
1035 */
1036 if (nir->info.stage == MESA_SHADER_TESS_CTRL &&
1037 compiler->use_tcs_multi_patch)
1038 OPT(intel_nir_clamp_per_vertex_loads);
1039
1040 /* Get rid of split copies */
1041 elk_nir_optimize(nir, is_scalar, devinfo);
1042 }
1043
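/* Rewrite loads of the shader-input variables selected by the bitmask in
 * `data` (a uint64_t of VARYING_SLOT bits) to a constant zero and drop the
 * original load.
 */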
1044 static bool
1045 elk_nir_zero_inputs_instr(struct nir_builder *b, nir_intrinsic_instr *intrin,
1046 void *data)
1047 {
1048 if (intrin->intrinsic != nir_intrinsic_load_deref)
1049 return false;
1050
1051 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1052 if (!nir_deref_mode_is(deref, nir_var_shader_in))
1053 return false;
1054
1055 if (deref->deref_type != nir_deref_type_var)
1056 return false;
1057
1058 nir_variable *var = deref->var;
1059
1060 uint64_t zero_inputs = *(uint64_t *)data;
1061 if (!(BITFIELD64_BIT(var->data.location) & zero_inputs))
1062 return false;
1063
1064 b->cursor = nir_before_instr(&intrin->instr);
1065
1066 nir_def *zero = nir_imm_zero(b, 1, 32);
1067
1068 nir_def_rewrite_uses(&intrin->def, zero);
1069
1070 nir_instr_remove(&intrin->instr);
1071
1072 return true;
1073 }
1074
1075 static bool
1076 elk_nir_zero_inputs(nir_shader *shader, uint64_t *zero_inputs)
1077 {
1078 return nir_shader_intrinsics_pass(shader, elk_nir_zero_inputs_instr,
1079 nir_metadata_block_index | nir_metadata_dominance,
1080 zero_inputs);
1081 }
1082
1083 void
1084 elk_nir_link_shaders(const struct elk_compiler *compiler,
1085 nir_shader *producer, nir_shader *consumer)
1086 {
1087 const struct intel_device_info *devinfo = compiler->devinfo;
1088
1089 nir_lower_io_arrays_to_elements(producer, consumer);
1090 nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
1091 nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
1092
1093 const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
1094 const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
1095
1096 if (p_is_scalar && c_is_scalar) {
1097 NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
1098 NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
1099 elk_nir_optimize(producer, p_is_scalar, devinfo);
1100 elk_nir_optimize(consumer, c_is_scalar, devinfo);
1101 }
1102
1103 if (nir_link_opt_varyings(producer, consumer))
1104 elk_nir_optimize(consumer, c_is_scalar, devinfo);
1105
1106 NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
1107 NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
1108
1109 if (nir_remove_unused_varyings(producer, consumer)) {
1110 if (should_print_nir(producer)) {
1111 printf("nir_remove_unused_varyings\n");
1112 nir_print_shader(producer, stdout);
1113 }
1114 if (should_print_nir(consumer)) {
1115 printf("nir_remove_unused_varyings\n");
1116 nir_print_shader(consumer, stdout);
1117 }
1118
1119 NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1120 NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
1121
1122 /* The backend might not be able to handle indirects on
1123 * temporaries so we need to lower indirects on any of the
1124 * varyings we have demoted here.
1125 */
1126 NIR_PASS(_, producer, nir_lower_indirect_derefs,
1127 elk_nir_no_indirect_mask(compiler, producer->info.stage),
1128 UINT32_MAX);
1129 NIR_PASS(_, consumer, nir_lower_indirect_derefs,
1130 elk_nir_no_indirect_mask(compiler, consumer->info.stage),
1131 UINT32_MAX);
1132
1133 elk_nir_optimize(producer, p_is_scalar, devinfo);
1134 elk_nir_optimize(consumer, c_is_scalar, devinfo);
1135 }
1136
1137 NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
1138
1139 if (producer->info.stage == MESA_SHADER_TESS_CTRL &&
1140 producer->options->vectorize_tess_levels)
1141 NIR_PASS_V(producer, nir_vectorize_tess_levels);
1142
1143 NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
1144 NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);
1145
1146 if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
1147 /* Calling lower_io_to_vector creates output variable writes with
1148 * write-masks. On non-TCS outputs, the back-end can't handle it and we
1149 * need to call nir_lower_io_to_temporaries to get rid of them. This,
1150 * in turn, creates temporary variables and extra copy_deref intrinsics
1151 * that we need to clean up.
1152 */
1153 NIR_PASS_V(producer, nir_lower_io_to_temporaries,
1154 nir_shader_get_entrypoint(producer), true, false);
1155 NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1156 NIR_PASS(_, producer, nir_split_var_copies);
1157 NIR_PASS(_, producer, nir_lower_var_copies);
1158 }
1159 }
1160
1161 bool
1162 elk_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
1163 unsigned bit_size,
1164 unsigned num_components,
1165 nir_intrinsic_instr *low,
1166 nir_intrinsic_instr *high,
1167 void *data)
1168 {
1169 /* Don't combine things to generate 64-bit loads/stores. We have to split
1170 * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
1171 * we don't want to make a mess for the back-end.
1172 */
1173 if (bit_size > 32)
1174 return false;
1175
1176 if (low->intrinsic == nir_intrinsic_load_global_const_block_intel ||
1177 low->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
1178 low->intrinsic == nir_intrinsic_load_ssbo_uniform_block_intel ||
1179 low->intrinsic == nir_intrinsic_load_shared_uniform_block_intel ||
1180 low->intrinsic == nir_intrinsic_load_global_constant_uniform_block_intel) {
1181 if (num_components > 4) {
1182 if (!util_is_power_of_two_nonzero(num_components))
1183 return false;
1184
1185 if (bit_size != 32)
1186 return false;
1187
1188 if (num_components > 32)
1189 return false;
1190 }
1191 } else {
1192 /* We can handle at most a vec4 right now. Anything bigger would get
1193 * immediately split by elk_nir_lower_mem_access_bit_sizes anyway.
1194 */
1195 if (num_components > 4)
1196 return false;
1197 }
1198
1199
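/* Compute the guaranteed alignment of the combined access: the lowest set
 * bit of align_offset, or align_mul itself when the offset is a multiple
 * of it. For example, align_mul = 16 with align_offset = 4 yields
 * align = 4, which is sufficient for the 32-bit accesses allowed above.
 */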
1200 uint32_t align;
1201 if (align_offset)
1202 align = 1 << (ffs(align_offset) - 1);
1203 else
1204 align = align_mul;
1205
1206 if (align < bit_size / 8)
1207 return false;
1208
1209 return true;
1210 }
1211
1212 static
1213 bool combine_all_memory_barriers(nir_intrinsic_instr *a,
1214 nir_intrinsic_instr *b,
1215 void *data)
1216 {
1217 /* Combine control barriers with identical memory semantics. This prevents
1218 * the second barrier from generating a spurious fence message identical to
1219 * the one emitted for the first barrier.
1220 */
1221 if (nir_intrinsic_memory_modes(a) == nir_intrinsic_memory_modes(b) &&
1222 nir_intrinsic_memory_semantics(a) == nir_intrinsic_memory_semantics(b) &&
1223 nir_intrinsic_memory_scope(a) == nir_intrinsic_memory_scope(b)) {
1224 nir_intrinsic_set_execution_scope(a, MAX2(nir_intrinsic_execution_scope(a),
1225 nir_intrinsic_execution_scope(b)));
1226 return true;
1227 }
1228
1229 /* Only combine pure memory barriers */
1230 if ((nir_intrinsic_execution_scope(a) != SCOPE_NONE) ||
1231 (nir_intrinsic_execution_scope(b) != SCOPE_NONE))
1232 return false;
1233
1234 /* Translation to backend IR will get rid of modes we don't care about, so
1235 * no harm in always combining them.
1236 *
1237 * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
1238 * scheduling so that it can take advantage of the different semantics.
1239 */
1240 nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
1241 nir_intrinsic_memory_modes(b));
1242 nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
1243 nir_intrinsic_memory_semantics(b));
1244 nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
1245 nir_intrinsic_memory_scope(b)));
1246 return true;
1247 }
1248
1249 static nir_mem_access_size_align
1250 get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
1251 uint8_t bit_size, uint32_t align_mul, uint32_t align_offset,
1252 bool offset_is_const, const void *cb_data)
1253 {
1254 const uint32_t align = nir_combined_align(align_mul, align_offset);
1255
1256 switch (intrin) {
1257 case nir_intrinsic_load_ssbo:
1258 case nir_intrinsic_load_shared:
1259 case nir_intrinsic_load_scratch:
1260 /* The offset is constant so we can use a 32-bit load and just shift it
1261 * around as needed.
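* For example, 6 bytes at align_mul = 4, align_offset = 2 gives pad = 2
* and comps32 = DIV_ROUND_UP(6 + 2, 4) = 2, i.e. a 2-component 32-bit
* load covering the unaligned span.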
1262 */
1263 if (align < 4 && offset_is_const) {
1264 assert(util_is_power_of_two_nonzero(align_mul) && align_mul >= 4);
1265 const unsigned pad = align_offset % 4;
1266 const unsigned comps32 = MIN2(DIV_ROUND_UP(bytes + pad, 4), 4);
1267 return (nir_mem_access_size_align) {
1268 .bit_size = 32,
1269 .num_components = comps32,
1270 .align = 4,
1271 };
1272 }
1273 break;
1274
1275 default:
1276 break;
1277 }
1278
1279 const bool is_load = nir_intrinsic_infos[intrin].has_dest;
1280 const bool is_scratch = intrin == nir_intrinsic_load_scratch ||
1281 intrin == nir_intrinsic_store_scratch;
1282
1283 if (align < 4 || bytes < 4) {
1284 /* Choose a byte, word, or dword */
1285 bytes = MIN2(bytes, 4);
1286 if (bytes == 3)
1287 bytes = is_load ? 4 : 2;
1288
1289 if (is_scratch) {
1290 /* The way scratch address swizzling works in the back-end, it
1291 * happens at a DWORD granularity so we can't have a single load
1292 * or store cross a DWORD boundary.
1293 */
1294 if ((align_offset % 4) + bytes > MIN2(align_mul, 4))
1295 bytes = MIN2(align_mul, 4) - (align_offset % 4);
1296
1297 /* Must be a power of two */
1298 if (bytes == 3)
1299 bytes = 2;
1300 }
1301
1302 return (nir_mem_access_size_align) {
1303 .bit_size = bytes * 8,
1304 .num_components = 1,
1305 .align = 1,
1306 };
1307 } else {
1308 bytes = MIN2(bytes, 16);
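/* Dword-aligned case: e.g. a 12-byte load becomes a 3-component 32-bit
 * access. Loads round the byte count up and stores round it down, while
 * scratch is always split into single 32-bit components.
 */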
1309 return (nir_mem_access_size_align) {
1310 .bit_size = 32,
1311 .num_components = is_scratch ? 1 :
1312 is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
1313 .align = 4,
1314 };
1315 }
1316 }
1317
1318 static void
1319 elk_vectorize_lower_mem_access(nir_shader *nir,
1320 const struct elk_compiler *compiler,
1321 enum elk_robustness_flags robust_flags)
1322 {
1323 bool progress = false;
1324 const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1325
1326 if (is_scalar) {
1327 nir_load_store_vectorize_options options = {
1328 .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
1329 nir_var_mem_global | nir_var_mem_shared,
1330 .callback = elk_nir_should_vectorize_mem,
1331 .robust_modes = (nir_variable_mode)0,
1332 };
1333
1334 if (robust_flags & ELK_ROBUSTNESS_UBO)
1335 options.robust_modes |= nir_var_mem_ubo | nir_var_mem_global;
1336 if (robust_flags & ELK_ROBUSTNESS_SSBO)
1337 options.robust_modes |= nir_var_mem_ssbo | nir_var_mem_global;
1338
1339 OPT(nir_opt_load_store_vectorize, &options);
1340
1341 /* Only run the blockify optimization on Gfx9+. Although prior HW
1342 * versions have support for block loads, they have limitations on
1343 * alignment and require split sends, which are not supported
1344 * there.
1345 */
1346 if (compiler->devinfo->ver >= 9) {
1347 /* Required for nir_divergence_analysis() */
1348 OPT(nir_convert_to_lcssa, true, true);
1349
1350 /* When HW supports block loads, using the divergence analysis, try
1351 * to find uniform SSBO loads and turn them into block loads.
1352 *
1353 * Rerun the vectorizer after that to make the largest possible block
1354 * loads.
1355 *
1356 * This is a win on 2 fronts :
1357 * - fewer send messages
1358 * - reduced register pressure
1359 */
1360 nir_divergence_analysis(nir);
1361 if (OPT(intel_nir_blockify_uniform_loads, compiler->devinfo))
1362 OPT(nir_opt_load_store_vectorize, &options);
1363 OPT(nir_opt_remove_phis);
1364 }
1365 }
1366
1367 nir_lower_mem_access_bit_sizes_options mem_access_options = {
1368 .modes = nir_var_mem_ssbo |
1369 nir_var_mem_constant |
1370 nir_var_shader_temp |
1371 nir_var_function_temp |
1372 nir_var_mem_global |
1373 nir_var_mem_shared,
1374 .callback = get_mem_access_size_align,
1375 };
1376 OPT(nir_lower_mem_access_bit_sizes, &mem_access_options);
1377
1378 while (progress) {
1379 progress = false;
1380
1381 OPT(nir_lower_pack);
1382 OPT(nir_copy_prop);
1383 OPT(nir_opt_dce);
1384 OPT(nir_opt_cse);
1385 OPT(nir_opt_algebraic);
1386 OPT(nir_opt_constant_folding);
1387 }
1388 }
1389
1390 static bool
1391 nir_shader_has_local_variables(const nir_shader *nir)
1392 {
1393 nir_foreach_function_impl(impl, nir) {
1394 if (!exec_list_is_empty(&impl->locals))
1395 return true;
1396 }
1397
1398 return false;
1399 }
1400
1401 /* Prepare the given shader for codegen
1402 *
1403 * This function is intended to be called right before going into the actual
1404 * backend and is highly backend-specific. Also, once this function has been
1405 * called on a shader, it will no longer be in SSA form so most optimizations
1406 * will not work.
1407 */
1408 void
1409 elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
1410 bool debug_enabled,
1411 enum elk_robustness_flags robust_flags)
1412 {
1413 const struct intel_device_info *devinfo = compiler->devinfo;
1414 const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1415
1416 UNUSED bool progress; /* Written by OPT */
1417
1418 OPT(intel_nir_lower_sparse_intrinsics);
1419
1420 OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1421
1422 OPT(nir_opt_combine_barriers, combine_all_memory_barriers, NULL);
1423
1424 do {
1425 progress = false;
1426 OPT(nir_opt_algebraic_before_ffma);
1427 } while (progress);
1428
1429 if (devinfo->verx10 >= 125) {
1430 /* Lower integer division by constants before nir_lower_idiv. */
1431 OPT(nir_opt_idiv_const, 32);
1432 const nir_lower_idiv_options options = {
1433 .allow_fp16 = false
1434 };
1435 OPT(nir_lower_idiv, &options);
1436 }
1437
1438 if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage))
1439 NIR_PASS(_, nir, intel_nir_lower_shading_rate_output);
1440
1441 elk_nir_optimize(nir, is_scalar, devinfo);
1442
1443 if (is_scalar && nir_shader_has_local_variables(nir)) {
1444 OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
1445 glsl_get_natural_size_align_bytes);
1446 OPT(nir_lower_explicit_io, nir_var_function_temp,
1447 nir_address_format_32bit_offset);
1448 elk_nir_optimize(nir, is_scalar, devinfo);
1449 }
1450
1451 elk_vectorize_lower_mem_access(nir, compiler, robust_flags);
1452
1453 if (OPT(nir_lower_int64))
1454 elk_nir_optimize(nir, is_scalar, devinfo);
1455
1456 if (devinfo->ver >= 6) {
1457 /* Try to fuse multiply-adds; if successful, run shrink_vectors to
1458 * keep peephole_ffma from generating things like this:
1459 * vec16 ssa_0 = ...
1460 * vec16 ssa_1 = fneg ssa_0
1461 * vec1 ssa_2 = ffma ssa_1, ...
1462 *
1463 * We want this instead :
1464 * vec16 ssa_0 = ...
1465 * vec1 ssa_1 = fneg ssa_0.x
1466 * vec1 ssa_2 = ffma ssa_1, ...
1467 */
1468 if (OPT(intel_nir_opt_peephole_ffma))
1469 OPT(nir_opt_shrink_vectors);
1470 }
1471
1472 if (is_scalar)
1473 OPT(intel_nir_opt_peephole_imul32x16);
1474
1475 if (OPT(nir_opt_comparison_pre)) {
1476 OPT(nir_copy_prop);
1477 OPT(nir_opt_dce);
1478 OPT(nir_opt_cse);
1479
1480 /* Do the select peephole again. nir_opt_comparison_pre (combined with
1481 * the other optimization passes) will have removed at least one
1482 * instruction from one of the branches of the if-statement, so now it
1483 * might be under the threshold of conversion to bcsel.
1484 *
1485 * See elk_nir_optimize for the explanation of is_vec4_tessellation.
1486 */
1487 const bool is_vec4_tessellation = !is_scalar &&
1488 (nir->info.stage == MESA_SHADER_TESS_CTRL ||
1489 nir->info.stage == MESA_SHADER_TESS_EVAL);
1490 OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
1491 OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
1492 compiler->devinfo->ver >= 6);
1493 }
1494
1495 do {
1496 progress = false;
1497 if (OPT(nir_opt_algebraic_late)) {
1498 /* At this late stage, anything that makes more constants will wreak
1499 * havoc on the vec4 backend. The handling of constants in the vec4
1500 * backend is not good.
1501 */
1502 if (is_scalar)
1503 OPT(nir_opt_constant_folding);
1504
1505 OPT(nir_copy_prop);
1506 OPT(nir_opt_dce);
1507 OPT(nir_opt_cse);
1508 }
1509 } while (progress);
1510
1511
1512 if (OPT(nir_lower_fp16_casts, nir_lower_fp16_split_fp64)) {
1513 if (OPT(nir_lower_int64)) {
1514 elk_nir_optimize(nir, is_scalar, devinfo);
1515 }
1516 }
1517
1518 OPT(intel_nir_lower_conversions);
1519
1520 if (is_scalar)
1521 OPT(nir_lower_alu_to_scalar, NULL, NULL);
1522
1523 while (OPT(nir_opt_algebraic_distribute_src_mods)) {
1524 if (is_scalar)
1525 OPT(nir_opt_constant_folding);
1526
1527 OPT(nir_copy_prop);
1528 OPT(nir_opt_dce);
1529 OPT(nir_opt_cse);
1530 }
1531
1532 OPT(nir_copy_prop);
1533 OPT(nir_opt_dce);
1534 OPT(nir_opt_move, nir_move_comparisons);
1535 OPT(nir_opt_dead_cf);
1536
1537 bool divergence_analysis_dirty = false;
1538 NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1539 NIR_PASS_V(nir, nir_divergence_analysis);
1540
1541 /* TODO: Enable nir_opt_uniform_atomics on Gfx7.x too.
1542 * It currently fails Vulkan tests on Haswell for an unknown reason.
1543 */
1544 bool opt_uniform_atomic_stage_allowed = devinfo->ver >= 8;
1545
1546 if (opt_uniform_atomic_stage_allowed && OPT(nir_opt_uniform_atomics)) {
1547 const nir_lower_subgroups_options subgroups_options = {
1548 .ballot_bit_size = 32,
1549 .ballot_components = 1,
1550 .lower_elect = true,
1551 };
1552 OPT(nir_lower_subgroups, &subgroups_options);
1553
1554 if (OPT(nir_lower_int64))
1555 elk_nir_optimize(nir, is_scalar, devinfo);
1556
1557 divergence_analysis_dirty = true;
1558 }
1559
1560 /* Do this only after the last opt_gcm. GCM will undo this lowering. */
1561 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
1562 if (divergence_analysis_dirty) {
1563 NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1564 NIR_PASS_V(nir, nir_divergence_analysis);
1565 }
1566
1567 OPT(intel_nir_lower_non_uniform_barycentric_at_sample);
1568 }
1569
1570 /* Clean up LCSSA phis */
1571 OPT(nir_opt_remove_phis);
1572
1573 OPT(nir_lower_bool_to_int32);
1574 OPT(nir_copy_prop);
1575 OPT(nir_opt_dce);
1576
1577 OPT(nir_lower_locals_to_regs, 32);
1578
1579 if (unlikely(debug_enabled)) {
1580 /* Re-index SSA defs so we print more sensible numbers. */
1581 nir_foreach_function_impl(impl, nir) {
1582 nir_index_ssa_defs(impl);
1583 }
1584
1585 fprintf(stderr, "NIR (SSA form) for %s shader:\n",
1586 _mesa_shader_stage_to_string(nir->info.stage));
1587 nir_print_shader(nir, stderr);
1588 }
1589
1590 nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");
1591
1592 /* Rerun the divergence analysis before convert_from_ssa as this pass has
1593 * some asserts on consistent divergence flags.
1594 */
1595 NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1596 NIR_PASS_V(nir, nir_divergence_analysis);
1597 OPT(nir_opt_remove_phis);
1598
1599 OPT(nir_convert_from_ssa, true);
1600
1601 if (!is_scalar) {
1602 OPT(nir_move_vec_src_uses_to_dest, true);
1603 OPT(nir_lower_vec_to_regs, NULL, NULL);
1604 }
1605
1606 OPT(nir_opt_dce);
1607
1608 if (OPT(nir_opt_rematerialize_compares))
1609 OPT(nir_opt_dce);
1610
1611 OPT(nir_opt_dce);
1612
1613 nir_trivialize_registers(nir);
1614
1615 /* This is the last pass we run before we start emitting stuff. It
1616 * determines when we need to insert boolean resolves on Gen <= 5. We
1617 * run it last because it stashes data in instr->pass_flags and we don't
1618 * want that to be squashed by other NIR passes.
1619 */
1620 if (devinfo->ver <= 5)
1621 elk_nir_analyze_boolean_resolves(nir);
1622
1623 nir_sweep(nir);
1624
1625 if (unlikely(debug_enabled)) {
1626 fprintf(stderr, "NIR (final form) for %s shader:\n",
1627 _mesa_shader_stage_to_string(nir->info.stage));
1628 nir_print_shader(nir, stderr);
1629 }
1630 }
1631
1632 static bool
1633 elk_nir_apply_sampler_key(nir_shader *nir,
1634 const struct elk_compiler *compiler,
1635 const struct elk_sampler_prog_key_data *key_tex)
1636 {
1637 const struct intel_device_info *devinfo = compiler->devinfo;
1638 nir_lower_tex_options tex_options = {
1639 .lower_txd_clamp_bindless_sampler = true,
1640 .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
1641 .lower_invalid_implicit_lod = true,
1642 .lower_index_to_offset = true,
1643 };
1644
1645 /* Iron Lake and prior require lowering of all rectangle textures */
1646 if (devinfo->ver < 6)
1647 tex_options.lower_rect = true;
1648
1649 /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
1650 if (devinfo->ver < 8) {
1651 tex_options.saturate_s = key_tex->gl_clamp_mask[0];
1652 tex_options.saturate_t = key_tex->gl_clamp_mask[1];
1653 tex_options.saturate_r = key_tex->gl_clamp_mask[2];
1654 }
1655
1656 /* Prior to Haswell, we have to lower gradients on shadow samplers */
1657 tex_options.lower_txd_shadow = devinfo->verx10 <= 70;
1658
1659 return nir_lower_tex(nir, &tex_options);
1660 }
1661
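/* Resolve the subgroup-size mode requested in shader_info into a concrete
 * subgroup size, or 0 when the size is left to the back-end (varying size
 * in fragment shaders).
 */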
1662 static unsigned
1663 get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
1664 {
1665 switch (info->subgroup_size) {
1666 case SUBGROUP_SIZE_API_CONSTANT:
1667 /* We have to use the global constant size. */
1668 return ELK_SUBGROUP_SIZE;
1669
1670 case SUBGROUP_SIZE_UNIFORM:
1671 /* It has to be uniform across all invocations but can vary per stage
1672 * if we want. This gives us a bit more freedom.
1673 *
1674 * For compute, elk_nir_apply_key is called per-dispatch-width so this
1675 * is the actual subgroup size and not a maximum. However, we only
1676 * invoke one size of any given compute shader so it's still guaranteed
1677 * to be uniform across invocations.
1678 */
1679 return max_subgroup_size;
1680
1681 case SUBGROUP_SIZE_VARYING:
1682 /* The subgroup size is allowed to be fully varying. For geometry
1683 * stages, we know it's always 8 which is max_subgroup_size so we can
1684 * return that. For compute, elk_nir_apply_key is called once per
1685 * dispatch-width so max_subgroup_size is the real subgroup size.
1686 *
1687 * For fragment, we return 0 and let it fall through to the back-end
1688 * compiler. This means we can't optimize based on subgroup size but
1689 * that's a risk the client took when it asked for a varying subgroup
1690 * size.
1691 */
1692 return info->stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;
1693
1694 case SUBGROUP_SIZE_REQUIRE_8:
1695 case SUBGROUP_SIZE_REQUIRE_16:
1696 case SUBGROUP_SIZE_REQUIRE_32:
1697 assert(gl_shader_stage_uses_workgroup(info->stage) ||
1698 (info->stage >= MESA_SHADER_RAYGEN && info->stage <= MESA_SHADER_CALLABLE));
1699 /* These enum values are expressly chosen to be equal to the subgroup
1700 * size that they require.
1701 */
1702 return info->subgroup_size;
1703
1704 case SUBGROUP_SIZE_FULL_SUBGROUPS:
1705 case SUBGROUP_SIZE_REQUIRE_64:
1706 case SUBGROUP_SIZE_REQUIRE_128:
1707 break;
1708 }
1709
1710 unreachable("Invalid subgroup size type");
1711 }
1712
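/* Report the subgroup size that will be visible to the API for this
 * shader, given the hardware subgroup size chosen by the back-end.
 */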
1713 unsigned
1714 elk_nir_api_subgroup_size(const nir_shader *nir,
1715 unsigned hw_subgroup_size)
1716 {
1717 return get_subgroup_size(&nir->info, hw_subgroup_size);
1718 }
1719
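/* Apply key-dependent lowering to an already preprocessed shader: sampler
 * workarounds, texture lowering, subgroup lowering, and the trigonometric
 * input-range workaround.  If anything changed, the shader is re-optimized.
 */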
1720 void
1721 elk_nir_apply_key(nir_shader *nir,
1722 const struct elk_compiler *compiler,
1723 const struct elk_base_prog_key *key,
1724 unsigned max_subgroup_size)
1725 {
1726 bool progress = false;
1727
1728 OPT(elk_nir_apply_sampler_key, compiler, &key->tex);
1729
1730 const struct intel_nir_lower_texture_opts tex_opts = {
1731 .combined_lod_and_array_index = compiler->devinfo->ver >= 20,
1732 };
1733 OPT(intel_nir_lower_texture, &tex_opts);
1734
1735 const nir_lower_subgroups_options subgroups_options = {
1736 .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size),
1737 .ballot_bit_size = 32,
1738 .ballot_components = 1,
1739 .lower_subgroup_masks = true,
1740 };
1741 OPT(nir_lower_subgroups, &subgroups_options);
1742
1743 if (key->limit_trig_input_range)
1744 OPT(elk_nir_limit_trig_input_range_workaround);
1745
1746 if (progress) {
1747 const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1748 elk_nir_optimize(nir, is_scalar, compiler->devinfo);
1749 }
1750 }
1751
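/* Return the ELK conditional modifier that implements the given NIR
 * comparison opcode.
 */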
1752 enum elk_conditional_mod
1753 elk_cmod_for_nir_comparison(nir_op op)
1754 {
1755 switch (op) {
1756 case nir_op_flt:
1757 case nir_op_flt32:
1758 case nir_op_ilt:
1759 case nir_op_ilt32:
1760 case nir_op_ult:
1761 case nir_op_ult32:
1762 return ELK_CONDITIONAL_L;
1763
1764 case nir_op_fge:
1765 case nir_op_fge32:
1766 case nir_op_ige:
1767 case nir_op_ige32:
1768 case nir_op_uge:
1769 case nir_op_uge32:
1770 return ELK_CONDITIONAL_GE;
1771
1772 case nir_op_feq:
1773 case nir_op_feq32:
1774 case nir_op_ieq:
1775 case nir_op_ieq32:
1776 case nir_op_b32all_fequal2:
1777 case nir_op_b32all_iequal2:
1778 case nir_op_b32all_fequal3:
1779 case nir_op_b32all_iequal3:
1780 case nir_op_b32all_fequal4:
1781 case nir_op_b32all_iequal4:
1782 return ELK_CONDITIONAL_Z;
1783
1784 case nir_op_fneu:
1785 case nir_op_fneu32:
1786 case nir_op_ine:
1787 case nir_op_ine32:
1788 case nir_op_b32any_fnequal2:
1789 case nir_op_b32any_inequal2:
1790 case nir_op_b32any_fnequal3:
1791 case nir_op_b32any_inequal3:
1792 case nir_op_b32any_fnequal4:
1793 case nir_op_b32any_inequal4:
1794 return ELK_CONDITIONAL_NZ;
1795
1796 default:
1797 unreachable("Unsupported NIR comparison op");
1798 }
1799 }
1800
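/* Return the LSC atomic opcode for a NIR atomic intrinsic.  Integer adds
 * of a constant +1/-1 are strength-reduced to INC/DEC.
 */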
1801 enum elk_lsc_opcode
1802 elk_lsc_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
1803 {
1804 switch (nir_intrinsic_atomic_op(atomic)) {
1805 case nir_atomic_op_iadd: {
1806 unsigned src_idx;
1807 switch (atomic->intrinsic) {
1808 case nir_intrinsic_image_atomic:
1809 case nir_intrinsic_bindless_image_atomic:
1810 src_idx = 3;
1811 break;
1812 case nir_intrinsic_ssbo_atomic:
1813 src_idx = 2;
1814 break;
1815 case nir_intrinsic_shared_atomic:
1816 case nir_intrinsic_global_atomic:
1817 src_idx = 1;
1818 break;
1819 default:
1820 unreachable("Invalid add atomic opcode");
1821 }
1822
1823 if (nir_src_is_const(atomic->src[src_idx])) {
1824 int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
1825 if (add_val == 1)
1826 return LSC_OP_ATOMIC_INC;
1827 else if (add_val == -1)
1828 return LSC_OP_ATOMIC_DEC;
1829 }
1830 return LSC_OP_ATOMIC_ADD;
1831 }
1832
1833 case nir_atomic_op_imin: return LSC_OP_ATOMIC_MIN;
1834 case nir_atomic_op_umin: return LSC_OP_ATOMIC_UMIN;
1835 case nir_atomic_op_imax: return LSC_OP_ATOMIC_MAX;
1836 case nir_atomic_op_umax: return LSC_OP_ATOMIC_UMAX;
1837 case nir_atomic_op_iand: return LSC_OP_ATOMIC_AND;
1838 case nir_atomic_op_ior: return LSC_OP_ATOMIC_OR;
1839 case nir_atomic_op_ixor: return LSC_OP_ATOMIC_XOR;
1840 case nir_atomic_op_xchg: return LSC_OP_ATOMIC_STORE;
1841 case nir_atomic_op_cmpxchg: return LSC_OP_ATOMIC_CMPXCHG;
1842
1843 case nir_atomic_op_fmin: return LSC_OP_ATOMIC_FMIN;
1844 case nir_atomic_op_fmax: return LSC_OP_ATOMIC_FMAX;
1845 case nir_atomic_op_fcmpxchg: return LSC_OP_ATOMIC_FCMPXCHG;
1846 case nir_atomic_op_fadd: return LSC_OP_ATOMIC_FADD;
1847
1848 default:
1849 unreachable("Unsupported NIR atomic intrinsic");
1850 }
1851 }
1852
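/* Map a NIR ALU type onto the corresponding ELK register type. */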
1853 enum elk_reg_type
1854 elk_type_for_nir_type(const struct intel_device_info *devinfo,
1855 nir_alu_type type)
1856 {
1857 switch (type) {
1858 case nir_type_uint:
1859 case nir_type_uint32:
1860 return ELK_REGISTER_TYPE_UD;
1861 case nir_type_bool:
1862 case nir_type_int:
1863 case nir_type_bool32:
1864 case nir_type_int32:
1865 return ELK_REGISTER_TYPE_D;
1866 case nir_type_float:
1867 case nir_type_float32:
1868 return ELK_REGISTER_TYPE_F;
1869 case nir_type_float16:
1870 return ELK_REGISTER_TYPE_HF;
1871 case nir_type_float64:
1872 return ELK_REGISTER_TYPE_DF;
1873 case nir_type_int64:
1874 return devinfo->ver < 8 ? ELK_REGISTER_TYPE_DF : ELK_REGISTER_TYPE_Q;
1875 case nir_type_uint64:
1876 return devinfo->ver < 8 ? ELK_REGISTER_TYPE_DF : ELK_REGISTER_TYPE_UQ;
1877 case nir_type_int16:
1878 return ELK_REGISTER_TYPE_W;
1879 case nir_type_uint16:
1880 return ELK_REGISTER_TYPE_UW;
1881 case nir_type_int8:
1882 return ELK_REGISTER_TYPE_B;
1883 case nir_type_uint8:
1884 return ELK_REGISTER_TYPE_UB;
1885 default:
1886 unreachable("unknown type");
1887 }
1888
1889 return ELK_REGISTER_TYPE_F;
1890 }
1891
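/* Build a passthrough tessellation control shader that simply copies the
 * per-vertex varyings named in the key (minus the tess levels), then run
 * it through the usual preprocessing so it is ready for compilation.
 */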
1892 nir_shader *
1893 elk_nir_create_passthrough_tcs(void *mem_ctx, const struct elk_compiler *compiler,
1894 const struct elk_tcs_prog_key *key)
1895 {
1896 assert(key->input_vertices > 0);
1897
1898 const nir_shader_compiler_options *options =
1899 compiler->nir_options[MESA_SHADER_TESS_CTRL];
1900
1901 uint64_t inputs_read = key->outputs_written &
1902 ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
1903
1904 unsigned locations[64];
1905 unsigned num_locations = 0;
1906
1907 u_foreach_bit64(varying, inputs_read)
1908 locations[num_locations++] = varying;
1909
1910 nir_shader *nir =
1911 nir_create_passthrough_tcs_impl(options, locations, num_locations,
1912 key->input_vertices);
1913
1914 ralloc_steal(mem_ctx, nir);
1915
1916 nir->info.inputs_read = inputs_read;
1917 nir->info.tess._primitive_mode = key->_tes_primitive_mode;
1918 nir_validate_shader(nir, "in elk_nir_create_passthrough_tcs");
1919
1920 struct elk_nir_compiler_opts opts = {};
1921 elk_preprocess_nir(compiler, nir, &opts);
1922
1923 return nir;
1924 }
1925
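/* Build a replacement for a load_uniform intrinsic that reads the data
 * from global memory at base_addr + off.  Constant offsets use 64B block
 * loads (two blocks, in case the value straddles a 64B boundary); indirect
 * offsets fall back to an ordinary global constant load.
 */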
1926 nir_def *
1927 elk_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
1928 nir_def *base_addr, unsigned off)
1929 {
1930 assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
1931
1932 unsigned bit_size = load_uniform->def.bit_size;
1933 assert(bit_size >= 8 && bit_size % 8 == 0);
1934 unsigned byte_size = bit_size / 8;
1935 nir_def *sysval;
1936
1937 if (nir_src_is_const(load_uniform->src[0])) {
1938 uint64_t offset = off +
1939 nir_intrinsic_base(load_uniform) +
1940 nir_src_as_uint(load_uniform->src[0]);
1941
1942 /* The offset should be aligned to the component byte size. */
1943 assert(offset % byte_size == 0);
1944
1945 unsigned suboffset = offset % 64;
1946 uint64_t aligned_offset = offset - suboffset;
1947
1948 /* Load two 64B blocks in case the value straddles a 64B boundary. */
1949 nir_def *data[2];
1950 for (unsigned i = 0; i < 2; i++) {
1951 nir_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
1952 data[i] = nir_load_global_const_block_intel(b, 16, addr,
1953 nir_imm_true(b));
1954 }
1955
1956 sysval = nir_extract_bits(b, data, 2, suboffset * 8,
1957 load_uniform->num_components, bit_size);
1958 } else {
1959 nir_def *offset32 =
1960 nir_iadd_imm(b, load_uniform->src[0].ssa,
1961 off + nir_intrinsic_base(load_uniform));
1962 nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
1963 sysval = nir_load_global_constant(b, addr, byte_size,
1964 load_uniform->num_components, bit_size);
1965 }
1966
1967 return sysval;
1968 }
1969
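/* Return the type to use for I/O layout queries on a variable: the
 * interface type when present, otherwise the variable's type with the
 * outer array stripped for arrayed or per-view I/O.
 */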
1970 const struct glsl_type *
1971 elk_nir_get_var_type(const struct nir_shader *nir, nir_variable *var)
1972 {
1973 const struct glsl_type *type = var->interface_type;
1974 if (!type) {
1975 type = var->type;
1976 if (nir_is_arrayed_io(var, nir->info.stage) || var->data.per_view) {
1977 assert(glsl_type_is_array(type));
1978 type = glsl_get_array_element(type);
1979 }
1980 }
1981
1982 return type;
1983 }
1984
1985