/*
 * Copyright © Microsoft Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "spirv_to_dxil.h"
#include "nir_to_dxil.h"
#include "dxil_nir.h"
#include "dxil_nir_lower_int_cubemaps.h"
#include "shader_enums.h"
#include "spirv/nir_spirv.h"
#include "util/blob.h"
#include "dxil_spirv_nir.h"

#include "git_sha1.h"
#include "vulkan/vulkan.h"

static const struct spirv_to_nir_options
spirv_to_nir_options = {
   .caps = {
      .draw_parameters = true,
      .multiview = true,
      .subgroup_basic = true,
      .subgroup_ballot = true,
      .subgroup_vote = true,
      .subgroup_shuffle = true,
      .subgroup_quad = true,
      .subgroup_arithmetic = true,
      .descriptor_array_dynamic_indexing = true,
      .float_controls = true,
      .float16 = true,
      .int16 = true,
      .storage_16bit = true,
      .storage_8bit = true,
      .descriptor_indexing = true,
      .runtime_descriptor_array = true,
      .descriptor_array_non_uniform_indexing = true,
      .image_read_without_format = true,
      .image_write_without_format = true,
      .int64 = true,
      .float64 = true,
      .tessellation = true,
   },
   .ubo_addr_format = nir_address_format_32bit_index_offset,
   .ssbo_addr_format = nir_address_format_32bit_index_offset,
   .shared_addr_format = nir_address_format_logical,

   .min_ubo_alignment = 256, /* D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT */
   .min_ssbo_alignment = 16, /* D3D12_RAW_UAV_SRV_BYTE_ALIGNMENT */

   .mediump_16bit_alu = true,
   .mediump_16bit_derivatives = true,
};

const struct spirv_to_nir_options *
dxil_spirv_nir_get_spirv_options(void)
{
   return &spirv_to_nir_options;
}

/* Logic extracted from vk_spirv_to_nir() so we have the same preparation
 * steps for both the vulkan driver and the lib used by the WebGPU
 * implementation.
 * Maybe we should move those steps out of vk_spirv_to_nir() and make
 * them vk agnostic (right now, the only vk-specific thing is the vk_device
 * object that's used for the debug callback passed to spirv_to_nir()).
 */
void
dxil_spirv_nir_prep(nir_shader *nir)
{
   /* We have to lower away local constant initializers right before we
    * inline functions. That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Pick off the single entrypoint that we want */
   nir_remove_non_entrypoints(nir);

   /* Now that we've deleted all but the main function, we can go ahead and
    * lower the rest of the constant initializers. We do this here so that
    * nir_remove_dead_variables and split_per_member_structs below see the
    * corresponding stores.
    */
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value |
              nir_var_shader_call_data | nir_var_ray_hit_attrib,
              NULL);

   NIR_PASS_V(nir, nir_propagate_invariant, false);
}

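/* Size/alignment callback for vectors and scalars in shared memory (used
 * with nir_lower_vars_to_explicit_types() below): components are packed
 * naturally, with booleans occupying 4 bytes since they are represented as
 * 32-bit values.
 */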
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}

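/* Recursive size/alignment callback for temp variables: arrays take their
 * element's alignment, structs are laid out field by field, and leaf types
 * get their natural size rounded up to at least 4-byte alignment.
 */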
static void
temp_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   uint32_t base_size, base_align;
   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_ARRAY:
      temp_var_info(glsl_get_array_element(type), &base_size, align);
      *size = base_size * glsl_array_size(type);
      break;
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE:
      *size = 0;
      *align = 0;
      for (uint32_t i = 0; i < glsl_get_length(type); ++i) {
         temp_var_info(glsl_get_struct_field(type, i), &base_size, &base_align);
         *size = ALIGN_POT(*size, base_align) + base_size;
         *align = MAX2(*align, base_align);
      }
      break;
   default:
      glsl_get_natural_size_align_bytes(type, &base_size, &base_align);

      *align = MAX2(base_align, 4);
      *size = ALIGN_POT(base_size, *align);
      break;
   }
}

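/* Declares the hidden UBO backing the dxil_spirv_*_runtime_data struct at
 * the given descriptor set/binding, exposed to the shader as an array of
 * 32-bit words.
 */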
static nir_variable *
add_runtime_data_var(nir_shader *nir, unsigned desc_set, unsigned binding)
{
   unsigned runtime_data_size =
      nir->info.stage == MESA_SHADER_COMPUTE
         ? sizeof(struct dxil_spirv_compute_runtime_data)
         : sizeof(struct dxil_spirv_vertex_runtime_data);

   const struct glsl_type *array_type =
      glsl_array_type(glsl_uint_type(), runtime_data_size / sizeof(unsigned),
                      sizeof(unsigned));
   const struct glsl_struct_field field = {array_type, "arr"};
   nir_variable *var = nir_variable_create(
      nir, nir_var_mem_ubo,
      glsl_struct_type(&field, 1, "runtime_data", false), "runtime_data");
   var->data.descriptor_set = desc_set;
   /* Check that desc_set fits in the descriptor_set bitfield */
   assert(var->data.descriptor_set == desc_set);
   var->data.binding = binding;
   var->data.how_declared = nir_var_hidden;
   return var;
}

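/* Rewrites loads of system values that have no direct D3D12 equivalent
 * (num_workgroups, first_vertex, draw_id, etc.) into nir_load_ubo from the
 * runtime-data CBV, at the offset of the corresponding struct field.
 */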
static bool
lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
                           void *cb_data)
{
   if (instr->type != nir_instr_type_intrinsic) {
      return false;
   }

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return false;

   const struct dxil_spirv_runtime_conf *conf =
      (const struct dxil_spirv_runtime_conf *)cb_data;

   int offset = 0;
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_num_workgroups:
      offset =
         offsetof(struct dxil_spirv_compute_runtime_data, group_count_x);
      break;
   case nir_intrinsic_load_base_workgroup_id:
      offset =
         offsetof(struct dxil_spirv_compute_runtime_data, base_group_x);
      break;
   case nir_intrinsic_load_first_vertex:
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, first_vertex);
      break;
   case nir_intrinsic_load_is_indexed_draw:
      offset =
         offsetof(struct dxil_spirv_vertex_runtime_data, is_indexed_draw);
      break;
   case nir_intrinsic_load_base_instance:
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, base_instance);
      break;
   case nir_intrinsic_load_draw_id:
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, draw_id);
      break;
   case nir_intrinsic_load_view_index:
      if (!conf->lower_view_index)
         return false;
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, view_index);
      break;
   default:
      return false;
   }

   builder->cursor = nir_after_instr(instr);
   nir_address_format ubo_format = nir_address_format_32bit_index_offset;

   nir_def *index = nir_vulkan_resource_index(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(builder, 0),
      .desc_set = conf->runtime_data_cbv.register_space,
      .binding = conf->runtime_data_cbv.base_shader_register,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_desc = nir_load_vulkan_descriptor(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_data = nir_load_ubo(
      builder,
      intrin->def.num_components,
      intrin->def.bit_size,
      nir_channel(builder, load_desc, 0),
      nir_imm_int(builder, offset),
      .align_mul = 256,
      .align_offset = offset,
      .range_base = offset,
      .range = intrin->def.bit_size * intrin->def.num_components / 8);

   nir_def_rewrite_uses(&intrin->def, load_data);
   nir_instr_remove(instr);
   return true;
}

static bool
dxil_spirv_nir_lower_shader_system_values(nir_shader *shader,
                                          const struct dxil_spirv_runtime_conf *conf)
{
   return nir_shader_instructions_pass(shader, lower_shader_system_values,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance |
                                       nir_metadata_loop_analysis,
                                       (void *)conf);
}

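/* Declares the hidden UBO used to back push constants, sized in 32-bit
 * words and padded to a 16-byte multiple.
 */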
static nir_variable *
add_push_constant_var(nir_shader *nir, unsigned size, unsigned desc_set, unsigned binding)
{
   /* Size must be a multiple of 16 as buffer load is loading 16 bytes at a time */
   unsigned num_32bit_words = ALIGN_POT(size, 16) / 4;

   const struct glsl_type *array_type =
      glsl_array_type(glsl_uint_type(), num_32bit_words, 4);
   const struct glsl_struct_field field = {array_type, "arr"};
   nir_variable *var = nir_variable_create(
      nir, nir_var_mem_ubo,
      glsl_struct_type(&field, 1, "block", false), "push_constants");
   var->data.descriptor_set = desc_set;
   var->data.binding = binding;
   var->data.how_declared = nir_var_hidden;
   return var;
}

struct lower_load_push_constant_data {
   nir_address_format ubo_format;
   unsigned desc_set;
   unsigned binding;
   unsigned size;
};

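/* Rewrites load_push_constant intrinsics as UBO loads from the push-constant
 * CBV, and records the highest base + range seen so the caller can size the
 * backing variable.
 */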
static bool
lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
                         void *cb_data)
{
   struct lower_load_push_constant_data *data =
      (struct lower_load_push_constant_data *)cb_data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* All the intrinsics we care about are loads */
   if (intrin->intrinsic != nir_intrinsic_load_push_constant)
      return false;

   uint32_t base = nir_intrinsic_base(intrin);
   uint32_t range = nir_intrinsic_range(intrin);

   data->size = MAX2(base + range, data->size);

   builder->cursor = nir_after_instr(instr);
   nir_address_format ubo_format = data->ubo_format;

   nir_def *index = nir_vulkan_resource_index(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(builder, 0),
      .desc_set = data->desc_set, .binding = data->binding,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_desc = nir_load_vulkan_descriptor(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *offset = intrin->src[0].ssa;
   nir_def *load_data = nir_load_ubo(
      builder,
      intrin->def.num_components,
      intrin->def.bit_size,
      nir_channel(builder, load_desc, 0),
      nir_iadd_imm(builder, offset, base),
      .align_mul = nir_intrinsic_align_mul(intrin),
      .align_offset = nir_intrinsic_align_offset(intrin),
      .range_base = base,
      .range = range);

   nir_def_rewrite_uses(&intrin->def, load_data);
   nir_instr_remove(instr);
   return true;
}

static bool
dxil_spirv_nir_lower_load_push_constant(nir_shader *shader,
                                        nir_address_format ubo_format,
                                        unsigned desc_set, unsigned binding,
                                        uint32_t *size)
{
   bool ret;
   struct lower_load_push_constant_data data = {
      .ubo_format = ubo_format,
      .desc_set = desc_set,
      .binding = binding,
   };
   ret = nir_shader_instructions_pass(shader, lower_load_push_constant,
                                      nir_metadata_block_index |
                                      nir_metadata_dominance |
                                      nir_metadata_loop_analysis,
                                      &data);

   *size = data.size;

   assert(ret == (*size > 0));

   return ret;
}

struct lower_yz_flip_data {
   bool *reads_sysval_ubo;
   const struct dxil_spirv_runtime_conf *rt_conf;
};

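/* Patches gl_Position stores to flip the Y and/or Z coordinates:
 *   y' = flip_y ? -y : y
 *   z' = flip_z ? 1.0 - z : z
 * The flips are either unconditional (masks baked into the shader) or
 * conditional on a bitmask read from the vertex runtime-data UBO.
 */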
static bool
lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
              void *cb_data)
{
   struct lower_yz_flip_data *data =
      (struct lower_yz_flip_data *)cb_data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   if (intrin->intrinsic != nir_intrinsic_store_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intrin, 0);
   if (var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_POS)
      return false;

   builder->cursor = nir_before_instr(instr);

   const struct dxil_spirv_runtime_conf *rt_conf = data->rt_conf;

   nir_def *pos = intrin->src[1].ssa;
   nir_def *y_pos = nir_channel(builder, pos, 1);
   nir_def *z_pos = nir_channel(builder, pos, 2);
   nir_def *y_flip_mask = NULL, *z_flip_mask = NULL, *dyn_yz_flip_mask = NULL;

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_YZ_FLIP_CONDITIONAL) {
      // conditional YZ-flip. The flip bitmask is passed through the vertex
      // runtime data UBO.
      unsigned offset =
         offsetof(struct dxil_spirv_vertex_runtime_data, yz_flip_mask);
      nir_address_format ubo_format = nir_address_format_32bit_index_offset;

      nir_def *index = nir_vulkan_resource_index(
         builder, nir_address_format_num_components(ubo_format),
         nir_address_format_bit_size(ubo_format),
         nir_imm_int(builder, 0),
         .desc_set = rt_conf->runtime_data_cbv.register_space,
         .binding = rt_conf->runtime_data_cbv.base_shader_register,
         .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      nir_def *load_desc = nir_load_vulkan_descriptor(
         builder, nir_address_format_num_components(ubo_format),
         nir_address_format_bit_size(ubo_format),
         index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      dyn_yz_flip_mask =
         nir_load_ubo(builder, 1, 32,
                      nir_channel(builder, load_desc, 0),
                      nir_imm_int(builder, offset),
                      .align_mul = 256,
                      .align_offset = offset,
                      .range_base = offset,
                      .range = 4);
      *data->reads_sysval_ubo = true;
   }

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_Y_FLIP_UNCONDITIONAL)
      y_flip_mask = nir_imm_int(builder, rt_conf->yz_flip.y_mask);
   else if (rt_conf->yz_flip.mode & DXIL_SPIRV_Y_FLIP_CONDITIONAL)
      y_flip_mask = nir_iand_imm(builder, dyn_yz_flip_mask, DXIL_SPIRV_Y_FLIP_MASK);

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_Z_FLIP_UNCONDITIONAL)
      z_flip_mask = nir_imm_int(builder, rt_conf->yz_flip.z_mask);
   else if (rt_conf->yz_flip.mode & DXIL_SPIRV_Z_FLIP_CONDITIONAL)
      z_flip_mask = nir_ushr_imm(builder, dyn_yz_flip_mask, DXIL_SPIRV_Z_FLIP_SHIFT);

   /* TODO: Multi-viewport */

   if (y_flip_mask) {
      nir_def *flip = nir_test_mask(builder, y_flip_mask, 1);

      // Y-flip => pos.y = -pos.y
      y_pos = nir_bcsel(builder, flip, nir_fneg(builder, y_pos), y_pos);
   }

   if (z_flip_mask) {
      nir_def *flip = nir_test_mask(builder, z_flip_mask, 1);

      // Z-flip => pos.z = -pos.z + 1.0f
      z_pos = nir_bcsel(builder, flip,
                        nir_fadd_imm(builder, nir_fneg(builder, z_pos), 1.0f),
                        z_pos);
   }

   nir_def *def = nir_vec4(builder,
                           nir_channel(builder, pos, 0),
                           y_pos,
                           z_pos,
                           nir_channel(builder, pos, 3));
   nir_src_rewrite(&intrin->src[1], def);
   return true;
}

bool
dxil_spirv_nir_lower_yz_flip(nir_shader *shader,
                             const struct dxil_spirv_runtime_conf *rt_conf,
                             bool *reads_sysval_ubo)
{
   struct lower_yz_flip_data data = {
      .rt_conf = rt_conf,
      .reads_sysval_ubo = reads_sysval_ubo,
   };

   return nir_shader_instructions_pass(shader, lower_yz_flip,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance |
                                       nir_metadata_loop_analysis,
                                       &data);
}

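/* Removes accesses to the gl_PointSize output: stores are dropped and loads
 * are replaced with the default 1.0 (D3D12 has no programmable point size).
 */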
static bool
discard_psiz_access(struct nir_builder *builder, nir_intrinsic_instr *intrin,
                    void *cb_data)
{
   if (intrin->intrinsic != nir_intrinsic_store_deref &&
       intrin->intrinsic != nir_intrinsic_load_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intrin, 0);
   if (!var || var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_PSIZ)
      return false;

   builder->cursor = nir_before_instr(&intrin->instr);

   if (intrin->intrinsic == nir_intrinsic_load_deref)
      nir_def_rewrite_uses(&intrin->def, nir_imm_float(builder, 1.0));

   nir_instr_remove(&intrin->instr);
   return true;
}

static bool
dxil_spirv_nir_discard_point_size_var(nir_shader *shader)
{
   if (shader->info.stage != MESA_SHADER_VERTEX &&
       shader->info.stage != MESA_SHADER_TESS_EVAL &&
       shader->info.stage != MESA_SHADER_GEOMETRY)
      return false;

   nir_variable *psiz = NULL;
   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.location == VARYING_SLOT_PSIZ) {
         psiz = var;
         break;
      }
   }

   if (!psiz)
      return false;

   if (!nir_shader_intrinsics_pass(shader, discard_psiz_access,
                                   nir_metadata_block_index |
                                   nir_metadata_dominance |
                                   nir_metadata_loop_analysis,
                                   NULL))
      return false;

   nir_remove_dead_derefs(shader);
   return true;
}

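/* Replaces loads of inputs that the previous stage never writes with zero
 * (see the comment in the function body for why zero rather than undef).
 */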
static bool
kill_undefined_varyings(struct nir_builder *b,
                        nir_instr *instr,
                        void *data)
{
   const nir_shader *prev_stage_nir = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->intrinsic != nir_intrinsic_load_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var || var->data.mode != nir_var_shader_in)
      return false;

   /* Ignore most builtins for now, some of them get default values
    * when not written from previous stages.
    */
   if (var->data.location < VARYING_SLOT_VAR0 &&
       var->data.location != VARYING_SLOT_POS)
      return false;

   uint32_t loc = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;
   uint64_t written = var->data.patch ?
      prev_stage_nir->info.patch_outputs_written :
      prev_stage_nir->info.outputs_written;
   if (BITFIELD64_RANGE(loc, glsl_varying_count(var->type)) & written)
      return false;

   b->cursor = nir_after_instr(instr);
   /* Note: zero is used instead of undef, because optimization is not run
    * here, but is run later on. If we load an undef here, and that undef
    * ends up being used to store to position later on, that can cause some
    * or all of the components in that position write to be removed, which
    * is problematic especially in the case of all components, since that
    * would remove the store instruction, and would make it tricky to
    * satisfy the DXIL requirements of writing all position components.
    */
   nir_def *zero = nir_imm_zero(b, intr->def.num_components,
                                intr->def.bit_size);
   nir_def_rewrite_uses(&intr->def, zero);
   nir_instr_remove(instr);
   return true;
}

static bool
dxil_spirv_nir_kill_undefined_varyings(nir_shader *shader,
                                       const nir_shader *prev_stage_shader)
{
   if (!nir_shader_instructions_pass(shader,
                                     kill_undefined_varyings,
                                     nir_metadata_dominance |
                                     nir_metadata_block_index |
                                     nir_metadata_loop_analysis,
                                     (void *)prev_stage_shader))
      return false;

   nir_remove_dead_derefs(shader);
   nir_remove_dead_variables(shader, nir_var_shader_in, NULL);
   return true;
}

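/* Removes stores to outputs whose slots are set in the kill mask passed as
 * callback data.
 */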
static bool
kill_unused_outputs(struct nir_builder *b,
                    nir_instr *instr,
                    void *data)
{
   uint64_t kill_mask = *((uint64_t *)data);

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->intrinsic != nir_intrinsic_store_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var || var->data.mode != nir_var_shader_out)
      return false;

   unsigned loc = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 :
      var->data.location;
   if (!(BITFIELD64_RANGE(loc, glsl_varying_count(var->type)) & kill_mask))
      return false;

   nir_instr_remove(instr);
   return true;
}

static bool
dxil_spirv_nir_kill_unused_outputs(nir_shader *shader,
                                   nir_shader *next_stage_shader)
{
   uint64_t kill_var_mask =
      shader->info.outputs_written & ~next_stage_shader->info.inputs_read;
   bool progress = false;

   /* Don't kill builtin vars */
   kill_var_mask &= BITFIELD64_MASK(MAX_VARYING) << VARYING_SLOT_VAR0;

   if (nir_shader_instructions_pass(shader,
                                    kill_unused_outputs,
                                    nir_metadata_dominance |
                                    nir_metadata_block_index |
                                    nir_metadata_loop_analysis,
                                    (void *)&kill_var_mask))
      progress = true;

   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      kill_var_mask =
         (shader->info.patch_outputs_written |
          shader->info.patch_outputs_read) &
         ~next_stage_shader->info.patch_inputs_read;
      if (nir_shader_instructions_pass(shader,
                                       kill_unused_outputs,
                                       nir_metadata_dominance |
                                       nir_metadata_block_index |
                                       nir_metadata_loop_analysis,
                                       (void *)&kill_var_mask))
         progress = true;
   }

   if (progress) {
      nir_opt_dce(shader);
      nir_remove_dead_derefs(shader);
      nir_remove_dead_variables(shader, nir_var_shader_out, NULL);
   }

   return progress;
}

struct lower_pntc_data {
   const struct dxil_spirv_runtime_conf *conf;
   nir_variable *pntc;
};

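/* Point-coord emulation, producer side: whenever gl_Position is written,
 * also write the point's center in framebuffer coordinates to a new PNTC
 * output, using the viewport size fetched from the runtime-data UBO.
 */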
static bool
write_pntc_with_pos(nir_builder *b, nir_instr *instr, void *_data)
{
   struct lower_pntc_data *data = (struct lower_pntc_data *)_data;
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic != nir_intrinsic_store_deref)
      return false;
   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var || var->data.location != VARYING_SLOT_POS)
      return false;

   nir_def *pos = intr->src[1].ssa;

   unsigned offset =
      offsetof(struct dxil_spirv_vertex_runtime_data, viewport_width) - 4;
   static_assert(offsetof(struct dxil_spirv_vertex_runtime_data, viewport_width) % 16 == 4,
                 "Doing vector unpacking with this assumption");
   nir_address_format ubo_format = nir_address_format_32bit_index_offset;

   b->cursor = nir_before_instr(instr);
   nir_def *index = nir_vulkan_resource_index(
      b, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(b, 0),
      .desc_set = data->conf->runtime_data_cbv.register_space,
      .binding = data->conf->runtime_data_cbv.base_shader_register,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_desc = nir_load_vulkan_descriptor(
      b, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *transform = nir_channels(b,
                                     nir_load_ubo(b, 4, 32,
                                                  nir_channel(b, load_desc, 0),
                                                  nir_imm_int(b, offset),
                                                  .align_mul = 16,
                                                  .range_base = offset,
                                                  .range = 16),
                                     0x6);
   nir_def *point_center_in_clip = nir_fmul(b, nir_trim_vector(b, pos, 2),
                                            nir_frcp(b, nir_channel(b, pos, 3)));
   nir_def *point_center =
      nir_fmul(b, nir_fadd_imm(b,
                               nir_fmul(b, point_center_in_clip,
                                        nir_vec2(b, nir_imm_float(b, 0.5), nir_imm_float(b, -0.5f))),
                               0.5), transform);
   nir_store_var(b, data->pntc, nir_pad_vec4(b, point_center), 0xf);
   return true;
}

static void
dxil_spirv_write_pntc(nir_shader *nir, const struct dxil_spirv_runtime_conf *conf)
{
   struct lower_pntc_data data = { .conf = conf };
   data.pntc = nir_variable_create(nir, nir_var_shader_out, glsl_vec4_type(), "gl_PointCoord");
   data.pntc->data.location = VARYING_SLOT_PNTC;
   nir_shader_instructions_pass(nir, write_pntc_with_pos,
                                nir_metadata_block_index |
                                nir_metadata_dominance |
                                nir_metadata_loop_analysis,
                                &data);
   nir->info.outputs_written |= VARYING_BIT_PNTC;

   /* Add the runtime data var if it's not already there */
   nir_binding binding = {
      .binding = conf->runtime_data_cbv.base_shader_register,
      .desc_set = conf->runtime_data_cbv.register_space,
      .success = true,
   };
   nir_variable *ubo_var = nir_get_binding_variable(nir, binding);
   if (!ubo_var)
      add_runtime_data_var(nir, conf->runtime_data_cbv.register_space, conf->runtime_data_cbv.base_shader_register);
}

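/* Point-coord emulation, consumer side: PNTC reads in the fragment shader
 * are computed as the distance between the current fragment position and
 * the point center passed by the previous stage, biased by 0.5.
 */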
static bool
lower_pntc_read(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
   if (intr->intrinsic != nir_intrinsic_load_deref)
      return false;
   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var || var->data.location != VARYING_SLOT_PNTC)
      return false;

   nir_def *point_center = &intr->def;
   nir_variable *pos_var = (nir_variable *)data;

   b->cursor = nir_after_instr(&intr->instr);

   nir_def *pos;
   if (var->data.sample == pos_var->data.sample)
      pos = nir_load_var(b, pos_var);
   else if (var->data.sample)
      pos = nir_interp_deref_at_sample(b, 4, 32,
                                       &nir_build_deref_var(b, pos_var)->def,
                                       nir_load_sample_id(b));
   else
      pos = nir_interp_deref_at_offset(b, 4, 32,
                                       &nir_build_deref_var(b, pos_var)->def,
                                       nir_imm_zero(b, 2, 32));

   nir_def *pntc = nir_fadd_imm(b,
                                nir_fsub(b, nir_trim_vector(b, pos, 2), nir_trim_vector(b, point_center, 2)),
                                0.5);
   nir_def_rewrite_uses_after(point_center, pntc, pntc->parent_instr);
   return true;
}

static void
dxil_spirv_compute_pntc(nir_shader *nir)
{
   nir_variable *pos = nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_POS);
   if (!pos) {
      pos = nir_variable_create(nir, nir_var_shader_in, glsl_vec4_type(), "gl_FragCoord");
      pos->data.location = VARYING_SLOT_POS;
      pos->data.sample = nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_PNTC)->data.sample;
   }
   nir_shader_intrinsics_pass(nir, lower_pntc_read,
                              nir_metadata_block_index |
                              nir_metadata_dominance |
                              nir_metadata_loop_analysis,
                              pos);
}

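/* When multiview is lowered to layered rendering, existing gl_Layer writes
 * need to be offset by the view index.
 */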
static bool
lower_view_index_to_rt_layer_instr(nir_builder *b, nir_intrinsic_instr *intr,
                                   void *data)
{
   if (intr->intrinsic != nir_intrinsic_store_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var ||
       var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_LAYER)
      return false;

   b->cursor = nir_before_instr(&intr->instr);
   nir_def *layer = intr->src[1].ssa;
   nir_def *new_layer = nir_iadd(b, layer,
                                 nir_load_view_index(b));
   nir_src_rewrite(&intr->src[1], new_layer);
   return true;
}

static bool
add_layer_write(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr) {
      if (instr->type != nir_instr_type_intrinsic)
         return false;
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      if (intr->intrinsic != nir_intrinsic_emit_vertex &&
          intr->intrinsic != nir_intrinsic_emit_vertex_with_counter)
         return false;
      b->cursor = nir_before_instr(instr);
   }
   nir_variable *var = (nir_variable *)data;
   nir_store_var(b, var, nir_load_view_index(b), 0x1);
   return true;
}

static void
lower_view_index_to_rt_layer(nir_shader *nir)
{
   bool existing_write =
      nir_shader_intrinsics_pass(nir, lower_view_index_to_rt_layer_instr,
                                 nir_metadata_block_index |
                                 nir_metadata_dominance |
                                 nir_metadata_loop_analysis, NULL);

   if (existing_write)
      return;

   nir_variable *var = nir_variable_create(nir, nir_var_shader_out,
                                           glsl_uint_type(), "gl_Layer");
   var->data.location = VARYING_SLOT_LAYER;
   var->data.interpolation = INTERP_MODE_FLAT;
   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
      nir_shader_instructions_pass(nir,
                                   add_layer_write,
                                   nir_metadata_block_index |
                                   nir_metadata_dominance |
                                   nir_metadata_loop_analysis, var);
   } else {
      nir_function_impl *func = nir_shader_get_entrypoint(nir);
      nir_builder b = nir_builder_at(nir_after_impl(func));
      add_layer_write(&b, NULL, var);
   }
}

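/* Cross-stage linking: prunes undefined/unused varyings, assigns matching
 * driver locations on both sides of the interface, and wires up the
 * point-coord emulation when the fragment shader reads PNTC.
 */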
void
dxil_spirv_nir_link(nir_shader *nir, nir_shader *prev_stage_nir,
                    const struct dxil_spirv_runtime_conf *conf,
                    bool *requires_runtime_data)
{
   glsl_type_singleton_init_or_ref();

   *requires_runtime_data = false;
   if (prev_stage_nir) {
      if (nir->info.stage == MESA_SHADER_FRAGMENT) {
         nir->info.clip_distance_array_size = prev_stage_nir->info.clip_distance_array_size;

         if (nir->info.inputs_read & VARYING_BIT_PNTC) {
            NIR_PASS_V(prev_stage_nir, dxil_spirv_write_pntc, conf);
            NIR_PASS_V(nir, dxil_spirv_compute_pntc);
            *requires_runtime_data = true;
         }
      }

      NIR_PASS_V(nir, dxil_spirv_nir_kill_undefined_varyings, prev_stage_nir);
      NIR_PASS_V(prev_stage_nir, dxil_spirv_nir_kill_unused_outputs, nir);

      nir->info.inputs_read =
         dxil_reassign_driver_locations(nir, nir_var_shader_in,
                                        prev_stage_nir->info.outputs_written);

      prev_stage_nir->info.outputs_written =
         dxil_reassign_driver_locations(prev_stage_nir, nir_var_shader_out,
                                        nir->info.inputs_read);

      if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
         assert(prev_stage_nir->info.stage == MESA_SHADER_TESS_CTRL);
         nir->info.tess.tcs_vertices_out = prev_stage_nir->info.tess.tcs_vertices_out;
         prev_stage_nir->info.tess = nir->info.tess;

         for (uint32_t i = 0; i < 2; ++i) {
            unsigned loc = i == 0 ? VARYING_SLOT_TESS_LEVEL_OUTER : VARYING_SLOT_TESS_LEVEL_INNER;
            nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_in, loc);
            if (!var) {
               var = nir_variable_create(nir, nir_var_shader_in, glsl_array_type(glsl_float_type(), i == 0 ? 4 : 2, 0), i == 0 ? "outer" : "inner");
               var->data.location = loc;
               var->data.patch = true;
               var->data.compact = true;
            }
         }
      }
   }

   glsl_type_singleton_decref();
}

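/* Tells nir_lower_bit_size() to promote 1-bit (boolean) subgroup reductions,
 * scans and quad swaps to 32 bits, since those ops can't operate on 1-bit
 * values on the DXIL side.
 */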
static unsigned
lower_bit_size_callback(const nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return 0;
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   switch (intr->intrinsic) {
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan:
      return intr->def.bit_size == 1 ? 32 : 0;
   default:
      return 0;
   }
}

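/* UBO/SSBO variables aliasing the same (descriptor set, binding) pair are
 * merged into a single variable, keeping the largest array size and (for
 * UBOs) the largest struct type.
 */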
static bool
merge_ubos_and_ssbos(nir_shader *nir)
{
   bool progress = false;
   nir_foreach_variable_with_modes_safe(var, nir, nir_var_mem_ubo | nir_var_mem_ssbo) {
      nir_variable *other_var = NULL;
      nir_foreach_variable_with_modes(var2, nir, var->data.mode) {
         if (var->data.descriptor_set == var2->data.descriptor_set &&
             var->data.binding == var2->data.binding) {
            other_var = var2;
            break;
         }
      }

      if (!other_var)
         continue;

      progress = true;
      /* Merge types */
      if (var->type != other_var->type) {
         /* Pick the larger array size */
         uint32_t desc_array_size = 1;
         if (glsl_type_is_array(var->type))
            desc_array_size = glsl_get_aoa_size(var->type);
         if (glsl_type_is_array(other_var->type))
            desc_array_size = MAX2(desc_array_size, glsl_get_aoa_size(other_var->type));

         const glsl_type *struct_type = glsl_without_array(var->type);
         if (var->data.mode == nir_var_mem_ubo) {
            /* Pick the larger struct type; doesn't matter for ssbos */
            uint32_t size = glsl_get_explicit_size(struct_type, false);
            const glsl_type *other_type = glsl_without_array(other_var->type);
            if (glsl_get_explicit_size(other_type, false) > size)
               struct_type = other_type;
         }

         var->type = glsl_array_type(struct_type, desc_array_size, 0);

         /* An ssbo is non-writeable if all aliased vars are non-writeable */
         if (var->data.mode == nir_var_mem_ssbo)
            var->data.access &= ~(other_var->data.access & ACCESS_NON_WRITEABLE);

         exec_node_remove(&other_var->node);
      }
   }
   nir_shader_preserve_all_metadata(nir);
   return progress;
}

void
dxil_spirv_nir_passes(nir_shader *nir,
                      const struct dxil_spirv_runtime_conf *conf,
                      bool *requires_runtime_data)
{
   glsl_type_singleton_init_or_ref();

   NIR_PASS_V(nir, nir_lower_io_to_vector,
              nir_var_shader_out |
              (nir->info.stage != MESA_SHADER_VERTEX ? nir_var_shader_in : 0));
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(nir, nir_remove_dead_derefs);

   const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
      .frag_coord = true,
      .point_coord = true,
   };
   NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);

   NIR_PASS_V(nir, nir_lower_system_values);

   nir_lower_compute_system_values_options compute_options = {
      .has_base_workgroup_id = !conf->zero_based_compute_workgroup_id,
   };
   NIR_PASS_V(nir, nir_lower_compute_system_values, &compute_options);
   NIR_PASS_V(nir, dxil_nir_lower_subgroup_id);
   NIR_PASS_V(nir, dxil_nir_lower_num_subgroups);

   nir_lower_subgroups_options subgroup_options = {
      .ballot_bit_size = 32,
      .ballot_components = 4,
      .lower_subgroup_masks = true,
      .lower_to_scalar = true,
      .lower_relative_shuffle = true,
      .lower_inverse_ballot = true,
   };
   if (nir->info.stage != MESA_SHADER_FRAGMENT &&
       nir->info.stage != MESA_SHADER_COMPUTE)
      subgroup_options.lower_quad = true;
   NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
   NIR_PASS_V(nir, nir_lower_bit_size, lower_bit_size_callback, NULL);

   // Ensure subgroup scans on bools are gone
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, dxil_nir_lower_unsupported_subgroup_scan);

   // Force sample-rate shading if we're asked to.
   if (conf->force_sample_rate_shading) {
      assert(nir->info.stage == MESA_SHADER_FRAGMENT);
      nir->info.fs.uses_sample_shading = true;
   }

   if (conf->zero_based_vertex_instance_id) {
      // vertex_id and instance_id should have already been transformed to
      // base zero before spirv_to_dxil was called. Therefore, we can zero out
      // base/firstVertex/Instance.
      gl_system_value system_values[] = {SYSTEM_VALUE_FIRST_VERTEX,
                                         SYSTEM_VALUE_BASE_VERTEX,
                                         SYSTEM_VALUE_BASE_INSTANCE};
      NIR_PASS_V(nir, dxil_nir_lower_system_values_to_zero, system_values,
                 ARRAY_SIZE(system_values));
   }

   if (conf->lower_view_index_to_rt_layer)
      NIR_PASS_V(nir, lower_view_index_to_rt_layer);

   *requires_runtime_data = false;
   NIR_PASS(*requires_runtime_data, nir,
            dxil_spirv_nir_lower_shader_system_values,
            conf);

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_input_attachments,
                 &(nir_input_attachment_options){
                     .use_fragcoord_sysval = false,
                     .use_layer_id_sysval = !conf->lower_view_index,
                     .use_view_id_for_layer = !conf->lower_view_index,
                 });

      /* This will lower load_helper to a memoized is_helper if needed; otherwise, load_helper
       * will stay, but trivially translatable to IsHelperLane(), which will be known to be
       * constant across the invocation since no demotion would have been used.
       */
      NIR_PASS_V(nir, nir_lower_discard_or_demote, nir->info.use_legacy_math_rules);

      NIR_PASS_V(nir, dxil_nir_lower_discard_and_terminate);
      NIR_PASS_V(nir, nir_lower_returns);
      NIR_PASS_V(nir, dxil_nir_lower_sample_pos);
      NIR_PASS_V(nir, nir_lower_fragcoord_wtrans);
   }

   NIR_PASS_V(nir, nir_opt_deref);

   NIR_PASS_V(nir, nir_lower_memory_model);
   NIR_PASS_V(nir, dxil_nir_lower_coherent_loads_and_stores);

   if (conf->inferred_read_only_images_as_srvs) {
      const nir_opt_access_options opt_access_options = {
         .is_vulkan = true,
      };
      NIR_PASS_V(nir, nir_opt_access, &opt_access_options);
   }

   NIR_PASS_V(nir, dxil_spirv_nir_discard_point_size_var);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out |
              nir_var_system_value | nir_var_mem_shared,
              NULL);

   uint32_t push_constant_size = 0;
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
              nir_address_format_32bit_offset);
   NIR_PASS_V(nir, dxil_spirv_nir_lower_load_push_constant,
              nir_address_format_32bit_index_offset,
              conf->push_constant_cbv.register_space,
              conf->push_constant_cbv.base_shader_register,
              &push_constant_size);

   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_32bit_index_offset);

   if (nir->info.shared_memory_explicit_layout) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared,
                 shared_var_info);
      NIR_PASS_V(nir, dxil_nir_split_unaligned_loads_stores, nir_var_mem_shared);
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
   } else {
      NIR_PASS_V(nir, nir_split_struct_vars, nir_var_mem_shared);
      NIR_PASS_V(nir, dxil_nir_flatten_var_arrays, nir_var_mem_shared);
      NIR_PASS_V(nir, dxil_nir_lower_var_bit_size, nir_var_mem_shared,
                 conf->shader_model_max >= SHADER_MODEL_6_2 ? 16 : 32, 64);
   }

   NIR_PASS_V(nir, dxil_nir_lower_int_cubemaps, false);

   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   if (conf->yz_flip.mode != DXIL_SPIRV_YZ_FLIP_NONE) {
      assert(nir->info.stage == MESA_SHADER_VERTEX ||
             nir->info.stage == MESA_SHADER_GEOMETRY ||
             nir->info.stage == MESA_SHADER_TESS_EVAL);
      NIR_PASS_V(nir,
                 dxil_spirv_nir_lower_yz_flip,
                 conf, requires_runtime_data);
   }

   if (*requires_runtime_data) {
      add_runtime_data_var(nir, conf->runtime_data_cbv.register_space,
                           conf->runtime_data_cbv.base_shader_register);
   }

   if (push_constant_size > 0) {
      add_push_constant_var(nir, push_constant_size,
                            conf->push_constant_cbv.register_space,
                            conf->push_constant_cbv.base_shader_register);
   }

   NIR_PASS_V(nir, nir_lower_fp16_casts, nir_lower_fp16_all & ~nir_lower_fp16_rtz);
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, dxil_nir_lower_double_math);

   {
      bool progress;
      do {
         progress = false;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
         NIR_PASS(progress, nir, nir_opt_deref);
         NIR_PASS(progress, nir, nir_opt_dce);
         NIR_PASS(progress, nir, nir_opt_undef);
         NIR_PASS(progress, nir, nir_opt_constant_folding);
         NIR_PASS(progress, nir, nir_opt_cse);
         if (nir_opt_loop(nir)) {
            progress = true;
            NIR_PASS(progress, nir, nir_copy_prop);
            NIR_PASS(progress, nir, nir_opt_dce);
         }
         NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
         NIR_PASS(progress, nir, nir_opt_algebraic);
         NIR_PASS(progress, nir, nir_opt_dead_cf);
         NIR_PASS(progress, nir, nir_opt_remove_phis);
      } while (progress);
   }

   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(nir, nir_split_struct_vars, nir_var_function_temp);
   NIR_PASS_V(nir, dxil_nir_flatten_var_arrays, nir_var_function_temp);
   NIR_PASS_V(nir, dxil_nir_lower_var_bit_size, nir_var_function_temp,
              conf->shader_model_max >= SHADER_MODEL_6_2 ? 16 : 32, 64);

   NIR_PASS_V(nir, nir_lower_doubles, NULL, nir->options->lower_doubles_options);

   if (conf->declared_read_only_images_as_srvs)
      NIR_PASS_V(nir, nir_lower_readonly_images_to_tex, true);
   nir_lower_tex_options lower_tex_options = {
      .lower_txp = UINT32_MAX,
      .lower_invalid_implicit_lod = true,
      .lower_tg4_offsets = true,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);

   NIR_PASS_V(nir, dxil_nir_split_clip_cull_distance);
   const struct dxil_nir_lower_loads_stores_options loads_stores_options = {
      .use_16bit_ssbo = conf->shader_model_max >= SHADER_MODEL_6_2,
   };
   NIR_PASS_V(nir, dxil_nir_lower_loads_stores_to_dxil, &loads_stores_options);
   NIR_PASS_V(nir, dxil_nir_split_typed_samplers);
   NIR_PASS_V(nir, dxil_nir_lower_ubo_array_one_to_static);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, nir_remove_dead_derefs);
   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_uniform | nir_var_shader_in | nir_var_shader_out,
              NULL);
   NIR_PASS_V(nir, merge_ubos_and_ssbos);

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      dxil_sort_ps_outputs(nir);
   } else {
      /* Dummy linking step so we get different driver_location
       * assigned even if there's just a single vertex shader in the
       * pipeline. The real linking happens in dxil_spirv_nir_link().
       */
      nir->info.outputs_written =
         dxil_reassign_driver_locations(nir, nir_var_shader_out, 0);
   }

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_variable_with_modes(var, nir, nir_var_shader_in) {
         /* spirv_to_dxil() only emits generic vertex attributes. */
         assert(var->data.location >= VERT_ATTRIB_GENERIC0);
         var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
      }

      nir->info.inputs_read =
         dxil_sort_by_driver_location(nir, nir_var_shader_in);
   } else {
      nir->info.inputs_read =
         dxil_reassign_driver_locations(nir, nir_var_shader_in, 0);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   glsl_type_singleton_decref();
}