/*
 * Copyright © Microsoft Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "spirv_to_dxil.h"
#include "nir_to_dxil.h"
#include "dxil_nir.h"
#include "dxil_nir_lower_int_cubemaps.h"
#include "shader_enums.h"
#include "spirv/nir_spirv.h"
#include "util/blob.h"
#include "dxil_spirv_nir.h"

#include "git_sha1.h"
#include "vulkan/vulkan.h"
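
/* Size/alignment callback used when assigning an explicit layout to shared
 * memory: vector/scalar types only, tightly packed, with booleans stored as
 * 32-bit values.
 */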
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
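
/* Declare the runtime-data CBV as a hidden UBO: a uint array wrapped in a
 * struct, sized for this stage's compute or vertex runtime data.
 */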
static nir_variable *
add_runtime_data_var(nir_shader *nir, unsigned desc_set, unsigned binding)
{
   unsigned runtime_data_size =
      nir->info.stage == MESA_SHADER_COMPUTE
         ? sizeof(struct dxil_spirv_compute_runtime_data)
         : sizeof(struct dxil_spirv_vertex_runtime_data);

   const struct glsl_type *array_type =
      glsl_array_type(glsl_uint_type(), runtime_data_size / sizeof(unsigned),
                      sizeof(unsigned));
   const struct glsl_struct_field field = {array_type, "arr"};
   nir_variable *var = nir_variable_create(
      nir, nir_var_mem_ubo,
      glsl_struct_type(&field, 1, "runtime_data", false), "runtime_data");
   var->data.descriptor_set = desc_set;
   // Check that desc_set fits in the descriptor_set bitfield
   assert(var->data.descriptor_set == desc_set);
   var->data.binding = binding;
   var->data.how_declared = nir_var_hidden;
   return var;
}
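
/* State passed to the system-value lowering callback. */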
struct lower_system_values_data {
   nir_address_format ubo_format;
   unsigned desc_set;
   unsigned binding;
};
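
/* Callback: rewrite system-value loads (workgroup count, first vertex,
 * base instance, draw id, ...) into UBO loads from the runtime-data CBV at
 * the matching struct offset.
 */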
static bool
lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
                           void *cb_data)
{
   if (instr->type != nir_instr_type_intrinsic) {
      return false;
   }

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return false;

   assert(intrin->dest.is_ssa);

   int offset = 0;
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_num_workgroups:
      offset =
         offsetof(struct dxil_spirv_compute_runtime_data, group_count_x);
      break;
   case nir_intrinsic_load_first_vertex:
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, first_vertex);
      break;
   case nir_intrinsic_load_is_indexed_draw:
      offset =
         offsetof(struct dxil_spirv_vertex_runtime_data, is_indexed_draw);
      break;
   case nir_intrinsic_load_base_instance:
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, base_instance);
      break;
   case nir_intrinsic_load_draw_id:
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, draw_id);
      break;
   default:
      return false;
   }

   struct lower_system_values_data *data =
      (struct lower_system_values_data *)cb_data;

   builder->cursor = nir_after_instr(instr);
   nir_address_format ubo_format = data->ubo_format;

   nir_ssa_def *index = nir_vulkan_resource_index(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(builder, 0),
      .desc_set = data->desc_set, .binding = data->binding,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_ssa_def *load_data = build_load_ubo_dxil(
      builder, nir_channel(builder, load_desc, 0),
      nir_imm_int(builder, offset),
      nir_dest_num_components(intrin->dest), nir_dest_bit_size(intrin->dest));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, load_data);
   nir_instr_remove(instr);
   return true;
}
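
/* Run lower_shader_system_values() over the whole shader; returns true if
 * any system value was rewritten, i.e. the shader now reads the runtime-data
 * CBV.
 */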
static bool
dxil_spirv_nir_lower_shader_system_values(nir_shader *shader,
                                          nir_address_format ubo_format,
                                          unsigned desc_set, unsigned binding)
{
   struct lower_system_values_data data = {
      .ubo_format = ubo_format,
      .desc_set = desc_set,
      .binding = binding,
   };
   return nir_shader_instructions_pass(shader, lower_shader_system_values,
                                       nir_metadata_block_index |
                                          nir_metadata_dominance |
                                          nir_metadata_loop_analysis,
                                       &data);
}
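
/* Declare the push-constant block as a hidden UBO, padded to a multiple of
 * 16 bytes.
 */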
static nir_variable *
add_push_constant_var(nir_shader *nir, unsigned size, unsigned desc_set, unsigned binding)
{
   /* Size must be a multiple of 16 because buffer loads fetch 16 bytes at a time */
   unsigned num_32bit_words = ALIGN_POT(size, 16) / 4;

   const struct glsl_type *array_type =
      glsl_array_type(glsl_uint_type(), num_32bit_words, 4);
   const struct glsl_struct_field field = {array_type, "arr"};
   nir_variable *var = nir_variable_create(
      nir, nir_var_mem_ubo,
      glsl_struct_type(&field, 1, "block", false), "push_constants");
   var->data.descriptor_set = desc_set;
   var->data.binding = binding;
   var->data.how_declared = nir_var_hidden;
   return var;
}
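
/* State for the push-constant lowering callback; size tracks the highest
 * push-constant byte accessed.
 */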
struct lower_load_push_constant_data {
   nir_address_format ubo_format;
   unsigned desc_set;
   unsigned binding;
   unsigned size;
};
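
/* Callback: rewrite load_push_constant intrinsics into UBO loads from the
 * push-constant CBV.
 */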
static bool
lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
                         void *cb_data)
{
   struct lower_load_push_constant_data *data =
      (struct lower_load_push_constant_data *)cb_data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* The only intrinsic we care about is load_push_constant */
   if (intrin->intrinsic != nir_intrinsic_load_push_constant)
      return false;

   uint32_t base = nir_intrinsic_base(intrin);
   uint32_t range = nir_intrinsic_range(intrin);

   data->size = MAX2(base + range, data->size);

   builder->cursor = nir_after_instr(instr);
   nir_address_format ubo_format = data->ubo_format;

   nir_ssa_def *index = nir_vulkan_resource_index(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(builder, 0),
      .desc_set = data->desc_set, .binding = data->binding,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_ssa_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
   nir_ssa_def *load_data = build_load_ubo_dxil(
      builder, nir_channel(builder, load_desc, 0),
      nir_iadd_imm(builder, offset, base),
      nir_dest_num_components(intrin->dest), nir_dest_bit_size(intrin->dest));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, load_data);
   nir_instr_remove(instr);
   return true;
}
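
/* Lower all push-constant loads and report in *size the push-constant range
 * that needs to be bound.
 */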
static bool
dxil_spirv_nir_lower_load_push_constant(nir_shader *shader,
                                        nir_address_format ubo_format,
                                        unsigned desc_set, unsigned binding,
                                        uint32_t *size)
{
   bool ret;
   struct lower_load_push_constant_data data = {
      .ubo_format = ubo_format,
      .desc_set = desc_set,
      .binding = binding,
   };
   ret = nir_shader_instructions_pass(shader, lower_load_push_constant,
                                      nir_metadata_block_index |
                                         nir_metadata_dominance |
                                         nir_metadata_loop_analysis,
                                      &data);

   *size = data.size;

   assert(ret == (*size > 0));

   return ret;
}
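
/* State for the YZ-flip lowering callback. */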
struct lower_yz_flip_data {
   bool *reads_sysval_ubo;
   const struct dxil_spirv_runtime_conf *rt_conf;
};
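
/* Callback: rewrite stores to gl_Position so Y and/or Z are flipped, either
 * unconditionally or based on a flip mask read from the runtime-data UBO.
 */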
static bool
lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
              void *cb_data)
{
   struct lower_yz_flip_data *data =
      (struct lower_yz_flip_data *)cb_data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   if (intrin->intrinsic != nir_intrinsic_store_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intrin, 0);
   if (var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_POS)
      return false;

   builder->cursor = nir_before_instr(instr);

   const struct dxil_spirv_runtime_conf *rt_conf = data->rt_conf;

   nir_ssa_def *pos = nir_ssa_for_src(builder, intrin->src[1], 4);
   nir_ssa_def *y_pos = nir_channel(builder, pos, 1);
   nir_ssa_def *z_pos = nir_channel(builder, pos, 2);
   nir_ssa_def *y_flip_mask = NULL, *z_flip_mask = NULL, *dyn_yz_flip_mask = NULL;

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_YZ_FLIP_CONDITIONAL) {
      // Conditional YZ-flip. The flip bitmask is passed through the vertex
      // runtime data UBO.
      unsigned offset =
         offsetof(struct dxil_spirv_vertex_runtime_data, yz_flip_mask);
      nir_address_format ubo_format = nir_address_format_32bit_index_offset;

      nir_ssa_def *index = nir_vulkan_resource_index(
         builder, nir_address_format_num_components(ubo_format),
         nir_address_format_bit_size(ubo_format),
         nir_imm_int(builder, 0),
         .desc_set = rt_conf->runtime_data_cbv.register_space,
         .binding = rt_conf->runtime_data_cbv.base_shader_register,
         .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
         builder, nir_address_format_num_components(ubo_format),
         nir_address_format_bit_size(ubo_format),
         index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      dyn_yz_flip_mask =
         build_load_ubo_dxil(builder,
                             nir_channel(builder, load_desc, 0),
                             nir_imm_int(builder, offset), 1, 32);
      *data->reads_sysval_ubo = true;
   }

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_Y_FLIP_UNCONDITIONAL)
      y_flip_mask = nir_imm_int(builder, rt_conf->yz_flip.y_mask);
   else if (rt_conf->yz_flip.mode & DXIL_SPIRV_Y_FLIP_CONDITIONAL)
      y_flip_mask = nir_iand_imm(builder, dyn_yz_flip_mask, DXIL_SPIRV_Y_FLIP_MASK);

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_Z_FLIP_UNCONDITIONAL)
      z_flip_mask = nir_imm_int(builder, rt_conf->yz_flip.z_mask);
   else if (rt_conf->yz_flip.mode & DXIL_SPIRV_Z_FLIP_CONDITIONAL)
      z_flip_mask = nir_ushr_imm(builder, dyn_yz_flip_mask, DXIL_SPIRV_Z_FLIP_SHIFT);

   /* TODO: Multi-viewport */

   if (y_flip_mask) {
      nir_ssa_def *flip = nir_test_mask(builder, y_flip_mask, 1);

      // Y-flip => pos.y = -pos.y
      y_pos = nir_bcsel(builder, flip, nir_fneg(builder, y_pos), y_pos);
   }

   if (z_flip_mask) {
      nir_ssa_def *flip = nir_test_mask(builder, z_flip_mask, 1);

      // Z-flip => pos.z = -pos.z + 1.0f
      z_pos = nir_bcsel(builder, flip,
                        nir_fadd_imm(builder, nir_fneg(builder, z_pos), 1.0f),
                        z_pos);
   }

   nir_ssa_def *def = nir_vec4(builder,
                               nir_channel(builder, pos, 0),
                               y_pos,
                               z_pos,
                               nir_channel(builder, pos, 3));
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[1], nir_src_for_ssa(def));
   return true;
}
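
/* Apply the gl_Position YZ-flip lowering; *reads_sysval_ubo is set when the
 * conditional mode makes the shader read the runtime-data UBO.
 */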
static bool
dxil_spirv_nir_lower_yz_flip(nir_shader *shader,
                             const struct dxil_spirv_runtime_conf *rt_conf,
                             bool *reads_sysval_ubo)
{
   struct lower_yz_flip_data data = {
      .rt_conf = rt_conf,
      .reads_sysval_ubo = reads_sysval_ubo,
   };

   return nir_shader_instructions_pass(shader, lower_yz_flip,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance |
                                       nir_metadata_loop_analysis,
                                       &data);
}
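
/* Callback: drop accesses to the point-size output, replacing loads with a
 * constant 1.0.
 */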
static bool
discard_psiz_access(struct nir_builder *builder, nir_instr *instr,
                    void *cb_data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   if (intrin->intrinsic != nir_intrinsic_store_deref &&
       intrin->intrinsic != nir_intrinsic_load_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intrin, 0);
   if (!var || var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_PSIZ)
      return false;

   builder->cursor = nir_before_instr(instr);

   if (intrin->intrinsic == nir_intrinsic_load_deref)
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_imm_float(builder, 1.0));

   nir_instr_remove(instr);
   return true;
}
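
/* Point size has no DXIL equivalent, so remove the PSIZ output variable (and
 * every access to it) from the stages that can write it.
 */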
static bool
dxil_spirv_nir_discard_point_size_var(nir_shader *shader)
{
   if (shader->info.stage != MESA_SHADER_VERTEX &&
       shader->info.stage != MESA_SHADER_TESS_EVAL &&
       shader->info.stage != MESA_SHADER_GEOMETRY)
      return false;

   nir_variable *psiz = NULL;
   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.location == VARYING_SLOT_PSIZ) {
         psiz = var;
         break;
      }
   }

   if (!psiz)
      return false;

   if (!nir_shader_instructions_pass(shader, discard_psiz_access,
                                     nir_metadata_block_index |
                                     nir_metadata_dominance |
                                     nir_metadata_loop_analysis,
                                     NULL))
      return false;

   nir_remove_dead_derefs(shader);
   return true;
}
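
/* Callback: replace loads of user varyings never written by the previous
 * stage with undef so the variables can be eliminated.
 */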
static bool
kill_undefined_varyings(struct nir_builder *b,
                        nir_instr *instr,
                        void *data)
{
   const nir_shader *prev_stage_nir = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->intrinsic != nir_intrinsic_load_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var)
      return false;

   /* Ignore builtins for now, some of them get default values
    * when not written by previous stages.
    */
   if (var->data.location < VARYING_SLOT_VAR0)
      return false;

   uint32_t loc = var->data.patch ?
                  var->data.location - VARYING_SLOT_PATCH0 : var->data.location;
   uint64_t written = var->data.patch ?
                      prev_stage_nir->info.patch_outputs_written :
                      prev_stage_nir->info.outputs_written;
   if (BITFIELD64_BIT(loc) & written)
      return false;

   b->cursor = nir_after_instr(instr);
   nir_ssa_def *undef =
      nir_ssa_undef(b, nir_dest_num_components(intr->dest),
                    nir_dest_bit_size(intr->dest));
   nir_ssa_def_rewrite_uses(&intr->dest.ssa, undef);
   nir_instr_remove(instr);
   return true;
}
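
/* Remove input variables (and their loads) that the previous stage never
 * writes.
 */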
static bool
dxil_spirv_nir_kill_undefined_varyings(nir_shader *shader,
                                       const nir_shader *prev_stage_shader)
{
   if (!nir_shader_instructions_pass(shader,
                                     kill_undefined_varyings,
                                     nir_metadata_dominance |
                                     nir_metadata_block_index |
                                     nir_metadata_loop_analysis,
                                     (void *)prev_stage_shader))
      return false;

   nir_remove_dead_derefs(shader);
   nir_remove_dead_variables(shader, nir_var_shader_in, NULL);
   return true;
}
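
/* Callback: remove stores to output slots that are set in the kill mask. */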
static bool
kill_unused_outputs(struct nir_builder *b,
                    nir_instr *instr,
                    void *data)
{
   uint64_t kill_mask = *((uint64_t *)data);

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->intrinsic != nir_intrinsic_store_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var || var->data.mode != nir_var_shader_out)
      return false;

   unsigned loc = var->data.patch ?
                  var->data.location - VARYING_SLOT_PATCH0 :
                  var->data.location;
   if (!(BITFIELD64_BIT(loc) & kill_mask))
      return false;

   nir_instr_remove(instr);
   return true;
}
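
/* Remove user outputs (and, for tessellation, patch outputs) that the next
 * stage never reads.
 */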
static bool
dxil_spirv_nir_kill_unused_outputs(nir_shader *shader,
                                   nir_shader *next_stage_shader)
{
   uint64_t kill_var_mask =
      shader->info.outputs_written & ~next_stage_shader->info.inputs_read;
   bool progress = false;

   /* Don't kill builtin vars */
   kill_var_mask &= BITFIELD64_MASK(MAX_VARYING) << VARYING_SLOT_VAR0;

   if (nir_shader_instructions_pass(shader,
                                    kill_unused_outputs,
                                    nir_metadata_dominance |
                                    nir_metadata_block_index |
                                    nir_metadata_loop_analysis,
                                    (void *)&kill_var_mask))
      progress = true;

   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      kill_var_mask =
         (shader->info.patch_outputs_written |
          shader->info.patch_outputs_read) &
         ~next_stage_shader->info.patch_inputs_read;
      if (nir_shader_instructions_pass(shader,
                                       kill_unused_outputs,
                                       nir_metadata_dominance |
                                       nir_metadata_block_index |
                                       nir_metadata_loop_analysis,
                                       (void *)&kill_var_mask))
         progress = true;
   }

   if (progress) {
      nir_opt_dce(shader);
      nir_remove_dead_derefs(shader);
      nir_remove_dead_variables(shader, nir_var_shader_out, NULL);
   }

   return progress;
}
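
/* Link two consecutive stages: drop varyings that are undefined or unused
 * across the interface, then assign matching driver locations on both sides.
 */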
void
dxil_spirv_nir_link(nir_shader *nir, nir_shader *prev_stage_nir)
{
   glsl_type_singleton_init_or_ref();

   if (prev_stage_nir) {
      NIR_PASS_V(nir, dxil_spirv_nir_kill_undefined_varyings, prev_stage_nir);
      NIR_PASS_V(prev_stage_nir, dxil_spirv_nir_kill_unused_outputs, nir);

      nir->info.inputs_read =
         dxil_reassign_driver_locations(nir, nir_var_shader_in,
                                        prev_stage_nir->info.outputs_written);
      prev_stage_nir->info.outputs_written =
         dxil_reassign_driver_locations(prev_stage_nir, nir_var_shader_out,
                                        nir->info.inputs_read);
   }

   glsl_type_singleton_decref();
}
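
/* Main lowering pipeline run on freshly translated SPIR-V NIR to make it
 * consumable by nir_to_dxil(): I/O vectorization, system-value and
 * push-constant lowering, YZ position flip, explicit I/O, scalarization and
 * the usual NIR optimization loop.
 */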
void
dxil_spirv_nir_passes(nir_shader *nir,
                      const struct dxil_spirv_runtime_conf *conf,
                      bool *requires_runtime_data)
{
   glsl_type_singleton_init_or_ref();

   NIR_PASS_V(nir, dxil_nir_lower_int_cubemaps, false);
   NIR_PASS_V(nir, nir_lower_io_to_vector,
              nir_var_shader_out |
              (nir->info.stage != MESA_SHADER_VERTEX ? nir_var_shader_in : 0));
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(nir, nir_remove_dead_derefs);

   const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
      .frag_coord = true,
      .point_coord = true,
   };
   NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);

   NIR_PASS_V(nir, nir_lower_system_values);

   // Force sample-rate shading if we're asked to.
   if (conf->force_sample_rate_shading) {
      assert(nir->info.stage == MESA_SHADER_FRAGMENT);
      nir_foreach_shader_in_variable(var, nir)
         var->data.sample = true;
   }

   if (conf->zero_based_vertex_instance_id) {
      // vertex_id and instance_id should have already been transformed to
      // base zero before spirv_to_dxil was called. Therefore, we can zero out
      // base/firstVertex/Instance.
      gl_system_value system_values[] = {SYSTEM_VALUE_FIRST_VERTEX,
                                         SYSTEM_VALUE_BASE_VERTEX,
                                         SYSTEM_VALUE_BASE_INSTANCE};
      NIR_PASS_V(nir, dxil_nir_lower_system_values_to_zero, system_values,
                 ARRAY_SIZE(system_values));
   }

   *requires_runtime_data = false;
   NIR_PASS(*requires_runtime_data, nir,
            dxil_spirv_nir_lower_shader_system_values,
            nir_address_format_32bit_index_offset,
            conf->runtime_data_cbv.register_space,
            conf->runtime_data_cbv.base_shader_register);

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_input_attachments,
                 &(nir_input_attachment_options){
                     .use_fragcoord_sysval = false,
                     .use_layer_id_sysval = true,
                 });

      NIR_PASS_V(nir, dxil_nir_lower_discard_and_terminate);
      NIR_PASS_V(nir, nir_lower_returns);
   }

   NIR_PASS_V(nir, nir_opt_deref);

   if (conf->read_only_images_as_srvs) {
      const nir_opt_access_options opt_access_options = {
         .is_vulkan = true,
         .infer_non_readable = true,
      };
      NIR_PASS_V(nir, nir_opt_access, &opt_access_options);
   }

   NIR_PASS_V(nir, dxil_spirv_nir_discard_point_size_var);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out |
              nir_var_system_value | nir_var_mem_shared,
              NULL);

   uint32_t push_constant_size = 0;
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
              nir_address_format_32bit_offset);
   NIR_PASS_V(nir, dxil_spirv_nir_lower_load_push_constant,
              nir_address_format_32bit_index_offset,
              conf->push_constant_cbv.register_space,
              conf->push_constant_cbv.base_shader_register,
              &push_constant_size);

   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_32bit_index_offset);

   if (!nir->info.shared_memory_explicit_layout) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared,
                 shared_var_info);
   }
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared,
              nir_address_format_32bit_offset_as_64bit);

   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   if (conf->yz_flip.mode != DXIL_SPIRV_YZ_FLIP_NONE) {
      assert(nir->info.stage == MESA_SHADER_VERTEX ||
             nir->info.stage == MESA_SHADER_GEOMETRY);
      NIR_PASS_V(nir,
                 dxil_spirv_nir_lower_yz_flip,
                 conf, requires_runtime_data);
   }

   if (*requires_runtime_data) {
      add_runtime_data_var(nir, conf->runtime_data_cbv.register_space,
                           conf->runtime_data_cbv.base_shader_register);
   }

   if (push_constant_size > 0) {
      add_push_constant_var(nir, push_constant_size,
                            conf->push_constant_cbv.register_space,
                            conf->push_constant_cbv.base_shader_register);
   }

   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, dxil_nir_lower_double_math);

   {
      bool progress;
      do
      {
         progress = false;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
         NIR_PASS(progress, nir, nir_opt_deref);
         NIR_PASS(progress, nir, nir_opt_dce);
         NIR_PASS(progress, nir, nir_opt_undef);
         NIR_PASS(progress, nir, nir_opt_constant_folding);
         NIR_PASS(progress, nir, nir_opt_cse);
         if (nir_opt_trivial_continues(nir)) {
            progress = true;
            NIR_PASS(progress, nir, nir_copy_prop);
            NIR_PASS(progress, nir, nir_opt_dce);
         }
         NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
         NIR_PASS(progress, nir, nir_opt_algebraic);
      } while (progress);
   }

   NIR_PASS_V(nir, nir_lower_readonly_images_to_tex, true);
   nir_lower_tex_options lower_tex_options = {
      .lower_invalid_implicit_lod = true,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);

   NIR_PASS_V(nir, dxil_nir_lower_atomics_to_dxil);
   NIR_PASS_V(nir, dxil_nir_split_clip_cull_distance);
   NIR_PASS_V(nir, dxil_nir_lower_loads_stores_to_dxil);
   NIR_PASS_V(nir, dxil_nir_split_typed_samplers);
   NIR_PASS_V(nir, dxil_nir_lower_bool_input);
   NIR_PASS_V(nir, dxil_nir_lower_ubo_array_one_to_static);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, nir_remove_dead_derefs);
   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_uniform | nir_var_shader_in | nir_var_shader_out,
              NULL);

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      dxil_sort_ps_outputs(nir);
   } else {
      /* Dummy linking step so we get different driver_location
       * assigned even if there's just a single vertex shader in the
       * pipeline. The real linking happens in dxil_spirv_nir_link().
       */
      nir->info.outputs_written =
         dxil_reassign_driver_locations(nir, nir_var_shader_out, 0);
   }

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_variable_with_modes(var, nir, nir_var_shader_in) {
         /* spirv_to_dxil() only emits generic vertex attributes. */
         assert(var->data.location >= VERT_ATTRIB_GENERIC0);
         var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
      }

      nir->info.inputs_read =
         dxil_sort_by_driver_location(nir, nir_var_shader_in);
   } else {
      nir->info.inputs_read =
         dxil_reassign_driver_locations(nir, nir_var_shader_in, 0);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   glsl_type_singleton_decref();
}