1 /*
2 * Copyright © 2015 Intel Corporation
3 * Copyright © 2022 Collabora, LTD
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "vk_nir.h"
26
27 #include "compiler/nir/nir_xfb_info.h"
28 #include "compiler/spirv/nir_spirv.h"
29 #include "vk_log.h"
30 #include "vk_util.h"
31
/* First two words of any SPIR-V module: magic number, then version. */
#define SPIR_V_MAGIC_NUMBER 0x07230203

/**
 * Return the version word (word 1) of a SPIR-V binary.
 *
 * The binary must be at least two words long and begin with the SPIR-V
 * magic number; both preconditions are assert-checked only.
 */
uint32_t
vk_spirv_version(const uint32_t *spirv_data, size_t spirv_size_B)
{
   /* We read words 0 (magic) and 1 (version), so two words must exist. */
   assert(spirv_size_B >= 2 * sizeof(uint32_t));
   assert(spirv_data[0] == SPIR_V_MAGIC_NUMBER);

   return spirv_data[1];
}
41
42 static void
spirv_nir_debug(void * private_data,enum nir_spirv_debug_level level,size_t spirv_offset,const char * message)43 spirv_nir_debug(void *private_data,
44 enum nir_spirv_debug_level level,
45 size_t spirv_offset,
46 const char *message)
47 {
48 const struct vk_object_base *log_obj = private_data;
49
50 switch (level) {
51 case NIR_SPIRV_DEBUG_LEVEL_INFO:
52 //vk_logi(VK_LOG_OBJS(log_obj), "SPIR-V offset %lu: %s",
53 // (unsigned long) spirv_offset, message);
54 break;
55 case NIR_SPIRV_DEBUG_LEVEL_WARNING:
56 vk_logw(VK_LOG_OBJS(log_obj), "SPIR-V offset %lu: %s",
57 (unsigned long) spirv_offset, message);
58 break;
59 case NIR_SPIRV_DEBUG_LEVEL_ERROR:
60 vk_loge(VK_LOG_OBJS(log_obj), "SPIR-V offset %lu: %s",
61 (unsigned long) spirv_offset, message);
62 break;
63 default:
64 break;
65 }
66 }
67
68 bool
nir_vk_is_not_xfb_output(nir_variable * var,void * data)69 nir_vk_is_not_xfb_output(nir_variable *var, void *data)
70 {
71 if (var->data.mode != nir_var_shader_out)
72 return true;
73
74 /* From the Vulkan 1.3.259 spec:
75 *
76 * VUID-StandaloneSpirv-Offset-04716
77 *
78 * "Only variables or block members in the output interface decorated
79 * with Offset can be captured for transform feedback, and those
80 * variables or block members must also be decorated with XfbBuffer
81 * and XfbStride, or inherit XfbBuffer and XfbStride decorations from
82 * a block containing them"
83 *
84 * glslang generates gl_PerVertex builtins when they are not declared,
85 * enabled XFB should not prevent them from being DCE'd.
86 *
87 * The logic should match nir_gather_xfb_info_with_varyings
88 */
89
90 if (!var->data.explicit_xfb_buffer)
91 return true;
92
93 bool is_array_block = var->interface_type != NULL &&
94 glsl_type_is_array(var->type) &&
95 glsl_without_array(var->type) == var->interface_type;
96
97 if (!is_array_block) {
98 return !var->data.explicit_offset;
99 } else {
100 /* For array of blocks we have to check each element */
101 unsigned aoa_size = glsl_get_aoa_size(var->type);
102 const struct glsl_type *itype = var->interface_type;
103 unsigned nfields = glsl_get_length(itype);
104 for (unsigned b = 0; b < aoa_size; b++) {
105 for (unsigned f = 0; f < nfields; f++) {
106 if (glsl_get_struct_field_offset(itype, f) >= 0)
107 return false;
108 }
109 }
110
111 return true;
112 }
113 }
114
/**
 * Translate a SPIR-V binary into a NIR shader and run the standard
 * post-translation lowering/cleanup passes.
 *
 * @param device          Log object for SPIR-V warnings/errors; installed
 *                        as the debug callback's private data.
 * @param spirv_data      The SPIR-V module words.
 * @param spirv_size_B    Byte size of the module; must be a non-zero
 *                        multiple of 4 (assert-checked only).
 * @param stage           Expected shader stage of the entrypoint.
 * @param entrypoint_name Name of the OpEntryPoint to compile.
 * @param subgroup_size   Requested subgroup-size mode, forwarded to
 *                        spirv_to_nir via the local options copy.
 * @param spec_info       Optional specialization constants (may be NULL).
 * @param spirv_options   Caller translation options; copied locally so the
 *                        debug hook and subgroup size can be injected.
 * @param nir_options     Backend compiler options for the new shader.
 * @param internal        Marks the shader as driver-internal.
 * @param mem_ctx         If non-NULL, the shader is ralloc-stolen to this
 *                        context; otherwise the caller owns and frees it.
 *
 * @return the new nir_shader, or NULL if SPIR-V translation fails.
 */
nir_shader *
vk_spirv_to_nir(struct vk_device *device,
                const uint32_t *spirv_data, size_t spirv_size_B,
                gl_shader_stage stage, const char *entrypoint_name,
                enum gl_subgroup_size subgroup_size,
                const VkSpecializationInfo *spec_info,
                const struct spirv_to_nir_options *spirv_options,
                const struct nir_shader_compiler_options *nir_options,
                bool internal,
                void *mem_ctx)
{
   assert(spirv_size_B >= 4 && spirv_size_B % 4 == 0);
   assert(spirv_data[0] == SPIR_V_MAGIC_NUMBER);

   /* Copy the caller's options so we can inject our debug callback and the
    * requested subgroup size without mutating caller state.
    */
   struct spirv_to_nir_options spirv_options_local = *spirv_options;
   spirv_options_local.debug.func = spirv_nir_debug;
   /* spirv_nir_debug reads this as a vk_object_base; assumes struct
    * vk_device begins with one — NOTE(review): relies on struct layout.
    */
   spirv_options_local.debug.private_data = (void *)device;
   spirv_options_local.subgroup_size = subgroup_size;

   /* Convert VkSpecializationInfo into the malloc'd array spirv_to_nir
    * expects; freed immediately after translation.
    */
   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries =
      vk_spec_info_to_nir_spirv(spec_info, &num_spec_entries);

   nir_shader *nir = spirv_to_nir(spirv_data, spirv_size_B / 4,
                                  spec_entries, num_spec_entries,
                                  stage, entrypoint_name,
                                  &spirv_options_local, nir_options);
   free(spec_entries);

   if (nir == NULL)
      return NULL;

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");
   nir_validate_ssa_dominance(nir, "after spirv_to_nir");
   /* Re-parent the shader to the caller's ralloc context, if given. */
   if (mem_ctx != NULL)
      ralloc_steal(mem_ctx, nir);

   nir->info.internal = internal;

   /* We have to lower away local constant initializers right before we
    * inline functions.  That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Pick off the single entrypoint that we want */
   nir_remove_non_entrypoints(nir);

   /* Now that we've deleted all but the main function, we can go ahead and
    * lower the rest of the constant initializers.  We do this here so that
    * nir_remove_dead_variables and split_per_member_structs below see the
    * corresponding stores.
    */
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);

   /* Split member structs.  We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   /* Keep XFB-captured outputs alive (see nir_vk_is_not_xfb_output). */
   nir_remove_dead_variables_options dead_vars_opts = {
      .can_remove_var = nir_vk_is_not_xfb_output,
   };
   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value |
              nir_var_shader_call_data | nir_var_ray_hit_attrib,
              &dead_vars_opts);

   /* This needs to happen after remove_dead_vars because GLSLang likes to
    * insert dead clip/cull vars and we don't want to clip/cull based on
    * uninitialized garbage.
    */
   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);

   /* Gather transform-feedback info for the stages that can feed XFB. */
   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      NIR_PASS_V(nir, nir_shader_gather_xfb_info);

   NIR_PASS_V(nir, nir_propagate_invariant, false);

   return nir;
}
204