/*
 * Copyright © Microsoft Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_control_flow.h"

#include "dxil_nir.h"

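/* A patch-constant memory barrier: a barrier whose memory modes include
 * nir_var_shader_out. These are the synchronization points that delimit the
 * patch-constant loops built below.
 */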
static bool
is_memory_barrier_tcs_patch(const nir_intrinsic_instr *intr)
{
   if (intr->intrinsic == nir_intrinsic_barrier &&
       nir_intrinsic_memory_modes(intr) & nir_var_shader_out) {
      assert(nir_intrinsic_memory_modes(intr) == nir_var_shader_out);
      assert(nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP ||
             nir_intrinsic_memory_scope(intr) == SCOPE_INVOCATION);
      return true;
   } else {
      return false;
   }
}

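/* Strip the patch-constant work out of the main hull shader: drop every
 * store_output (patch-constant store) and every patch-constant barrier,
 * leaving only the per-control-point outputs.
 */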
static void
remove_hs_intrinsics(nir_function_impl *impl)
{
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_store_output &&
             !is_memory_barrier_tcs_patch(intr))
            continue;
         nir_instr_remove(instr);
      }
   }
   nir_metadata_preserve(impl, nir_metadata_block_index | nir_metadata_dominance);
}

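/* Helpers that recursively collect an instruction and everything it depends
 * on (the instructions producing its SSA sources) into a set.
 */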
static void
add_instr_and_srcs_to_set(struct set *instr_set, nir_instr *instr);

static bool
add_srcs_to_set(nir_src *src, void *state)
{
   add_instr_and_srcs_to_set(state, src->ssa->parent_instr);
   return true;
}

static void
add_instr_and_srcs_to_set(struct set *instr_set, nir_instr *instr)
{
   bool was_already_found = false;
   _mesa_set_search_or_add(instr_set, instr, &was_already_found);
   if (!was_already_found)
      nir_foreach_src(instr, add_srcs_to_set, instr_set);
}

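/* Reduce the cloned patch-constant function to only the instructions that
 * feed a store_output (or a patch-constant barrier), plus the control flow
 * they live under.
 */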
static void
prune_patch_function_to_intrinsic_and_srcs(nir_function_impl *impl)
{
   struct set *instr_set = _mesa_pointer_set_create(NULL);

   /* Do this in two phases:
    * 1. Find all instructions that contribute to a store_output and add them to
    *    the set. Also, add instructions that contribute to control flow.
    * 2. Erase every instruction that isn't in the set.
    */
   nir_foreach_block(block, impl) {
      nir_if *following_if = nir_block_get_following_if(block);
      if (following_if) {
         add_instr_and_srcs_to_set(instr_set, following_if->condition.ssa->parent_instr);
      }
      nir_foreach_instr_safe(instr, block) {
         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic != nir_intrinsic_store_output &&
                !is_memory_barrier_tcs_patch(intr))
               continue;
         } else if (instr->type != nir_instr_type_jump)
            continue;
         add_instr_and_srcs_to_set(instr_set, instr);
      }
   }

   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         struct set_entry *entry = _mesa_set_search(instr_set, instr);
         if (!entry)
            nir_instr_remove(instr);
      }
   }

   _mesa_set_destroy(instr_set, NULL);
}

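/* Find a cursor at function-level control flow for this instruction: if it is
 * already in a top-level block, point right before it; otherwise point at the
 * end of the top-level block just before the outermost control-flow construct
 * that contains it.
 */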
static nir_cursor
get_cursor_for_instr_without_cf(nir_instr *instr)
{
   nir_block *block = instr->block;
   if (block->cf_node.parent->type == nir_cf_node_function)
      return nir_before_instr(instr);

   do {
      block = nir_cf_node_as_block(nir_cf_node_prev(block->cf_node.parent));
   } while (block->cf_node.parent->type != nir_cf_node_function);
   return nir_after_block_before_jump(block);
}

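/* Bookkeeping for one patch-constant loop being built: the loop-counter value
 * loaded for the current iteration, the cursors bracketing the code that
 * belongs inside the loop, the cursor inside the loop body where that code is
 * reinserted, and the loop itself.
 */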
struct tcs_patch_loop_state {
   nir_def *deref, *count;
   nir_cursor begin_cursor, end_cursor, insert_cursor;
   nir_loop *loop;
};

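/* Open a loop that runs the patch-constant code once per output control point:
 * counter = 0; loop { if (counter >= tcs_vertices_out) break; <body lands at
 * insert_cursor>; counter++; }
 */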
static void
start_tcs_loop(nir_builder *b, struct tcs_patch_loop_state *state, nir_deref_instr *loop_var_deref)
{
   if (!loop_var_deref)
      return;

   nir_store_deref(b, loop_var_deref, nir_imm_int(b, 0), 1);
   state->loop = nir_push_loop(b);
   state->count = nir_load_deref(b, loop_var_deref);
   nir_push_if(b, nir_ige_imm(b, state->count, b->impl->function->shader->info.tess.tcs_vertices_out));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, NULL);
   state->insert_cursor = b->cursor;
   nir_store_deref(b, loop_var_deref, nir_iadd_imm(b, state->count, 1), 1);
   nir_pop_loop(b, state->loop);
}

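/* Close the current loop (if one is open) by moving everything between
 * begin_cursor and end_cursor into the loop body at insert_cursor, then reset
 * the state for the next loop.
 */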
static void
end_tcs_loop(nir_builder *b, struct tcs_patch_loop_state *state)
{
   if (!state->loop)
      return;

   nir_cf_list extracted;
   nir_cf_extract(&extracted, state->begin_cursor, state->end_cursor);
   nir_cf_reinsert(&extracted, state->insert_cursor);

   *state = (struct tcs_patch_loop_state){ 0 };
}

/* In HLSL/DXIL, the hull (tessellation control) shader is split into two parts:
 * 1. The main hull shader, which runs once per output control point.
 * 2. A patch constant function, which runs once per patch.
 * In GLSL/NIR, these are combined. Each invocation must write to the output
 * array with a constant gl_InvocationID, which is (apparently) lowered to an
 * if/else ladder in NIR. Each invocation must write the same value to patch
 * constants - or else undefined behavior strikes. NIR uses store_output to
 * write the patch constants, and store_per_vertex_output to write the control
 * point values.
 *
 * We clone the NIR function to produce two: one with the store_output intrinsics
 * removed, which becomes the main shader (it only writes control points), and one
 * with everything that doesn't contribute to a store_output removed, which becomes
 * the patch constant function.
 *
 * For the patch constant function, if the expressions rely on gl_InvocationID,
 * then we need to run the resulting logic in a loop, using the loop counter in
 * place of gl_InvocationID. This loop can be terminated when a barrier is hit. If
 * gl_InvocationID is used again after the barrier, then another loop needs to begin.
 */
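/* For example, a TCS that writes both gl_out[gl_InvocationID].gl_Position
 * (a store_per_vertex_output) and gl_TessLevelOuter (a store_output) ends up
 * as a main shader containing only the gl_Position store, plus a patch
 * constant function containing only the tess-level store and whatever feeds it.
 */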
void
dxil_nir_split_tess_ctrl(nir_shader *nir, nir_function **patch_const_func)
{
   assert(nir->info.stage == MESA_SHADER_TESS_CTRL);
   assert(exec_list_length(&nir->functions) == 1);
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);

   *patch_const_func = nir_function_create(nir, "PatchConstantFunc");
   nir_function_impl *patch_const_func_impl = nir_function_impl_clone(nir, entrypoint);
   nir_function_set_impl(*patch_const_func, patch_const_func_impl);

   remove_hs_intrinsics(entrypoint);
   prune_patch_function_to_intrinsic_and_srcs(patch_const_func_impl);

   /* Kill dead references to the invocation ID from the patch const func so we
    * don't insert unnecessary loops.
    */
   bool progress;
   do {
      progress = false;
      progress |= nir_opt_dead_cf(nir);
      progress |= nir_opt_dce(nir);
   } while (progress);

   /* Now, the patch constant function needs to be split into blocks and loops.
    * The series of instructions up to the first block containing a load_invocation_id
    * will run sequentially. Then a loop is inserted so load_invocation_id will load the
    * loop counter. This loop continues until a barrier is reached, when the loop
    * is closed and the process begins again.
    *
    * First, sink load_invocation_id so that it's present on both sides of barriers.
    * Each use gets a unique load of the invocation ID.
    */
   nir_builder b = nir_builder_create(patch_const_func_impl);
   nir_foreach_block(block, patch_const_func_impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_invocation_id ||
             list_is_empty(&intr->def.uses) ||
             list_is_singular(&intr->def.uses))
            continue;
         nir_foreach_use_including_if_safe(src, &intr->def) {
            b.cursor = nir_before_src(src);
            nir_src_rewrite(src, nir_load_invocation_id(&b));
         }
         nir_instr_remove(instr);
      }
   }

   /* Now replace those invocation ID loads with loads of a local variable that's
    * used as a loop counter. */
   nir_variable *loop_var = NULL;
   nir_deref_instr *loop_var_deref = NULL;
   struct tcs_patch_loop_state state = { 0 };
   nir_foreach_block_safe(block, patch_const_func_impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         switch (intr->intrinsic) {
         case nir_intrinsic_load_invocation_id: {
            if (!loop_var) {
               loop_var = nir_local_variable_create(patch_const_func_impl, glsl_int_type(), "PatchConstInvocId");
               b.cursor = nir_before_impl(patch_const_func_impl);
               loop_var_deref = nir_build_deref_var(&b, loop_var);
            }
            if (!state.loop) {
               b.cursor = state.begin_cursor = get_cursor_for_instr_without_cf(instr);
               start_tcs_loop(&b, &state, loop_var_deref);
            }
            nir_def_rewrite_uses(&intr->def, state.count);
            break;
         }
         case nir_intrinsic_barrier:
            if (!is_memory_barrier_tcs_patch(intr))
               break;

            /* The GL tessellation spec says:
             *    The barrier() function may only be called inside the main entry point of the
             *    tessellation control shader and may not be called in potentially divergent flow
             *    control. In particular, barrier() may not be called inside a switch statement,
             *    in either sub-statement of an if statement, inside a do, for, or while loop,
             *    or at any point after a return statement in the function main().
             *
             * Therefore, we should be at function-level control flow.
             */
            assert(nir_cursors_equal(nir_before_instr(instr), get_cursor_for_instr_without_cf(instr)));
            state.end_cursor = nir_before_instr(instr);
            end_tcs_loop(&b, &state);
            nir_instr_remove(instr);
            break;
         default:
            break;
         }
      }
   }
   state.end_cursor = nir_after_block_before_jump(nir_impl_last_block(patch_const_func_impl));
   end_tcs_loop(&b, &state);
}

struct remove_tess_level_accesses_data {
   unsigned location;
   unsigned size;
};

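/* Callback for nir_shader_instructions_pass: delete stores to, and replace
 * loads from, tess level components at or beyond the variable's trimmed size
 * (loads become undef).
 */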
static bool
remove_tess_level_accesses(nir_builder *b, nir_instr *instr, void *_data)
{
   struct remove_tess_level_accesses_data *data = _data;
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic != nir_intrinsic_store_output &&
       intr->intrinsic != nir_intrinsic_load_input)
      return false;

   nir_io_semantics io = nir_intrinsic_io_semantics(intr);
   if (io.location != data->location)
      return false;

   if (nir_intrinsic_component(intr) < data->size)
      return false;

   if (intr->intrinsic == nir_intrinsic_store_output) {
      assert(nir_src_num_components(intr->src[0]) == 1);
      nir_instr_remove(instr);
   } else {
      b->cursor = nir_after_instr(instr);
      assert(intr->def.num_components == 1);
      nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, intr->def.bit_size));
   }
   return true;
}

/* Update the types of the tess level variables and remove writes to removed components.
 * GL always has a 4-component outer tess level and a 2-component inner one, while D3D
 * requires the number of components to vary with the primitive mode:
 * 4 and 2 for quads, 3 and 1 for triangles, and 2 and 0 for lines.
 */
bool
dxil_nir_fixup_tess_level_for_domain(nir_shader *nir)
{
   bool progress = false;
   if (nir->info.tess._primitive_mode != TESS_PRIMITIVE_QUADS) {
      nir_foreach_variable_with_modes_safe(var, nir, nir_var_shader_out | nir_var_shader_in) {
         unsigned new_array_size = 4;
         unsigned old_array_size = glsl_array_size(var->type);
         if (var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) {
            new_array_size = nir->info.tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES ? 3 : 2;
            assert(var->data.compact && (old_array_size == 4 || old_array_size == new_array_size));
         } else if (var->data.location == VARYING_SLOT_TESS_LEVEL_INNER) {
            new_array_size = nir->info.tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES ? 1 : 0;
            assert(var->data.compact && (old_array_size == 2 || old_array_size == new_array_size));
         } else
            continue;

         if (new_array_size == old_array_size)
            continue;

         progress = true;
         if (new_array_size)
            var->type = glsl_array_type(glsl_float_type(), new_array_size, 0);
         else {
            exec_node_remove(&var->node);
            ralloc_free(var);
         }

         struct remove_tess_level_accesses_data pass_data = {
            .location = var->data.location,
            .size = new_array_size
         };

         nir_shader_instructions_pass(nir, remove_tess_level_accesses,
                                      nir_metadata_block_index | nir_metadata_dominance, &pass_data);
      }
   }
   return progress;
}

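/* After input variable types are resized, refresh the type on each var deref
 * so derefs stay consistent with their variables.
 */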
static bool
tcs_update_deref_input_types(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_deref)
      return false;

   nir_deref_instr *deref = nir_instr_as_deref(instr);
   if (deref->deref_type != nir_deref_type_var)
      return false;

   nir_variable *var = deref->var;
   deref->type = var->type;
   return true;
}

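/* D3D hull shaders declare how many input control points they receive, so
 * size the arrayed (per-vertex) TCS inputs to that count and fix up their
 * derefs accordingly.
 */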
bool
dxil_nir_set_tcs_patches_in(nir_shader *nir, unsigned num_control_points)
{
   bool progress = false;
   nir_foreach_variable_with_modes(var, nir, nir_var_shader_in) {
      if (nir_is_arrayed_io(var, MESA_SHADER_TESS_CTRL)) {
         var->type = glsl_array_type(glsl_get_array_element(var->type), num_control_points, 0);
         progress = true;
      }
   }

   if (progress)
      nir_shader_instructions_pass(nir, tcs_update_deref_input_types, nir_metadata_all, NULL);

   return progress;
}