/*
 * Copyright © 2016 Intel Corporation
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"
#include "nir_control_flow.h"

/**
 * This file implements an optimization for multiview. Some GPUs have a
 * special mode which allows the vertex shader (or the last stage in the
 * geometry pipeline) to create multiple primitives in different layers of the
 * framebuffer at once by writing multiple copies of gl_Position. The
 * assumption is that in most uses of multiview, the only use of gl_ViewIndex
 * is to change the position to implement the parallax effect, and other
 * varyings will be the same between the different views. We put the body of
 * the original vertex shader in a loop, writing to a different copy of
 * gl_Position each loop iteration, and then let other optimizations clean up
 * the mess. On some hardware it is also possible to write different copies of
 * other varyings, expanding the set of shaders that the optimization is
 * usable for.
 *
 * Indexes in the per-view output arrays generated by this pass do not
 * correspond directly to gl_ViewIndex. Instead, they are compacted, dropping
 * disabled views. For example, with view mask 0b1010, gl_Position[0]
 * is view index 1, and gl_Position[1] is view index 3.
 */

static bool
shader_writes_to_memory(nir_shader *shader)
{
   /* With multiview, we would need to ensure that memory writes happen either
    * once or once per view. Since the combination of multiview and memory
    * writes is not expected, we'll just skip this optimization in this case.
    */

   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_deref_atomic:
         case nir_intrinsic_deref_atomic_swap:
         case nir_intrinsic_store_ssbo:
         case nir_intrinsic_ssbo_atomic:
         case nir_intrinsic_ssbo_atomic_swap:
         case nir_intrinsic_store_shared:
         case nir_intrinsic_store_shared2_amd:
         case nir_intrinsic_shared_atomic:
         case nir_intrinsic_shared_atomic_swap:
         case nir_intrinsic_shared_append_amd:
         case nir_intrinsic_shared_consume_amd:
         case nir_intrinsic_task_payload_atomic:
         case nir_intrinsic_task_payload_atomic_swap:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic:
         case nir_intrinsic_image_deref_atomic_swap:
            return true;

         default:
            /* Keep walking. */
            break;
         }
      }
   }

   return false;
}

bool
nir_shader_uses_view_index(nir_shader *shader)
{
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_load_view_index)
            return true;
      }
   }

   return false;
}

static bool
shader_only_allowed_outputs_use_view_index(nir_shader *shader,
                                           uint64_t allowed_outputs)
{
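   /* Strategy: work on a clone of the shader, delete the stores to the
    * allowed per-view outputs, clean up the dead code this exposes, and then
    * check whether gl_ViewIndex is still read anywhere else.
    */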
   nir_shader *shader_no_position = nir_shader_clone(NULL, shader);
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader_no_position);

   /* Remove stores to allowed outputs from the cloned shader. */
   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *store = nir_instr_as_intrinsic(instr);
         if (store->intrinsic != nir_intrinsic_store_deref)
            continue;

         nir_variable *var = nir_intrinsic_get_var(store, 0);
         if (!(allowed_outputs & BITFIELD64_BIT(var->data.location)))
            continue;

         nir_instr_remove(&store->instr);
      }
   }

   /* Clean up the shader so unused load_view_index intrinsics are removed. */
   bool progress;
   do {
      progress = false;
      progress |= nir_opt_dead_cf(shader_no_position);

      /* Peephole select will drop if-blocks whose then and else branches are
       * empty, which removes the use of the SSA value in the condition.
       */
      progress |= nir_opt_peephole_select(shader_no_position, 0, false, false);

      progress |= nir_opt_dce(shader_no_position);
   } while (progress);

   bool uses_view_index = nir_shader_uses_view_index(shader_no_position);

   ralloc_free(shader_no_position);
   return !uses_view_index;
}

/* Return true if it's safe to call nir_lower_multiview() on this vertex
 * shader. Note that this only handles driver-agnostic checks, i.e. things
 * which would make nir_lower_multiview() incorrect. Any driver-specific
 * checks, e.g. for sufficient varying space or performance considerations,
 * should be handled in the driver.
 *
 * Note that we don't handle the more complex checks needed for lowering
 * pipelines with geometry or tessellation shaders.
 */

bool
nir_can_lower_multiview(nir_shader *shader, nir_lower_multiview_options options)
{
   return !shader_writes_to_memory(shader) &&
          shader_only_allowed_outputs_use_view_index(
             shader, options.allowed_per_view_outputs);
}

/**
 * The lowering. Call with the last active geometry stage.
 */

bool
nir_lower_multiview(nir_shader *shader, nir_lower_multiview_options options)
{
   assert(shader->info.stage != MESA_SHADER_FRAGMENT);
   int view_count = util_bitcount(options.view_mask);

   shader->info.view_mask = options.view_mask;

   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   /* Update per-view outputs to refer to arrays. */
   nir_foreach_shader_out_variable(var, shader) {
      if (options.allowed_per_view_outputs & BITFIELD64_BIT(var->data.location)) {
         var->type = glsl_array_type(var->type, view_count, 0);
         var->data.per_view = true;
         shader->info.per_view_outputs |= BITFIELD64_BIT(var->data.location);
      }
   }

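   /* Pull the original shader body out of the entrypoint; it will be
    * reinserted inside the per-view loop built below.
    */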
   nir_cf_list body;
   nir_cf_list_extract(&body, &entrypoint->body);

   nir_builder b = nir_builder_at(nir_after_impl(entrypoint));

   /* Loop index will go from 0 to view_count. */
   nir_variable *loop_index_var =
      nir_local_variable_create(entrypoint, glsl_uint_type(), "loop_index");
   nir_deref_instr *loop_index_deref = nir_build_deref_var(&b, loop_index_var);
   nir_store_deref(&b, loop_index_deref, nir_imm_int(&b, 0), 1);

   /* Array of view index values that are active in the loop. Note that the
    * loop index only matches the view index if there are no gaps in the
    * view_mask.
    */
   nir_variable *view_index_var = nir_local_variable_create(
      entrypoint, glsl_array_type(glsl_uint_type(), view_count, 0), "view_index");
   nir_deref_instr *view_index_deref = nir_build_deref_var(&b, view_index_var);
   {
      int array_position = 0;
      uint32_t view_mask_temp = options.view_mask;
      while (view_mask_temp) {
         uint32_t view_index = u_bit_scan(&view_mask_temp);
         nir_store_deref(&b, nir_build_deref_array_imm(&b, view_index_deref, array_position),
                         nir_imm_int(&b, view_index), 1);
         array_position++;
      }
   }

   /* Create the equivalent of
    *
    *    while (true):
    *       if (loop_index >= view_count):
    *          break
    *
    *       view_index = active_indices[loop_index]
    *
    *       out1_deref = &out1[loop_index]
    *       out2_deref = &out2[loop_index]
    *       ...
    *
    *       # Placeholder for the body to be reinserted.
    *
    *       loop_index += 1
    *
    * Later both `view_index` and `outN_deref` will be used to rewrite the
    * original shader body.
    */

   nir_loop *loop = nir_push_loop(&b);

   nir_def *loop_index = nir_load_deref(&b, loop_index_deref);
   nir_def *cmp = nir_ige_imm(&b, loop_index, view_count);
   nir_if *loop_check = nir_push_if(&b, cmp);
   nir_jump(&b, nir_jump_break);
   nir_pop_if(&b, loop_check);

   nir_def *view_index =
      nir_load_deref(&b, nir_build_deref_array(&b, view_index_deref, loop_index));

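   /* For each per-view output, build an array deref at the current loop index
    * and remember it, keyed by the variable, so that stores in the reinserted
    * body can be redirected to it. The body is reinserted after the last of
    * these derefs so that they dominate their new uses.
    */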
   struct hash_table *out_derefs = _mesa_pointer_hash_table_create(NULL);
   nir_cursor body_cursor = b.cursor;
   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.per_view) {
         nir_deref_instr *deref =
            nir_build_deref_array(&b, nir_build_deref_var(&b, var), loop_index);
         _mesa_hash_table_insert(out_derefs, var, (void *)deref);
         body_cursor = nir_after_instr(&deref->instr);
      }
   }

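   /* Advance to the next compacted view and close the loop. */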
   nir_store_deref(&b, loop_index_deref, nir_iadd_imm(&b, loop_index, 1), 1);
   nir_pop_loop(&b, loop);

   /* Reinsert the body. */
   b.cursor = body_cursor;
   nir_cf_reinsert(&body, b.cursor);

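   /* Rewrite the reinserted body: loads of gl_ViewIndex become the
    * per-iteration view index, and stores to per-view outputs are redirected
    * through the array derefs built above.
    */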
   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_view_index: {
            nir_def_rewrite_uses(&intrin->def, view_index);
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_variable *var = nir_intrinsic_get_var(intrin, 0);
            struct hash_entry *entry = _mesa_hash_table_search(out_derefs, var);
            if (entry) {
               nir_deref_instr *new_deref = entry->data;
               nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);

               nir_src_rewrite(&intrin->src[0], &new_deref->def);

               /* Remove the old deref since it has the wrong type. */
               nir_deref_instr_remove_if_unused(old_deref);
            }
            break;
         }

         case nir_intrinsic_load_deref: {
            nir_variable *var = nir_intrinsic_get_var(intrin, 0);
            if (_mesa_hash_table_search(out_derefs, var)) {
               unreachable("Should have lowered I/O to temporaries "
                           "so no load_deref on output is expected.");
            }
            break;
         }

         case nir_intrinsic_copy_deref:
            unreachable("Should have lowered copy_derefs at this point");
            break;

         default:
            /* Do nothing. */
            break;
         }
      }
   }

   _mesa_hash_table_destroy(out_derefs, NULL);

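   /* The entire control flow of the entrypoint has been rebuilt, so no
    * metadata can be preserved.
    */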
   nir_metadata_preserve(entrypoint, nir_metadata_none);
   return true;
}