/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"
#include "util/u_debug.h"

/**
 * This file implements the lowering required for VK_KHR_multiview.
 *
 * When possible, Primitive Replication is used and the shader is modified to
 * make gl_Position an array and fill it with values for each view.
 *
 * Otherwise we implement multiview using instanced rendering. The number of
 * instances in each draw call is multiplied by the number of views in the
 * subpass. Then, in the shader, we divide gl_InstanceId by the number of
 * views to recover the application's instance ID, and use
 * gl_InstanceId % view_count to compute the actual ViewIndex.
 */
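
/* Worked example (illustrative): with view_mask = 0b0011 (two views) and an
 * application draw of 3 instances, 6 instances are actually drawn. Writing
 * hw_id for the raw gl_InstanceId delivered by the hardware:
 *
 *    actual instance = hw_id / 2 -> 0, 0, 1, 1, 2, 2
 *    ViewIndex       = hw_id % 2 -> 0, 1, 0, 1, 0, 1
 */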

struct lower_multiview_state {
   nir_builder builder;

   uint32_t view_mask;

   /* The instance ID as delivered by the hardware: the application's
    * instance ID multiplied by the number of views.
    */
   nir_def *instance_id_with_views;

   /* Lazily built and cached so each is only computed once per shader. */
   nir_def *instance_id;
   nir_def *view_index;
};

static nir_def *
build_instance_id(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

   if (state->instance_id == NULL) {
      nir_builder *b = &state->builder;

      b->cursor =
         nir_after_instr(state->instance_id_with_views->parent_instr);

      /* We use instancing for implementing multiview. The actual instance id
       * is given by dividing instance_id by the number of views in this
       * subpass.
       */
      state->instance_id =
         nir_idiv(b, state->instance_id_with_views,
                  nir_imm_int(b, util_bitcount(state->view_mask)));
   }

   return state->instance_id;
}

static nir_def *
build_view_index(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage != MESA_SHADER_FRAGMENT);

   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      b->cursor =
         nir_after_instr(state->instance_id_with_views->parent_instr);

      assert(state->view_mask != 0);
      if (util_bitcount(state->view_mask) == 1) {
         /* Set the view index directly. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support 16 viewports */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview. The compacted view
          * id is given by instance_id % view_count. We then have to convert
          * that to an actual view id.
          */
         nir_def *compacted =
            nir_umod_imm(b, state->instance_id_with_views,
                         util_bitcount(state->view_mask));

         if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
            /* If we have a full view mask, then compacted is what we want */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask. The map is given by
             * 16 nibbles, each of which is a value from 0 to 15.
             */
            uint64_t remap = 0;
            uint32_t i = 0;
            u_foreach_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }
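
            /* Worked example (illustrative): view_mask = 0b1101 enables
             * views 0, 2 and 3, so the compacted indices 0, 1, 2 map to the
             * nibbles 0x0, 0x2, 0x3 and remap ends up as 0x320.
             */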

            nir_def *shift = nir_imul_imm(b, compacted, 4);

            /* Shifting a 32-bit value by 32 or more bits is undefined, so we
             * split the 64-bit remap constant into two dwords and select the
             * right half based on the shift amount. One of these days, when
             * we have int64 everywhere, this will be easier.
             */
            nir_def *shifted;
            if (remap <= UINT32_MAX) {
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               nir_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                           nir_iadd_imm(b, shift, -32));
               shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32),
                                   shifted_low, shifted_high);
            }
            state->view_index = nir_iand_imm(b, shifted, 0xf);
         }
      } else {
         /* In stages other than the VS, read the view index passed from the
          * previous stage as an input varying.
          */
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1, 0);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         nir_deref_instr *deref = nir_build_deref_var(b, idx_var);
         if (glsl_type_is_array(type))
            deref = nir_build_deref_array_imm(b, deref, 0);

         state->view_index = nir_load_deref(b, deref);
      }
   }

   return state->view_index;
}

static bool
is_load_view_index(const nir_instr *instr, const void *data)
{
   return instr->type == nir_instr_type_intrinsic &&
          nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_view_index;
}

static nir_def *
replace_load_view_index_with_zero(struct nir_builder *b,
                                  nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_imm_zero(b, 1, 32);
}

static nir_def *
replace_load_view_index_with_layer_id(struct nir_builder *b,
                                      nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_load_layer_id(b);
}

bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
                        bool use_primitive_replication)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);

   /* If multiview isn't enabled, just lower the ViewIndex builtin to zero. */
   if (view_mask == 0) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_zero, NULL);
   }

   /* In fragment shaders, the view index matches the layer being rendered,
    * which the earlier stages store in VARYING_SLOT_LAYER (see below).
    */
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_layer_id, NULL);
   }

   /* This pass assumes a single entrypoint */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   /* Primitive Replication allows a shader to write different positions for
    * each view in the same execution. If only the position depends on the
    * view, then it is possible to use the feature instead of instancing to
    * implement multiview.
    */
   if (use_primitive_replication) {
      nir_lower_multiview_options options = {
         .view_mask = view_mask,
         .allowed_per_view_outputs = VARYING_BIT_POS
      };
      bool progress = nir_lower_multiview(shader, options);

      if (progress) {
         nir_builder b = nir_builder_at(nir_before_impl(entrypoint));

         /* Fill Layer ID with zero. Replication will use that as base to
          * apply the RTAI offsets.
          */
         nir_variable *layer_id_out =
            nir_variable_create(shader, nir_var_shader_out,
                                glsl_int_type(), "layer ID");
         layer_id_out->data.location = VARYING_SLOT_LAYER;
         nir_store_var(&b, layer_id_out, nir_imm_zero(&b, 1, 32), 0x1);
      }

      return progress;
   }

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   state.builder = nir_builder_at(nir_before_impl(entrypoint));
   nir_builder *b = &state.builder;

   /* Save the original "instance ID" which is the actual instance ID
    * multiplied by the number of views.
    */
   state.instance_id_with_views = nir_load_instance_id(b);

   /* The view index is available in all stages but the instance id is only
    * available in the VS. If it's not a fragment shader, we need to pass
    * the view index on to the next stage.
    */
   nir_def *view_index = build_view_index(&state);

   assert(view_index->parent_instr->block == nir_start_block(entrypoint));
   b->cursor = nir_after_instr(view_index->parent_instr);

   /* Unless there is only one possible view index (that would be set
    * directly), pass it to the next stage.
    */
   nir_variable *view_index_out = NULL;
   if (util_bitcount(state.view_mask) != 1) {
      view_index_out = nir_variable_create(shader, nir_var_shader_out,
                                           glsl_int_type(), "view index");
      view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
   }

   nir_variable *layer_id_out =
      nir_variable_create(shader, nir_var_shader_out,
                          glsl_int_type(), "layer ID");
   layer_id_out->data.location = VARYING_SLOT_LAYER;

   /* In geometry shaders, the stores happen before each EmitVertex()
    * instead (see the loop below).
    */
   if (shader->info.stage != MESA_SHADER_GEOMETRY) {
      if (view_index_out)
         nir_store_var(b, view_index_out, view_index, 0x1);

      nir_store_var(b, layer_id_out, view_index, 0x1);
   }

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         switch (load->intrinsic) {
         case nir_intrinsic_load_instance_id:
            if (&load->def != state.instance_id_with_views) {
               nir_def_replace(&load->def, build_instance_id(&state));
            }
            break;
         case nir_intrinsic_load_view_index:
            nir_def_replace(&load->def, view_index);
            break;
         case nir_intrinsic_emit_vertex_with_counter:
            /* In geometry shaders, outputs become undefined after every
             * EmitVertex() call. We need to re-emit them for each vertex.
             */
            b->cursor = nir_before_instr(instr);
            if (view_index_out)
               nir_store_var(b, view_index_out, view_index, 0x1);

            nir_store_var(b, layer_id_out, view_index, 0x1);
            break;
         default:
            break;
         }
      }
   }

   nir_metadata_preserve(entrypoint, nir_metadata_control_flow);

   return true;
}

bool
anv_check_for_primitive_replication(struct anv_device *device,
                                    VkShaderStageFlags stages,
                                    nir_shader **shaders,
                                    uint32_t view_mask)
{
   assert(device->info->ver >= 12);

   static int primitive_replication_max_views = -1;
   if (primitive_replication_max_views < 0) {
      /* TODO: Figure out why we are not getting the same benefits for more
       * than 2 views. For now, use Primitive Replication just for the 2-view
       * case by default.
       */
      const unsigned default_max_views = 2;

      primitive_replication_max_views =
         MIN2(MAX_VIEWS_FOR_PRIMITIVE_REPLICATION,
              debug_get_num_option("ANV_PRIMITIVE_REPLICATION_MAX_VIEWS",
                                   default_max_views));
   }
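
   /* For example, setting ANV_PRIMITIVE_REPLICATION_MAX_VIEWS=4 in the
    * environment raises the cap to four views, still clamped to
    * MAX_VIEWS_FOR_PRIMITIVE_REPLICATION.
    */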

   /* TODO: We should be able to support replication at 'geometry' stages
    * later than Vertex. In that case only the last stage can refer to
    * gl_ViewIndex.
    */
   if (stages & ~(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))
      return false;

   /* It's possible we have no vertex shader yet (with pipeline libraries) */
   if (!(stages & VK_SHADER_STAGE_VERTEX_BIT))
      return false;

   int view_count = util_bitcount(view_mask);
   if (view_count == 1 || view_count > primitive_replication_max_views)
      return false;

   nir_lower_multiview_options options = {
      .view_mask = view_mask,
      .allowed_per_view_outputs = VARYING_BIT_POS
   };
   return nir_can_lower_multiview(shaders[MESA_SHADER_VERTEX], options);
}
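
/* Sketch of a typical call sequence (illustrative; the actual callers live
 * in the pipeline-compilation code):
 *
 *    bool use_pr = device->info->ver >= 12 &&
 *                  anv_check_for_primitive_replication(device, stages,
 *                                                      shaders, view_mask);
 *    anv_nir_lower_multiview(shaders[MESA_SHADER_VERTEX], view_mask, use_pr);
 */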