/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"
#include "util/u_debug.h"

/**
 * This file implements the lowering required for VK_KHR_multiview.
 *
 * When possible, Primitive Replication is used and the shader is modified to
 * make gl_Position an array and fill it with values for each view.
 *
 * Otherwise we implement multiview using instanced rendering. The number of
 * instances in each draw call is multiplied by the number of views in the
 * subpass. Then, in the shader, we divide gl_InstanceId by the number of
 * views and use gl_InstanceId % view_count to compute the actual ViewIndex.
 */
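
/* As a concrete illustration of the instancing scheme above (the numbers
 * are an example only; the real divisor is util_bitcount(view_mask)): with
 * a view_mask of 0x7 (three views), a draw of N instances is emitted as a
 * draw of 3*N hardware instances. In the shader, the application-visible
 * gl_InstanceId becomes gl_InstanceId / 3, and gl_InstanceId % 3 selects
 * which of the three views the current instance renders.
 */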

struct lower_multiview_state {
   nir_builder builder;

   uint32_t view_mask;

   nir_def *instance_id;
   nir_def *view_index;
};

static nir_def *
build_instance_id(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

   if (state->instance_id == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_impl(b->impl);

      /* We use instancing for implementing multiview. The actual instance id
       * is given by dividing instance_id by the number of views in this
       * subpass.
       */
      state->instance_id =
         nir_idiv(b, nir_load_instance_id(b),
                  nir_imm_int(b, util_bitcount(state->view_mask)));
   }

   return state->instance_id;
}

static nir_def *
build_view_index(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage != MESA_SHADER_FRAGMENT);

   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_impl(b->impl);

      assert(state->view_mask != 0);
      if (util_bitcount(state->view_mask) == 1) {
         /* Set the view index directly. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support 16 views */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview. The compacted view
          * id is given by instance_id % view_count. We then have to convert
          * that to an actual view id.
          */
         nir_def *compacted =
            nir_umod_imm(b, nir_load_instance_id(b),
                         util_bitcount(state->view_mask));

         if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
            /* If we have a full view mask, then compacted is what we want */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask. The map is given by
             * 16 nibbles, each of which is a value from 0 to 15.
             */
            uint64_t remap = 0;
            uint32_t i = 0;
            u_foreach_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }
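
            /* For example (illustration only): view_mask = 0xd (views 0, 2
             * and 3) yields remap = 0x320, so compacted index 0 maps to
             * view 0, index 1 to view 2, and index 2 to view 3.
             */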

            nir_def *shift = nir_imul_imm(b, compacted, 4);

            /* One of these days, when we have int64 everywhere, this will be
             * easier.
             */
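            /* When remap does not fit in 32 bits, it is split into two
             * 32-bit immediates and the wanted nibble is taken from
             * whichever half contains it, since a single 32-bit shift by 32
             * or more bits cannot produce the value we need here.
             */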
            nir_def *shifted;
            if (remap <= UINT32_MAX) {
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               nir_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                           nir_iadd_imm(b, shift, -32));
               shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32),
                                   shifted_low, shifted_high);
            }
            state->view_index = nir_iand_imm(b, shifted, 0xf);
         }
      } else {
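         /* For the remaining geometry stages, the view index written by the
          * previous stage arrives as a shader input. Tessellation control
          * and geometry shader inputs are per-vertex, so for those stages
          * the variable is declared as an array and element 0 is read.
          */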
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1, 0);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         nir_deref_instr *deref = nir_build_deref_var(b, idx_var);
         if (glsl_type_is_array(type))
            deref = nir_build_deref_array_imm(b, deref, 0);

         state->view_index = nir_load_deref(b, deref);
      }
   }

   return state->view_index;
}

static bool
is_load_view_index(const nir_instr *instr, const void *data)
{
   return instr->type == nir_instr_type_intrinsic &&
          nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_view_index;
}

static nir_def *
replace_load_view_index_with_zero(struct nir_builder *b,
                                  nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_imm_zero(b, 1, 32);
}

static nir_def *
replace_load_view_index_with_layer_id(struct nir_builder *b,
                                      nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_load_layer_id(b);
}

bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
                        bool use_primitive_replication)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);

   /* If multiview isn't enabled, just lower the ViewIndex builtin to zero. */
   if (view_mask == 0) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_zero, NULL);
   }

   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_layer_id, NULL);
   }

   /* This pass assumes a single entrypoint */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   /* Primitive Replication allows a shader to write different positions for
    * each view in the same execution. If only the position depends on the
    * view, then it is possible to use the feature instead of instancing to
    * implement multiview.
    */
   if (use_primitive_replication) {
      bool progress = nir_lower_multiview(shader, view_mask);

      if (progress) {
         nir_builder b = nir_builder_at(nir_before_impl(entrypoint));

         /* Fill Layer ID with zero. Replication will use that as the base to
          * apply the RTAI offsets.
          */
         nir_variable *layer_id_out =
            nir_variable_create(shader, nir_var_shader_out,
                                glsl_int_type(), "layer ID");
         layer_id_out->data.location = VARYING_SLOT_LAYER;
         nir_store_var(&b, layer_id_out, nir_imm_zero(&b, 1, 32), 0x1);
      }

      return progress;
   }

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   state.builder = nir_builder_create(entrypoint);

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         if (load->intrinsic != nir_intrinsic_load_instance_id &&
             load->intrinsic != nir_intrinsic_load_view_index)
            continue;

         nir_def *value;
         if (load->intrinsic == nir_intrinsic_load_instance_id) {
            value = build_instance_id(&state);
         } else {
            assert(load->intrinsic == nir_intrinsic_load_view_index);
            value = build_view_index(&state);
         }

         nir_def_rewrite_uses(&load->def, value);

         nir_instr_remove(&load->instr);
      }
   }

   /* The view index is available in all stages but the instance id is only
    * available in the VS. If it's not a fragment shader, we need to pass
    * the view index on to the next stage.
    */
   nir_def *view_index = build_view_index(&state);

   nir_builder *b = &state.builder;

   assert(view_index->parent_instr->block == nir_start_block(entrypoint));
   b->cursor = nir_after_instr(view_index->parent_instr);

   /* Unless there is only one possible view index (that would be set
    * directly), pass it to the next stage. */
   if (util_bitcount(state.view_mask) != 1) {
      nir_variable *view_index_out =
         nir_variable_create(shader, nir_var_shader_out,
                             glsl_int_type(), "view index");
      view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
      nir_store_var(b, view_index_out, view_index, 0x1);
   }

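   /* Write the view index to the Layer output as well, so that the
    * geometry for each view is routed to the matching layer of the
    * layered framebuffer attachments.
    */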
   nir_variable *layer_id_out =
      nir_variable_create(shader, nir_var_shader_out,
                          glsl_int_type(), "layer ID");
   layer_id_out->data.location = VARYING_SLOT_LAYER;
   nir_store_var(b, layer_id_out, view_index, 0x1);

   nir_metadata_preserve(entrypoint, nir_metadata_block_index |
                                     nir_metadata_dominance);

   return true;
}

bool
anv_check_for_primitive_replication(struct anv_device *device,
                                    VkShaderStageFlags stages,
                                    nir_shader **shaders,
                                    uint32_t view_mask)
{
   assert(device->info->ver >= 12);

   static int primitive_replication_max_views = -1;
   if (primitive_replication_max_views < 0) {
      /* TODO: Figure out why we are not getting the same benefits for more
       * than 2 views. For now use Primitive Replication just for the 2-view
       * case by default.
       */
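      /* The default can be overridden at run time via the
       * ANV_PRIMITIVE_REPLICATION_MAX_VIEWS environment variable, clamped
       * to MAX_VIEWS_FOR_PRIMITIVE_REPLICATION.
       */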
      const unsigned default_max_views = 2;

      primitive_replication_max_views =
         MIN2(MAX_VIEWS_FOR_PRIMITIVE_REPLICATION,
              debug_get_num_option("ANV_PRIMITIVE_REPLICATION_MAX_VIEWS",
                                   default_max_views));
   }

   /* TODO: We should be able to support replication at 'geometry' stages
    * later than Vertex. In that case only the last stage can refer to
    * gl_ViewIndex.
    */
   if (stages & ~(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))
      return false;

   /* It's possible we have no vertex shader yet (with pipeline libraries) */
   if (!(stages & VK_SHADER_STAGE_VERTEX_BIT))
      return false;

   int view_count = util_bitcount(view_mask);
   if (view_count == 1 || view_count > primitive_replication_max_views)
      return false;

   return nir_can_lower_multiview(shaders[MESA_SHADER_VERTEX]);
}