/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"
#include "util/u_debug.h"

/**
 * This file implements the lowering required for VK_KHR_multiview.
 *
 * When possible, Primitive Replication is used and the shader is modified to
 * make gl_Position an array and fill it with values for each view.
 *
 * Otherwise we implement multiview using instanced rendering. The number of
 * instances in each draw call is multiplied by the number of views in the
 * subpass. Then, in the shader, we divide gl_InstanceId by the number of
 * views and use gl_InstanceId % view_count to compute the actual ViewIndex.
 */
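
/*
 * For example (illustrative numbers, derived from the scheme above): with a
 * view mask of 0b0111, i.e. three views, a draw with instanceCount = 4 is
 * emitted with 12 hardware instances; hardware instance 7 then corresponds to
 * application instance 7 / 3 = 2 and to view 7 % 3 = 1.
 */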

struct lower_multiview_state {
   nir_builder builder;

   uint32_t view_mask;

   nir_def *instance_id_with_views;
   nir_def *instance_id;
   nir_def *view_index;
};

static nir_def *
build_instance_id(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

   if (state->instance_id == NULL) {
      nir_builder *b = &state->builder;

      b->cursor =
         nir_after_instr(state->instance_id_with_views->parent_instr);

      /* We use instancing for implementing multiview. The actual instance id
       * is given by dividing instance_id by the number of views in this
       * subpass.
       */
      state->instance_id =
         nir_idiv(b, state->instance_id_with_views,
                     nir_imm_int(b, util_bitcount(state->view_mask)));
   }

   return state->instance_id;
}

static nir_def *
build_view_index(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage != MESA_SHADER_FRAGMENT);

   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      b->cursor =
         nir_after_instr(state->instance_id_with_views->parent_instr);

      assert(state->view_mask != 0);
      if (util_bitcount(state->view_mask) == 1) {
         /* Set the view index directly. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support up to 16 views */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview. The compacted view
          * id is given by instance_id % view_count. We then have to convert
          * that to an actual view id.
          */
         nir_def *compacted =
            nir_umod_imm(b, state->instance_id_with_views,
                            util_bitcount(state->view_mask));

         if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
            /* If we have a full view mask (a contiguous set of views starting
             * at view 0), compacted is already the actual view index.
             */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask. The map is given by
             * 16 nibbles, each of which is a value from 0 to 15.
             */
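            /* For example (illustrative values): view_mask = 0b1011 enables
             * views 0, 1 and 3, so the loop below builds remap = 0x310;
             * compacted index 2 then shifts by 8 and (0x310 >> 8) & 0xf = 3.
             */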
            uint64_t remap = 0;
            uint32_t i = 0;
            u_foreach_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }

            nir_def *shift = nir_imul_imm(b, compacted, 4);

            /* One of these days, when we have int64 everywhere, this will be
             * easier.
             */
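            /* The remap constant may not fit in 32 bits. Without int64, shift
             * the low and high halves separately (the high half by shift - 32)
             * and use bcsel to pick the half that actually contains the
             * selected nibble (shift < 32 means the low half).
             */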
            nir_def *shifted;
            if (remap <= UINT32_MAX) {
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               nir_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                              nir_iadd_imm(b, shift, -32));
               shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32),
                                      shifted_low, shifted_high);
            }
            state->view_index = nir_iand_imm(b, shifted, 0xf);
         }
      } else {
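         /* In stages other than the vertex shader, the view index arrives as
          * the VARYING_SLOT_VIEW_INDEX input written by the previous stage
          * (see anv_nir_lower_multiview below), so just read it from an input
          * variable.
          */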
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1, 0);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         nir_deref_instr *deref = nir_build_deref_var(b, idx_var);
         if (glsl_type_is_array(type))
            deref = nir_build_deref_array_imm(b, deref, 0);

         state->view_index = nir_load_deref(b, deref);
      }
   }

   return state->view_index;
}

static bool
is_load_view_index(const nir_instr *instr, const void *data)
{
   return instr->type == nir_instr_type_intrinsic &&
          nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_view_index;
}

static nir_def *
replace_load_view_index_with_zero(struct nir_builder *b,
                                  nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_imm_zero(b, 1, 32);
}

static nir_def *
replace_load_view_index_with_layer_id(struct nir_builder *b,
                                      nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_load_layer_id(b);
}
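
/**
 * Lower gl_ViewIndex (and, in the vertex shader, gl_InstanceId) according to
 * view_mask. A minimal usage sketch (illustrative only; the real call site in
 * the Anv pipeline code may differ and pass additional state):
 *
 *    NIR_PASS(_, nir, anv_nir_lower_multiview, view_mask);
 *
 * With a zero view_mask the pass only rewrites gl_ViewIndex loads to zero.
 */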
bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);

   /* If multiview isn't enabled, just lower the ViewIndex builtin to zero. */
   if (view_mask == 0) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_zero, NULL);
   }
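
   /* In fragment shaders the view index is not computed here; the earlier
    * stages write it to the Layer output (see below), so reading gl_ViewIndex
    * is lowered to reading the layer ID instead.
    */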
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_layer_id, NULL);
   }

   /* This pass assumes a single entrypoint */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   state.builder = nir_builder_at(nir_before_impl(entrypoint));
   nir_builder *b = &state.builder;

   /* Save the original "instance ID" which is the actual instance ID
    * multiplied by the number of views.
    */
   state.instance_id_with_views = nir_load_instance_id(b);

   /* The view index is available in all stages but the instance id is only
    * available in the VS. If it's not a fragment shader, we need to pass
    * the view index on to the next stage.
    */
   nir_def *view_index = build_view_index(&state);

   assert(view_index->parent_instr->block == nir_start_block(entrypoint));
   b->cursor = nir_after_instr(view_index->parent_instr);

   /* Unless there is only one possible view index (that would be set
    * directly), pass it to the next stage.
    */
   nir_variable *view_index_out = NULL;
   if (util_bitcount(state.view_mask) != 1) {
      view_index_out = nir_variable_create(shader, nir_var_shader_out,
                                           glsl_int_type(), "view index");
      view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
   }
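
   /* Multiview is implemented with layered rendering: writing the view index
    * to the Layer output routes each instance to the framebuffer layer for
    * its view, and lets the fragment shader read the view index back via the
    * layer ID.
    */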
   nir_variable *layer_id_out =
      nir_variable_create(shader, nir_var_shader_out,
                          glsl_int_type(), "layer ID");
   layer_id_out->data.location = VARYING_SLOT_LAYER;

   if (shader->info.stage != MESA_SHADER_GEOMETRY) {
      if (view_index_out)
         nir_store_var(b, view_index_out, view_index, 0x1);

      nir_store_var(b, layer_id_out, view_index, 0x1);
   }

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         switch (load->intrinsic) {
         case nir_intrinsic_load_instance_id:
            if (&load->def != state.instance_id_with_views) {
               nir_def_replace(&load->def, build_instance_id(&state));
            }
            break;
         case nir_intrinsic_load_view_index:
            nir_def_replace(&load->def, view_index);
            break;
         case nir_intrinsic_emit_vertex_with_counter:
            /* In geometry shaders, outputs become undefined after every
             * EmitVertex() call. We need to re-emit them for each vertex.
             */
            b->cursor = nir_before_instr(instr);
            if (view_index_out)
               nir_store_var(b, view_index_out, view_index, 0x1);

            nir_store_var(b, layer_id_out, view_index, 0x1);
            break;
         default:
            break;
         }
      }
   }

   nir_metadata_preserve(entrypoint, nir_metadata_control_flow);

   return true;
}