1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_nir.h"
25 #include "nir/nir_builder.h"
26 #include "util/u_debug.h"
27
28 /**
29 * This file implements the lowering required for VK_KHR_multiview.
30 *
31 * When possible, Primitive Replication is used and the shader is modified to
32 * make gl_Position an array and fill it with values for each view.
33 *
34 * Otherwise we implement multiview using instanced rendering. The number of
35 * instances in each draw call is multiplied by the number of views in the
36 * subpass. Then, in the shader, we divide gl_InstanceId by the number of
37 * views and use gl_InstanceId % view_count to compute the actual ViewIndex.
38 */
39
/* Per-shader state shared by the lowering helpers below. */
struct lower_multiview_state {
   /* Builder whose cursor the helpers reposition as needed. */
   nir_builder builder;

   /* Bitmask of views rendered in this subpass; bit N set = view N used. */
   uint32_t view_mask;

   /* Lazily-built SSA values, cached so each is emitted at most once
    * (see build_instance_id() / build_view_index()).
    */
   nir_def *instance_id;
   nir_def *view_index;
};
48
49 static nir_def *
build_instance_id(struct lower_multiview_state * state)50 build_instance_id(struct lower_multiview_state *state)
51 {
52 assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);
53
54 if (state->instance_id == NULL) {
55 nir_builder *b = &state->builder;
56
57 b->cursor = nir_before_impl(b->impl);
58
59 /* We use instancing for implementing multiview. The actual instance id
60 * is given by dividing instance_id by the number of views in this
61 * subpass.
62 */
63 state->instance_id =
64 nir_idiv(b, nir_load_instance_id(b),
65 nir_imm_int(b, util_bitcount(state->view_mask)));
66 }
67
68 return state->instance_id;
69 }
70
/* Return (building it on first use) an SSA value holding the current view
 * index for any pre-rasterization stage.  In the vertex stage it is derived
 * from gl_InstanceId; in later stages it is read from the
 * VARYING_SLOT_VIEW_INDEX input written by the previous stage.
 */
static nir_def *
build_view_index(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage != MESA_SHADER_FRAGMENT);

   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      /* Emit the computation once, at the top of the entrypoint. */
      b->cursor = nir_before_impl(b->impl);

      assert(state->view_mask != 0);
      if (util_bitcount(state->view_mask) == 1) {
         /* Only one view is enabled: set the view index directly. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support 16 viewports */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview.  The compacted view
          * id is given by instance_id % view_count.  We then have to convert
          * that to an actual view id.
          */
         nir_def *compacted =
            nir_umod_imm(b, nir_load_instance_id(b),
                         util_bitcount(state->view_mask));

         if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
            /* If we have a full view mask, then compacted is what we want */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask.  The map is given by
             * 16 nibbles, each of which is a value from 0 to 15.
             */
            uint64_t remap = 0;
            uint32_t i = 0;
            u_foreach_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }

            /* Byte offset into the nibble table: 4 bits per entry. */
            nir_def *shift = nir_imul_imm(b, compacted, 4);

            /* One of these days, when we have int64 everywhere, this will be
             * easier.
             */
            nir_def *shifted;
            if (remap <= UINT32_MAX) {
               /* The table fits in 32 bits: a single shift extracts it. */
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               /* Emulate a 64-bit right shift with the two 32-bit halves,
                * then select the half that contains the wanted nibble.
                */
               nir_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                           nir_iadd_imm(b, shift, -32));
               shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32),
                                   shifted_low, shifted_high);
            }
            /* Mask off everything but the selected nibble. */
            state->view_index = nir_iand_imm(b, shifted, 0xf);
         }
      } else {
         /* Non-vertex stages read the view index from an input variable.
          * TCS/GS inputs are per-vertex, so the variable is declared as a
          * one-element array and element 0 is loaded.
          */
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1, 0);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         /* NOTE(review): unreachable given the FRAGMENT assert at the top
          * of this function — kept as-is for safety; confirm before removing.
          */
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         nir_deref_instr *deref = nir_build_deref_var(b, idx_var);
         if (glsl_type_is_array(type))
            deref = nir_build_deref_array_imm(b, deref, 0);

         state->view_index = nir_load_deref(b, deref);
      }
   }

   return state->view_index;
}
154
155 static bool
is_load_view_index(const nir_instr * instr,const void * data)156 is_load_view_index(const nir_instr *instr, const void *data)
157 {
158 return instr->type == nir_instr_type_intrinsic &&
159 nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_view_index;
160 }
161
162 static nir_def *
replace_load_view_index_with_zero(struct nir_builder * b,nir_instr * instr,void * data)163 replace_load_view_index_with_zero(struct nir_builder *b,
164 nir_instr *instr, void *data)
165 {
166 assert(is_load_view_index(instr, data));
167 return nir_imm_zero(b, 1, 32);
168 }
169
170 static nir_def *
replace_load_view_index_with_layer_id(struct nir_builder * b,nir_instr * instr,void * data)171 replace_load_view_index_with_layer_id(struct nir_builder *b,
172 nir_instr *instr, void *data)
173 {
174 assert(is_load_view_index(instr, data));
175 return nir_load_layer_id(b);
176 }
177
/* Lower VK_KHR_multiview for one shader stage.
 *
 * view_mask == 0 (multiview disabled): ViewIndex loads become constant 0.
 * Fragment stage: ViewIndex loads become layer-ID loads (earlier stages
 * store the view index into VARYING_SLOT_LAYER below).
 * Other stages: instance-id and view-index loads are rewritten in terms of
 * instanced rendering, and the view index is passed downstream via the
 * VIEW_INDEX and LAYER output slots.
 *
 * Returns true if the shader was changed.
 */
bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);

   /* If multiview isn't enabled, just lower the ViewIndex builtin to zero. */
   if (view_mask == 0) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_zero,
                                           NULL);
   }

   /* Fragment shaders recover the view index from the layer ID, which the
    * last pre-rasterization stage sets equal to the view index (below).
    */
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_layer_id,
                                           NULL);
   }

   /* This pass assumes a single entrypoint */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   state.builder = nir_builder_create(entrypoint);

   /* Rewrite every load_instance_id / load_view_index to the lazily-built
    * replacement values (build_* helpers cache and emit them once, at the
    * top of the entrypoint).
    */
   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         if (load->intrinsic != nir_intrinsic_load_instance_id &&
             load->intrinsic != nir_intrinsic_load_view_index)
            continue;

         nir_def *value;
         if (load->intrinsic == nir_intrinsic_load_instance_id) {
            value = build_instance_id(&state);
         } else {
            assert(load->intrinsic == nir_intrinsic_load_view_index);
            value = build_view_index(&state);
         }

         nir_def_rewrite_uses(&load->def, value);

         nir_instr_remove(&load->instr);
      }
   }

   /* The view index is available in all stages but the instance id is only
    * available in the VS. If it's not a fragment shader, we need to pass
    * the view index on to the next stage.
    */
   nir_def *view_index = build_view_index(&state);

   nir_builder *b = &state.builder;

   /* build_view_index() emitted the value at nir_before_impl, so it lives
    * in the start block; place the output stores right after it.
    */
   assert(view_index->parent_instr->block == nir_start_block(entrypoint));
   b->cursor = nir_after_instr(view_index->parent_instr);

   /* Unless there is only one possible view index (that would be set
    * directly), pass it to the next stage. */
   if (util_bitcount(state.view_mask) != 1) {
      nir_variable *view_index_out =
         nir_variable_create(shader, nir_var_shader_out,
                             glsl_int_type(), "view index");
      view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
      nir_store_var(b, view_index_out, view_index, 0x1);
   }

   /* Always write the view index as the layer: the fragment stage reads it
    * back via load_layer_id (see replace_load_view_index_with_layer_id).
    */
   nir_variable *layer_id_out =
      nir_variable_create(shader, nir_var_shader_out,
                          glsl_int_type(), "layer ID");
   layer_id_out->data.location = VARYING_SLOT_LAYER;
   nir_store_var(b, layer_id_out, view_index, 0x1);

   /* Only straight-line code was inserted and instructions replaced, so the
    * CFG-derived metadata is still valid.
    */
   nir_metadata_preserve(entrypoint, nir_metadata_block_index |
                                     nir_metadata_dominance);

   return true;
}
260