/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"
/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its I/O
 * intrinsics into something amenable to the VC4 architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */

static void
replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_def **comps)
{
        /* Batch things back together into a vector. This will get split by
         * the later ALU scalarization pass.
         */
        nir_def *vec = nir_vec(b, comps, intr->num_components);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vector.
         */
        nir_def_rewrite_uses(&intr->def, vec);
        nir_instr_remove(&intr->instr);
}

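/** Returns the 8 bit field as a zero-extended 32-bit value. */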
static nir_def *
vc4_nir_unpack_8i(nir_builder *b, nir_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_def *
vc4_nir_unpack_16i(nir_builder *b, nir_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_def *
vc4_nir_unpack_16u(nir_builder *b, nir_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand_imm(b, src, 0xffff);
        } else {
                return nir_ushr_imm(b, src, 16);
        }
}

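/** Returns the 8 bit field as a unorm float in [0.0, 1.0]. */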
static nir_def *
vc4_nir_unpack_8f(nir_builder *b, nir_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}

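/**
 * Returns one channel of a vertex attribute converted to float, built from
 * the raw 32-bit VPM reads according to the format's channel description,
 * or NULL if the channel type is unsupported.
 */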
static nir_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_def *temp;

        if (swiz > PIPE_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul_imm(b,
                                            nir_i2f32(b, vpm_reads[swiz]),
                                            1.0 / 0x7fffffff);
                } else {
                        return nir_i2f32(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fadd_imm(b, nir_fmul_imm(b,
                                                        vc4_nir_unpack_8f(b, temp, swiz),
                                                        2.0),
                                                    -1.0);
                        } else {
                                return nir_fadd_imm(b,
                                                    nir_i2f32(b,
                                                              vc4_nir_unpack_8i(b, temp,
                                                                                swiz)),
                                                    -128.0);
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul_imm(b, temp, 1 / 32768.0f);
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul_imm(b, temp, 1 / 65535.0);
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

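/**
 * Lowers a VS load_input of a vertex attribute to scalar 32-bit VPM loads,
 * followed by format conversion of the attribute's channels to float.
 */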
static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* We only accept direct inputs, and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_uint(intr->src[0]) == 0);

        /* Generate dword loads for the VPM values.  (Since these intrinsics
         * may be reordered, the actual reads will be generated at the top of
         * the shader by ntq_setup_inputs().)
         */
        nir_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++)
                vpm_reads[i] = nir_load_input(b, 1, 32, nir_imm_int(b, 0),
                                              .base = nir_intrinsic_base(intr),
                                              .component = i);

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_def *dests[4];
        for (int i = 0; i < intr->num_components; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

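/**
 * Fixes up FS point sprite inputs: PNTC's Z and W components are replaced
 * with constant 0.0 and 1.0, S/T are zeroed when we're not actually drawing
 * points, and T is inverted for an upper-left point coordinate origin.
 */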
static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_after_instr(&intr->instr);

        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                        VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var =
                nir_find_variable_with_driver_location(c->s, nir_var_shader_in,
                                                       nir_intrinsic_base(intr));
        assert(input_var);

        int comp = nir_intrinsic_component(intr);

        /* Lower away point coordinates, and fix up PNTC. */
        if (util_varying_is_point_coord(input_var->data.location,
                                        c->fs_key->point_sprite_mask)) {
                assert(intr->num_components == 1);

                nir_def *result = &intr->def;

                switch (comp) {
                case 0:
                case 1:
                        /* If we're not rendering points, we need to set a
                         * defined value for the input that would come from
                         * PNTC.
                         */
                        if (!c->fs_key->is_points)
                                result = nir_imm_float(b, 0.0);
                        break;
                case 2:
                        result = nir_imm_float(b, 0.0);
                        break;
                case 3:
                        result = nir_imm_float(b, 1.0);
                        break;
                }

                if (c->fs_key->point_coord_upper_left && comp == 1)
                        result = nir_fsub_imm(b, 1.0, result);

                if (result != &intr->def) {
                        nir_def_rewrite_uses_after(&intr->def,
                                                   result,
                                                   result->parent_instr);
                }
        }
}

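/**
 * Coordinate shaders only need to emit position and point size, so drop
 * stores to any other outputs.
 */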
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var =
                nir_find_variable_with_driver_location(c->s, nir_var_shader_out,
                                                       nir_intrinsic_base(intr));
        assert(output_var);

        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}

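/**
 * Splits a vector load_uniform into per-component scalar loads, converting
 * the vec4-unit offsets into byte offsets.
 */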
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vector. */
        nir_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_def_init(&intr_comp->instr, &intr_comp->def, 1,
                             intr->def.bit_size);

                /* Convert the uniform offset to bytes. If it happens
                 * to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);
                nir_intrinsic_set_range(intr_comp,
                                        nir_intrinsic_range(intr) * 16 - i * 4);

                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl_imm(b, intr->src[0].ssa, 4));

                dests[i] = &intr_comp->def;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

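/** Dispatches an instruction to the appropriate I/O intrinsic lowering. */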
static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b = nir_builder_create(impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function_impl(impl, s) {
                vc4_nir_lower_io_impl(c, impl);
        }
}