/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include <math.h>

#include "nir/nir_builtin_builder.h"

#include "vtn_private.h"
#include "GLSL.std.450.h"

#ifndef M_PIf
#define M_PIf ((float) M_PI)
#endif
#ifndef M_PI_2f
#define M_PI_2f ((float) M_PI_2)
#endif
#ifndef M_PI_4f
#define M_PI_4f ((float) M_PI_4)
#endif

static nir_ssa_def *build_det(nir_builder *b, nir_ssa_def **col, unsigned cols);

/* Computes the determinant of the submatrix formed by removing the
 * specified row and column from src.
 */
static nir_ssa_def *
build_mat_subdet(struct nir_builder *b, struct nir_ssa_def **src,
                 unsigned size, unsigned row, unsigned col)
{
   assert(row < size && col < size);
   if (size == 2) {
      return nir_channel(b, src[1 - col], 1 - row);
   } else {
      /* Swizzle to get all but the specified row */
      unsigned swiz[NIR_MAX_VEC_COMPONENTS] = {0};
      for (unsigned j = 0; j < 3; j++)
         swiz[j] = j + (j >= row);

      /* Grab all but the specified column */
      nir_ssa_def *subcol[3];
      for (unsigned j = 0; j < size; j++) {
         if (j != col) {
            subcol[j - (j > col)] = nir_swizzle(b, src[j], swiz, size - 1);
         }
      }

      return build_det(b, subcol, size - 1);
   }
}

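/* Computes the determinant of a size x size matrix by cofactor (Laplace)
 * expansion along the first column.  The alternating +/- signs of the
 * cofactors are folded into the paired subtractions in the loop below.
 */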
static nir_ssa_def *
build_det(nir_builder *b, nir_ssa_def **col, unsigned size)
{
   assert(size <= 4);
   nir_ssa_def *subdet[4];
   for (unsigned i = 0; i < size; i++)
      subdet[i] = build_mat_subdet(b, col, size, i, 0);

   nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, size));

   nir_ssa_def *result = NULL;
   for (unsigned i = 0; i < size; i += 2) {
      nir_ssa_def *term;
      if (i + 1 < size) {
         term = nir_fsub(b, nir_channel(b, prod, i),
                            nir_channel(b, prod, i + 1));
      } else {
         term = nir_channel(b, prod, i);
      }

      result = result ? nir_fadd(b, result, term) : term;
   }

   return result;
}

static nir_ssa_def *
build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   unsigned size = glsl_get_vector_elements(src->type);

   nir_ssa_def *cols[4];
   for (unsigned i = 0; i < size; i++)
      cols[i] = src->elems[i]->def;

   return build_det(&b->nb, cols, size);
}

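/* Computes the inverse using the classical adjugate formula:
 * inverse(A) = adjugate(A) / det(A), where adjugate(A)[r][c] is the
 * cofactor of A[c][r].  That transpose is why build_mat_subdet() is called
 * with the row and column arguments swapped below.
 */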
static struct vtn_ssa_value *
matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   nir_ssa_def *adj_col[4];
   unsigned size = glsl_get_vector_elements(src->type);

   nir_ssa_def *cols[4];
   for (unsigned i = 0; i < size; i++)
      cols[i] = src->elems[i]->def;

   /* Build up an adjugate matrix */
   for (unsigned c = 0; c < size; c++) {
      nir_ssa_def *elem[4];
      for (unsigned r = 0; r < size; r++) {
         elem[r] = build_mat_subdet(&b->nb, cols, size, c, r);

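         /* Apply the cofactor sign (-1)^(r+c). */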
         if ((r + c) % 2)
            elem[r] = nir_fneg(&b->nb, elem[r]);
      }

      adj_col[c] = nir_vec(&b->nb, elem, size);
   }

   nir_ssa_def *det_inv = nir_frcp(&b->nb, build_det(&b->nb, cols, size));

   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
   for (unsigned i = 0; i < size; i++)
      val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);

   return val;
}

/**
 * Approximate asin(x) by the piecewise formula:
 * for |x| < 0.5, asin~(x) = x * (1 + x²(pS0 + x²(pS1 + x²*pS2)) / (1 + x²*qS1))
 * for |x| ≥ 0.5, asin~(x) = sign(x) * (π/2 - sqrt(1 - |x|) * (π/2 + |x|(π/4 - 1 + |x|(p0 + |x|p1))))
 *
 * The latter is correct to first order at x=0 and x=±1 regardless of the p
 * coefficients but can be made second-order correct at both ends by selecting
 * the fit coefficients appropriately.  Different p coefficients can be used
 * in the asin and acos implementations to minimize some relative error metric
 * in each case.
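 *
 * As a quick sanity check on the second formula: at |x| = 1 the sqrt factor
 * vanishes, giving asin~(±1) = ±π/2 exactly, and at x = 0 it reduces to
 * sign(x) * (π/2 - π/2) = 0.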
 */
static nir_ssa_def *
build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1, bool piecewise)
{
   if (x->bit_size == 16) {
      /* The polynomial approximation isn't precise enough to meet half-float
       * precision requirements.  Alternatively, we could implement this using
       * the formula:
       *
       *    asin(x) = atan2(x, sqrt(1 - x*x))
       *
       * But that is very expensive, so instead we just do the polynomial
       * approximation in 32-bit math and then we convert the result back to
       * 16-bit.
       */
      return nir_f2f16(b, build_asin(b, nir_f2f32(b, x), p0, p1, piecewise));
   }
   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, x->bit_size);
   nir_ssa_def *half = nir_imm_floatN_t(b, 0.5f, x->bit_size);
   nir_ssa_def *abs_x = nir_fabs(b, x);

   nir_ssa_def *p0_plus_xp1 = nir_ffma_imm12(b, abs_x, p1, p0);

   nir_ssa_def *expr_tail =
      nir_ffma_imm2(b, abs_x,
                    nir_ffma_imm2(b, abs_x, p0_plus_xp1, M_PI_4f - 1.0f),
                    M_PI_2f);

   nir_ssa_def *result0 = nir_fmul(b, nir_fsign(b, x),
                                   nir_a_minus_bc(b, nir_imm_floatN_t(b, M_PI_2f, x->bit_size),
                                                  nir_fsqrt(b, nir_fsub(b, one, abs_x)),
                                                  expr_tail));
   if (piecewise) {
      /* approximation for |x| < 0.5 */
      const float pS0 = 1.6666586697e-01f;
      const float pS1 = -4.2743422091e-02f;
      const float pS2 = -8.6563630030e-03f;
      const float qS1 = -7.0662963390e-01f;

      nir_ssa_def *x2 = nir_fmul(b, x, x);
      nir_ssa_def *p = nir_fmul(b,
                                x2,
                                nir_ffma_imm2(b, x2,
                                              nir_ffma_imm12(b, x2, pS2, pS1),
                                              pS0));

      nir_ssa_def *q = nir_ffma_imm1(b, x2, qS1, one);
      nir_ssa_def *result1 = nir_ffma(b, x, nir_fdiv(b, p, q), x);
      return nir_bcsel(b, nir_flt(b, abs_x, half), result1, result0);
   } else {
      return result0;
   }
}

static nir_op
vtn_nir_alu_op_for_spirv_glsl_opcode(struct vtn_builder *b,
                                     enum GLSLstd450 opcode,
                                     unsigned execution_mode,
                                     bool *exact)
{
   *exact = false;
   switch (opcode) {
   case GLSLstd450Round:       return nir_op_fround_even;
   case GLSLstd450RoundEven:   return nir_op_fround_even;
   case GLSLstd450Trunc:       return nir_op_ftrunc;
   case GLSLstd450FAbs:        return nir_op_fabs;
   case GLSLstd450SAbs:        return nir_op_iabs;
   case GLSLstd450FSign:       return nir_op_fsign;
   case GLSLstd450SSign:       return nir_op_isign;
   case GLSLstd450Floor:       return nir_op_ffloor;
   case GLSLstd450Ceil:        return nir_op_fceil;
   case GLSLstd450Fract:       return nir_op_ffract;
   case GLSLstd450Sin:         return nir_op_fsin;
   case GLSLstd450Cos:         return nir_op_fcos;
   case GLSLstd450Pow:         return nir_op_fpow;
   case GLSLstd450Exp2:        return nir_op_fexp2;
   case GLSLstd450Log2:        return nir_op_flog2;
   case GLSLstd450Sqrt:        return nir_op_fsqrt;
   case GLSLstd450InverseSqrt: return nir_op_frsq;
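   /* The "N" variants have defined behavior for NaN operands, so the ops
    * they map to must stay exact.
    */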
   case GLSLstd450NMin:        *exact = true; return nir_op_fmin;
   case GLSLstd450FMin:        return nir_op_fmin;
   case GLSLstd450UMin:        return nir_op_umin;
   case GLSLstd450SMin:        return nir_op_imin;
   case GLSLstd450NMax:        *exact = true; return nir_op_fmax;
   case GLSLstd450FMax:        return nir_op_fmax;
   case GLSLstd450UMax:        return nir_op_umax;
   case GLSLstd450SMax:        return nir_op_imax;
   case GLSLstd450FMix:        return nir_op_flrp;
   case GLSLstd450Fma:         return nir_op_ffma;
   case GLSLstd450Ldexp:       return nir_op_ldexp;
   case GLSLstd450FindILsb:    return nir_op_find_lsb;
   case GLSLstd450FindSMsb:    return nir_op_ifind_msb;
   case GLSLstd450FindUMsb:    return nir_op_ufind_msb;

   /* Packing/Unpacking functions */
   case GLSLstd450PackSnorm4x8:     return nir_op_pack_snorm_4x8;
   case GLSLstd450PackUnorm4x8:     return nir_op_pack_unorm_4x8;
   case GLSLstd450PackSnorm2x16:    return nir_op_pack_snorm_2x16;
   case GLSLstd450PackUnorm2x16:    return nir_op_pack_unorm_2x16;
   case GLSLstd450PackHalf2x16:     return nir_op_pack_half_2x16;
   case GLSLstd450PackDouble2x32:   return nir_op_pack_64_2x32;
   case GLSLstd450UnpackSnorm4x8:   return nir_op_unpack_snorm_4x8;
   case GLSLstd450UnpackUnorm4x8:   return nir_op_unpack_unorm_4x8;
   case GLSLstd450UnpackSnorm2x16:  return nir_op_unpack_snorm_2x16;
   case GLSLstd450UnpackUnorm2x16:  return nir_op_unpack_unorm_2x16;
   case GLSLstd450UnpackHalf2x16:
      if (execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16)
         return nir_op_unpack_half_2x16_flush_to_zero;
      else
         return nir_op_unpack_half_2x16;
   case GLSLstd450UnpackDouble2x32: return nir_op_unpack_64_2x32;

   default:
      vtn_fail("No NIR equivalent");
   }
}

#define NIR_IMM_FP(n, v) (nir_imm_floatN_t(n, v, src[0]->bit_size))

static void
handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                   const uint32_t *w, unsigned count)
{
   struct nir_builder *nb = &b->nb;
   const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;
   struct vtn_value *dest_val = vtn_untyped_value(b, w[2]);

   bool mediump_16bit;
   switch (entrypoint) {
   case GLSLstd450PackSnorm4x8:
   case GLSLstd450PackUnorm4x8:
   case GLSLstd450PackSnorm2x16:
   case GLSLstd450PackUnorm2x16:
   case GLSLstd450PackHalf2x16:
   case GLSLstd450PackDouble2x32:
   case GLSLstd450UnpackSnorm4x8:
   case GLSLstd450UnpackUnorm4x8:
   case GLSLstd450UnpackSnorm2x16:
   case GLSLstd450UnpackUnorm2x16:
   case GLSLstd450UnpackHalf2x16:
   case GLSLstd450UnpackDouble2x32:
      /* Asking for relaxed precision snorm 4x8 pack results (for example)
       * doesn't even make sense.  The NIR opcodes have a fixed output size,
       * so we don't try to reduce precision.
       */
      mediump_16bit = false;
      break;

   case GLSLstd450Frexp:
   case GLSLstd450FrexpStruct:
   case GLSLstd450Modf:
   case GLSLstd450ModfStruct:
      /* Not sure how to detect the ->elems[i] destinations on these in
       * vtn_upconvert_value().
       */
      mediump_16bit = false;
      break;

   default:
      mediump_16bit = b->options->mediump_16bit_alu &&
                      vtn_value_is_relaxed_precision(b, dest_val);
      break;
   }

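   /* A GLSLstd450 instruction arrives as an OpExtInst: w[1] is the result
    * type, w[2] the result id, w[3] the extension set, and w[4] the
    * instruction number, so the actual operands start at w[5].
    */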
   /* Collect the various SSA sources */
   unsigned num_inputs = count - 5;
   nir_ssa_def *src[3] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      /* These are handled specially below */
      if (vtn_untyped_value(b, w[i + 5])->value_type == vtn_value_type_pointer)
         continue;

      src[i] = vtn_get_nir_ssa(b, w[i + 5]);
      if (mediump_16bit) {
         struct vtn_ssa_value *vtn_src = vtn_ssa_value(b, w[i + 5]);
         src[i] = vtn_mediump_downconvert(b, glsl_get_base_type(vtn_src->type), src[i]);
      }
   }

   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);

   vtn_handle_no_contraction(b, vtn_untyped_value(b, w[2]));
   switch (entrypoint) {
   case GLSLstd450Radians:
      dest->def = nir_radians(nb, src[0]);
      break;
   case GLSLstd450Degrees:
      dest->def = nir_degrees(nb, src[0]);
      break;
   case GLSLstd450Tan:
      dest->def = nir_ftan(nb, src[0]);
      break;

   case GLSLstd450Modf: {
      nir_ssa_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
      nir_ssa_def *sign_bit =
         nir_imm_intN_t(&b->nb, (uint64_t)1 << (src[0]->bit_size - 1),
                        src[0]->bit_size);
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);

      /* NaN input should produce a NaN result, and ±Inf input should produce
       * a ±0 result.  The fmul(sign(x), ffract(x)) calculation will already
       * produce the expected NaN.  To get ±0, directly compare for equality
       * with Inf instead of using fisfinite (which is false for NaN); ANDing
       * the input with just its sign bit then yields a zero of the right
       * sign.
       */
      dest->def = nir_bcsel(nb,
                            nir_ieq(nb, abs, inf),
                            nir_iand(nb, src[0], sign_bit),
                            nir_fmul(nb, sign, nir_ffract(nb, abs)));

      struct vtn_pointer *i_ptr = vtn_value(b, w[6], vtn_value_type_pointer)->pointer;
      struct vtn_ssa_value *whole = vtn_create_ssa_value(b, i_ptr->type->type);
      whole->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
      vtn_variable_store(b, whole, i_ptr, 0);
      break;
   }

   case GLSLstd450ModfStruct: {
      nir_ssa_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
      nir_ssa_def *sign_bit =
         nir_imm_intN_t(&b->nb, (uint64_t)1 << (src[0]->bit_size - 1),
                        src[0]->bit_size);
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));

      /* See GLSLstd450Modf for explanation of the Inf and NaN handling. */
      dest->elems[0]->def = nir_bcsel(nb,
                                      nir_ieq(nb, abs, inf),
                                      nir_iand(nb, src[0], sign_bit),
                                      nir_fmul(nb, sign, nir_ffract(nb, abs)));
      dest->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
      break;
   }

   case GLSLstd450Step: {
      /* The SPIR-V Extended Instructions for GLSL spec says:
       *
       *    Result is 0.0 if x < edge; otherwise result is 1.0.
       *
       * Here src[1] is x, and src[0] is edge.  The direct implementation is
       *
       *    bcsel(src[1] < src[0], 0.0, 1.0)
       *
       * This is effectively b2f(!(src1 < src0)).  Previously this was
       * implemented using sge(src1, src0), but that produces incorrect
       * results for NaN.  Instead, we use the identity b2f(!x) = 1 - b2f(x).
       */
      const bool exact = nb->exact;
      nb->exact = true;

      nir_ssa_def *cmp = nir_slt(nb, src[1], src[0]);

      nb->exact = exact;
      dest->def = nir_fsub(nb, nir_imm_floatN_t(nb, 1.0f, cmp->bit_size), cmp);
      break;
   }

   case GLSLstd450Length:
      dest->def = nir_fast_length(nb, src[0]);
      break;
   case GLSLstd450Distance:
      dest->def = nir_fast_distance(nb, src[0], src[1]);
      break;
   case GLSLstd450Normalize:
      dest->def = nir_fast_normalize(nb, src[0]);
      break;

   case GLSLstd450Exp:
      dest->def = nir_fexp(nb, src[0]);
      break;

   case GLSLstd450Log:
      dest->def = nir_flog(nb, src[0]);
      break;

   case GLSLstd450FClamp:
      dest->def = nir_fclamp(nb, src[0], src[1], src[2]);
      break;
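   /* NClamp has defined NaN behavior, so mark its expansion exact to keep
    * optimizations from assuming NaN-free inputs.
    */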
   case GLSLstd450NClamp:
      nb->exact = true;
      dest->def = nir_fclamp(nb, src[0], src[1], src[2]);
      nb->exact = false;
      break;
   case GLSLstd450UClamp:
      dest->def = nir_uclamp(nb, src[0], src[1], src[2]);
      break;
   case GLSLstd450SClamp:
      dest->def = nir_iclamp(nb, src[0], src[1], src[2]);
      break;

   case GLSLstd450Cross: {
      dest->def = nir_cross3(nb, src[0], src[1]);
      break;
   }

   case GLSLstd450SmoothStep: {
      dest->def = nir_smoothstep(nb, src[0], src[1], src[2]);
      break;
   }

   case GLSLstd450FaceForward:
      dest->def =
         nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
                               NIR_IMM_FP(nb, 0.0)),
                   src[0], nir_fneg(nb, src[0]));
      break;

   case GLSLstd450Reflect:
      /* I - 2 * dot(N, I) * N */
      dest->def =
         nir_a_minus_bc(nb, src[0],
                        src[1],
                        nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
                                 NIR_IMM_FP(nb, 2.0)));
      break;

   case GLSLstd450Refract: {
      nir_ssa_def *I = src[0];
      nir_ssa_def *N = src[1];
      nir_ssa_def *eta = src[2];
      nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
      nir_ssa_def *one = NIR_IMM_FP(nb, 1.0);
      nir_ssa_def *zero = NIR_IMM_FP(nb, 0.0);
      /* According to the SPIR-V and GLSL specs, eta is always a float
       * regardless of the type of the other operands.  However in practice
       * it seems that if you try to pass it a float then glslang will just
       * promote it to a double and generate invalid SPIR-V.  In order to
       * support a hypothetical fixed version of glslang we'll promote eta to
       * double if the other operands are double also.
       */
      if (I->bit_size != eta->bit_size) {
         nir_op conversion_op =
            nir_type_conversion_op(nir_type_float | eta->bit_size,
                                   nir_type_float | I->bit_size,
                                   nir_rounding_mode_undef);
         eta = nir_build_alu(nb, conversion_op, eta, NULL, NULL, NULL);
      }
      /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
      nir_ssa_def *k =
         nir_a_minus_bc(nb, one, eta,
                        nir_fmul(nb, eta, nir_a_minus_bc(nb, one, n_dot_i, n_dot_i)));
      nir_ssa_def *result =
         nir_a_minus_bc(nb, nir_fmul(nb, eta, I),
                        nir_ffma(nb, eta, n_dot_i, nir_fsqrt(nb, k)),
                        N);
      /* XXX: bcsel, or if statement? */
      dest->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
      break;
   }

   case GLSLstd450Sinh:
      /* 0.5 * (e^x - e^(-x)) */
      dest->def =
         nir_fmul_imm(nb, nir_fsub(nb, nir_fexp(nb, src[0]),
                                   nir_fexp(nb, nir_fneg(nb, src[0]))),
                      0.5f);
      break;

   case GLSLstd450Cosh:
      /* 0.5 * (e^x + e^(-x)) */
      dest->def =
         nir_fmul_imm(nb, nir_fadd(nb, nir_fexp(nb, src[0]),
                                   nir_fexp(nb, nir_fneg(nb, src[0]))),
                      0.5f);
      break;

   case GLSLstd450Tanh: {
      /* tanh(x) := (e^x - e^(-x)) / (e^x + e^(-x))
       *
       * We clamp x to [-10, +10] to avoid precision problems.  When x > 10,
       * e^x dominates the sum, e^(-x) is lost and tanh(x) is 1.0 for 32-bit
       * floating point.
       *
       * For 16-bit precision we clamp x to [-4.2, +4.2] instead.
       */
      const uint32_t bit_size = src[0]->bit_size;
      const double clamped_x = bit_size > 16 ? 10.0 : 4.2;
      nir_ssa_def *x = nir_fclamp(nb, src[0],
                                  nir_imm_floatN_t(nb, -clamped_x, bit_size),
                                  nir_imm_floatN_t(nb, clamped_x, bit_size));

      /* The clamping would filter out NaN values, causing an incorrect
       * result.  The comparison is carefully structured to get a NaN result
       * for NaN and to get -0 for -0.
       *
       *    result = abs(s) > 0.0 ? ... : s;
       */
      const bool exact = nb->exact;

      nb->exact = true;
      nir_ssa_def *is_regular = nir_flt(nb,
                                        nir_imm_floatN_t(nb, 0, bit_size),
                                        nir_fabs(nb, src[0]));

      /* The extra 1.0*s ensures that subnormal inputs are flushed to zero
       * when that is selected by the shader.
       */
      nir_ssa_def *flushed = nir_fmul(nb,
                                      src[0],
                                      nir_imm_floatN_t(nb, 1.0, bit_size));
      nb->exact = exact;

      dest->def = nir_bcsel(nb,
                            is_regular,
                            nir_fdiv(nb, nir_fsub(nb, nir_fexp(nb, x),
                                                  nir_fexp(nb, nir_fneg(nb, x))),
                                     nir_fadd(nb, nir_fexp(nb, x),
                                              nir_fexp(nb, nir_fneg(nb, x)))),
                            flushed);
      break;
   }

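   /* asinh(x) = ln(x + sqrt(x² + 1)); the sign(x) * ln(|x| + ...) form used
    * below keeps the odd symmetry exact.  Similarly, acosh(x) = ln(x +
    * sqrt(x² - 1)) and atanh(x) = 0.5 * ln((1 + x) / (1 - x)).
    */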
   case GLSLstd450Asinh:
      dest->def = nir_fmul(nb, nir_fsign(nb, src[0]),
                           nir_flog(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
                                                 nir_fsqrt(nb, nir_ffma_imm2(nb, src[0], src[0], 1.0f)))));
      break;
   case GLSLstd450Acosh:
      dest->def = nir_flog(nb, nir_fadd(nb, src[0],
                                        nir_fsqrt(nb, nir_ffma_imm2(nb, src[0], src[0], -1.0f))));
      break;
   case GLSLstd450Atanh: {
      nir_ssa_def *one = nir_imm_floatN_t(nb, 1.0, src[0]->bit_size);
      dest->def =
         nir_fmul_imm(nb, nir_flog(nb, nir_fdiv(nb, nir_fadd(nb, src[0], one),
                                                nir_fsub(nb, one, src[0]))),
                      0.5f);
      break;
   }

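   /* The constants below are the p0/p1 fit coefficients discussed in the
    * build_asin() comment, tuned separately for asin and acos.
    */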
   case GLSLstd450Asin:
      dest->def = build_asin(nb, src[0], 0.086566724, -0.03102955, true);
      break;

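   /* acos(x) = π/2 - asin(x) */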
   case GLSLstd450Acos:
      dest->def =
         nir_fsub(nb, nir_imm_floatN_t(nb, M_PI_2f, src[0]->bit_size),
                  build_asin(nb, src[0], 0.08132463, -0.02363318, false));
      break;

   case GLSLstd450Atan:
      dest->def = nir_atan(nb, src[0]);
      break;

   case GLSLstd450Atan2:
      dest->def = nir_atan2(nb, src[0], src[1]);
      break;

   case GLSLstd450Frexp: {
      dest->def = nir_frexp_sig(nb, src[0]);

      struct vtn_pointer *i_ptr = vtn_value(b, w[6], vtn_value_type_pointer)->pointer;
      struct vtn_ssa_value *exp = vtn_create_ssa_value(b, i_ptr->type->type);
      exp->def = nir_frexp_exp(nb, src[0]);
      vtn_variable_store(b, exp, i_ptr, 0);
      break;
   }

   case GLSLstd450FrexpStruct: {
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
      dest->elems[0]->def = nir_frexp_sig(nb, src[0]);
      dest->elems[1]->def = nir_frexp_exp(nb, src[0]);
      break;
   }

   default: {
      unsigned execution_mode =
         b->shader->info.float_controls_execution_mode;
      bool exact;
      nir_op op = vtn_nir_alu_op_for_spirv_glsl_opcode(b, entrypoint,
                                                       execution_mode, &exact);
      /* don't override explicit decoration */
      b->nb.exact |= exact;
      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], NULL);
      break;
   }
   }
   b->nb.exact = false;

   if (mediump_16bit)
      vtn_mediump_upconvert_value(b, dest);

   vtn_push_ssa_value(b, w[2], dest);
}

static void
handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode,
                             const uint32_t *w, unsigned count)
{
   nir_intrinsic_op op;
   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      op = nir_intrinsic_interp_deref_at_centroid;
      break;
   case GLSLstd450InterpolateAtSample:
      op = nir_intrinsic_interp_deref_at_sample;
      break;
   case GLSLstd450InterpolateAtOffset:
      op = nir_intrinsic_interp_deref_at_offset;
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);

   struct vtn_pointer *ptr =
      vtn_value(b, w[5], vtn_value_type_pointer)->pointer;
   nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);

   /* If the value we are interpolating has an index into a vector then
    * interpolate the vector and index the result of that instead.  This is
    * necessary because the index will get generated as a series of nir_bcsel
    * instructions so it would no longer be an input variable.
    */
   const bool vec_array_deref = deref->deref_type == nir_deref_type_array &&
      glsl_type_is_vector(nir_deref_instr_parent(deref)->type);

   nir_deref_instr *vec_deref = NULL;
   if (vec_array_deref) {
      vec_deref = deref;
      deref = nir_deref_instr_parent(deref);
   }
   intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);

   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      break;
   case GLSLstd450InterpolateAtSample:
   case GLSLstd450InterpolateAtOffset:
      intrin->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   intrin->num_components = glsl_get_vector_elements(deref->type);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                     glsl_get_vector_elements(deref->type),
                     glsl_get_bit_size(deref->type), NULL);

   nir_builder_instr_insert(&b->nb, &intrin->instr);

   nir_ssa_def *def = &intrin->dest.ssa;
   if (vec_array_deref)
      def = nir_vector_extract(&b->nb, def, vec_deref->arr.index.ssa);

   vtn_push_nir_ssa(b, w[2], def);
}

bool
vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                               const uint32_t *w, unsigned count)
{
   switch ((enum GLSLstd450)ext_opcode) {
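   /* Determinant and MatrixInverse consume whole matrices as per-column
    * vtn_ssa_values, so they are handled here rather than through the
    * scalar/vector source collection in handle_glsl450_alu().
    */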
   case GLSLstd450Determinant: {
      vtn_push_nir_ssa(b, w[2], build_mat_det(b, vtn_ssa_value(b, w[5])));
      break;
   }

   case GLSLstd450MatrixInverse: {
      vtn_push_ssa_value(b, w[2], matrix_inverse(b, vtn_ssa_value(b, w[5])));
      break;
   }

   case GLSLstd450InterpolateAtCentroid:
   case GLSLstd450InterpolateAtSample:
   case GLSLstd450InterpolateAtOffset:
      handle_glsl450_interpolation(b, (enum GLSLstd450)ext_opcode, w, count);
      break;

   default:
      handle_glsl450_alu(b, (enum GLSLstd450)ext_opcode, w, count);
   }

   return true;
}