/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "GLSL.std.450.h"

#define M_PIf   ((float) M_PI)
#define M_PI_2f ((float) M_PI_2)
#define M_PI_4f ((float) M_PI_4)

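/* Determinant of a 2x2 matrix given as two column vectors:
 *
 *    det = col0.x * col1.y - col0.y * col1.x
 *
 * The swizzle lines the cross terms up so a single fmul/fsub pair suffices.
 */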
static nir_ssa_def *
build_mat2_det(nir_builder *b, nir_ssa_def *col[2])
{
   unsigned swiz[4] = {1, 0, 0, 0};
   nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2, true));
   return nir_fsub(b, nir_channel(b, p, 0), nir_channel(b, p, 1));
}

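/* Determinant of a 3x3 matrix via the scalar triple product
 * dot(col0, cross(col1, col2)), expanded with the yzx/zxy swizzles.
 */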
static nir_ssa_def *
build_mat3_det(nir_builder *b, nir_ssa_def *col[3])
{
   unsigned yzx[4] = {1, 2, 0, 0};
   unsigned zxy[4] = {2, 0, 1, 0};

   nir_ssa_def *prod0 =
      nir_fmul(b, col[0],
               nir_fmul(b, nir_swizzle(b, col[1], yzx, 3, true),
                           nir_swizzle(b, col[2], zxy, 3, true)));
   nir_ssa_def *prod1 =
      nir_fmul(b, col[0],
               nir_fmul(b, nir_swizzle(b, col[1], zxy, 3, true),
                           nir_swizzle(b, col[2], yzx, 3, true)));

   nir_ssa_def *diff = nir_fsub(b, prod0, prod1);

   return nir_fadd(b, nir_channel(b, diff, 0),
                      nir_fadd(b, nir_channel(b, diff, 1),
                                  nir_channel(b, diff, 2)));
}

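/* Determinant of a 4x4 matrix by cofactor expansion along the first column:
 * subdet[i] is the 3x3 minor obtained by dropping row i from columns 1-3,
 * and the alternating signs fall out of the final add/sub of the four
 * products.
 */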
static nir_ssa_def *
build_mat4_det(nir_builder *b, nir_ssa_def **col)
{
   nir_ssa_def *subdet[4];
   for (unsigned i = 0; i < 4; i++) {
      unsigned swiz[3];
      for (unsigned j = 0; j < 3; j++)
         swiz[j] = j + (j >= i);

      nir_ssa_def *subcol[3];
      subcol[0] = nir_swizzle(b, col[1], swiz, 3, true);
      subcol[1] = nir_swizzle(b, col[2], swiz, 3, true);
      subcol[2] = nir_swizzle(b, col[3], swiz, 3, true);

      subdet[i] = build_mat3_det(b, subcol);
   }

   nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, 4));

   return nir_fadd(b, nir_fsub(b, nir_channel(b, prod, 0),
                                  nir_channel(b, prod, 1)),
                      nir_fsub(b, nir_channel(b, prod, 2),
                                  nir_channel(b, prod, 3)));
}

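/* Determinant of a 2x2, 3x3 or 4x4 matrix, dispatching on the size of the
 * source matrix type.
 */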
static nir_ssa_def *
build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   unsigned size = glsl_get_vector_elements(src->type);

   nir_ssa_def *cols[4];
   for (unsigned i = 0; i < size; i++)
      cols[i] = src->elems[i]->def;

   switch (size) {
   case 2: return build_mat2_det(&b->nb, cols);
   case 3: return build_mat3_det(&b->nb, cols);
   case 4: return build_mat4_det(&b->nb, cols);
   default:
      vtn_fail("Invalid matrix size");
   }
}

/* Computes the determinant of the submatrix given by taking src and
 * removing the specified row and column.
 */
static nir_ssa_def *
build_mat_subdet(struct nir_builder *b, struct vtn_ssa_value *src,
                 unsigned size, unsigned row, unsigned col)
{
   assert(row < size && col < size);
   if (size == 2) {
      return nir_channel(b, src->elems[1 - col]->def, 1 - row);
   } else {
      /* Swizzle to get all but the specified row */
      unsigned swiz[3];
      for (unsigned j = 0; j < 3; j++)
         swiz[j] = j + (j >= row);

      /* Grab all but the specified column */
      nir_ssa_def *subcol[3];
      for (unsigned j = 0; j < size; j++) {
         if (j != col) {
            subcol[j - (j > col)] = nir_swizzle(b, src->elems[j]->def,
                                                swiz, size - 1, true);
         }
      }

      if (size == 3) {
         return build_mat2_det(b, subcol);
      } else {
         assert(size == 4);
         return build_mat3_det(b, subcol);
      }
   }
}

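/* Matrix inverse computed as adjugate(M) * (1 / det(M)).  Note that
 * build_mat_subdet() is called with the output row/column swapped
 * (row = c, col = r), which transposes the cofactor matrix and therefore
 * yields adjugate columns directly.
 */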
static struct vtn_ssa_value *
matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   nir_ssa_def *adj_col[4];
   unsigned size = glsl_get_vector_elements(src->type);

   /* Build up an adjugate matrix */
   for (unsigned c = 0; c < size; c++) {
      nir_ssa_def *elem[4];
      for (unsigned r = 0; r < size; r++) {
         elem[r] = build_mat_subdet(&b->nb, src, size, c, r);

         if ((r + c) % 2)
            elem[r] = nir_fneg(&b->nb, elem[r]);
      }

      adj_col[c] = nir_vec(&b->nb, elem, size);
   }

   nir_ssa_def *det_inv = nir_frcp(&b->nb, build_mat_det(b, src));

   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
   for (unsigned i = 0; i < size; i++)
      val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);

   return val;
}

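/* Euclidean length of a 1-4 component vector: sqrt(dot(vec, vec)). */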
static nir_ssa_def *
build_length(nir_builder *b, nir_ssa_def *vec)
{
   switch (vec->num_components) {
   case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
   case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
   case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
   case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
   default:
      unreachable("Invalid number of components");
   }
}

static inline nir_ssa_def *
build_fclamp(nir_builder *b,
             nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
{
   return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
}

/**
 * Return e^x.
 */
static nir_ssa_def *
build_exp(nir_builder *b, nir_ssa_def *x)
{
   return nir_fexp2(b, nir_fmul(b, x, nir_imm_float(b, M_LOG2E)));
}

/**
 * Return ln(x) - the natural logarithm of x.
 */
static nir_ssa_def *
build_log(nir_builder *b, nir_ssa_def *x)
{
   return nir_fmul(b, nir_flog2(b, x), nir_imm_float(b, 1.0 / M_LOG2E));
}

/**
 * Approximate asin(x) by the formula:
 *    asin~(x) = sign(x) * (pi/2 - sqrt(1 - |x|) * (pi/2 + |x|(pi/4 - 1 + |x|(p0 + |x|p1))))
 *
 * which is correct to first order at x=0 and x=±1 regardless of the p
 * coefficients but can be made second-order correct at both ends by selecting
 * the fit coefficients appropriately.  Different p coefficients can be used
 * in the asin and acos implementation to minimize some relative error metric
 * in each case.
 */
static nir_ssa_def *
build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1)
{
   nir_ssa_def *abs_x = nir_fabs(b, x);
   return nir_fmul(b, nir_fsign(b, x),
                   nir_fsub(b, nir_imm_float(b, M_PI_2f),
                            nir_fmul(b, nir_fsqrt(b, nir_fsub(b, nir_imm_float(b, 1.0f), abs_x)),
                                     nir_fadd(b, nir_imm_float(b, M_PI_2f),
                                              nir_fmul(b, abs_x,
                                                       nir_fadd(b, nir_imm_float(b, M_PI_4f - 1.0f),
                                                                nir_fmul(b, abs_x,
                                                                         nir_fadd(b, nir_imm_float(b, p0),
                                                                                  nir_fmul(b, abs_x,
                                                                                           nir_imm_float(b, p1))))))))));
}

/**
 * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
 */
static nir_ssa_def *
build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
{
   nir_ssa_def *accum = xs[0];

   for (int i = 1; i < terms; i++)
      accum = nir_fadd(b, accum, xs[i]);

   return accum;
}

static nir_ssa_def *
build_atan(nir_builder *b, nir_ssa_def *y_over_x)
{
   nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
   nir_ssa_def *one = nir_imm_float(b, 1.0f);

   /*
    * range-reduction, first step:
    *
    *      / y_over_x         if |y_over_x| <= 1.0;
    * x = <
    *      \ 1.0 / y_over_x   otherwise
    */
   nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
                                nir_fmax(b, abs_y_over_x, one));

   /*
    * approximate atan by evaluating polynomial:
    *
    * x   * 0.9999793128310355 - x^3  * 0.3326756418091246 +
    * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
    * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
    */
   nir_ssa_def *x_2 = nir_fmul(b, x, x);
   nir_ssa_def *x_3 = nir_fmul(b, x_2, x);
   nir_ssa_def *x_5 = nir_fmul(b, x_3, x_2);
   nir_ssa_def *x_7 = nir_fmul(b, x_5, x_2);
   nir_ssa_def *x_9 = nir_fmul(b, x_7, x_2);
   nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);

   nir_ssa_def *polynomial_terms[] = {
      nir_fmul(b, x, nir_imm_float(b, 0.9999793128310355f)),
      nir_fmul(b, x_3, nir_imm_float(b, -0.3326756418091246f)),
      nir_fmul(b, x_5, nir_imm_float(b, 0.1938924977115610f)),
      nir_fmul(b, x_7, nir_imm_float(b, -0.1173503194786851f)),
      nir_fmul(b, x_9, nir_imm_float(b, 0.0536813784310406f)),
      nir_fmul(b, x_11, nir_imm_float(b, -0.0121323213173444f)),
   };

   nir_ssa_def *tmp =
      build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));

   /* range-reduction fixup */
   tmp = nir_fadd(b, tmp,
                  nir_fmul(b,
                           nir_b2f(b, nir_flt(b, one, abs_y_over_x)),
                           nir_fadd(b, nir_fmul(b, tmp,
                                                nir_imm_float(b, -2.0f)),
                                       nir_imm_float(b, M_PI_2f))));

   /* sign fixup */
   return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
}

static nir_ssa_def *
build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
{
   nir_ssa_def *zero = nir_imm_float(b, 0);
   nir_ssa_def *one = nir_imm_float(b, 1);

   /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
    * for the y=0 discontinuity to end up aligned with the vertical
    * discontinuity of atan(s/t) along t=0.  This also makes sure that we
    * don't attempt to divide by zero along the vertical line, which may give
    * unspecified results on non-GLSL 4.1-capable hardware.
    */
   nir_ssa_def *flip = nir_fge(b, zero, x);
   nir_ssa_def *s = nir_bcsel(b, flip, nir_fabs(b, x), y);
   nir_ssa_def *t = nir_bcsel(b, flip, y, nir_fabs(b, x));

   /* If the magnitude of the denominator exceeds some huge value, scale down
    * the arguments in order to prevent the reciprocal operation from flushing
    * its result to zero, which would cause precision problems, and for s
    * infinite would cause us to return a NaN instead of the correct finite
    * value.
    *
    * If fmin and fmax are respectively the smallest and largest positive
    * normalized floating point values representable by the implementation,
    * the constants below should be in agreement with:
    *
    *    huge <= 1 / fmin
    *    scale <= 1 / fmin / fmax (for |t| >= huge)
    *
    * In addition scale should be a negative power of two in order to avoid
    * loss of precision.  The values chosen below should work for most usual
    * floating point representations with at least the dynamic range of ATI's
    * 24-bit representation.
    */
   nir_ssa_def *huge = nir_imm_float(b, 1e18f);
   nir_ssa_def *scale = nir_bcsel(b, nir_fge(b, nir_fabs(b, t), huge),
                                  nir_imm_float(b, 0.25), one);
   nir_ssa_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
   nir_ssa_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t);

   /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
    * that ∞/∞ = 1) in order to comply with the rather artificial rules
    * inherited from IEEE 754-2008, namely:
    *
    *  "atan2(±∞, −∞) is ±3π/4
    *   atan2(±∞, +∞) is ±π/4"
    *
    * Note that this is inconsistent with the rules for the neighborhood of
    * zero that are based on iterated limits:
    *
    *  "atan2(±0, −0) is ±π
    *   atan2(±0, +0) is ±0"
    *
    * but GLSL specifically allows implementations to deviate from IEEE rules
    * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
    * well).
    */
   nir_ssa_def *tan = nir_bcsel(b, nir_feq(b, nir_fabs(b, x), nir_fabs(b, y)),
                                one, nir_fabs(b, s_over_t));

   /* Calculate the arctangent and fix up the result if we had flipped the
    * coordinate system.
    */
   nir_ssa_def *arc = nir_fadd(b, nir_fmul(b, nir_b2f(b, flip),
                                           nir_imm_float(b, M_PI_2f)),
                               build_atan(b, tan));

   /* Rather convoluted calculation of the sign of the result.  When x < 0 we
    * cannot use fsign because we need to be able to distinguish between
    * negative and positive zero.  We don't use bitwise arithmetic tricks for
    * consistency with the GLSL front-end.  When x >= 0 rcp_scaled_t will
    * always be non-negative so this won't be able to distinguish between
    * negative and positive zero, but we don't care because atan2 is
    * continuous along the whole positive y = 0 half-line, so it won't affect
    * the result significantly.
    */
   return nir_bcsel(b, nir_flt(b, nir_fmin(b, y, rcp_scaled_t), zero),
                    nir_fneg(b, arc), arc);
}

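/* Implements frexp(): splits x into a mantissa with magnitude in [0.5, 1.0)
 * (zero for x == 0) and an integer exponent such that
 * x == mantissa * 2^exponent.  The mantissa is returned and the exponent is
 * written to *exponent.  Operates directly on the IEEE 754 single-precision
 * bit pattern, so it assumes 32-bit sources.
 */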
static nir_ssa_def *
build_frexp(nir_builder *b, nir_ssa_def *x, nir_ssa_def **exponent)
{
   nir_ssa_def *abs_x = nir_fabs(b, x);
   nir_ssa_def *zero = nir_imm_float(b, 0.0f);

   /* Single-precision floating-point values are stored as
    *   1 sign bit;
    *   8 exponent bits;
    *   23 mantissa bits.
    *
    * An exponent shift of 23 will shift the mantissa out, leaving only the
    * exponent and sign bit (which itself may be zero, if the absolute value
    * was taken before the bitcast and shift).
    */
   nir_ssa_def *exponent_shift = nir_imm_int(b, 23);
   nir_ssa_def *exponent_bias = nir_imm_int(b, -126);

   nir_ssa_def *sign_mantissa_mask = nir_imm_int(b, 0x807fffffu);

   /* Exponent of floating-point values in the range [0.5, 1.0). */
   nir_ssa_def *exponent_value = nir_imm_int(b, 0x3f000000u);

   nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero);

   *exponent =
      nir_iadd(b, nir_ushr(b, abs_x, exponent_shift),
               nir_bcsel(b, is_not_zero, exponent_bias, zero));

   return nir_ior(b, nir_iand(b, x, sign_mantissa_mask),
                  nir_bcsel(b, is_not_zero, exponent_value, zero));
}

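/* Maps GLSL.std.450 opcodes that correspond directly to a single NIR ALU
 * opcode; anything that has to be built out of multiple instructions is
 * handled explicitly in handle_glsl450_alu() instead.
 */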
static nir_op
vtn_nir_alu_op_for_spirv_glsl_opcode(struct vtn_builder *b,
                                     enum GLSLstd450 opcode)
{
   switch (opcode) {
   case GLSLstd450Round:         return nir_op_fround_even;
   case GLSLstd450RoundEven:     return nir_op_fround_even;
   case GLSLstd450Trunc:         return nir_op_ftrunc;
   case GLSLstd450FAbs:          return nir_op_fabs;
   case GLSLstd450SAbs:          return nir_op_iabs;
   case GLSLstd450FSign:         return nir_op_fsign;
   case GLSLstd450SSign:         return nir_op_isign;
   case GLSLstd450Floor:         return nir_op_ffloor;
   case GLSLstd450Ceil:          return nir_op_fceil;
   case GLSLstd450Fract:         return nir_op_ffract;
   case GLSLstd450Sin:           return nir_op_fsin;
   case GLSLstd450Cos:           return nir_op_fcos;
   case GLSLstd450Pow:           return nir_op_fpow;
   case GLSLstd450Exp2:          return nir_op_fexp2;
   case GLSLstd450Log2:          return nir_op_flog2;
   case GLSLstd450Sqrt:          return nir_op_fsqrt;
   case GLSLstd450InverseSqrt:   return nir_op_frsq;
   case GLSLstd450NMin:          return nir_op_fmin;
   case GLSLstd450FMin:          return nir_op_fmin;
   case GLSLstd450UMin:          return nir_op_umin;
   case GLSLstd450SMin:          return nir_op_imin;
   case GLSLstd450NMax:          return nir_op_fmax;
   case GLSLstd450FMax:          return nir_op_fmax;
   case GLSLstd450UMax:          return nir_op_umax;
   case GLSLstd450SMax:          return nir_op_imax;
   case GLSLstd450FMix:          return nir_op_flrp;
   case GLSLstd450Fma:           return nir_op_ffma;
   case GLSLstd450Ldexp:         return nir_op_ldexp;
   case GLSLstd450FindILsb:      return nir_op_find_lsb;
   case GLSLstd450FindSMsb:      return nir_op_ifind_msb;
   case GLSLstd450FindUMsb:      return nir_op_ufind_msb;

   /* Packing/Unpacking functions */
   case GLSLstd450PackSnorm4x8:     return nir_op_pack_snorm_4x8;
   case GLSLstd450PackUnorm4x8:     return nir_op_pack_unorm_4x8;
   case GLSLstd450PackSnorm2x16:    return nir_op_pack_snorm_2x16;
   case GLSLstd450PackUnorm2x16:    return nir_op_pack_unorm_2x16;
   case GLSLstd450PackHalf2x16:     return nir_op_pack_half_2x16;
   case GLSLstd450PackDouble2x32:   return nir_op_pack_64_2x32;
   case GLSLstd450UnpackSnorm4x8:   return nir_op_unpack_snorm_4x8;
   case GLSLstd450UnpackUnorm4x8:   return nir_op_unpack_unorm_4x8;
   case GLSLstd450UnpackSnorm2x16:  return nir_op_unpack_snorm_2x16;
   case GLSLstd450UnpackUnorm2x16:  return nir_op_unpack_unorm_2x16;
   case GLSLstd450UnpackHalf2x16:   return nir_op_unpack_half_2x16;
   case GLSLstd450UnpackDouble2x32: return nir_op_unpack_64_2x32;

   default:
      vtn_fail("No NIR equivalent");
   }
}

#define NIR_IMM_FP(n, v) (src[0]->bit_size == 64 ? nir_imm_double(n, v) : nir_imm_float(n, v))

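/* Handles the bulk of the GLSL.std.450 instruction set: opcodes that need an
 * open-coded NIR sequence get their own case below, pointer operands (the
 * Modf/Frexp out-parameters) are skipped during source collection and
 * handled per-case, and everything else falls through to the 1:1 table in
 * vtn_nir_alu_op_for_spirv_glsl_opcode().
 */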
static void
handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                   const uint32_t *w, unsigned count)
{
   struct nir_builder *nb = &b->nb;
   const struct glsl_type *dest_type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   val->ssa = vtn_create_ssa_value(b, dest_type);

   /* Collect the various SSA sources */
   unsigned num_inputs = count - 5;
   nir_ssa_def *src[3] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      /* These are handled specially below */
      if (vtn_untyped_value(b, w[i + 5])->value_type == vtn_value_type_pointer)
         continue;

      src[i] = vtn_ssa_value(b, w[i + 5])->def;
   }

   switch (entrypoint) {
   case GLSLstd450Radians:
      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 0.01745329251));
      return;
   case GLSLstd450Degrees:
      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 57.2957795131));
      return;
   case GLSLstd450Tan:
      val->ssa->def = nir_fdiv(nb, nir_fsin(nb, src[0]),
                                   nir_fcos(nb, src[0]));
      return;

   case GLSLstd450Modf: {
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
                          nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
      return;
   }

   case GLSLstd450ModfStruct: {
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      vtn_assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
      return;
   }

   case GLSLstd450Step:
      val->ssa->def = nir_sge(nb, src[1], src[0]);
      return;

   case GLSLstd450Length:
      val->ssa->def = build_length(nb, src[0]);
      return;
   case GLSLstd450Distance:
      val->ssa->def = build_length(nb, nir_fsub(nb, src[0], src[1]));
      return;
   case GLSLstd450Normalize:
      val->ssa->def = nir_fdiv(nb, src[0], build_length(nb, src[0]));
      return;

   case GLSLstd450Exp:
      val->ssa->def = build_exp(nb, src[0]);
      return;

   case GLSLstd450Log:
      val->ssa->def = build_log(nb, src[0]);
      return;

   case GLSLstd450FClamp:
   case GLSLstd450NClamp:
      val->ssa->def = build_fclamp(nb, src[0], src[1], src[2]);
      return;
   case GLSLstd450UClamp:
      val->ssa->def = nir_umin(nb, nir_umax(nb, src[0], src[1]), src[2]);
      return;
   case GLSLstd450SClamp:
      val->ssa->def = nir_imin(nb, nir_imax(nb, src[0], src[1]), src[2]);
      return;

   case GLSLstd450Cross: {
      unsigned yzx[4] = { 1, 2, 0, 0 };
      unsigned zxy[4] = { 2, 0, 1, 0 };
      val->ssa->def =
         nir_fsub(nb, nir_fmul(nb, nir_swizzle(nb, src[0], yzx, 3, true),
                                   nir_swizzle(nb, src[1], zxy, 3, true)),
                      nir_fmul(nb, nir_swizzle(nb, src[0], zxy, 3, true),
                                   nir_swizzle(nb, src[1], yzx, 3, true)));
      return;
   }

   case GLSLstd450SmoothStep: {
      /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
      nir_ssa_def *t =
         build_fclamp(nb, nir_fdiv(nb, nir_fsub(nb, src[2], src[0]),
                                       nir_fsub(nb, src[1], src[0])),
                          NIR_IMM_FP(nb, 0.0), NIR_IMM_FP(nb, 1.0));
      /* result = t * t * (3 - 2 * t) */
      val->ssa->def =
         nir_fmul(nb, t, nir_fmul(nb, t,
            nir_fsub(nb, NIR_IMM_FP(nb, 3.0),
                         nir_fmul(nb, NIR_IMM_FP(nb, 2.0), t))));
      return;
   }

   case GLSLstd450FaceForward:
      val->ssa->def =
         nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
                                   nir_imm_float(nb, 0.0)),
                       src[0], nir_fneg(nb, src[0]));
      return;

   case GLSLstd450Reflect:
      /* I - 2 * dot(N, I) * N */
      val->ssa->def =
         nir_fsub(nb, src[0], nir_fmul(nb, nir_imm_float(nb, 2.0),
                              nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
                                           src[1])));
      return;

   case GLSLstd450Refract: {
      nir_ssa_def *I = src[0];
      nir_ssa_def *N = src[1];
      nir_ssa_def *eta = src[2];
      nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
      nir_ssa_def *one = nir_imm_float(nb, 1.0);
      nir_ssa_def *zero = nir_imm_float(nb, 0.0);
      /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
      nir_ssa_def *k =
         nir_fsub(nb, one, nir_fmul(nb, eta, nir_fmul(nb, eta,
                      nir_fsub(nb, one, nir_fmul(nb, n_dot_i, n_dot_i)))));
      nir_ssa_def *result =
         nir_fsub(nb, nir_fmul(nb, eta, I),
                      nir_fmul(nb, nir_fadd(nb, nir_fmul(nb, eta, n_dot_i),
                                                nir_fsqrt(nb, k)), N));
      /* XXX: bcsel, or if statement? */
      val->ssa->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
      return;
   }

   case GLSLstd450Sinh:
      /* 0.5 * (e^x - e^(-x)) */
      val->ssa->def =
         nir_fmul(nb, nir_imm_float(nb, 0.5f),
                      nir_fsub(nb, build_exp(nb, src[0]),
                                   build_exp(nb, nir_fneg(nb, src[0]))));
      return;

   case GLSLstd450Cosh:
      /* 0.5 * (e^x + e^(-x)) */
      val->ssa->def =
         nir_fmul(nb, nir_imm_float(nb, 0.5f),
                      nir_fadd(nb, build_exp(nb, src[0]),
                                   build_exp(nb, nir_fneg(nb, src[0]))));
      return;

   case GLSLstd450Tanh: {
      /* tanh(x) := (0.5 * (e^x - e^(-x))) / (0.5 * (e^x + e^(-x)))
       *
       * With a little algebra this reduces to (e^2x - 1) / (e^2x + 1)
       *
       * We clamp x to (-inf, +10] to avoid precision problems.  When x > 10,
       * e^2x is so much larger than 1.0 that 1.0 gets flushed to zero in the
       * computation e^2x +/- 1 so it can be ignored.
       */
      nir_ssa_def *x = nir_fmin(nb, src[0], nir_imm_float(nb, 10));
      nir_ssa_def *exp2x = build_exp(nb, nir_fmul(nb, x, nir_imm_float(nb, 2)));
      val->ssa->def = nir_fdiv(nb, nir_fsub(nb, exp2x, nir_imm_float(nb, 1)),
                                   nir_fadd(nb, exp2x, nir_imm_float(nb, 1)));
      return;
   }

   case GLSLstd450Asinh:
      val->ssa->def = nir_fmul(nb, nir_fsign(nb, src[0]),
         build_log(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
                       nir_fsqrt(nb, nir_fadd(nb, nir_fmul(nb, src[0], src[0]),
                                                  nir_imm_float(nb, 1.0f))))));
      return;
   case GLSLstd450Acosh:
      val->ssa->def = build_log(nb, nir_fadd(nb, src[0],
         nir_fsqrt(nb, nir_fsub(nb, nir_fmul(nb, src[0], src[0]),
                                    nir_imm_float(nb, 1.0f)))));
      return;
   case GLSLstd450Atanh: {
      nir_ssa_def *one = nir_imm_float(nb, 1.0);
      val->ssa->def = nir_fmul(nb, nir_imm_float(nb, 0.5f),
         build_log(nb, nir_fdiv(nb, nir_fadd(nb, one, src[0]),
                                    nir_fsub(nb, one, src[0]))));
      return;
   }

   case GLSLstd450Asin:
      val->ssa->def = build_asin(nb, src[0], 0.086566724, -0.03102955);
      return;

   case GLSLstd450Acos:
      val->ssa->def = nir_fsub(nb, nir_imm_float(nb, M_PI_2f),
                               build_asin(nb, src[0], 0.08132463, -0.02363318));
      return;

   case GLSLstd450Atan:
      val->ssa->def = build_atan(nb, src[0]);
      return;

   case GLSLstd450Atan2:
      val->ssa->def = build_atan2(nb, src[0], src[1]);
      return;

   case GLSLstd450Frexp: {
      nir_ssa_def *exponent;
      val->ssa->def = build_frexp(nb, src[0], &exponent);
      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
      return;
   }

   case GLSLstd450FrexpStruct: {
      vtn_assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = build_frexp(nb, src[0],
                                            &val->ssa->elems[1]->def);
      return;
   }

   default:
      val->ssa->def =
         nir_build_alu(&b->nb,
                       vtn_nir_alu_op_for_spirv_glsl_opcode(b, entrypoint),
                       src[0], src[1], src[2], NULL);
      return;
   }
}

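/* Lowers the InterpolateAt* opcodes to the corresponding
 * nir_intrinsic_interp_var_at_* intrinsics on the dereferenced input
 * variable, with the sample index or offset as the first source where
 * applicable.
 */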
static void
handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode,
                             const uint32_t *w, unsigned count)
{
   const struct glsl_type *dest_type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   val->ssa = vtn_create_ssa_value(b, dest_type);

   nir_intrinsic_op op;
   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      op = nir_intrinsic_interp_var_at_centroid;
      break;
   case GLSLstd450InterpolateAtSample:
      op = nir_intrinsic_interp_var_at_sample;
      break;
   case GLSLstd450InterpolateAtOffset:
      op = nir_intrinsic_interp_var_at_offset;
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);

   nir_deref_var *deref = vtn_nir_deref(b, w[5]);
   intrin->variables[0] = nir_deref_var_clone(deref, intrin);

   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      break;
   case GLSLstd450InterpolateAtSample:
   case GLSLstd450InterpolateAtOffset:
      intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   intrin->num_components = glsl_get_vector_elements(dest_type);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                     glsl_get_vector_elements(dest_type),
                     glsl_get_bit_size(dest_type), NULL);
   val->ssa->def = &intrin->dest.ssa;

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}

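/* Entry point for the GLSL.std.450 extended instruction set.  Determinant
 * and MatrixInverse operate on whole matrices and the InterpolateAt*
 * opcodes need intrinsics, so they are handled here; all other opcodes go
 * through handle_glsl450_alu().
 */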
bool
vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
                               const uint32_t *w, unsigned count)
{
   switch ((enum GLSLstd450)ext_opcode) {
   case GLSLstd450Determinant: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
      val->ssa->def = build_mat_det(b, vtn_ssa_value(b, w[5]));
      break;
   }

   case GLSLstd450MatrixInverse: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = matrix_inverse(b, vtn_ssa_value(b, w[5]));
      break;
   }

   case GLSLstd450InterpolateAtCentroid:
   case GLSLstd450InterpolateAtSample:
   case GLSLstd450InterpolateAtOffset:
      handle_glsl450_interpolation(b, ext_opcode, w, count);
      break;

   default:
      handle_glsl450_alu(b, (enum GLSLstd450)ext_opcode, w, count);
   }

   return true;
}