1 /*
2 * Copyright © 2018 Red Hat Inc.
3 * Copyright © 2015 Intel Corporation
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <math.h>
26
27 #include "nir.h"
28 #include "nir_builtin_builder.h"
29
30 nir_ssa_def*
nir_cross3(nir_builder * b,nir_ssa_def * x,nir_ssa_def * y)31 nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
32 {
33 unsigned yzx[3] = { 1, 2, 0 };
34 unsigned zxy[3] = { 2, 0, 1 };
35
36 return nir_ffma(b, nir_swizzle(b, x, yzx, 3),
37 nir_swizzle(b, y, zxy, 3),
38 nir_fneg(b, nir_fmul(b, nir_swizzle(b, x, zxy, 3),
39 nir_swizzle(b, y, yzx, 3))));
40 }
41
42 nir_ssa_def*
nir_cross4(nir_builder * b,nir_ssa_def * x,nir_ssa_def * y)43 nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
44 {
45 nir_ssa_def *cross = nir_cross3(b, x, y);
46
47 return nir_vec4(b,
48 nir_channel(b, cross, 0),
49 nir_channel(b, cross, 1),
50 nir_channel(b, cross, 2),
51 nir_imm_intN_t(b, 0, cross->bit_size));
52 }
53
54 nir_ssa_def*
nir_fast_length(nir_builder * b,nir_ssa_def * vec)55 nir_fast_length(nir_builder *b, nir_ssa_def *vec)
56 {
57 return nir_fsqrt(b, nir_fdot(b, vec, vec));
58 }
59
/* Builds nextafter(x, y): the next representable floating-point value after
 * x in the direction of y.
 *
 * For IEEE binary floats, stepping to the adjacent representable value of
 * the same sign is just +/- 1 on the raw bit pattern, so the body works on
 * x reinterpreted as an integer.  The only special cases are x == y
 * (return x), x == +/-0.0 (the sign bit must be set explicitly when
 * stepping across zero), and NaN inputs (propagated via nir_nan_check2).
 */
nir_ssa_def*
nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
{
   /* Integer 0 and 1 at x's bit size.  zero doubles as the float +0.0
    * (identical bit pattern) for the compares below. */
   nir_ssa_def *zero = nir_imm_intN_t(b, 0, x->bit_size);
   nir_ssa_def *one = nir_imm_intN_t(b, 1, x->bit_size);

   nir_ssa_def *condeq = nir_feq(b, x, y);      /* x == y: result is x itself */
   nir_ssa_def *conddir = nir_flt(b, x, y);     /* true when stepping upward */
   nir_ssa_def *condzero = nir_feq(b, x, zero); /* x is +0.0 or -0.0 */

   uint64_t sign_mask = 1ull << (x->bit_size - 1);
   /* Magnitude of the first step away from zero: one denorm ulp... */
   uint64_t min_abs = 1;

   if (nir_is_denorm_flush_to_zero(b->shader->info.float_controls_execution_mode, x->bit_size)) {
      /* ...unless denorms are flushed, in which case the first value after
       * zero is the smallest normal: exponent field = 1, mantissa = 0
       * (the shift is the mantissa width for each float size). */
      switch (x->bit_size) {
      case 16:
         min_abs = 1 << 10;
         break;
      case 32:
         min_abs = 1 << 23;
         break;
      case 64:
         min_abs = 1ULL << 52;
         break;
      }

      /* Flush denorm to zero to avoid returning a denorm when condeq is true. */
      x = nir_fmul(b, x, nir_imm_floatN_t(b, 1.0, x->bit_size));
   }

   /* Step toward negative infinity.
    * beware of: +/-0.0 - 1 == NaN; from zero we must produce -min_abs
    * (sign bit | min_abs) explicitly instead. */
   nir_ssa_def *xn =
      nir_bcsel(b,
                condzero,
                nir_imm_intN_t(b, sign_mask | min_abs, x->bit_size),
                nir_isub(b, x, one));

   /* Step toward positive infinity.
    * beware of -0.0 + 1 == -0x1p-149; from zero produce +min_abs directly. */
   nir_ssa_def *xp = nir_bcsel(b, condzero,
                               nir_imm_intN_t(b, min_abs, x->bit_size),
                               nir_iadd(b, x, one));

   /* nextafter can be implemented by just +/- 1 on the int value: for
    * positive x, +1 on the bits moves away from zero (toward +inf); for
    * negative x the direction is inverted, hence the ixor with the sign
    * of x. */
   nir_ssa_def *res =
      nir_bcsel(b, nir_ixor(b, conddir, nir_flt(b, x, zero)), xp, xn);

   /* Propagate NaN from either input; otherwise return x itself when
    * x == y, per the nextafter contract. */
   return nir_nan_check2(b, x, y, nir_bcsel(b, condeq, x, res));
}
108
/* Builds a numerically robust normalize(vec).
 *
 * For a scalar input this reduces to fsign.  For vectors, the input is
 * pre-scaled by its largest absolute component so the dot product cannot
 * overflow/underflow, with explicit special cases for infinite and
 * all-zero inputs.
 */
nir_ssa_def*
nir_normalize(nir_builder *b, nir_ssa_def *vec)
{
   if (vec->num_components == 1)
      return nir_fsign(b, vec);

   nir_ssa_def *f0 = nir_imm_floatN_t(b, 0.0, vec->bit_size);
   nir_ssa_def *f1 = nir_imm_floatN_t(b, 1.0, vec->bit_size);
   nir_ssa_def *finf = nir_imm_floatN_t(b, INFINITY, vec->bit_size);

   /* scale the input to increase precision */
   nir_ssa_def *maxc = nir_fmax_abs_vec_comp(b, vec);
   nir_ssa_def *svec = nir_fdiv(b, vec, maxc);
   /* for inf: build a finite substitute vector with 1.0 in the components
    * that compare equal to +inf and 0.0 elsewhere.  NOTE(review): the sign
    * is taken from the constant f1 (always positive), so -inf components
    * map to +0.0 here — confirm this matches the intended built-in
    * normalize semantics for mixed-sign infinite inputs. */
   nir_ssa_def *finfvec = nir_copysign(b, nir_bcsel(b, nir_feq(b, vec, finf), f1, f0), f1);

   /* When the largest component is infinite, svec would be 0 or NaN
    * (x/inf, inf/inf); normalize the substitute vector instead. */
   nir_ssa_def *temp = nir_bcsel(b, nir_feq(b, maxc, finf), finfvec, svec);
   nir_ssa_def *res = nir_fmul(b, temp, nir_frsq(b, nir_fdot(b, temp, temp)));

   /* An all-zero vector normalizes to itself, avoiding 0 * rsq(0) = NaN. */
   return nir_bcsel(b, nir_feq(b, maxc, f0), vec, res);
}
130
131 nir_ssa_def*
nir_smoothstep(nir_builder * b,nir_ssa_def * edge0,nir_ssa_def * edge1,nir_ssa_def * x)132 nir_smoothstep(nir_builder *b, nir_ssa_def *edge0, nir_ssa_def *edge1, nir_ssa_def *x)
133 {
134 nir_ssa_def *f2 = nir_imm_floatN_t(b, 2.0, x->bit_size);
135 nir_ssa_def *f3 = nir_imm_floatN_t(b, 3.0, x->bit_size);
136
137 /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
138 nir_ssa_def *t =
139 nir_fsat(b, nir_fdiv(b, nir_fsub(b, x, edge0),
140 nir_fsub(b, edge1, edge0)));
141
142 /* result = t * t * (3 - 2 * t) */
143 return nir_fmul(b, t, nir_fmul(b, t, nir_a_minus_bc(b, f3, f2, t)));
144 }
145
146 nir_ssa_def*
nir_upsample(nir_builder * b,nir_ssa_def * hi,nir_ssa_def * lo)147 nir_upsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo)
148 {
149 assert(lo->num_components == hi->num_components);
150 assert(lo->bit_size == hi->bit_size);
151
152 nir_ssa_def *res[NIR_MAX_VEC_COMPONENTS];
153 for (unsigned i = 0; i < lo->num_components; ++i) {
154 nir_ssa_def *vec = nir_vec2(b, nir_channel(b, lo, i), nir_channel(b, hi, i));
155 res[i] = nir_pack_bits(b, vec, vec->bit_size * 2);
156 }
157
158 return nir_vec(b, res, lo->num_components);
159 }
160
161 /**
162 * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
163 */
164 static nir_ssa_def *
build_fsum(nir_builder * b,nir_ssa_def ** xs,int terms)165 build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
166 {
167 nir_ssa_def *accum = xs[0];
168
169 for (int i = 1; i < terms; i++)
170 accum = nir_fadd(b, accum, xs[i]);
171
172 return accum;
173 }
174
/* Builds atan(y_over_x) with a result in [-pi/2, pi/2].
 *
 * The argument is range-reduced so a fixed odd polynomial only ever sees
 * values in [0, 1]; the reduction is undone afterwards and the sign of the
 * input is restored last.
 */
nir_ssa_def *
nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
{
   const uint32_t bit_size = y_over_x->bit_size;

   nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, bit_size);

   /*
    * range-reduction, first step:
    *
    *      / y_over_x         if |y_over_x| <= 1.0;
    * x = <
    *      \ 1.0 / y_over_x   otherwise
    *
    * (computed branchlessly as min/max so x = min(|u|,1) / max(|u|,1)).
    */
   nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
                             nir_fmax(b, abs_y_over_x, one));

   /*
    * approximate atan by evaluating polynomial:
    *
    * x   * 0.9999793128310355 - x^3  * 0.3326756418091246 +
    * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
    * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
    */
   nir_ssa_def *x_2 = nir_fmul(b, x, x);
   nir_ssa_def *x_3 = nir_fmul(b, x_2, x);
   nir_ssa_def *x_5 = nir_fmul(b, x_3, x_2);
   nir_ssa_def *x_7 = nir_fmul(b, x_5, x_2);
   nir_ssa_def *x_9 = nir_fmul(b, x_7, x_2);
   nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);

   nir_ssa_def *polynomial_terms[] = {
      nir_fmul_imm(b, x, 0.9999793128310355f),
      nir_fmul_imm(b, x_3, -0.3326756418091246f),
      nir_fmul_imm(b, x_5, 0.1938924977115610f),
      nir_fmul_imm(b, x_7, -0.1173503194786851f),
      nir_fmul_imm(b, x_9, 0.0536813784310406f),
      nir_fmul_imm(b, x_11, -0.0121323213173444f),
   };

   nir_ssa_def *tmp =
      build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));

   /* range-reduction fixup: when the input was reduced (|y_over_x| > 1),
    * apply atan(u) = pi/2 - atan(1/u).  Done branchlessly as
    * tmp + b2f(reduced) * (pi/2 - 2*tmp) via a single ffma chain. */
   tmp = nir_ffma(b,
                  nir_b2f(b, nir_flt(b, one, abs_y_over_x), bit_size),
                  nir_ffma_imm12(b, tmp, -2.0f, M_PI_2),
                  tmp);

   /* sign fixup: atan is odd, so multiply by sign(y_over_x) */
   return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
}
228
/* Builds a full four-quadrant atan2(y, x).
 *
 * The left half-plane is rotated into the right one so a single call to
 * nir_atan on the quotient suffices; quadrant and sign are then fixed up,
 * with careful handling of huge/infinite denominators and of signed zero.
 */
nir_ssa_def *
nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
{
   assert(y->bit_size == x->bit_size);
   const uint32_t bit_size = x->bit_size;

   nir_ssa_def *zero = nir_imm_floatN_t(b, 0, bit_size);
   nir_ssa_def *one = nir_imm_floatN_t(b, 1, bit_size);

   /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
    * for the y=0 discontinuity to end up aligned with the vertical
    * discontinuity of atan(s/t) along t=0.  This also makes sure that we
    * don't attempt to divide by zero along the vertical line, which may give
    * unspecified results on non-GLSL 4.1-capable hardware.
    */
   nir_ssa_def *flip = nir_fge(b, zero, x);
   nir_ssa_def *s = nir_bcsel(b, flip, nir_fabs(b, x), y);
   nir_ssa_def *t = nir_bcsel(b, flip, y, nir_fabs(b, x));

   /* If the magnitude of the denominator exceeds some huge value, scale down
    * the arguments in order to prevent the reciprocal operation from flushing
    * its result to zero, which would cause precision problems, and for s
    * infinite would cause us to return a NaN instead of the correct finite
    * value.
    *
    * If fmin and fmax are respectively the smallest and largest positive
    * normalized floating point values representable by the implementation,
    * the constants below should be in agreement with:
    *
    *    huge <= 1 / fmin
    *    scale <= 1 / fmin / fmax (for |t| >= huge)
    *
    * In addition scale should be a negative power of two in order to avoid
    * loss of precision.  The values chosen below should work for most usual
    * floating point representations with at least the dynamic range of ATI's
    * 24-bit representation.
    */
   const double huge_val = bit_size >= 32 ? 1e18 : 16384;
   nir_ssa_def *huge = nir_imm_floatN_t(b,  huge_val, bit_size);
   nir_ssa_def *scale = nir_bcsel(b, nir_fge(b, nir_fabs(b, t), huge),
                                  nir_imm_floatN_t(b, 0.25, bit_size), one);
   nir_ssa_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
   /* Both numerator and denominator carry the same scale factor, so the
    * quotient is unchanged. */
   nir_ssa_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t);

   /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
    * that ∞/∞ = 1) in order to comply with the rather artificial rules
    * inherited from IEEE 754-2008, namely:
    *
    *  "atan2(±∞, −∞) is ±3π/4
    *   atan2(±∞, +∞) is ±π/4"
    *
    * Note that this is inconsistent with the rules for the neighborhood of
    * zero that are based on iterated limits:
    *
    *  "atan2(±0, −0) is ±π
    *   atan2(±0, +0) is ±0"
    *
    * but GLSL specifically allows implementations to deviate from IEEE rules
    * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
    * well).
    */
   nir_ssa_def *tan = nir_bcsel(b, nir_feq(b, nir_fabs(b, x), nir_fabs(b, y)),
                                one, nir_fabs(b, s_over_t));

   /* Calculate the arctangent and fix up the result if we had flipped the
    * coordinate system (adding π/2 undoes the earlier rotation).
    */
   nir_ssa_def *arc =
      nir_ffma_imm1(b, nir_b2f(b, flip, bit_size), M_PI_2, nir_atan(b, tan));

   /* Rather convoluted calculation of the sign of the result.  When x < 0 we
    * cannot use fsign because we need to be able to distinguish between
    * negative and positive zero.  We don't use bitwise arithmetic tricks for
    * consistency with the GLSL front-end.  When x >= 0 rcp_scaled_t will
    * always be non-negative so this won't be able to distinguish between
    * negative and positive zero, but we don't care because atan2 is
    * continuous along the whole positive y = 0 half-line, so it won't affect
    * the result significantly.
    */
   return nir_bcsel(b, nir_flt(b, nir_fmin(b, y, rcp_scaled_t), zero),
                    nir_fneg(b, arc), arc);
}
311
312 nir_ssa_def *
nir_get_texture_size(nir_builder * b,nir_tex_instr * tex)313 nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
314 {
315 b->cursor = nir_before_instr(&tex->instr);
316
317 nir_tex_instr *txs;
318
319 unsigned num_srcs = 1; /* One for the LOD */
320 for (unsigned i = 0; i < tex->num_srcs; i++) {
321 if (tex->src[i].src_type == nir_tex_src_texture_deref ||
322 tex->src[i].src_type == nir_tex_src_sampler_deref ||
323 tex->src[i].src_type == nir_tex_src_texture_offset ||
324 tex->src[i].src_type == nir_tex_src_sampler_offset ||
325 tex->src[i].src_type == nir_tex_src_texture_handle ||
326 tex->src[i].src_type == nir_tex_src_sampler_handle)
327 num_srcs++;
328 }
329
330 txs = nir_tex_instr_create(b->shader, num_srcs);
331 txs->op = nir_texop_txs;
332 txs->sampler_dim = tex->sampler_dim;
333 txs->is_array = tex->is_array;
334 txs->is_shadow = tex->is_shadow;
335 txs->is_new_style_shadow = tex->is_new_style_shadow;
336 txs->texture_index = tex->texture_index;
337 txs->sampler_index = tex->sampler_index;
338 txs->dest_type = nir_type_int32;
339
340 unsigned idx = 0;
341 for (unsigned i = 0; i < tex->num_srcs; i++) {
342 if (tex->src[i].src_type == nir_tex_src_texture_deref ||
343 tex->src[i].src_type == nir_tex_src_sampler_deref ||
344 tex->src[i].src_type == nir_tex_src_texture_offset ||
345 tex->src[i].src_type == nir_tex_src_sampler_offset ||
346 tex->src[i].src_type == nir_tex_src_texture_handle ||
347 tex->src[i].src_type == nir_tex_src_sampler_handle) {
348 nir_src_copy(&txs->src[idx].src, &tex->src[i].src);
349 txs->src[idx].src_type = tex->src[i].src_type;
350 idx++;
351 }
352 }
353 /* Add in an LOD because some back-ends require it */
354 txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
355 txs->src[idx].src_type = nir_tex_src_lod;
356
357 nir_ssa_dest_init(&txs->instr, &txs->dest,
358 nir_tex_instr_dest_size(txs), 32, NULL);
359 nir_builder_instr_insert(b, &txs->instr);
360
361 return &txs->dest.ssa;
362 }
363
364 nir_ssa_def *
nir_get_texture_lod(nir_builder * b,nir_tex_instr * tex)365 nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
366 {
367 b->cursor = nir_before_instr(&tex->instr);
368
369 nir_tex_instr *tql;
370
371 unsigned num_srcs = 0;
372 for (unsigned i = 0; i < tex->num_srcs; i++) {
373 if (tex->src[i].src_type == nir_tex_src_coord ||
374 tex->src[i].src_type == nir_tex_src_texture_deref ||
375 tex->src[i].src_type == nir_tex_src_sampler_deref ||
376 tex->src[i].src_type == nir_tex_src_texture_offset ||
377 tex->src[i].src_type == nir_tex_src_sampler_offset ||
378 tex->src[i].src_type == nir_tex_src_texture_handle ||
379 tex->src[i].src_type == nir_tex_src_sampler_handle)
380 num_srcs++;
381 }
382
383 tql = nir_tex_instr_create(b->shader, num_srcs);
384 tql->op = nir_texop_lod;
385 tql->coord_components = tex->coord_components;
386 tql->sampler_dim = tex->sampler_dim;
387 tql->is_array = tex->is_array;
388 tql->is_shadow = tex->is_shadow;
389 tql->is_new_style_shadow = tex->is_new_style_shadow;
390 tql->texture_index = tex->texture_index;
391 tql->sampler_index = tex->sampler_index;
392 tql->dest_type = nir_type_float32;
393
394 unsigned idx = 0;
395 for (unsigned i = 0; i < tex->num_srcs; i++) {
396 if (tex->src[i].src_type == nir_tex_src_coord ||
397 tex->src[i].src_type == nir_tex_src_texture_deref ||
398 tex->src[i].src_type == nir_tex_src_sampler_deref ||
399 tex->src[i].src_type == nir_tex_src_texture_offset ||
400 tex->src[i].src_type == nir_tex_src_sampler_offset ||
401 tex->src[i].src_type == nir_tex_src_texture_handle ||
402 tex->src[i].src_type == nir_tex_src_sampler_handle) {
403 nir_src_copy(&tql->src[idx].src, &tex->src[i].src);
404 tql->src[idx].src_type = tex->src[i].src_type;
405 idx++;
406 }
407 }
408
409 nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32, NULL);
410 nir_builder_instr_insert(b, &tql->instr);
411
412 /* The LOD is the y component of the result */
413 return nir_channel(b, &tql->dest.ssa, 1);
414 }
415