/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "nir.h"
#include "nir_builder.h"

#include <float.h>
#include <math.h>

/*
 * Lowers some unsupported double operations, using only:
 *
 * - pack/unpackDouble2x32
 * - conversion to/from single-precision
 * - double add, mul, and fma
 * - conditional select
 * - 32-bit integer and floating point arithmetic
 */

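/* Throughout this file, doubles are manipulated through their IEEE-754
 * binary64 bit pattern: bit 63 is the sign, bits 62-52 hold the biased
 * exponent (bias 1023), and bits 51-0 hold the mantissa.
 */
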
/* Creates a double with the exponent bits set to a given integer value */
static nir_def *
set_exponent(nir_builder *b, nir_def *src, nir_def *exp)
{
   /* Split into bits 0-31 and 32-63 */
   nir_def *lo = nir_unpack_64_2x32_split_x(b, src);
   nir_def *hi = nir_unpack_64_2x32_split_y(b, src);

   /* The exponent is bits 52-62, or bits 20-30 of the high word, so insert
    * the new exponent value there
    */
   nir_def *new_hi = nir_bitfield_insert(b, hi, exp,
                                         nir_imm_int(b, 20),
                                         nir_imm_int(b, 11));
   /* recombine */
   return nir_pack_64_2x32_split(b, lo, new_hi);
}

static nir_def *
get_exponent(nir_builder *b, nir_def *src)
{
   /* get bits 32-63 */
   nir_def *hi = nir_unpack_64_2x32_split_y(b, src);

   /* extract bits 20-30 of the high word */
   return nir_ubitfield_extract(b, hi, nir_imm_int(b, 20), nir_imm_int(b, 11));
}

/* Returns infinity with the sign of the given source, which must be +/-0 */

static nir_def *
get_signed_inf(nir_builder *b, nir_def *zero)
{
   nir_def *zero_hi = nir_unpack_64_2x32_split_y(b, zero);

   /* The bit pattern for infinity is 0x7ff0000000000000, where the sign bit
    * is the highest bit. Only the sign bit can be non-zero in the passed-in
    * source. So we essentially need to OR the infinity and the zero, except
    * the low 32 bits are always 0, so we can construct the correct high 32
    * bits and then pack them together with zero low 32 bits.
    */
   nir_def *inf_hi = nir_ior_imm(b, zero_hi, 0x7ff00000);
   return nir_pack_64_2x32_split(b, nir_imm_int(b, 0), inf_hi);
}

/*
 * Generates the correctly-signed infinity if the source was zero, and flushes
 * the result to 0 if the source was infinity or the calculated exponent was
 * too small to be representable.
 */

static nir_def *
fix_inv_result(nir_builder *b, nir_def *res, nir_def *src,
               nir_def *exp)
{
   /* If the exponent is too small or the original input was infinity/NaN,
    * force the result to 0 (flush denorms) to avoid the work of handling
    * denorms properly. Note that this doesn't preserve positive/negative
    * zeros, but GLSL doesn't require it.
    */
   res = nir_bcsel(b, nir_ior(b, nir_ile_imm(b, exp, 0), nir_feq_imm(b, nir_fabs(b, src), INFINITY)),
                   nir_imm_double(b, 0.0f), res);

   /* If the original input was 0, generate the correctly-signed infinity */
   res = nir_bcsel(b, nir_fneu_imm(b, src, 0.0f),
                   res, get_signed_inf(b, src));

   return res;
}

static nir_def *
lower_rcp(nir_builder *b, nir_def *src)
{
   /* normalize the input to avoid range issues */
   nir_def *src_norm = set_exponent(b, src, nir_imm_int(b, 1023));

   /* cast to float, do an rcp, and then cast back to get an approximate
    * result
    */
   nir_def *ra = nir_f2f64(b, nir_frcp(b, nir_f2f32(b, src_norm)));

   /* Fixup the exponent of the result - note that we check if this is too
    * small below.
    */
   nir_def *new_exp = nir_isub(b, get_exponent(b, ra),
                               nir_iadd_imm(b, get_exponent(b, src),
                                            -1023));

   ra = set_exponent(b, ra, new_exp);

   /* Do a few Newton-Raphson steps to improve precision.
    *
    * Each step doubles the precision, and we started off with around 24 bits,
    * so we only need to do 2 steps to get to full precision. The step is:
    *
    * x_new = x * (2 - x*src)
    *
    * But we can re-arrange this to improve precision by using another fused
    * multiply-add:
    *
    * x_new = x + x * (1 - x*src)
    *
    * See https://en.wikipedia.org/wiki/Division_algorithm for more details.
    */

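   /* Each step below evaluates x + x * (1 - x*src) as
    * ffma(-x, ffma(x, src, -1), x), so the error term (1 - x*src) is kept in
    * a single fused multiply-add.
    */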
   ra = nir_ffma(b, nir_fneg(b, ra), nir_ffma_imm2(b, ra, src, -1), ra);
   ra = nir_ffma(b, nir_fneg(b, ra), nir_ffma_imm2(b, ra, src, -1), ra);

   return fix_inv_result(b, ra, src, new_exp);
}

static nir_def *
lower_sqrt_rsq(nir_builder *b, nir_def *src, bool sqrt)
{
   /* We want to compute:
    *
    * 1/sqrt(m * 2^e)
    *
    * When the exponent is even, this is equivalent to:
    *
    * 1/sqrt(m) * 2^(-e/2)
    *
    * and when the exponent is odd, this is equal to:
    *
    * 1/sqrt(m * 2) * 2^(-(e - 1)/2)
    *
    * where the m * 2 is absorbed into the exponent. So we want the exponent
    * inside the square root to be 1 if e is odd and 0 if e is even, and we
    * want to subtract off e/2 from the final exponent, rounded to negative
    * infinity. We can do the former by first computing the unbiased exponent,
    * and then AND'ing it with 1 to get 0 or 1, and we can do the latter by
    * shifting right by 1.
    */

   nir_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
                                        -1023);
   nir_def *even = nir_iand_imm(b, unbiased_exp, 1);
   nir_def *half = nir_ishr_imm(b, unbiased_exp, 1);
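   /* nir_ishr is an arithmetic (sign-extending) shift, so for negative
    * exponents e/2 is rounded toward negative infinity, as required above.
    */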

   nir_def *src_norm = set_exponent(b, src,
                                    nir_iadd_imm(b, even, 1023));

   nir_def *ra = nir_f2f64(b, nir_frsq(b, nir_f2f32(b, src_norm)));
   nir_def *new_exp = nir_isub(b, get_exponent(b, ra), half);
   ra = set_exponent(b, ra, new_exp);

   /*
    * The following implements an iterative algorithm that's very similar
    * between sqrt and rsqrt. We start with an iteration of Goldschmidt's
    * algorithm, which looks like:
    *
    * a = the source
    * y_0 = initial (single-precision) rsqrt estimate
    *
    * h_0 = .5 * y_0
    * g_0 = a * y_0
    * r_0 = .5 - h_0 * g_0
    * g_1 = g_0 * r_0 + g_0
    * h_1 = h_0 * r_0 + h_0
    *
    * Now g_1 ~= sqrt(a), and h_1 ~= 1/(2 * sqrt(a)). We could continue
    * applying another round of Goldschmidt, but since we would never refer
    * back to a (the original source), we would add too much rounding error.
    * So instead, we do one last round of Newton-Raphson, which has better
    * rounding characteristics, to get the final rounding correct. This is
    * split into two cases:
    *
    * 1. sqrt
    *
    * Normally, doing a round of Newton-Raphson for sqrt involves taking a
    * reciprocal of the original estimate, which is slow since it isn't
    * supported in HW. But we can take advantage of the fact that we already
    * computed a good estimate of 1/(2 * g_1) by rearranging it like so:
    *
    * g_2 = .5 * (g_1 + a / g_1)
    *     = g_1 + .5 * (a / g_1 - g_1)
    *     = g_1 + (.5 / g_1) * (a - g_1^2)
    *     = g_1 + h_1 * (a - g_1^2)
    *
    * The second term represents the error, and by splitting it out we can get
    * better precision by computing it as part of a fused multiply-add. Since
    * both Newton-Raphson and Goldschmidt approximately double the precision of
    * the result, these two steps should be enough.
    *
    * 2. rsqrt
    *
    * First off, note that the first round of the Goldschmidt algorithm is
    * really just a Newton-Raphson step in disguise:
    *
    * h_1 = h_0 * (.5 - h_0 * g_0) + h_0
    *     = h_0 * (1.5 - h_0 * g_0)
    *     = h_0 * (1.5 - .5 * a * y_0^2)
    *     = (.5 * y_0) * (1.5 - .5 * a * y_0^2)
    *
    * which is the standard formula multiplied by .5. Unlike in the sqrt case,
    * we don't need the inverse to do a Newton-Raphson step; we just need h_1,
    * so we can skip the calculation of g_1. Instead, we simply do another
    * Newton-Raphson step:
    *
    * y_1 = 2 * h_1
    * r_1 = .5 - h_1 * y_1 * a
    * y_2 = y_1 * r_1 + y_1
    *
    * Where the difference from Goldschmidt is that we calculate y_1 * a
    * instead of using g_1. Doing it this way should be as fast as computing
    * y_1 up front instead of h_1, and it lets us share the code for the
    * initial Goldschmidt step with the sqrt case.
    *
    * Putting it together, the computations are:
    *
    * h_0 = .5 * y_0
    * g_0 = a * y_0
    * r_0 = .5 - h_0 * g_0
    * h_1 = h_0 * r_0 + h_0
    * if sqrt:
    *    g_1 = g_0 * r_0 + g_0
    *    r_1 = a - g_1 * g_1
    *    g_2 = h_1 * r_1 + g_1
    * else:
    *    y_1 = 2 * h_1
    *    r_1 = .5 - y_1 * (h_1 * a)
    *    y_2 = y_1 * r_1 + y_1
    *
    * For more on the ideas behind this, see "Software Division and Square
    * Root Using Goldschmidt's Algorithms" by Markstein and the Wikipedia page
    * on square roots
    * (https://en.wikipedia.org/wiki/Methods_of_computing_square_roots).
    */

   nir_def *one_half = nir_imm_double(b, 0.5);
   nir_def *h_0 = nir_fmul(b, one_half, ra);
   nir_def *g_0 = nir_fmul(b, src, ra);
   nir_def *r_0 = nir_ffma(b, nir_fneg(b, h_0), g_0, one_half);
   nir_def *h_1 = nir_ffma(b, h_0, r_0, h_0);
   nir_def *res;
   if (sqrt) {
      nir_def *g_1 = nir_ffma(b, g_0, r_0, g_0);
      nir_def *r_1 = nir_ffma(b, nir_fneg(b, g_1), g_1, src);
      res = nir_ffma(b, h_1, r_1, g_1);
   } else {
      nir_def *y_1 = nir_fmul_imm(b, h_1, 2.0);
      nir_def *r_1 = nir_ffma(b, nir_fneg(b, y_1), nir_fmul(b, h_1, src),
                              one_half);
      res = nir_ffma(b, y_1, r_1, y_1);
   }

   if (sqrt) {
      /* Here, the special cases we need to handle are
       * 0 -> 0 and
       * +inf -> +inf
       */
      const bool preserve_denorms =
         b->shader->info.float_controls_execution_mode &
         FLOAT_CONTROLS_DENORM_PRESERVE_FP64;
      nir_def *src_flushed = src;
      if (!preserve_denorms) {
         src_flushed = nir_bcsel(b,
                                 nir_flt_imm(b, nir_fabs(b, src), DBL_MIN),
                                 nir_imm_double(b, 0.0),
                                 src);
      }
      res = nir_bcsel(b, nir_ior(b, nir_feq_imm(b, src_flushed, 0.0), nir_feq_imm(b, src, INFINITY)),
                      src_flushed, res);
   } else {
      res = fix_inv_result(b, res, src, new_exp);
   }

   return res;
}

static nir_def *
lower_trunc(nir_builder *b, nir_def *src)
{
   nir_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
                                        -1023);

   nir_def *frac_bits = nir_isub_imm(b, 52, unbiased_exp);

   /*
    * Decide the operation to apply depending on the unbiased exponent:
    *
    * if (unbiased_exp < 0)
    *    return 0
    * else if (unbiased_exp > 52)
    *    return src
    * else
    *    return src & (~0 << frac_bits)
    *
    * Notice that the else branch is a 64-bit integer operation that we need
    * to implement in terms of 32-bit integer arithmetic (at least until we
    * support 64-bit integer arithmetic).
    */

   /* Compute "~0 << frac_bits" in terms of hi/lo 32-bit integer math */
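   /* The masked value is only used when unbiased_exp is in [0, 52], so
    * frac_bits matters only in [0, 52] as well; since each half of the mask
    * is built with a 32-bit shift, shift counts of 32 or more are handled
    * explicitly by the bcsels below.
    */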
   nir_def *mask_lo =
      nir_bcsel(b,
                nir_ige_imm(b, frac_bits, 32),
                nir_imm_int(b, 0),
                nir_ishl(b, nir_imm_int(b, ~0), frac_bits));

   nir_def *mask_hi =
      nir_bcsel(b,
                nir_ilt_imm(b, frac_bits, 33),
                nir_imm_int(b, ~0),
                nir_ishl(b,
                         nir_imm_int(b, ~0),
                         nir_iadd_imm(b, frac_bits, -32)));

   nir_def *src_lo = nir_unpack_64_2x32_split_x(b, src);
   nir_def *src_hi = nir_unpack_64_2x32_split_y(b, src);

   return nir_bcsel(b,
                    nir_ilt_imm(b, unbiased_exp, 0),
                    nir_imm_double(b, 0.0),
                    nir_bcsel(b, nir_ige_imm(b, unbiased_exp, 53),
                              src,
                              nir_pack_64_2x32_split(b,
                                                     nir_iand(b, mask_lo, src_lo),
                                                     nir_iand(b, mask_hi, src_hi))));
}

static nir_def *
lower_floor(nir_builder *b, nir_def *src)
{
   /*
    * For x >= 0, floor(x) = trunc(x)
    * For x < 0,
    *    - if x is an integer, floor(x) = x
    *    - otherwise, floor(x) = trunc(x) - 1
    */
   nir_def *tr = nir_ftrunc(b, src);
   nir_def *positive = nir_fge_imm(b, src, 0.0);
   return nir_bcsel(b,
                    nir_ior(b, positive, nir_feq(b, src, tr)),
                    tr,
                    nir_fadd_imm(b, tr, -1.0));
}

static nir_def *
lower_ceil(nir_builder *b, nir_def *src)
{
   /* if x < 0,                    ceil(x) = trunc(x)
    * else if (x - trunc(x) == 0), ceil(x) = x
    * else,                        ceil(x) = trunc(x) + 1
    */
   nir_def *tr = nir_ftrunc(b, src);
   nir_def *negative = nir_flt_imm(b, src, 0.0);
   return nir_bcsel(b,
                    nir_ior(b, negative, nir_feq(b, src, tr)),
                    tr,
                    nir_fadd_imm(b, tr, 1.0));
}

static nir_def *
lower_fract(nir_builder *b, nir_def *src)
{
   return nir_fsub(b, src, nir_ffloor(b, src));
}

static nir_def *
lower_round_even(nir_builder *b, nir_def *src)
{
   /* Add and subtract 2**52 to round off any fractional bits. */
   nir_def *two52 = nir_imm_double(b, (double)(1ull << 52));
   nir_def *sign = nir_iand_imm(b, nir_unpack_64_2x32_split_y(b, src),
                                1ull << 31);

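   /* Mark the add/subtract as exact so later optimizations don't fold
    * (|x| + 2^52) - 2^52 back into |x| and undo the rounding.
    */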
   b->exact = true;
   nir_def *res = nir_fsub(b, nir_fadd(b, nir_fabs(b, src), two52), two52);
   b->exact = false;

   return nir_bcsel(b, nir_flt(b, nir_fabs(b, src), two52),
                    nir_pack_64_2x32_split(b, nir_unpack_64_2x32_split_x(b, res),
                                           nir_ior(b, nir_unpack_64_2x32_split_y(b, res), sign)),
                    src);
}

static nir_def *
lower_mod(nir_builder *b, nir_def *src0, nir_def *src1)
{
   /* mod(x,y) = x - y * floor(x/y)
    *
    * If the division is lowered, it could add some rounding errors that cause
    * floor() to return the quotient minus one when x = N * y. If this is the
    * case, we should return zero because mod(x, y)'s output value is [0, y).
    * But fortunately the Vulkan spec allows this kind of error; from the
    * Vulkan spec, appendix A (Precision and Operation of SPIR-V Instructions):
    *
    *   "The OpFRem and OpFMod instructions use cheap approximations of
    *   remainder, and the error can be large due to the discontinuity in
    *   trunc() and floor(). This can produce mathematically unexpected
    *   results in some cases, such as FMod(x,x) computing x rather than 0,
    *   and can also cause the result to have a different sign than the
    *   infinitely precise result."
    *
    * In practice this means the output value is actually in the interval
    * [0, y].
    *
    * While Vulkan states this behaviour explicitly, OpenGL does not, and thus
    * we would need to assume the value should be in the range [0, y); but on
    * the other hand, mod(a,b) is defined as "a - b * floor(a/b)" and OpenGL
    * allows for some error in division, so a/a could actually end up being
    * 1.0 - 1ULP; in this case floor(a/a) would end up as 0, and hence
    * mod(a,a) == a.
    *
    * In summary, in practice mod(a,a) can be "a" for both OpenGL and Vulkan.
    */
   nir_def *floor = nir_ffloor(b, nir_fdiv(b, src0, src1));

   return nir_fsub(b, src0, nir_fmul(b, src1, floor));
}

static nir_def *
lower_minmax(nir_builder *b, nir_op cmp, nir_def *src0, nir_def *src1)
{
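   /* min/max is implemented as a compare plus select. The comparisons are
    * marked exact so the NaN check isn't optimized away, and if src1 is NaN
    * (making the comparison false either way) we explicitly pick src0, the
    * other operand.
    */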
   b->exact = true;
   nir_def *src1_is_nan = nir_fneu(b, src1, src1);
   nir_def *cmp_res = nir_build_alu2(b, cmp, src0, src1);
   b->exact = false;
   nir_def *take_src0 = nir_ior(b, src1_is_nan, cmp_res);
   return nir_bcsel(b, take_src0, src0, src1);
}

static nir_def *
lower_sat(nir_builder *b, nir_def *src)
{
   b->exact = true;
   /* This will get lowered again if nir_lower_dminmax is set */
   nir_def *sat = nir_fclamp(b, src, nir_imm_double(b, 0),
                             nir_imm_double(b, 1));
   b->exact = false;
   return sat;
}

static nir_def *
lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
                            const nir_shader *softfp64,
                            nir_lower_doubles_options options)
{
   if (!(options & nir_lower_fp64_full_software))
      return NULL;

   const char *name;
   const char *mangled_name;
   const struct glsl_type *return_type = glsl_uint64_t_type();

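   /* Pick the softfp64 helper for this opcode. Doubles are passed around as
    * uint64 bit patterns, so most helpers take u64 arguments and return u64
    * unless return_type is overridden below.
    */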
   switch (instr->op) {
   case nir_op_f2i64:
      if (instr->src[0].src.ssa->bit_size != 64)
         return NULL;
      name = "__fp64_to_int64";
      mangled_name = "__fp64_to_int64(u641;";
      return_type = glsl_int64_t_type();
      break;
   case nir_op_f2u64:
      if (instr->src[0].src.ssa->bit_size != 64)
         return NULL;
      name = "__fp64_to_uint64";
      mangled_name = "__fp64_to_uint64(u641;";
      break;
   case nir_op_f2f64:
      name = "__fp32_to_fp64";
      mangled_name = "__fp32_to_fp64(f1;";
      break;
   case nir_op_f2f32:
      name = "__fp64_to_fp32";
      mangled_name = "__fp64_to_fp32(u641;";
      return_type = glsl_float_type();
      break;
   case nir_op_f2i32:
      name = "__fp64_to_int";
      mangled_name = "__fp64_to_int(u641;";
      return_type = glsl_int_type();
      break;
   case nir_op_f2u32:
      name = "__fp64_to_uint";
      mangled_name = "__fp64_to_uint(u641;";
      return_type = glsl_uint_type();
      break;
   case nir_op_b2f64:
      name = "__bool_to_fp64";
      mangled_name = "__bool_to_fp64(b1;";
      break;
   case nir_op_i2f64:
      if (instr->src[0].src.ssa->bit_size == 64) {
         name = "__int64_to_fp64";
         mangled_name = "__int64_to_fp64(i641;";
      } else {
         name = "__int_to_fp64";
         mangled_name = "__int_to_fp64(i1;";
      }
      break;
   case nir_op_u2f64:
      if (instr->src[0].src.ssa->bit_size == 64) {
         name = "__uint64_to_fp64";
         mangled_name = "__uint64_to_fp64(u641;";
      } else {
         name = "__uint_to_fp64";
         mangled_name = "__uint_to_fp64(u1;";
      }
      break;
   case nir_op_fabs:
      name = "__fabs64";
      mangled_name = "__fabs64(u641;";
      break;
   case nir_op_fneg:
      name = "__fneg64";
      mangled_name = "__fneg64(u641;";
      break;
   case nir_op_fround_even:
      name = "__fround64";
      mangled_name = "__fround64(u641;";
      break;
   case nir_op_ftrunc:
      name = "__ftrunc64";
      mangled_name = "__ftrunc64(u641;";
      break;
   case nir_op_ffloor:
      name = "__ffloor64";
      mangled_name = "__ffloor64(u641;";
      break;
   case nir_op_ffract:
      name = "__ffract64";
      mangled_name = "__ffract64(u641;";
      break;
   case nir_op_fsign:
      name = "__fsign64";
      mangled_name = "__fsign64(u641;";
      break;
   case nir_op_feq:
      name = "__feq64";
      mangled_name = "__feq64(u641;u641;";
      return_type = glsl_bool_type();
      break;
   case nir_op_fneu:
      name = "__fneu64";
      mangled_name = "__fneu64(u641;u641;";
      return_type = glsl_bool_type();
      break;
   case nir_op_flt:
      name = "__flt64";
      mangled_name = "__flt64(u641;u641;";
      return_type = glsl_bool_type();
      break;
   case nir_op_fge:
      name = "__fge64";
      mangled_name = "__fge64(u641;u641;";
      return_type = glsl_bool_type();
      break;
   case nir_op_fmin:
      name = "__fmin64";
      mangled_name = "__fmin64(u641;u641;";
      break;
   case nir_op_fmax:
      name = "__fmax64";
      mangled_name = "__fmax64(u641;u641;";
      break;
   case nir_op_fadd:
      name = "__fadd64";
      mangled_name = "__fadd64(u641;u641;";
      break;
   case nir_op_fmul:
      name = "__fmul64";
      mangled_name = "__fmul64(u641;u641;";
      break;
   case nir_op_ffma:
      name = "__ffma64";
      mangled_name = "__ffma64(u641;u641;u641;";
      break;
   case nir_op_fsat:
      name = "__fsat64";
      mangled_name = "__fsat64(u641;";
      break;
   case nir_op_fisfinite:
      name = "__fisfinite64";
      mangled_name = "__fisfinite64(u641;";
      return_type = glsl_bool_type();
      break;
   default:
      return NULL;
   }

   assert(softfp64 != NULL);
   nir_function *func = nir_shader_get_function_for_name(softfp64, name);

   /* Another attempt, but this time with mangled names in case the softfp64
    * shader is taken from SPIR-V.
    */
   if (!func)
      func = nir_shader_get_function_for_name(softfp64, mangled_name);

   if (!func || !func->impl) {
      fprintf(stderr, "Cannot find function \"%s\"\n", name);
      assert(func);
   }

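   /* The softfp64 helpers return their result through a deref passed as the
    * first parameter; the remaining slots hold up to three ALU sources.
    */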
   nir_def *params[4] = {
      NULL,
   };

   nir_variable *ret_tmp =
      nir_local_variable_create(b->impl, return_type, "return_tmp");
   nir_deref_instr *ret_deref = nir_build_deref_var(b, ret_tmp);
   params[0] = &ret_deref->def;

   assert(nir_op_infos[instr->op].num_inputs + 1 == func->num_params);
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(i + 1 < ARRAY_SIZE(params));
      params[i + 1] = nir_mov_alu(b, instr->src[i], 1);
   }

   nir_inline_function_impl(b, func->impl, params, NULL);

   return nir_load_deref(b, ret_deref);
}

nir_lower_doubles_options
nir_lower_doubles_op_to_options_mask(nir_op opcode)
{
   switch (opcode) {
   case nir_op_frcp:
      return nir_lower_drcp;
   case nir_op_fsqrt:
      return nir_lower_dsqrt;
   case nir_op_frsq:
      return nir_lower_drsq;
   case nir_op_ftrunc:
      return nir_lower_dtrunc;
   case nir_op_ffloor:
      return nir_lower_dfloor;
   case nir_op_fceil:
      return nir_lower_dceil;
   case nir_op_ffract:
      return nir_lower_dfract;
   case nir_op_fround_even:
      return nir_lower_dround_even;
   case nir_op_fmod:
      return nir_lower_dmod;
   case nir_op_fsub:
      return nir_lower_dsub;
   case nir_op_fdiv:
      return nir_lower_ddiv;
   case nir_op_fmin:
   case nir_op_fmax:
      return nir_lower_dminmax;
   case nir_op_fsat:
      return nir_lower_dsat;
   default:
      return 0;
   }
}

struct lower_doubles_data {
   const nir_shader *softfp64;
   nir_lower_doubles_options options;
};

static bool
should_lower_double_instr(const nir_instr *instr, const void *_data)
{
   const struct lower_doubles_data *data = _data;
   const nir_lower_doubles_options options = data->options;

   if (instr->type != nir_instr_type_alu)
      return false;

   const nir_alu_instr *alu = nir_instr_as_alu(instr);

   bool is_64 = alu->def.bit_size == 64;

   unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
   for (unsigned i = 0; i < num_srcs; i++) {
      is_64 |= (nir_src_bit_size(alu->src[i].src) == 64);
   }

   if (!is_64)
      return false;

   if (options & nir_lower_fp64_full_software)
      return true;

   return options & nir_lower_doubles_op_to_options_mask(alu->op);
}

static nir_def *
lower_doubles_instr(nir_builder *b, nir_instr *instr, void *_data)
{
   const struct lower_doubles_data *data = _data;
   const nir_lower_doubles_options options = data->options;
   nir_alu_instr *alu = nir_instr_as_alu(instr);

   nir_def *soft_def =
      lower_doubles_instr_to_soft(b, alu, data->softfp64, options);
   if (soft_def)
      return soft_def;

   if (!(options & nir_lower_doubles_op_to_options_mask(alu->op)))
      return NULL;

   nir_def *src = nir_mov_alu(b, alu->src[0],
                              alu->def.num_components);

   switch (alu->op) {
   case nir_op_frcp:
      return lower_rcp(b, src);
   case nir_op_fsqrt:
      return lower_sqrt_rsq(b, src, true);
   case nir_op_frsq:
      return lower_sqrt_rsq(b, src, false);
   case nir_op_ftrunc:
      return lower_trunc(b, src);
   case nir_op_ffloor:
      return lower_floor(b, src);
   case nir_op_fceil:
      return lower_ceil(b, src);
   case nir_op_ffract:
      return lower_fract(b, src);
   case nir_op_fround_even:
      return lower_round_even(b, src);
   case nir_op_fsat:
      return lower_sat(b, src);

   case nir_op_fdiv:
   case nir_op_fsub:
   case nir_op_fmod:
   case nir_op_fmin:
   case nir_op_fmax: {
      nir_def *src1 = nir_mov_alu(b, alu->src[1],
                                  alu->def.num_components);
      switch (alu->op) {
      case nir_op_fdiv:
         return nir_fmul(b, src, nir_frcp(b, src1));
      case nir_op_fsub:
         return nir_fadd(b, src, nir_fneg(b, src1));
      case nir_op_fmod:
         return lower_mod(b, src, src1);
      case nir_op_fmin:
         return lower_minmax(b, nir_op_flt, src, src1);
      case nir_op_fmax:
         return lower_minmax(b, nir_op_fge, src, src1);
      default:
         unreachable("unhandled opcode");
      }
   }
   default:
      unreachable("unhandled opcode");
   }
}

static bool
nir_lower_doubles_impl(nir_function_impl *impl,
                       const nir_shader *softfp64,
                       nir_lower_doubles_options options)
{
   struct lower_doubles_data data = {
      .softfp64 = softfp64,
      .options = options,
   };

   bool progress =
      nir_function_impl_lower_instructions(impl,
                                           should_lower_double_instr,
                                           lower_doubles_instr,
                                           &data);

   if (progress && (options & nir_lower_fp64_full_software)) {
      /* Indices are completely messed up now */
      nir_index_ssa_defs(impl);

      nir_metadata_preserve(impl, nir_metadata_none);

      /* And we have deref casts we need to clean up thanks to function
       * inlining.
       */
      nir_opt_deref_impl(impl);
   } else if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                     nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

bool
nir_lower_doubles(nir_shader *shader,
                  const nir_shader *softfp64,
                  nir_lower_doubles_options options)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress |= nir_lower_doubles_impl(impl, softfp64, options);
   }

   return progress;
}