/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

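/* Lower a 64-bit multiply to 32-bit operations.  Splitting each operand
 * into 32-bit halves, x = x_hi * 2^32 + x_lo, the low 64 bits of x * y are
 *
 *    lo32(x_lo * y_lo) +
 *    (hi32(x_lo * y_lo) + lo32(x_lo * y_hi) + lo32(x_hi * y_lo)) * 2^32
 *
 * The x_hi * y_hi term only affects bits 64 and above, so it is dropped.
 */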
static nir_ssa_def *
lower_imul64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
{
   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);

   nir_ssa_def *res_lo = nir_imul(b, x_lo, y_lo);
   nir_ssa_def *res_hi = nir_iadd(b, nir_umul_high(b, x_lo, y_lo),
                         nir_iadd(b, nir_imul(b, x_lo, y_hi),
                                     nir_imul(b, x_hi, y_lo)));

   return nir_pack_64_2x32_split(b, res_lo, res_hi);
}

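/* Lower a 64-bit sign computation.  Arithmetically shifting the high word
 * right by 31 yields 0 for non-negative x and -1 (all ones) for negative x,
 * which is the correct high word of the result in every case.  OR'ing a 1
 * into the low word when any bit of x is set turns it into 1 for positive
 * x while leaving 0 (x == 0) and ~0 (x < 0) unchanged.
 */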
static nir_ssa_def *
lower_isign64(nir_builder *b, nir_ssa_def *x)
{
   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);

   nir_ssa_def *is_non_zero = nir_i2b(b, nir_ior(b, x_lo, x_hi));
   nir_ssa_def *res_hi = nir_ishr(b, x_hi, nir_imm_int(b, 31));
   nir_ssa_def *res_lo = nir_ior(b, res_hi, nir_b2i(b, is_non_zero));

   return nir_pack_64_2x32_split(b, res_lo, res_hi);
}

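/* Lower a 64-bit unsigned division and modulus to 32-bit operations using
 * shift-and-subtract long division.  The quotient is built in two passes:
 * the first computes the high 32 quotient bits by dividing n_hi by d_lo
 * (only possible when d_hi == 0), and the second computes the low 32
 * quotient bits against the full 64-bit denominator.  Whatever remains of
 * the numerator at the end is the remainder.
 */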
static void
lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
                   nir_ssa_def **q, nir_ssa_def **r)
{
   /* TODO: We should specially handle the case where the denominator is a
    * constant.  In that case, we should be able to reduce it to a multiply by
    * a constant, some shifts, and an add.
    */
   nir_ssa_def *n_lo = nir_unpack_64_2x32_split_x(b, n);
   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
   nir_ssa_def *d_lo = nir_unpack_64_2x32_split_x(b, d);
   nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);

   nir_const_value v = { .u32 = { 0, 0, 0, 0 } };
   nir_ssa_def *q_lo = nir_build_imm(b, n->num_components, 32, v);
   nir_ssa_def *q_hi = nir_build_imm(b, n->num_components, 32, v);

   nir_ssa_def *n_hi_before_if = n_hi;
   nir_ssa_def *q_hi_before_if = q_hi;

   /* If the upper 32 bits of denom are non-zero, it is impossible for shifts
    * greater than 32 bits to occur.  If the upper 32 bits of the numerator
    * are zero, it is impossible for (denom << [63, 32]) <= numer unless
    * denom == 0.
    */
   nir_ssa_def *need_high_div =
      nir_iand(b, nir_ieq(b, d_hi, nir_imm_int(b, 0)), nir_uge(b, n_hi, d_lo));
   nir_push_if(b, nir_bany(b, need_high_div));
   {
      /* If we only have one component, then the bany above goes away and
       * this is always true within the if statement.
       */
      if (n->num_components == 1)
         need_high_div = nir_imm_int(b, NIR_TRUE);

      nir_ssa_def *log2_d_lo = nir_ufind_msb(b, d_lo);

      for (int i = 31; i >= 0; i--) {
         /* if ((d.x << i) <= n.y) {
          *    n.y -= d.x << i;
          *    quot.y |= 1U << i;
          * }
          */
         nir_ssa_def *d_shift = nir_ishl(b, d_lo, nir_imm_int(b, i));
         nir_ssa_def *new_n_hi = nir_isub(b, n_hi, d_shift);
         nir_ssa_def *new_q_hi = nir_ior(b, q_hi, nir_imm_int(b, 1u << i));
         nir_ssa_def *cond = nir_iand(b, need_high_div,
                                         nir_uge(b, n_hi, d_shift));
         if (i != 0) {
            /* log2_d_lo is always <= 31, so we don't need to bother with it
             * in the last iteration.
             */
            cond = nir_iand(b, cond,
                               nir_ige(b, nir_imm_int(b, 31 - i), log2_d_lo));
         }
         n_hi = nir_bcsel(b, cond, new_n_hi, n_hi);
         q_hi = nir_bcsel(b, cond, new_q_hi, q_hi);
      }
   }
   nir_pop_if(b, NULL);
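   /* n_hi and q_hi were rewritten inside the if block; phis merge them
    * with the original values along the path that skipped the high-word
    * division.
    */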
   n_hi = nir_if_phi(b, n_hi, n_hi_before_if);
   q_hi = nir_if_phi(b, q_hi, q_hi_before_if);

   nir_ssa_def *log2_denom = nir_ufind_msb(b, d_hi);

   n = nir_pack_64_2x32_split(b, n_lo, n_hi);
   d = nir_pack_64_2x32_split(b, d_lo, d_hi);
   for (int i = 31; i >= 0; i--) {
      /* if ((d64 << i) <= n64) {
       *    n64 -= d64 << i;
       *    quot.x |= 1U << i;
       * }
       */
      nir_ssa_def *d_shift = nir_ishl(b, d, nir_imm_int(b, i));
      nir_ssa_def *new_n = nir_isub(b, n, d_shift);
      nir_ssa_def *new_q_lo = nir_ior(b, q_lo, nir_imm_int(b, 1u << i));
      nir_ssa_def *cond = nir_uge(b, n, d_shift);
      if (i != 0) {
         /* log2_denom is always <= 31, so we don't need to bother with it
          * in the last iteration.
          */
         cond = nir_iand(b, cond,
                            nir_ige(b, nir_imm_int(b, 31 - i), log2_denom));
      }
      n = nir_bcsel(b, cond, new_n, n);
      q_lo = nir_bcsel(b, cond, new_q_lo, q_lo);
   }

   *q = nir_pack_64_2x32_split(b, q_lo, q_hi);
   *r = n;
}

static nir_ssa_def *
lower_udiv64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
{
   nir_ssa_def *q, *r;
   lower_udiv64_mod64(b, n, d, &q, &r);
   return q;
}

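/* Lower a 64-bit signed division.  The division itself is done on the
 * absolute values; the quotient is then negated when exactly one operand
 * is negative, i.e. when the sign bits of the high words differ.
 */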
static nir_ssa_def *
lower_idiv64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
{
   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
   nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);

   nir_ssa_def *negate = nir_ine(b, nir_ilt(b, n_hi, nir_imm_int(b, 0)),
                                    nir_ilt(b, d_hi, nir_imm_int(b, 0)));
   nir_ssa_def *q, *r;
   lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);
   return nir_bcsel(b, negate, nir_ineg(b, q), q);
}

static nir_ssa_def *
lower_umod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
{
   nir_ssa_def *q, *r;
   lower_udiv64_mod64(b, n, d, &q, &r);
   return r;
}

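/* Lower a 64-bit signed modulus with the sign of the result following the
 * sign of the denominator (GLSL-style): a zero remainder stays zero,
 * operands with matching signs use the remainder directly, and otherwise
 * the denominator is added to pull the remainder into its range.
 */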
static nir_ssa_def *
lower_imod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
{
   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
   nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
   nir_ssa_def *n_is_neg = nir_ilt(b, n_hi, nir_imm_int(b, 0));
   nir_ssa_def *d_is_neg = nir_ilt(b, d_hi, nir_imm_int(b, 0));

   nir_ssa_def *q, *r;
   lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);

   nir_ssa_def *rem = nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);

   return nir_bcsel(b, nir_ieq(b, r, nir_imm_int64(b, 0)), nir_imm_int64(b, 0),
          nir_bcsel(b, nir_ieq(b, n_is_neg, d_is_neg), rem,
                       nir_iadd(b, rem, d)));
}

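/* Lower a 64-bit signed remainder with the sign of the result following
 * the sign of the numerator (C-style): divide the absolute values and
 * negate the remainder when the numerator is negative.
 */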
static nir_ssa_def *
lower_irem64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
{
   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
   nir_ssa_def *n_is_neg = nir_ilt(b, n_hi, nir_imm_int(b, 0));

   nir_ssa_def *q, *r;
   lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);
   return nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);
}

static nir_lower_int64_options
opcode_to_options_mask(nir_op opcode)
{
   switch (opcode) {
   case nir_op_imul:
      return nir_lower_imul64;
   case nir_op_isign:
      return nir_lower_isign64;
   case nir_op_udiv:
   case nir_op_idiv:
   case nir_op_umod:
   case nir_op_imod:
   case nir_op_irem:
      return nir_lower_divmod64;
   default:
      return 0;
   }
}

static nir_ssa_def *
lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
{
   nir_ssa_def *src[4];
   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
      src[i] = nir_ssa_for_alu_src(b, alu, i);

   switch (alu->op) {
   case nir_op_imul:
      return lower_imul64(b, src[0], src[1]);
   case nir_op_isign:
      return lower_isign64(b, src[0]);
   case nir_op_udiv:
      return lower_udiv64(b, src[0], src[1]);
   case nir_op_idiv:
      return lower_idiv64(b, src[0], src[1]);
   case nir_op_umod:
      return lower_umod64(b, src[0], src[1]);
   case nir_op_imod:
      return lower_imod64(b, src[0], src[1]);
   case nir_op_irem:
      return lower_irem64(b, src[0], src[1]);
   default:
      unreachable("Invalid ALU opcode to lower");
   }
}

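/* Walk every ALU instruction in the impl and lower the 64-bit ones covered
 * by the requested options.  nir_foreach_instr_safe is required here
 * because the lowered instruction is removed while iterating.
 */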
static bool
lower_int64_impl(nir_function_impl *impl, nir_lower_int64_options options)
{
   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         assert(alu->dest.dest.is_ssa);
         if (alu->dest.dest.ssa.bit_size != 64)
            continue;

         if (!(options & opcode_to_options_mask(alu->op)))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *lowered = lower_int64_alu_instr(&b, alu);
         nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
                                  nir_src_for_ssa(lowered));
         nir_instr_remove(&alu->instr);
         progress = true;
      }
   }

   return progress;
}

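/* Entry point.  A backend without native 64-bit integer multiply and
 * division would typically call this with the option bits matching its
 * missing hardware support, e.g.:
 *
 *    nir_lower_int64(shader, nir_lower_imul64 | nir_lower_divmod64);
 *
 * (illustrative call; which options to pass depends on the backend).
 */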
bool
nir_lower_int64(nir_shader *shader, nir_lower_int64_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_int64_impl(function->impl, options);
   }

   return progress;
}