/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "util/fast_idiv_by_const.h"
#include "util/u_math.h"

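/* Builds an unsigned division of n by the compile-time constant d without
 * emitting an actual division instruction.  A power-of-two divisor becomes a
 * simple right-shift.  Everything else uses the precomputed magic values
 * from util_compute_fast_udiv_info() (the multiply-and-shift scheme of
 * Granlund & Montgomery): an optional pre-shift, an optional saturating
 * increment, a high multiply by the magic multiplier, and a post-shift.
 * Division by zero is undefined in most shading languages, so d == 0 just
 * produces zero.
 */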
static nir_ssa_def *
build_udiv(nir_builder *b, nir_ssa_def *n, uint64_t d)
{
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (util_is_power_of_two_or_zero64(d)) {
      return nir_ushr_imm(b, n, util_logbase2_64(d));
   } else {
      struct util_fast_udiv_info m =
         util_compute_fast_udiv_info(d, n->bit_size, n->bit_size);

      if (m.pre_shift)
         n = nir_ushr_imm(b, n, m.pre_shift);
      if (m.increment)
         n = nir_uadd_sat(b, n, nir_imm_intN_t(b, m.increment, n->bit_size));
      n = nir_umul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
      if (m.post_shift)
         n = nir_ushr_imm(b, n, m.post_shift);

      return n;
   }
}

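/* Builds an unsigned modulo of n by the constant d.  A power-of-two divisor
 * reduces to a bitwise AND with d - 1; anything else uses the identity
 * n % d == n - (n / d) * d on top of build_udiv() above.
 */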
static nir_ssa_def *
build_umod(nir_builder *b, nir_ssa_def *n, uint64_t d)
{
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (util_is_power_of_two_or_zero64(d)) {
      return nir_iand(b, n, nir_imm_intN_t(b, d - 1, n->bit_size));
   } else {
      return nir_isub(b, n, nir_imul(b, build_udiv(b, n, d),
                                        nir_imm_intN_t(b, d, n->bit_size)));
   }
}

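/* Builds a signed division of n by the constant d.  Divisors of 0, 1, and
 * -1 have trivial answers.  For a power-of-two |d|, the quotient is computed
 * on |n| with a shift and then negated when exactly one of n and d is
 * negative.  The general case multiplies by the signed magic number from
 * util_compute_fast_sdiv_info(), applies the +n/-n correction needed when
 * the multiplier's sign differs from d's, arithmetic-shifts, and finally
 * adds the sign bit of the result so the quotient rounds toward zero.
 */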
static nir_ssa_def *
build_idiv(nir_builder *b, nir_ssa_def *n, int64_t d)
{
   /* Negate in unsigned arithmetic so that |INT64_MIN| doesn't overflow. */
   uint64_t abs_d = d < 0 ? -(uint64_t)d : d;

   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (d == 1) {
      return n;
   } else if (d == -1) {
      return nir_ineg(b, n);
   } else if (util_is_power_of_two_or_zero64(abs_d)) {
      nir_ssa_def *uq = nir_ushr_imm(b, nir_iabs(b, n), util_logbase2_64(abs_d));
      nir_ssa_def *n_neg = nir_ilt(b, n, nir_imm_intN_t(b, 0, n->bit_size));
      nir_ssa_def *neg = d < 0 ? nir_inot(b, n_neg) : n_neg;
      return nir_bcsel(b, neg, nir_ineg(b, uq), uq);
   } else {
      struct util_fast_sdiv_info m =
         util_compute_fast_sdiv_info(d, n->bit_size);

      nir_ssa_def *res =
         nir_imul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
      if (d > 0 && m.multiplier < 0)
         res = nir_iadd(b, res, n);
      if (d < 0 && m.multiplier > 0)
         res = nir_isub(b, res, n);
      if (m.shift)
         res = nir_ishr_imm(b, res, m.shift);
      /* Round toward zero by adding one (the sign bit) when the shifted
       * result is negative.
       */
      res = nir_iadd(b, res, nir_ushr_imm(b, res, n->bit_size - 1));

      return res;
   }
}

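/* Lowers a single udiv/idiv/umod ALU instruction whose divisor is constant.
 * Each vector component may have a different divisor, so the replacement is
 * built one channel at a time and the channels are recombined with nir_vec()
 * before the original instruction is removed.
 */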
static bool
nir_opt_idiv_const_instr(nir_builder *b, nir_alu_instr *alu)
{
   assert(alu->dest.dest.is_ssa);
   assert(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa);

   if (!nir_src_is_const(alu->src[1].src))
      return false;

   unsigned bit_size = alu->src[1].src.ssa->bit_size;

   b->cursor = nir_before_instr(&alu->instr);

   nir_ssa_def *q[NIR_MAX_VEC_COMPONENTS];
   for (unsigned comp = 0; comp < alu->dest.dest.ssa.num_components; comp++) {
      /* Get the numerator for the channel */
      nir_ssa_def *n = nir_channel(b, alu->src[0].src.ssa,
                                   alu->src[0].swizzle[comp]);

      /* Get the denominator for the channel */
      int64_t d = nir_src_comp_as_int(alu->src[1].src,
                                      alu->src[1].swizzle[comp]);

      nir_alu_type d_type = nir_op_infos[alu->op].input_types[1];
      if (nir_alu_type_get_base_type(d_type) == nir_type_uint) {
         /* The code above sign-extended.  If we're lowering an unsigned op,
          * we need to mask it off to the correct number of bits so that a
          * cast to uint64_t will do the right thing.
          */
         if (bit_size < 64)
            d &= (1ull << bit_size) - 1;
      }

      switch (alu->op) {
      case nir_op_udiv:
         q[comp] = build_udiv(b, n, d);
         break;
      case nir_op_idiv:
         q[comp] = build_idiv(b, n, d);
         break;
      case nir_op_umod:
         q[comp] = build_umod(b, n, d);
         break;
      default:
         unreachable("Unknown integer division op");
      }
   }

   nir_ssa_def *qvec = nir_vec(b, q, alu->dest.dest.ssa.num_components);
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(qvec));
   nir_instr_remove(&alu->instr);

   return true;
}

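/* Walks all blocks of the function and lowers every udiv/idiv/umod with a
 * destination of at least min_bit_size bits.  No control flow is added or
 * removed, so block-index and dominance metadata remain valid.
 */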
static bool
nir_opt_idiv_const_impl(nir_function_impl *impl, unsigned min_bit_size)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         if (alu->op != nir_op_udiv &&
             alu->op != nir_op_idiv &&
             alu->op != nir_op_umod)
            continue;

         assert(alu->dest.dest.is_ssa);
         if (alu->dest.dest.ssa.bit_size < min_bit_size)
            continue;

         progress |= nir_opt_idiv_const_instr(&b, alu);
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

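/* Replaces integer division and modulo by compile-time constants with
 * cheaper shift/multiply sequences.  min_bit_size lets a driver skip bit
 * sizes for which native division is already cheap, e.g. passing 32 leaves
 * 8- and 16-bit divisions untouched.
 */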
bool
nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_idiv_const_impl(function->impl, min_bit_size);
   }

   return progress;
}