/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

/**
 * Some ALU operations may not be supported in hardware at specific
 * bit-sizes.  This pass allows implementations to selectively lower such
 * operations to a bit-size that is supported natively and then converts
 * the result back to the original bit-size.
 */

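/* Example (hypothetical, not from this file): a driver lacking native 8-
 * and 16-bit integer ALU support could run those instructions at 32 bits
 * with a callback along these lines:
 *
 *    static unsigned
 *    lower_small_int_alu(const nir_instr *instr, void *data)
 *    {
 *       if (instr->type != nir_instr_type_alu)
 *          return 0;
 *
 *       const nir_alu_instr *alu = nir_instr_as_alu((nir_instr *)instr);
 *       unsigned bit_size = alu->dest.dest.ssa.bit_size;
 *       return (bit_size == 8 || bit_size == 16) ? 32 : 0;
 *    }
 *
 *    nir_lower_bit_size(shader, lower_small_int_alu, NULL);
 */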

/* Convert a source to the requested bit-size, reusing NIR's generic
 * conversion helper except where a Boolean-to-integer conversion can be
 * re-emitted directly at the target size.
 */
static nir_ssa_def *
convert_to_bit_size(nir_builder *bld, nir_ssa_def *src,
                    nir_alu_type type, unsigned bit_size)
{
   /* create b2i32(a) instead of i2i32(b2i8(a))/i2i32(b2i16(a)) */
   nir_alu_instr *alu = nir_src_as_alu_instr(nir_src_for_ssa(src));
   if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&
       alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {
      nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);
      nir_alu_src_copy(&instr->src[0], &alu->src[0]);
      return nir_builder_alu_instr_finish_and_insert(bld, instr);
   }

   return nir_convert_to_bit_size(bld, src, type, bit_size);
}

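/* Lower a single ALU instruction: convert its sources up to bit_size, emit
 * the operation at that width, and convert the result back down to the
 * original destination size.
 */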
static void
lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
{
   const nir_op op = alu->op;
   unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;

   bld->cursor = nir_before_instr(&alu->instr);

   /* Convert each source to the requested bit-size */
   nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
      nir_ssa_def *src = nir_ssa_for_alu_src(bld, alu, i);

      nir_alu_type type = nir_op_infos[op].input_types[i];
      if (nir_alu_type_get_type_size(type) == 0)
         src = convert_to_bit_size(bld, src, type, bit_size);

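      /* NIR defines shifts to wrap the shift count at the bit size of the
       * first source, so mask the count to keep the original, narrower
       * wrap-around behavior.
       */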
      if (i == 1 &&
          (op == nir_op_ishl || op == nir_op_ishr || op == nir_op_ushr)) {
         assert(util_is_power_of_two_nonzero(dst_bit_size));
         src = nir_iand(bld, src, nir_imm_int(bld, dst_bit_size - 1));
      }

      srcs[i] = src;
   }

   /* Emit the lowered ALU instruction */
   nir_ssa_def *lowered_dst = NULL;
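   /* There is no mul_high at the wider size to fall through to.  Since both
    * sources fit in the low dst_bit_size bits, a plain multiply at bit_size
    * produces the full double-width product, and the high half is just a
    * shift right by dst_bit_size.
    */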
   if (op == nir_op_imul_high || op == nir_op_umul_high) {
      assert(dst_bit_size * 2 <= bit_size);
      lowered_dst = nir_imul(bld, srcs[0], srcs[1]);
      if (nir_op_infos[op].output_type & nir_type_uint)
         lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
      else
         lowered_dst = nir_ishr_imm(bld, lowered_dst, dst_bit_size);
   } else {
      lowered_dst = nir_build_alu_src_arr(bld, op, srcs);

      /* The add_sat and sub_sat instructions need to clamp the result to the
       * range of the original type.
       */
      if (op == nir_op_iadd_sat || op == nir_op_isub_sat) {
         const int64_t int_max = u_intN_max(dst_bit_size);
         const int64_t int_min = u_intN_min(dst_bit_size);

         lowered_dst = nir_iclamp(bld, lowered_dst,
                                  nir_imm_intN_t(bld, int_min, bit_size),
                                  nir_imm_intN_t(bld, int_max, bit_size));
      } else if (op == nir_op_uadd_sat || op == nir_op_usub_sat) {
         const uint64_t uint_max = u_uintN_max(dst_bit_size);

         lowered_dst = nir_umin(bld, lowered_dst,
                                nir_imm_intN_t(bld, uint_max, bit_size));
      }
   }

   /* Convert result back to the original bit-size */
   if (nir_alu_type_get_type_size(nir_op_infos[op].output_type) == 0 &&
       dst_bit_size != bit_size) {
      nir_alu_type type = nir_op_infos[op].output_type;
      nir_ssa_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type,
                                                 dst_bit_size);
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, dst);
   } else {
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, lowered_dst);
   }
}

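/* Lower a subgroup or reduction intrinsic by cloning it with its data
 * source widened to bit_size, then narrowing the result back to the
 * original size.
 */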
static void
lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
                      unsigned bit_size)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan: {
      assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);
      const unsigned old_bit_size = intrin->dest.ssa.bit_size;
      assert(old_bit_size < bit_size);

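      /* Reductions and scans must be converted with the type of their
       * underlying ALU op, and vote_feq compares floats.  The remaining
       * (data-movement) intrinsics are bit-exact, so a zero-extending uint
       * conversion is safe.
       */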
      nir_alu_type type = nir_type_uint;
      if (nir_intrinsic_has_reduction_op(intrin))
         type = nir_op_infos[nir_intrinsic_reduction_op(intrin)].input_types[0];
      else if (intrin->intrinsic == nir_intrinsic_vote_feq)
         type = nir_type_float;

      b->cursor = nir_before_instr(&intrin->instr);
      nir_intrinsic_instr *new_intrin =
         nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));

      nir_ssa_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
                                                     type, bit_size);
      new_intrin->src[0] = nir_src_for_ssa(new_src);

      if (intrin->intrinsic == nir_intrinsic_vote_feq ||
          intrin->intrinsic == nir_intrinsic_vote_ieq) {
         /* These return a Boolean; it's always 1-bit */
         assert(new_intrin->dest.ssa.bit_size == 1);
      } else {
         /* These return the same bit size as the source; we need to adjust
          * the size and then we'll have to emit a down-cast.
          */
         assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);
         new_intrin->dest.ssa.bit_size = bit_size;
      }

      nir_builder_instr_insert(b, &new_intrin->instr);

      nir_ssa_def *res = &new_intrin->dest.ssa;
      if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
         /* For exclusive scan, we have to be careful because the identity
          * value for the higher bit size may get added into the mix by
          * disabled channels.  For some cases (imin/imax in particular),
          * this value won't convert to the right identity value when we
          * down-cast so we have to clamp it.
          */
         switch (nir_intrinsic_reduction_op(intrin)) {
         case nir_op_imin: {
            int64_t int_max = (1ull << (old_bit_size - 1)) - 1;
            res = nir_imin(b, res, nir_imm_intN_t(b, int_max, bit_size));
            break;
         }
         case nir_op_imax: {
            int64_t int_min = -(int64_t)(1ull << (old_bit_size - 1));
            res = nir_imax(b, res, nir_imm_intN_t(b, int_min, bit_size));
            break;
         }
         default:
            break;
         }
      }

      if (intrin->intrinsic != nir_intrinsic_vote_feq &&
          intrin->intrinsic != nir_intrinsic_vote_ieq)
         res = nir_u2u(b, res, old_bit_size);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
      break;
   }

   default:
      unreachable("Unsupported instruction");
   }
}

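/* Lower a phi by widening each source in its predecessor block, growing
 * the phi itself to bit_size, and narrowing the result back down after
 * the block's phis.
 */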
static void
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
                nir_phi_instr *last_phi)
{
   assert(phi->dest.is_ssa);
   unsigned old_bit_size = phi->dest.ssa.bit_size;
   assert(old_bit_size < bit_size);

   nir_foreach_phi_src(src, phi) {
      b->cursor = nir_after_block_before_jump(src->pred);
      assert(src->src.is_ssa);
      nir_ssa_def *new_src = nir_u2u(b, src->src.ssa, bit_size);

      nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
   }

   phi->dest.ssa.bit_size = bit_size;

   b->cursor = nir_after_instr(&last_phi->instr);

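   /* Use the "after" variant so the narrowing conversion we are about to
    * emit keeps the widened phi as its source rather than being rewritten
    * to consume its own result.
    */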
   nir_ssa_def *new_dest = nir_u2u(b, &phi->dest.ssa, old_bit_size);
   nir_ssa_def_rewrite_uses_after(&phi->dest.ssa, new_dest,
                                  new_dest->parent_instr);
}

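/* Walk the impl once, asking the callback which bit size, if any, each
 * instruction should be lowered to.
 */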
static bool
lower_impl(nir_function_impl *impl,
           nir_lower_bit_size_callback callback,
           void *callback_data)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      /* Stash this so we can rewrite phi destinations quickly. */
      nir_phi_instr *last_phi = nir_block_last_phi_instr(block);

      nir_foreach_instr_safe(instr, block) {
         unsigned lower_bit_size = callback(instr, callback_data);
         if (lower_bit_size == 0)
            continue;

         switch (instr->type) {
         case nir_instr_type_alu:
            lower_alu_instr(&b, nir_instr_as_alu(instr), lower_bit_size);
            break;

         case nir_instr_type_intrinsic:
            lower_intrinsic_instr(&b, nir_instr_as_intrinsic(instr),
                                  lower_bit_size);
            break;

         case nir_instr_type_phi:
            lower_phi_instr(&b, nir_instr_as_phi(instr),
                            lower_bit_size, last_phi);
            break;

         default:
            unreachable("Unsupported instruction type");
         }
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

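/* Entry point.  For each instruction, the callback returns the bit size it
 * should be lowered to, or 0 to leave it alone; see the example callback
 * sketched near the top of this file.
 */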
bool
nir_lower_bit_size(nir_shader *shader,
                   nir_lower_bit_size_callback callback,
                   void *callback_data)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, callback, callback_data);
   }

   return progress;
}

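/* Split a 64-bit phi into two 32-bit phis holding the low and high halves,
 * then pack the halves back together after the block's phis.
 */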
static void
split_phi(nir_builder *b, nir_phi_instr *phi)
{
   nir_phi_instr *lowered[2] = {
      nir_phi_instr_create(b->shader),
      nir_phi_instr_create(b->shader)
   };
   int num_components = phi->dest.ssa.num_components;
   assert(phi->dest.ssa.bit_size == 64);

   nir_foreach_phi_src(src, phi) {
      assert(num_components == src->src.ssa->num_components);

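      /* Cursor lands at the end of the predecessor block, where the 64-bit
       * source value is actually available.
       */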
      b->cursor = nir_before_src(&src->src, false);

      nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
      nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);

      nir_phi_instr_add_src(lowered[0], src->pred, nir_src_for_ssa(x));
      nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y));
   }

   nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,
                     num_components, 32, NULL);
   nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,
                     num_components, 32, NULL);

   b->cursor = nir_before_instr(&phi->instr);
   nir_builder_instr_insert(b, &lowered[0]->instr);
   nir_builder_instr_insert(b, &lowered[1]->instr);

   b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
   nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa,
                                                &lowered[1]->dest.ssa);
   nir_ssa_def_rewrite_uses(&phi->dest.ssa, merged);
   nir_instr_remove(&phi->instr);
}

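/* Phis are required to be at the start of their block, so the scan can
 * stop at the first non-phi instruction.
 */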
static bool
lower_64bit_phi_impl(nir_function_impl *impl)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_phi)
            break;

         nir_phi_instr *phi = nir_instr_as_phi(instr);
         assert(phi->dest.is_ssa);

         if (phi->dest.ssa.bit_size <= 32)
            continue;

         split_phi(&b, phi);
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

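/* Lower phis wider than 32 bits by splitting them in two.  Intended for
 * backends that cannot carry 64-bit values through a phi directly.
 */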
bool
nir_lower_64bit_phis(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_64bit_phi_impl(function->impl);
   }

   return progress;
}