/*
 * Copyright © 2018-2019 Igalia S.L.
 * SPDX-License-Identifier: MIT
 */

#include "compiler/nir/nir_builder.h"
#include "ir3_nir.h"

/**
 * This pass moves certain offset computations for different I/O ops,
 * currently implemented in the IR3 backend compiler, into NIR, to give
 * NIR a chance to optimize them:
 *
 * - Dword-offset for SSBO load, store and atomics: a new, similar intrinsic
 *   is emitted that replaces the original one, adding a new source that
 *   holds the result of the original byte-offset source divided by 4.
 */

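/* Schematically (NIR listing simplified for illustration; the SSA names are
 * hypothetical), a load such as
 *
 *    ssa_5 = intrinsic load_ssbo (ssa_0, ssa_4) ...
 *
 * becomes
 *
 *    ssa_6 = ushr ssa_4, 2
 *    ssa_7 = intrinsic load_ssbo_ir3 (ssa_0, ssa_4, ssa_6) ...
 */
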
/* Returns the ir3-specific intrinsic opcode corresponding to an SSBO
 * instruction that is handled by this pass. It also conveniently returns
 * the offset source index in @offset_src_idx.
 *
 * If @intrinsic is not an SSBO intrinsic, or is not handled by this pass,
 * -1 is returned.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
                                     uint8_t *offset_src_idx)
{
   assert(offset_src_idx);

   *offset_src_idx = 1;

   switch (intrinsic) {
   case nir_intrinsic_store_ssbo:
      *offset_src_idx = 2;
      return nir_intrinsic_store_ssbo_ir3;
   case nir_intrinsic_load_ssbo:
      return nir_intrinsic_load_ssbo_ir3;
   case nir_intrinsic_ssbo_atomic:
      return nir_intrinsic_ssbo_atomic_ir3;
   case nir_intrinsic_ssbo_atomic_swap:
      return nir_intrinsic_ssbo_atomic_swap_ir3;
   default:
      break;
   }

   return -1;
}

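/* Tries to fold an extra constant shift into the shift amount of
 * @alu_instr. @direction gives the direction of the existing shift
 * (1 for SHL, -1 for SHR) and @shift follows the convention that
 * negative means shifting right. E.g. folding 'x >> 2' into 'x << 6'
 * yields 'x << 4'.
 *
 * Returns the adjusted shift amount as SSA, or NULL if it cannot be
 * propagated safely.
 */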
static nir_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
                                int32_t direction, int32_t shift)
{
   nir_def *shift_ssa = alu_instr->src[1].src.ssa;

   /* Only propagate if the shift is a const value so we can check value range
    * statically.
    */
   nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
   if (!const_val)
      return NULL;

   int32_t current_shift = const_val[0].i32 * direction;
   int32_t new_shift = current_shift + shift;

   /* If the merge would reverse the direction, bail out.
    * e.g., 'x << 2' then 'x >> 4' is not 'x >> 2'.
    */
   if (current_shift * new_shift < 0)
      return NULL;

   /* If the propagation would overflow an int32_t, bail out too to be on the
    * safe side.
    */
   if (new_shift < -31 || new_shift > 31)
      return NULL;

   /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
   if (shift * direction < 0)
      shift_ssa = nir_iadd_imm(b, shift_ssa, -abs(shift));
   else
      shift_ssa = nir_iadd_imm(b, shift_ssa, abs(shift));

   return shift_ssa;
}

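/* Tries to fold a constant shift into the instruction that defines @offset,
 * when that instruction is itself a bit-shift by a constant amount. @shift
 * is positive for a left shift and negative for a right shift. Returns the
 * new offset definition, or NULL if nothing could be propagated.
 */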
nir_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_def *offset,
                                int32_t shift)
{
   nir_instr *offset_instr = offset->parent_instr;
   if (offset_instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
   nir_def *shift_ssa;
   nir_def *new_offset = NULL;

   /* The first src could be something like ssa_18.x, but we only want
    * the single component.  Otherwise the ishl/ishr/ushr could turn
    * into a vec4 operation:
    */
   nir_def *src0 = nir_mov_alu(b, alu->src[0], 1);

   switch (alu->op) {
   case nir_op_ishl:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
      if (shift_ssa)
         new_offset = nir_ishl(b, src0, shift_ssa);
      break;
   case nir_op_ishr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ishr(b, src0, shift_ssa);
      break;
   case nir_op_ushr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ushr(b, src0, shift_ssa);
      break;
   default:
      return NULL;
   }

   return new_offset;
}

static nir_def *
create_shift(nir_builder *b, nir_def *offset, int shift)
{
   /* If the offset to be shifted has the form "iadd constant, foo" don't shift
    * the result but transform it to "iadd constant>>shift, (ushr foo, shift)".
    * This ensures nir_opt_offsets (which only looks for iadds) can fold the
    * constant into the immediate offset.
    */
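   /* For example, with shift == 2: '(16 + foo) >> 2' becomes
    * '4 + (foo >> 2)'.
    */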
   if (offset->parent_instr->type == nir_instr_type_alu) {
      nir_alu_instr *offset_instr = nir_instr_as_alu(offset->parent_instr);

      if (offset_instr->op == nir_op_iadd &&
          nir_src_is_const(offset_instr->src[0].src)) {
         nir_def *new_shift = ir3_nir_try_propagate_bit_shift(
            b, offset_instr->src[1].src.ssa, -shift);

         if (!new_shift)
            new_shift = nir_ushr_imm(b, offset_instr->src[1].src.ssa, shift);

         return nir_iadd_imm(
            b, new_shift,
            nir_src_as_const_value(offset_instr->src[0].src)->u32 >> shift);
      }
   }

   return nir_ushr_imm(b, offset, shift);
}

/* isam doesn't have an "untyped" field, so it can only load 1 component at a
 * time because our storage buffer descriptors use a 1-component format.
 * Therefore we need to scalarize any loads that would use isam.
 */
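/* E.g. a 4-component load becomes four 1-component loads of the same record
 * at record_offset + 0..3, recombined into a vector afterwards.
 */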
static void
scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b)
{
   struct nir_def *results[NIR_MAX_VEC_COMPONENTS];

   nir_def *descriptor = intrinsic->src[0].ssa;
   nir_def *offset = intrinsic->src[1].ssa;
   nir_def *record = nir_channel(b, offset, 0);
   nir_def *record_offset = nir_channel(b, offset, 1);

   for (unsigned i = 0; i < intrinsic->def.num_components; i++) {
      results[i] =
         nir_load_uav_ir3(b, 1, intrinsic->def.bit_size, descriptor,
                          nir_vec2(b, record,
                                   nir_iadd_imm(b, record_offset, i)),
                          .access = nir_intrinsic_access(intrinsic),
                          .align_mul = nir_intrinsic_align_mul(intrinsic),
                          .align_offset = nir_intrinsic_align_offset(intrinsic));
   }

   nir_def *result = nir_vec(b, results, intrinsic->def.num_components);

   nir_def_rewrite_uses(&intrinsic->def, result);

   nir_instr_remove(&intrinsic->instr);
}

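/* Replaces an SSBO intrinsic by its ir3-specific variant, with an extra,
 * last source holding the offset converted from bytes to the unit the
 * instruction expects (dwords for 32-bit access). Always returns true,
 * since the replacement is unconditional.
 */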
static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                      unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
   unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
   int shift = 2;

   bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
   nir_def *new_dest = NULL;

   /* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
   if ((has_dest && intrinsic->def.bit_size == 16) ||
       (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
      shift = 1;

   /* for 8-bit ssbo access, offset is in 8-bit words instead of dwords */
   if ((has_dest && intrinsic->def.bit_size == 8) ||
       (!has_dest && intrinsic->src[0].ssa->bit_size == 8))
      shift = 0;

   if ((has_dest && intrinsic->def.bit_size == 64) ||
       (!has_dest && intrinsic->src[0].ssa->bit_size == 64)) {
      shift = 1;
   }

   /* Here we create a new intrinsic and copy over all contents from the old
    * one. */

   nir_intrinsic_instr *new_intrinsic;
   nir_src *target_src;

   b->cursor = nir_before_instr(&intrinsic->instr);

   /* 'offset_src_idx' holds the index of the source that represents the offset. */
   new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

   nir_def *offset = intrinsic->src[offset_src_idx].ssa;

   /* Since we don't have value range checking, we first try to propagate
    * the division by 4 ('offset >> 2') into another bit-shift instruction that
    * possibly defines the offset. If that's the case, we emit a similar
    * instruction adjusting (merging) the shift value.
    *
    * Here we use the convention that shifting right is negative while shifting
    * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
    */
   nir_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);

   /* The new source that will hold the dword-offset is always the last
    * one for every intrinsic.
    */
   target_src = &new_intrinsic->src[num_srcs];
   *target_src = nir_src_for_ssa(offset);

   if (has_dest) {
      nir_def *dest = &intrinsic->def;
      nir_def_init(&new_intrinsic->instr, &new_intrinsic->def,
                   dest->num_components, dest->bit_size);
      new_dest = &new_intrinsic->def;
   }

   for (unsigned i = 0; i < num_srcs; i++)
      new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

   nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);

   new_intrinsic->num_components = intrinsic->num_components;

   /* If we managed to propagate the division by 4, just use the new offset
    * register and don't emit the SHR.
    */
   if (new_offset)
      offset = new_offset;
   else
      offset = create_shift(b, offset, shift);

   /* Insert the new intrinsic right before the old one. */
   nir_builder_instr_insert(b, &new_intrinsic->instr);

   /* Replace the last source of the new intrinsic by the result of
    * the offset divided by 4.
    */
   nir_src_rewrite(target_src, offset);

   if (has_dest) {
      /* Replace the uses of the original destination by that
       * of the new intrinsic.
       */
      nir_def_rewrite_uses(&intrinsic->def, new_dest);
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intrinsic->instr);

   return true;
}

static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      /* SSBO */
      int ir3_intrinsic;
      uint8_t offset_src_idx;
      ir3_intrinsic =
         get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic, &offset_src_idx);
      if (ir3_intrinsic != -1) {
         progress |= lower_offset_for_ssbo(intr, b, (unsigned)ir3_intrinsic,
                                           offset_src_idx);
      }

      if (intr->intrinsic == nir_intrinsic_load_uav_ir3 &&
          (nir_intrinsic_access(intr) & ACCESS_CAN_REORDER) &&
          ir3_bindless_resource(intr->src[0]) &&
          intr->num_components > 1) {
         b->cursor = nir_before_instr(instr);
         scalarize_load(intr, b);
         /* Scalarizing rewrites the IR, so report progress. */
         progress = true;
      }
   }

   return progress;
}

static bool
lower_io_offsets_func(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   nir_builder b = nir_builder_create(impl);

   bool progress = false;
   nir_foreach_block_safe (block, impl) {
      progress |= lower_io_offsets_block(block, &b, mem_ctx);
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_control_flow);
   }

   return progress;
}

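/* Pass entry point. A minimal usage sketch (assuming the standard NIR_PASS
 * helper; the actual call site in the ir3 pipeline may differ):
 *
 *    NIR_PASS(progress, shader, ir3_nir_lower_io_offsets);
 */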
bool
ir3_nir_lower_io_offsets(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function (function, shader) {
      if (function->impl)
         progress |= lower_io_offsets_func(function->impl);
   }

   return progress;
}

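/* Returns the maximum immediate offset that the ir3 SSBO instructions can
 * encode for @intrin (cf. the nir_opt_offsets note in create_shift); zero
 * means no immediate offset is supported.
 */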
uint32_t
ir3_nir_max_imm_offset(nir_intrinsic_instr *intrin, const void *data)
{
   const struct ir3_compiler *compiler = data;

   if (!compiler->has_ssbo_imm_offsets)
      return 0;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_ssbo_ir3:
      if ((nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) &&
          !(compiler->options.storage_8bit && intrin->def.bit_size == 8))
         return 255; /* isam.v */
      return 127;    /* ldib.b */
   case nir_intrinsic_store_ssbo_ir3:
      return 127; /* stib.b */
   default:
      return 0;
   }
}
370