/*
 * Copyright © 2018-2019 Igalia S.L.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "ir3_nir.h"

/**
 * This pass moves to NIR certain offset computations for different I/O
 * ops that are currently implemented in the IR3 backend compiler, to
 * give NIR a chance to optimize them:
 *
 * - Dword-offset for SSBO load, store and atomics: a new, similar intrinsic
 *   is emitted that replaces the original one, adding a new source that
 *   holds the result of the original byte-offset source divided by 4.
 */

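/* As a rough illustration (approximate NIR syntax, not exact pass output),
 * a load such as:
 *
 *    ssa_2 = intrinsic load_ssbo (ssa_0, ssa_1) (...)
 *
 * where ssa_1 is the byte offset, is replaced with something like:
 *
 *    ssa_3 = ushr ssa_1, 2
 *    ssa_4 = intrinsic load_ssbo_ir3 (ssa_0, ssa_1, ssa_3) (...)
 *
 * so NIR's regular optimizations (constant folding, CSE, etc.) get a chance
 * to simplify the dword-offset computation before it reaches the backend.
 */
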
/* Returns the ir3-specific intrinsic opcode corresponding to an SSBO
 * instruction that is handled by this pass. It also conveniently returns
 * the offset source index in @offset_src_idx.
 *
 * If @intrinsic is not an SSBO intrinsic, or it is not handled by this pass,
 * -1 is returned.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
                                     uint8_t *offset_src_idx)
{
   assert(offset_src_idx);

   *offset_src_idx = 1;

   switch (intrinsic) {
   case nir_intrinsic_store_ssbo:
      *offset_src_idx = 2;
      return nir_intrinsic_store_ssbo_ir3;
   case nir_intrinsic_load_ssbo:
      return nir_intrinsic_load_ssbo_ir3;
   case nir_intrinsic_ssbo_atomic:
      return nir_intrinsic_ssbo_atomic_ir3;
   case nir_intrinsic_ssbo_atomic_swap:
      return nir_intrinsic_ssbo_atomic_swap_ir3;
   default:
      break;
   }

   return -1;
}

static nir_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
                                int32_t direction, int32_t shift)
{
   nir_def *shift_ssa = alu_instr->src[1].src.ssa;

   /* Only propagate if the shift is a const value so we can check the value
    * range statically.
    */
   nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
   if (!const_val)
      return NULL;

   int32_t current_shift = const_val[0].i32 * direction;
   int32_t new_shift = current_shift + shift;

   /* If the merge would reverse the direction, bail out.
    * e.g., 'x << 2' followed by 'x >> 4' is not 'x >> 2'.
    */
   if (current_shift * new_shift < 0)
      return NULL;

   /* If the propagation would overflow an int32_t, bail out too to be on the
    * safe side.
    */
   if (new_shift < -31 || new_shift > 31)
      return NULL;

   /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
   if (shift * direction < 0)
      shift_ssa = nir_iadd_imm(b, shift_ssa, -abs(shift));
   else
      shift_ssa = nir_iadd_imm(b, shift_ssa, abs(shift));

   return shift_ssa;
}

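/* Try to fold an extra constant shift into the ishl/ishr/ushr ALU
 * instruction that defines @offset, following the convention used by this
 * pass that a negative @shift means "shift right" and a positive one means
 * "shift left".
 *
 * Rough sketch of the intended effect: if @offset is defined as 'a << 4'
 * and @shift is -2, the returned def is 'a << 2', so the caller does not
 * need to emit a separate shift of the offset.
 *
 * Returns NULL if the shift could not be propagated.
 */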
nir_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_def *offset,
                                int32_t shift)
{
   nir_instr *offset_instr = offset->parent_instr;
   if (offset_instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
   nir_def *shift_ssa;
   nir_def *new_offset = NULL;

   /* the first src could be something like ssa_18.x, but we only want
    * the single component. Otherwise the ishl/ishr/ushr could turn
    * into a vec4 operation:
    */
   nir_def *src0 = nir_mov_alu(b, alu->src[0], 1);

   switch (alu->op) {
   case nir_op_ishl:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
      if (shift_ssa)
         new_offset = nir_ishl(b, src0, shift_ssa);
      break;
   case nir_op_ishr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ishr(b, src0, shift_ssa);
      break;
   case nir_op_ushr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ushr(b, src0, shift_ssa);
      break;
   default:
      return NULL;
   }

   return new_offset;
}

/* isam doesn't have an "untyped" field, so it can only load 1 component at a
 * time because our storage buffer descriptors use a 1-component format.
 * Therefore we need to scalarize any loads that would use isam.
 */
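/* For example (illustrative only): a 4-component 32-bit load at byte offset
 * 'off' and dword offset 'doff' becomes four single-component loads at byte
 * offsets off+0/4/8/12 and dword offsets doff+0/1/2/3, recombined into a
 * vec4.
 */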
static void
scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b)
{
   struct nir_def *results[NIR_MAX_VEC_COMPONENTS];

   nir_def *descriptor = intrinsic->src[0].ssa;
   nir_def *offset = intrinsic->src[1].ssa;
   nir_def *new_offset = intrinsic->src[2].ssa;
   unsigned comp_size = intrinsic->def.bit_size / 8;
   for (unsigned i = 0; i < intrinsic->def.num_components; i++) {
      results[i] =
         nir_load_ssbo_ir3(b, 1, intrinsic->def.bit_size, descriptor,
                           nir_iadd_imm(b, offset, i * comp_size),
                           nir_iadd_imm(b, new_offset, i),
                           .access = nir_intrinsic_access(intrinsic),
                           .align_mul = nir_intrinsic_align_mul(intrinsic),
                           .align_offset = nir_intrinsic_align_offset(intrinsic));
   }

   nir_def *result = nir_vec(b, results, intrinsic->def.num_components);

   nir_def_rewrite_uses(&intrinsic->def, result);

   nir_instr_remove(&intrinsic->instr);
}

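/* Rewrites a single SSBO intrinsic into its ir3-specific variant, appending
 * one extra source that holds the offset in dwords (or in 16-bit words for
 * 16-bit accesses), and removes the original intrinsic.
 */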
static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                      unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
   unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
   int shift = 2;

   bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
   nir_def *new_dest = NULL;

   /* For 16-bit SSBO access, the offset is in 16-bit words instead of dwords. */
   if ((has_dest && intrinsic->def.bit_size == 16) ||
       (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
      shift = 1;

   /* Here we create a new intrinsic and copy over all contents from the old
    * one. */

   nir_intrinsic_instr *new_intrinsic;
   nir_src *target_src;

   b->cursor = nir_before_instr(&intrinsic->instr);

   /* 'offset_src_idx' holds the index of the source that represents the offset. */
   new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

   nir_def *offset = intrinsic->src[offset_src_idx].ssa;

   /* Since we don't have value range checking, we first try to propagate
    * the division by 4 ('offset >> 2') into another bit-shift instruction that
    * possibly defines the offset. If that's the case, we emit a similar
    * instruction, adjusting (merging) the shift value.
    *
    * Here we use the convention that shifting right is negative while shifting
    * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
    */
   nir_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);

   /* The new source that will hold the dword-offset is always the last
    * one for every intrinsic.
    */
   target_src = &new_intrinsic->src[num_srcs];
   *target_src = nir_src_for_ssa(offset);

   if (has_dest) {
      nir_def *dest = &intrinsic->def;
      nir_def_init(&new_intrinsic->instr, &new_intrinsic->def,
                   dest->num_components, dest->bit_size);
      new_dest = &new_intrinsic->def;
   }

   for (unsigned i = 0; i < num_srcs; i++)
      new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

   nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);

   new_intrinsic->num_components = intrinsic->num_components;

   /* If we managed to propagate the division by 4, just use the new offset
    * register and don't emit the SHR.
    */
   if (new_offset)
      offset = new_offset;
   else
      offset = nir_ushr_imm(b, offset, shift);

   /* Insert the new intrinsic right before the old one. */
   nir_builder_instr_insert(b, &new_intrinsic->instr);

   /* Replace the last source of the new intrinsic by the result of
    * the offset divided by 4.
    */
   nir_src_rewrite(target_src, offset);

   if (has_dest) {
      /* Replace the uses of the original destination by that
       * of the new intrinsic.
       */
      nir_def_rewrite_uses(&intrinsic->def, new_dest);
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intrinsic->instr);

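   /* Loads that can be reordered and use a bindless descriptor may be
    * emitted as isam which, as noted above scalarize_load(), can only fetch
    * a single component per instruction, so split vector loads here.
    */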
   if (new_intrinsic->intrinsic == nir_intrinsic_load_ssbo_ir3 &&
       (nir_intrinsic_access(new_intrinsic) & ACCESS_CAN_REORDER) &&
       ir3_bindless_resource(new_intrinsic->src[0]) &&
       new_intrinsic->num_components > 1)
      scalarize_load(new_intrinsic, b);

   return true;
}

static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      /* SSBO */
      int ir3_intrinsic;
      uint8_t offset_src_idx;
      ir3_intrinsic =
         get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic, &offset_src_idx);
      if (ir3_intrinsic != -1) {
         progress |= lower_offset_for_ssbo(intr, b, (unsigned)ir3_intrinsic,
                                           offset_src_idx);
      }
   }

   return progress;
}

static bool
lower_io_offsets_func(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   nir_builder b = nir_builder_create(impl);

   bool progress = false;
   nir_foreach_block_safe (block, impl) {
      progress |= lower_io_offsets_block(block, &b, mem_ctx);
   }

   if (progress) {
      nir_metadata_preserve(impl,
                            nir_metadata_block_index | nir_metadata_dominance);
   }

   return progress;
}

bool
ir3_nir_lower_io_offsets(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function (function, shader) {
      if (function->impl)
         progress |= lower_io_offsets_func(function->impl);
   }

   return progress;
}