/*
 * Copyright © 2018-2019 Igalia S.L.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ir3_nir.h"
#include "compiler/nir/nir_builder.h"

/**
 * This pass moves into NIR certain offset computations for different I/O
 * ops that are currently implemented in the IR3 backend compiler, to
 * give NIR a chance to optimize them:
 *
 * - Dword-offset for SSBO load, store and atomics: a new, similar intrinsic
 *   is emitted that replaces the original one, adding a new source that
 *   holds the result of the original byte-offset source divided by 4.
 */

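/* For example, a 32-bit SSBO load whose offset source (ssa_1) is in bytes:
 *
 *    vec1 32 ssa_2 = intrinsic load_ssbo (ssa_0, ssa_1) (...)
 *
 * conceptually becomes (illustrative NIR, not verbatim pass output):
 *
 *    vec1 32 ssa_3 = ushr ssa_1, 2
 *    vec1 32 ssa_4 = intrinsic load_ssbo_ir3 (ssa_0, ssa_1, ssa_3) (...)
 *
 * where the appended, last source holds the dword offset. When possible the
 * extra 'ushr' is avoided by folding the shift into the instruction that
 * already computes the offset (see ir3_nir_try_propagate_bit_shift below).
 */
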
/* Returns the ir3-specific intrinsic opcode corresponding to an SSBO
 * instruction that is handled by this pass. It also conveniently returns
 * the offset source index in @offset_src_idx.
 *
 * If @intrinsic is not an SSBO intrinsic, or is not handled by this pass,
 * -1 is returned.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
									 uint8_t *offset_src_idx)
{
	debug_assert(offset_src_idx);

	*offset_src_idx = 1;

	switch (intrinsic) {
	case nir_intrinsic_store_ssbo:
		*offset_src_idx = 2;
		return nir_intrinsic_store_ssbo_ir3;
	case nir_intrinsic_load_ssbo:
		return nir_intrinsic_load_ssbo_ir3;
	case nir_intrinsic_ssbo_atomic_add:
		return nir_intrinsic_ssbo_atomic_add_ir3;
	case nir_intrinsic_ssbo_atomic_imin:
		return nir_intrinsic_ssbo_atomic_imin_ir3;
	case nir_intrinsic_ssbo_atomic_umin:
		return nir_intrinsic_ssbo_atomic_umin_ir3;
	case nir_intrinsic_ssbo_atomic_imax:
		return nir_intrinsic_ssbo_atomic_imax_ir3;
	case nir_intrinsic_ssbo_atomic_umax:
		return nir_intrinsic_ssbo_atomic_umax_ir3;
	case nir_intrinsic_ssbo_atomic_and:
		return nir_intrinsic_ssbo_atomic_and_ir3;
	case nir_intrinsic_ssbo_atomic_or:
		return nir_intrinsic_ssbo_atomic_or_ir3;
	case nir_intrinsic_ssbo_atomic_xor:
		return nir_intrinsic_ssbo_atomic_xor_ir3;
	case nir_intrinsic_ssbo_atomic_exchange:
		return nir_intrinsic_ssbo_atomic_exchange_ir3;
	case nir_intrinsic_ssbo_atomic_comp_swap:
		return nir_intrinsic_ssbo_atomic_comp_swap_ir3;
	default:
		break;
	}

	return -1;
}

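/* Helper for ir3_nir_try_propagate_bit_shift(): given an ALU bit-shift
 * instruction and an extra @shift to merge into it, returns a new SSA def
 * holding the adjusted shift amount, or NULL if the merge is not possible
 * (non-constant shift, direction reversal, or out-of-range result).
 *
 * @direction is +1 for a left-shifting ALU op and -1 for a right-shifting
 * one, and @shift follows the same convention (negative means shift right).
 * E.g. merging a division by 4 (@shift == -2) into 'x << 4' yields the
 * shift amount for 'x << 2'.
 */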
static nir_ssa_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
								int32_t direction, int32_t shift)
{
	debug_assert(alu_instr->src[1].src.is_ssa);
	nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;

	/* Only propagate if the shift is a const value so we can check value range
	 * statically.
	 */
	nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
	if (!const_val)
		return NULL;

	int32_t current_shift = const_val[0].i32 * direction;
	int32_t new_shift = current_shift + shift;

	/* If the merge would reverse the direction, bail out.
	 * e.g., 'x << 2' then 'x >> 4' is not 'x >> 2'.
	 */
	if (current_shift * new_shift < 0)
		return NULL;

	/* If the propagation would overflow an int32_t, bail out too to be on the
	 * safe side.
	 */
	if (new_shift < -31 || new_shift > 31)
		return NULL;

	/* Add or subtract shift depending on the final direction (SHR vs. SHL). */
	if (shift * direction < 0)
		shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
	else
		shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));

	return shift_ssa;
}

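/* Tries to fold the extra @shift into the ALU instruction that already
 * computes @offset, when that instruction is an ishl/ishr/ushr with a
 * constant shift amount. Returns the new offset def on success, or NULL,
 * in which case the caller has to emit the shift itself.
 *
 * E.g. with @shift == -2 (a division by 4), an offset computed as 'i << 4'
 * can be re-emitted as 'i << 2', so no separate 'offset >> 2' is needed.
 */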
nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift)
{
	nir_instr *offset_instr = offset->parent_instr;
	if (offset_instr->type != nir_instr_type_alu)
		return NULL;

	nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
	nir_ssa_def *shift_ssa;
	nir_ssa_def *new_offset = NULL;

	/* the first src could be something like ssa_18.x, but we only want
	 * the single component.  Otherwise the ishl/ishr/ushr could turn
	 * into a vec4 operation:
	 */
	nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);

	switch (alu->op) {
	case nir_op_ishl:
		shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
		if (shift_ssa)
			new_offset = nir_ishl(b, src0, shift_ssa);
		break;
	case nir_op_ishr:
		shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
		if (shift_ssa)
			new_offset = nir_ishr(b, src0, shift_ssa);
		break;
	case nir_op_ushr:
		shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
		if (shift_ssa)
			new_offset = nir_ushr(b, src0, shift_ssa);
		break;
	default:
		return NULL;
	}

	return new_offset;
}

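/* Rewrites one SSBO intrinsic into its ir3-specific variant @ir3_ssbo_opcode:
 * the new intrinsic keeps all original sources, const indices and destination
 * uses, and gains one extra (last) source holding the offset in dwords, or in
 * 16-bit words for 16-bit access. The original intrinsic is removed.
 */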
static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
					  unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
	unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
	int shift = 2;

	bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
	nir_ssa_def *new_dest = NULL;

	/* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
	if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
		(!has_dest && intrinsic->src[0].ssa->bit_size == 16))
		shift = 1;

	/* Here we create a new intrinsic and copy over all contents from the old one. */

	nir_intrinsic_instr *new_intrinsic;
	nir_src *target_src;

	b->cursor = nir_before_instr(&intrinsic->instr);

	/* 'offset_src_idx' holds the index of the source that represents the offset. */
	new_intrinsic =
		nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

	debug_assert(intrinsic->src[offset_src_idx].is_ssa);
	nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;

	/* Since we don't have value range checking, we first try to propagate
	 * the division by 4 ('offset >> 2') into another bit-shift instruction that
	 * possibly defines the offset. If that's the case, we emit a similar
	 * instruction adjusting (merging) the shift value.
	 *
	 * Here we use the convention that shifting right is negative while shifting
	 * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
	 */
	nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);

	/* The new source that will hold the dword-offset is always the last
	 * one for every intrinsic.
	 */
	target_src = &new_intrinsic->src[num_srcs];
	*target_src = nir_src_for_ssa(offset);

	if (has_dest) {
		debug_assert(intrinsic->dest.is_ssa);
		nir_ssa_def *dest = &intrinsic->dest.ssa;
		nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
						  dest->num_components, dest->bit_size, NULL);
		new_dest = &new_intrinsic->dest.ssa;
	}

	for (unsigned i = 0; i < num_srcs; i++)
		new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

	nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);

	new_intrinsic->num_components = intrinsic->num_components;

	/* If we managed to propagate the division by 4, just use the new offset
	 * register and don't emit the SHR.
	 */
	if (new_offset)
		offset = new_offset;
	else
		offset = nir_ushr(b, offset, nir_imm_int(b, shift));

	/* Insert the new intrinsic right before the old one. */
	nir_builder_instr_insert(b, &new_intrinsic->instr);

	/* Replace the last source of the new intrinsic by the result of
	 * the offset divided by 4.
	 */
	nir_instr_rewrite_src(&new_intrinsic->instr,
						  target_src,
						  nir_src_for_ssa(offset));

	if (has_dest) {
		/* Replace the uses of the original destination by that
		 * of the new intrinsic.
		 */
		nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa,
								 nir_src_for_ssa(new_dest));
	}

	/* Finally remove the original intrinsic. */
	nir_instr_remove(&intrinsic->instr);

	return true;
}

static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx, int gpu_id)
{
	bool progress = false;

	nir_foreach_instr_safe (instr, block) {
		if (instr->type != nir_instr_type_intrinsic)
			continue;

		nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

		/* SSBO */
		int ir3_intrinsic;
		uint8_t offset_src_idx;
		ir3_intrinsic = get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic,
															 &offset_src_idx);
		if (ir3_intrinsic != -1) {
			progress |= lower_offset_for_ssbo(intr, b, (unsigned) ir3_intrinsic,
											  offset_src_idx);
		}
	}

	return progress;
}

static bool
lower_io_offsets_func(nir_function_impl *impl, int gpu_id)
{
	void *mem_ctx = ralloc_parent(impl);
	nir_builder b;
	nir_builder_init(&b, impl);

	bool progress = false;
	nir_foreach_block_safe (block, impl) {
		progress |= lower_io_offsets_block(block, &b, mem_ctx, gpu_id);
	}

	if (progress) {
		nir_metadata_preserve(impl, nir_metadata_block_index |
									nir_metadata_dominance);
	}

	return progress;
}

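/* Entry point of the pass: runs the lowering on every function
 * implementation in @shader and reports whether any intrinsic was rewritten.
 */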
bool
ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id)
{
	bool progress = false;

	nir_foreach_function (function, shader) {
		if (function->impl)
			progress |= lower_io_offsets_func(function->impl, gpu_id);
	}

	return progress;
}