/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Timur Kristóf
 *
 */

#include "nir.h"
#include "nir_builder.h"

typedef struct
{
   struct hash_table *range_ht;
   const nir_opt_offsets_options *options;
} opt_offsets_state;

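/* Walk an iadd chain feeding an offset source and try to peel constant
 * addends out of it, accumulating them into *out_const as long as the total
 * stays within `max`. Returns the (possibly rebuilt) remaining non-constant
 * part of the address computation.
 */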
static nir_scalar
try_extract_const_addition(nir_builder *b, nir_scalar val, opt_offsets_state *state, unsigned *out_const, uint32_t max)
{
   val = nir_scalar_chase_movs(val);

   if (!nir_scalar_is_alu(val))
      return val;

   nir_alu_instr *alu = nir_instr_as_alu(val.def->parent_instr);
   if (alu->op != nir_op_iadd)
      return val;

   nir_scalar src[2] = {
      { alu->src[0].src.ssa, alu->src[0].swizzle[val.comp] },
      { alu->src[1].src.ssa, alu->src[1].swizzle[val.comp] },
   };

   /* Make sure that we aren't taking out an addition that could trigger
    * unsigned wrapping in a way that would change the semantics of the load.
    * Ignored for ints-as-floats (lower_bitops is a proxy for that), where
    * unsigned wrapping doesn't make sense.
    */
   if (!state->options->allow_offset_wrap && !alu->no_unsigned_wrap && !b->shader->options->lower_bitops) {
      if (!state->range_ht) {
         /* Cache for nir_unsigned_upper_bound */
         state->range_ht = _mesa_pointer_hash_table_create(NULL);
      }

      /* Check if there can really be an unsigned wrap. */
      uint32_t ub0 = nir_unsigned_upper_bound(b->shader, state->range_ht, src[0], NULL);
      uint32_t ub1 = nir_unsigned_upper_bound(b->shader, state->range_ht, src[1], NULL);

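      /* For example, if ub0 == 0xffffff00 and ub1 could be as large as 0x200,
       * the addition might wrap around zero, so peeling the constant out
       * could change which memory the instruction accesses.
       */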
      if ((UINT32_MAX - ub0) < ub1)
         return val;

      /* We proved that unsigned wrap won't be possible, so we can set the flag too. */
      alu->no_unsigned_wrap = true;
   }

   for (unsigned i = 0; i < 2; ++i) {
      src[i] = nir_scalar_chase_movs(src[i]);
      if (nir_scalar_is_const(src[i])) {
         uint32_t offset = nir_scalar_as_uint(src[i]);
         if (offset + *out_const <= max) {
            *out_const += offset;
            return try_extract_const_addition(b, src[1 - i], state, out_const, max);
         }
      }
   }

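   /* Neither source is a constant that fits: recurse into both sources and
    * only rebuild the addition if some constant was actually extracted.
    */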
   uint32_t orig_offset = *out_const;
   src[0] = try_extract_const_addition(b, src[0], state, out_const, max);
   src[1] = try_extract_const_addition(b, src[1], state, out_const, max);
   if (*out_const == orig_offset)
      return val;

   b->cursor = nir_before_instr(&alu->instr);
   nir_def *r =
      nir_iadd(b, nir_channel(b, src[0].def, src[0].comp),
               nir_channel(b, src[1].def, src[1].comp));
   return nir_get_scalar(r, 0);
}

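/* Returns true if the intrinsic was rewritten: the offset source is replaced
 * by its remaining non-constant part (or zero) and the constant part is
 * folded into the BASE index, which must not exceed `max`.
 */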
static bool
try_fold_load_store(nir_builder *b,
                    nir_intrinsic_instr *intrin,
                    opt_offsets_state *state,
                    unsigned offset_src_idx,
                    uint32_t max)
{
   /* Assume that BASE is the constant offset of a load/store.
    * Try to constant-fold additions to the offset source
    * into the actual const offset of the instruction.
    */

   unsigned off_const = nir_intrinsic_base(intrin);
   nir_src *off_src = &intrin->src[offset_src_idx];
   nir_def *replace_src = NULL;

   if (off_src->ssa->bit_size != 32)
      return false;

   if (off_const > max)
      return false;

   if (!nir_src_is_const(*off_src)) {
      uint32_t add_offset = 0;
      nir_scalar val = { .def = off_src->ssa, .comp = 0 };
      val = try_extract_const_addition(b, val, state, &add_offset, max - off_const);
      if (add_offset == 0)
         return false;
      off_const += add_offset;
      b->cursor = nir_before_instr(&intrin->instr);
      replace_src = nir_channel(b, val.def, val.comp);
   } else if (nir_src_as_uint(*off_src) && nir_src_as_uint(*off_src) <= max - off_const) {
      off_const += nir_src_as_uint(*off_src);
      b->cursor = nir_before_instr(&intrin->instr);
      replace_src = nir_imm_zero(b, off_src->ssa->num_components, off_src->ssa->bit_size);
   }

   if (!replace_src)
      return false;

   nir_src_rewrite(&intrin->src[offset_src_idx], replace_src);

   assert(off_const <= max);
   nir_intrinsic_set_base(intrin, off_const);
   return true;
}

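/* load/store_shared2_amd access two addresses: offset + offset0 * stride and
 * offset + offset1 * stride. Fold a constant offset source into the offset0
 * and offset1 immediates, switching to the 64-element stride (ST64) variant
 * when both offsets allow it. The immediates only hold values 0-255, and the
 * folded constant must be a multiple of the stride.
 */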
static bool
try_fold_shared2(nir_builder *b,
                 nir_intrinsic_instr *intrin,
                 opt_offsets_state *state,
                 unsigned offset_src_idx)
{
   unsigned comp_size = (intrin->intrinsic == nir_intrinsic_load_shared2_amd ? intrin->def.bit_size : intrin->src[0].ssa->bit_size) / 8;
   unsigned stride = (nir_intrinsic_st64(intrin) ? 64 : 1) * comp_size;
   unsigned offset0 = nir_intrinsic_offset0(intrin) * stride;
   unsigned offset1 = nir_intrinsic_offset1(intrin) * stride;
   nir_src *off_src = &intrin->src[offset_src_idx];

   if (!nir_src_is_const(*off_src))
      return false;

   unsigned const_offset = nir_src_as_uint(*off_src);
   offset0 += const_offset;
   offset1 += const_offset;
   bool st64 = offset0 % (64 * comp_size) == 0 && offset1 % (64 * comp_size) == 0;
   stride = (st64 ? 64 : 1) * comp_size;
   if (const_offset % stride || offset0 > 255 * stride || offset1 > 255 * stride)
      return false;

   b->cursor = nir_before_instr(&intrin->instr);
   nir_src_rewrite(off_src, nir_imm_zero(b, 1, 32));
   nir_intrinsic_set_offset0(intrin, offset0 / stride);
   nir_intrinsic_set_offset1(intrin, offset1 / stride);
   nir_intrinsic_set_st64(intrin, st64);

   return true;
}

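/* Pick the maximum allowed constant offset for this intrinsic: an explicit
 * per-intrinsic-type limit from the options takes precedence, otherwise ask
 * the driver's max_offset_cb callback, otherwise return 0, which effectively
 * disables folding.
 */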
static uint32_t
get_max(opt_offsets_state *state, nir_intrinsic_instr *intrin, uint32_t default_val)
{
   if (default_val)
      return default_val;
   if (state->options->max_offset_cb)
      return state->options->max_offset_cb(intrin, state->options->max_offset_data);
   return 0;
}

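/* Dispatch each offset-carrying intrinsic to the matching folding helper,
 * passing the index of its offset source and the per-type offset limit.
 */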
static bool
process_instr(nir_builder *b, nir_instr *instr, void *s)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   opt_offsets_state *state = (opt_offsets_state *)s;
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_const_ir3:
      return try_fold_load_store(b, intrin, state, 0, get_max(state, intrin, state->options->uniform_max));
   case nir_intrinsic_load_ubo_vec4:
      return try_fold_load_store(b, intrin, state, 1, get_max(state, intrin, state->options->ubo_vec4_max));
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_shared_atomic_swap:
      return try_fold_load_store(b, intrin, state, 0, get_max(state, intrin, state->options->shared_atomic_max));
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_shared_ir3:
      return try_fold_load_store(b, intrin, state, 0, get_max(state, intrin, state->options->shared_max));
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_shared_ir3:
      return try_fold_load_store(b, intrin, state, 1, get_max(state, intrin, state->options->shared_max));
   case nir_intrinsic_load_shared2_amd:
      return try_fold_shared2(b, intrin, state, 0);
   case nir_intrinsic_store_shared2_amd:
      return try_fold_shared2(b, intrin, state, 1);
   case nir_intrinsic_load_buffer_amd:
      return try_fold_load_store(b, intrin, state, 1, get_max(state, intrin, state->options->buffer_max));
   case nir_intrinsic_store_buffer_amd:
   case nir_intrinsic_load_ssbo_ir3:
      return try_fold_load_store(b, intrin, state, 2, get_max(state, intrin, state->options->buffer_max));
   case nir_intrinsic_store_ssbo_ir3:
      return try_fold_load_store(b, intrin, state, 3, get_max(state, intrin, state->options->buffer_max));
   default:
      return false;
   }

   unreachable("Can't reach here.");
}

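/* nir_opt_offsets: fold constant address computations into the immediate
 * offset fields (BASE, or offset0/offset1 for shared2_amd) of load/store
 * intrinsics.
 *
 * The limits in nir_opt_offsets_options are hardware-specific and must be
 * supplied by the caller. A minimal sketch of how a backend might invoke the
 * pass (the limit values below are made up for illustration):
 *
 *    const nir_opt_offsets_options opts = {
 *       .uniform_max = 0xffff,
 *       .shared_max = 0xffff,
 *       .buffer_max = 0xffffffff,
 *    };
 *    bool progress = nir_opt_offsets(shader, &opts);
 */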
bool
nir_opt_offsets(nir_shader *shader, const nir_opt_offsets_options *options)
{
   opt_offsets_state state;
   state.range_ht = NULL;
   state.options = options;

   bool p = nir_shader_instructions_pass(shader, process_instr,
                                         nir_metadata_control_flow,
                                         &state);

   if (state.range_ht)
      _mesa_hash_table_destroy(state.range_ht, NULL);

   return p;
}