/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Timur Kristóf
 *
 */

#include "nir.h"
#include "nir_builder.h"

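/* Pass-wide state: a lazily created cache for nir_unsigned_upper_bound()
 * results and the caller-provided limits on folded constant offsets.
 */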
typedef struct
{
   struct hash_table *range_ht;
   const nir_opt_offsets_options *options;
} opt_offsets_state;

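/* Walk the chain of iadds feeding "val" and accumulate any constant addends
 * into *out_const, as long as the running total stays within "max". Returns
 * the remaining non-constant part of the expression.
 */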
static nir_ssa_scalar
try_extract_const_addition(nir_builder *b, nir_ssa_scalar val, opt_offsets_state *state, unsigned *out_const, uint32_t max)
{
   val = nir_ssa_scalar_chase_movs(val);

   if (!nir_ssa_scalar_is_alu(val))
      return val;

   nir_alu_instr *alu = nir_instr_as_alu(val.def->parent_instr);
   if (alu->op != nir_op_iadd ||
       !alu->src[0].src.is_ssa ||
       !alu->src[1].src.is_ssa ||
       alu->src[0].negate || alu->src[0].abs ||
       alu->src[1].negate || alu->src[1].abs)
      return val;

   nir_ssa_scalar src[2] = {
      {alu->src[0].src.ssa, alu->src[0].swizzle[val.comp]},
      {alu->src[1].src.ssa, alu->src[1].swizzle[val.comp]},
   };

   /* Make sure that we aren't taking out an addition that could trigger
    * unsigned wrapping in a way that would change the semantics of the load.
    * Ignored for ints-as-floats (lower_bitops is a proxy for that), where
    * unsigned wrapping doesn't make sense.
    */
   if (!alu->no_unsigned_wrap && !b->shader->options->lower_bitops) {
      if (!state->range_ht) {
         /* Cache for nir_unsigned_upper_bound */
         state->range_ht = _mesa_pointer_hash_table_create(NULL);
      }

      /* Check if there can really be an unsigned wrap. */
      uint32_t ub0 = nir_unsigned_upper_bound(b->shader, state->range_ht, src[0], NULL);
      uint32_t ub1 = nir_unsigned_upper_bound(b->shader, state->range_ht, src[1], NULL);

      if ((UINT32_MAX - ub0) < ub1)
         return val;

      /* We proved that unsigned wrap won't be possible, so we can set the flag too. */
      alu->no_unsigned_wrap = true;
   }

   for (unsigned i = 0; i < 2; ++i) {
      src[i] = nir_ssa_scalar_chase_movs(src[i]);
      if (nir_ssa_scalar_is_const(src[i])) {
         uint32_t offset = nir_ssa_scalar_as_uint(src[i]);
         if (offset + *out_const <= max) {
            *out_const += offset;
            return try_extract_const_addition(b, src[1 - i], state, out_const, max);
         }
      }
   }

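   /* Neither source is a small enough constant, so recurse into both
    * operands and only rebuild the addition if something was extracted.
    */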
   uint32_t orig_offset = *out_const;
   src[0] = try_extract_const_addition(b, src[0], state, out_const, max);
   src[1] = try_extract_const_addition(b, src[1], state, out_const, max);
   if (*out_const == orig_offset)
      return val;

   b->cursor = nir_before_instr(&alu->instr);
   nir_ssa_def *r =
      nir_iadd(b, nir_channel(b, src[0].def, src[0].comp),
               nir_channel(b, src[1].def, src[1].comp));
   return nir_get_ssa_scalar(r, 0);
}

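/* Fold additions on the offset source of "intrin" into its BASE index.
 * "max" limits how large the folded constant may become; returns true if
 * the instruction was changed.
 */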
static bool
try_fold_load_store(nir_builder *b,
                    nir_intrinsic_instr *intrin,
                    opt_offsets_state *state,
                    unsigned offset_src_idx,
                    uint32_t max)
{
   /* Assume that BASE is the constant offset of a load/store.
    * Try to constant-fold additions to the offset source
    * into the actual const offset of the instruction.
    */

   unsigned off_const = nir_intrinsic_base(intrin);
   nir_src *off_src = &intrin->src[offset_src_idx];
   nir_ssa_def *replace_src = NULL;

   if (!off_src->is_ssa || off_src->ssa->bit_size != 32)
      return false;

   if (!nir_src_is_const(*off_src)) {
      uint32_t add_offset = 0;
      nir_ssa_scalar val = {.def = off_src->ssa, .comp = 0};
      val = try_extract_const_addition(b, val, state, &add_offset, max);
      if (add_offset == 0)
         return false;
      off_const += add_offset;
      b->cursor = nir_before_instr(&intrin->instr);
      replace_src = nir_channel(b, val.def, val.comp);
   } else if (nir_src_as_uint(*off_src) && off_const + nir_src_as_uint(*off_src) <= max) {
      /* The offset source is entirely constant: fold it into BASE and
       * replace it with zero.
       */
      off_const += nir_src_as_uint(*off_src);
      b->cursor = nir_before_instr(&intrin->instr);
      replace_src = nir_imm_zero(b, off_src->ssa->num_components, off_src->ssa->bit_size);
   }

   if (!replace_src)
      return false;

   nir_instr_rewrite_src(&intrin->instr, &intrin->src[offset_src_idx], nir_src_for_ssa(replace_src));
   nir_intrinsic_set_base(intrin, off_const);
   return true;
}

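/* Fold a constant offset source of a load/store_shared2_amd intrinsic into
 * its OFFSET0/OFFSET1 indices, enabling the ST64 encoding when both offsets
 * allow it.
 */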
static bool
try_fold_shared2(nir_builder *b,
                 nir_intrinsic_instr *intrin,
                 opt_offsets_state *state,
                 unsigned offset_src_idx)
{
   unsigned comp_size = (intrin->intrinsic == nir_intrinsic_load_shared2_amd ?
                         intrin->dest.ssa.bit_size : intrin->src[0].ssa->bit_size) / 8;
   unsigned stride = (nir_intrinsic_st64(intrin) ? 64 : 1) * comp_size;
   unsigned offset0 = nir_intrinsic_offset0(intrin) * stride;
   unsigned offset1 = nir_intrinsic_offset1(intrin) * stride;
   nir_src *off_src = &intrin->src[offset_src_idx];

   if (!nir_src_is_const(*off_src))
      return false;

   unsigned const_offset = nir_src_as_uint(*off_src);
   offset0 += const_offset;
   offset1 += const_offset;

   /* Use the ST64 encoding only when both byte offsets are multiples of
    * 64 components.
    */
   bool st64 = offset0 % (64 * comp_size) == 0 && offset1 % (64 * comp_size) == 0;
   stride = (st64 ? 64 : 1) * comp_size;
   if (const_offset % stride || offset0 > 255 * stride || offset1 > 255 * stride)
      return false;

   b->cursor = nir_before_instr(&intrin->instr);
   nir_instr_rewrite_src(&intrin->instr, off_src, nir_src_for_ssa(nir_imm_zero(b, 1, 32)));
   nir_intrinsic_set_offset0(intrin, offset0 / stride);
   nir_intrinsic_set_offset1(intrin, offset1 / stride);
   nir_intrinsic_set_st64(intrin, st64);

   return true;
}

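/* nir_shader_instructions_pass() callback: dispatch each supported intrinsic
 * to the matching folding helper, together with its offset source index and
 * the caller-provided maximum for the folded constant.
 */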
static bool
process_instr(nir_builder *b, nir_instr *instr, void *s)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   opt_offsets_state *state = (opt_offsets_state *) s;
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_uniform:
      return try_fold_load_store(b, intrin, state, 0, state->options->uniform_max);
   case nir_intrinsic_load_ubo_vec4:
      return try_fold_load_store(b, intrin, state, 1, state->options->ubo_vec4_max);
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_shared_ir3:
      return try_fold_load_store(b, intrin, state, 0, state->options->shared_max);
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_shared_ir3:
      return try_fold_load_store(b, intrin, state, 1, state->options->shared_max);
   case nir_intrinsic_load_shared2_amd:
      return try_fold_shared2(b, intrin, state, 0);
   case nir_intrinsic_store_shared2_amd:
      return try_fold_shared2(b, intrin, state, 1);
   case nir_intrinsic_load_buffer_amd:
      return try_fold_load_store(b, intrin, state, 1, state->options->buffer_max);
   case nir_intrinsic_store_buffer_amd:
      return try_fold_load_store(b, intrin, state, 2, state->options->buffer_max);
   default:
      return false;
   }

   unreachable("Can't reach here.");
}

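/* Entry point of the pass: fold constant additions on the offset sources of
 * the intrinsics handled in process_instr() into their constant indices.
 *
 * Typical driver usage might look like the following (a minimal sketch; the
 * limits are illustrative only):
 *
 *    const nir_opt_offsets_options offset_options = {
 *       .uniform_max = 0,
 *       .ubo_vec4_max = ~0u,
 *       .shared_max = ~0u,
 *       .buffer_max = ~0u,
 *    };
 *    NIR_PASS(progress, shader, nir_opt_offsets, &offset_options);
 */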
bool
nir_opt_offsets(nir_shader *shader, const nir_opt_offsets_options *options)
{
   opt_offsets_state state;
   state.range_ht = NULL;
   state.options = options;

   bool p = nir_shader_instructions_pass(shader, process_instr,
                                         nir_metadata_block_index |
                                         nir_metadata_dominance,
                                         &state);

   if (state.range_ht)
      _mesa_hash_table_destroy(state.range_ht, NULL);

   return p;
}