/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir_constant_expressions.h"
#include <math.h>

/*
 * Implements SSA-based constant folding.
 */
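
/* A hypothetical before/after sketch (NIR-like pseudocode, not the exact
 * serialized form): an ALU instruction whose sources are all load_const
 * is evaluated at compile time and replaced with a single load_const of
 * the result:
 *
 *    ssa_1 = load_const (2.0)
 *    ssa_2 = load_const (3.0)
 *    ssa_3 = fmul ssa_1, ssa_2
 *
 * becomes
 *
 *    ssa_3 = load_const (6.0)
 */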

struct constant_fold_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};

static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[4];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* If any outputs or inputs have unsized types, we need to guess the
    * bit-size. In this case, the validator ensures that all bit-sizes
    * match, so we can just take the bit-size from the first output or
    * input with an unsized type. If all the outputs and inputs are sized,
    * we don't need to guess the bit-size at all, because the code we
    * generate for constant opcodes in that case already knows the sizes of
    * the types involved and does not use the provided bit-size for anything
    * (although it still requires a valid bit-size).
    */
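   /* For instance (this cites the opcode table as an assumption; it is not
    * checked here): iadd is declared with the unsized type nir_type_int,
    * so its bit-size is read off the SSA def below, while an opcode whose
    * output and input types are all explicitly sized leaves bit_size at 0
    * and falls back to the default of 32 further down.
    */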
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i])) {
         bit_size = instr->src[i].src.ssa->bit_size;
      }

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         if (load_const->def.bit_size == 64)
            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
         else
            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            bit_size, src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}

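/* Folds constant indirects in a variable-dereference chain into direct
 * array derefs. A hypothetical sketch: for a deref like arr[2 + ssa_5]
 * where ssa_5 = load_const (3), the constant is added into base_offset
 * (giving arr[5]), the indirect source is cleared, and the deref array
 * type becomes direct.
 */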
static bool
constant_fold_deref(nir_instr *instr, nir_deref_var *deref)
{
   bool progress = false;

   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
      if (tail->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *arr = nir_deref_as_array(tail);

      if (arr->deref_array_type == nir_deref_array_type_indirect &&
          arr->indirect.is_ssa &&
          arr->indirect.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *indirect =
            nir_instr_as_load_const(arr->indirect.ssa->parent_instr);

         arr->base_offset += indirect->value.u32[0];

         /* Clear out the source */
         nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));

         arr->deref_array_type = nir_deref_array_type_direct;

         progress = true;
      }
   }

   return progress;
}

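/* Folds constant derefs on each of the intrinsic's variables and, for
 * nir_intrinsic_discard_if, removes the discard entirely when its
 * condition is a constant zero (i.e. the discard can never fire).
 */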
static bool
constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
   bool progress = false;

   unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      progress |= constant_fold_deref(&instr->instr, instr->variables[i]);
   }

   if (instr->intrinsic == nir_intrinsic_discard_if) {
      nir_const_value *src_val = nir_src_as_const_value(instr->src[0]);
      if (src_val && src_val->u32[0] == 0) {
         nir_instr_remove(&instr->instr);
         progress = true;
      }
   }

   return progress;
}

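/* Texture instructions reference their texture and sampler through
 * variable derefs, which may themselves contain foldable indirects.
 */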
static bool
constant_fold_tex_instr(nir_tex_instr *instr)
{
   bool progress = false;

   if (instr->texture)
      progress |= constant_fold_deref(&instr->instr, instr->texture);

   if (instr->sampler)
      progress |= constant_fold_deref(&instr->instr, instr->sampler);

   return progress;
}

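/* Dispatches each instruction in the block to the matching folding
 * helper; instruction types without a folding rule are left untouched.
 */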
static bool
constant_fold_block(nir_block *block, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx);
         break;
      case nir_instr_type_intrinsic:
         progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         progress |= constant_fold_tex_instr(nir_instr_as_tex(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return progress;
}

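/* Runs constant folding over every block in the function. On progress,
 * only block indices and dominance information are preserved; all other
 * metadata is invalidated.
 */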
static bool
nir_opt_constant_folding_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      progress |= constant_fold_block(block, mem_ctx);
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return progress;
}

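/* Pass entry point. A sketch of typical caller usage (the loop below is
 * illustrative caller code, not part of this file): the pass is usually
 * run as part of an optimization loop until it stops making progress:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_constant_folding(shader);
 *       ... other NIR optimization passes ...
 *    } while (progress);
 */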
bool
nir_opt_constant_folding(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_constant_folding_impl(function->impl);
   }

   return progress;
}