/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

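/*
 * nir_opt_memcpy: a NIR pass that tries to get rid of memcpy_deref
 * intrinsics.  It first strips useless casts from the memcpy's destination
 * and source derefs, then lowers the memcpy itself to a load/store pair or
 * a copy_deref whenever the copy size is a known constant that exactly
 * covers both deref types.
 */
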
static bool
opt_memcpy_deref_cast(nir_intrinsic_instr *cpy, nir_src *deref_src)
{
   assert(cpy->intrinsic == nir_intrinsic_memcpy_deref);

   nir_deref_instr *cast = nir_src_as_deref(*deref_src);
   if (cast == NULL || cast->deref_type != nir_deref_type_cast)
      return false;

   /* We always have to replace the source with a deref, not a bare uint
    * pointer.  If it's the first deref in the chain, bail.
    */
   nir_deref_instr *parent = nir_src_as_deref(cast->parent);
   if (parent == NULL)
      return false;

   /* If it has useful alignment information, we want to keep that */
   if (cast->cast.align_mul > 0)
      return false;

   /* Casts to uint8 or int8 never do us any good; get rid of them */
   if (cast->type == glsl_int8_t_type() ||
       cast->type == glsl_uint8_t_type()) {
      nir_instr_rewrite_src(&cpy->instr, deref_src,
                            nir_src_for_ssa(&parent->dest.ssa));
      return true;
   }

   int64_t parent_type_size = glsl_get_explicit_size(parent->type, false);
   if (parent_type_size < 0)
      return false;

   if (!nir_src_is_const(cpy->src[2]))
      return false;

   /* We don't want to get rid of the cast if the resulting type would be
    * smaller than the amount of data we're copying.
    */
   if (nir_src_as_uint(cpy->src[2]) < (uint64_t)parent_type_size)
      return false;

   nir_instr_rewrite_src(&cpy->instr, deref_src,
                         nir_src_for_ssa(&parent->dest.ssa));
   return true;
}

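/* Returns true if the given type has an explicit layout with no padding
 * anywhere: struct fields must sit back-to-back at their explicit offsets,
 * array and matrix strides must equal the element size, vectors and scalars
 * must have no explicit stride, and booleans are rejected outright.  For
 * such types a byte-for-byte memcpy of the whole type is equivalent to a
 * typed copy.  The total size in bytes is written to size_out if it is
 * non-NULL.
 */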
static bool
type_is_tightly_packed(const struct glsl_type *type, unsigned *size_out)
{
   unsigned size = 0;
   if (glsl_type_is_struct_or_ifc(type)) {
      unsigned num_fields = glsl_get_length(type);
      for (unsigned i = 0; i < num_fields; i++) {
         const struct glsl_struct_field *field =
            glsl_get_struct_field_data(type, i);

         if (field->offset < 0 || field->offset != size)
            return false;

         unsigned field_size;
         if (!type_is_tightly_packed(field->type, &field_size))
            return false;

         size = field->offset + field_size;
      }
   } else if (glsl_type_is_array_or_matrix(type)) {
      if (glsl_type_is_unsized_array(type))
         return false;

      unsigned stride = glsl_get_explicit_stride(type);
      if (stride == 0)
         return false;

      const struct glsl_type *elem_type = glsl_get_array_element(type);

      unsigned elem_size;
      if (!type_is_tightly_packed(elem_type, &elem_size))
         return false;

      if (elem_size != stride)
         return false;

      size = stride * glsl_get_length(type);
   } else {
      assert(glsl_type_is_vector_or_scalar(type));
      if (glsl_get_explicit_stride(type) > 0)
         return false;

      if (glsl_type_is_boolean(type))
         return false;

      size = glsl_get_explicit_size(type, false);
   }

   if (size_out)
      *size_out = size;
   return true;
}

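/* Tries to get rid of a single memcpy_deref.  A self-copy or a zero-sized
 * copy is simply deleted.  A constant-sized copy between vector/scalar
 * derefs whose explicit sizes both equal the copy size becomes a load, a
 * vector bitcast to the destination's bit size, and a store.  A copy
 * between identical, tightly packed types of exactly the copy size becomes
 * a copy_deref.  In all cases the memcpy's access qualifiers are carried
 * over to the replacement.
 */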
static bool
try_lower_memcpy(nir_builder *b, nir_intrinsic_instr *cpy)
{
   nir_deref_instr *dst = nir_src_as_deref(cpy->src[0]);
   nir_deref_instr *src = nir_src_as_deref(cpy->src[1]);

   /* A self-copy can always be eliminated */
   if (dst == src) {
      nir_instr_remove(&cpy->instr);
      return true;
   }

   if (!nir_src_is_const(cpy->src[2]))
      return false;

   uint64_t size = nir_src_as_uint(cpy->src[2]);
   if (size == 0) {
      nir_instr_remove(&cpy->instr);
      return true;
   }

   if (glsl_type_is_vector_or_scalar(src->type) &&
       glsl_type_is_vector_or_scalar(dst->type) &&
       glsl_get_explicit_size(dst->type, false) == size &&
       glsl_get_explicit_size(src->type, false) == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      nir_ssa_def *data =
         nir_load_deref_with_access(b, src, nir_intrinsic_src_access(cpy));
      data = nir_bitcast_vector(b, data, glsl_get_bit_size(dst->type));
      assert(data->num_components == glsl_get_vector_elements(dst->type));
      nir_store_deref_with_access(b, dst, data, ~0 /* write mask */,
                                  nir_intrinsic_dst_access(cpy));
      return true;
   }

   unsigned type_size;
   if (dst->type == src->type &&
       type_is_tightly_packed(dst->type, &type_size) &&
       type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   return false;
}

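/* Runs the optimization on one function: for each memcpy_deref, first strip
 * useless casts from the destination (src[0]) and source (src[1]) derefs,
 * then try to lower the memcpy itself.
 */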
static bool
opt_memcpy_impl(nir_function_impl *impl)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *cpy = nir_instr_as_intrinsic(instr);
         if (cpy->intrinsic != nir_intrinsic_memcpy_deref)
            continue;

         while (opt_memcpy_deref_cast(cpy, &cpy->src[0]))
            progress = true;
         while (opt_memcpy_deref_cast(cpy, &cpy->src[1]))
            progress = true;

         if (try_lower_memcpy(&b, cpy)) {
            progress = true;
            continue;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

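/* Pass entry point.  As a sketch of typical usage (illustrative only, not
 * part of this file), a driver would run this from its NIR optimization
 * loop until it stops making progress:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, shader, nir_opt_memcpy);
 *    } while (progress);
 */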
bool
nir_opt_memcpy(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl && opt_memcpy_impl(function->impl))
         progress = true;
   }

   return progress;
}