/*
 * Copyright © 2022 Collabora Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Gert Wollny <gert.wollny@collabora.com>
 */

#include "nir.h"
#include "nir_builder.h"

#include "nir_deref.h"
#include "util/hash_table.h"

/* This pass splits stores to and loads from 64-bit vec3
 * and vec4 local (function_temp) variables so that they use
 * at most vec2, and it splits phi nodes accordingly.
 *
 * Arrays of vec3 and vec4 are handled directly; arrays of arrays
 * are lowered to flat arrays on the fly.
 */
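/* As an illustration (schematic NIR, the syntax and SSA numbering are
 * only indicative), a load such as
 *
 *    vec4 64 ssa_2 = load_deref ssa_1 (var)
 *
 * becomes two loads from the replacement variable pair plus a merge:
 *
 *    vec2 64 ssa_4 = load_deref ssa_3 (var_xy)
 *    vec2 64 ssa_6 = load_deref ssa_5 (var_zw)
 *    vec4 64 ssa_7 = vec4 ssa_4.x, ssa_4.y, ssa_6.x, ssa_6.y
 */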

static bool
nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr,
                                     const void *data)
{
   switch (instr->type) {
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_deref: {
         if (nir_dest_bit_size(intr->dest) != 64)
            return false;
         nir_variable *var = nir_intrinsic_get_var(intr, 0);
         if (var->data.mode != nir_var_function_temp)
            return false;
         return nir_dest_num_components(intr->dest) >= 3;
      }
      case nir_intrinsic_store_deref: {
         if (nir_src_bit_size(intr->src[1]) != 64)
            return false;
         nir_variable *var = nir_intrinsic_get_var(intr, 0);
         if (var->data.mode != nir_var_function_temp)
            return false;
         return nir_src_num_components(intr->src[1]) >= 3;
      }
      default:
         return false;
      }
   }
   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      if (nir_dest_bit_size(phi->dest) != 64)
         return false;
      return nir_dest_num_components(phi->dest) >= 3;
   }

   default:
      return false;
   }
}

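/* Pair of new variables that hold the two halves of a split vec3/vec4:
 * the first two components in 'xy' and the remaining one or two in 'zw'. */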
typedef struct {
   nir_variable *xy;
   nir_variable *zw;
} variable_pair;

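/* Recombine the two partial loads (or split phi results) into the
 * original vec3 or vec4 value. */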
static nir_ssa_def *
merge_to_vec3_or_vec4(nir_builder *b, nir_ssa_def *load1,
                      nir_ssa_def *load2)
{
   assert(load2->num_components > 0 && load2->num_components < 3);

   if (load2->num_components == 1)
      return nir_vec3(b, nir_channel(b, load1, 0),
                      nir_channel(b, load1, 1),
                      nir_channel(b, load2, 0));
   else
      return nir_vec4(b, nir_channel(b, load1, 0),
                      nir_channel(b, load1, 1),
                      nir_channel(b, load2, 0),
                      nir_channel(b, load2, 1));
}

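/* Walk the deref path and fold all (possibly nested) array indices into
 * one linear offset, so that an array of arrays can be addressed like a
 * flat array. */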
static nir_ssa_def *
get_linear_array_offset(nir_builder *b, nir_deref_instr *deref)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   nir_ssa_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
   for (nir_deref_instr **p = &path.path[1]; *p; p++) {
      switch ((*p)->deref_type) {
      case nir_deref_type_array: {
         nir_ssa_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
         int stride = glsl_array_size((*p)->type);
         if (stride >= 0)
            offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));
         else
            offset = nir_iadd(b, offset, index);
         break;
      }
      default:
         unreachable("Not part of the path");
      }
   }
   nir_deref_path_finish(&path);
   return offset;
}

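/* Return the variable pair that replaces old_var, creating it and caching
 * it in split_vars on first use. The 'xy' variable gets type dvec2 and the
 * 'zw' variable dvec2 or double; for arrays both become flat arrays of the
 * total array-of-arrays size. */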
static variable_pair *
get_var_pair(nir_builder *b, nir_variable *old_var,
             struct hash_table *split_vars)
{
   variable_pair *new_var = NULL;
   unsigned old_components = glsl_get_components(
                                glsl_without_array_or_matrix(old_var->type));

   assert(old_components > 2 && old_components <= 4);

   struct hash_entry *entry = _mesa_hash_table_search(split_vars, old_var);
   if (!entry) {
      new_var = (variable_pair *)calloc(1, sizeof(variable_pair));
      new_var->xy = nir_variable_clone(old_var, b->shader);
      new_var->zw = nir_variable_clone(old_var, b->shader);
      new_var->xy->type = glsl_dvec_type(2);
      new_var->zw->type = glsl_dvec_type(old_components - 2);

      if (glsl_type_is_array(old_var->type)) {
         unsigned array_size = glsl_get_aoa_size(old_var->type);
         new_var->xy->type = glsl_array_type(new_var->xy->type,
                                             array_size, 0);
         new_var->zw->type = glsl_array_type(new_var->zw->type,
                                             array_size, 0);
      }

      exec_list_push_tail(&b->impl->locals, &new_var->xy->node);
      exec_list_push_tail(&b->impl->locals, &new_var->zw->node);

      _mesa_hash_table_insert(split_vars, old_var, new_var);
   } else
      new_var = (variable_pair *)entry->data;
   return new_var;
}

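/* Rewrite a 64-bit vec3/vec4 load_deref as two loads, a vec2 from the xy
 * variable and a double/dvec2 from the zw variable, and merge the results
 * back into one value. */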
static nir_ssa_def *
split_load_deref(nir_builder *b, nir_intrinsic_instr *intr,
                 nir_ssa_def *offset, struct hash_table *split_vars)
{
   nir_variable *old_var = nir_intrinsic_get_var(intr, 0);
   unsigned old_components = glsl_get_components(
                                glsl_without_array_or_matrix(old_var->type));

   variable_pair *vars = get_var_pair(b, old_var, split_vars);

   nir_deref_instr *deref1 = nir_build_deref_var(b, vars->xy);
   nir_deref_instr *deref2 = nir_build_deref_var(b, vars->zw);

   if (offset) {
      deref1 = nir_build_deref_array(b, deref1, offset);
      deref2 = nir_build_deref_array(b, deref2, offset);
   }

   nir_ssa_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->dest.ssa, 0);
   nir_ssa_def *load2 = nir_build_load_deref(b, old_components - 2, 64,
                                             &deref2->dest.ssa, 0);

   return merge_to_vec3_or_vec4(b, load1, load2);
}

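/* Rewrite a 64-bit vec3/vec4 store_deref as up to two stores, splitting
 * the write mask between the xy and zw variables. */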
static nir_ssa_def *
split_store_deref(nir_builder *b, nir_intrinsic_instr *intr,
                  nir_ssa_def *offset, struct hash_table *split_vars)
{
   nir_variable *old_var = nir_intrinsic_get_var(intr, 0);

   variable_pair *vars = get_var_pair(b, old_var, split_vars);

   nir_deref_instr *deref_xy = nir_build_deref_var(b, vars->xy);
   nir_deref_instr *deref_zw = nir_build_deref_var(b, vars->zw);

   if (offset) {
      deref_xy = nir_build_deref_array(b, deref_xy, offset);
      deref_zw = nir_build_deref_array(b, deref_zw, offset);
   }

   /* Bits 0-1 of the write mask select the x and y channels. */
   int write_mask_xy = nir_intrinsic_write_mask(intr) & 3;
   if (write_mask_xy) {
      nir_ssa_def *src_xy = nir_channels(b, intr->src[1].ssa, 3);
      nir_build_store_deref(b, &deref_xy->dest.ssa, src_xy, write_mask_xy);
   }

   /* Bits 2-3 select z and w; shift them down to address the two
    * components of the zw variable. */
   int write_mask_zw = nir_intrinsic_write_mask(intr) & 0xc;
   if (write_mask_zw) {
      nir_ssa_def *src_zw = nir_channels(b, intr->src[1].ssa, write_mask_zw);
      nir_build_store_deref(b, &deref_zw->dest.ssa, src_zw, write_mask_zw >> 2);
   }

   return NIR_LOWER_INSTR_PROGRESS_REPLACE;
}

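/* Split a 64-bit vec3/vec4 phi into a vec2 phi and a double/dvec2 phi,
 * extracting the source channels at the end of each predecessor block,
 * then recombine the two results after the original phi. */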
static nir_ssa_def *
split_phi(nir_builder *b, nir_phi_instr *phi)
{
   int num_comp[2] = {2, phi->dest.ssa.num_components - 2};

   nir_phi_instr *new_phi[2];

   for (unsigned i = 0; i < 2; i++) {
      new_phi[i] = nir_phi_instr_create(b->shader);
      nir_ssa_dest_init(&new_phi[i]->instr, &new_phi[i]->dest, num_comp[i],
                        phi->dest.ssa.bit_size, NULL);

      nir_foreach_phi_src(src, phi) {
         /* Insert the channel extraction at the end of the predecessor,
          * but before any jump (inspired by nir_lower_phi_to_scalar). */
         nir_instr *pred_last_instr = nir_block_last_instr(src->pred);

         if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
            b->cursor = nir_before_instr(pred_last_instr);
         else
            b->cursor = nir_after_block(src->pred);

         nir_ssa_def *new_src = nir_channels(b, src->src.ssa,
                                             ((1 << num_comp[i]) - 1) << (2 * i));

         nir_phi_instr_add_src(new_phi[i], src->pred, nir_src_for_ssa(new_src));
      }
      nir_instr_insert_before(&phi->instr, &new_phi[i]->instr);
   }

   b->cursor = nir_after_instr(&phi->instr);
   return merge_to_vec3_or_vec4(b, &new_phi[0]->dest.ssa, &new_phi[1]->dest.ssa);
}

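/* Rewrite one instruction that passed the filter; array derefs are
 * linearized into a flat offset first. */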
static nir_ssa_def *
nir_split_64bit_vec3_and_vec4_impl(nir_builder *b, nir_instr *instr, void *d)
{
   struct hash_table *split_vars = (struct hash_table *)d;

   switch (instr->type) {
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_deref: {
         nir_deref_instr *deref =
               nir_instr_as_deref(intr->src[0].ssa->parent_instr);
         if (deref->deref_type == nir_deref_type_var)
            return split_load_deref(b, intr, NULL, split_vars);
         else if (deref->deref_type == nir_deref_type_array)
            return split_load_deref(b, intr, get_linear_array_offset(b, deref),
                                    split_vars);
         else
            unreachable("Only splitting of loads from vars and arrays");
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *deref =
               nir_instr_as_deref(intr->src[0].ssa->parent_instr);
         if (deref->deref_type == nir_deref_type_var)
            return split_store_deref(b, intr, NULL, split_vars);
         else if (deref->deref_type == nir_deref_type_array)
            return split_store_deref(b, intr, get_linear_array_offset(b, deref),
                                     split_vars);
         else
            unreachable("Only splitting of stores to vars and arrays");
      }

      default:
         unreachable("Only splitting load_deref and store_deref");
      }
   }

   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      return split_phi(b, phi);
   }

   default:
      unreachable("Only splitting load_deref/store_deref and phi");
   }

   return NULL;
}

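/* get_var_pair callocs the variable_pair records and stores them in the
 * hash table; free them when the table is destroyed so the pass does not
 * leak them. */
static void
free_var_pair_entry(struct hash_entry *entry)
{
   free(entry->data);
}
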
bool
nir_split_64bit_vec3_and_vec4(nir_shader *sh)
{
   struct hash_table *split_vars = _mesa_pointer_hash_table_create(NULL);

   bool progress =
         nir_shader_lower_instructions(sh,
                                       nir_split_64bit_vec3_and_vec4_filter,
                                       nir_split_64bit_vec3_and_vec4_impl,
                                       split_vars);

   _mesa_hash_table_destroy(split_vars, free_var_pair_entry);
   return progress;
}