/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/* A pass to split intrinsics with discontinuous writemasks into ones
 * with contiguous writemasks starting with .x, e.g.:
 *
 *   vec4 32 ssa_76 = vec4 ssa_35, ssa_35, ssa_35, ssa_35
 *   intrinsic store_ssbo (ssa_76, ssa_105, ssa_106) (2, 0, 4, 0) // wrmask=y
 *
 * is turned into:
 *
 *   vec4 32 ssa_76 = vec4 ssa_35, ssa_35, ssa_35, ssa_35
 *   vec1 32 ssa_107 = load_const (0x00000004)
 *   vec1 32 ssa_108 = iadd ssa_106, ssa_107
 *   vec1 32 ssa_109 = mov ssa_76.y
 *   intrinsic store_ssbo (ssa_109, ssa_105, ssa_108) (1, 0, 4, 0) // wrmask=x
 *
 * and likewise:
 *
 *   vec4 32 ssa_76 = vec4 ssa_35, ssa_35, ssa_35, ssa_35
 *   intrinsic store_ssbo (ssa_76, ssa_105, ssa_106) (13, 0, 4, 0) // wrmask=xzw
 *
 * is split into:
 *
 *   // .x component:
 *   vec4 32 ssa_76 = vec4 ssa_35, ssa_35, ssa_35, ssa_35
 *   vec1 32 ssa_107 = load_const (0x00000000)
 *   vec1 32 ssa_108 = iadd ssa_106, ssa_107
 *   vec1 32 ssa_109 = mov ssa_76.x
 *   intrinsic store_ssbo (ssa_109, ssa_105, ssa_108) (1, 0, 4, 0) // wrmask=x
 *   // .zw components:
 *   vec1 32 ssa_110 = load_const (0x00000008)
 *   vec1 32 ssa_111 = iadd ssa_106, ssa_110
 *   vec2 32 ssa_112 = mov ssa_76.zw
 *   intrinsic store_ssbo (ssa_112, ssa_105, ssa_111) (3, 0, 4, 0) // wrmask=xy
 */

static int
value_src(nir_intrinsic_op intrinsic)
{
   switch (intrinsic) {
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
   case nir_intrinsic_store_per_view_output:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      return 0;
   default:
      return -1;
   }
}

static int
offset_src(nir_intrinsic_op intrinsic)
{
   switch (intrinsic) {
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      return 1;
   case nir_intrinsic_store_per_vertex_output:
   case nir_intrinsic_store_per_view_output:
   case nir_intrinsic_store_ssbo:
      return 2;
   default:
      return -1;
   }
}

static void
split_wrmask(nir_builder *b, nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];

   b->cursor = nir_before_instr(&intr->instr);

   assert(!info->has_dest); /* expecting only store intrinsics */

   unsigned num_srcs = info->num_srcs;
   unsigned value_idx = value_src(intr->intrinsic);
   unsigned offset_idx = offset_src(intr->intrinsic);

   unsigned wrmask = nir_intrinsic_write_mask(intr);
   while (wrmask) {
      unsigned first_component = ffs(wrmask) - 1;
      unsigned length = ffs(~(wrmask >> first_component)) - 1;
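      /* e.g. for wrmask=0b1101 (xzw), the first iteration sees
       * first_component=0 and length=1 (the lone .x write); once those
       * bits are cleared at the bottom of the loop, the second iteration
       * sees wrmask=0b1100, i.e. first_component=2 and length=2 (.zw).
       */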

      nir_def *value = intr->src[value_idx].ssa;
      nir_def *offset = intr->src[offset_idx].ssa;

      /* swizzle out the consecutive components that we'll store
       * in this iteration:
       */
      unsigned cur_mask = (BITFIELD_MASK(length) << first_component);
      value = nir_channels(b, value, cur_mask);
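      /* e.g. with cur_mask=0b1100, nir_channels() yields a new vec2 holding
       * the original .zw components, compacted down to start at .x.
       */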

      /* and create the replacement intrinsic: */
      nir_intrinsic_instr *new_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);

      nir_intrinsic_copy_const_indices(new_intr, intr);
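      /* the new intrinsic always writes components 0..length-1, matching
       * the value that nir_channels() compacted above:
       */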
      nir_intrinsic_set_write_mask(new_intr, BITFIELD_MASK(length));

      const int offset_units = value->bit_size / 8;

      if (nir_intrinsic_has_align_mul(intr)) {
         assert(nir_intrinsic_has_align_offset(intr));
         unsigned align_mul = nir_intrinsic_align_mul(intr);
         unsigned align_off = nir_intrinsic_align_offset(intr);

         align_off += offset_units * first_component;
         align_off = align_off % align_mul;

         nir_intrinsic_set_align(new_intr, align_mul, align_off);
      }
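      /* e.g. a 32-bit store split at first_component=2 advances the offset
       * by 8 bytes, so align_mul=16, align_offset=0 becomes align_offset=8
       * on the new intrinsic.
       */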

      /* If the instruction has a BASE, fold the offset adjustment into
       * that instead of emitting extra ALU instructions; otherwise add
       * the adjustment to the offset source:
       */
      unsigned offset_adj = offset_units * first_component;
      if (nir_intrinsic_has_base(intr)) {
         nir_intrinsic_set_base(new_intr,
                                nir_intrinsic_base(intr) + offset_adj);
      } else {
         offset = nir_iadd(b, offset,
                           nir_imm_intN_t(b, offset_adj, offset->bit_size));
      }
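      /* e.g. splitting the .zw components out of a 32-bit store_shared with
       * base=16 yields base=24 (16 + 4 bytes * 2 components), reusing the
       * original offset source unchanged.
       */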

      new_intr->num_components = length;

      /* Copy the sources, replacing value/offset, and passing everything
       * else through to the new instruction:
       */
      for (unsigned i = 0; i < num_srcs; i++) {
         if (i == value_idx) {
            new_intr->src[i] = nir_src_for_ssa(value);
         } else if (i == offset_idx) {
            new_intr->src[i] = nir_src_for_ssa(offset);
         } else {
            new_intr->src[i] = intr->src[i];
         }
      }

      nir_builder_instr_insert(b, &new_intr->instr);

      /* Clear the bits in the writemask that we just wrote, then try
       * again to see if more channels are left.
       */
      wrmask &= ~cur_mask;
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intr->instr);
}

struct nir_lower_wrmasks_state {
   nir_instr_filter_cb cb;
   const void *data;
};

static bool
nir_lower_wrmasks_instr(nir_builder *b, nir_instr *instr, void *data)
{
   struct nir_lower_wrmasks_state *state = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* if the intrinsic has no wrmask, skip it: */
   if (!nir_intrinsic_has_write_mask(intr))
      return false;

   /* if the wrmask already covers all of the instruction's components,
    * there is nothing to do:
    */
   if (nir_intrinsic_write_mask(intr) == BITFIELD_MASK(intr->num_components))
      return false;

   /* do we know how to lower this instruction? */
   if (value_src(intr->intrinsic) < 0)
      return false;

   assert(offset_src(intr->intrinsic) >= 0);

   /* does the backend need us to lower this intrinsic? */
   if (state->cb && !state->cb(instr, state->data))
      return false;

   split_wrmask(b, intr);

   return true;
}

bool
nir_lower_wrmasks(nir_shader *shader, nir_instr_filter_cb cb, const void *data)
{
   struct nir_lower_wrmasks_state state = {
      .cb = cb,
      .data = data,
   };

   return nir_shader_instructions_pass(shader,
                                       nir_lower_wrmasks_instr,
                                       nir_metadata_control_flow,
                                       &state);
}
234