/*
 * Copyright © 2021 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3_nir.h"

/*
 * Lowering for 64b intrinsics generated with OpenCL or with
 * VK_KHR_buffer_device_address. All our intrinsics from a hw
 * standpoint are 32b, so we just need to combine in zero for
 * the upper 32bits and let the other nir passes clean up the mess.
 */
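
/*
 * Rough sketch of the transform (illustrative pseudo-NIR, not literal
 * syntax): a 64b vec2 load such as
 *
 *    vec2 64 %v = @load_ssbo (..., %offset)
 *
 * becomes one 32b vec2 load per 64b component, with the two halves
 * packed back together:
 *
 *    vec2 32 %p0 = @load_ssbo (..., %offset)
 *    vec2 32 %p1 = @load_ssbo (..., %offset + 8)
 *    %v = vec2 (pack_64_2x32 %p0), (pack_64_2x32 %p1)
 *
 * Stores are split the same way (with the write mask, where present,
 * fixed up per half).
 */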

static bool
lower_64b_intrinsics_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->intrinsic == nir_intrinsic_load_deref ||
       intr->intrinsic == nir_intrinsic_store_deref)
      return false;

   if (is_intrinsic_store(intr->intrinsic))
      return nir_src_bit_size(intr->src[0]) == 64;

   if (nir_intrinsic_dest_components(intr) == 0)
      return false;

   return intr->def.bit_size == 64;
}

static nir_def *
lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We could be *slightly* more clever and, for ex, turn a 64b vec4
    * load into two 32b vec4 loads, rather than 4 32b vec2 loads.
    */

   if (is_intrinsic_store(intr->intrinsic)) {
      unsigned offset_src_idx;
      switch (intr->intrinsic) {
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_store_global_ir3:
         offset_src_idx = 2;
         break;
      default:
         offset_src_idx = 1;
      }

      unsigned num_comp = nir_intrinsic_src_components(intr, 0);
      unsigned wrmask = nir_intrinsic_has_write_mask(intr) ?
         nir_intrinsic_write_mask(intr) : BITSET_MASK(num_comp);
      nir_def *val = intr->src[0].ssa;
      nir_def *off = intr->src[offset_src_idx].ssa;

      for (unsigned i = 0; i < num_comp; i++) {
         if (!(wrmask & BITFIELD_BIT(i)))
            continue;

         nir_def *c64 = nir_channel(b, val, i);
         nir_def *c32 = nir_unpack_64_2x32(b, c64);

         nir_intrinsic_instr *store =
            nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
         store->num_components = 2;
         store->src[0] = nir_src_for_ssa(c32);
         store->src[offset_src_idx] = nir_src_for_ssa(off);

         if (nir_intrinsic_has_write_mask(intr))
            nir_intrinsic_set_write_mask(store, 0x3);
         nir_builder_instr_insert(b, &store->instr);

         off = nir_iadd_imm(b, off, 8);
      }

      return NIR_LOWER_INSTR_PROGRESS_REPLACE;
   }

   unsigned num_comp = nir_intrinsic_dest_components(intr);

   nir_def *def = &intr->def;
   def->bit_size = 32;

   /* load_kernel_input is handled specially, lowering to two 32b inputs:
    */
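   /* The low 32b comes from the original (now 32b) destination; the high
    * 32b comes from a second 32b load_kernel_input at the source offset
    * plus 4, and the two halves are packed back into a 64b value below.
    */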
   if (intr->intrinsic == nir_intrinsic_load_kernel_input) {
      assert(num_comp == 1);

      nir_def *offset = nir_iadd_imm(b,
            intr->src[0].ssa, 4);

      nir_def *upper = nir_load_kernel_input(b, 1, 32, offset);

      return nir_pack_64_2x32_split(b, def, upper);
   }

   nir_def *components[num_comp];

   if (is_intrinsic_load(intr->intrinsic)) {
      unsigned offset_src_idx;
      switch (intr->intrinsic) {
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_global_ir3:
         offset_src_idx = 1;
         break;
      default:
         offset_src_idx = 0;
      }

      nir_def *off = intr->src[offset_src_idx].ssa;

      for (unsigned i = 0; i < num_comp; i++) {
         nir_intrinsic_instr *load =
            nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
         load->num_components = 2;
         load->src[offset_src_idx] = nir_src_for_ssa(off);

         nir_def_init(&load->instr, &load->def, 2, 32);
         nir_builder_instr_insert(b, &load->instr);

         components[i] = nir_pack_64_2x32(b, &load->def);

         off = nir_iadd_imm(b, off, 8);
      }
   } else {
      /* The remaining (non load/store) intrinsics just get zero-
       * extended from 32b to 64b:
       */
      for (unsigned i = 0; i < num_comp; i++) {
         nir_def *c = nir_channel(b, def, i);
         components[i] = nir_pack_64_2x32_split(b, c, nir_imm_zero(b, 1, 32));
      }
   }

   return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
}

bool
ir3_nir_lower_64b_intrinsics(nir_shader *shader)
{
   return nir_shader_lower_instructions(
         shader, lower_64b_intrinsics_filter,
         lower_64b_intrinsics, NULL);
}

/*
 * Lowering for 64b undef instructions, splitting into two 32b undefs
 */
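
/*
 * Sketch (illustrative): each 64b component of the undef becomes a fresh
 * 32b vec2 undef whose two channels are packed back together with
 * pack_64_2x32_split, and the per-component results are recombined into
 * a vector.
 */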

static nir_def *
lower_64b_undef(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_undef_instr *undef = nir_instr_as_undef(instr);
   unsigned num_comp = undef->def.num_components;
   nir_def *components[num_comp];

   for (unsigned i = 0; i < num_comp; i++) {
      nir_def *lowered = nir_undef(b, 2, 32);

      components[i] = nir_pack_64_2x32_split(b,
                                             nir_channel(b, lowered, 0),
                                             nir_channel(b, lowered, 1));
   }

   return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
}

static bool
lower_64b_undef_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   return instr->type == nir_instr_type_undef &&
      nir_instr_as_undef(instr)->def.bit_size == 64;
}

bool
ir3_nir_lower_64b_undef(nir_shader *shader)
{
   return nir_shader_lower_instructions(
         shader, lower_64b_undef_filter,
         lower_64b_undef, NULL);
}

/*
 * Lowering for load_global/store_global with 64b addresses to ir3
 * variants, which instead take a uvec2_32
 */
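
/*
 * Sketch (illustrative pseudo-NIR, not literal syntax):
 *
 *    %v = @load_global (%addr64, ...)
 *
 * becomes
 *
 *    %addr = unpack_64_2x32 %addr64     (x = low dword, y = high dword)
 *    %v    = @load_global_ir3 (%addr, %offset)
 *
 * Atomics map to the corresponding *_ir3 variants, and vec8/vec16 values
 * are split into at most vec4-sized loads/stores (see below).
 */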

static bool
lower_64b_global_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   switch (intr->intrinsic) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_store_global:
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_swap:
      return true;
   default:
      return false;
   }
}

static nir_def *
lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   bool load = intr->intrinsic != nir_intrinsic_store_global;

   nir_def *addr64 = intr->src[load ? 0 : 1].ssa;
   nir_def *addr = nir_unpack_64_2x32(b, addr64);

   /*
    * Note that we can get vec8/vec16 with OpenCL.. we need to split
    * those up into max 4 components per load/store.
    */

   if (intr->intrinsic == nir_intrinsic_global_atomic) {
      return nir_global_atomic_ir3(
            b, intr->def.bit_size, addr,
            intr->src[1].ssa,
            .atomic_op = nir_intrinsic_atomic_op(intr));
   } else if (intr->intrinsic == nir_intrinsic_global_atomic_swap) {
      return nir_global_atomic_swap_ir3(
            b, intr->def.bit_size, addr,
            intr->src[1].ssa, intr->src[2].ssa,
            .atomic_op = nir_intrinsic_atomic_op(intr));
   }

   if (load) {
      unsigned num_comp = nir_intrinsic_dest_components(intr);
      nir_def *components[num_comp];
      for (unsigned off = 0; off < num_comp;) {
         unsigned c = MIN2(num_comp - off, 4);
         nir_def *val = nir_load_global_ir3(
               b, c, intr->def.bit_size,
               addr, nir_imm_int(b, off));
         for (unsigned i = 0; i < c; i++) {
            components[off++] = nir_channel(b, val, i);
         }
      }
      return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
   } else {
      unsigned num_comp = nir_intrinsic_src_components(intr, 0);
      nir_def *value = intr->src[0].ssa;
      for (unsigned off = 0; off < num_comp; off += 4) {
         unsigned c = MIN2(num_comp - off, 4);
         nir_def *v = nir_channels(b, value, BITFIELD_MASK(c) << off);
         nir_store_global_ir3(b, v, addr, nir_imm_int(b, off));
      }
      return NIR_LOWER_INSTR_PROGRESS_REPLACE;
   }
}

bool
ir3_nir_lower_64b_global(nir_shader *shader)
{
   return nir_shader_lower_instructions(
         shader, lower_64b_global_filter,
         lower_64b_global, NULL);
}

/*
 * Lowering for 64b registers:
 * - @decl_reg -> split in two 32b ones
 * - @store_reg -> unpack_64_2x32_split_x/y and two separate stores
 * - @load_reg -> two separate loads and pack_64_2x32_split
 */
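
/*
 * Sketch (illustrative pseudo-NIR, not literal syntax):
 *
 *    %r = @decl_reg (num_components=N, bit_size=64)
 *    @store_reg (%val, %r)
 *    %x = @load_reg (%r)
 *
 * becomes
 *
 *    %r_lo = @decl_reg (num_components=N, bit_size=32)
 *    %r_hi = @decl_reg (num_components=N, bit_size=32)
 *    @store_reg (unpack_64_2x32_split_x %val, %r_lo)
 *    @store_reg (unpack_64_2x32_split_y %val, %r_hi)
 *    %x = pack_64_2x32_split (@load_reg %r_lo), (@load_reg %r_hi)
 */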

static void
lower_64b_reg(nir_builder *b, nir_intrinsic_instr *reg)
{
   unsigned num_components = nir_intrinsic_num_components(reg);
   unsigned num_array_elems = nir_intrinsic_num_array_elems(reg);

   nir_def *reg_hi = nir_decl_reg(b, num_components, 32, num_array_elems);
   nir_def *reg_lo = nir_decl_reg(b, num_components, 32, num_array_elems);

   nir_foreach_reg_store_safe (store_reg_src, reg) {
      nir_intrinsic_instr *store =
         nir_instr_as_intrinsic(nir_src_parent_instr(store_reg_src));
      b->cursor = nir_before_instr(&store->instr);

      nir_def *packed = store->src[0].ssa;
      nir_def *unpacked_lo = nir_unpack_64_2x32_split_x(b, packed);
      nir_def *unpacked_hi = nir_unpack_64_2x32_split_y(b, packed);
      int base = nir_intrinsic_base(store);

      if (store->intrinsic == nir_intrinsic_store_reg) {
         nir_build_store_reg(b, unpacked_lo, reg_lo, .base = base);
         nir_build_store_reg(b, unpacked_hi, reg_hi, .base = base);
      } else {
         assert(store->intrinsic == nir_intrinsic_store_reg_indirect);

         nir_def *offset = store->src[2].ssa;
         nir_store_reg_indirect(b, unpacked_lo, reg_lo, offset, .base = base);
         nir_store_reg_indirect(b, unpacked_hi, reg_hi, offset, .base = base);
      }

      nir_instr_remove(&store->instr);
   }

   nir_foreach_reg_load_safe (load_reg_src, reg) {
      nir_intrinsic_instr *load =
         nir_instr_as_intrinsic(nir_src_parent_instr(load_reg_src));
      b->cursor = nir_before_instr(&load->instr);

      int base = nir_intrinsic_base(load);
      nir_def *load_lo, *load_hi;

      if (load->intrinsic == nir_intrinsic_load_reg) {
         load_lo =
            nir_build_load_reg(b, num_components, 32, reg_lo, .base = base);
         load_hi =
            nir_build_load_reg(b, num_components, 32, reg_hi, .base = base);
      } else {
         assert(load->intrinsic == nir_intrinsic_load_reg_indirect);

         nir_def *offset = load->src[1].ssa;
         load_lo = nir_load_reg_indirect(b, num_components, 32, reg_lo, offset,
                                         .base = base);
         load_hi = nir_load_reg_indirect(b, num_components, 32, reg_hi, offset,
                                         .base = base);
      }

      nir_def *packed = nir_pack_64_2x32_split(b, load_lo, load_hi);
      nir_def_rewrite_uses(&load->def, packed);
      nir_instr_remove(&load->instr);
   }

   nir_instr_remove(&reg->instr);
}

bool
ir3_nir_lower_64b_regs(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function_impl (impl, shader) {
      bool impl_progress = false;
      nir_builder b = nir_builder_create(impl);

      nir_foreach_reg_decl_safe (reg, impl) {
         if (nir_intrinsic_bit_size(reg) == 64) {
            lower_64b_reg(&b, reg);
            impl_progress = true;
         }
      }

      if (impl_progress) {
         nir_metadata_preserve(
            impl, nir_metadata_block_index | nir_metadata_dominance);
         progress = true;
      }
   }

   return progress;
}
399