/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "compiler/nir/nir_builder.h"

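/*
 * Lowers compute-shader system-value intrinsics: gl_LocalInvocationIndex and
 * gl_LocalInvocationID are rebuilt from the subgroup ID, the subgroup
 * invocation, and the SIMD width; gl_NumSubgroups becomes a division by the
 * SIMD width; and 64-bit destinations of the remaining workgroup intrinsics
 * are narrowed to 32 bits.
 */
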
struct lower_intrinsics_state {
   nir_shader *nir;
   nir_function_impl *impl;
   bool progress;
   nir_builder builder;
};

static bool
lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
                                  nir_block *block)
{
   bool progress = false;
   nir_builder *b = &state->builder;
   nir_shader *nir = state->nir;

   /* Reuse calculated values inside the block. */
   nir_ssa_def *local_index = NULL;
   nir_ssa_def *local_id = NULL;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);

      b->cursor = nir_after_instr(&intrinsic->instr);

      nir_ssa_def *sysval;
      switch (intrinsic->intrinsic) {
      case nir_intrinsic_load_workgroup_size:
      case nir_intrinsic_load_workgroup_id:
      case nir_intrinsic_load_num_workgroups:
         /* Convert this to 32-bit if it's not already. */
         if (intrinsic->dest.ssa.bit_size == 64) {
            intrinsic->dest.ssa.bit_size = 32;
            sysval = nir_u2u64(b, &intrinsic->dest.ssa);
            nir_ssa_def_rewrite_uses_after(&intrinsic->dest.ssa,
                                           sysval,
                                           sysval->parent_instr);
         }
         continue;

      case nir_intrinsic_load_local_invocation_index:
      case nir_intrinsic_load_local_invocation_id: {
         if (nir->info.stage == MESA_SHADER_TASK ||
             nir->info.stage == MESA_SHADER_MESH) {
            /* Will be lowered by nir_emit_task_mesh_intrinsic() using
             * information from the payload.
             */
            continue;
         }

         /* The first time we use these, calculate them. */
         if (!local_index) {
            assert(!local_id);

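            /* The lowering assumes each subgroup covers a contiguous run of
             * SIMD-width invocations, so the flat index of this invocation
             * within the workgroup is
             * subgroup_id * simd_width + subgroup_invocation.
             */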
            nir_ssa_def *subgroup_id = nir_load_subgroup_id(b);

            nir_ssa_def *thread_local_id =
               nir_imul(b, subgroup_id, nir_load_simd_width_intel(b));
            nir_ssa_def *channel = nir_load_subgroup_invocation(b);
            nir_ssa_def *linear = nir_iadd(b, channel, thread_local_id);

            nir_ssa_def *size_x;
            nir_ssa_def *size_y;
            if (state->nir->info.workgroup_size_variable) {
               nir_ssa_def *size_xyz = nir_load_workgroup_size(b);
               size_x = nir_channel(b, size_xyz, 0);
               size_y = nir_channel(b, size_xyz, 1);
            } else {
               size_x = nir_imm_int(b, nir->info.workgroup_size[0]);
               size_y = nir_imm_int(b, nir->info.workgroup_size[1]);
            }
            nir_ssa_def *size_xy = nir_imul(b, size_x, size_y);

            /* The local invocation index and ID must respect the following
             * relations:
             *
             *    gl_LocalInvocationID.x =
             *       gl_LocalInvocationIndex % gl_WorkGroupSize.x;
             *    gl_LocalInvocationID.y =
             *       (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
             *       gl_WorkGroupSize.y;
             *    gl_LocalInvocationID.z =
             *       (gl_LocalInvocationIndex /
             *        (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
             *       gl_WorkGroupSize.z;
             *
             * However, the final % gl_WorkGroupSize.z does nothing unless we
             * accidentally end up with a gl_LocalInvocationIndex that is too
             * large, so it can safely be omitted.
             */

            nir_ssa_def *id_x, *id_y, *id_z;
            switch (state->nir->info.cs.derivative_group) {
            case DERIVATIVE_GROUP_NONE:
               if (nir->info.num_images == 0 &&
                   nir->info.num_textures == 0) {
                  /* X-major lid order. Optimal for linear accesses only,
                   * which are usually buffers. X,Y ordering will look like:
                   * (0,0) (1,0) (2,0) ... (size_x-1,0) (0,1) (1,1) ...
                   */
                  id_x = nir_umod(b, linear, size_x);
                  id_y = nir_umod(b, nir_udiv(b, linear, size_x), size_y);
                  local_index = linear;
               } else if (!nir->info.workgroup_size_variable &&
                          nir->info.workgroup_size[1] % 4 == 0) {
                  /* 1x4 block X-major lid order. Same as X-major except it
                   * increments in blocks of width=1, height=4. Always optimal
                   * for tileY and usually optimal for linear accesses.
                   *   x = (linear / 4) % size_x
                   *   y = ((linear % 4) + (linear / 4 / size_x) * 4) % size_y
                   * X,Y ordering will look like: (0,0) (0,1) (0,2) (0,3)
                   * (1,0) (1,1) (1,2) (1,3) (2,0) ... (size_x-1,3) (0,4)
                   * (0,5) (0,6) (0,7) (1,4) ...
                   */
                  const unsigned height = 4;
                  nir_ssa_def *block = nir_udiv_imm(b, linear, height);
                  id_x = nir_umod(b, block, size_x);
                  id_y = nir_umod(b,
                                  nir_iadd(b,
                                           nir_umod(b, linear, nir_imm_int(b, height)),
                                           nir_imul_imm(b,
                                                        nir_udiv(b, block, size_x),
                                                        height)),
                                  size_y);
               } else {
                  /* Y-major lid order. Optimal for tileY accesses only,
                   * which are usually images. X,Y ordering will look like:
                   * (0,0) (0,1) (0,2) ... (0,size_y-1) (1,0) (1,1) ...
                   */
                  id_y = nir_umod(b, linear, size_y);
                  id_x = nir_umod(b, nir_udiv(b, linear, size_y), size_x);
               }

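               /* Whatever XY ordering was chosen above, Z simply advances
                * once per full XY slice of the workgroup.
                */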
               id_z = nir_udiv(b, linear, size_xy);
               local_id = nir_vec3(b, id_x, id_y, id_z);
               if (!local_index) {
                  local_index = nir_iadd(b, nir_iadd(b, id_x,
                                                     nir_imul(b, id_y, size_x)),
                                         nir_imul(b, id_z, size_xy));
               }
               break;
            case DERIVATIVE_GROUP_LINEAR:
               /* For linear, just set the local invocation index linearly,
                * and calculate the local invocation ID from that.
                */
               id_x = nir_umod(b, linear, size_x);
               id_y = nir_umod(b, nir_udiv(b, linear, size_x), size_y);
               id_z = nir_udiv(b, linear, size_xy);
               local_id = nir_vec3(b, id_x, id_y, id_z);
               local_index = linear;
               break;
            case DERIVATIVE_GROUP_QUADS: {
               /* For quads, first we figure out the 2x2 grid the invocation
                * belongs to -- treating extra Z layers as just more rows.
                * Then map that onto the local invocation ID (trivial) and the
                * local invocation index.  Skipping Z simplifies the index
                * calculation.
                */

               nir_ssa_def *one = nir_imm_int(b, 1);
               nir_ssa_def *double_size_x = nir_ishl(b, size_x, one);

               /* ID within a pair of rows, where each group of 4 is a 2x2 quad. */
               nir_ssa_def *row_pair_id = nir_umod(b, linear, double_size_x);
               nir_ssa_def *y_row_pairs = nir_udiv(b, linear, double_size_x);

               nir_ssa_def *x =
                  nir_ior(b,
                          nir_iand(b, row_pair_id, one),
                          nir_iand(b, nir_ishr(b, row_pair_id, one),
                                   nir_imm_int(b, 0xfffffffe)));
               nir_ssa_def *y =
                  nir_ior(b,
                          nir_ishl(b, y_row_pairs, one),
                          nir_iand(b, nir_ishr(b, row_pair_id, one), one));

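               /* Groups of four consecutive invocations form 2x2 quads, e.g.
                * for linear = 0..7 the (x, y) pairs above come out as:
                * (0,0) (1,0) (0,1) (1,1) (2,0) (3,0) (2,1) (3,1)
                */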
               local_id = nir_vec3(b, x,
                                   nir_umod(b, y, size_y),
                                   nir_udiv(b, y, size_y));
               local_index = nir_iadd(b, x, nir_imul(b, y, size_x));
               break;
            }
            default:
               unreachable("invalid derivative group");
            }
         }

         assert(local_id);
         assert(local_index);
         if (intrinsic->intrinsic == nir_intrinsic_load_local_invocation_id)
            sysval = local_id;
         else
            sysval = local_index;
         break;
      }

      case nir_intrinsic_load_num_subgroups: {
         nir_ssa_def *size;
         if (state->nir->info.workgroup_size_variable) {
            nir_ssa_def *size_xyz = nir_load_workgroup_size(b);
            nir_ssa_def *size_x = nir_channel(b, size_xyz, 0);
            nir_ssa_def *size_y = nir_channel(b, size_xyz, 1);
            nir_ssa_def *size_z = nir_channel(b, size_xyz, 2);
            size = nir_imul(b, nir_imul(b, size_x, size_y), size_z);
         } else {
            size = nir_imm_int(b, nir->info.workgroup_size[0] *
                                  nir->info.workgroup_size[1] *
                                  nir->info.workgroup_size[2]);
         }

         /* Calculate the equivalent of DIV_ROUND_UP:
          *    num_subgroups = (size + simd_width - 1) / simd_width
          */
         nir_ssa_def *simd_width = nir_load_simd_width_intel(b);
         sysval =
            nir_udiv(b, nir_iadd_imm(b, nir_iadd(b, size, simd_width), -1),
                        simd_width);
         break;
      }

      default:
         continue;
      }

      if (intrinsic->dest.ssa.bit_size == 64)
         sysval = nir_u2u64(b, sysval);

      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, sysval);
      nir_instr_remove(&intrinsic->instr);

      /* Record progress both locally (the return value of this function)
       * and for the pass as a whole.
       */
      progress = true;
      state->progress = true;
   }

   return progress;
}

static void
lower_cs_intrinsics_convert_impl(struct lower_intrinsics_state *state)
{
   nir_builder_init(&state->builder, state->impl);

   nir_foreach_block(block, state->impl) {
      lower_cs_intrinsics_convert_block(state, block);
   }

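   /* Instructions were added and removed, but the control-flow graph itself
    * was not modified, so block indices and dominance information are still
    * valid.
    */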
   nir_metadata_preserve(state->impl,
                         nir_metadata_block_index | nir_metadata_dominance);
}

bool
brw_nir_lower_cs_intrinsics(nir_shader *nir)
{
   assert(gl_shader_stage_uses_workgroup(nir->info.stage));

   struct lower_intrinsics_state state = {
      .nir = nir,
   };

   /* Constraints from NV_compute_shader_derivatives. */
   if (gl_shader_stage_is_compute(nir->info.stage) &&
       !nir->info.workgroup_size_variable) {
      if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
         assert(nir->info.workgroup_size[0] % 2 == 0);
         assert(nir->info.workgroup_size[1] % 2 == 0);
      } else if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
         ASSERTED unsigned workgroup_size =
            nir->info.workgroup_size[0] *
            nir->info.workgroup_size[1] *
            nir->info.workgroup_size[2];
         assert(workgroup_size % 4 == 0);
      }
   }

   nir_foreach_function(function, nir) {
      if (function->impl) {
         state.impl = function->impl;
         lower_cs_intrinsics_convert_impl(&state);
      }
   }

   return state.progress;
}
306