/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "util/set.h"
#include "util/u_math.h"
#include "nir.h"
#include "nir_builder.h"

struct lower_sysval_state {
   const nir_lower_compute_system_values_options *options;

   /* List of intrinsics that have already been lowered and shouldn't be
    * lowered again.
    */
   struct set *lower_once_list;
};

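/* These system values are produced as 32-bit intrinsics.  If a shader loads
 * one at a wider bit size, rewrite the intrinsic's destination to 32 bits
 * and emit an unsigned conversion back up to the requested size.  Returns
 * NULL when the destination is already 32-bit and nothing needs to change.
 */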
static nir_def *
sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
{
   const unsigned bit_size = intrin->def.bit_size;
   if (bit_size == 32)
      return NULL;

   intrin->def.bit_size = 32;
   return nir_u2uN(b, &intrin->def, bit_size);
}

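/* Builds the total global invocation count per dimension, i.e.
 * gl_WorkGroupSize * gl_NumWorkGroups, with both operands converted to the
 * requested bit size before the multiply.
 */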
static nir_def *
build_global_group_size(nir_builder *b, unsigned bit_size)
{
   nir_def *group_size = nir_load_workgroup_size(b);
   nir_def *num_workgroups = nir_load_num_workgroups(b);
   return nir_imul(b, nir_u2uN(b, group_size, bit_size),
                   nir_u2uN(b, num_workgroups, bit_size));
}

static bool
lower_system_value_filter(const nir_instr *instr, const void *_state)
{
   return instr->type == nir_instr_type_intrinsic;
}

static nir_def *
lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return NULL;

   const unsigned bit_size = intrin->def.bit_size;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      if (b->shader->options->vertex_id_zero_based) {
         return nir_iadd(b, nir_load_vertex_id_zero_base(b),
                         nir_load_first_vertex(b));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_base_vertex:
      /**
       * From the OpenGL 4.6 (11.1.3.9 Shader Inputs) specification:
       *
       * "gl_BaseVertex holds the integer value passed to the baseVertex
       * parameter to the command that resulted in the current shader
       * invocation. In the case where the command has no baseVertex
       * parameter, the value of gl_BaseVertex is zero."
       */
      if (b->shader->options->lower_base_vertex) {
         return nir_iand(b, nir_load_is_indexed_draw(b),
                         nir_load_first_vertex(b));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_helper_invocation:
      if (b->shader->options->lower_helper_invocation) {
         return nir_build_lowered_load_helper_invocation(b);
      } else {
         return NULL;
      }

   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_num_workgroups:
   case nir_intrinsic_load_workgroup_id:
   case nir_intrinsic_load_workgroup_size:
      return sanitize_32bit_sysval(b, intrin);

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset: {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      if (!nir_deref_mode_is(deref, nir_var_system_value))
         return NULL;

      nir_variable *var = deref->var;
      enum glsl_interp_mode interp_mode;

      if (var->data.location == SYSTEM_VALUE_BARYCENTRIC_PERSP_COORD) {
         interp_mode = INTERP_MODE_SMOOTH;
      } else {
         assert(var->data.location == SYSTEM_VALUE_BARYCENTRIC_LINEAR_COORD);
         interp_mode = INTERP_MODE_NOPERSPECTIVE;
      }

      switch (intrin->intrinsic) {
      case nir_intrinsic_interp_deref_at_centroid:
         return nir_load_barycentric_coord_centroid(b, 32, .interp_mode = interp_mode);
      case nir_intrinsic_interp_deref_at_sample:
         return nir_load_barycentric_coord_at_sample(b, 32, intrin->src[1].ssa,
                                                     .interp_mode = interp_mode);
      case nir_intrinsic_interp_deref_at_offset:
         return nir_load_barycentric_coord_at_offset(b, 32, intrin->src[1].ssa,
                                                     .interp_mode = interp_mode);
      default:
         unreachable("Bogus interpolateAt() intrinsic.");
      }
   }

   case nir_intrinsic_load_input:
      if (b->shader->options->lower_layer_fs_input_to_sysval &&
          b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          nir_intrinsic_io_semantics(intrin).location == VARYING_SLOT_LAYER)
         return nir_load_layer_id(b);
      else
         return NULL;

   case nir_intrinsic_load_deref: {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      if (!nir_deref_mode_is(deref, nir_var_system_value))
         return NULL;

      nir_def *column = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         /* The only system values that aren't plain variables are
          * gl_SampleMask, which is always an array of one element, and a
          * couple of ray-tracing intrinsics, which are matrices.
          */
         assert(deref->deref_type == nir_deref_type_array);
         column = deref->arr.index.ssa;
         nir_deref_instr *arr_deref = deref;
         deref = nir_deref_instr_parent(deref);
         assert(deref->deref_type == nir_deref_type_var);

         switch (deref->var->data.location) {
         case SYSTEM_VALUE_TESS_LEVEL_INNER:
         case SYSTEM_VALUE_TESS_LEVEL_OUTER: {
            nir_def *sysval = (deref->var->data.location ==
                               SYSTEM_VALUE_TESS_LEVEL_INNER)
                                 ? nir_load_tess_level_inner(b)
                                 : nir_load_tess_level_outer(b);
            return nir_vector_extract(b, sysval, arr_deref->arr.index.ssa);
         }

         case SYSTEM_VALUE_SAMPLE_MASK_IN:
         case SYSTEM_VALUE_RAY_OBJECT_TO_WORLD:
         case SYSTEM_VALUE_RAY_WORLD_TO_OBJECT:
         case SYSTEM_VALUE_MESH_VIEW_INDICES:
         case SYSTEM_VALUE_RAY_TRIANGLE_VERTEX_POSITIONS:
            /* These are all single-element arrays in our implementation, and
             * the sysval load below just drops the 0 array index.
             */
            break;

         default:
            unreachable("unsupported system value array deref");
         }
      }
      nir_variable *var = deref->var;

      switch (var->data.location) {
      case SYSTEM_VALUE_INSTANCE_INDEX:
         return nir_iadd(b, nir_load_instance_id(b),
                         nir_load_base_instance(b));

      case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      case SYSTEM_VALUE_SUBGROUP_LT_MASK: {
         nir_intrinsic_op op =
            nir_intrinsic_from_system_value(var->data.location);
         nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
         nir_def_init_for_type(&load->instr, &load->def, var->type);
         load->num_components = load->def.num_components;
         nir_builder_instr_insert(b, &load->instr);
         return &load->def;
      }

      case SYSTEM_VALUE_DEVICE_INDEX:
         if (b->shader->options->lower_device_index_to_zero)
            return nir_imm_int(b, 0);
         break;

      case SYSTEM_VALUE_GLOBAL_GROUP_SIZE:
         return build_global_group_size(b, bit_size);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_centroid,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_centroid,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_model,
                                     INTERP_MODE_NONE);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_COORD:
      case SYSTEM_VALUE_BARYCENTRIC_PERSP_COORD: {
         enum glsl_interp_mode interp_mode;

         if (var->data.location == SYSTEM_VALUE_BARYCENTRIC_PERSP_COORD) {
            interp_mode = INTERP_MODE_SMOOTH;
         } else {
            assert(var->data.location == SYSTEM_VALUE_BARYCENTRIC_LINEAR_COORD);
            interp_mode = INTERP_MODE_NOPERSPECTIVE;
         }

         if (var->data.sample) {
            return nir_load_barycentric_coord_sample(b, 32, .interp_mode = interp_mode);
         } else if (var->data.centroid) {
            return nir_load_barycentric_coord_centroid(b, 32, .interp_mode = interp_mode);
         } else {
            return nir_load_barycentric_coord_pixel(b, 32, .interp_mode = interp_mode);
         }
      }

      case SYSTEM_VALUE_HELPER_INVOCATION: {
         /* When the demote operation is used, reading HelperInvocation
          * needs to use Volatile memory access semantics to provide the
          * correct (dynamic) value.  See OpDemoteToHelperInvocation.
          */
         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
            return nir_is_helper_invocation(b, 1);
         break;
      }

      case SYSTEM_VALUE_MESH_VIEW_INDICES:
         return nir_load_mesh_view_indices(b, intrin->def.num_components,
                                           bit_size, column, .base = 0,
                                           .range = intrin->def.num_components * bit_size / 8);

      default:
         break;
      }

      nir_intrinsic_op sysval_op =
         nir_intrinsic_from_system_value(var->data.location);
      if (glsl_type_is_matrix(var->type)) {
         assert(nir_intrinsic_infos[sysval_op].index_map[NIR_INTRINSIC_COLUMN] > 0);
         unsigned num_cols = glsl_get_matrix_columns(var->type);
         ASSERTED unsigned num_rows = glsl_get_vector_elements(var->type);
         assert(num_rows == intrin->def.num_components);

         nir_def *cols[4];
         for (unsigned i = 0; i < num_cols; i++) {
            cols[i] = nir_load_system_value(b, sysval_op, i,
                                            intrin->def.num_components,
                                            intrin->def.bit_size);
            assert(cols[i]->num_components == num_rows);
         }
         return nir_select_from_ssa_def_array(b, cols, num_cols, column);
      } else if (glsl_type_is_array(var->type)) {
         unsigned num_elems = glsl_get_length(var->type);
         ASSERTED const struct glsl_type *elem_type = glsl_get_array_element(var->type);
         assert(glsl_get_components(elem_type) == intrin->def.num_components);

         nir_def *elems[4];
         assert(ARRAY_SIZE(elems) >= num_elems);
         for (unsigned i = 0; i < num_elems; i++) {
            elems[i] = nir_load_system_value(b, sysval_op, i,
                                             intrin->def.num_components,
                                             intrin->def.bit_size);
         }
         return nir_select_from_ssa_def_array(b, elems, num_elems, column);
      } else {
         return nir_load_system_value(b, sysval_op, 0,
                                      intrin->def.num_components,
                                      intrin->def.bit_size);
      }
   }

   default:
      return NULL;
   }
}

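/* Open-coded gl_HelperInvocation: a helper invocation never has its bit set
 * in gl_SampleMaskIn, so this effectively computes
 *
 *    helper = (gl_SampleMaskIn & (1 << gl_SampleID)) == 0
 *
 * using the no-per-sample variant of the sample-id load so that the lowered
 * code doesn't itself force per-sample shading.
 */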
nir_def *
nir_build_lowered_load_helper_invocation(nir_builder *b)
{
   nir_def *tmp;
   tmp = nir_ishl(b, nir_imm_int(b, 1),
                  nir_load_sample_id_no_per_sample(b));
   tmp = nir_iand(b, nir_load_sample_mask_in(b), tmp);
   return nir_inot(b, nir_i2b(b, tmp));
}

bool
nir_lower_system_values(nir_shader *shader)
{
   bool progress = nir_shader_lower_instructions(shader,
                                                 lower_system_value_filter,
                                                 lower_system_value_instr,
                                                 NULL);

   /* We're going to delete the variables so we need to clean up all those
    * derefs we left lying around.
    */
   if (progress)
      nir_remove_dead_derefs(shader);

   nir_foreach_variable_with_modes_safe(var, shader, nir_var_system_value)
      exec_node_remove(&var->node);

   return progress;
}

static nir_def *
id_to_index_no_umod_slow(nir_builder *b, nir_def *index,
                         nir_def *size_x, nir_def *size_y,
                         unsigned bit_size)
{
   /* We recover the ID from the index with the following formula:
    *
    *    id.z = index / (size.x * size.y)
    *    id.y = (index - (id.z * (size.x * size.y))) / size.x
    *    id.x = index - ((id.z * (size.x * size.y)) + (id.y * size.x))
    *
    * This is more efficient than a umod-based lowering on hardware that
    * doesn't have a modulo-division instruction, and when the size is
    * either not known at compile time or not a power of two.
    */
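   /* For example, with index = 23 and size = (4, 3, 2):
    *
    *    id.z = 23 / 12        = 1
    *    id.y = (23 - 12) / 4  = 2
    *    id.x = 23 - (12 + 8)  = 3
    *
    * i.e. (x, y, z) = (3, 2, 1), since 23 == 1*12 + 2*4 + 3.
    */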

   nir_def *size_x_y = nir_imul(b, size_x, size_y);
   nir_def *id_z = nir_udiv(b, index, size_x_y);
   nir_def *z_portion = nir_imul(b, id_z, size_x_y);
   nir_def *id_y = nir_udiv(b, nir_isub(b, index, z_portion), size_x);
   nir_def *y_portion = nir_imul(b, id_y, size_x);
   nir_def *id_x = nir_isub(b, index, nir_iadd(b, z_portion, y_portion));

   return nir_u2uN(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
}

static nir_def *
lower_id_to_index_no_umod(nir_builder *b, nir_def *index,
                          nir_def *size, unsigned bit_size,
                          const uint32_t *size_imm,
                          bool shortcut_1d)
{
   nir_def *size_x, *size_y;

   if (size_imm[0] > 0)
      size_x = nir_imm_int(b, size_imm[0]);
   else
      size_x = nir_channel(b, size, 0);

   if (size_imm[1] > 0)
      size_y = nir_imm_int(b, size_imm[1]);
   else
      size_y = nir_channel(b, size, 1);

   if (shortcut_1d) {
      /* if size.y + size.z == 2 (which means that both y and z are 1)
       *    id = vec3(index, 0, 0)
       * else
       *    id = id_to_index_no_umod_slow
       */

      nir_def *size_z = nir_channel(b, size, 2);
      nir_def *cond = nir_ieq(b, nir_iadd(b, size_y, size_z), nir_imm_int(b, 2));

      nir_def *val1, *val2;
      nir_if *if_opt = nir_push_if(b, cond);
      if_opt->control = nir_selection_control_dont_flatten;
      {
         nir_def *zero = nir_imm_int(b, 0);
         val1 = nir_u2uN(b, nir_vec3(b, index, zero, zero), bit_size);
      }
      nir_push_else(b, if_opt);
      {
         val2 = id_to_index_no_umod_slow(b, index, size_x, size_y, bit_size);
      }
      nir_pop_if(b, if_opt);

      return nir_if_phi(b, val1, val2);
   } else {
      return id_to_index_no_umod_slow(b, index, size_x, size_y, bit_size);
   }
}

static nir_def *
lower_id_to_index(nir_builder *b, nir_def *index, nir_def *size,
                  unsigned bit_size)
{
   /* We lower gl_LocalInvocationID based on gl_LocalInvocationIndex
    * using this formula:
    *
    *    id.x = index % size.x;
    *    id.y = (index / size.x) % size.y;
    *    id.z = (index / (size.x * size.y)) % size.z;
    *
    * However, the final % size.z does nothing unless we accidentally
    * end up with an index that is too large, so it can safely be
    * omitted.
    *
    * Because no hardware supports a local workgroup size greater than
    * about 1K, this calculation can be done in 32-bit and can save some
    * 64-bit arithmetic.
    */
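   /* For example, index = 23 with size = (4, 3, 2) yields
    *
    *    id.x = 23 % 4       = 3
    *    id.y = (23 / 4) % 3 = 2
    *    id.z = 23 / (4 * 3) = 1
    *
    * matching the umod-free computation in id_to_index_no_umod_slow().
    */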

   nir_def *size_x = nir_channel(b, size, 0);
   nir_def *size_y = nir_channel(b, size, 1);

   nir_def *id_x = nir_umod(b, index, size_x);
   nir_def *id_y = nir_umod(b, nir_udiv(b, index, size_x), size_y);
   nir_def *id_z = nir_udiv(b, index, nir_imul(b, size_x, size_y));

   return nir_u2uN(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
}

static bool
lower_compute_system_value_filter(const nir_instr *instr, const void *_state)
{
   return instr->type == nir_instr_type_intrinsic;
}

static nir_def *
try_lower_id_to_index_1d(nir_builder *b, nir_def *index, const uint32_t *size)
{
   /* size_x = 1, size_y = 1, therefore Z = local index */
   if (size[0] == 1 && size[1] == 1)
      return nir_vec3(b, nir_imm_int(b, 0), nir_imm_int(b, 0), index);

   /* size_x = 1, size_z = 1, therefore Y = local index */
   if (size[0] == 1 && size[2] == 1)
      return nir_vec3(b, nir_imm_int(b, 0), index, nir_imm_int(b, 0));

   /* size_y = 1, size_z = 1, therefore X = local index */
   if (size[1] == 1 && size[2] == 1)
      return nir_vec3(b, index, nir_imm_int(b, 0), nir_imm_int(b, 0));

   return NULL;
}

static nir_def *
lower_compute_system_value_instr(nir_builder *b,
                                 nir_instr *instr, void *_state)
{
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   struct lower_sysval_state *state = (struct lower_sysval_state *)_state;
   const nir_lower_compute_system_values_options *options = state->options;

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return NULL;

   const unsigned bit_size = intrin->def.bit_size;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_local_invocation_id:
      /* If lower_cs_local_id_to_index is true, then we replace
       * local_invocation_id with a formula based on local_invocation_index.
       */
      if (b->shader->options->lower_cs_local_id_to_index ||
          (options && options->lower_cs_local_id_to_index)) {
         nir_def *local_index = nir_load_local_invocation_index(b);

         if (!b->shader->info.workgroup_size_variable) {
            /* Shortcut for 1 dimensional workgroups:
             * Use local_invocation_index directly, which is better than
             * lower_id_to_index + constant folding, because
             * this way we don't leave behind extra ALU instrs.
             */

            uint32_t wg_size[3] = {b->shader->info.workgroup_size[0],
                                   b->shader->info.workgroup_size[1],
                                   b->shader->info.workgroup_size[2]};
            nir_def *val = try_lower_id_to_index_1d(b, local_index, wg_size);
            if (val)
               return val;
         }

         nir_def *local_size = nir_load_workgroup_size(b);
         return lower_id_to_index(b, local_index, local_size, bit_size);
      }
      if (options && options->shuffle_local_ids_for_quad_derivatives &&
          b->shader->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS &&
          _mesa_set_search(state->lower_once_list, instr) == NULL) {
         nir_def *ids = nir_load_local_invocation_id(b);
         _mesa_set_add(state->lower_once_list, ids->parent_instr);

         nir_def *x = nir_channel(b, ids, 0);
         nir_def *y = nir_channel(b, ids, 1);
         nir_def *z = nir_channel(b, ids, 2);
         unsigned size_x = b->shader->info.workgroup_size[0];
         nir_def *size_x_imm;

         if (b->shader->info.workgroup_size_variable)
            size_x_imm = nir_channel(b, nir_load_workgroup_size(b), 0);
         else
            size_x_imm = nir_imm_int(b, size_x);

         /* Remap indices from:
          *    | 0| 1| 2| 3|
          *    | 4| 5| 6| 7|
          *    | 8| 9|10|11|
          *    |12|13|14|15|
          * to:
          *    | 0| 1| 4| 5|
          *    | 2| 3| 6| 7|
          *    | 8| 9|12|13|
          *    |10|11|14|15|
          *
          * That's the layout required by AMD hardware for derivatives to
          * work. Other hardware may work differently.
          *
          * It's a classic tiling pattern that can be implemented by inserting
          * bit y[0] between bits x[0] and x[1] like this:
          *
          *    x[0],y[0],x[1],...x[last],y[1],...,y[last]
          *
          * If the width is a power of two, use:
          *    i = ((x & 1) | ((y & 1) << 1) | ((x & ~1) << 1)) | ((y & ~1) << logbase2(size_x))
          *
          * If the width is not a power of two or the local size is variable, use:
          *    i = ((x & 1) | ((y & 1) << 1) | ((x & ~1) << 1)) + ((y & ~1) * size_x)
          *
          * GL_NV_compute_shader_derivatives requires that the width and height
          * are each a multiple of two, which is also a requirement for the
          * second expression to work.
          *
          * The 2D result is: (x,y) = (i % w, i / w)
          */
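         /* For example, in a 4-wide workgroup, the invocation with
          * (x, y) = (1, 1) (linear index 5 in the original layout) computes
          * i = ((1 & 1) | ((1 & 1) << 1) | ((1 & ~1) << 1)) | (0 << 2) = 3,
          * so its new position is (3 % 4, 3 / 4) = (3, 0), exactly where the
          * remapped table above places index 5.
          */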

         nir_def *one = nir_imm_int(b, 1);
         nir_def *inv_one = nir_imm_int(b, ~1);
         nir_def *x_bit0 = nir_iand(b, x, one);
         nir_def *y_bit0 = nir_iand(b, y, one);
         nir_def *x_bits_1n = nir_iand(b, x, inv_one);
         nir_def *y_bits_1n = nir_iand(b, y, inv_one);
         nir_def *bits_01 = nir_ior(b, x_bit0, nir_ishl(b, y_bit0, one));
         nir_def *bits_01x = nir_ior(b, bits_01,
                                     nir_ishl(b, x_bits_1n, one));
         nir_def *i;

         if (!b->shader->info.workgroup_size_variable &&
             util_is_power_of_two_nonzero(size_x)) {
            nir_def *log2_size_x = nir_imm_int(b, util_logbase2(size_x));
            i = nir_ior(b, bits_01x, nir_ishl(b, y_bits_1n, log2_size_x));
         } else {
            i = nir_iadd(b, bits_01x, nir_imul(b, y_bits_1n, size_x_imm));
         }

         /* This should be fast if size_x is an immediate or even a power
          * of two.
          */
         x = nir_umod(b, i, size_x_imm);
         y = nir_udiv(b, i, size_x_imm);

         return nir_vec3(b, x, y, z);
      }

      /* If a workgroup size dimension is 1, then the local invocation id must be zero. */
      nir_component_mask_t is_zero = 0;
      is_zero |= b->shader->info.workgroup_size[0] == 1 ? 0x1 : 0x0;
      is_zero |= b->shader->info.workgroup_size[1] == 1 ? 0x2 : 0x0;
      is_zero |= b->shader->info.workgroup_size[2] == 1 ? 0x4 : 0x0;
      if (!b->shader->info.workgroup_size_variable && is_zero) {
         nir_scalar defs[3];
         for (unsigned i = 0; i < 3; i++) {
            defs[i] = is_zero & (1 << i) ? nir_get_scalar(nir_imm_zero(b, 1, 32), 0)
                                         : nir_get_scalar(&intrin->def, i);
         }
         return nir_vec_scalars(b, defs, 3);
      }

      return NULL;

   case nir_intrinsic_load_local_invocation_index:
      /* If lower_cs_local_index_to_id is true, then we replace
       * local_invocation_index with a formula based on local_invocation_id.
       */
      if (b->shader->options->lower_cs_local_index_to_id ||
          (options && options->lower_local_invocation_index)) {
         /* From the GLSL man page for gl_LocalInvocationIndex:
          *
          *    "The value of gl_LocalInvocationIndex is equal to
          *    gl_LocalInvocationID.z * gl_WorkGroupSize.x *
          *    gl_WorkGroupSize.y + gl_LocalInvocationID.y *
          *    gl_WorkGroupSize.x + gl_LocalInvocationID.x"
          */
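         /* For example, id = (3, 2, 1) with a workgroup size of (4, 3, 2)
          * gives 1*4*3 + 2*4 + 3 = 23, the inverse of the mapping computed
          * in lower_id_to_index().
          */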
         nir_def *local_id = nir_load_local_invocation_id(b);
         nir_def *local_size = nir_load_workgroup_size(b);
         nir_def *size_x = nir_channel(b, local_size, 0);
         nir_def *size_y = nir_channel(b, local_size, 1);

         /* Because no hardware supports a local workgroup size greater than
          * about 1K, this calculation can be done in 32-bit and can save some
          * 64-bit arithmetic.
          */
         nir_def *index;
         index = nir_imul(b, nir_channel(b, local_id, 2),
                          nir_imul(b, size_x, size_y));
         index = nir_iadd(b, index,
                          nir_imul(b, nir_channel(b, local_id, 1), size_x));
         index = nir_iadd(b, index, nir_channel(b, local_id, 0));
         return nir_u2uN(b, index, bit_size);
      } else {
         return NULL;
      }

   case nir_intrinsic_load_workgroup_size:
      if (b->shader->info.workgroup_size_variable) {
         /* If the local workgroup size is variable it can't be lowered at
          * this point.  We do, however, have to make sure that the intrinsic
          * is only 32-bit.
          */
         return NULL;
      } else {
         /* Using a 32-bit constant is safe here, as no device/driver needs
          * more than 32 bits for the local size.
          */
         nir_const_value workgroup_size_const[3];
         memset(workgroup_size_const, 0, sizeof(workgroup_size_const));
         workgroup_size_const[0].u32 = b->shader->info.workgroup_size[0];
         workgroup_size_const[1].u32 = b->shader->info.workgroup_size[1];
         workgroup_size_const[2].u32 = b->shader->info.workgroup_size[2];
         return nir_u2uN(b, nir_build_imm(b, 3, 32, workgroup_size_const), bit_size);
      }

   case nir_intrinsic_load_global_invocation_id_zero_base: {
      if ((options && options->has_base_workgroup_id) ||
          !b->shader->options->has_cs_global_id) {
         nir_def *group_size = nir_load_workgroup_size(b);
         nir_def *group_id = nir_load_workgroup_id(b);
         nir_def *local_id = nir_load_local_invocation_id(b);

         return nir_iadd(b, nir_imul(b, nir_u2uN(b, group_id, bit_size),
                                     nir_u2uN(b, group_size, bit_size)),
                         nir_u2uN(b, local_id, bit_size));
      } else {
         return NULL;
      }
   }

   case nir_intrinsic_load_global_invocation_id: {
      if (options && options->has_base_global_invocation_id)
         return nir_iadd(b, nir_load_global_invocation_id_zero_base(b, bit_size),
                         nir_load_base_global_invocation_id(b, bit_size));
      else if ((options && options->has_base_workgroup_id) ||
               !b->shader->options->has_cs_global_id)
         return nir_load_global_invocation_id_zero_base(b, bit_size);
      else
         return NULL;
   }

   case nir_intrinsic_load_global_invocation_index: {
      /* OpenCL's global_linear_id explicitly removes the global offset before
       * computing this.
       */
      assert(b->shader->info.stage == MESA_SHADER_KERNEL);
      nir_def *global_base_id = nir_load_base_global_invocation_id(b, bit_size);
      nir_def *global_id = nir_isub(b, nir_load_global_invocation_id(b, bit_size), global_base_id);
      nir_def *global_size = build_global_group_size(b, bit_size);

      /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
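      /* e.g. id = (3, 2, 1) with size = (4, 3, 2):
       *    3 + ((2 + 1*3) * 4) = 23
       */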
      nir_def *index;
      index = nir_imul(b, nir_channel(b, global_id, 2),
                       nir_channel(b, global_size, 1));
      index = nir_iadd(b, nir_channel(b, global_id, 1), index);
      index = nir_imul(b, nir_channel(b, global_size, 0), index);
      index = nir_iadd(b, nir_channel(b, global_id, 0), index);
      return index;
   }

   case nir_intrinsic_load_workgroup_id: {
      if (options && options->has_base_workgroup_id)
         return nir_iadd(b, nir_u2uN(b, nir_load_workgroup_id_zero_base(b), bit_size),
                         nir_load_base_workgroup_id(b, bit_size));
      else if (options && options->lower_workgroup_id_to_index) {
         nir_def *wg_idx = nir_load_workgroup_index(b);

         nir_def *val =
            try_lower_id_to_index_1d(b, wg_idx, options->num_workgroups);
         if (val)
            return val;

         nir_def *num_workgroups = nir_load_num_workgroups(b);
         return lower_id_to_index_no_umod(b, wg_idx,
                                          nir_u2uN(b, num_workgroups, bit_size),
                                          bit_size,
                                          options->num_workgroups,
                                          options->shortcut_1d_workgroup_id);
      }

      return NULL;
   }

   case nir_intrinsic_load_num_workgroups: {
      if (!options)
         return NULL;

      const uint32_t *num_wgs_imm = options->num_workgroups;

      /* Exit early when none of the num workgroups components are known at
       * compile time.
       */
      if (num_wgs_imm[0] == 0 && num_wgs_imm[1] == 0 && num_wgs_imm[2] == 0)
         return NULL;

      b->cursor = nir_after_instr(instr);

      nir_def *num_wgs = &intrin->def;
      for (unsigned i = 0; i < 3; ++i) {
         if (num_wgs_imm[i])
            num_wgs = nir_vector_insert_imm(b, num_wgs, nir_imm_int(b, num_wgs_imm[i]), i);
      }

      return num_wgs;
   }

   case nir_intrinsic_load_shader_index:
      return nir_imm_int(b, b->shader->info.cs.shader_index);

   default:
      return NULL;
   }
}

bool
nir_lower_compute_system_values(nir_shader *shader,
                                const nir_lower_compute_system_values_options *options)
{
   if (!gl_shader_stage_uses_workgroup(shader->info.stage))
      return false;

   struct lower_sysval_state state;
   state.options = options;
   state.lower_once_list = _mesa_pointer_set_create(NULL);

   bool progress =
      nir_shader_lower_instructions(shader,
                                    lower_compute_system_value_filter,
                                    lower_compute_system_value_instr,
                                    (void *)&state);
   ralloc_free(state.lower_once_list);

   /* Update this so as not to lower it again. */
   if (options && options->shuffle_local_ids_for_quad_derivatives &&
       shader->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS)
      shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;

   return progress;
}
798