/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_builder.h"

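/* Force a system-value intrinsic to produce a 32-bit destination and, if the
 * original destination had a different bit size, emit a conversion back to
 * that size.  Returns NULL when the destination is already 32-bit.
 */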
static nir_ssa_def *
sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
{
   assert(intrin->dest.is_ssa);
   const unsigned bit_size = intrin->dest.ssa.bit_size;
   if (bit_size == 32)
      return NULL;

   intrin->dest.ssa.bit_size = 32;
   return nir_u2u(b, &intrin->dest.ssa, bit_size);
}

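/* Global workgroup size = local workgroup size * number of workgroups,
 * computed at the requested bit size.
 */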
static nir_ssa_def*
build_global_group_size(nir_builder *b, unsigned bit_size)
{
   nir_ssa_def *group_size = nir_load_local_group_size(b);
   nir_ssa_def *num_work_groups = nir_load_num_work_groups(b, bit_size);
   return nir_imul(b, nir_u2u(b, group_size, bit_size),
                      num_work_groups);
}

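/* nir_shader_lower_instructions() uses this filter to pre-select candidate
 * instructions; only intrinsics are ever rewritten, and the per-intrinsic
 * switch in lower_system_value_instr() does the rest of the filtering.
 */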
static bool
lower_system_value_filter(const nir_instr *instr, const void *_state)
{
   return instr->type == nir_instr_type_intrinsic;
}

static nir_ssa_def *
lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return NULL;

   assert(intrin->dest.is_ssa);
   const unsigned bit_size = intrin->dest.ssa.bit_size;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      if (b->shader->options->vertex_id_zero_based) {
         return nir_iadd(b, nir_load_vertex_id_zero_base(b),
                            nir_load_first_vertex(b));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_base_vertex:
      /**
       * From the OpenGL 4.6 (11.1.3.9 Shader Inputs) specification:
       *
       * "gl_BaseVertex holds the integer value passed to the baseVertex
       * parameter to the command that resulted in the current shader
       * invocation. In the case where the command has no baseVertex
       * parameter, the value of gl_BaseVertex is zero."
       */
      if (b->shader->options->lower_base_vertex) {
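         /* nir_load_is_indexed_draw() is ~0 for indexed draws and 0
          * otherwise, so the iand yields first_vertex for indexed draws and
          * 0 for non-indexed ones, matching the spec text above.
          */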
         return nir_iand(b, nir_load_is_indexed_draw(b),
                            nir_load_first_vertex(b));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_helper_invocation:
      if (b->shader->options->lower_helper_invocation) {
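         /* An invocation is a helper iff its bit is not set in
          * gl_SampleMaskIn: helper = !((1 << sample_id) & sample_mask_in).
          */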
         nir_ssa_def *tmp;
         tmp = nir_ishl(b, nir_imm_int(b, 1),
                           nir_load_sample_id_no_per_sample(b));
         tmp = nir_iand(b, nir_load_sample_mask_in(b), tmp);
         return nir_inot(b, nir_i2b(b, tmp));
      } else {
         return NULL;
      }

   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_local_group_size:
      return sanitize_32bit_sysval(b, intrin);

   case nir_intrinsic_load_deref: {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      if (!nir_deref_mode_is(deref, nir_var_system_value))
         return NULL;

      nir_ssa_def *column = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         /* The only system values that aren't plain variables are
          * gl_SampleMask, which is always an array of one element, and a
          * couple of ray-tracing system values, which are matrices.
          */
         assert(deref->deref_type == nir_deref_type_array);
         assert(deref->arr.index.is_ssa);
         column = deref->arr.index.ssa;
         deref = nir_deref_instr_parent(deref);
         assert(deref->deref_type == nir_deref_type_var);
         assert(deref->var->data.location == SYSTEM_VALUE_SAMPLE_MASK_IN ||
                deref->var->data.location == SYSTEM_VALUE_RAY_OBJECT_TO_WORLD ||
                deref->var->data.location == SYSTEM_VALUE_RAY_WORLD_TO_OBJECT);
      }
      nir_variable *var = deref->var;

      switch (var->data.location) {
      case SYSTEM_VALUE_INSTANCE_INDEX:
         return nir_iadd(b, nir_load_instance_id(b),
                            nir_load_base_instance(b));

      case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      case SYSTEM_VALUE_SUBGROUP_LT_MASK: {
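         /* Build the corresponding mask intrinsic directly so the destination
          * takes its number of components and bit size from the variable type.
          */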
         nir_intrinsic_op op =
            nir_intrinsic_from_system_value(var->data.location);
         nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
         nir_ssa_dest_init_for_type(&load->instr, &load->dest,
                                    var->type, NULL);
         load->num_components = load->dest.ssa.num_components;
         nir_builder_instr_insert(b, &load->instr);
         return &load->dest.ssa;
      }

      case SYSTEM_VALUE_DEVICE_INDEX:
         if (b->shader->options->lower_device_index_to_zero)
            return nir_imm_int(b, 0);
         break;

      case SYSTEM_VALUE_GLOBAL_GROUP_SIZE:
         return build_global_group_size(b, bit_size);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_centroid,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                                     INTERP_MODE_NOPERSPECTIVE);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_centroid,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                                     INTERP_MODE_SMOOTH);

      case SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL:
         return nir_load_barycentric(b, nir_intrinsic_load_barycentric_model,
                                     INTERP_MODE_NONE);

      default:
         break;
      }

      nir_intrinsic_op sysval_op =
         nir_intrinsic_from_system_value(var->data.location);
      if (glsl_type_is_matrix(var->type)) {
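         /* Matrix system values are loaded one column at a time and the
          * requested column is then selected dynamically from that array.
          */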
         assert(nir_intrinsic_infos[sysval_op].index_map[NIR_INTRINSIC_COLUMN] > 0);
         unsigned num_cols = glsl_get_matrix_columns(var->type);
         ASSERTED unsigned num_rows = glsl_get_vector_elements(var->type);
         assert(num_rows == intrin->dest.ssa.num_components);

         nir_ssa_def *cols[4];
         for (unsigned i = 0; i < num_cols; i++) {
            cols[i] = nir_load_system_value(b, sysval_op, i,
                                            intrin->dest.ssa.num_components,
                                            intrin->dest.ssa.bit_size);
            assert(cols[i]->num_components == num_rows);
         }
         return nir_select_from_ssa_def_array(b, cols, num_cols, column);
      } else {
         return nir_load_system_value(b, sysval_op, 0,
                                      intrin->dest.ssa.num_components,
                                      intrin->dest.ssa.bit_size);
      }
   }

   default:
      return NULL;
   }
}

bool
nir_lower_system_values(nir_shader *shader)
{
   bool progress = nir_shader_lower_instructions(shader,
                                                 lower_system_value_filter,
                                                 lower_system_value_instr,
                                                 NULL);

   /* We're going to delete the variables so we need to clean up all those
    * derefs we left lying around.
    */
   if (progress)
      nir_remove_dead_derefs(shader);

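   /* The loads have been rewritten to intrinsics, so the system-value
    * variables themselves are no longer needed.
    */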
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_system_value)
      exec_node_remove(&var->node);

   return progress;
}

static bool
lower_compute_system_value_filter(const nir_instr *instr, const void *_options)
{
   return instr->type == nir_instr_type_intrinsic;
}

static nir_ssa_def *
lower_compute_system_value_instr(nir_builder *b,
                                 nir_instr *instr, void *_options)
{
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   const nir_lower_compute_system_values_options *options = _options;

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return NULL;

   assert(intrin->dest.is_ssa);
   const unsigned bit_size = intrin->dest.ssa.bit_size;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_local_invocation_id:
      /* If lower_cs_local_id_from_index is true, then we derive the local
       * ID from the local index.
       */
      if (b->shader->options->lower_cs_local_id_from_index) {
         /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
          * on this formula:
          *
          *    gl_LocalInvocationID.x =
          *       gl_LocalInvocationIndex % gl_WorkGroupSize.x;
          *    gl_LocalInvocationID.y =
          *       (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
          *       gl_WorkGroupSize.y;
          *    gl_LocalInvocationID.z =
          *       (gl_LocalInvocationIndex /
          *        (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
          *       gl_WorkGroupSize.z;
          *
          * However, the final % gl_WorkGroupSize.z does nothing unless we
          * accidentally end up with a gl_LocalInvocationIndex that is too
          * large so it can safely be omitted.
          */
         nir_ssa_def *local_index = nir_load_local_invocation_index(b);
         nir_ssa_def *local_size = nir_load_local_group_size(b);

         /* Because no hardware supports a local workgroup size greater than
          * about 1K, this calculation can be done in 32-bit and can save some
          * 64-bit arithmetic.
          */
         nir_ssa_def *id_x, *id_y, *id_z;
         id_x = nir_umod(b, local_index,
                            nir_channel(b, local_size, 0));
         id_y = nir_umod(b, nir_udiv(b, local_index,
                                        nir_channel(b, local_size, 0)),
                            nir_channel(b, local_size, 1));
         id_z = nir_udiv(b, local_index,
                            nir_imul(b, nir_channel(b, local_size, 0),
                                        nir_channel(b, local_size, 1)));
         return nir_u2u(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
      } else {
         return NULL;
      }

   case nir_intrinsic_load_local_invocation_index:
      /* If lower_cs_local_index_from_id is true, then we derive the local
       * index from the local id.
       */
      if (b->shader->options->lower_cs_local_index_from_id) {
         /* From the GLSL man page for gl_LocalInvocationIndex:
          *
          *    "The value of gl_LocalInvocationIndex is equal to
          *    gl_LocalInvocationID.z * gl_WorkGroupSize.x *
          *    gl_WorkGroupSize.y + gl_LocalInvocationID.y *
          *    gl_WorkGroupSize.x + gl_LocalInvocationID.x"
          */
         nir_ssa_def *local_id = nir_load_local_invocation_id(b);

         nir_ssa_def *size_x =
            nir_imm_int(b, b->shader->info.cs.local_size[0]);
         nir_ssa_def *size_y =
            nir_imm_int(b, b->shader->info.cs.local_size[1]);

         /* Because no hardware supports a local workgroup size greater than
          * about 1K, this calculation can be done in 32-bit and can save some
          * 64-bit arithmetic.
          */
         nir_ssa_def *index;
         index = nir_imul(b, nir_channel(b, local_id, 2),
                             nir_imul(b, size_x, size_y));
         index = nir_iadd(b, index,
                             nir_imul(b, nir_channel(b, local_id, 1), size_x));
         index = nir_iadd(b, index, nir_channel(b, local_id, 0));
         return nir_u2u(b, index, bit_size);
      } else {
         return NULL;
      }

   case nir_intrinsic_load_local_group_size:
      if (b->shader->info.cs.local_size_variable) {
         /* If the local work group size is variable it can't be lowered at
          * this point.  We do, however, have to make sure that the intrinsic
          * is only 32-bit.
          */
         return NULL;
      } else {
         /* Using a 32-bit constant is safe here, as no device/driver needs
          * more than 32 bits for the local size.
          */
         nir_const_value local_size_const[3];
         memset(local_size_const, 0, sizeof(local_size_const));
         local_size_const[0].u32 = b->shader->info.cs.local_size[0];
         local_size_const[1].u32 = b->shader->info.cs.local_size[1];
         local_size_const[2].u32 = b->shader->info.cs.local_size[2];
         return nir_u2u(b, nir_build_imm(b, 3, 32, local_size_const), bit_size);
      }

   case nir_intrinsic_load_global_invocation_id_zero_base: {
      if ((options && options->has_base_work_group_id) ||
          !b->shader->options->has_cs_global_id) {
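         /* global_id = work_group_id * local_group_size + local_invocation_id */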
         nir_ssa_def *group_size = nir_load_local_group_size(b);
         nir_ssa_def *group_id = nir_load_work_group_id(b, bit_size);
         nir_ssa_def *local_id = nir_load_local_invocation_id(b);

         return nir_iadd(b, nir_imul(b, group_id,
                                        nir_u2u(b, group_size, bit_size)),
                            nir_u2u(b, local_id, bit_size));
      } else {
         return NULL;
      }
   }

   case nir_intrinsic_load_global_invocation_id: {
      if (options && options->has_base_global_invocation_id)
         return nir_iadd(b, nir_load_global_invocation_id_zero_base(b, bit_size),
                            nir_load_base_global_invocation_id(b, bit_size));
      else if ((options && options->has_base_work_group_id) ||
               !b->shader->options->has_cs_global_id)
         return nir_load_global_invocation_id_zero_base(b, bit_size);
      else
         return NULL;
   }

   case nir_intrinsic_load_global_invocation_index: {
      /* OpenCL's global_linear_id explicitly removes the global offset before computing this */
      assert(b->shader->info.stage == MESA_SHADER_KERNEL);
      nir_ssa_def *global_base_id = nir_load_base_global_invocation_id(b, bit_size);
      nir_ssa_def *global_id = nir_isub(b, nir_load_global_invocation_id(b, bit_size), global_base_id);
      nir_ssa_def *global_size = build_global_group_size(b, bit_size);

      /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
      nir_ssa_def *index;
      index = nir_imul(b, nir_channel(b, global_id, 2),
                          nir_channel(b, global_size, 1));
      index = nir_iadd(b, nir_channel(b, global_id, 1), index);
      index = nir_imul(b, nir_channel(b, global_size, 0), index);
      index = nir_iadd(b, nir_channel(b, global_id, 0), index);
      return index;
   }

   case nir_intrinsic_load_work_group_id: {
      if (options && options->has_base_work_group_id)
         return nir_iadd(b, nir_u2u(b, nir_load_work_group_id_zero_base(b), bit_size),
                            nir_load_base_work_group_id(b, bit_size));
      else
         return NULL;
   }

   default:
      return NULL;
   }
}

bool
nir_lower_compute_system_values(nir_shader *shader,
                                const nir_lower_compute_system_values_options *options)
{
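   /* Compute system values are only meaningful in compute-like stages. */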
   if (shader->info.stage != MESA_SHADER_COMPUTE &&
       shader->info.stage != MESA_SHADER_KERNEL)
      return false;

   return nir_shader_lower_instructions(shader,
                                        lower_compute_system_value_filter,
                                        lower_compute_system_value_instr,
                                        (void*)options);
}