/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "nir.h"

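/**
 * Mark "len" consecutive slots of the given input or output variable as
 * used, starting "offset" slots past the variable's base location.  Generic
 * per-patch varyings go into the patch_* masks; everything else goes into
 * the regular input/output masks.
 */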
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len)
{
   for (int i = 0; i < len; i++) {
      assert(var->data.location != -1);

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

      if (is_patch_generic) {
         assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         assert(idx < VARYING_SLOT_MAX);
         bitfield = BITFIELD64_BIT(idx);
      }

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic)
            shader->info->patch_inputs_read |= bitfield;
         else
            shader->info->inputs_read |= bitfield;

         if (shader->stage == MESA_SHADER_FRAGMENT) {
            shader->info->fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_patch_generic) {
            shader->info->patch_outputs_written |= bitfield;
         } else if (!var->data.read_only) {
            shader->info->outputs_written |= bitfield;
         }

         if (var->data.fb_fetch_output)
            shader->info->outputs_read |= bitfield;
      }
   }
}

/**
 * Mark an entire variable as used.  Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots);
}

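/**
 * Return the slot offset selected by the constant array indices of a
 * dereference chain, or -1 if any index in the chain is indirect
 * (non-constant).
 */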
static unsigned
get_io_offset(nir_deref_var *deref)
{
   unsigned offset = 0;

   nir_deref *tail = &deref->deref;
   while (tail->child != NULL) {
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            return -1;
         }

         offset += glsl_count_attribute_slots(tail->type, false) *
            deref_array->base_offset;
      }
      /* TODO: we can get the offset for structs here; see nir_lower_io() */
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used.  Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_deref_var *deref)
{
   nir_variable *var = deref->var;
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of
    * structs here, marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {

      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* Double the element width for double types that take two slots. */
   if (glsl_type_is_dual_slot(glsl_without_array(type))) {
      elem_width *= 2;
   }

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array.  This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width);
   return true;
}

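/**
 * Update the shader info for a single intrinsic: I/O usage for variable
 * loads and stores, system values read, discards, and end-primitive usage.
 */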
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      assert(shader->stage == MESA_SHADER_FRAGMENT);
      shader->info->fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset:
   case nir_intrinsic_load_var:
   case nir_intrinsic_store_var: {
      nir_variable *var = instr->variables[0]->var;

      if (var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out) {
         if (!try_mask_partial_io(shader, instr->variables[0]))
            mark_whole_variable(shader, var);

         /* We need to track which inputs_read bits correspond to a
          * dvec3/dvec4 input attribute.
          */
         if (shader->stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0;
                 i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info->double_inputs_read |= BITFIELD64_BIT(idx);
            }
         }
      }
      break;
   }

   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
      /* BITFIELD64_BIT() keeps the shift 64-bit; a plain "1 <<" would
       * overflow for system values past bit 31.
       */
      shader->info->system_values_read |=
         BITFIELD64_BIT(nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->stage == MESA_SHADER_GEOMETRY);
      shader->info->gs.uses_end_primitive = true;
      break;

   default:
      break;
   }
}

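/**
 * Update the shader info for a texture instruction.  Currently this only
 * tracks whether gather4 (tg4) is used.
 */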
static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (instr->op == nir_texop_tg4)
      shader->info->uses_texture_gather = true;
}

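/**
 * Gather info from every instruction in a block.  Calls must have been
 * inlined before this runs, so only intrinsic and texture instructions
 * are interesting here.
 */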
static void
gather_info_block(nir_block *block, nir_shader *shader)
{
   nir_foreach_instr(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir_instr_as_tex(instr), shader);
         break;
      case nir_instr_type_call:
         assert(!"nir_shader_gather_info only works if functions are inlined");
         break;
      default:
         break;
      }
   }
}

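/**
 * Re-derive the shader_info fields that depend on the IR: texture and image
 * counts from the uniform list, plus the input/output and system-value
 * masks from the instructions reachable from the given entrypoint.
 *
 * A typical call site looks something like the sketch below (assuming the
 * shader's functions have already been inlined and that
 * nir_shader_get_entrypoint() is available from nir.h):
 *
 *    nir_shader_gather_info(shader, nir_shader_get_entrypoint(shader));
 */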
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info->num_textures = 0;
   shader->info->num_images = 0;
   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *type = var->type;
      unsigned count = 1;
      if (glsl_type_is_array(type)) {
         count = glsl_get_aoa_size(type);
         type = glsl_without_array(type);
      }

      if (glsl_type_is_image(type)) {
         shader->info->num_images += count;
      } else if (glsl_type_is_sampler(type)) {
         shader->info->num_textures += count;
      }
   }

   shader->info->inputs_read = 0;
   shader->info->outputs_written = 0;
   shader->info->outputs_read = 0;
   shader->info->double_inputs_read = 0;
   shader->info->patch_inputs_read = 0;
   shader->info->patch_outputs_written = 0;
   shader->info->system_values_read = 0;
   if (shader->stage == MESA_SHADER_FRAGMENT) {
      shader->info->fs.uses_sample_qualifier = false;
   }
   nir_foreach_block(block, entrypoint) {
      gather_info_block(block, shader);
   }
}