/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "util/set.h"
#include "util/hash_table.h"

/* This file contains various little helpers for doing simple linking in
 * NIR.  Eventually, we'll probably want a full-blown varying packing
 * implementation in here.  Right now, it just deletes unused things.
 */

/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;
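
   /* Patch varyings are tracked in a separate bitfield, so rebase their
    * location to be relative to VARYING_SLOT_PATCH0.
    */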
   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);
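
   /* Per-vertex IO is arrayed over vertices; the number of slots comes
    * from the element type.
    */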
   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }
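
   /* E.g. a variable covering two slots at (rebased) location 2 yields
    * ((1ull << 2) - 1) << 2 = 0b1100.
    */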
   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
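
/* OR the slots of every output the TCS reads back into the read masks, as
 * one TCS invocation may read outputs written by another invocation.
 */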
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;

               nir_intrinsic_instr *intrin_instr =
                  nir_instr_as_intrinsic(instr);
               if (intrin_instr->intrinsic == nir_intrinsic_load_var &&
                   intrin_instr->variables[0]->var->data.mode ==
                   nir_var_shader_out) {

                  nir_variable *var = intrin_instr->variables[0]->var;
                  if (var->data.patch) {
                     patches_read[var->data.location_frac] |=
                        get_variable_io_mask(intrin_instr->variables[0]->var,
                                             shader->info.stage);
                  } else {
                     read[var->data.location_frac] |=
                        get_variable_io_mask(intrin_instr->variables[0]->var,
                                             shader->info.stage);
                  }
               }
            }
         }
      }
   }
}
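
/* Demote any variable in var_list whose slots are not used by the other
 * stage (tracked per-component in the used_by_* masks) to an ordinary
 * global, skipping builtins and always-active IO.
 */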
static bool
remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                      uint64_t *used_by_other_stage,
                      uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_global;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   return progress;
}
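
/* Demotes producer outputs never read by the consumer, and consumer inputs
 * never written by the producer, to globals.  Returns true if any variable
 * was demoted.
 */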
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);
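
   /* One 64-bit slot mask per component (location_frac 0-3), with separate
    * masks for per-patch varyings since they live in their own location
    * space.
    */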
   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.patch) {
         patches_written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      } else {
         written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.patch) {
         patches_read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      } else {
         read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = remove_unused_io_vars(producer, &producer->outputs, read,
                                    patches_read);

   progress = remove_unused_io_vars(consumer, &consumer->inputs, written,
                                    patches_written) || progress;

   return progress;
}

static uint8_t
get_interp_type(nir_variable *var, bool default_to_smooth_interp)
{
   if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}

#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2

static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}
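
/* Gather, for each generic slot (VARYING_SLOT_VAR0 + i), the mask of
 * components in use along with the interpolation type and location of the
 * variables occupying it.
 */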
static void
get_slot_component_masks_and_interp_types(struct exec_list *var_list,
                                          uint8_t *comps,
                                          uint8_t *interp_type,
                                          uint8_t *interp_loc,
                                          gl_shader_stage stage,
                                          bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         unsigned elements =
            glsl_get_vector_elements(glsl_without_array(type));

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            interp_type[location + i] =
               get_interp_type(var, default_to_smooth_interp);
            interp_loc[location + i] = get_interp_loc(var);
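
            /* Dual-slot types (e.g. dvec3/dvec4) spill into a second slot:
             * the first slot takes 4 - location_frac components and the
             * second takes whatever remains, starting at component 0.
             */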
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i] |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * 2) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i] |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i] |=
                  ((1 << elements) - 1) << var->data.location_frac;
            }
         }
      }
   }
}

struct varying_loc
{
   uint8_t component;
   uint32_t location;
};
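
/* Apply the moves recorded in remap to every variable in var_list and
 * rebuild the slots_used/out_slots_read bitmasks to match the new
 * locations.
 */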
static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read)
{
   uint64_t out_slots_read_tmp = 0;

   /* We don't touch builtins so just copy the bitmask */
   uint64_t slots_used_tmp =
      *slots_used & (((uint64_t)1 << (VARYING_SLOT_VAR0 - 1)) - 1);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         uint64_t slots = (((uint64_t)1 << num_slots) - 1) << var->data.location;
         if (slots & *slots_used)
            used_across_stages = true;

         if (slots & *out_slots_read)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages) {
               slots_used_tmp |=
                  *slots_used & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }

            if (outputs_read) {
               out_slots_read_tmp |=
                  *out_slots_read & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }

         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  slots_used_tmp |= (uint64_t)1 << (var->data.location + i);

               if (outputs_read)
                  out_slots_read_tmp |= (uint64_t)1 << (var->data.location + i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp;
   *out_slots_read = out_slots_read_tmp;
}

/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer, uint8_t *comps,
                   uint8_t *interp_type, uint8_t *interp_loc,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[32][4] = {{{0}, {0}}};

   /* Create a cursor for each interpolation type */
   unsigned cursor[4] = {0};

   /* We only need to pass over one stage and we choose the consumer as it
    * seems to cause a larger reduction in instruction counts (tested on
    * i965).
    */
   nir_foreach_variable(var, input_list) {

      /* Only remap things that aren't builtins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, consumer->info.stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* Skip types that require more complex packing handling.
          * TODO: add support for these types.
          */
         if (glsl_type_is_array(type) ||
             glsl_type_is_dual_slot(type) ||
             glsl_type_is_matrix(type) ||
             glsl_type_is_struct(type) ||
             glsl_type_is_64bit(type))
            continue;

         /* We ignore complex types above and all other vector types should
          * have been split into scalar variables by the lower_io_to_scalar
          * pass. The only exception should be OpenGL xfb varyings.
          */
         if (glsl_get_vector_elements(type) != 1)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         uint8_t used_comps = comps[location];

         /* If there are no empty components there is nothing more for us
          * to do.
          */
         if (used_comps == 0xf)
            continue;
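
         /* Scan forward from this interpolation type's cursor for a slot
          * with a compatible interpolation mode and location that still has
          * a free component for this scalar.
          */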
         bool found_new_offset = false;
         uint8_t interp = get_interp_type(var, default_to_smooth_interp);
         for (; cursor[interp] < 32; cursor[interp]++) {
            uint8_t cursor_used_comps = comps[cursor[interp]];

            /* We couldn't find anywhere to pack the varying; continue on to
             * the next one.
             */
            if (cursor[interp] == location &&
                (var->data.location_frac == 0 ||
                 cursor_used_comps & ((1 << (var->data.location_frac)) - 1)))
               break;

            /* We can only pack varyings with matching interpolation types */
            if (interp_type[cursor[interp]] != interp)
               continue;

            /* Interpolation loc must match also.
             * TODO: i965 can handle these if they don't match, but the
             * radeonsi nir backend handles everything as vec4s and so expects
             * this to be the same for all components. We could make this
             * check driver specific or drop it if NIR ever becomes the only
             * radeonsi backend.
             */
            if (interp_loc[cursor[interp]] != get_interp_loc(var))
               continue;

            /* If the slot is empty just skip it for now, compact_var_list()
             * can be called after this function to remove empty slots for
             * us.  TODO: finishing compact_var_list() requires array and
             * matrix splitting.
             */
            if (!cursor_used_comps)
               continue;

            uint8_t unused_comps = ~cursor_used_comps;

            for (unsigned i = 0; i < 4; i++) {
               uint8_t new_var_comps = 1 << i;
               if (unused_comps & new_var_comps) {
                  remap[location][var->data.location_frac].component = i;
                  remap[location][var->data.location_frac].location =
                     cursor[interp] + VARYING_SLOT_VAR0;

                  found_new_offset = true;

                  /* Turn off the mask for the component we are remapping */
                  if (comps[location] & 1 << var->data.location_frac) {
                     comps[location] ^= 1 << var->data.location_frac;
                     comps[cursor[interp]] |= new_var_comps;
                  }
                  break;
               }
            }

            if (found_new_offset)
               break;
         }
      }
   }

   uint64_t zero = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read);
}

/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings.  At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this.  Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't.  This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint8_t comps[32] = {0};
   uint8_t interp_type[32] = {0};
   uint8_t interp_loc[32] = {0};

   get_slot_component_masks_and_interp_types(&producer->outputs, comps,
                                             interp_type, interp_loc,
                                             producer->info.stage,
                                             default_to_smooth_interp);
   get_slot_component_masks_and_interp_types(&consumer->inputs, comps,
                                             interp_type, interp_loc,
                                             consumer->info.stage,
                                             default_to_smooth_interp);

   compact_components(producer, consumer, comps, interp_type, interp_loc,
                      default_to_smooth_interp);
}