/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Remap load_uniform intrinsics to nir_load_ubo or nir_load_ubo_vec4 accesses
 * of UBO binding point 0. Simultaneously, remap existing UBO accesses by
 * increasing their binding point by 1.
 *
 * For PIPE_CAP_PACKED_UNIFORMS, dword_packed should be set to indicate that
 * nir_intrinsic_load_uniform offsets are in increments of dwords instead of
 * vec4s.
 *
 * If load_vec4 is set, then nir_intrinsic_load_ubo_vec4 will be generated
 * instead of nir_intrinsic_load_ubo, saving addressing math for hardware
 * needing aligned vec4 loads in increments of vec4s (such as TGSI CONST file
 * loads).
 */
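
/*
 * Rough usage sketch (hypothetical call site; NIR_PASS_V and
 * nir_lower_uniforms_to_ubo are real, but the surrounding driver code,
 * the `screen` handle, and the `packed` flag below are illustrative
 * assumptions, not taken from this file):
 *
 *    bool packed = screen->get_param(screen, PIPE_CAP_PACKED_UNIFORMS);
 *    NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, packed, false);
 *
 * After the pass runs, the default uniform block lives in UBO binding 0 and
 * any previously declared UBOs have their bindings shifted up by one.
 */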

#include "nir.h"
#include "nir_builder.h"

static bool
lower_instr(nir_intrinsic_instr *instr, nir_builder *b, bool dword_packed, bool load_vec4)
{
   b->cursor = nir_before_instr(&instr->instr);

   /* Increase all UBO binding points by 1. */
   if (instr->intrinsic == nir_intrinsic_load_ubo &&
       !b->shader->info.first_ubo_is_default_ubo) {
      nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
      nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1));
      nir_instr_rewrite_src(&instr->instr, &instr->src[0],
                            nir_src_for_ssa(new_idx));
      return true;
   }

   if (instr->intrinsic == nir_intrinsic_load_uniform) {
      nir_ssa_def *ubo_idx = nir_imm_int(b, 0);
      nir_ssa_def *uniform_offset = nir_ssa_for_src(b, instr->src[0], 1);

      assert(instr->dest.ssa.bit_size >= 8);
      nir_ssa_def *load_result;
      if (load_vec4) {
         /* Don't ask us to generate load_vec4 when you've packed your
          * uniforms as dwords instead of vec4s.
          */
         assert(!dword_packed);
         load_result = nir_load_ubo_vec4(b, instr->num_components, instr->dest.ssa.bit_size,
                                         ubo_idx,
                                         nir_iadd_imm(b, uniform_offset, nir_intrinsic_base(instr)));
      } else {
         /* For PIPE_CAP_PACKED_UNIFORMS, the uniforms are packed with the
          * base/offset in dword units instead of vec4 units.
          */
         int multiplier = dword_packed ? 4 : 16;
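         /* Worked example (illustrative numbers, not from the original code):
          * with dword_packed, base=3 and a constant source offset of 2 dwords
          * produce a byte offset of (2 + 3) * 4 = 20; with vec4 packing the
          * same base/offset would map to (2 + 3) * 16 = 80 bytes.
          */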
         load_result = nir_load_ubo(b, instr->num_components, instr->dest.ssa.bit_size,
                             ubo_idx,
                             nir_iadd_imm(b, nir_imul_imm(b, uniform_offset, multiplier),
                                          nir_intrinsic_base(instr) * multiplier));
         nir_intrinsic_instr *load = nir_instr_as_intrinsic(load_result->parent_instr);

         /* If it's const, set the alignment to our known constant offset.  If
          * not, set it to a pessimistic value based on the multiplier (or the
          * scalar size, for qword loads).
          *
          * We could potentially set up stricter alignments for indirects by
          * knowing what features are enabled in the APIs (see comment in
          * nir_lower_ubo_vec4.c)
          */
         if (nir_src_is_const(instr->src[0])) {
            nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX,
                                    (nir_src_as_uint(instr->src[0]) +
                                    nir_intrinsic_base(instr) * multiplier) %
                                    NIR_ALIGN_MUL_MAX);
         } else {
            nir_intrinsic_set_align(load, MAX2(multiplier,
                                             instr->dest.ssa.bit_size / 8), 0);
         }

         nir_intrinsic_set_range_base(load, nir_intrinsic_base(instr) * multiplier);
         nir_intrinsic_set_range(load, nir_intrinsic_range(instr) * multiplier);
      }
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, load_result);

      nir_instr_remove(&instr->instr);
      return true;
   }

   return false;
}

bool
nir_lower_uniforms_to_ubo(nir_shader *shader, bool dword_packed, bool load_vec4)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type == nir_instr_type_intrinsic)
                  progress |= lower_instr(nir_instr_as_intrinsic(instr),
                                          &builder,
                                          dword_packed, load_vec4);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

   if (progress) {
      if (!shader->info.first_ubo_is_default_ubo) {
         nir_foreach_variable_with_modes(var, shader, nir_var_mem_ubo) {
            var->data.binding++;
            if (var->data.driver_location != -1)
               var->data.driver_location++;
            /* Only increment the location for UBO arrays. */
            if (glsl_without_array(var->type) == var->interface_type &&
                glsl_type_is_array(var->type))
               var->data.location++;
         }
      }
      shader->info.num_ubos++;

      if (shader->num_uniforms > 0) {
         const struct glsl_type *type = glsl_array_type(glsl_vec4_type(),
                                                        shader->num_uniforms, 16);
         nir_variable *ubo = nir_variable_create(shader, nir_var_mem_ubo, type,
                                                 "uniform_0");
         ubo->data.binding = 0;
         ubo->data.explicit_binding = 1;

         struct glsl_struct_field field = {
            .type = type,
            .name = "data",
            .location = -1,
         };
         ubo->interface_type =
               glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                                   false, "__ubo0_interface");
      }
   }

   shader->info.first_ubo_is_default_ubo = true;
   return progress;
}