/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir_builder.h"
#include "compiler/elk/elk_nir.h"
#include "util/mesa-sha1.h"

#define sizeof_field(type, field) sizeof(((type *)0)->field)

void
anv_nir_compute_push_layout(nir_shader *nir,
                            const struct anv_physical_device *pdevice,
                            enum elk_robustness_flags robust_flags,
                            struct elk_stage_prog_data *prog_data,
                            struct anv_pipeline_bind_map *map,
                            void *mem_ctx)
{
   const struct elk_compiler *compiler = pdevice->compiler;
   memset(map->push_ranges, 0, sizeof(map->push_ranges));

   bool has_const_ubo = false;
   unsigned push_start = UINT_MAX, push_end = 0;
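   /* First pass: find every constant-offset UBO load (a candidate for UBO
    * range pushing) and compute the byte range [push_start, push_end) that
    * covers all load_push_constant intrinsics.
    */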
   nir_foreach_function_impl(impl, nir) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_ubo:
               if (nir_src_is_const(intrin->src[0]) &&
                   nir_src_is_const(intrin->src[1]))
                  has_const_ubo = true;
               break;

            case nir_intrinsic_load_push_constant: {
               unsigned base = nir_intrinsic_base(intrin);
               unsigned range = nir_intrinsic_range(intrin);
               push_start = MIN2(push_start, base);
               push_end = MAX2(push_end, base + range);
               break;
            }

            default:
               break;
            }
         }
      }
   }

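   /* If the scan found no load_push_constant intrinsics, push_start is still
    * UINT_MAX and push_end is still 0, which leaves this false.
    */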
   const bool has_push_intrinsic = push_start <= push_end;

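   /* Pushing UBO ranges requires Haswell or later (verx10 >= 75), is only
    * worthwhile when the shader has at least one constant-offset UBO load,
    * and is never done for compute shaders.
    */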
   const bool push_ubo_ranges =
      pdevice->info.verx10 >= 75 &&
      has_const_ubo && nir->info.stage != MESA_SHADER_COMPUTE;

   if (push_ubo_ranges && (robust_flags & ELK_ROBUSTNESS_UBO)) {
      /* We can't on-the-fly adjust our push ranges because doing so would
       * mess up the layout in the shader.  When robustBufferAccess is
       * enabled, we push a mask into the shader indicating which pushed
       * registers are valid and we zero out the invalid ones at the top of
       * the shader.
       */
      const uint32_t push_reg_mask_start =
         offsetof(struct anv_push_constants, push_reg_mask[nir->info.stage]);
      const uint32_t push_reg_mask_end = push_reg_mask_start + sizeof(uint64_t);
      push_start = MIN2(push_start, push_reg_mask_start);
      push_end = MAX2(push_end, push_reg_mask_end);
   }

   if (nir->info.stage == MESA_SHADER_COMPUTE) {
      /* For compute shaders, we always have to have the subgroup ID.  The
       * back-end compiler will "helpfully" add it for us in the last push
       * constant slot.  Yes, there is an off-by-one here, but that's
       * intentional: because the back-end adds gl_SubgroupId itself, we want
       * to claim a push constant size that is one dword less than the full
       * amount including gl_SubgroupId.
       */
      assert(push_end <= offsetof(struct anv_push_constants, cs.subgroup_id));
      push_end = offsetof(struct anv_push_constants, cs.subgroup_id);
   }

   /* Align push_start down to a 32B boundary and make it no larger than
    * push_end (no push constants is indicated by push_start = UINT_MAX).
    */
   push_start = MIN2(push_start, push_end);
   push_start = ROUND_DOWN_TO(push_start, 32);

   /* For vec4 our push data size needs to be aligned to a vec4 and for
    * scalar, it needs to be aligned to a DWORD.
    */
   const unsigned alignment = compiler->scalar_stage[nir->info.stage] ? 4 : 16;
   nir->num_uniforms = ALIGN(push_end - push_start, alignment);
   prog_data->nr_params = nir->num_uniforms / 4;
   prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);

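   /* Describe the push constant data as a push range.  Both start and
    * length are counted in 32-byte push constant registers.
    */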
   struct anv_push_range push_constant_range = {
      .set = ANV_DESCRIPTOR_SET_PUSH_CONSTANTS,
      .start = push_start / 32,
      .length = DIV_ROUND_UP(push_end - push_start, 32),
   };

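   /* Second pass: now that the final push_start is known, rewrite each
    * load_push_constant so that its base is relative to the start of the
    * pushed range.
    */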
   if (has_push_intrinsic) {
      nir_foreach_function_impl(impl, nir) {
         nir_foreach_block(block, impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;

               nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_push_constant: {
                  /* With bindless shaders we load uniforms with SEND
                   * messages. All the push constants are located after the
                   * RT_DISPATCH_GLOBALS. We just need to add the offset to
                   * the address right after RT_DISPATCH_GLOBALS (see
                   * elk_nir_lower_rt_intrinsics.c).
                   */
                  unsigned base_offset = push_start;
                  intrin->intrinsic = nir_intrinsic_load_uniform;
                  nir_intrinsic_set_base(intrin,
                                         nir_intrinsic_base(intrin) -
                                         base_offset);
                  break;
               }

               default:
                  break;
               }
            }
         }
      }
   }

   if (push_ubo_ranges) {
      elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

      /* The vec4 back-end pushes at most 32 regs while the scalar back-end
       * pushes up to 64.  This is primarily because the scalar back-end has a
       * massively more competent register allocator and so the risk of
       * spilling due to UBO pushing isn't nearly as high.
       */
      const unsigned max_push_regs =
         compiler->scalar_stage[nir->info.stage] ? 64 : 32;

      unsigned total_push_regs = push_constant_range.length;
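      /* Shrink the analyzed UBO ranges, in order, so that the push constants
       * plus all pushed UBO ranges fit within max_push_regs registers.
       */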
      for (unsigned i = 0; i < 4; i++) {
         if (total_push_regs + prog_data->ubo_ranges[i].length > max_push_regs)
            prog_data->ubo_ranges[i].length = max_push_regs - total_push_regs;
         total_push_regs += prog_data->ubo_ranges[i].length;
      }
      assert(total_push_regs <= max_push_regs);

      int n = 0;

      if (push_constant_range.length > 0)
         map->push_ranges[n++] = push_constant_range;

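      /* When UBO robustness is enabled, record where the per-stage push
       * register mask lives, as a dword index into the pushed data, so the
       * back-end can find it and zero the invalid push registers.
       */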
      if (robust_flags & ELK_ROBUSTNESS_UBO) {
         const uint32_t push_reg_mask_offset =
            offsetof(struct anv_push_constants, push_reg_mask[nir->info.stage]);
         assert(push_reg_mask_offset >= push_start);
         prog_data->push_reg_mask_param =
            (push_reg_mask_offset - push_start) / 4;
      }

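      /* Track the first push register of each UBO range so the robustness
       * code below knows which registers to mark for zeroing.
       */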
      unsigned range_start_reg = push_constant_range.length;

      for (int i = 0; i < 4; i++) {
         struct elk_ubo_range *ubo_range = &prog_data->ubo_ranges[i];
         if (ubo_range->length == 0)
            continue;

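         /* At most four ranges can be pushed and, with a relative constant
          * buffer 0, the last slot is not used for UBO ranges.  Drop any
          * range that no longer fits rather than reshuffling the layout.
          */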
         if (n >= 4 || (n == 3 && compiler->constant_buffer_0_is_relative)) {
            memset(ubo_range, 0, sizeof(*ubo_range));
            continue;
         }

         const struct anv_pipeline_binding *binding =
            &map->surface_to_descriptor[ubo_range->block];

         map->push_ranges[n++] = (struct anv_push_range) {
            .set = binding->set,
            .index = binding->index,
            .dynamic_offset_index = binding->dynamic_offset_index,
            .start = ubo_range->start,
            .length = ubo_range->length,
         };

         /* We only bother to shader-zero pushed client UBOs */
         if (binding->set < MAX_SETS &&
             (robust_flags & ELK_ROBUSTNESS_UBO)) {
            prog_data->zero_push_reg |= BITFIELD64_RANGE(range_start_reg,
                                                         ubo_range->length);
         }

         range_start_reg += ubo_range->length;
      }
   } else {
      /* For Ivy Bridge, the push constants packets have a different
       * rule that would require us to iterate in the other direction
       * and possibly mess around with dynamic state base address.
       * Don't bother; just emit regular push constants at n = 0.
       *
       * In the compute case, we don't have multiple push ranges so it's
       * better to just provide one in push_ranges[0].
       */
      map->push_ranges[0] = push_constant_range;
   }

   /* Now that we're done computing the push constant portion of the
    * bind map, hash it.  This lets us quickly determine if the actual
    * mapping has changed and not just a no-op pipeline change.
    */
   _mesa_sha1_compute(map->push_ranges,
                      sizeof(map->push_ranges),
                      map->push_sha1);
}

void
anv_nir_validate_push_layout(struct elk_stage_prog_data *prog_data,
                             struct anv_pipeline_bind_map *map)
{
#ifndef NDEBUG
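   /* nr_params counts dwords; eight dwords make one 32-byte push register. */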
   unsigned prog_data_push_size = DIV_ROUND_UP(prog_data->nr_params, 8);
   for (unsigned i = 0; i < 4; i++)
      prog_data_push_size += prog_data->ubo_ranges[i].length;

   unsigned bind_map_push_size = 0;
   for (unsigned i = 0; i < 4; i++)
      bind_map_push_size += map->push_ranges[i].length;

   /* We could go through everything again but it should be enough to assert
    * that they push the same number of registers.  This should alert us if
    * the back-end compiler decides to re-arrange stuff or shrink a range.
    */
   assert(prog_data_push_size == bind_map_push_size);
#endif
}