/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir_builder.h"
#include "compiler/brw_nir.h"
#include "util/mesa-sha1.h"

#define sizeof_field(type, field) sizeof(((type *)0)->field)

void
anv_nir_compute_push_layout(nir_shader *nir,
                            const struct anv_physical_device *pdevice,
                            enum brw_robustness_flags robust_flags,
                            bool fragment_dynamic,
                            struct brw_stage_prog_data *prog_data,
                            struct anv_pipeline_bind_map *map,
                            const struct anv_pipeline_push_map *push_map,
                            void *mem_ctx)
{
   const struct brw_compiler *compiler = pdevice->compiler;
   const struct intel_device_info *devinfo = compiler->devinfo;
   memset(map->push_ranges, 0, sizeof(map->push_ranges));

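   /* Scan the shader for constant-offset UBO loads that could be pushed and
    * for push constant loads, tracking the byte range of push constant data
    * that is actually used.
    */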
   bool has_const_ubo = false;
   unsigned push_start = UINT_MAX, push_end = 0;
   nir_foreach_function_impl(impl, nir) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_ubo:
               if (brw_nir_ubo_surface_index_is_pushable(intrin->src[0]) &&
                   nir_src_is_const(intrin->src[1]))
                  has_const_ubo = true;
               break;

            case nir_intrinsic_load_push_constant: {
               unsigned base = nir_intrinsic_base(intrin);
               unsigned range = nir_intrinsic_range(intrin);
               push_start = MIN2(push_start, base);
               push_end = MAX2(push_end, base + range);
               break;
            }

            case nir_intrinsic_load_desc_set_address_intel:
            case nir_intrinsic_load_desc_set_dynamic_index_intel: {
               unsigned base = offsetof(struct anv_push_constants,
                                        desc_surface_offsets);
               push_start = MIN2(push_start, base);
               push_end = MAX2(push_end, base +
                  sizeof_field(struct anv_push_constants,
                               desc_surface_offsets));
               break;
            }

            default:
               break;
            }
         }
      }
   }

   const bool has_push_intrinsic = push_start <= push_end;

   const bool push_ubo_ranges =
      has_const_ubo && nir->info.stage != MESA_SHADER_COMPUTE &&
      !brw_shader_stage_requires_bindless_resources(nir->info.stage);

   if (push_ubo_ranges && (robust_flags & BRW_ROBUSTNESS_UBO)) {
      /* We can't on-the-fly adjust our push ranges because doing so would
       * mess up the layout in the shader.  When robustBufferAccess is
       * enabled, we push a mask into the shader indicating which pushed
       * registers are valid and we zero out the invalid ones at the top of
       * the shader.
       */
      const uint32_t push_reg_mask_start =
         offsetof(struct anv_push_constants, push_reg_mask[nir->info.stage]);
      const uint32_t push_reg_mask_end = push_reg_mask_start + sizeof(uint64_t);
      push_start = MIN2(push_start, push_reg_mask_start);
      push_end = MAX2(push_end, push_reg_mask_end);
   }

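   /* With dynamic fragment shader state, the MSAA flags also live in the
    * push constants, so make sure they fall within the pushed range.
    */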
   if (nir->info.stage == MESA_SHADER_FRAGMENT && fragment_dynamic) {
      const uint32_t fs_msaa_flags_start =
         offsetof(struct anv_push_constants, gfx.fs_msaa_flags);
      const uint32_t fs_msaa_flags_end = fs_msaa_flags_start + sizeof(uint32_t);
      push_start = MIN2(push_start, fs_msaa_flags_start);
      push_end = MAX2(push_end, fs_msaa_flags_end);
   }

   if (nir->info.stage == MESA_SHADER_COMPUTE && devinfo->verx10 < 125) {
      /* For compute shaders, we always have to have the subgroup ID.  The
       * back-end compiler will "helpfully" add it for us in the last push
       * constant slot.  Yes, there is an off-by-one error here but that's
       * because the back-end will add it so we want to claim the number of
       * push constants one dword less than the full amount including
       * gl_SubgroupId.
       */
      assert(push_end <= offsetof(struct anv_push_constants, cs.subgroup_id));
      push_end = offsetof(struct anv_push_constants, cs.subgroup_id);
   }

   /* Align push_start down to a 32B boundary and make it no larger than
    * push_end (no push constants is indicated by push_start = UINT_MAX).
    */
   push_start = MIN2(push_start, push_end);
   push_start = ROUND_DOWN_TO(push_start, 32);

   /* For scalar, push data size needs to be aligned to a DWORD. */
   const unsigned alignment = 4;
   nir->num_uniforms = ALIGN(push_end - push_start, alignment);
   prog_data->nr_params = nir->num_uniforms / 4;
   prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);

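   /* Describe the push constant data itself as a push range; start and
    * length are in units of 32-byte registers.
    */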
   struct anv_push_range push_constant_range = {
      .set = ANV_DESCRIPTOR_SET_PUSH_CONSTANTS,
      .start = push_start / 32,
      .length = DIV_ROUND_UP(push_end - push_start, 32),
   };

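   /* Rewrite push constant and descriptor set offset intrinsics into
    * load_uniform so the back-end sees offsets relative to the start of the
    * pushed data computed above.
    */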
   if (has_push_intrinsic) {
      nir_foreach_function_impl(impl, nir) {
         nir_builder build = nir_builder_create(impl);
         nir_builder *b = &build;

         nir_foreach_block(block, impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;

               nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_push_constant: {
                  /* With bindless shaders we load uniforms with SEND
                   * messages. All the push constants are located after the
                   * RT_DISPATCH_GLOBALS. We just need to add the offset to
                   * the address right after RT_DISPATCH_GLOBALS (see
                   * brw_nir_lower_rt_intrinsics.c).
                   */
                  unsigned base_offset =
                     brw_shader_stage_requires_bindless_resources(nir->info.stage) ? 0 : push_start;
                  intrin->intrinsic = nir_intrinsic_load_uniform;
                  nir_intrinsic_set_base(intrin,
                                         nir_intrinsic_base(intrin) -
                                         base_offset);
                  break;
               }

               case nir_intrinsic_load_desc_set_address_intel: {
                  assert(brw_shader_stage_requires_bindless_resources(nir->info.stage));
                  b->cursor = nir_before_instr(&intrin->instr);
                  nir_def *pc_load = nir_load_uniform(b, 1, 32,
                     nir_imul_imm(b, intrin->src[0].ssa, sizeof(uint32_t)),
                     .base = offsetof(struct anv_push_constants,
                                      desc_surface_offsets),
                     .range = sizeof_field(struct anv_push_constants,
                                           desc_surface_offsets),
                     .dest_type = nir_type_uint32);
                  pc_load = nir_iand_imm(b, pc_load, ANV_DESCRIPTOR_SET_OFFSET_MASK);
                  nir_def *desc_addr =
                     nir_pack_64_2x32_split(
                        b, pc_load,
                        nir_load_reloc_const_intel(
                           b, BRW_SHADER_RELOC_DESCRIPTORS_ADDR_HIGH));
                  nir_def_rewrite_uses(&intrin->def, desc_addr);
                  break;
               }

               case nir_intrinsic_load_desc_set_dynamic_index_intel: {
                  b->cursor = nir_before_instr(&intrin->instr);
                  nir_def *pc_load = nir_load_uniform(b, 1, 32,
                     nir_imul_imm(b, intrin->src[0].ssa, sizeof(uint32_t)),
                     .base = offsetof(struct anv_push_constants,
                                      desc_surface_offsets),
                     .range = sizeof_field(struct anv_push_constants,
                                           desc_surface_offsets),
                     .dest_type = nir_type_uint32);
                  pc_load = nir_iand_imm(
                     b, pc_load, ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK);
                  nir_def_rewrite_uses(&intrin->def, pc_load);
                  break;
               }

               default:
                  break;
               }
            }
         }
      }
   }

   if (push_ubo_ranges) {
      brw_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

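      /* Clamp the UBO ranges so that the push constants plus all pushed UBO
       * data fit within the 64-register push limit.
       */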
      const unsigned max_push_regs = 64;

      unsigned total_push_regs = push_constant_range.length;
      for (unsigned i = 0; i < 4; i++) {
         if (total_push_regs + prog_data->ubo_ranges[i].length > max_push_regs)
            prog_data->ubo_ranges[i].length = max_push_regs - total_push_regs;
         total_push_regs += prog_data->ubo_ranges[i].length;
      }
      assert(total_push_regs <= max_push_regs);

      int n = 0;

      if (push_constant_range.length > 0)
         map->push_ranges[n++] = push_constant_range;

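      /* With robust UBO access, record where (in dwords) the push register
       * mask lives within the pushed data so the back-end can find it.
       */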
      if (robust_flags & BRW_ROBUSTNESS_UBO) {
         const uint32_t push_reg_mask_offset =
            offsetof(struct anv_push_constants, push_reg_mask[nir->info.stage]);
         assert(push_reg_mask_offset >= push_start);
         prog_data->push_reg_mask_param =
            (push_reg_mask_offset - push_start) / 4;
      }

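      /* Add the pushed UBO ranges to the bind map.  Only four push range
       * slots exist; any UBO range that doesn't fit is dropped.
       */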
      unsigned range_start_reg = push_constant_range.length;

      for (int i = 0; i < 4; i++) {
         struct brw_ubo_range *ubo_range = &prog_data->ubo_ranges[i];
         if (ubo_range->length == 0)
            continue;

         if (n >= 4) {
            memset(ubo_range, 0, sizeof(*ubo_range));
            continue;
         }

         assert(ubo_range->block < push_map->block_count);
         const struct anv_pipeline_binding *binding =
            &push_map->block_to_descriptor[ubo_range->block];

         map->push_ranges[n++] = (struct anv_push_range) {
            .set = binding->set,
            .index = binding->index,
            .dynamic_offset_index = binding->dynamic_offset_index,
            .start = ubo_range->start,
            .length = ubo_range->length,
         };

         /* We only bother to shader-zero pushed client UBOs */
         if (binding->set < MAX_SETS &&
             (robust_flags & BRW_ROBUSTNESS_UBO)) {
            prog_data->zero_push_reg |= BITFIELD64_RANGE(range_start_reg,
                                                         ubo_range->length);
         }

         range_start_reg += ubo_range->length;
      }
   } else {
      /* For Ivy Bridge, the push constants packets have a different
       * rule that would require us to iterate in the other direction
       * and possibly mess around with dynamic state base address.
       * Don't bother; just emit regular push constants at n = 0.
       *
       * In the compute case, we don't have multiple push ranges so it's
       * better to just provide one in push_ranges[0].
       */
      map->push_ranges[0] = push_constant_range;
   }

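   /* With dynamic fragment shader state, tell the back-end where to find the
    * MSAA flags within the pushed data (as a dword index).
    */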
   if (nir->info.stage == MESA_SHADER_FRAGMENT && fragment_dynamic) {
      struct brw_wm_prog_data *wm_prog_data =
         container_of(prog_data, struct brw_wm_prog_data, base);

      const uint32_t fs_msaa_flags_offset =
         offsetof(struct anv_push_constants, gfx.fs_msaa_flags);
      assert(fs_msaa_flags_offset >= push_start);
      wm_prog_data->msaa_flags_param =
         (fs_msaa_flags_offset - push_start) / 4;
   }

#if 0
   fprintf(stderr, "stage=%s push ranges:\n", gl_shader_stage_name(nir->info.stage));
   for (unsigned i = 0; i < ARRAY_SIZE(map->push_ranges); i++)
      fprintf(stderr, "   range%i: %03u-%03u set=%u index=%u\n", i,
              map->push_ranges[i].start,
              map->push_ranges[i].length,
              map->push_ranges[i].set,
              map->push_ranges[i].index);
#endif

   /* Now that we're done computing the push constant portion of the
    * bind map, hash it.  This lets us quickly determine if the actual
    * mapping has changed and not just a no-op pipeline change.
    */
   _mesa_sha1_compute(map->push_ranges,
                      sizeof(map->push_ranges),
                      map->push_sha1);
}

void
anv_nir_validate_push_layout(struct brw_stage_prog_data *prog_data,
                             struct anv_pipeline_bind_map *map)
{
#ifndef NDEBUG
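   /* nr_params counts dword params; there are 8 dwords per 32B push register. */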
   unsigned prog_data_push_size = DIV_ROUND_UP(prog_data->nr_params, 8);
   for (unsigned i = 0; i < 4; i++)
      prog_data_push_size += prog_data->ubo_ranges[i].length;

   unsigned bind_map_push_size = 0;
   for (unsigned i = 0; i < 4; i++)
      bind_map_push_size += map->push_ranges[i].length;

   /* We could go through everything again but it should be enough to assert
    * that they push the same number of registers.  This should alert us if
    * the back-end compiler decides to re-arrange stuff or shrink a range.
    */
   assert(prog_data_push_size == bind_map_push_size);
#endif
}