/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir_builder.h"
#include "compiler/brw_nir.h"
#include "util/mesa-sha1.h"

void
anv_nir_compute_push_layout(nir_shader *nir,
                            const struct anv_physical_device *pdevice,
                            enum brw_robustness_flags robust_flags,
                            bool fragment_dynamic,
                            struct brw_stage_prog_data *prog_data,
                            struct anv_pipeline_bind_map *map,
                            const struct anv_pipeline_push_map *push_map,
                            enum anv_descriptor_set_layout_type desc_type,
                            void *mem_ctx)
{
   const struct brw_compiler *compiler = pdevice->compiler;
   const struct intel_device_info *devinfo = compiler->devinfo;
   memset(map->push_ranges, 0, sizeof(map->push_ranges));

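   /* Walk the shader looking for UBO loads that could be pushed (pushable
    * surface index and constant offset) and for push constant loads, and
    * compute the [push_start, push_end) range of push constant data that is
    * actually used.
    */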
   bool has_const_ubo = false;
   unsigned push_start = UINT_MAX, push_end = 0;
   nir_foreach_function_impl(impl, nir) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_ubo:
               if (brw_nir_ubo_surface_index_is_pushable(intrin->src[0]) &&
                   nir_src_is_const(intrin->src[1]))
                  has_const_ubo = true;
               break;

            case nir_intrinsic_load_push_constant: {
               unsigned base = nir_intrinsic_base(intrin);
               unsigned range = nir_intrinsic_range(intrin);
               push_start = MIN2(push_start, base);
               push_end = MAX2(push_end, base + range);
               /* We need to retain this information to update the push
                * constant on vkCmdDispatch*().
                */
               if (nir->info.stage == MESA_SHADER_COMPUTE &&
                   base >= anv_drv_const_offset(cs.num_work_groups[0]) &&
                   base < (anv_drv_const_offset(cs.num_work_groups[2]) + 4)) {
                  struct brw_cs_prog_data *cs_prog_data =
                     container_of(prog_data, struct brw_cs_prog_data, base);
                  cs_prog_data->uses_num_work_groups = true;
               }
               break;
            }

            default:
               break;
            }
         }
      }
   }

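   /* If no load_push_constant was found, push_start is still UINT_MAX and
    * therefore greater than push_end.
    */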
   const bool has_push_intrinsic = push_start <= push_end;

   const bool push_ubo_ranges =
      has_const_ubo && nir->info.stage != MESA_SHADER_COMPUTE &&
      !brw_shader_stage_requires_bindless_resources(nir->info.stage);

   if (push_ubo_ranges && (robust_flags & BRW_ROBUSTNESS_UBO)) {
      /* We can't on-the-fly adjust our push ranges because doing so would
       * mess up the layout in the shader. When robustBufferAccess is
       * enabled, we push a mask into the shader indicating which pushed
       * registers are valid and we zero out the invalid ones at the top of
       * the shader.
       */
      const uint32_t push_reg_mask_start =
         anv_drv_const_offset(push_reg_mask[nir->info.stage]);
      const uint32_t push_reg_mask_end = push_reg_mask_start +
         anv_drv_const_size(push_reg_mask[nir->info.stage]);
      push_start = MIN2(push_start, push_reg_mask_start);
      push_end = MAX2(push_end, push_reg_mask_end);
   }

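   /* When the fragment shader is compiled with dynamic MSAA state, the
    * gfx.fs_msaa_flags constant is also pushed, so extend the range to cover
    * it.
    */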
   if (nir->info.stage == MESA_SHADER_FRAGMENT && fragment_dynamic) {
      const uint32_t fs_msaa_flags_start =
         anv_drv_const_offset(gfx.fs_msaa_flags);
      const uint32_t fs_msaa_flags_end = fs_msaa_flags_start +
         anv_drv_const_size(gfx.fs_msaa_flags);
      push_start = MIN2(push_start, fs_msaa_flags_start);
      push_end = MAX2(push_end, fs_msaa_flags_end);
   }

   if (nir->info.stage == MESA_SHADER_COMPUTE && devinfo->verx10 < 125) {
      /* For compute shaders, we always have to have the subgroup ID. The
       * back-end compiler will "helpfully" add it for us in the last push
       * constant slot. Yes, there is an off-by-one error here but that's
       * because the back-end will add it so we want to claim the number of
       * push constants one dword less than the full amount including
       * gl_SubgroupId.
       */
      assert(push_end <= anv_drv_const_offset(cs.subgroup_id));
      push_end = anv_drv_const_offset(cs.subgroup_id);
   }

   /* Align push_start down to a 32B boundary and make it no larger than
    * push_end (no push constants is indicated by push_start = UINT_MAX).
    */
   push_start = MIN2(push_start, push_end);
   push_start = ROUND_DOWN_TO(push_start, 32);

   /* For scalar, push data size needs to be aligned to a DWORD. */
   const unsigned alignment = 4;
   nir->num_uniforms = ALIGN(push_end - push_start, alignment);
   prog_data->nr_params = nir->num_uniforms / 4;
   prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);

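   /* The push range is expressed in 32-byte units: push_start was aligned
    * down to 32B above and the length is rounded up to a whole GRF.
    */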
   struct anv_push_range push_constant_range = {
      .set = ANV_DESCRIPTOR_SET_PUSH_CONSTANTS,
      .start = push_start / 32,
      .length = ALIGN(push_end - push_start, devinfo->grf_size) / 32,
   };

   if (has_push_intrinsic) {
      nir_foreach_function_impl(impl, nir) {
         nir_foreach_block(block, impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;

               nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_push_constant: {
                  /* With bindless shaders we load uniforms with SEND
                   * messages. All the push constants are located after the
                   * RT_DISPATCH_GLOBALS. We just need to add the offset to
                   * the address right after RT_DISPATCH_GLOBALS (see
                   * brw_nir_lower_rt_intrinsics.c).
                   */
                  unsigned base_offset =
                     brw_shader_stage_requires_bindless_resources(nir->info.stage) ? 0 : push_start;
                  intrin->intrinsic = nir_intrinsic_load_uniform;
                  nir_intrinsic_set_base(intrin,
                                         nir_intrinsic_base(intrin) -
                                         base_offset);
                  break;
               }

               default:
                  break;
               }
            }
         }
      }
   }

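   /* Fill out map->push_ranges. The driver/API push constants take one range
    * and, when UBO pushing is enabled, the remaining slots (out of four) are
    * filled with the UBO ranges chosen by brw_nir_analyze_ubo_ranges().
    */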
   unsigned n_push_ranges = 0;
   if (push_ubo_ranges) {
      brw_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

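      /* Clamp the UBO ranges so that the total number of pushed registers
       * (push constants plus UBO data) stays within the 64-register limit.
       */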
      const unsigned max_push_regs = 64;

      unsigned total_push_regs = push_constant_range.length;
      for (unsigned i = 0; i < 4; i++) {
         if (total_push_regs + prog_data->ubo_ranges[i].length > max_push_regs)
            prog_data->ubo_ranges[i].length = max_push_regs - total_push_regs;
         total_push_regs += prog_data->ubo_ranges[i].length;
      }
      assert(total_push_regs <= max_push_regs);

      if (push_constant_range.length > 0)
         map->push_ranges[n_push_ranges++] = push_constant_range;

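      /* With robust UBO access, record the dword offset of the push register
       * mask relative to the start of the pushed data.
       */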
      if (robust_flags & BRW_ROBUSTNESS_UBO) {
         const uint32_t push_reg_mask_offset =
            anv_drv_const_offset(push_reg_mask[nir->info.stage]);
         assert(push_reg_mask_offset >= push_start);
         prog_data->push_reg_mask_param =
            (push_reg_mask_offset - push_start) / 4;
      }

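      /* Track the first pushed register of each UBO range so that, with
       * robustness enabled, we can build the zero_push_reg mask below.
       */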
      unsigned range_start_reg = push_constant_range.length;

      for (int i = 0; i < 4; i++) {
         struct brw_ubo_range *ubo_range = &prog_data->ubo_ranges[i];
         if (ubo_range->length == 0)
            continue;

         if (n_push_ranges >= 4) {
            memset(ubo_range, 0, sizeof(*ubo_range));
            continue;
         }

         assert(ubo_range->block < push_map->block_count);
         const struct anv_pipeline_binding *binding =
            &push_map->block_to_descriptor[ubo_range->block];

         map->push_ranges[n_push_ranges++] = (struct anv_push_range) {
            .set = binding->set,
            .index = binding->index,
            .dynamic_offset_index = binding->dynamic_offset_index,
            .start = ubo_range->start,
            .length = ubo_range->length,
         };

         /* We only bother to shader-zero pushed client UBOs */
         if (binding->set < MAX_SETS &&
             (robust_flags & BRW_ROBUSTNESS_UBO)) {
            prog_data->zero_push_reg |= BITFIELD64_RANGE(range_start_reg,
                                                         ubo_range->length);
         }

         range_start_reg += ubo_range->length;
      }
   } else if (push_constant_range.length > 0) {
      /* For Ivy Bridge, the push constants packets have a different
       * rule that would require us to iterate in the other direction
       * and possibly mess around with dynamic state base address.
       * Don't bother; just emit regular push constants at n = 0.
       *
       * In the compute case, we don't have multiple push ranges so it's
       * better to just provide one in push_ranges[0].
       */
      map->push_ranges[n_push_ranges++] = push_constant_range;
   }

   /* Pass a single-register push constant payload for the PS stage even if
    * empty, since PS invocations with zero push constant cycles have been
    * found to cause hangs with TBIMR enabled. See HSDES #22020184996.
    *
    * XXX - Use workaround infrastructure and final workaround when provided
    * by hardware team.
    */
   if (n_push_ranges == 0 &&
       nir->info.stage == MESA_SHADER_FRAGMENT &&
       devinfo->needs_null_push_constant_tbimr_workaround) {
      map->push_ranges[n_push_ranges++] = (struct anv_push_range) {
         .set = ANV_DESCRIPTOR_SET_NULL,
         .start = 0,
         .length = 1,
      };
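      /* Claim the matching 8 dwords (32 / 4) of dummy uniform data so the
       * push layout stays consistent with the one-unit range above.
       */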
      assert(prog_data->nr_params == 0);
      prog_data->nr_params = 32 / 4;
   }

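   /* Record the dword offset of the dynamic MSAA flags (gfx.fs_msaa_flags)
    * relative to the start of the pushed data.
    */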
   if (nir->info.stage == MESA_SHADER_FRAGMENT && fragment_dynamic) {
      struct brw_wm_prog_data *wm_prog_data =
         container_of(prog_data, struct brw_wm_prog_data, base);

      const uint32_t fs_msaa_flags_offset =
         anv_drv_const_offset(gfx.fs_msaa_flags);
      assert(fs_msaa_flags_offset >= push_start);
      wm_prog_data->msaa_flags_param =
         (fs_msaa_flags_offset - push_start) / 4;
   }

#if 0
   fprintf(stderr, "stage=%s push ranges:\n", gl_shader_stage_name(nir->info.stage));
   for (unsigned i = 0; i < ARRAY_SIZE(map->push_ranges); i++)
      fprintf(stderr, " range%i: %03u-%03u set=%u index=%u\n", i,
              map->push_ranges[i].start,
              map->push_ranges[i].length,
              map->push_ranges[i].set,
              map->push_ranges[i].index);
#endif

   /* Now that we're done computing the push constant portion of the
    * bind map, hash it. This lets us quickly determine if the actual
    * mapping has changed and not just a no-op pipeline change.
    */
   _mesa_sha1_compute(map->push_ranges,
                      sizeof(map->push_ranges),
                      map->push_sha1);
}

void
anv_nir_validate_push_layout(const struct anv_physical_device *pdevice,
                             struct brw_stage_prog_data *prog_data,
                             struct anv_pipeline_bind_map *map)
{
#ifndef NDEBUG
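   /* Tally the push size implied by prog_data (uniform params rounded up to
    * a whole GRF, plus the pushed UBO ranges) so it can be compared against
    * the total length of the bind map's push ranges below.
    */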
   unsigned prog_data_push_size = ALIGN(prog_data->nr_params, pdevice->info.grf_size / 4) / 8;

   for (unsigned i = 0; i < 4; i++)
      prog_data_push_size += prog_data->ubo_ranges[i].length;

   unsigned bind_map_push_size = 0;
   for (unsigned i = 0; i < 4; i++)
      bind_map_push_size += map->push_ranges[i].length;

   /* We could go through everything again but it should be enough to assert
    * that they push the same number of registers. This should alert us if
    * the back-end compiler decides to re-arrange stuff or shrink a range.
    */
   assert(prog_data_push_size == bind_map_push_size);
#endif
}