/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "anv_private.h"
#include "anv_measure.h"

/* These are defined in anv_private.h and blorp_genX_exec_brw.h */
#undef __gen_address_type
#undef __gen_user_data
#undef __gen_combine_address

#include "common/intel_l3_config.h"
#include "blorp/blorp_genX_exec_brw.h"

#include "ds/intel_tracepoints.h"

static void blorp_measure_start(struct blorp_batch *_batch,
                                const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = _batch->driver_batch;
   trace_intel_begin_blorp(&cmd_buffer->trace);
   anv_measure_snapshot(cmd_buffer,
                        blorp_op_to_intel_measure_snapshot(params->op),
                        NULL, 0);
}

static void blorp_measure_end(struct blorp_batch *_batch,
                              const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = _batch->driver_batch;
   trace_intel_end_blorp(&cmd_buffer->trace,
                         params->op,
                         params->x1 - params->x0,
                         params->y1 - params->y0,
                         params->num_samples,
                         params->shader_pipeline,
                         params->dst.view.format,
                         params->src.view.format,
                         (_batch->flags & BLORP_BATCH_PREDICATE_ENABLE));
}

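/* BLORP driver hooks.  blorp_genX_exec_brw.h expects the driver to supply
 * the callbacks below; in anv they all recover the command buffer from the
 * batch's driver_batch pointer and forward to the anv batch machinery.
 */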
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return anv_batch_emit_dwords(&cmd_buffer->batch, n);
}

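/* Buffers are pinned at fixed addresses, so a "relocation" never needs
 * patching after the fact: we can compute the final 64-bit address right
 * away.  Adding the BO to the reloc list keeps it resident for this
 * submission.
 */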
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   struct anv_address anv_addr = {
      .bo = address.buffer,
      .offset = address.offset,
   };
   anv_reloc_list_add_bo(cmd_buffer->batch.relocs, anv_addr.bo);
   return anv_address_physical(anv_address_add(anv_addr, delta));
}

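/* Surface states likewise embed final addresses; all that is left to do
 * is track the BO for residency and flag any failure on the batch.
 */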
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   VkResult result = anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
                                           address.buffer);
   if (unlikely(result != VK_SUCCESS))
      anv_batch_set_error(&cmd_buffer->batch, result);
}

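/* Return the 64-bit address BLORP should write into a surface state for
 * the given buffer/offset pair.
 */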
static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address address)
{
   struct anv_address anv_addr = {
      .bo = address.buffer,
      .offset = address.offset,
   };
   return anv_address_physical(anv_addr);
}

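/* BLORP only asks for the surface state base address on Gfx9, where some
 * surface offsets are still programmed relative to Surface State Base
 * Address rather than as full GPU addresses.
 */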
#if GFX_VER == 9
static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return (struct blorp_address) {
      .buffer = cmd_buffer->device->internal_surface_state_pool.block_pool.bo,
      .offset = -cmd_buffer->device->internal_surface_state_pool.start_offset,
   };
}
#endif

static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);

   *offset = state.offset;
   return state.map;
}

UNUSED static void *
blorp_alloc_general_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   struct anv_state state =
      anv_cmd_buffer_alloc_general_state(cmd_buffer, size, alignment);

   *offset = state.offset;
   return state.map;
}

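/* Allocate a binding table and one surface state per entry.  Each table
 * slot stores the surface state offset biased by the state_offset the
 * allocator returned for this block of surface states.
 */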
static bool
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets, void **surface_maps)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, num_entries,
                                               &state_offset, &bt_state);
   if (result != VK_SUCCESS)
      return false;

   uint32_t *bt_map = bt_state.map;
   *bt_offset = bt_state.offset;

   for (unsigned i = 0; i < num_entries; i++) {
      struct anv_state surface_state =
         anv_cmd_buffer_alloc_surface_states(cmd_buffer, 1);
      if (surface_state.map == NULL)
         return false;

      bt_map[i] = surface_state.offset + state_offset;
      surface_offsets[i] = surface_state.offset;
      surface_maps[i] = surface_state.map;
   }

   return true;
}

static uint32_t
blorp_binding_table_offset_to_pointer(struct blorp_batch *batch,
                                      uint32_t offset)
{
   return offset;
}

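/* BLORP's vertex data is small and lives only as long as the command
 * buffer, so carve it out of the dynamic state pool instead of a
 * dedicated vertex buffer BO.
 */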
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   struct anv_state vb_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 64);
   struct anv_address vb_addr =
      anv_state_pool_state_address(&cmd_buffer->device->dynamic_state_pool,
                                   vb_state);

   *addr = (struct blorp_address) {
      .buffer = vb_addr.bo,
      .offset = vb_addr.offset,
      .mocs = isl_mocs(&cmd_buffer->device->isl_dev,
                       ISL_SURF_USAGE_VERTEX_BUFFER_BIT, false),
   };

   return vb_state.map;
}

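/* Gfx9 only: the VF cache does not key on the full vertex buffer address,
 * so bindings that move across the affected address boundary require a VF
 * cache invalidate.  The gfx8 VB flush helpers track this per binding.
 */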
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
                                           const struct blorp_address *addrs,
                                           uint32_t *sizes,
                                           unsigned num_vbs)
{
#if GFX_VER == 9
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct anv_address anv_addr = {
         .bo = addrs[i].buffer,
         .offset = addrs[i].offset,
      };
      genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(cmd_buffer,
                                                     i, anv_addr, sizes[i]);
   }

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   /* Technically, we should call this *after* 3DPRIMITIVE but it doesn't
    * really matter for blorp because we never call apply_pipe_flushes after
    * this point.
    */
   genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(cmd_buffer, SEQUENTIAL,
                                                       (1 << num_vbs) - 1);
#endif
}

UNUSED static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = cmd_buffer->device->workaround_address.bo,
      .offset = cmd_buffer->device->workaround_address.offset,
   };
}

static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   /* We don't need to flush states anymore, since everything will be snooped.
    */
}

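/* Called before BLORP reprograms the URB.  Apply any URB workarounds and
 * mirror the new configuration into our gfx state so later pipeline binds
 * know what BLORP left behind.
 */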
static void
blorp_pre_emit_urb_config(struct blorp_batch *blorp_batch,
                          struct intel_urb_config *urb_cfg)
{
   struct anv_cmd_buffer *cmd_buffer = blorp_batch->driver_batch;
   genX(urb_workaround)(cmd_buffer, urb_cfg);

   /* Update urb config. */
   memcpy(&cmd_buffer->state.gfx.urb_cfg, urb_cfg,
          sizeof(struct intel_urb_config));
}

static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return cmd_buffer->state.current_l3_config;
}

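/* Run a BLORP operation on the render engine.  This is the heavyweight
 * path: select the 3D pipeline, apply pending flushes and workarounds
 * around the draw, then mark everything BLORP may have clobbered dirty.
 */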
static void
blorp_exec_on_render(struct blorp_batch *batch,
                     const struct blorp_params *params)
{
   assert((batch->flags & BLORP_BATCH_USE_COMPUTE) == 0);

   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(cmd_buffer->queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT);

   struct anv_gfx_dynamic_state *hw_state =
      &cmd_buffer->state.gfx.dyn_state;

   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, params->x1 - params->x0,
                                      params->y1 - params->y0, scale);

#if GFX_VER >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit. When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (blorp_uses_bti_rt_writes(batch, params)) {
      anv_add_pending_pipe_bits(cmd_buffer,
                                ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
                                ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
                                "before blorp BTI change");
   }
#endif

#if GFX_VERx10 >= 125
   /* Check whether BLORP's depth/stencil state matches ours. */
   if (intel_needs_workaround(cmd_buffer->device->info, 18019816803)) {
      bool blorp_ds_state = params->depth.enabled || params->stencil.enabled;
      if (cmd_buffer->state.gfx.ds_write_state != blorp_ds_state) {
         /* Flag the change in ds_write_state so that the next pipeline use
          * will trigger a PIPE_CONTROL too.
          */
         cmd_buffer->state.gfx.ds_write_state = blorp_ds_state;
         BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WA_18019816803);

         /* Add the stall; genX(cmd_buffer_apply_pipe_flushes) will flush it
          * prior to the blorp operation.
          */
         anv_add_pending_pipe_bits(cmd_buffer,
                                   ANV_PIPE_PSS_STALL_SYNC_BIT,
                                   "Wa_18019816803");
      }
   }
#endif

   if (params->depth.enabled &&
       !(batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
      genX(cmd_buffer_emit_gfx12_depth_wa)(cmd_buffer, &params->depth.surf);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   /* Wa_14015814527 */
   genX(apply_task_urb_workaround)(cmd_buffer);

   /* Apply any outstanding flushes in case the pipeline select didn't. */
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   /* BLORP doesn't do anything fancy with depth such as discards, so we want
    * the PMA fix off. Also, off is always the safe option.
    */
   genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);

   blorp_exec(batch, params);

#if GFX_VER >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit. When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (blorp_uses_bti_rt_writes(batch, params)) {
      anv_add_pending_pipe_bits(cmd_buffer,
                                ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
                                ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
                                "after blorp BTI change");
   }
#endif

   /* Flag all the state emitted by BLORP as dirty so it gets re-emitted on
    * the next draw.
    */
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_URB);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_STATISTICS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_TOPOLOGY);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VERTEX_INPUT);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_SGVS);
#if GFX_VER >= 11
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_SGVS_2);
#endif
#if GFX_VER >= 12
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PRIMITIVE_REPLICATION);
#endif
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VIEWPORT_CC);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_STREAMOUT);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_RASTER);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_CLIP);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SAMPLE_MASK);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_MULTISAMPLE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SF);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SBE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SBE_SWIZ);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_DEPTH_BOUNDS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WM);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WM_DEPTH_STENCIL);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_HS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_DS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_TE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_GS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS_EXTRA);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_BLEND_STATE_POINTERS);
   if (batch->blorp->config.use_mesh_shading) {
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_MESH_CONTROL);
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_TASK_CONTROL);
   }
   if (params->wm_prog_data) {
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_CC_STATE);
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS_BLEND);
   }

   anv_cmd_dirty_mask_t dirty = ~(ANV_CMD_DIRTY_INDEX_BUFFER |
                                  ANV_CMD_DIRTY_XFB_ENABLE);

   cmd_buffer->state.gfx.vb_dirty = ~0;
   cmd_buffer->state.gfx.dirty |= dirty;
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}

static void
blorp_exec_on_compute(struct blorp_batch *batch,
                      const struct blorp_params *params)
{
   assert(batch->flags & BLORP_BATCH_USE_COMPUTE);

   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(cmd_buffer->queue_family->queueFlags & VK_QUEUE_COMPUTE_BIT);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   /* Apply any outstanding flushes in case the pipeline select didn't. */
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   blorp_exec(batch, params);

   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
}

static void
blorp_exec_on_blitter(struct blorp_batch *batch,
                      const struct blorp_params *params)
{
   assert(batch->flags & BLORP_BATCH_USE_BLITTER);

   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(cmd_buffer->queue_family->queueFlags == VK_QUEUE_TRANSFER_BIT);

   blorp_exec(batch, params);
}

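/* Main entry point called by BLORP.  Make sure preemption and the L3
 * configuration are set up, then dispatch to the engine-specific path
 * selected by the batch flags.
 */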
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   /* Turn on preemption if it was toggled off. */
   if (!cmd_buffer->state.gfx.object_preemption)
      genX(cmd_buffer_set_preemption)(cmd_buffer, true);

   if (!cmd_buffer->state.current_l3_config) {
      const struct intel_l3_config *cfg =
         intel_get_default_l3_config(cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   if (batch->flags & BLORP_BATCH_USE_BLITTER)
      blorp_exec_on_blitter(batch, params);
   else if (batch->flags & BLORP_BATCH_USE_COMPUTE)
      blorp_exec_on_compute(batch, params);
   else
      blorp_exec_on_render(batch, params);
}

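/* Hooks BLORP invokes around its 3DPRIMITIVE: measurement snapshots and
 * breakpoints on both sides, plus post-draw workarounds.
 */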
static void
blorp_emit_pre_draw(struct blorp_batch *batch, const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   blorp_measure_start(batch, params);
   genX(emit_breakpoint)(&cmd_buffer->batch, cmd_buffer->device, true);
}

static void
blorp_emit_post_draw(struct blorp_batch *batch, const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   genX(batch_emit_post_3dprimitive_was)(&cmd_buffer->batch,
                                         cmd_buffer->device,
                                         _3DPRIM_RECTLIST,
                                         3);

   genX(emit_breakpoint)(&cmd_buffer->batch, cmd_buffer->device, false);
   blorp_measure_end(batch, params);
}