/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_blorp.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * GenX specific code for working with BLORP (blitting, resolves, clears
 * on the 3D engine).  This provides the driver-specific hooks needed to
 * implement the BLORP API.
 *
 * See iris_blit.c, iris_clear.c, and so on.
 */

#include <assert.h>

#include "iris_batch.h"
#include "iris_resource.h"
#include "iris_context.h"

#include "util/u_upload_mgr.h"
#include "intel/common/intel_l3_config.h"

#include "blorp/blorp_genX_exec.h"

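/**
 * Allocate space in a streaming state buffer, pin its BO in the batch,
 * and return a CPU mapping of the new space.  The allocation's offset is
 * returned in *out_offset; if out_bo is non-NULL, the underlying BO is
 * returned there as well.
 */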
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset,
             struct iris_bo **out_bo)
{
   struct pipe_resource *res = NULL;
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);

   struct iris_bo *bo = iris_resource_bo(res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->address + *out_offset, size);

   /* If the caller has asked for a BO, we leave them the responsibility of
    * adding bo->address (say, by handing an address to genxml).  If not,
    * we assume they want the offset from a base address.
    */
   if (out_bo)
      *out_bo = bo;
   else
      *out_offset += iris_bo_offset_from_base_address(bo);

   pipe_resource_reference(&res, NULL);

   return ptr;
}

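/* Reserve space for 'n' dwords of BLORP commands in the batch. */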
static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return iris_get_command_space(batch, n * sizeof(uint32_t));
}

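/* Pin the BO for an address in the batch and return its final 64-bit GPU
 * address.  iris places every BO at a fixed (softpinned) virtual address,
 * so the final address is known up front without relocation processing.
 */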
static uint64_t
combine_and_pin_address(struct blorp_batch *blorp_batch,
                        struct blorp_address addr)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo = addr.buffer;

   iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE,
                      IRIS_DOMAIN_NONE);

   /* Assume this is a general address, not relative to a base. */
   return bo->address + addr.offset;
}

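/* "Relocations" are trivial with softpin: simply compute the final
 * address.  Nothing needs patching later, so the location is unused.
 */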
static uint64_t
blorp_emit_reloc(struct blorp_batch *blorp_batch, UNUSED void *location,
                 struct blorp_address addr, uint32_t delta)
{
   return combine_and_pin_address(blorp_batch, addr) + delta;
}

static void
blorp_surface_reloc(struct blorp_batch *blorp_batch, uint32_t ss_offset,
                    struct blorp_address addr, uint32_t delta)
{
   /* Let blorp_get_surface_address do the pinning. */
}

static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address addr)
{
   return combine_and_pin_address(blorp_batch, addr);
}

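/* Surface state offsets are relative to the start of the binder memory
 * zone (IRIS_MEMZONE_BINDER_START), which is where iris points Surface
 * State Base Address.
 */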
UNUSED static struct blorp_address
blorp_get_surface_base_address(UNUSED struct blorp_batch *blorp_batch)
{
   return (struct blorp_address) { .offset = IRIS_MEMZONE_BINDER_START };
}

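/* Allocate transient state via the dynamic state streaming uploader,
 * returning its offset from Dynamic State Base Address.
 */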
static void *
blorp_alloc_dynamic_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   return stream_state(batch, ice->state.dynamic_uploader,
                       size, alignment, offset, NULL);
}

UNUSED static void *
blorp_alloc_general_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   /* Use dynamic state range for general state on iris. */
   return blorp_alloc_dynamic_state(blorp_batch, size, alignment, offset);
}

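/* Allocate a binding table plus surface states for BLORP's shaders.
 * Binding table entries are offsets from Surface State Base Address,
 * i.e. from the start of the binder BO.
 */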
static void
blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
                          unsigned num_entries,
                          unsigned state_size,
                          unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets,
                          void **surface_maps)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_batch *batch = blorp_batch->driver_batch;

   *bt_offset = iris_binder_reserve(ice, num_entries * sizeof(uint32_t));
   uint32_t *bt_map = binder->map + *bt_offset;

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
                                     state_size, state_alignment,
                                     &surface_offsets[i], NULL);
      bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->address;
   }

   iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);

   batch->screen->vtbl.update_surface_base_address(batch, binder);
}

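/* Stream vertex data into a temporary upload buffer and fill out a
 * blorp_address describing it, so BLORP can emit a vertex buffer
 * pointing at the data.
 */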
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          struct blorp_address *addr)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo;
   uint32_t offset;

   void *map = stream_state(batch, ice->ctx.const_uploader, size, 64,
                            &offset, &bo);

   *addr = (struct blorp_address) {
      .buffer = bo,
      .offset = offset,
      .mocs = iris_mocs(bo, &batch->screen->isl_dev,
                        ISL_SURF_USAGE_VERTEX_BUFFER_BIT),
   };

   return map;
}

/**
 * See iris_upload_render_state's IRIS_DIRTY_VERTEX_BUFFERS handling for
 * a comment about why these VF invalidations are needed.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
                                           const struct blorp_address *addrs,
                                           UNUSED uint32_t *sizes,
                                           unsigned num_vbs)
{
#if GFX_VER < 11
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   bool need_invalidate = false;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct iris_bo *bo = addrs[i].buffer;
      uint16_t high_bits = bo->address >> 32u;

      if (high_bits != ice->state.last_vbo_high_bits[i]) {
         need_invalidate = true;
         ice->state.last_vbo_high_bits[i] = high_bits;
      }
   }

   if (need_invalidate) {
      iris_emit_pipe_control_flush(batch,
                                   "workaround: VF cache 32-bit key [blorp]",
                                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                   PIPE_CONTROL_CS_STALL);
   }
#endif
}

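/* Return the screen's scratch allocation, used as a dummy write target
 * for workarounds (e.g. PIPE_CONTROL post-sync writes).
 */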
static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   return (struct blorp_address) {
      .buffer = batch->screen->workaround_address.bo,
      .offset = batch->screen->workaround_address.offset,
   };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
                  UNUSED void *start,
                  UNUSED size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

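/* BLORP runs on the 3D engine, so hand it the render L3 configuration. */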
static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return batch->screen->l3_config_3d;
}

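/**
 * Emit a BLORP operation into the batch: apply the necessary workarounds,
 * execute the operation, then flag nearly all driver state as dirty, since
 * BLORP programs the hardware directly and bypasses iris's state tracking.
 */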
static void
iris_blorp_exec(struct blorp_batch *blorp_batch,
                const struct blorp_params *params)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

#if GFX_VER >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target
    *     Message points to a different RENDER_SURFACE_STATE, SW must issue a
    *     Render Target Cache Flush by enabling this bit. When render target
    *     flush is set due to new association of BTI, PS Scoreboard Stall bit
    *     must be set in this packet."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: RT BTI change [blorp]",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif

   if (params->depth.enabled &&
       !(blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
      genX(emit_depth_state_workarounds)(ice, batch, &params->depth.surf);

   /* Flush the render cache in cases where the same surface is used with
    * different aux modes, which can lead to GPU hangs.  Invalidation of
    * sampler caches and flushing of any caches which had previously written
    * the source surfaces should already have been handled by the caller.
    */
   if (params->dst.enabled) {
      iris_cache_flush_for_render(batch, params->dst.addr.buffer,
                                  params->dst.aux_usage);
   }

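   /* Reserve enough command space up front (1400 bytes here, presumably a
    * worst-case estimate) so that emitting the BLORP operation cannot
    * cause the batch to flush partway through.
    */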
   iris_require_command_space(batch, 1400);

#if GFX_VER == 8
   genX(update_pma_fix)(ice, batch, false);
#endif

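   /* Fast clears prefer a different pixel hashing configuration, so
    * reprogram the hashing mode if the desired scale has changed.
    */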
   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   if (ice->state.current_hash_scale != scale) {
      genX(emit_hashing_mode)(ice, batch, params->x1 - params->x0,
                              params->y1 - params->y0, scale);
   }

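   /* On Gfx12+, if the aux map has changed, invalidate the hardware's
    * cached copy of it before drawing.
    */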
#if GFX_VER >= 12
   genX(invalidate_aux_map_state)(batch);
#endif

   iris_handle_always_flush_cache(batch);

   blorp_exec(blorp_batch, params);

   iris_handle_always_flush_cache(batch);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */

   uint64_t skip_bits = (IRIS_DIRTY_POLYGON_STIPPLE |
                         IRIS_DIRTY_SO_BUFFERS |
                         IRIS_DIRTY_SO_DECL_LIST |
                         IRIS_DIRTY_LINE_STIPPLE |
                         IRIS_ALL_DIRTY_FOR_COMPUTE |
                         IRIS_DIRTY_SCISSOR_RECT |
                         IRIS_DIRTY_VF |
                         IRIS_DIRTY_SF_CL_VIEWPORT);
   uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE |
                               IRIS_STAGE_DIRTY_UNCOMPILED_VS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TES |
                               IRIS_STAGE_DIRTY_UNCOMPILED_GS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_FS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_VS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TES |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_GS);

   if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) {
      /* BLORP disabled tessellation, which is fine for the next draw. */
      skip_stage_bits |= IRIS_STAGE_DIRTY_TCS |
                         IRIS_STAGE_DIRTY_TES |
                         IRIS_STAGE_DIRTY_CONSTANTS_TCS |
                         IRIS_STAGE_DIRTY_CONSTANTS_TES |
                         IRIS_STAGE_DIRTY_BINDINGS_TCS |
                         IRIS_STAGE_DIRTY_BINDINGS_TES;
   }

   if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) {
      /* BLORP disabled geometry shaders, which is fine for the next draw. */
      skip_stage_bits |= IRIS_STAGE_DIRTY_GS |
                         IRIS_STAGE_DIRTY_CONSTANTS_GS |
                         IRIS_STAGE_DIRTY_BINDINGS_GS;
   }

   /* We can skip flagging IRIS_DIRTY_DEPTH_BUFFER if
    * BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set.
    */
   if (blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL)
      skip_bits |= IRIS_DIRTY_DEPTH_BUFFER;

   if (!params->wm_prog_data)
      skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;

   ice->state.dirty |= ~skip_bits;
   ice->state.stage_dirty |= ~skip_stage_bits;

   for (int i = 0; i < ARRAY_SIZE(ice->shaders.urb.size); i++)
      ice->shaders.urb.size[i] = 0;

   if (params->src.enabled)
      iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_OTHER_READ);
   if (params->dst.enabled)
      iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_RENDER_WRITE);
   if (params->depth.enabled)
      iris_bo_bump_seqno(params->depth.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
   if (params->stencil.enabled)
      iris_bo_bump_seqno(params->stencil.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
}

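/* INTEL_MEASURE hook: record a snapshot at the start of each BLORP
 * operation so its GPU time can be attributed.
 */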
static void
blorp_measure_start(struct blorp_batch *blorp_batch,
                    const struct blorp_params *params)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   if (batch->measure == NULL)
      return;

   iris_measure_snapshot(ice, batch, params->snapshot_type, NULL, NULL, NULL);
}

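/* Initialize the per-generation BLORP context and wire up iris's hooks. */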
void
genX(init_blorp)(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;

   blorp_init(&ice->blorp, ice, &screen->isl_dev);
   ice->blorp.compiler = screen->compiler;
   ice->blorp.lookup_shader = iris_blorp_lookup_shader;
   ice->blorp.upload_shader = iris_blorp_upload_shader;
   ice->blorp.exec = iris_blorp_exec;
}