/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_blorp.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * GenX specific code for working with BLORP (blitting, resolves, clears
 * on the 3D engine).  This provides the driver-specific hooks needed to
 * implement the BLORP API.
 *
 * See iris_blit.c, iris_clear.c, and so on.
 */

#include <assert.h>

#include "iris_batch.h"
#include "iris_resource.h"
#include "iris_context.h"

#include "util/u_upload_mgr.h"
#include "intel/common/gen_l3_config.h"

#include "blorp/blorp_genX_exec.h"

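/**
 * Stream out transient state for BLORP through the given uploader.
 *
 * Allocates "size" bytes at "alignment", pins the backing BO in the batch,
 * and returns a CPU mapping of the space.  The offset comes back either
 * relative to the relevant base address, or (if out_bo is non-NULL) as a
 * raw offset paired with the BO itself; see the comment in the body.
 */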
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset,
             struct iris_bo **out_bo)
{
   struct pipe_resource *res = NULL;
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);

   struct iris_bo *bo = iris_resource_bo(res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   /* If the caller has asked for a BO, we leave them the responsibility of
    * adding bo->gtt_offset (say, by handing an address to genxml).  If not,
    * we assume they want the offset from a base address.
    */
   if (out_bo)
      *out_bo = bo;
   else
      *out_offset += iris_bo_offset_from_base_address(bo);

   pipe_resource_reference(&res, NULL);

   return ptr;
}

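/* BLORP hook: carve out space for "n" dwords in the batch's command stream. */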
static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return iris_get_command_space(batch, n * sizeof(uint32_t));
}

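/* Pin the BO behind "addr" for this batch and return its full graphics
 * address.  iris softpins all buffers, so addresses are stable and no
 * relocation list entry needs to be recorded.
 */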
static uint64_t
combine_and_pin_address(struct blorp_batch *blorp_batch,
                        struct blorp_address addr)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo = addr.buffer;

   iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE,
                      IRIS_DOMAIN_NONE);

   /* Assume this is a general address, not relative to a base. */
   return bo->gtt_offset + addr.offset;
}

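/* BLORP hook: with softpin, a "relocation" is just computing the final
 * 64-bit address; there is no kernel relocation processing to set up.
 */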
static uint64_t
blorp_emit_reloc(struct blorp_batch *blorp_batch, UNUSED void *location,
                 struct blorp_address addr, uint32_t delta)
{
   return combine_and_pin_address(blorp_batch, addr) + delta;
}

static void
blorp_surface_reloc(struct blorp_batch *blorp_batch, uint32_t ss_offset,
                    struct blorp_address addr, uint32_t delta)
{
   /* Let blorp_get_surface_address do the pinning. */
}

static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address addr)
{
   return combine_and_pin_address(blorp_batch, addr);
}

UNUSED static struct blorp_address
blorp_get_surface_base_address(UNUSED struct blorp_batch *blorp_batch)
{
   return (struct blorp_address) { .offset = IRIS_MEMZONE_BINDER_START };
}

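/* BLORP hook: allocate transient dynamic state from the context's
 * dynamic state uploader.
 */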
static void *
blorp_alloc_dynamic_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   return stream_state(batch, ice->state.dynamic_uploader,
                       size, alignment, offset, NULL);
}

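/**
 * BLORP hook: allocate a binding table plus the SURFACE_STATE entries it
 * references.  Each surface state is streamed out via the surface
 * uploader, and its offset relative to the binder BO is written into the
 * corresponding binding table entry.
 */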
static void
blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
                          unsigned num_entries,
                          unsigned state_size,
                          unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets,
                          void **surface_maps)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_batch *batch = blorp_batch->driver_batch;

   *bt_offset = iris_binder_reserve(ice, num_entries * sizeof(uint32_t));
   uint32_t *bt_map = binder->map + *bt_offset;

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
                                     state_size, state_alignment,
                                     &surface_offsets[i], NULL);
      bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->gtt_offset;
   }

   iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);

   batch->screen->vtbl.update_surface_base_address(batch, binder);
}

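/* BLORP hook: stream out vertex data, returning both a CPU mapping and a
 * GPU address (with MOCS) suitable for 3DSTATE_VERTEX_BUFFERS.
 */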
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          struct blorp_address *addr)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo;
   uint32_t offset;

   void *map = stream_state(batch, ice->ctx.stream_uploader, size, 64,
                            &offset, &bo);

   *addr = (struct blorp_address) {
      .buffer = bo,
      .offset = offset,
      .mocs = iris_mocs(bo, &batch->screen->isl_dev,
                        ISL_SURF_USAGE_VERTEX_BUFFER_BIT),
   };

   return map;
}

/**
 * See iris_upload_render_state's IRIS_DIRTY_VERTEX_BUFFERS handling for
 * a comment about why these VF invalidations are needed.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
                                           const struct blorp_address *addrs,
                                           UNUSED uint32_t *sizes,
                                           unsigned num_vbs)
{
#if GEN_GEN < 11
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   bool need_invalidate = false;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct iris_bo *bo = addrs[i].buffer;
      uint16_t high_bits = bo->gtt_offset >> 32u;

      if (high_bits != ice->state.last_vbo_high_bits[i]) {
         need_invalidate = true;
         ice->state.last_vbo_high_bits[i] = high_bits;
      }
   }

   if (need_invalidate) {
      iris_emit_pipe_control_flush(batch,
                                   "workaround: VF cache 32-bit key [blorp]",
                                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                   PIPE_CONTROL_CS_STALL);
   }
#endif
}

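/* BLORP hook: hand back the screen's scratch address, which serves as a
 * dummy target for workaround writes.
 */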
static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   return (struct blorp_address) {
      .buffer = batch->screen->workaround_address.bo,
      .offset = batch->screen->workaround_address.offset,
   };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
                  UNUSED void *start,
                  UNUSED size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

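/* BLORP hook: BLORP always runs on the 3D pipeline, so hand it the
 * screen's 3D L3 configuration.
 */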
static const struct gen_l3_config *
blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return batch->screen->l3_config_3d;
}

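/**
 * Main driver hook for executing a BLORP operation: emit the workarounds
 * and cache flushes the operation needs, run blorp_exec(), and then flag
 * the state BLORP clobbered as dirty so the next regular draw re-emits it.
 */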
static void
iris_blorp_exec(struct blorp_batch *blorp_batch,
                const struct blorp_params *params)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

#if GEN_GEN >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target
    *     Message points to a different RENDER_SURFACE_STATE, SW must issue a
    *     Render Target Cache Flush by enabling this bit.  When render target
    *     flush is set due to new association of BTI, PS Scoreboard Stall bit
    *     must be set in this packet."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: RT BTI change [blorp]",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif

   /* Flush the render cache in cases where the same surface is reinterpreted
    * with a different format, which blorp does for stencil and depth data
    * among other things.  Invalidation of sampler caches and flushing of any
    * caches which had previously written the source surfaces should already
    * have been handled by the caller.
    */
   if (params->dst.enabled) {
      iris_cache_flush_for_render(batch, params->dst.addr.buffer,
                                  params->dst.view.format,
                                  params->dst.aux_usage);
   }

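   /* Reserve enough command space up front that emitting the BLORP
    * commands cannot force the batch to be flushed partway through.
    */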
   iris_require_command_space(batch, 1400);

#if GEN_GEN == 8
   genX(update_pma_fix)(ice, batch, false);
#endif

   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   if (ice->state.current_hash_scale != scale) {
      genX(emit_hashing_mode)(ice, batch, params->x1 - params->x0,
                              params->y1 - params->y0, scale);
   }

#if GEN_GEN >= 12
   genX(invalidate_aux_map_state)(batch);
#endif

   iris_handle_always_flush_cache(batch);

   blorp_exec(blorp_batch, params);

   iris_handle_always_flush_cache(batch);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */

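   /* Everything not listed in these skip bits gets flagged dirty below,
    * since BLORP may have overwritten it.
    */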
   uint64_t skip_bits = (IRIS_DIRTY_POLYGON_STIPPLE |
                         IRIS_DIRTY_SO_BUFFERS |
                         IRIS_DIRTY_SO_DECL_LIST |
                         IRIS_DIRTY_LINE_STIPPLE |
                         IRIS_ALL_DIRTY_FOR_COMPUTE |
                         IRIS_DIRTY_SCISSOR_RECT |
                         IRIS_DIRTY_VF |
                         IRIS_DIRTY_SF_CL_VIEWPORT);
   uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE |
                               IRIS_STAGE_DIRTY_UNCOMPILED_VS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TES |
                               IRIS_STAGE_DIRTY_UNCOMPILED_GS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_FS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_VS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TES |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_GS);

   if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) {
      /* BLORP disabled tessellation; that's fine for the next draw. */
      skip_stage_bits |= IRIS_STAGE_DIRTY_TCS |
                         IRIS_STAGE_DIRTY_TES |
                         IRIS_STAGE_DIRTY_CONSTANTS_TCS |
                         IRIS_STAGE_DIRTY_CONSTANTS_TES |
                         IRIS_STAGE_DIRTY_BINDINGS_TCS |
                         IRIS_STAGE_DIRTY_BINDINGS_TES;
   }

   if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) {
      /* BLORP disabled geometry shaders; that's fine for the next draw. */
      skip_stage_bits |= IRIS_STAGE_DIRTY_GS |
                         IRIS_STAGE_DIRTY_CONSTANTS_GS |
                         IRIS_STAGE_DIRTY_BINDINGS_GS;
   }


   /* We can skip flagging IRIS_DIRTY_DEPTH_BUFFER if
    * BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set.
    */
   if (blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL)
      skip_bits |= IRIS_DIRTY_DEPTH_BUFFER;

   if (!params->wm_prog_data)
      skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;

   ice->state.dirty |= ~skip_bits;
   ice->state.stage_dirty |= ~skip_stage_bits;

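   /* Record this batch's accesses on each referenced BO, per access
    * domain, so iris's cross-batch cache tracking sees BLORP's reads
    * and writes.
    */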
   if (params->src.enabled)
      iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_OTHER_READ);
   if (params->dst.enabled)
      iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_RENDER_WRITE);
   if (params->depth.enabled)
      iris_bo_bump_seqno(params->depth.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
   if (params->stencil.enabled)
      iris_bo_bump_seqno(params->stencil.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
}

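/* Wire up BLORP for this context: register the compiler and the driver
 * hooks it will call back into.
 */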
void
genX(init_blorp)(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;

   blorp_init(&ice->blorp, ice, &screen->isl_dev);
   ice->blorp.compiler = screen->compiler;
   ice->blorp.lookup_shader = iris_blorp_lookup_shader;
   ice->blorp.upload_shader = iris_blorp_upload_shader;
   ice->blorp.exec = iris_blorp_exec;
}