/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "brw_batch.h"
#include "brw_mipmap_tree.h"
#include "brw_fbo.h"

#include "brw_context.h"
#include "brw_state.h"

#include "blorp/blorp_genX_exec.h"

#if GFX_VER <= 5
#include "gfx4_blorp_exec.h"
#endif

#include "brw_blorp.h"

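/* blorp/blorp_genX_exec.h, included above, instantiates the gen-specific
 * BLORP implementation.  It requires the including file to define the
 * driver callbacks below, which hand out batch and state space and emit
 * relocations on BLORP's behalf.
 */

/* i965 doesn't implement the BLORP measurement hooks; this is a no-op. */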
static void blorp_measure_start(struct blorp_batch *batch,
                                const struct blorp_params *params) { }

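/* Reserve space for @n dwords in the batchbuffer and return a pointer to
 * it.  blorp_genX_exec.h packs its commands directly into this space.
 */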
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   brw_batch_begin(brw, n);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   brw_batch_advance(brw);
   return map;
}

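/* Emit a relocation for the GPU address at @location.  On Gen4-5 some of
 * BLORP's packets land in the state buffer rather than the batchbuffer, so
 * we pick the relocation list based on where @location actually points.
 */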
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   uint32_t offset;

   if (GFX_VER < 6 && brw_ptr_in_state_buffer(&brw->batch, location)) {
      offset = (char *)location - (char *)brw->batch.state.map;
      return brw_state_reloc(&brw->batch, offset,
                             address.buffer, address.offset + delta,
                             address.reloc_flags);
   }

   assert(!brw_ptr_in_state_buffer(&brw->batch, location));

   offset = (char *)location - (char *)brw->batch.batch.map;
   return brw_batch_reloc(&brw->batch, offset,
                          address.buffer, address.offset + delta,
                          address.reloc_flags);
}

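/* Write the relocated address directly into the surface state at
 * @ss_offset.  Surface state addresses are 64 bits wide on Gen8+ and
 * 32 bits before that.
 */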
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct brw_bo *bo = address.buffer;

   uint64_t reloc_val =
      brw_state_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
                      address.reloc_flags);

   void *reloc_ptr = (void *)brw->batch.state.map + ss_offset;
#if GFX_VER >= 8
   *(uint64_t *)reloc_ptr = reloc_val;
#else
   *(uint32_t *)reloc_ptr = reloc_val;
#endif
}

static uint64_t
blorp_get_surface_address(UNUSED struct blorp_batch *blorp_batch,
                          UNUSED struct blorp_address address)
{
   /* We'll let blorp_surface_reloc write the address. */
   return 0ull;
}

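/* BLORP's surface states live in the batch's state buffer, so that buffer
 * serves as the surface base address on the gens that ask for one.
 */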
#if GFX_VER >= 7 && GFX_VER < 10
static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *batch)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   return (struct blorp_address) {
      .buffer = brw->batch.state.bo,
      .offset = 0,
   };
}
#endif

static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return brw_state_batch(brw, size, alignment, offset);
}

UNUSED static void *
blorp_alloc_general_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   /* Use dynamic state range for general state on i965. */
   return blorp_alloc_dynamic_state(blorp_batch, size, alignment, offset);
}

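/* Allocate a binding table plus @num_entries surface states from the state
 * batch.  Each binding table entry holds the state-buffer offset of the
 * corresponding surface state.
 */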
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset, uint32_t *surface_offsets,
                          void **surface_maps)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t *bt_map = brw_state_batch(brw,
                                      num_entries * sizeof(uint32_t), 32,
                                      bt_offset);

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = brw_state_batch(brw,
                                        state_size, state_alignment,
                                        &surface_offsets[i]);
      bt_map[i] = surface_offsets[i];
   }
}

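/* Stream @size bytes of vertex data into the state buffer.  The alignment
 * and address-range restrictions are hardware workarounds; see the comments
 * below.
 */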
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
    *
    *    "The VF cache needs to be invalidated before binding and then using
    *    Vertex Buffers that overlap with any previously bound Vertex Buffer
    *    (at a 64B granularity) since the last invalidation.  A VF cache
    *    invalidate is performed by setting the "VF Cache Invalidation Enable"
    *    bit in PIPE_CONTROL."
    *
    * This restriction first appears in the Skylake PRM but the internal docs
    * also list it as being an issue on Broadwell.  In order to avoid this
    * problem, we align all vertex buffer allocations to 64 bytes.
    */
   uint32_t offset;
   void *data = brw_state_batch(brw, size, 64, &offset);

   *addr = (struct blorp_address) {
      .buffer = brw->batch.state.bo,
      .offset = offset,

      /* The VF cache designers apparently cut corners, and made the cache
       * only consider the bottom 32 bits of memory addresses.  If you happen
       * to have two vertex buffers which get placed exactly 4 GiB apart and
       * use them in back-to-back draw calls, you can get collisions.  To work
       * around this problem, we restrict vertex buffers to the low 32 bits of
       * the address space.
       */
      .reloc_flags = RELOC_32BIT,

#if GFX_VER == 11
      .mocs = ICL_MOCS_WB,
#elif GFX_VER == 10
      .mocs = CNL_MOCS_WB,
#elif GFX_VER == 9
      .mocs = SKL_MOCS_WB,
#elif GFX_VER == 8
      .mocs = BDW_MOCS_WB,
#elif GFX_VER == 7
      .mocs = GFX7_MOCS_L3,
#elif GFX_VER > 6
#error "Missing MOCS setting!"
#endif
   };

   return data;
}

/**
 * See vf_invalidate_for_vb_48b_transitions in genX_state_upload.c.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(UNUSED struct blorp_batch *batch,
                                           UNUSED const struct blorp_address *addrs,
                                           UNUSED uint32_t *sizes,
                                           UNUSED unsigned num_vbs)
{
#if GFX_VER >= 8 && GFX_VER < 11
   struct brw_context *brw = batch->driver_batch;
   bool need_invalidate = false;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct brw_bo *bo = addrs[i].buffer;
      uint16_t high_bits =
         bo && (bo->kflags & EXEC_OBJECT_PINNED) ? bo->gtt_offset >> 32u : 0;

      if (high_bits != brw->vb.last_bo_high_bits[i]) {
         need_invalidate = true;
         brw->vb.last_bo_high_bits[i] = high_bits;
      }
   }

   if (need_invalidate) {
      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                       PIPE_CONTROL_CS_STALL);
   }
#endif
}

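/* Return the scratch BO the driver keeps around for workaround writes,
 * such as post-sync PIPE_CONTROL writes that need a safe dummy address.
 */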
UNUSED static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *batch)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = brw->workaround_bo,
      .offset = brw->workaround_bo_offset,
   };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *batch, UNUSED void *start,
                  UNUSED size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

#if GFX_VER >= 7
static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *batch)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return brw->l3.config;
}
#else /* GFX_VER < 7 */
static void
blorp_emit_urb_config(struct blorp_batch *batch,
                      unsigned vs_entry_size,
                      UNUSED unsigned sf_entry_size)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

#if GFX_VER == 6
   gfx6_upload_urb(brw, vs_entry_size, false, 0);
#else
   /* We calculate it now and emit later. */
   brw_calculate_urb_fence(brw, 0, vs_entry_size, sf_entry_size);
#endif
}
#endif

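/* The main entry point.  Flush any caches that could alias the surfaces
 * involved, emit the state BLORP needs, run the operation, and then flag
 * the GL state tracker since BLORP has clobbered the 3D pipeline state.
 */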
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct gl_context *ctx = &brw->ctx;
   bool check_aperture_failed_once = false;

#if GFX_VER >= 11
   /* The PIPE_CONTROL command description says:
    *
    * "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *  points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *  Target Cache Flush by enabling this bit. When render target flush
    *  is set due to new association of BTI, PS Scoreboard Stall bit must
    *  be set in this packet."
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif

   /* Flush the sampler and render caches.  We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source.  Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      brw_cache_flush_for_read(brw, params->src.addr.buffer);
   if (params->dst.enabled) {
      brw_cache_flush_for_render(brw, params->dst.addr.buffer,
                                 params->dst.view.format,
                                 params->dst.aux_usage);
   }
   if (params->depth.enabled)
      brw_cache_flush_for_depth(brw, params->depth.addr.buffer);
   if (params->stencil.enabled)
      brw_cache_flush_for_depth(brw, params->stencil.addr.buffer);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw_emit_l3_state(brw);

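   /* Reserve worst-case batch and state space up front: wrapping the batch
    * mid-operation isn't allowed (no_wrap below), so if we still run out
    * we reset to the saved state, flush, and retry with an empty batch.
    */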
retry:
   brw_batch_require_space(brw, 1400);
   brw_require_statebuffer_space(brw, 600);
   brw_batch_save_state(brw);
   check_aperture_failed_once |= brw_batch_saved_state_is_empty(brw);
   brw->batch.no_wrap = true;

#if GFX_VER == 6
   /* Emit workaround flushes when we switch from drawing to blorping. */
   brw_emit_post_sync_nonzero_flush(brw);
#endif

   brw_upload_state_base_address(brw);

#if GFX_VER >= 8
   gfx7_l3_state.emit(brw);
#endif

#if GFX_VER >= 6
   brw_emit_depth_stall_flushes(brw);
#endif

#if GFX_VER == 8
   gfx8_write_pma_stall_bits(brw, 0);
#endif

   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   if (brw->current_hash_scale != scale) {
      brw_emit_hashing_mode(brw, params->x1 - params->x0,
                            params->y1 - params->y0, scale);
   }

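   /* The hardware clips all rendering to the drawing rectangle, so it has
    * to cover the full destination region of the operation.
    */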
   blorp_emit(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = MAX2(params->x1, params->x0) - 1;
      rect.ClippedDrawingRectangleYMax = MAX2(params->y1, params->y0) - 1;
   }

   blorp_exec(batch, params);

   brw->batch.no_wrap = false;

   /* Check if the blorp op we just did would make our batch likely to fail to
    * map all the BOs into the GPU at batch exec time later.  If so, flush the
    * batch and try again with nothing else in the batch.
    */
   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         brw_batch_reset_to_saved(brw);
         brw_batch_flush(brw);
         goto retry;
      } else {
         int ret = brw_batch_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      brw_batch_flush(brw);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = !params->depth.enabled &&
                              !params->stencil.enabled;
   brw->ib.index_size = -1;
   brw->urb.vsize = 0;
   brw->urb.gs_present = false;
   brw->urb.gsize = 0;
   brw->urb.tess_present = false;
   brw->urb.hsize = 0;
   brw->urb.dsize = 0;

   if (params->dst.enabled) {
      brw_render_cache_add_bo(brw, params->dst.addr.buffer,
                              params->dst.view.format,
                              params->dst.aux_usage);
   }
   if (params->depth.enabled)
      brw_depth_cache_add_bo(brw, params->depth.addr.buffer);
   if (params->stencil.enabled)
      brw_depth_cache_add_bo(brw, params->stencil.addr.buffer);
}