/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */


#include "brw_batch.h"
#include "brw_fbo.h"
#include "brw_mipmap_tree.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"

#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/format_utils.h"
#include "main/glformats.h"

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void
upload_pipelined_state_pointers(struct brw_context *brw)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->ver == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

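   /* The packet is 7 DWords: the header followed by pointers to the VS, GS,
    * CLIP, SF, WM and CC unit state, in that order.
    */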
   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
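   /* Bit 0 of the GS and CLIP pointers below enables the corresponding
    * fixed-function unit.
    */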
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->ctx.NewDriverState |= BRW_NEW_PSP;
}

static void
upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FF_GS_PROG_DATA |
             BRW_NEW_GFX4_UNIT_STATE |
             BRW_NEW_STATE_BASE_ADDRESS |
             BRW_NEW_URB_FENCE,
   },
   .emit = upload_psp_urb_cbs,
};

uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct brw_renderbuffer *drb = brw_get_renderbuffer(fb, BUFFER_DEPTH);
   struct brw_renderbuffer *srb;

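   /* With no depth renderbuffer, a packed depth/stencil miptree bound
    * through the stencil attachment still determines the depth format.
    */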
   if (!drb &&
       (srb = brw_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (brw_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        brw_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}

static struct brw_mipmap_tree *
get_stencil_miptree(struct brw_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return brw_renderbuffer_get_mt(irb);
}

static bool
rebase_depth_stencil(struct brw_context *brw, struct brw_renderbuffer *irb,
                     bool invalidate)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   isl_get_tile_masks(irb->mt->surf.tiling, irb->mt->cpp,
                      &tile_mask_x, &tile_mask_y);
   assert(!brw_miptree_level_has_hiz(irb->mt, irb->mt_level));

   uint32_t tile_x = irb->draw_x & tile_mask_x;
   uint32_t tile_y = irb->draw_y & tile_mask_y;

   /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
    * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
    * Coordinate Offset X/Y":
    *
    *   "The 3 LSBs of both offsets must be zero to ensure correct
    *   alignment"
    */
   bool rebase = tile_x & 7 || tile_y & 7;

   /* We didn't even have intra-tile offsets before g45. */
   rebase |= (!devinfo->has_surface_tile_offset && (tile_x || tile_y));

   if (rebase) {
      perf_debug("HW workaround: blitting depth level %d to a temporary "
                 "to fix alignment (depth tile offset %d,%d)\n",
                 irb->mt_level, tile_x, tile_y);
      brw_renderbuffer_move_to_temp(brw, irb, invalidate);

      /* The miptree now contains only a single slice. */
      brw->depthstencil.tile_x = 0;
      brw->depthstencil.tile_y = 0;
      brw->depthstencil.depth_offset = 0;
      return true;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase.  At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset (%u:%u), bad rendering may occur.\n",
             tile_x, tile_y);
   tile_x &= ~7;
   tile_y &= ~7;

   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   brw->depthstencil.depth_offset = brw_miptree_get_aligned_offset(
                                       irb->mt,
                                       irb->draw_x & ~tile_mask_x,
                                       irb->draw_y & ~tile_mask_y);

   return false;
}

void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct brw_renderbuffer *depth_irb = brw_get_renderbuffer(fb, BUFFER_DEPTH);
   struct brw_renderbuffer *stencil_irb = brw_get_renderbuffer(fb, BUFFER_STENCIL);
   struct brw_mipmap_tree *depth_mt = NULL;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state.
    */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;

   /* Gfx6+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (devinfo->ver >= 6)
      return;

   /* Check if depth buffer is in depth/stencil format.  If so, then it's only
    * safe to invalidate it if we're also clearing stencil.
    */
   if (depth_irb && invalidate_depth &&
      _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
      invalidate_depth = invalidate_stencil && stencil_irb;

   if (depth_irb) {
      if (rebase_depth_stencil(brw, depth_irb, invalidate_depth)) {
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            brw_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            brw_renderbuffer_set_draw_offset(stencil_irb);
         }
      }

      if (stencil_irb) {
         assert(stencil_irb->mt == depth_irb->mt);
         assert(stencil_irb->mt_level == depth_irb->mt_level);
         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
      }
   }

   /* If there is no depth attachment, consider if stencil needs rebase. */
   if (!depth_irb && stencil_irb)
       rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
}

static void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct brw_renderbuffer *depth_irb,
                           struct brw_mipmap_tree *depth_mt,
                           struct brw_renderbuffer *stencil_irb,
                           struct brw_mipmap_tree *stencil_mt)
{
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;
   bool tiled_surface = true;

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
      tiled_surface = depth_mt->surf.tiling != ISL_TILING_LINEAR;
   }

   const struct intel_device_info *devinfo = &brw->screen->devinfo;
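   /* G4x and Ironlake take one extra DWord for the intra-tile X/Y offsets
    * emitted below.
    */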
   const unsigned len = (devinfo->is_g4x || devinfo->ver == 5) ? 6 : 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch_B - 1 : 0) |
             (depthbuffer_format << 18) |
             (BRW_TILEWALK_YMAJOR << 26) |
             (tiled_surface << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (devinfo->is_g4x || devinfo->ver >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (devinfo->ver >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();
}

void
brw_emit_depthbuffer(struct brw_context *brw)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct brw_renderbuffer *depth_irb = brw_get_renderbuffer(fb, BUFFER_DEPTH);
   struct brw_renderbuffer *stencil_irb = brw_get_renderbuffer(fb, BUFFER_STENCIL);
   struct brw_mipmap_tree *depth_mt = brw_renderbuffer_get_mt(depth_irb);
   struct brw_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);

   if (depth_mt)
      brw_cache_flush_for_depth(brw, depth_mt->bo);
   if (stencil_mt)
      brw_cache_flush_for_depth(brw, stencil_mt->bo);

   if (devinfo->ver < 6) {
      brw_emit_depth_stencil_hiz(brw, depth_irb, depth_mt,
                                 stencil_irb, stencil_mt);
      return;
   }

   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
      assert(brw->hw_ctx);
      return;
   }

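   /* Changing the depth/stencil buffer state requires the depth-stall and
    * depth-cache-flush sequence emitted by brw_emit_depth_stall_flushes().
    */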
   brw_emit_depth_stall_flushes(brw);

   const unsigned ds_dwords = brw->isl_dev.ds.size / 4;
   brw_batch_begin(brw, ds_dwords);
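   /* ds_map is where isl will write the packet; ds_offset is its byte offset
    * within the batch, used below to point relocations at the address DWords.
    */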
   uint32_t *ds_map = brw->batch.map_next;
   const uint32_t ds_offset = (char *)ds_map - (char *)brw->batch.batch.map;

   struct isl_view view = {
      /* Some nice defaults */
      .base_level = 0,
      .levels = 1,
      .base_array_layer = 0,
      .array_len = 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
   };

   struct isl_depth_stencil_hiz_emit_info info = {
      .view = &view,
   };

   if (depth_mt) {
      view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
      info.depth_surf = &depth_mt->surf;

      info.depth_address =
         brw_batch_reloc(&brw->batch,
                         ds_offset + brw->isl_dev.ds.depth_offset,
                         depth_mt->bo, depth_mt->offset, RELOC_WRITE);

      info.mocs = brw_get_bo_mocs(devinfo, depth_mt->bo);
      view.base_level = depth_irb->mt_level - depth_irb->mt->first_level;
      view.base_array_layer = depth_irb->mt_layer;
      view.array_len = MAX2(depth_irb->layer_count, 1);
      view.format = depth_mt->surf.format;

      info.hiz_usage = depth_mt->aux_usage;
      if (!brw_renderbuffer_has_hiz(depth_irb)) {
         /* Just because a miptree has ISL_AUX_USAGE_HIZ does not mean that
          * all miplevels of that miptree are guaranteed to support HiZ.  See
          * brw_miptree_level_enable_hiz for details.
          */
         info.hiz_usage = ISL_AUX_USAGE_NONE;
      }

      if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
         info.hiz_surf = &depth_mt->aux_buf->surf;

         uint64_t hiz_offset = 0;
         if (devinfo->ver == 6) {
            /* HiZ surfaces on Sandy Bridge technically don't support
             * mip-mapping.  However, we can fake it by offsetting to the
             * first slice of LOD0 in the HiZ surface.
             */
            isl_surf_get_image_offset_B_tile_sa(&depth_mt->aux_buf->surf,
                                                view.base_level, 0, 0,
                                                &hiz_offset, NULL, NULL);
         }

         info.hiz_address =
            brw_batch_reloc(&brw->batch,
                            ds_offset + brw->isl_dev.ds.hiz_offset,
                            depth_mt->aux_buf->bo,
                            depth_mt->aux_buf->offset + hiz_offset,
                            RELOC_WRITE);
      }

      info.depth_clear_value = depth_mt->fast_clear_color.f32[0];
   }

   if (stencil_mt) {
      view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
      info.stencil_surf = &stencil_mt->surf;

      if (!depth_mt) {
         info.mocs = brw_get_bo_mocs(devinfo, stencil_mt->bo);
         view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
         view.base_array_layer = stencil_irb->mt_layer;
         view.array_len = MAX2(stencil_irb->layer_count, 1);
         view.format = stencil_mt->surf.format;
      }

      uint64_t stencil_offset = 0;
      if (devinfo->ver == 6) {
         /* Stencil surfaces on Sandy Bridge technically don't support
          * mip-mapping.  However, we can fake it by offsetting to the
          * first slice of LOD0 in the stencil surface.
          */
         isl_surf_get_image_offset_B_tile_sa(&stencil_mt->surf,
                                             view.base_level, 0, 0,
                                             &stencil_offset, NULL, NULL);
      }

      info.stencil_address =
         brw_batch_reloc(&brw->batch,
                         ds_offset + brw->isl_dev.ds.stencil_offset,
                         stencil_mt->bo,
                         stencil_mt->offset + stencil_offset,
                         RELOC_WRITE);
   }

   isl_emit_depth_stencil_hiz_s(&brw->isl_dev, ds_map, &info);

   brw->batch.map_next += ds_dwords;
   brw_batch_advance(brw);

   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_AUX_STATE |
             BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = brw_emit_depthbuffer,
};

void
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->ver == 4 && !devinfo->is_g4x;
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;

   if (devinfo->ver >= 8 && devinfo->ver < 10) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gfx9
       * hardware too.
       */
      if (pipeline == BRW_COMPUTE_PIPELINE) {
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(0);
         ADVANCE_BATCH();

         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
      }
   }

   if (devinfo->ver == 9 && pipeline == BRW_RENDER_PIPELINE) {
      /* We seem to have issues with geometry flickering when 3D and compute
       * are combined in the same batch and this appears to fix it.
       */
      const uint32_t maxNumberofThreads =
         devinfo->max_cs_threads * devinfo->subslice_total - 1;

      BEGIN_BATCH(9);
      OUT_BATCH(MEDIA_VFE_STATE << 16 | (9 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(2 << 8 | maxNumberofThreads << 16);
      OUT_BATCH(0);
      OUT_BATCH(2 << 16);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   if (devinfo->ver >= 6) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVSNB+
       *
       *   Software must ensure all the write caches are flushed through a
       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
       *   command to invalidate read only caches prior to programming
       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
       */
      const unsigned dc_flush =
         devinfo->ver >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  dc_flush |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   } else {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: PRE-DEVSNB
       *
       *   Software must ensure the current pipeline is flushed via an
       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   /* Select the pipeline */
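   /* PIPELINE_SELECT: bits 1:0 choose the pipeline (0 = 3D, 2 = GPGPU);
    * Gfx9+ also requires the corresponding mask bits (3 << 8) to be set for
    * the selection to take effect.
    */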
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
             (devinfo->ver >= 9 ? (3 << 8) : 0) |
             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
   ADVANCE_BATCH();

   if (devinfo->verx10 == 70 &&
       pipeline == BRW_RENDER_PIPELINE) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVIVB, DEVHSW:GT3:A0
       *
       *   Software must send a pipe_control with a CS stall and a post sync
       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
       *   after any PIPELINE_SELECT that is enabling 3D mode.
       */
      gfx7_emit_cs_stall_flush(brw);

      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(_3DPRIM_POINTLIST);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   if (devinfo->is_geminilake) {
      /* Project: DevGLK
       *
       * "This chicken bit works around a hardware issue with barrier logic
       *  encountered when switching between GPGPU and 3D pipelines.  To
       *  workaround the issue, this mode bit should be set after a pipeline
       *  is selected."
       */
      const unsigned barrier_mode =
         pipeline == BRW_RENDER_PIPELINE ? GLK_SCEC_BARRIER_MODE_3D_HULL
                                         : GLK_SCEC_BARRIER_MODE_GPGPU;
      brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
                              barrier_mode | GLK_SCEC_BARRIER_MODE_MASK);
   }
}

/**
 * Update the pixel hashing modes that determine the balancing of PS threads
 * across subslices and slices.
 *
 * \param width Width bound of the rendering area (already scaled down if \p
 *              scale is greater than 1).
 * \param height Height bound of the rendering area (already scaled down if \p
 *               scale is greater than 1).
 * \param scale The number of framebuffer samples that could potentially be
 *              affected by an individual channel of the PS thread.  This is
 *              typically one for single-sampled rendering, but for operations
 *              like CCS resolves and fast clears a single PS invocation may
 *              update a huge number of pixels, in which case a finer
 *              balancing is desirable in order to maximally utilize the
 *              bandwidth available.  UINT_MAX can be used as shorthand for
 *              "finest hashing mode available".
 */
void
brw_emit_hashing_mode(struct brw_context *brw, unsigned width,
                      unsigned height, unsigned scale)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->ver == 9) {
      const uint32_t slice_hashing[] = {
         /* Because all Gfx9 platforms with more than one slice require
          * three-way subslice hashing, a single "normal" 16x16 slice hashing
          * block is guaranteed to suffer from substantial imbalance, with one
          * subslice receiving twice as much work as the other two in the
          * slice.
          *
          * The performance impact of that would be particularly severe when
          * three-way hashing is also in use for slice balancing (which is the
          * case for all Gfx9 GT4 platforms), because one of the slices
          * receives one every three 16x16 blocks in either direction, which
          * is roughly the periodicity of the underlying subslice imbalance
          * pattern ("roughly" because in reality the hardware's
          * implementation of three-way hashing doesn't do exact modulo 3
          * arithmetic, which somewhat decreases the magnitude of this effect
          * in practice).  This leads to a systematic subslice imbalance
          * within that slice regardless of the size of the primitive.  The
          * 32x32 hashing mode guarantees that the subslice imbalance within a
          * single slice hashing block is minimal, largely eliminating this
          * effect.
          */
         GFX9_SLICE_HASHING_32x32,
         /* Finest slice hashing mode available. */
         GFX9_SLICE_HASHING_NORMAL
      };
      const uint32_t subslice_hashing[] = {
         /* The 16x16 subslice hashing mode is used on non-LLC platforms to
          * match the performance of previous Mesa versions.  16x16 has a
          * slight cache locality benefit especially visible in the sampler L1
          * cache efficiency of low-bandwidth platforms, but it comes at the
          * cost of greater subslice imbalance for primitives of dimensions
          * approximately intermediate between 16x4 and 16x16.
          */
         (devinfo->has_llc ? GFX9_SUBSLICE_HASHING_16x4 :
                             GFX9_SUBSLICE_HASHING_16x16),
         /* Finest subslice hashing mode available. */
         GFX9_SUBSLICE_HASHING_8x4
      };
      /* Dimensions of the smallest hashing block of a given hashing mode.  If
       * the rendering area is smaller than this there can't possibly be any
       * benefit from switching to this mode, so we optimize out the
       * transition.
       */
      const unsigned min_size[][2] = {
         { 16, 4 },
         { 8, 4 }
      };
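      /* Index 1 selects the finer hashing modes used when a single PS
       * invocation may cover many samples (scale > 1).
       */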
      const unsigned idx = scale > 1;

      if (width > min_size[idx][0] || height > min_size[idx][1]) {
         const uint32_t gt_mode =
            (devinfo->num_slices == 1 ? 0 :
             GFX9_SLICE_HASHING_MASK_BITS | slice_hashing[idx]) |
            GFX9_SUBSLICE_HASHING_MASK_BITS | subslice_hashing[idx];

         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                     PIPE_CONTROL_CS_STALL);

         brw_load_register_imm32(brw, GFX7_GT_MODE, gt_mode);

         brw->current_hash_scale = scale;
      }
   }
}

/**
 * Misc invariant state packets
 */
void
brw_upload_invariant_state(struct brw_context *brw)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->ver == 4 && !devinfo->is_g4x;

   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw->last_pipeline = BRW_RENDER_PIPELINE;

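   /* STATE_SIP: the system instruction pointer is a 64-bit address on Gfx8+,
    * hence the extra DWord in that path.
    */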
   if (devinfo->ver >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Original Gfx4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (!is_965) {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
      /* use legacy aa line coverage computation */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gfx6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
void
brw_upload_state_base_address(struct brw_context *brw)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;

   if (brw->batch.state_base_address_emitted)
      return;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (devinfo->ver >= 6) {
      const unsigned dc_flush =
         devinfo->ver >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      /* Emit a render target cache flush.
       *
       * This isn't documented anywhere in the PRM.  However, it seems to be
       * necessary prior to changing the surface state base address.  We've
       * seen issues in Vulkan where we get GPU hangs when using multi-level
       * command buffers which clear depth, reset state base address, and then
       * go render stuff.
       *
       * Normally, in GL, we would trust the kernel to do sufficient stalls
       * and flushes prior to executing our batch.  However, it doesn't seem
       * as if the kernel's flushing is always sufficient and we don't want to
       * rely on it.
       *
       * We make this an end-of-pipe sync instead of a normal flush because we
       * do not know the current status of the GPU.  On Haswell at least,
       * having a fast-clear operation in flight at the same time as a normal
       * rendering operation can cause hangs.  Since the kernel's flushing is
       * insufficient, we need to ensure that any rendering operations from
       * other processes are definitely complete before we try to do our own
       * rendering.  It's a bit of a big hammer but it appears to work.
       */
      brw_emit_end_of_pipe_sync(brw,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                dc_flush);
   }

   if (devinfo->ver >= 8) {
      /* STATE_BASE_ADDRESS has issues with 48-bit address spaces.  If the
       * address + size as seen by STATE_BASE_ADDRESS overflows 48 bits,
       * the GPU appears to treat all accesses to the buffer as being out
       * of bounds and returns zero.  To work around this, we pin all SBAs
       * to the bottom 4GB.
       */
      uint32_t mocs_wb = devinfo->ver >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      int pkt_len = devinfo->ver >= 10 ? 22 : (devinfo->ver >= 9 ? 19 : 16);
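      /* Each base-address and buffer-size DWord below sets bit 0, the Modify
       * Enable flag, so the programmed value actually takes effect.
       */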

      BEGIN_BATCH(pkt_len);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      OUT_BATCH(mocs_wb << 16);
      /* Surface state base address: */
      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* Dynamic state base address: */
      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC64(brw->cache.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* General state buffer size */
      OUT_BATCH(0xfffff001);
      /* Dynamic state buffer size */
      OUT_BATCH(ALIGN(MAX_STATE_SIZE, 4096) | 1);
      /* Indirect object upper bound */
      OUT_BATCH(0xfffff001);
      /* Instruction access upper bound */
      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
      if (devinfo->ver >= 9) {
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      if (devinfo->ver >= 10) {
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      ADVANCE_BATCH();
   } else if (devinfo->ver >= 6) {
      uint8_t mocs = devinfo->ver == 7 ? GFX7_MOCS_L3 : 0;

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */

      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC(brw->cache.bo, 0, 1);

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound.  Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (devinfo->ver == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   if (devinfo->ver >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake.  The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing.  Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
   brw->batch.state_base_address_emitted = true;
}
925