• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2011 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "intel_batchbuffer.h"
25 #include "intel_mipmap_tree.h"
26 #include "intel_fbo.h"
27 #include "brw_context.h"
28 #include "brw_state.h"
29 #include "brw_defines.h"
30 #include "compiler/brw_eu_defines.h"
31 #include "brw_wm.h"
32 #include "main/framebuffer.h"
33 
34 /**
35  * Should we set the PMA FIX ENABLE bit?
36  *
37  * To avoid unnecessary depth related stalls, we need to set this bit.
38  * However, there is a very complicated formula which governs when it
39  * is legal to do so.  This function computes that.
40  *
41  * See the documentation for the CACHE_MODE_1 register, bit 11.
42  */
43 static bool
pma_fix_enable(const struct brw_context * brw)44 pma_fix_enable(const struct brw_context *brw)
45 {
46    const struct gl_context *ctx = &brw->ctx;
47    /* BRW_NEW_FS_PROG_DATA */
48    const struct brw_wm_prog_data *wm_prog_data =
49       brw_wm_prog_data(brw->wm.base.prog_data);
50    /* _NEW_BUFFERS */
51    struct intel_renderbuffer *depth_irb =
52       intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
53 
54    /* 3DSTATE_WM::ForceThreadDispatch is never used. */
55    const bool wm_force_thread_dispatch = false;
56 
57    /* 3DSTATE_RASTER::ForceSampleCount is never used. */
58    const bool raster_force_sample_count_nonzero = false;
59 
60    /* _NEW_BUFFERS:
61     * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
62     * 3DSTATE_DEPTH_BUFFER::HIZ Enable
63     */
64    const bool hiz_enabled = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
65 
66    /* 3DSTATE_WM::Early Depth/Stencil Control != EDSC_PREPS (2). */
67    const bool edsc_not_preps = !wm_prog_data->early_fragment_tests;
68 
69    /* 3DSTATE_PS_EXTRA::PixelShaderValid is always true. */
70    const bool pixel_shader_valid = true;
71 
72    /* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
73     *   3DSTATE_WM_HZ_OP::DepthBufferResolve ||
74     *   3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
75     *   3DSTATE_WM_HZ_OP::StencilBufferClear)
76     *
77     * HiZ operations are done outside of the normal state upload, so they're
78     * definitely not happening now.
79     */
80    const bool in_hiz_op = false;
81 
82    /* _NEW_DEPTH:
83     * DEPTH_STENCIL_STATE::DepthTestEnable
84     */
85    const bool depth_test_enabled = depth_irb && ctx->Depth.Test;
86 
87    /* _NEW_DEPTH:
88     * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
89     * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE.
90     */
91    const bool depth_writes_enabled = brw_depth_writes_enabled(brw);
92 
93    /* _NEW_STENCIL:
94     * !DEPTH_STENCIL_STATE::Stencil Buffer Write Enable ||
95     * !3DSTATE_DEPTH_BUFFER::Stencil Buffer Enable ||
96     * !3DSTATE_STENCIL_BUFFER::Stencil Buffer Enable
97     */
98    const bool stencil_writes_enabled = brw->stencil_write_enabled;
99 
100    /* 3DSTATE_PS_EXTRA::Pixel Shader Computed Depth Mode != PSCDEPTH_OFF */
101    const bool ps_computes_depth =
102       wm_prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;
103 
104    /* BRW_NEW_FS_PROG_DATA:     3DSTATE_PS_EXTRA::PixelShaderKillsPixels
105     * BRW_NEW_FS_PROG_DATA:     3DSTATE_PS_EXTRA::oMask Present to RenderTarget
106     * _NEW_MULTISAMPLE:         3DSTATE_PS_BLEND::AlphaToCoverageEnable
107     * _NEW_COLOR:               3DSTATE_PS_BLEND::AlphaTestEnable
108     * _NEW_BUFFERS:             3DSTATE_PS_BLEND::AlphaTestEnable
109     *                           3DSTATE_PS_BLEND::AlphaToCoverageEnable
110     *
111     * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false.
112     * 3DSTATE_WM::ForceKillPix != ForceOff is always true.
113     */
114    const bool kill_pixel =
115       wm_prog_data->uses_kill ||
116       wm_prog_data->uses_omask ||
117       _mesa_is_alpha_test_enabled(ctx) ||
118       _mesa_is_alpha_to_coverage_enabled(ctx);
119 
120    /* The big formula in CACHE_MODE_1::NP PMA FIX ENABLE. */
121    return !wm_force_thread_dispatch &&
122           !raster_force_sample_count_nonzero &&
123           hiz_enabled &&
124           edsc_not_preps &&
125           pixel_shader_valid &&
126           !in_hiz_op &&
127           depth_test_enabled &&
128           (ps_computes_depth ||
129            (kill_pixel && (depth_writes_enabled || stencil_writes_enabled)));
130 }
131 
132 void
gen8_write_pma_stall_bits(struct brw_context * brw,uint32_t pma_stall_bits)133 gen8_write_pma_stall_bits(struct brw_context *brw, uint32_t pma_stall_bits)
134 {
135    /* If we haven't actually changed the value, bail now to avoid unnecessary
136     * pipeline stalls and register writes.
137     */
138    if (brw->pma_stall_bits == pma_stall_bits)
139       return;
140 
141    brw->pma_stall_bits = pma_stall_bits;
142 
143    /* According to the PIPE_CONTROL documentation, software should emit a
144     * PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set prior
145     * to the LRI.  If stencil buffer writes are enabled, then a Render Cache
146     * Flush is also necessary.
147     */
148    const uint32_t render_cache_flush =
149       brw->stencil_write_enabled ? PIPE_CONTROL_RENDER_TARGET_FLUSH : 0;
150    brw_emit_pipe_control_flush(brw,
151                                PIPE_CONTROL_CS_STALL |
152                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
153                                render_cache_flush);
154 
155    /* CACHE_MODE_1 is a non-privileged register. */
156    brw_load_register_imm32(brw, GEN7_CACHE_MODE_1,
157                            GEN8_HIZ_PMA_MASK_BITS |
158                            pma_stall_bits );
159 
160    /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
161     * Flush bits is often necessary.  We do it regardless because it's easier.
162     * The render cache flush is also necessary if stencil writes are enabled.
163     */
164    brw_emit_pipe_control_flush(brw,
165                                PIPE_CONTROL_DEPTH_STALL |
166                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
167                                render_cache_flush);
168 
169 }
170 
171 static void
gen8_emit_pma_stall_workaround(struct brw_context * brw)172 gen8_emit_pma_stall_workaround(struct brw_context *brw)
173 {
174    const struct gen_device_info *devinfo = &brw->screen->devinfo;
175    uint32_t bits = 0;
176 
177    if (devinfo->gen >= 9)
178       return;
179 
180    if (pma_fix_enable(brw))
181       bits |= GEN8_HIZ_NP_PMA_FIX_ENABLE | GEN8_HIZ_NP_EARLY_Z_FAILS_DISABLE;
182 
183    gen8_write_pma_stall_bits(brw, bits);
184 }
185 
/* State atom: re-runs the PMA-fix evaluation whenever any state that
 * feeds pma_fix_enable() changes.  The dirty bits below mirror the
 * _NEW_* / BRW_NEW_* annotations inside that function.
 */
const struct brw_tracked_state gen8_pma_fix = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_DEPTH |
              _NEW_MULTISAMPLE |
              _NEW_STENCIL,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = gen8_emit_pma_stall_workaround
};
198