/*
 * Copyright 2003 VMware, Inc.
 * Copyright 2009, 2012 Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "main/condrender.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_blorp.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

static const char *buffer_names[] = {
   [BUFFER_FRONT_LEFT] = "front",
   [BUFFER_BACK_LEFT] = "back",
   [BUFFER_FRONT_RIGHT] = "front right",
   [BUFFER_BACK_RIGHT] = "back right",
   [BUFFER_DEPTH] = "depth",
   [BUFFER_STENCIL] = "stencil",
   [BUFFER_ACCUM] = "accum",
   [BUFFER_AUX0] = "aux0",
   [BUFFER_COLOR0] = "color0",
   [BUFFER_COLOR1] = "color1",
   [BUFFER_COLOR2] = "color2",
   [BUFFER_COLOR3] = "color3",
   [BUFFER_COLOR4] = "color4",
   [BUFFER_COLOR5] = "color5",
   [BUFFER_COLOR6] = "color6",
   [BUFFER_COLOR7] = "color7",
};

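/**
 * Prints the set of buffers selected by \p mask when DEBUG_BLIT logging is
 * enabled, using the human-readable names in buffer_names above.
 */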
static void
debug_mask(const char *name, GLbitfield mask)
{
   GLuint i;

   if (INTEL_DEBUG & DEBUG_BLIT) {
      DBG("%s clear:", name);
      for (i = 0; i < BUFFER_COUNT; i++) {
         if (mask & (1 << i))
            DBG(" %s", buffer_names[i]);
      }
      DBG("\n");
   }
}

/**
 * Returns true if the scissor is a noop (cuts out nothing).
 */
static bool
noop_scissor(struct gl_framebuffer *fb)
{
   return fb->_Xmin <= 0 &&
          fb->_Ymin <= 0 &&
          fb->_Xmax >= fb->Width &&
          fb->_Ymax >= fb->Height;
}

/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value."  Then later rendering
 * can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
      return false;

   if (devinfo->gen < 6)
      return false;

   if (!intel_renderbuffer_has_hiz(depth_irb))
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track whether
    * a previous clear had happened at a different clear value and resolve it
    * first.
    */
   if ((ctx->Scissor.EnableFlags & 1) && !noop_scissor(fb)) {
      perf_debug("Failed to fast clear %dx%d depth because of scissors.  "
                 "Possible 5%% performance win if avoided.\n",
                 mt->surf.logical_level0_px.width,
                 mt->surf.logical_level0_px.height);
      return false;
   }

   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z_UNORM16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - [DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (devinfo->gen == 6 &&
          (minify(mt->surf.phys_level0_sa.width,
                  depth_irb->mt_level - mt->first_level) % 16) != 0)
         return false;
      break;

   default:
      break;
   }

   /* Quantize the clear value to what can be stored in the actual depth
    * buffer.  This makes the following check more accurate because it now
    * checks if the actual depth bits will match.  It also prevents us from
    * getting a too-accurate depth value during depth testing or when sampling
    * with HiZ enabled.
    */
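   /* For example, with a 24-bit depth buffer _DepthMax is 0xffffff, so a
    * requested clear value of 0.5f becomes
    * _mesa_lroundeven(0.5 * 0xffffff) / (float)0xffffff.
    */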
   float clear_value =
      mt->format == MESA_FORMAT_Z_FLOAT32 ? ctx->Depth.Clear :
      _mesa_lroundeven(ctx->Depth.Clear * fb->_DepthMax) / (float)(fb->_DepthMax);

   const uint32_t num_layers = depth_att->Layered ? depth_irb->layer_count : 1;

   /* If we're clearing to a new clear value, then we need to resolve any clear
    * flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->fast_clear_color.f32[0] != clear_value) {
      for (uint32_t level = mt->first_level; level <= mt->last_level; level++) {
         if (!intel_miptree_level_has_hiz(mt, level))
            continue;

         const unsigned level_layers = brw_get_num_logical_layers(mt, level);

         for (uint32_t layer = 0; layer < level_layers; layer++) {
            if (level == depth_irb->mt_level &&
                layer >= depth_irb->mt_layer &&
                layer < depth_irb->mt_layer + num_layers) {
               /* We're going to clear this layer anyway.  Leave it alone. */
               continue;
            }

            enum isl_aux_state aux_state =
               intel_miptree_get_aux_state(mt, level, layer);

            if (aux_state != ISL_AUX_STATE_CLEAR &&
                aux_state != ISL_AUX_STATE_COMPRESSED_CLEAR) {
               /* This slice doesn't have any fast-cleared bits. */
               continue;
            }

            /* If we got here, then the level may have fast-clear bits that
             * use the old clear value.  We need to do a depth resolve to get
             * rid of their use of the clear value before we can change it.
             * Fortunately, few applications ever change their depth clear
             * value so this shouldn't happen often.
             */
            intel_hiz_exec(brw, mt, level, layer, 1,
                           ISL_AUX_OP_FULL_RESOLVE);
            intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                        ISL_AUX_STATE_RESOLVED);
         }
      }

      const union isl_color_value clear_color = { .f32 = {clear_value, } };
      intel_miptree_set_clear_color(brw, mt, clear_color);
   }

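   /* Fast-clear each attached layer of this miplevel that isn't already in
    * the CLEAR state.
    */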
   for (unsigned a = 0; a < num_layers; a++) {
      enum isl_aux_state aux_state =
         intel_miptree_get_aux_state(mt, depth_irb->mt_level,
                                     depth_irb->mt_layer + a);

      if (aux_state != ISL_AUX_STATE_CLEAR) {
         intel_hiz_exec(brw, mt, depth_irb->mt_level,
                        depth_irb->mt_layer + a, 1,
                        ISL_AUX_OP_FAST_CLEAR);
      }
   }

   intel_miptree_set_aux_state(brw, mt, depth_irb->mt_level,
                               depth_irb->mt_layer, num_layers,
                               ISL_AUX_STATE_CLEAR);
   return true;
}

/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   if (mask & BUFFER_BITS_COLOR) {
      brw_blorp_clear_color(brw, fb, mask, partial_clear,
                            ctx->Color.sRGBEnabled);
      debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
      mask &= ~BUFFER_BITS_COLOR;
   }

   if (devinfo->gen >= 6 && (mask & BUFFER_BITS_DEPTH_STENCIL)) {
      brw_blorp_clear_depth_stencil(brw, fb, mask, partial_clear);
      debug_mask("blorp depth/stencil", mask & BUFFER_BITS_DEPTH_STENCIL);
      mask &= ~BUFFER_BITS_DEPTH_STENCIL;
   }

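   /* Any depth/stencil bits still set here (only possible on gen < 6, where
    * the blorp path above isn't used) are cleared by drawing a quad through
    * the meta GLSL clear path.
    */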
   GLbitfield tri_mask = mask & (BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;
      _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
   }

   /* Any strange buffers get passed off to swrast.  The only thing that
    * should be left at this point is the accumulation buffer.
    */
   assert((mask & ~BUFFER_BIT_ACCUM) == 0);
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}


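/**
 * Plugs brw_clear into the driver function table as ctx->Driver.Clear;
 * called while the driver's dd_function_table is being set up during
 * context creation.
 */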
void
intelInitClearFuncs(struct dd_function_table *functions)
{
   functions->Clear = brw_clear;
}