/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_blitter.h"
#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

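/* Small helpers to record a batch's dependency on a resource, accepting
 * a NULL resource so callers don't need to check:
 */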
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_read(batch, fd_resource(prsc));
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_write(batch, fd_resource(prsc));
}

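/* Walk all the state bound for a draw and mark which resources are read
 * and written, so the batch cache can track cross-batch dependencies and
 * flush dependent batches in the right order:
 */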
static void
batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = batch->ctx;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned buffers = 0, restore_buffers = 0;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

	fd_screen_lock(ctx->screen);

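	/* The depth/stencil buffer only needs to be restored (mem2gmem) if it
	 * already contains valid data; otherwise this batch is effectively
	 * initializing it:
	 */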
	if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
		if (fd_depth_enabled(ctx)) {
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_DEPTH;
			} else {
				batch->invalidated |= FD_BUFFER_DEPTH;
			}
			batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
			if (fd_depth_write_enabled(ctx)) {
				buffers |= FD_BUFFER_DEPTH;
				resource_written(batch, pfb->zsbuf->texture);
			} else {
				resource_read(batch, pfb->zsbuf->texture);
			}
		}

		if (fd_stencil_enabled(ctx)) {
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_STENCIL;
			} else {
				batch->invalidated |= FD_BUFFER_STENCIL;
			}
			batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
			buffers |= FD_BUFFER_STENCIL;
			resource_written(batch, pfb->zsbuf->texture);
		}
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		if (fd_resource(surf)->valid) {
			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
		} else {
			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
		}

		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;

		if (ctx->dirty & FD_DIRTY_FRAMEBUFFER)
			resource_written(batch, pfb->cbufs[i]->texture);
	}

	/* Mark SSBOs */
	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
		const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_FRAGMENT];

		foreach_bit (i, so->enabled_mask & so->writable_mask)
			resource_written(batch, so->sb[i].buffer);

		foreach_bit (i, so->enabled_mask & ~so->writable_mask)
			resource_read(batch, so->sb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
		foreach_bit (i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
			struct pipe_image_view *img =
					&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
			if (img->access & PIPE_IMAGE_ACCESS_WRITE)
				resource_written(batch, img->resource);
			else
				resource_read(batch, img->resource);
		}
	}

	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
		foreach_bit (i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
		foreach_bit (i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
	}

	/* Mark VBOs as being read */
	if (ctx->dirty & FD_DIRTY_VTXBUF) {
		foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) {
			assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
			resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
		}
	}

	/* Mark index buffer as being read */
	if (info->index_size)
		resource_read(batch, info->index.resource);

	/* Mark indirect draw buffer as being read */
	if (info->indirect)
		resource_read(batch, info->indirect->buffer);

	/* Mark textures as being read */
	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
		foreach_bit (i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
		foreach_bit (i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
	}

	/* Mark streamout buffers as being written.. */
	if (ctx->dirty & FD_DIRTY_STREAMOUT) {
		for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
			if (ctx->streamout.targets[i])
				resource_written(batch, ctx->streamout.targets[i]->buffer);
	}

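	/* The query buffer is written by every draw, and any actively-
	 * accumulating queries have their result buffers written as well:
	 */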
	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	fd_screen_unlock(ctx->screen);

	/* any buffers that haven't been cleared yet, we need to restore: */
	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
	/* and any buffers used, need to be resolved: */
	batch->resolve |= buffers;
}

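/* The pipe_context::draw_vbo() entry point: handles dependency tracking
 * and various fallbacks/emulations before handing the draw off to the
 * per-gen backend:
 */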
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);

	/* for debugging problems with indirect draw, it is convenient to be
	 * able to emulate it, to determine if the game is feeding us bogus
	 * data:
	 */
	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
		util_draw_indirect(pctx, info);
		return;
	}

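	/* Skip draws that won't produce any primitives once degenerate vertex
	 * counts are trimmed, unless the count comes from hw (stream-out or
	 * indirect) or primitive-restart is in use:
	 */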
	if (info->mode != PIPE_PRIM_MAX &&
	    !info->count_from_stream_output && !info->indirect &&
	    !info->primitive_restart &&
	    !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	/* Upload a user index buffer. */
	struct pipe_resource *indexbuf = NULL;
	unsigned index_offset = 0;
	struct pipe_draw_info new_info;
	if (info->index_size) {
		if (info->has_user_indices) {
			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset, 4))
				return;
			new_info = *info;
			new_info.index.resource = indexbuf;
			new_info.has_user_indices = false;
			info = &new_info;
		} else {
			indexbuf = info->index.resource;
		}
	}

	struct fd_batch *batch = NULL;
	fd_batch_reference(&batch, fd_context_batch(ctx));

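	/* If we are in the middle of a discard blit (ie. the destination's
	 * previous contents don't matter), start with a fresh batch:
	 */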
	if (ctx->in_discard_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch_draw_tracking(batch, info);

	if (unlikely(ctx->batch != batch)) {
		/* The current batch was flushed in batch_draw_tracking()
		 * so start anew.  We know this won't happen a second time
		 * since we are dealing with a fresh batch:
		 */
		fd_batch_reference(&batch, fd_context_batch(ctx));
		batch_draw_tracking(batch, info);
		assert(ctx->batch == batch);
	}

	batch->blit = ctx->in_discard_blit;
	batch->back_blit = ctx->in_shadow;
	batch->num_draws++;

	/* Counting prims in sw doesn't work for GS and tessellation. For older
	 * gens we don't have those stages and don't have the hw counters enabled,
	 * so keep the count accurate for non-patch geometry.
	 */
	unsigned prims;
	if ((info->mode != PIPE_PRIM_PATCHES) &&
			(info->mode != PIPE_PRIM_MAX))
		prims = u_reduced_prims_for_vertices(info->mode, info->count);
	else
		prims = 0;

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* Clearing last_fence must come after the batch dependency tracking
	 * (resource_read()/resource_written()), as that can trigger a flush,
	 * re-populating last_fence
	 */
	fd_fence_ref(&ctx->last_fence, NULL);

	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	DBG("%p: %ux%u num_draws=%u (%s/%s)", batch,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

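	/* Hand off to the per-gen backend.  It returns true if it actually
	 * emitted work, in which case the batch needs to be flushed:
	 */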
	if (ctx->draw_vbo(ctx, info, index_offset))
		batch->needs_flush = true;

	batch->num_vertices += info->count * info->instance_count;

	for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);
	fd_batch_reference(&batch, NULL);

	if (info == &new_info)
		pipe_resource_reference(&indexbuf, NULL);
}

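/* Like batch_draw_tracking(), but for clears: mark the cleared buffers as
 * written and update the batch's clear/invalidate bookkeeping:
 */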
static void
batch_clear_tracking(struct fd_batch *batch, unsigned buffers)
{
	struct fd_context *ctx = batch->ctx;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned cleared_buffers;

	/* pctx->clear() is only for full-surface clears, so scissor is
	 * equivalent to having GL_SCISSOR_TEST disabled:
	 */
	batch->max_scissor.minx = 0;
	batch->max_scissor.miny = 0;
	batch->max_scissor.maxx = pfb->width;
	batch->max_scissor.maxy = pfb->height;

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
	batch->cleared |= buffers;
	batch->invalidated |= cleared_buffers;

	batch->resolve |= buffers;
	batch->needs_flush = true;

	fd_screen_lock(ctx->screen);

	if (buffers & PIPE_CLEAR_COLOR)
		for (unsigned i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	fd_screen_unlock(ctx->screen);
}

static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const struct pipe_scissor_state *scissor_state,
		const union pipe_color_union *color, double depth,
		unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	struct fd_batch *batch = NULL;
	fd_batch_reference(&batch, fd_context_batch(ctx));

	if (ctx->in_discard_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch_clear_tracking(batch, buffers);

	if (unlikely(ctx->batch != batch)) {
		/* The current batch was flushed in batch_clear_tracking()
		 * so start anew.  We know this won't happen a second time
		 * since we are dealing with a fresh batch:
		 */
		fd_batch_reference(&batch, fd_context_batch(ctx));
		batch_clear_tracking(batch, buffers);
		assert(ctx->batch == batch);
	}

	/* Clearing last_fence must come after the batch dependency tracking
	 * (resource_read()/resource_written()), as that can trigger a flush,
	 * re-populating last_fence
	 */
	fd_fence_ref(&ctx->last_fence, NULL);

	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if the per-gen backend doesn't implement ctx->clear(), fall back
	 * to the generic blitter clear:
	 */
	bool fallback = true;

	if (ctx->clear) {
		fd_batch_set_stage(batch, FD_STAGE_CLEAR);

		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
			if (fd_mesa_debug & FD_DBG_DCLEAR)
				fd_context_all_dirty(ctx);

			fallback = false;
		}
	}

	if (fallback) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
	}

	fd_batch_check_size(batch);
	fd_batch_reference(&batch, NULL);
}

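/* Scissored clears of individual surfaces are not implemented yet; these
 * stubs just log the request:
 */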
static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
			buffers, depth, stencil, x, y, w, h);
}

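/* Compute dispatch: use a fresh batch so compute work isn't interleaved
 * into the current rendering batch, mark everything the grid can access,
 * then hand off to the per-gen backend and flush immediately:
 */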
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
	struct fd_batch *batch, *save_batch = NULL;

	batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
	fd_batch_reference(&save_batch, ctx->batch);
	fd_batch_reference(&ctx->batch, batch);
	fd_context_all_dirty(ctx);

	fd_screen_lock(ctx->screen);

	/* Mark SSBOs */
	foreach_bit (i, so->enabled_mask & so->writable_mask)
		resource_written(batch, so->sb[i].buffer);

	foreach_bit (i, so->enabled_mask & ~so->writable_mask)
		resource_read(batch, so->sb[i].buffer);

	foreach_bit (i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	/* UBOs are read */
	foreach_bit (i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

	/* Mark textures as being read */
	foreach_bit (i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

	/* For global buffers, we don't really know if read or written, so assume
	 * the worst:
	 */
	foreach_bit (i, ctx->global_bindings.enabled_mask)
		resource_written(batch, ctx->global_bindings.buf[i]);

	if (info->indirect)
		resource_read(batch, info->indirect);

	fd_screen_unlock(ctx->screen);

	batch->needs_flush = true;
	ctx->launch_grid(ctx, info);

	fd_batch_flush(batch);

	fd_batch_reference(&ctx->batch, save_batch);
	fd_context_all_dirty(ctx);
	fd_batch_reference(&save_batch, NULL);
	fd_batch_reference(&batch, NULL);
}

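/* Hook up the draw/clear (and, where supported, compute) entry points at
 * context creation:
 */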
void
fd_draw_init(struct pipe_context *pctx)
{
	pctx->draw_vbo = fd_draw_vbo;
	pctx->clear = fd_clear;
	pctx->clear_render_target = fd_clear_render_target;
	pctx->clear_depth_stencil = fd_clear_depth_stencil;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->launch_grid = fd_launch_grid;
	}
}