/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file crocus_draw.c
 *
 * The main driver hooks for drawing and launching compute shaders.
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/compiler/brw_eu_defines.h"
#include "compiler/shader_info.h"
#include "crocus_context.h"
#include "crocus_defines.h"
#include "util/u_prim_restart.h"
#include "util/u_prim.h"

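/**
 * Return true if the primitive mode rasterizes as points or lines.
 *
 * Used to decide whether the XY clip enables need updating (see the
 * CROCUS_DIRTY_CLIP handling in crocus_update_draw_info below).
 */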
static bool
prim_is_points_or_lines(enum pipe_prim_type mode)
{
   /* We don't need to worry about adjacency - it can only be used with
    * geometry shaders, and we don't care about this info when GS is on.
    */
   return mode == PIPE_PRIM_POINTS ||
          mode == PIPE_PRIM_LINES ||
          mode == PIPE_PRIM_LINE_LOOP ||
          mode == PIPE_PRIM_LINE_STRIP;
}

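/**
 * Return true if the restart index requested by the draw matches the
 * fixed cut index the hardware uses (all ones at the current index size).
 */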
static bool
can_cut_index_handle_restart_index(struct crocus_context *ice,
                                   const struct pipe_draw_info *draw)
{
   switch (draw->index_size) {
   case 1:
      return draw->restart_index == 0xff;
   case 2:
      return draw->restart_index == 0xffff;
   case 4:
      return draw->restart_index == 0xffffffff;
   default:
      unreachable("illegal index size\n");
   }

   return false;
}

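/**
 * Return true if the hardware cut index can implement primitive restart
 * for this draw.  Haswell and later handle any restart index and topology;
 * older hardware is limited to an all-ones restart index and a subset of
 * primitive topologies.
 */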
static bool
can_cut_index_handle_prim(struct crocus_context *ice,
                          const struct pipe_draw_info *draw)
{
   struct crocus_screen *screen = (struct crocus_screen*)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   /* Haswell can do it all. */
   if (devinfo->verx10 >= 75)
      return true;

   if (!can_cut_index_handle_restart_index(ice, draw))
      return false;

   switch (draw->mode) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
   case PIPE_PRIM_LINES_ADJACENCY:
   case PIPE_PRIM_LINE_STRIP_ADJACENCY:
   case PIPE_PRIM_TRIANGLES_ADJACENCY:
   case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
      return true;
   default:
      break;
   }
   return false;
}

/**
 * Record the current primitive mode and restart information, flagging
 * related packets as dirty if necessary.
 *
 * This must be called before updating compiled shaders, because the patch
 * information informs the TCS key.
 */
static void
crocus_update_draw_info(struct crocus_context *ice,
                        const struct pipe_draw_info *info,
                        const struct pipe_draw_start_count_bias *draw)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   enum pipe_prim_type mode = info->mode;

   if (screen->devinfo.ver < 6) {
      /* Slight optimization to avoid the GS program when not needed:
       */
      struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
      if (mode == PIPE_PRIM_QUAD_STRIP && !rs_state->flatshade &&
          rs_state->fill_front == PIPE_POLYGON_MODE_FILL &&
          rs_state->fill_back == PIPE_POLYGON_MODE_FILL)
         mode = PIPE_PRIM_TRIANGLE_STRIP;
      if (mode == PIPE_PRIM_QUADS &&
          draw->count == 4 &&
          !rs_state->flatshade &&
          rs_state->fill_front == PIPE_POLYGON_MODE_FILL &&
          rs_state->fill_back == PIPE_POLYGON_MODE_FILL)
         mode = PIPE_PRIM_TRIANGLE_FAN;
   }

   if (ice->state.prim_mode != mode) {
      ice->state.prim_mode = mode;

      enum pipe_prim_type reduced = u_reduced_prim(mode);
      if (ice->state.reduced_prim_mode != reduced) {
         if (screen->devinfo.ver < 6)
            ice->state.dirty |= CROCUS_DIRTY_GEN4_CLIP_PROG | CROCUS_DIRTY_GEN4_SF_PROG;
         /* If the reduced prim changes, the WM needs updating. */
         ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_FS;
         ice->state.reduced_prim_mode = reduced;
      }

      if (screen->devinfo.ver == 8)
         ice->state.dirty |= CROCUS_DIRTY_GEN8_VF_TOPOLOGY;

      if (screen->devinfo.ver <= 6)
         ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;

      if (screen->devinfo.ver >= 7)
         ice->state.dirty |= CROCUS_DIRTY_GEN7_SBE;

      /* For XY Clip enables */
      bool points_or_lines = prim_is_points_or_lines(mode);
      if (points_or_lines != ice->state.prim_is_points_or_lines) {
         ice->state.prim_is_points_or_lines = points_or_lines;
         ice->state.dirty |= CROCUS_DIRTY_CLIP;
      }
   }

   if (info->mode == PIPE_PRIM_PATCHES &&
       ice->state.vertices_per_patch != ice->state.patch_vertices) {
      ice->state.vertices_per_patch = ice->state.patch_vertices;

      if (screen->devinfo.ver == 8)
         ice->state.dirty |= CROCUS_DIRTY_GEN8_VF_TOPOLOGY;
      /* This is needed for key->input_vertices */
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_TCS;

      /* Flag constants dirty for gl_PatchVerticesIn if needed. */
      const struct shader_info *tcs_info =
         crocus_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
      if (tcs_info &&
          BITSET_TEST(tcs_info->system_values_read, SYSTEM_VALUE_VERTICES_IN)) {
         ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CONSTANTS_TCS;
         ice->state.shaders[MESA_SHADER_TESS_CTRL].sysvals_need_upload = true;
      }
   }

   const unsigned cut_index = info->primitive_restart ? info->restart_index :
                                                        ice->state.cut_index;
   if (ice->state.primitive_restart != info->primitive_restart ||
       ice->state.cut_index != cut_index) {
      if (screen->devinfo.verx10 >= 75)
         ice->state.dirty |= CROCUS_DIRTY_GEN75_VF;
      ice->state.primitive_restart = info->primitive_restart;
      ice->state.cut_index = info->restart_index;
   }
}

/**
 * Update shader draw parameters, flagging VF packets as dirty if necessary.
 */
static void
crocus_update_draw_parameters(struct crocus_context *ice,
                              const struct pipe_draw_info *info,
                              unsigned drawid_offset,
                              const struct pipe_draw_indirect_info *indirect,
                              const struct pipe_draw_start_count_bias *draw)
{
   bool changed = false;

   if (ice->state.vs_uses_draw_params) {
      struct crocus_state_ref *draw_params = &ice->draw.draw_params;

      if (indirect && indirect->buffer) {
         pipe_resource_reference(&draw_params->res, indirect->buffer);
         draw_params->offset =
            indirect->offset + (info->index_size ? 12 : 8);

         changed = true;
         ice->draw.params_valid = false;
      } else {
         int firstvertex = info->index_size ? draw->index_bias : draw->start;

         if (!ice->draw.params_valid ||
             ice->draw.params.firstvertex != firstvertex ||
             ice->draw.params.baseinstance != info->start_instance) {

            changed = true;
            ice->draw.params.firstvertex = firstvertex;
            ice->draw.params.baseinstance = info->start_instance;
            ice->draw.params_valid = true;

            u_upload_data(ice->ctx.stream_uploader, 0,
                          sizeof(ice->draw.params), 4, &ice->draw.params,
                          &draw_params->offset, &draw_params->res);
         }
      }
   }

   if (ice->state.vs_uses_derived_draw_params) {
      struct crocus_state_ref *derived_params = &ice->draw.derived_draw_params;
      int is_indexed_draw = info->index_size ? -1 : 0;

      if (ice->draw.derived_params.drawid != drawid_offset ||
          ice->draw.derived_params.is_indexed_draw != is_indexed_draw) {

         changed = true;
         ice->draw.derived_params.drawid = drawid_offset;
         ice->draw.derived_params.is_indexed_draw = is_indexed_draw;

         u_upload_data(ice->ctx.stream_uploader, 0,
                       sizeof(ice->draw.derived_params), 4,
                       &ice->draw.derived_params, &derived_params->offset,
                       &derived_params->res);
      }
   }

   if (changed) {
      struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
      ice->state.dirty |= CROCUS_DIRTY_VERTEX_BUFFERS |
                          CROCUS_DIRTY_VERTEX_ELEMENTS;
      if (screen->devinfo.ver == 8)
         ice->state.dirty |= CROCUS_DIRTY_GEN8_VF_SGVS;
   }
}

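/**
 * Emit one draw per record of an indirect buffer (multi-draw indirect),
 * sourcing the draw parameters from that buffer.
 *
 * On Haswell and later, when a predicated indirect draw count is in use,
 * MI_PREDICATE_RESULT is saved to and restored from GPR15 around the draws.
 */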
static void
crocus_indirect_draw_vbo(struct crocus_context *ice,
                         const struct pipe_draw_info *dinfo,
                         unsigned drawid_offset,
                         const struct pipe_draw_indirect_info *dindirect,
                         const struct pipe_draw_start_count_bias *draws)
{
   struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
   struct crocus_screen *screen = batch->screen;
   struct pipe_draw_info info = *dinfo;
   struct pipe_draw_indirect_info indirect = *dindirect;
   const struct intel_device_info *devinfo = &batch->screen->devinfo;

   if (devinfo->verx10 >= 75 && indirect.indirect_draw_count &&
       ice->state.predicate == CROCUS_PREDICATE_STATE_USE_BIT) {
      /* Upload MI_PREDICATE_RESULT to GPR15. */
      screen->vtbl.load_register_reg64(batch, CS_GPR(15), MI_PREDICATE_RESULT);
   }

   uint64_t orig_dirty = ice->state.dirty;
   uint64_t orig_stage_dirty = ice->state.stage_dirty;

   for (int i = 0; i < indirect.draw_count; i++) {
      crocus_batch_maybe_flush(batch, 1500);
      crocus_require_statebuffer_space(batch, 2400);

      if (ice->state.vs_uses_draw_params ||
          ice->state.vs_uses_derived_draw_params)
         crocus_update_draw_parameters(ice, &info, drawid_offset + i, &indirect, draws);

      screen->vtbl.upload_render_state(ice, batch, &info, drawid_offset + i, &indirect, draws);

      ice->state.dirty &= ~CROCUS_ALL_DIRTY_FOR_RENDER;
      ice->state.stage_dirty &= ~CROCUS_ALL_STAGE_DIRTY_FOR_RENDER;

      indirect.offset += indirect.stride;
   }

   if (devinfo->verx10 >= 75 && indirect.indirect_draw_count &&
       ice->state.predicate == CROCUS_PREDICATE_STATE_USE_BIT) {
      /* Restore MI_PREDICATE_RESULT. */
      screen->vtbl.load_register_reg64(batch, MI_PREDICATE_RESULT, CS_GPR(15));
   }

   /* Put this back for post-draw resolves, we'll clear it again after. */
   ice->state.dirty = orig_dirty;
   ice->state.stage_dirty = orig_stage_dirty;
}

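/**
 * Emit a single draw whose parameters don't come from an indirect buffer.
 */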
static void
crocus_simple_draw_vbo(struct crocus_context *ice,
                       const struct pipe_draw_info *draw,
                       unsigned drawid_offset,
                       const struct pipe_draw_indirect_info *indirect,
                       const struct pipe_draw_start_count_bias *sc)
{
   struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
   struct crocus_screen *screen = batch->screen;

   crocus_batch_maybe_flush(batch, 1500);
   crocus_require_statebuffer_space(batch, 2400);

   if (ice->state.vs_uses_draw_params ||
       ice->state.vs_uses_derived_draw_params)
      crocus_update_draw_parameters(ice, draw, drawid_offset, indirect, sc);

   screen->vtbl.upload_render_state(ice, batch, draw, drawid_offset, indirect, sc);
}

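/**
 * Fallback for draws whose vertex count comes from a stream output target
 * (used before Haswell): obtain the current stream output offset via the
 * screen vtable and re-issue the draw with an explicit count.
 */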
static void
crocus_draw_vbo_get_vertex_count(struct pipe_context *ctx,
                                 const struct pipe_draw_info *info_in,
                                 unsigned drawid_offset,
                                 const struct pipe_draw_indirect_info *indirect)
{
   struct crocus_screen *screen = (struct crocus_screen *)ctx->screen;
   struct pipe_draw_info info = *info_in;
   struct pipe_draw_start_count_bias draw;

   uint32_t val = screen->vtbl.get_so_offset(indirect->count_from_stream_output);

   draw.start = 0;
   draw.count = val;
   ctx->draw_vbo(ctx, &info, drawid_offset, NULL, &draw, 1);
}

/**
 * The pipe->draw_vbo() driver hook. Performs a draw on the GPU.
 */
void
crocus_draw_vbo(struct pipe_context *ctx,
                const struct pipe_draw_info *info,
                unsigned drawid_offset,
                const struct pipe_draw_indirect_info *indirect,
                const struct pipe_draw_start_count_bias *draws,
                unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(ctx, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !info->instance_count))
      return;

   struct crocus_context *ice = (struct crocus_context *) ctx;
   struct crocus_screen *screen = (struct crocus_screen*)ice->ctx.screen;
   struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];

   if (!crocus_check_conditional_render(ice))
      return;

   if (info->primitive_restart && !can_cut_index_handle_prim(ice, info)) {
      util_draw_vbo_without_prim_restart(ctx, info, drawid_offset,
                                         indirect, draws);
      return;
   }

   if (screen->devinfo.verx10 < 75 &&
       indirect && indirect->count_from_stream_output) {
      crocus_draw_vbo_get_vertex_count(ctx, info, drawid_offset, indirect);
      return;
   }

   /**
    * The hardware is capable of removing dangling vertices on its own; however,
    * prior to Gen6, we sometimes convert quads into trifans (and quad strips
    * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
    * This function manually trims dangling vertices from a draw call involving
    * quads so that those dangling vertices won't get drawn when we convert to
    * trifans/tristrips.
    */
   if (screen->devinfo.ver < 6) {
      if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP) {
         bool trim = u_trim_pipe_prim(info->mode, (unsigned *)&draws[0].count);
         if (!trim)
            return;
      }
   }

   /* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
    * write offsets, changing the behavior.
    */
   if (INTEL_DEBUG(DEBUG_REEMIT)) {
      ice->state.dirty |= CROCUS_ALL_DIRTY_FOR_RENDER & ~(CROCUS_DIRTY_GEN7_SO_BUFFERS | CROCUS_DIRTY_GEN6_SVBI);
      ice->state.stage_dirty |= CROCUS_ALL_STAGE_DIRTY_FOR_RENDER;
   }

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (screen->devinfo.ver == 6)
      crocus_emit_post_sync_nonzero_flush(batch);

   crocus_update_draw_info(ice, info, draws);

   if (!crocus_update_compiled_shaders(ice))
      return;

   if (ice->state.dirty & CROCUS_DIRTY_RENDER_RESOLVES_AND_FLUSHES) {
      bool draw_aux_buffer_disabled[BRW_MAX_DRAW_BUFFERS] = { };
      for (gl_shader_stage stage = 0; stage < MESA_SHADER_COMPUTE; stage++) {
         if (ice->shaders.prog[stage])
            crocus_predraw_resolve_inputs(ice, batch, draw_aux_buffer_disabled,
                                          stage, true);
      }
      crocus_predraw_resolve_framebuffer(ice, batch, draw_aux_buffer_disabled);
   }

   crocus_handle_always_flush_cache(batch);

   if (indirect && indirect->buffer)
      crocus_indirect_draw_vbo(ice, info, drawid_offset, indirect, draws);
   else
      crocus_simple_draw_vbo(ice, info, drawid_offset, indirect, draws);

   crocus_handle_always_flush_cache(batch);

   crocus_postdraw_update_resolve_tracking(ice, batch);

   ice->state.dirty &= ~CROCUS_ALL_DIRTY_FOR_RENDER;
   ice->state.stage_dirty &= ~CROCUS_ALL_STAGE_DIRTY_FOR_RENDER;
}

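/**
 * Track the grid launch size (or the indirect dispatch buffer) in a
 * resource so the work group count surface can point at it, and flag the
 * CS bindings dirty when the shader actually reads that surface.
 */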
static void
crocus_update_grid_size_resource(struct crocus_context *ice,
                                 const struct pipe_grid_info *grid)
{
   struct crocus_state_ref *grid_ref = &ice->state.grid_size;
   const struct crocus_compiled_shader *shader = ice->shaders.prog[MESA_SHADER_COMPUTE];
   bool grid_needs_surface = shader->bt.used_mask[CROCUS_SURFACE_GROUP_CS_WORK_GROUPS];

   if (grid->indirect) {
      pipe_resource_reference(&grid_ref->res, grid->indirect);
      grid_ref->offset = grid->indirect_offset;

      /* Zero out the grid size so that the next non-indirect grid launch will
       * re-upload it properly.
       */
      memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
   } else if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) != 0) {
      memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));
      u_upload_data(ice->ctx.const_uploader, 0, sizeof(grid->grid), 4,
                    grid->grid, &grid_ref->offset, &grid_ref->res);
   }

   /* Skip surface upload if we don't need it or we already have one */
   if (!grid_needs_surface)
      return;

   ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_BINDINGS_CS;
}

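/**
 * The pipe->launch_grid() driver hook. Dispatches a compute grid on the GPU.
 */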
void
crocus_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
{
   struct crocus_context *ice = (struct crocus_context *) ctx;
   struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_COMPUTE];
   struct crocus_screen *screen = batch->screen;

   if (!crocus_check_conditional_render(ice))
      return;

   if (INTEL_DEBUG(DEBUG_REEMIT)) {
      ice->state.dirty |= CROCUS_ALL_DIRTY_FOR_COMPUTE;
      ice->state.stage_dirty |= CROCUS_ALL_STAGE_DIRTY_FOR_COMPUTE;
   }

   /* We can't do resolves on the compute engine, so awkwardly, we have to
    * do them on the render batch...
    */
   if (ice->state.dirty & CROCUS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES) {
      crocus_predraw_resolve_inputs(ice, &ice->batches[CROCUS_BATCH_RENDER], NULL,
                                    MESA_SHADER_COMPUTE, false);
   }

   crocus_batch_maybe_flush(batch, 1500);
   crocus_require_statebuffer_space(batch, 2500);
   crocus_update_compiled_compute_shader(ice);

   if (memcmp(ice->state.last_block, grid->block, sizeof(grid->block)) != 0) {
      memcpy(ice->state.last_block, grid->block, sizeof(grid->block));
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CONSTANTS_CS;
      ice->state.shaders[MESA_SHADER_COMPUTE].sysvals_need_upload = true;
   }

   crocus_update_grid_size_resource(ice, grid);

   if (ice->state.compute_predicate) {
      screen->vtbl.emit_compute_predicate(batch);
      ice->state.compute_predicate = NULL;
   }

   crocus_handle_always_flush_cache(batch);

   screen->vtbl.upload_compute_state(ice, batch, grid);

   crocus_handle_always_flush_cache(batch);

   ice->state.dirty &= ~CROCUS_ALL_DIRTY_FOR_COMPUTE;
   ice->state.stage_dirty &= ~CROCUS_ALL_STAGE_DIRTY_FOR_COMPUTE;

   /* Note: since compute shaders can't access the framebuffer, there's
    * no need to call crocus_postdraw_update_resolve_tracking.
    */
}