1 /*
2 * Copyright © 2012 Rob Clark <robclark@freedesktop.org>
3 * SPDX-License-Identifier: MIT
4 *
5 * Authors:
6 * Rob Clark <robclark@freedesktop.org>
7 */
8
9 #include "util/format/u_format.h"
10 #include "util/format/u_format_rgtc.h"
11 #include "util/format/u_format_zs.h"
12 #include "util/set.h"
13 #include "util/u_drm.h"
14 #include "util/u_inlines.h"
15 #include "util/u_resource.h"
16 #include "util/u_sample_positions.h"
17 #include "util/u_string.h"
18 #include "util/u_surface.h"
19 #include "util/u_transfer.h"
20
21 #include "decode/util.h"
22
23 #include "freedreno_batch_cache.h"
24 #include "freedreno_blitter.h"
25 #include "freedreno_context.h"
26 #include "freedreno_fence.h"
27 #include "freedreno_query_hw.h"
28 #include "freedreno_resource.h"
29 #include "freedreno_screen.h"
30 #include "freedreno_surface.h"
31 #include "freedreno_util.h"
32
33 #include <errno.h>
34 #include "drm-uapi/drm_fourcc.h"
35
36 /* XXX this should go away, needed for 'struct winsys_handle' */
37 #include "frontend/drm_driver.h"
38
39 /**
40 * Go through the entire state and see if the resource is bound
41 * anywhere. If it is, mark the relevant state as dirty. This is
42 * called on realloc_bo to ensure the necessary state is re-
43 * emitted so the GPU looks at the new backing bo.
44 */
45 static void
46 rebind_resource_in_ctx(struct fd_context *ctx,
47 struct fd_resource *rsc) assert_dt
48 {
49 struct pipe_resource *prsc = &rsc->b.b;
50
51 if (ctx->rebind_resource)
52 ctx->rebind_resource(ctx, rsc);
53
54 /* VBOs */
55 if (rsc->dirty & FD_DIRTY_VTXBUF) {
56 struct fd_vertexbuf_stateobj *vb = &ctx->vtx.vertexbuf;
57 for (unsigned i = 0; i < vb->count && !(ctx->dirty & FD_DIRTY_VTXBUF);
58 i++) {
59 if (vb->vb[i].buffer.resource == prsc)
60 fd_dirty_resource(ctx, prsc, FD_DIRTY_VTXBUF, false);
61 }
62 }
63
64 /* xfb/so buffers: */
65 if (rsc->dirty & FD_DIRTY_STREAMOUT) {
66 struct fd_streamout_stateobj *so = &ctx->streamout;
67
68 for (unsigned i = 0;
69 i < so->num_targets && !(ctx->dirty & FD_DIRTY_STREAMOUT);
70 i++) {
71 if (so->targets[i]->buffer == prsc)
72 fd_dirty_resource(ctx, prsc, FD_DIRTY_STREAMOUT, true);
73 }
74 }
75
76 const enum fd_dirty_3d_state per_stage_dirty =
77 FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_IMAGE | FD_DIRTY_SSBO;
78
79 if (!(rsc->dirty & per_stage_dirty))
80 return;
81
82 /* per-shader-stage resources: */
83 for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
84 /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
85 * cmdstream rather than by pointer..
86 */
87 if ((rsc->dirty & FD_DIRTY_CONST) &&
88 !(ctx->dirty_shader[stage] & FD_DIRTY_CONST)) {
89 struct fd_constbuf_stateobj *cb = &ctx->constbuf[stage];
90 const unsigned num_ubos = util_last_bit(cb->enabled_mask);
91 for (unsigned i = 1; i < num_ubos; i++) {
92 if (cb->cb[i].buffer == prsc) {
93 fd_dirty_shader_resource(ctx, prsc, stage,
94 FD_DIRTY_SHADER_CONST, false);
95 break;
96 }
97 }
98 }
99
100 /* Textures */
101 if ((rsc->dirty & FD_DIRTY_TEX) &&
102 !(ctx->dirty_shader[stage] & FD_DIRTY_TEX)) {
103 struct fd_texture_stateobj *tex = &ctx->tex[stage];
104 for (unsigned i = 0; i < tex->num_textures; i++) {
105 if (tex->textures[i] && (tex->textures[i]->texture == prsc)) {
106 fd_dirty_shader_resource(ctx, prsc, stage,
107 FD_DIRTY_SHADER_TEX, false);
108 break;
109 }
110 }
111 }
112
113 /* Images */
114 if ((rsc->dirty & FD_DIRTY_IMAGE) &&
115 !(ctx->dirty_shader[stage] & FD_DIRTY_IMAGE)) {
116 struct fd_shaderimg_stateobj *si = &ctx->shaderimg[stage];
117 const unsigned num_images = util_last_bit(si->enabled_mask);
118 for (unsigned i = 0; i < num_images; i++) {
119 if (si->si[i].resource == prsc) {
120 bool write = si->si[i].access & PIPE_IMAGE_ACCESS_WRITE;
121 fd_dirty_shader_resource(ctx, prsc, stage,
122 FD_DIRTY_SHADER_IMAGE, write);
123 break;
124 }
125 }
126 }
127
128 /* SSBOs */
129 if ((rsc->dirty & FD_DIRTY_SSBO) &&
130 !(ctx->dirty_shader[stage] & FD_DIRTY_SSBO)) {
131 struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[stage];
132 const unsigned num_ssbos = util_last_bit(sb->enabled_mask);
133 for (unsigned i = 0; i < num_ssbos; i++) {
134 if (sb->sb[i].buffer == prsc) {
135 bool write = sb->writable_mask & BIT(i);
136 fd_dirty_shader_resource(ctx, prsc, stage,
137 FD_DIRTY_SHADER_SSBO, write);
138 break;
139 }
140 }
141 }
142 }
143 }
144
145 static void
146 rebind_resource(struct fd_resource *rsc) assert_dt
147 {
148 struct fd_screen *screen = fd_screen(rsc->b.b.screen);
149
150 fd_screen_lock(screen);
151 fd_resource_lock(rsc);
152
153 if (rsc->dirty)
154 list_for_each_entry (struct fd_context, ctx, &screen->context_list, node)
155 rebind_resource_in_ctx(ctx, rsc);
156
157 fd_resource_unlock(rsc);
158 fd_screen_unlock(screen);
159 }
160
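/**
 * Helper to install a (new) backing bo on a resource.  Also bumps the
 * resource seqno, so anything keyed on the previous storage can be
 * detected as stale.
 */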
161 static inline void
162 fd_resource_set_bo(struct fd_resource *rsc, struct fd_bo *bo)
163 {
164 struct fd_screen *screen = fd_screen(rsc->b.b.screen);
165
166 rsc->bo = bo;
167 rsc->seqno = seqno_next_u16(&screen->rsc_seqno);
168 }
169
170 int
171 __fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc, unsigned op,
172 const char *func)
173 {
174 if (op & FD_BO_PREP_NOSYNC)
175 return fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
176
177 int ret;
178
179 perf_time_ctx (ctx, 10000, "%s: a busy \"%" PRSC_FMT "\" BO stalled", func,
180 PRSC_ARGS(&rsc->b.b)) {
181 ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
182 }
183
184 return ret;
185 }
186
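/**
 * (Re)allocate the backing bo for a resource, picking allocation flags
 * from the resource's target/usage/bind flags, and invalidate anything
 * that still referenced the old storage.
 */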
187 static void
188 realloc_bo(struct fd_resource *rsc, uint32_t size)
189 {
190 struct pipe_resource *prsc = &rsc->b.b;
191 struct fd_screen *screen = fd_screen(rsc->b.b.screen);
192 uint32_t flags =
193 ((prsc->target == PIPE_BUFFER) ? FD_BO_HINT_BUFFER : FD_BO_HINT_IMAGE) |
194 COND(rsc->layout.tile_mode, FD_BO_NOMAP) |
195 COND((prsc->usage & PIPE_USAGE_STAGING) &&
196 (prsc->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT),
197 FD_BO_CACHED_COHERENT) |
198 COND(prsc->bind & PIPE_BIND_SHARED, FD_BO_SHARED) |
199 COND(prsc->bind & PIPE_BIND_SCANOUT, FD_BO_SCANOUT);
200 /* TODO other flags? */
201
202 /* if we start using things other than write-combine,
203 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
204 */
205
206 if (rsc->bo)
207 fd_bo_del(rsc->bo);
208
209 struct fd_bo *bo =
210 fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x", prsc->width0,
211 prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
212 fd_resource_set_bo(rsc, bo);
213
214 /* Zero out the UBWC area on allocation. This fixes intermittent failures
215 * with UBWC, which I suspect are due to the HW having a hard time
216 * interpreting arbitrary values populating the flags buffer when the BO
217 * was recycled through the bo cache (instead of fresh allocations from
218 * the kernel, which are zeroed). sleep(1) in this spot didn't work
219 * around the issue, but any memset value seems to.
220 */
221 if (rsc->layout.ubwc) {
222 rsc->needs_ubwc_clear = true;
223 }
224
225 util_range_set_empty(&rsc->valid_buffer_range);
226 fd_bc_invalidate_resource(rsc, true);
227 }
228
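/**
 * Perform a blit for shadowing/staging, falling back to a CPU copy via
 * util_resource_copy_region() when requested or when the hw blit path
 * declines the blit.
 */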
229 static void
230 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit,
231 bool fallback) assert_dt
232 {
233 struct pipe_context *pctx = &ctx->base;
234
235 assert(!ctx->in_blit);
236 ctx->in_blit = true;
237
238 /* TODO size threshold too?? */
239 if (fallback || !fd_blit(pctx, blit)) {
240 /* do blit on cpu: */
241 util_resource_copy_region(pctx, blit->dst.resource, blit->dst.level,
242 blit->dst.box.x, blit->dst.box.y,
243 blit->dst.box.z, blit->src.resource,
244 blit->src.level, &blit->src.box);
245 }
246
247 ctx->in_blit = false;
248 }
249
250 /**
251 * Replace the storage of dst with src. This is only used by TC in the
252 * DISCARD_WHOLE_RESOURCE path, and src is a freshly allocated buffer.
253 */
254 void
255 fd_replace_buffer_storage(struct pipe_context *pctx, struct pipe_resource *pdst,
256 struct pipe_resource *psrc, unsigned num_rebinds, uint32_t rebind_mask,
257 uint32_t delete_buffer_id)
258 {
259 struct fd_context *ctx = fd_context(pctx);
260 struct fd_resource *dst = fd_resource(pdst);
261 struct fd_resource *src = fd_resource(psrc);
262
263 DBG("pdst=%p, psrc=%p", pdst, psrc);
264
265 /* This should only be called with buffers.. which side-steps some trickier
266 * cases, like a rsc that is in a batch-cache key...
267 */
268 assert(pdst->target == PIPE_BUFFER);
269 assert(psrc->target == PIPE_BUFFER);
270 assert(dst->track->bc_batch_mask == 0);
271 assert(src->track->bc_batch_mask == 0);
272 assert(src->track->batch_mask == 0);
273 assert(src->track->write_batch == NULL);
274 assert(memcmp(&dst->layout, &src->layout, sizeof(dst->layout)) == 0);
275
276 /* get rid of any references that batch-cache might have to us (which
277 * should empty/destroy rsc->batches hashset)
278 *
279 * Note that we aren't actually destroying dst, but we are replacing
280 * its storage, so we want to go thru the same motions of decoupling
281 * its batch connections.
282 */
283 fd_bc_invalidate_resource(dst, true);
284 rebind_resource(dst);
285
286 util_idalloc_mt_free(&ctx->screen->buffer_ids, delete_buffer_id);
287
288 fd_screen_lock(ctx->screen);
289
290 fd_bo_del(dst->bo);
291 dst->bo = fd_bo_ref(src->bo);
292
293 fd_resource_tracking_reference(&dst->track, src->track);
294 src->is_replacement = true;
295
296 dst->seqno = seqno_next_u16(&ctx->screen->rsc_seqno);
297
298 fd_screen_unlock(ctx->screen);
299 }
300
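/* Translate PIPE_MAP_READ/WRITE usage flags into FD_BO_PREP flags: */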
301 static unsigned
302 translate_usage(unsigned usage)
303 {
304 uint32_t op = 0;
305
306 if (usage & PIPE_MAP_READ)
307 op |= FD_BO_PREP_READ;
308
309 if (usage & PIPE_MAP_WRITE)
310 op |= FD_BO_PREP_WRITE;
311
312 return op;
313 }
314
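/**
 * Check (without stalling) whether the resource is referenced by pending
 * batches or whether its bo is still busy for the given usage.
 */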
315 bool
316 fd_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *prsc,
317 unsigned usage)
318 {
319 struct fd_resource *rsc = fd_resource(prsc);
320
321 if (pending(rsc, !!(usage & PIPE_MAP_WRITE)))
322 return true;
323
324 if (resource_busy(rsc, translate_usage(usage)))
325 return true;
326
327 return false;
328 }
329
330 static void flush_resource(struct fd_context *ctx, struct fd_resource *rsc,
331 unsigned usage);
332
333 /**
334 * Helper to check if the format is something that we can blit/render
335 * to.. if the format is not renderable, there is no point in trying
336 * to do a staging blit (as it will still end up being a cpu copy)
337 */
338 static bool
339 is_renderable(struct pipe_resource *prsc)
340 {
341 struct pipe_screen *pscreen = prsc->screen;
342 return pscreen->is_format_supported(
343 pscreen, prsc->format, prsc->target, prsc->nr_samples,
344 prsc->nr_storage_samples, PIPE_BIND_RENDER_TARGET);
345 }
346
347 /**
348 * @rsc: the resource to shadow
349 * @level: the level to discard (if box != NULL, otherwise ignored)
350 * @box: the box to discard (or NULL if none)
351 * @modifier: the modifier for the new buffer state
352 */
353 static bool
354 fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
355 unsigned level, const struct pipe_box *box,
356 uint64_t modifier) assert_dt
357 {
358 struct pipe_context *pctx = &ctx->base;
359 struct pipe_resource *prsc = &rsc->b.b;
360 struct fd_screen *screen = fd_screen(pctx->screen);
361 struct fd_batch *batch;
362 bool fallback = false;
363
364 if (prsc->next)
365 return false;
366
367 /* Flush any pending batches writing the resource before we go mucking around
368 * in its insides. The blit would immediately cause the batch to be flushed,
369 * anyway.
370 */
371 fd_bc_flush_writer(ctx, rsc);
372
373 /* Because IB1 ("gmem") cmdstream is built only when we flush the
374 * batch, we need to flush any batches that reference this rsc as
375 * a render target. Otherwise the framebuffer state emitted in
376 * IB1 will reference the resource's new state, and not the state
377 * at the point in time that the earlier draws referenced it.
378 *
379 * Note that being in the gmem key doesn't necessarily mean the
380 * batch was considered a writer!
381 */
382 foreach_batch (batch, &screen->batch_cache, rsc->track->bc_batch_mask) {
383 fd_batch_flush(batch);
384 }
385
386 /* TODO: somehow munge dimensions and format to copy unsupported
387 * render target format to something that is supported?
388 */
389 if (!is_renderable(prsc))
390 fallback = true;
391
392 /* do shadowing back-blits on the cpu for buffers -- requires about a page of
393 * DMA to make GPU copies worth it according to robclark. Note, if you
394 * decide to do it on the GPU then you'll need to update valid_buffer_range
395 * in the swap()s below.
396 */
397 if (prsc->target == PIPE_BUFFER)
398 fallback = true;
399
400 bool discard_whole_level = box && util_texrange_covers_whole_level(
401 prsc, level, box->x, box->y, box->z,
402 box->width, box->height, box->depth);
403
404 /* TODO need to be more clever about current level */
405 if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
406 return false;
407
408 struct pipe_resource *pshadow = pctx->screen->resource_create_with_modifiers(
409 pctx->screen, prsc, &modifier, 1);
410
411 if (!pshadow)
412 return false;
413
414 assert(!ctx->in_shadow);
415 ctx->in_shadow = true;
416
417 /* get rid of any references that batch-cache might have to us (which
418 * should empty/destroy rsc->batches hashset)
419 */
420 fd_bc_invalidate_resource(rsc, false);
421
422 fd_screen_lock(ctx->screen);
423
424 /* Swap the backing bo's, so shadow becomes the old buffer,
425 * blit from shadow to new buffer. From here on out, we
426 * cannot fail.
427 *
428 * Note that we need to do it in this order, otherwise if
429 * we go down cpu blit path, the recursive transfer_map()
430 * sees the wrong status..
431 */
432 struct fd_resource *shadow = fd_resource(pshadow);
433
434 DBG("shadow: %p (%d, %p) -> %p (%d, %p)", rsc, rsc->b.b.reference.count,
435 rsc->track, shadow, shadow->b.b.reference.count, shadow->track);
436
437 SWAP(rsc->bo, shadow->bo);
438 SWAP(rsc->valid, shadow->valid);
439
440 /* swap() doesn't work because you can't typeof() the bitfield. */
441 bool temp = shadow->needs_ubwc_clear;
442 shadow->needs_ubwc_clear = rsc->needs_ubwc_clear;
443 rsc->needs_ubwc_clear = temp;
444
445 SWAP(rsc->layout, shadow->layout);
446 rsc->seqno = seqno_next_u16(&ctx->screen->rsc_seqno);
447
448 /* at this point, the newly created shadow buffer is not referenced
449 * by any batches, but the existing rsc (probably) is. We need to
450 * transfer those references over:
451 */
452 assert(shadow->track->batch_mask == 0);
453 foreach_batch (batch, &ctx->screen->batch_cache, rsc->track->batch_mask) {
454 struct set_entry *entry = _mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc);
455 _mesa_set_remove(batch->resources, entry);
456 _mesa_set_add_pre_hashed(batch->resources, shadow->hash, shadow);
457 }
458 SWAP(rsc->track, shadow->track);
459
460 fd_screen_unlock(ctx->screen);
461
462 rebind_resource(rsc);
463
464 struct pipe_blit_info blit = {};
465 blit.dst.resource = prsc;
466 blit.dst.format = prsc->format;
467 blit.src.resource = pshadow;
468 blit.src.format = pshadow->format;
469 blit.mask = util_format_get_mask(prsc->format);
470 blit.filter = PIPE_TEX_FILTER_NEAREST;
471
472 #define set_box(field, val) \
473 do { \
474 blit.dst.field = (val); \
475 blit.src.field = (val); \
476 } while (0)
477
478 /* Disable occlusion queries during shadow blits. */
479 bool saved_active_queries = ctx->active_queries;
480 pctx->set_active_query_state(pctx, false);
481
482 /* blit the other levels in their entirety: */
483 for (unsigned l = 0; l <= prsc->last_level; l++) {
484 if (box && l == level)
485 continue;
486
487 /* just blit whole level: */
488 set_box(level, l);
489 set_box(box.width, u_minify(prsc->width0, l));
490 set_box(box.height, u_minify(prsc->height0, l));
491 set_box(box.depth, u_minify(prsc->depth0, l));
492
493 for (int i = 0; i < prsc->array_size; i++) {
494 set_box(box.z, i);
495 do_blit(ctx, &blit, fallback);
496 }
497 }
498
499 /* deal w/ current level specially, since we might need to split
500 * it up into a couple blits:
501 */
502 if (box && !discard_whole_level) {
503 set_box(level, level);
504
505 switch (prsc->target) {
506 case PIPE_BUFFER:
507 case PIPE_TEXTURE_1D:
508 set_box(box.y, 0);
509 set_box(box.z, 0);
510 set_box(box.height, 1);
511 set_box(box.depth, 1);
512
513 if (box->x > 0) {
514 set_box(box.x, 0);
515 set_box(box.width, box->x);
516
517 do_blit(ctx, &blit, fallback);
518 }
519 if ((box->x + box->width) < u_minify(prsc->width0, level)) {
520 set_box(box.x, box->x + box->width);
521 set_box(box.width,
522 u_minify(prsc->width0, level) - (box->x + box->width));
523
524 do_blit(ctx, &blit, fallback);
525 }
526 break;
527 case PIPE_TEXTURE_2D:
528 /* TODO */
529 default:
530 unreachable("TODO");
531 }
532 }
533
534 pctx->set_active_query_state(pctx, saved_active_queries);
535
536 ctx->in_shadow = false;
537
538 pipe_resource_reference(&pshadow, NULL);
539
540 return true;
541 }
542
543 /**
544 * Uncompress an UBWC compressed buffer "in place". This works basically
545 * like resource shadowing, creating a new resource, and doing an uncompress
546 * blit, and swapping the state between shadow and original resource so it
547 * appears to the gallium frontends as if nothing changed.
548 */
549 void
550 fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc, bool linear)
551 {
552 tc_assert_driver_thread(ctx->tc);
553
554 uint64_t modifier = linear ? DRM_FORMAT_MOD_LINEAR : DRM_FORMAT_MOD_QCOM_TILED3;
555
556 ASSERTED bool success = fd_try_shadow_resource(ctx, rsc, 0, NULL, modifier);
557
558 /* shadow should not fail in any cases where we need to uncompress: */
559 assert(success);
560 }
561
562 /**
563 * Debug helper to hexdump a resource.
564 */
565 void
566 fd_resource_dump(struct fd_resource *rsc, const char *name)
567 {
568 fd_bo_cpu_prep(rsc->bo, NULL, FD_BO_PREP_READ);
569 printf("%s: \n", name);
570 dump_hex(fd_bo_map(rsc->bo), fd_bo_size(rsc->bo));
571 }
572
573 static struct fd_resource *
574 fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
575 unsigned level, const struct pipe_box *box, unsigned usage)
576 assert_dt
577 {
578 struct pipe_context *pctx = &ctx->base;
579 struct pipe_resource tmpl = rsc->b.b;
580
581 /* We cannot currently do stencil export on earlier gens, and
582 * u_blitter cannot do blits involving stencil otherwise:
583 */
584 if ((ctx->screen->gen < 6) && !ctx->blit &&
585 (util_format_get_mask(tmpl.format) & PIPE_MASK_S))
586 return NULL;
587
588 tmpl.width0 = box->width;
589 tmpl.height0 = box->height;
590 /* for array textures, box->depth is the array_size, otherwise
591 * for 3d textures, it is the depth:
592 */
593 if (tmpl.array_size > 1) {
594 if (tmpl.target == PIPE_TEXTURE_CUBE)
595 tmpl.target = PIPE_TEXTURE_2D_ARRAY;
596 tmpl.array_size = box->depth;
597 tmpl.depth0 = 1;
598 } else {
599 tmpl.array_size = 1;
600 tmpl.depth0 = box->depth;
601 }
602 tmpl.last_level = 0;
603 tmpl.bind |= PIPE_BIND_LINEAR;
604 tmpl.usage = PIPE_USAGE_STAGING;
605 tmpl.flags = (usage & PIPE_MAP_READ) ? PIPE_RESOURCE_FLAG_MAP_COHERENT : 0;
606
607 struct pipe_resource *pstaging =
608 pctx->screen->resource_create(pctx->screen, &tmpl);
609 if (!pstaging)
610 return NULL;
611
612 return fd_resource(pstaging);
613 }
614
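/* Write-back path: blit the staging resource into the real resource on unmap: */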
615 static void
616 fd_blit_from_staging(struct fd_context *ctx,
617 struct fd_transfer *trans) assert_dt
618 {
619 DBG("");
620 struct pipe_resource *dst = trans->b.b.resource;
621 struct pipe_blit_info blit = {};
622
623 blit.dst.resource = dst;
624 blit.dst.format = dst->format;
625 blit.dst.level = trans->b.b.level;
626 blit.dst.box = trans->b.b.box;
627 blit.src.resource = trans->staging_prsc;
628 blit.src.format = trans->staging_prsc->format;
629 blit.src.level = 0;
630 blit.src.box = trans->staging_box;
631 blit.mask = util_format_get_mask(trans->staging_prsc->format);
632 blit.filter = PIPE_TEX_FILTER_NEAREST;
633
634 do_blit(ctx, &blit, false);
635 }
636
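/* Read path: blit the real resource into the staging resource before mapping: */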
637 static void
638 fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans) assert_dt
639 {
640 DBG("");
641 struct pipe_resource *src = trans->b.b.resource;
642 struct pipe_blit_info blit = {};
643
644 blit.src.resource = src;
645 blit.src.format = src->format;
646 blit.src.level = trans->b.b.level;
647 blit.src.box = trans->b.b.box;
648 blit.dst.resource = trans->staging_prsc;
649 blit.dst.format = trans->staging_prsc->format;
650 blit.dst.level = 0;
651 blit.dst.box = trans->staging_box;
652 blit.mask = util_format_get_mask(trans->staging_prsc->format);
653 blit.filter = PIPE_TEX_FILTER_NEAREST;
654
655 do_blit(ctx, &blit, false);
656 }
657
658 static void
659 fd_resource_transfer_flush_region(struct pipe_context *pctx,
660 struct pipe_transfer *ptrans,
661 const struct pipe_box *box)
662 {
663 struct fd_resource *rsc = fd_resource(ptrans->resource);
664
665 if (ptrans->resource->target == PIPE_BUFFER)
666 util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
667 ptrans->box.x + box->x,
668 ptrans->box.x + box->x + box->width);
669 }
670
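/**
 * Flush batches accessing the resource: a write mapping needs all readers
 * flushed, otherwise flushing the writer is sufficient.
 */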
671 static void
672 flush_resource(struct fd_context *ctx, struct fd_resource *rsc,
673 unsigned usage) assert_dt
674 {
675 if (usage & PIPE_MAP_WRITE) {
676 fd_bc_flush_readers(ctx, rsc);
677 } else {
678 fd_bc_flush_writer(ctx, rsc);
679 }
680 }
681
682 static void
683 fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
684 in_dt
685 {
686 struct fd_context *ctx = fd_context(pctx);
687 struct fd_resource *rsc = fd_resource(prsc);
688
689 /* Flushing the resource is only required if we are relying on
690 * implicit-sync, in which case the rendering must be flushed
691 * to the kernel for the fence to be added to the backing GEM
692 * object.
693 */
694 if (ctx->no_implicit_sync)
695 return;
696
697 flush_resource(ctx, rsc, PIPE_MAP_READ);
698
699 /* If we had to flush a batch, make sure it makes its way all the
700 * way to the kernel:
701 */
702 fd_resource_wait(ctx, rsc, FD_BO_PREP_FLUSH);
703 }
704
705 static void
706 fd_resource_transfer_unmap(struct pipe_context *pctx,
707 struct pipe_transfer *ptrans)
708 in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
709 {
710 struct fd_context *ctx = fd_context(pctx);
711 struct fd_resource *rsc = fd_resource(ptrans->resource);
712 struct fd_transfer *trans = fd_transfer(ptrans);
713
714 if (trans->staging_prsc) {
715 if (ptrans->usage & PIPE_MAP_WRITE)
716 fd_blit_from_staging(ctx, trans);
717 pipe_resource_reference(&trans->staging_prsc, NULL);
718 }
719
720 if (trans->upload_ptr) {
721 fd_bo_upload(rsc->bo, trans->upload_ptr, ptrans->box.x, ptrans->box.width);
722 free(trans->upload_ptr);
723 }
724
725 util_range_add(&rsc->b.b, &rsc->valid_buffer_range, ptrans->box.x,
726 ptrans->box.x + ptrans->box.width);
727
728 pipe_resource_reference(&ptrans->resource, NULL);
729
730 assert(trans->b.staging == NULL); /* for threaded context only */
731
732 /* Don't use pool_transfers_unsync. We are always in the driver
733 * thread. Freeing an object into a different pool is allowed.
734 */
735 slab_free(&ctx->transfer_pool, ptrans);
736 }
737
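/**
 * Discard the current contents of a resource.  If the GPU still references
 * it (pending batches or busy bo), swap in a fresh bo and rebind, otherwise
 * just reset the valid range.
 */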
738 static void
739 invalidate_resource(struct fd_resource *rsc, unsigned usage) assert_dt
740 {
741 bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
742 unsigned op = translate_usage(usage);
743
744 if (needs_flush || resource_busy(rsc, op)) {
745 realloc_bo(rsc, fd_bo_size(rsc->bo));
746 rebind_resource(rsc);
747 } else {
748 util_range_set_empty(&rsc->valid_buffer_range);
749 }
750 }
751
752 static bool
753 valid_range(struct fd_resource *rsc, const struct pipe_box *box)
754 {
755 return util_ranges_intersect(&rsc->valid_buffer_range, box->x, box->x + box->width);
756 }
757
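/**
 * Staging map path: allocate a staging resource, blit the current contents
 * into it for read mappings (and wait for that blit), and return the
 * mapping of the staging bo.
 */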
758 static void *
759 resource_transfer_map_staging(struct pipe_context *pctx,
760 struct pipe_resource *prsc,
761 unsigned level, unsigned usage,
762 const struct pipe_box *box,
763 struct fd_transfer *trans)
764 in_dt
765 {
766 struct fd_context *ctx = fd_context(pctx);
767 struct fd_resource *rsc = fd_resource(prsc);
768 struct fd_resource *staging_rsc;
769
770 assert(prsc->target != PIPE_BUFFER);
771
772 staging_rsc = fd_alloc_staging(ctx, rsc, level, box, usage);
773 if (!staging_rsc)
774 return NULL;
775
776 trans->staging_prsc = &staging_rsc->b.b;
777 trans->b.b.stride = fd_resource_pitch(staging_rsc, 0);
778 trans->b.b.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
779 trans->staging_box = *box;
780 trans->staging_box.x = 0;
781 trans->staging_box.y = 0;
782 trans->staging_box.z = 0;
783
784 if (usage & PIPE_MAP_READ) {
785 fd_blit_to_staging(ctx, trans);
786
787 fd_resource_wait(ctx, staging_rsc, FD_BO_PREP_READ);
788 }
789
790 ctx->stats.staging_uploads++;
791
792 return fd_bo_map(staging_rsc->bo);
793 }
794
795 static void *
796 resource_transfer_map_unsync(struct pipe_context *pctx,
797 struct pipe_resource *prsc, unsigned level,
798 unsigned usage, const struct pipe_box *box,
799 struct fd_transfer *trans)
800 {
801 struct fd_resource *rsc = fd_resource(prsc);
802 enum pipe_format format = prsc->format;
803 uint32_t offset;
804 char *buf;
805
806 if ((prsc->target == PIPE_BUFFER) &&
807 !(usage & (PIPE_MAP_READ | PIPE_MAP_DIRECTLY | PIPE_MAP_PERSISTENT)) &&
808 ((usage & PIPE_MAP_DISCARD_RANGE) || !valid_range(rsc, box)) &&
809 fd_bo_prefer_upload(rsc->bo, box->width)) {
810 trans->upload_ptr = malloc(box->width);
811 return trans->upload_ptr;
812 }
813
814 buf = fd_bo_map(rsc->bo);
815
816 /* With imported bo's allocated by something outside of mesa, when
817 * running in a VM (using virtio_gpu kernel driver) we could end up in
818 * a situation where we have a linear bo, but are unable to mmap it
819 * because it was allocated without the VIRTGPU_BLOB_FLAG_USE_MAPPABLE
820 * flag. So we end up needing to do a staging blit instead:
821 */
822 if (!buf)
823 return resource_transfer_map_staging(pctx, prsc, level, usage, box, trans);
824
825 offset = box->y / util_format_get_blockheight(format) * trans->b.b.stride +
826 box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
827 fd_resource_offset(rsc, level, box->z);
828
829 if (usage & PIPE_MAP_WRITE)
830 rsc->valid = true;
831
832 return buf + offset;
833 }
834
835 /**
836 * Note, with threaded_context, resource_transfer_map() is only called
837 * in driver thread, but resource_transfer_map_unsync() can be called in
838 * either driver or frontend thread.
839 */
840 static void *
841 resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
842 unsigned level, unsigned usage,
843 const struct pipe_box *box,
844 struct fd_transfer *trans) in_dt
845 {
846 struct fd_context *ctx = fd_context(pctx);
847 struct fd_resource *rsc = fd_resource(prsc);
848 char *buf;
849 int ret = 0;
850
851 tc_assert_driver_thread(ctx->tc);
852
853 /* Strip the read flag if the buffer has been invalidated (or is freshly
854 * created). Avoids extra staging blits of undefined data on glTexSubImage of
855 * a fresh DEPTH_COMPONENT or STENCIL_INDEX texture being stored as z24s8.
856 */
857 if (!rsc->valid)
858 usage &= ~PIPE_MAP_READ;
859
860 /* we always need a staging texture for tiled buffers:
861 *
862 * TODO we might sometimes want to *also* shadow the resource to avoid
863 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
864 * texture.
865 */
866 if (rsc->layout.tile_mode) {
867 return resource_transfer_map_staging(pctx, prsc, level, usage, box, trans);
868 } else if ((usage & PIPE_MAP_READ) && !fd_bo_is_cached(rsc->bo)) {
869 perf_debug_ctx(ctx, "wc readback: prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d",
870 prsc, level, usage, box->width, box->height, box->x, box->y);
871 }
872
873 if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
874 invalidate_resource(rsc, usage);
875 } else {
876 unsigned op = translate_usage(usage);
877 bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
878
879 /* If the GPU is writing to the resource, or if it is reading from the
880 * resource and we're trying to write to it, flush the renders.
881 */
882 bool busy = needs_flush || resource_busy(rsc, op);
883
884 /* if we need to flush/stall, see if we can make a shadow buffer
885 * to avoid this:
886 *
887 * TODO we could also go down this path if !reorder && !busy_for_read,
888 * ie. we only *don't* want to go down this path if the blit
889 * will trigger a flush!
890 */
891 if (ctx->screen->reorder && busy && !(usage & PIPE_MAP_READ) &&
892 (usage & PIPE_MAP_DISCARD_RANGE)) {
893
894 /* try shadowing only if it avoids a flush, otherwise staging would
895 * be better:
896 */
897 if (needs_flush && !(usage & TC_TRANSFER_MAP_NO_INVALIDATE) &&
898 fd_try_shadow_resource(ctx, rsc, level, box, DRM_FORMAT_MOD_LINEAR)) {
899 needs_flush = busy = false;
900 ctx->stats.shadow_uploads++;
901 } else {
902 struct fd_resource *staging_rsc = NULL;
903
904 if (needs_flush) {
905 perf_debug_ctx(ctx, "flushing: %" PRSC_FMT, PRSC_ARGS(prsc));
906 flush_resource(ctx, rsc, usage);
907 needs_flush = false;
908 }
909
910 /* in this case, we don't need to shadow the whole resource,
911 * since any draw that references the previous contents has
912 * already had rendering flushed for all tiles. So we can
913 * use a staging buffer to do the upload.
914 */
915 if (is_renderable(prsc))
916 staging_rsc = fd_alloc_staging(ctx, rsc, level, box, usage);
917 if (staging_rsc) {
918 trans->staging_prsc = &staging_rsc->b.b;
919 trans->b.b.stride = fd_resource_pitch(staging_rsc, 0);
920 trans->b.b.layer_stride =
921 fd_resource_layer_stride(staging_rsc, 0);
922 trans->staging_box = *box;
923 trans->staging_box.x = 0;
924 trans->staging_box.y = 0;
925 trans->staging_box.z = 0;
926 buf = fd_bo_map(staging_rsc->bo);
927
928 ctx->stats.staging_uploads++;
929
930 return buf;
931 }
932 }
933 }
934
935 if (needs_flush) {
936 flush_resource(ctx, rsc, usage);
937 needs_flush = false;
938 }
939
940 /* The GPU keeps track of how the various bo's are being used, and
941 * will wait if necessary for the proper operation to have
942 * completed.
943 */
944 if (busy) {
945 ret = fd_resource_wait(ctx, rsc, op);
946 if (ret)
947 return NULL;
948 }
949 }
950
951 return resource_transfer_map_unsync(pctx, prsc, level, usage, box, trans);
952 }
953
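/**
 * Refine the transfer-map usage flags: drop whole-resource invalidation if
 * TC already handled it, and infer PIPE_MAP_UNSYNCHRONIZED where it is safe
 * (unsynchronized TC maps, shadow blits, or writes to a not-yet-valid
 * buffer range).
 */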
954 static unsigned
955 improve_transfer_map_usage(struct fd_context *ctx, struct fd_resource *rsc,
956 unsigned usage, const struct pipe_box *box)
957 /* Not *strictly* true, but access to things that must only be touched from
958 * the driver thread is protected by !(usage & TC_TRANSFER_MAP_THREADED_UNSYNC):
959 */
960 in_dt
961 {
962 if (usage & TC_TRANSFER_MAP_NO_INVALIDATE) {
963 usage &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
964 }
965
966 if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
967 usage |= PIPE_MAP_UNSYNCHRONIZED;
968
969 if (!(usage &
970 (TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED | PIPE_MAP_UNSYNCHRONIZED))) {
971 if (ctx->in_shadow && !(usage & PIPE_MAP_READ)) {
972 usage |= PIPE_MAP_UNSYNCHRONIZED;
973 } else if ((usage & PIPE_MAP_WRITE) && (rsc->b.b.target == PIPE_BUFFER) &&
974 !valid_range(rsc, box)) {
975 /* We are trying to write to a previously uninitialized range. No need
976 * to synchronize.
977 */
978 usage |= PIPE_MAP_UNSYNCHRONIZED;
979 }
980 }
981
982 return usage;
983 }
984
985 static void *
986 fd_resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
987 unsigned level, unsigned usage,
988 const struct pipe_box *box,
989 struct pipe_transfer **pptrans)
990 {
991 struct fd_context *ctx = fd_context(pctx);
992 struct fd_resource *rsc = fd_resource(prsc);
993 struct fd_transfer *trans;
994 struct pipe_transfer *ptrans;
995
996 DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
997 box->width, box->height, box->x, box->y);
998
999 if ((usage & PIPE_MAP_DIRECTLY) && rsc->layout.tile_mode) {
1000 DBG("CANNOT MAP DIRECTLY!\n");
1001 return NULL;
1002 }
1003
1004 if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC) {
1005 ptrans = slab_zalloc(&ctx->transfer_pool_unsync);
1006 } else {
1007 ptrans = slab_zalloc(&ctx->transfer_pool);
1008 }
1009
1010 if (!ptrans)
1011 return NULL;
1012
1013 trans = fd_transfer(ptrans);
1014
1015 usage = improve_transfer_map_usage(ctx, rsc, usage, box);
1016
1017 pipe_resource_reference(&ptrans->resource, prsc);
1018 ptrans->level = level;
1019 ptrans->usage = usage;
1020 ptrans->box = *box;
1021 ptrans->stride = fd_resource_pitch(rsc, level);
1022 ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
1023
1024 void *ret;
1025 if (usage & PIPE_MAP_UNSYNCHRONIZED) {
1026 ret = resource_transfer_map_unsync(pctx, prsc, level, usage, box, trans);
1027 } else {
1028 ret = resource_transfer_map(pctx, prsc, level, usage, box, trans);
1029 }
1030
1031 if (ret) {
1032 *pptrans = ptrans;
1033 } else {
1034 fd_resource_transfer_unmap(pctx, ptrans);
1035 }
1036
1037 return ret;
1038 }
1039
1040 static void
1041 fd_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
1042 {
1043 struct fd_screen *screen = fd_screen(prsc->screen);
1044 struct fd_resource *rsc = fd_resource(prsc);
1045
1046 if (!rsc->is_replacement)
1047 fd_bc_invalidate_resource(rsc, true);
1048 if (rsc->bo)
1049 fd_bo_del(rsc->bo);
1050 if (rsc->lrz)
1051 fd_bo_del(rsc->lrz);
1052 if (rsc->scanout)
1053 renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
1054
1055 if (prsc->target == PIPE_BUFFER)
1056 util_idalloc_mt_free(&screen->buffer_ids, rsc->b.buffer_id_unique);
1057
1058 threaded_resource_deinit(prsc);
1059
1060 util_range_destroy(&rsc->valid_buffer_range);
1061 simple_mtx_destroy(&rsc->lock);
1062 fd_resource_tracking_reference(&rsc->track, NULL);
1063
1064 FREE(rsc);
1065 }
1066
1067 static uint64_t
1068 fd_resource_modifier(struct fd_resource *rsc)
1069 {
1070 if (rsc->layout.ubwc_layer_size)
1071 return DRM_FORMAT_MOD_QCOM_COMPRESSED;
1072
1073 switch (rsc->layout.tile_mode) {
1074 case 3: return DRM_FORMAT_MOD_QCOM_TILED3;
1075 case 2: return DRM_FORMAT_MOD_QCOM_TILED2;
1076 case 0: return DRM_FORMAT_MOD_LINEAR;
1077 default: return DRM_FORMAT_MOD_INVALID;
1078 }
1079 }
1080
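/**
 * Export a resource as a winsys handle.  If the rsc was not originally
 * allocated in a shareable way, try to shadow it into an exportable
 * allocation and retry.
 */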
1081 static bool
1082 fd_resource_get_handle(struct pipe_screen *pscreen, struct pipe_context *pctx,
1083 struct pipe_resource *prsc, struct winsys_handle *handle,
1084 unsigned usage)
1085 assert_dt
1086 {
1087 struct fd_resource *rsc = fd_resource(prsc);
1088
1089 rsc->b.is_shared = true;
1090
1091 if (prsc->target == PIPE_BUFFER)
1092 tc_buffer_disable_cpu_storage(&rsc->b.b);
1093
1094 handle->modifier = fd_resource_modifier(rsc);
1095
1096 if (prsc->target != PIPE_BUFFER) {
1097 struct fdl_metadata metadata = {
1098 .modifier = handle->modifier,
1099 };
1100 fd_bo_set_metadata(rsc->bo, &metadata, sizeof(metadata));
1101 }
1102
1103 DBG("%" PRSC_FMT ", modifier=%" PRIx64, PRSC_ARGS(prsc), handle->modifier);
1104
1105 bool ret = fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
1106 fd_resource_pitch(rsc, 0), handle);
1107
1108 if (!ret && !(prsc->bind & PIPE_BIND_SHARED)) {
1109
1110 pctx = threaded_context_unwrap_sync(pctx);
1111
1112 struct fd_context *ctx = pctx ?
1113 fd_context(pctx) : fd_screen_aux_context_get(pscreen);
1114
1115 /* Since gl is horrible, we can end up getting asked to export a handle
1116 * for a rsc which was not originally allocated in a way that can be
1117 * exported (for ex, sub-allocation or in the case of virtgpu we need
1118 * to tell the kernel at allocation time that the buffer can be shared)
1119 *
1120 * If we get into this scenario we can try to reallocate.
1121 */
1122
1123 prsc->bind |= PIPE_BIND_SHARED;
1124
1125 ret = fd_try_shadow_resource(ctx, rsc, 0, NULL, handle->modifier);
1126
1127 if (!pctx)
1128 fd_screen_aux_context_put(pscreen);
1129
1130 if (!ret)
1131 return false;
1132
1133 return fd_resource_get_handle(pscreen, pctx, prsc, handle, usage);
1134 }
1135
1136 return ret;
1137 }
1138
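/**
 * Query per-plane layout parameters (stride, offset, modifier, number of
 * planes) for a resource.
 */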
1139 static bool
1140 fd_resource_get_param(struct pipe_screen *pscreen,
1141 struct pipe_context *pctx, struct pipe_resource *prsc,
1142 unsigned plane, unsigned layer, unsigned level,
1143 enum pipe_resource_param param,
1144 unsigned usage, uint64_t *value)
1145 {
1146 struct fd_resource *rsc = fd_resource(util_resource_at_index(prsc, plane));
1147
1148 switch (param) {
1149 case PIPE_RESOURCE_PARAM_STRIDE:
1150 *value = fd_resource_pitch(rsc, 0);
1151 return true;
1152 case PIPE_RESOURCE_PARAM_OFFSET:
1153 if (fd_resource_ubwc_enabled(rsc, level)) {
1154 if (plane > 0)
1155 debug_warning("Unsupported offset query!\n");
1156 *value = fd_resource_ubwc_offset(rsc, level, layer);
1157 } else {
1158 *value = fd_resource_offset(rsc, level, layer);
1159 }
1160 return true;
1161 case PIPE_RESOURCE_PARAM_MODIFIER:
1162 *value = fd_resource_modifier(rsc);
1163 return true;
1164 case PIPE_RESOURCE_PARAM_NPLANES:
1165 *value = util_resource_num(prsc);
1166 return true;
1167 default:
1168 return false;
1169 }
1170 }
1171
1172 /* special case to resize query buf after allocated.. */
1173 void
1174 fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
1175 {
1176 struct fd_resource *rsc = fd_resource(prsc);
1177
1178 assert(prsc->width0 == 0);
1179 assert(prsc->target == PIPE_BUFFER);
1180 assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
1181
1182 prsc->width0 = sz;
1183 realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
1184 }
1185
1186 static void
1187 fd_resource_layout_init(struct pipe_resource *prsc)
1188 {
1189 struct fd_resource *rsc = fd_resource(prsc);
1190 struct fdl_layout *layout = &rsc->layout;
1191
1192 layout->format = prsc->format;
1193
1194 layout->width0 = prsc->width0;
1195 layout->height0 = prsc->height0;
1196 layout->depth0 = prsc->depth0;
1197
1198 layout->cpp = util_format_get_blocksize(prsc->format);
1199 layout->cpp *= fd_resource_nr_samples(prsc);
1200 layout->cpp_shift = ffs(layout->cpp) - 1;
1201 }
1202
1203 static struct fd_resource *
1204 alloc_resource_struct(struct pipe_screen *pscreen,
1205 const struct pipe_resource *tmpl)
1206 {
1207 struct fd_screen *screen = fd_screen(pscreen);
1208 struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
1209
1210 if (!rsc)
1211 return NULL;
1212
1213 struct pipe_resource *prsc = &rsc->b.b;
1214 *prsc = *tmpl;
1215
1216 pipe_reference_init(&prsc->reference, 1);
1217 prsc->screen = pscreen;
1218 rsc->hash = _mesa_hash_pointer(rsc);
1219
1220 util_range_init(&rsc->valid_buffer_range);
1221 simple_mtx_init(&rsc->lock, mtx_plain);
1222
1223 rsc->track = CALLOC_STRUCT(fd_resource_tracking);
1224 if (!rsc->track) {
1225 free(rsc);
1226 return NULL;
1227 }
1228
1229 pipe_reference_init(&rsc->track->reference, 1);
1230
1231 bool allow_cpu_storage = (tmpl->target == PIPE_BUFFER) &&
1232 (tmpl->width0 < 0x1000);
1233 threaded_resource_init(prsc, allow_cpu_storage);
1234
1235 if (tmpl->target == PIPE_BUFFER)
1236 rsc->b.buffer_id_unique = util_idalloc_mt_alloc(&screen->buffer_ids);
1237
1238 return rsc;
1239 }
1240
1241 enum fd_layout_type {
1242 ERROR,
1243 LINEAR,
1244 TILED,
1245 UBWC,
1246 };
1247
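/* An empty modifier list, or one containing DRM_FORMAT_MOD_INVALID, means
 * the layout can be chosen implicitly by the driver; any other entry is an
 * explicit modifier:
 */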
1248 static bool
1249 has_implicit_modifier(const uint64_t *modifiers, int count)
1250 {
1251 return count == 0 ||
1252 drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
1253 }
1254
1255 static bool
1256 has_explicit_modifier(const uint64_t *modifiers, int count)
1257 {
1258 for (int i = 0; i < count; i++) {
1259 if (modifiers[i] != DRM_FORMAT_MOD_INVALID)
1260 return true;
1261 }
1262 return false;
1263 }
1264
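/**
 * Pick the best layout (linear/tiled/UBWC) for a new resource, honoring the
 * allowed modifier set, debug overrides, and the cases which force linear.
 */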
1265 static enum fd_layout_type
1266 get_best_layout(struct fd_screen *screen,
1267 const struct pipe_resource *tmpl, const uint64_t *modifiers,
1268 int count)
1269 {
1270 const bool can_implicit = has_implicit_modifier(modifiers, count);
1271 const bool can_explicit = has_explicit_modifier(modifiers, count);
1272
1273 /* First, find all the conditions which would force us to linear */
1274 if (!screen->tile_mode)
1275 return LINEAR;
1276
1277 if (!screen->tile_mode(tmpl))
1278 return LINEAR;
1279
1280 if (tmpl->target == PIPE_BUFFER)
1281 return LINEAR;
1282
1283 if ((tmpl->usage == PIPE_USAGE_STAGING) &&
1284 !util_format_is_depth_or_stencil(tmpl->format))
1285 return LINEAR;
1286
1287 if (tmpl->bind & PIPE_BIND_LINEAR) {
1288 if (tmpl->usage != PIPE_USAGE_STAGING)
1289 perf_debug("%" PRSC_FMT ": forcing linear: bind flags",
1290 PRSC_ARGS(tmpl));
1291 return LINEAR;
1292 }
1293
1294 if (FD_DBG(NOTILE))
1295 return LINEAR;
1296
1297 /* Shared resources without explicit modifiers must always be linear */
1298 if (!can_explicit && (tmpl->bind & PIPE_BIND_SHARED)) {
1299 perf_debug("%" PRSC_FMT
1300 ": forcing linear: shared resource + implicit modifiers",
1301 PRSC_ARGS(tmpl));
1302 return LINEAR;
1303 }
1304
1305 bool ubwc_ok = is_a6xx(screen);
1306 if (FD_DBG(NOUBWC))
1307 ubwc_ok = false;
1308
1309 /* Disallow UBWC for front-buffer rendering. The GPU does not atomically
1310 * write pixel and header data, nor does the display atomically read it.
1311 * The result can be visual corruption (ie. moreso than normal tearing).
1312 */
1313 if (tmpl->bind & PIPE_BIND_USE_FRONT_RENDERING)
1314 ubwc_ok = false;
1315
1316 /* Disallow UBWC when asked not to use data dependent bandwidth compression:
1317 */
1318 if (tmpl->bind & PIPE_BIND_CONST_BW)
1319 ubwc_ok = false;
1320
1321 if (ubwc_ok && !can_implicit &&
1322 !drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count)) {
1323 perf_debug("%" PRSC_FMT
1324 ": not using UBWC: not in acceptable modifier set",
1325 PRSC_ARGS(tmpl));
1326 ubwc_ok = false;
1327 }
1328
1329 if (ubwc_ok)
1330 return UBWC;
1331
1332 if (can_implicit ||
1333 drm_find_modifier(DRM_FORMAT_MOD_QCOM_TILED3, modifiers, count))
1334 return TILED;
1335
1336 if (!drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count)) {
1337 perf_debug("%" PRSC_FMT ": need linear but not in modifier set",
1338 PRSC_ARGS(tmpl));
1339 return ERROR;
1340 }
1341
1342 perf_debug("%" PRSC_FMT ": not using tiling: explicit modifiers and no UBWC",
1343 PRSC_ARGS(tmpl));
1344 return LINEAR;
1345 }
1346
1347 /**
1348 * Helper that allocates a resource and resolves its layout (but doesn't
1349 * allocate its bo).
1350 *
1351 * It returns a pipe_resource (as fd_resource_create_with_modifiers()
1352 * would do), and also bo's minimum required size as an output argument.
1353 */
1354 static struct pipe_resource *
1355 fd_resource_allocate_and_resolve(struct pipe_screen *pscreen,
1356 const struct pipe_resource *tmpl,
1357 const uint64_t *modifiers, int count,
1358 uint32_t *psize)
1359 {
1360 struct fd_screen *screen = fd_screen(pscreen);
1361 struct fd_resource *rsc;
1362 struct pipe_resource *prsc;
1363 enum pipe_format format = tmpl->format;
1364 uint32_t size;
1365
1366 rsc = alloc_resource_struct(pscreen, tmpl);
1367 if (!rsc)
1368 return NULL;
1369
1370 prsc = &rsc->b.b;
1371
1372 /* Clover creates buffers with PIPE_FORMAT_NONE: */
1373 if ((prsc->target == PIPE_BUFFER) && (format == PIPE_FORMAT_NONE))
1374 format = prsc->format = PIPE_FORMAT_R8_UNORM;
1375
1376 DBG("%" PRSC_FMT, PRSC_ARGS(prsc));
1377
1378 if (tmpl->bind & PIPE_BIND_SHARED)
1379 rsc->b.is_shared = true;
1380
1381 fd_resource_layout_init(prsc);
1382
1383 enum fd_layout_type layout =
1384 get_best_layout(screen, tmpl, modifiers, count);
1385 if (layout == ERROR) {
1386 free(prsc);
1387 return NULL;
1388 }
1389
1390 if (layout >= TILED)
1391 rsc->layout.tile_mode = screen->tile_mode(prsc);
1392 if (layout == UBWC)
1393 rsc->layout.ubwc = true;
1394
1395 rsc->internal_format = format;
1396
1397 if (prsc->target == PIPE_BUFFER) {
1398 assert(prsc->format == PIPE_FORMAT_R8_UNORM);
1399 size = prsc->width0;
1400 fdl_layout_buffer(&rsc->layout, size);
1401 } else {
1402 size = screen->setup_slices(rsc);
1403 }
1404
1405 /* special case for hw-query buffer, which we need to allocate before we
1406 * know the size:
1407 */
1408 if (size == 0) {
1409 /* note, semi-intentionally using == instead of & */
1410 assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
1411 *psize = 0;
1412 return prsc;
1413 }
1414
1415 /* Set the layer size if the (non-a6xx) backend hasn't done so. */
1416 if (rsc->layout.layer_first && !rsc->layout.layer_size) {
1417 rsc->layout.layer_size = align(size, 4096);
1418 size = rsc->layout.layer_size * prsc->array_size;
1419 }
1420
1421 if (FD_DBG(LAYOUT))
1422 fdl_dump_layout(&rsc->layout);
1423
1424 /* Hand out the resolved size. */
1425 if (psize)
1426 *psize = size;
1427
1428 return prsc;
1429 }
1430
1431 /**
1432 * Create a new texture object, using the given template info.
1433 */
1434 static struct pipe_resource *
1435 fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
1436 const struct pipe_resource *tmpl,
1437 const uint64_t *modifiers, int count)
1438 {
1439 struct fd_screen *screen = fd_screen(pscreen);
1440 struct fd_resource *rsc;
1441 struct pipe_resource *prsc;
1442 uint32_t size;
1443
1444 /* when using kmsro, scanout buffers are allocated on the display device.
1445 * create_with_modifiers() doesn't give us usage flags, so we have to
1446 * assume that all calls with modifiers are scanout-possible
1447 */
1448 if (screen->ro &&
1449 ((tmpl->bind & PIPE_BIND_SCANOUT) ||
1450 has_explicit_modifier(modifiers, count))) {
1451 struct pipe_resource scanout_templat = *tmpl;
1452 struct renderonly_scanout *scanout;
1453 struct winsys_handle handle;
1454
1455 /* note: alignment is wrong for a6xx */
1456 scanout_templat.width0 = align(tmpl->width0, screen->info->gmem_align_w);
1457
1458 scanout =
1459 renderonly_scanout_for_resource(&scanout_templat, screen->ro, &handle);
1460 if (!scanout)
1461 return NULL;
1462
1463 renderonly_scanout_destroy(scanout, screen->ro);
1464
1465 assert(handle.type == WINSYS_HANDLE_TYPE_FD);
1466 rsc = fd_resource(pscreen->resource_from_handle(
1467 pscreen, tmpl, &handle, PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
1468 close(handle.handle);
1469 if (!rsc)
1470 return NULL;
1471
1472 return &rsc->b.b;
1473 }
1474
1475 prsc =
1476 fd_resource_allocate_and_resolve(pscreen, tmpl, modifiers, count, &size);
1477 if (!prsc)
1478 return NULL;
1479 rsc = fd_resource(prsc);
1480
1481 realloc_bo(rsc, size);
1482 if (!rsc->bo)
1483 goto fail;
1484
1485 return prsc;
1486 fail:
1487 fd_resource_destroy(pscreen, prsc);
1488 return NULL;
1489 }
1490
1491 static struct pipe_resource *
1492 fd_resource_create(struct pipe_screen *pscreen,
1493 const struct pipe_resource *tmpl)
1494 {
1495 const uint64_t mod = DRM_FORMAT_MOD_INVALID;
1496 return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
1497 }
1498
1499 /**
1500 * Create a texture from a winsys_handle. The handle is often created in
1501 * another process by first creating a pipe texture and then calling
1502 * resource_get_handle.
1503 */
1504 static struct pipe_resource *
1505 fd_resource_from_handle(struct pipe_screen *pscreen,
1506 const struct pipe_resource *tmpl,
1507 struct winsys_handle *handle, unsigned usage)
1508 {
1509 struct fd_screen *screen = fd_screen(pscreen);
1510 struct fd_resource *rsc = alloc_resource_struct(pscreen, tmpl);
1511
1512 if (!rsc)
1513 return NULL;
1514
1515 if (tmpl->target == PIPE_BUFFER)
1516 tc_buffer_disable_cpu_storage(&rsc->b.b);
1517
1518 struct fdl_slice *slice = fd_resource_slice(rsc, 0);
1519 struct pipe_resource *prsc = &rsc->b.b;
1520
1521 DBG("%" PRSC_FMT ", modifier=%" PRIx64, PRSC_ARGS(prsc), handle->modifier);
1522
1523 rsc->b.is_shared = true;
1524
1525 fd_resource_layout_init(prsc);
1526
1527 struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, handle);
1528 if (!bo)
1529 goto fail;
1530
1531 fd_resource_set_bo(rsc, bo);
1532
1533 rsc->internal_format = tmpl->format;
1534 rsc->layout.layer_first = true;
1535 rsc->layout.pitch0 = handle->stride;
1536 slice->offset = handle->offset;
1537 slice->size0 = handle->stride * prsc->height0;
1538
1539 /* use a pitchalign of gmem_align_w pixels, because GMEM resolve for
1540 * lower alignments is not implemented (but possible for a6xx at least)
1541 *
1542 * for UBWC-enabled resources, layout_resource_for_modifier will further
1543 * validate the pitch and set the right pitchalign
1544 */
1545 rsc->layout.pitchalign =
1546 fdl_cpp_shift(&rsc->layout) + util_logbase2(screen->info->gmem_align_w);
1547
1548 /* apply the minimum pitchalign (note: actually 4 for a3xx but doesn't
1549 * matter) */
1550 if (is_a6xx(screen) || is_a5xx(screen))
1551 rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 6);
1552 else
1553 rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 5);
1554
1555 if (rsc->layout.pitch0 < (prsc->width0 * rsc->layout.cpp) ||
1556 fd_resource_pitch(rsc, 0) != rsc->layout.pitch0)
1557 goto fail;
1558
1559 assert(rsc->layout.cpp);
1560
1561 if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
1562 goto fail;
1563
1564 if (screen->ro) {
1565 rsc->scanout =
1566 renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
1567 /* failure is expected in some cases.. */
1568 }
1569
1570 rsc->valid = true;
1571
1572 return prsc;
1573
1574 fail:
1575 fd_resource_destroy(pscreen, prsc);
1576 return NULL;
1577 }
1578
1579 bool
1580 fd_render_condition_check(struct pipe_context *pctx)
1581 {
1582 struct fd_context *ctx = fd_context(pctx);
1583
1584 if (!ctx->cond_query)
1585 return true;
1586
1587 perf_debug("Implementing conditional rendering using a CPU read instaed of HW conditional rendering.");
1588
1589 union pipe_query_result res = {0};
1590 bool wait = ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
1591 ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
1592
1593 if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
1594 return (bool)res.u64 != ctx->cond_cond;
1595
1596 return true;
1597 }
1598
1599 static void
1600 fd_invalidate_resource(struct pipe_context *pctx,
1601 struct pipe_resource *prsc) in_dt
1602 {
1603 struct fd_context *ctx = fd_context(pctx);
1604 struct fd_resource *rsc = fd_resource(prsc);
1605
1606 if (prsc->target == PIPE_BUFFER) {
1607 /* Handle the glInvalidateBufferData() case:
1608 */
1609 invalidate_resource(rsc, PIPE_MAP_READ | PIPE_MAP_WRITE);
1610 } else if (rsc->track->write_batch) {
1611 /* Handle the glInvalidateFramebuffer() case, telling us that
1612 * we can skip resolve.
1613 */
1614
1615 struct fd_batch *batch = rsc->track->write_batch;
1616 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1617
1618 if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
1619 batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
1620 fd_dirty_resource(ctx, prsc, FD_DIRTY_ZSA, true);
1621 }
1622
1623 for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
1624 if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
1625 batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
1626 fd_dirty_resource(ctx, prsc, FD_DIRTY_FRAMEBUFFER, true);
1627 }
1628 }
1629 }
1630
1631 rsc->valid = false;
1632 }
1633
1634 static enum pipe_format
1635 fd_resource_get_internal_format(struct pipe_resource *prsc)
1636 {
1637 return fd_resource(prsc)->internal_format;
1638 }
1639
1640 static void
1641 fd_resource_set_stencil(struct pipe_resource *prsc,
1642 struct pipe_resource *stencil)
1643 {
1644 fd_resource(prsc)->stencil = fd_resource(stencil);
1645 }
1646
1647 static struct pipe_resource *
1648 fd_resource_get_stencil(struct pipe_resource *prsc)
1649 {
1650 struct fd_resource *rsc = fd_resource(prsc);
1651 if (rsc->stencil)
1652 return &rsc->stencil->b.b;
1653 return NULL;
1654 }
1655
1656 static const struct u_transfer_vtbl transfer_vtbl = {
1657 .resource_create = fd_resource_create,
1658 .resource_destroy = fd_resource_destroy,
1659 .transfer_map = fd_resource_transfer_map,
1660 .transfer_flush_region = fd_resource_transfer_flush_region,
1661 .transfer_unmap = fd_resource_transfer_unmap,
1662 .get_internal_format = fd_resource_get_internal_format,
1663 .set_stencil = fd_resource_set_stencil,
1664 .get_stencil = fd_resource_get_stencil,
1665 };
1666
1667 static int
1668 fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
1669 {
1670 switch (modifier) {
1671 case DRM_FORMAT_MOD_LINEAR:
1672 /* The dri gallium frontend will pass DRM_FORMAT_MOD_INVALID to us
1673 * when it's called through any of the non-modifier BO create entry
1674 * points. Other drivers will determine tiling from the kernel or
1675 * other legacy backchannels, but for freedreno it just means
1676 * LINEAR. */
1677 case DRM_FORMAT_MOD_INVALID:
1678 return 0;
1679 default:
1680 return -1;
1681 }
1682 }
1683
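/**
 * Create a resource backed by an imported memory object (GL_EXT_memory_object),
 * deriving the modifier from bo metadata when available and sharing the
 * memobj's bo.
 */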
1684 static struct pipe_resource *
1685 fd_resource_from_memobj(struct pipe_screen *pscreen,
1686 const struct pipe_resource *tmpl,
1687 struct pipe_memory_object *pmemobj, uint64_t offset)
1688 {
1689 struct fd_screen *screen = fd_screen(pscreen);
1690 struct fd_memory_object *memobj = fd_memory_object(pmemobj);
1691 struct pipe_resource *prsc;
1692 struct fd_resource *rsc;
1693 struct fdl_metadata metadata;
1694 uint32_t size;
1695
1696 assert(memobj->bo);
1697 assert(offset == 0);
1698
1699 /* We shouldn't get a scanout buffer here. */
1700 assert(!(tmpl->bind & PIPE_BIND_SCANOUT));
1701
1702 uint64_t modifiers = DRM_FORMAT_MOD_INVALID;
1703 if (pmemobj->dedicated &&
1704 !fd_bo_get_metadata(memobj->bo, &metadata, sizeof(metadata))) {
1705 modifiers = metadata.modifier;
1706 } else if (tmpl->bind & PIPE_BIND_LINEAR) {
1707 modifiers = DRM_FORMAT_MOD_LINEAR;
1708 } else if (is_a6xx(screen) && tmpl->width0 >= FDL_MIN_UBWC_WIDTH) {
1709 modifiers = DRM_FORMAT_MOD_QCOM_COMPRESSED;
1710 }
1711
1712 /* Allocate new pipe resource. */
1713 prsc = fd_resource_allocate_and_resolve(pscreen, tmpl, &modifiers, 1, &size);
1714 if (!prsc)
1715 return NULL;
1716 rsc = fd_resource(prsc);
1717 rsc->b.is_shared = true;
1718
1719 /* bo's size has to be large enough, otherwise cleanup resource and fail
1720 * gracefully.
1721 */
1722 if (fd_bo_size(memobj->bo) < size) {
1723 fd_resource_destroy(pscreen, prsc);
1724 return NULL;
1725 }
1726
1727 /* Share the bo with the memory object. */
1728 fd_resource_set_bo(rsc, fd_bo_ref(memobj->bo));
1729
1730 return prsc;
1731 }
1732
1733 static struct pipe_memory_object *
1734 fd_memobj_create_from_handle(struct pipe_screen *pscreen,
1735 struct winsys_handle *whandle, bool dedicated)
1736 {
1737 struct fd_memory_object *memobj = CALLOC_STRUCT(fd_memory_object);
1738 if (!memobj)
1739 return NULL;
1740
1741 struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, whandle);
1742 if (!bo) {
1743 free(memobj);
1744 return NULL;
1745 }
1746
1747 memobj->b.dedicated = dedicated;
1748 memobj->bo = bo;
1749
1750 return &memobj->b;
1751 }
1752
1753 static void
1754 fd_memobj_destroy(struct pipe_screen *pscreen,
1755 struct pipe_memory_object *pmemobj)
1756 {
1757 struct fd_memory_object *memobj = fd_memory_object(pmemobj);
1758
1759 assert(memobj->bo);
1760 fd_bo_del(memobj->bo);
1761
1762 free(pmemobj);
1763 }
1764
1765 void
1766 fd_resource_screen_init(struct pipe_screen *pscreen)
1767 {
1768 struct fd_screen *screen = fd_screen(pscreen);
1769
1770 pscreen->resource_create = u_transfer_helper_resource_create;
1771 /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
1772 * variant:
1773 */
1774 pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
1775 pscreen->resource_from_handle = fd_resource_from_handle;
1776 pscreen->resource_get_handle = fd_resource_get_handle;
1777 pscreen->resource_get_param = fd_resource_get_param;
1778 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
1779
1780 pscreen->transfer_helper =
1781 u_transfer_helper_create(&transfer_vtbl,
1782 U_TRANSFER_HELPER_SEPARATE_Z32S8 |
1783 U_TRANSFER_HELPER_MSAA_MAP);
1784
1785 if (!screen->layout_resource_for_modifier)
1786 screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
1787
1788 /* GL_EXT_memory_object */
1789 pscreen->memobj_create_from_handle = fd_memobj_create_from_handle;
1790 pscreen->memobj_destroy = fd_memobj_destroy;
1791 pscreen->resource_from_memobj = fd_resource_from_memobj;
1792 }
1793
1794 static void
1795 fd_blit_pipe(struct pipe_context *pctx,
1796 const struct pipe_blit_info *blit_info) in_dt
1797 {
1798 /* wrap fd_blit to return void */
1799 fd_blit(pctx, blit_info);
1800 }
1801
1802 void
1803 fd_resource_context_init(struct pipe_context *pctx)
1804 {
1805 pctx->buffer_map = u_transfer_helper_transfer_map;
1806 pctx->texture_map = u_transfer_helper_transfer_map;
1807 pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1808 pctx->buffer_unmap = u_transfer_helper_transfer_unmap;
1809 pctx->texture_unmap = u_transfer_helper_transfer_unmap;
1810 pctx->buffer_subdata = u_default_buffer_subdata;
1811 pctx->texture_subdata = u_default_texture_subdata;
1812 pctx->create_surface = fd_create_surface;
1813 pctx->surface_destroy = fd_surface_destroy;
1814 pctx->resource_copy_region = fd_resource_copy_region;
1815 pctx->blit = fd_blit_pipe;
1816 pctx->flush_resource = fd_flush_resource;
1817 pctx->invalidate_resource = fd_invalidate_resource;
1818 pctx->get_sample_position = u_default_get_sample_position;
1819 }
1820