1 /*
2 * Copyright © 2014-2017 Broadcom
3 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "pipe/p_defines.h"
26 #include "util/u_memory.h"
27 #include "util/format/u_format.h"
28 #include "util/perf/cpu_trace.h"
29 #include "util/u_inlines.h"
30 #include "util/u_resource.h"
31 #include "util/u_surface.h"
32 #include "util/u_transfer_helper.h"
33 #include "util/u_upload_mgr.h"
34 #include "util/format/u_format_zs.h"
35 #include "util/u_drm.h"
36
37 #include "drm-uapi/drm_fourcc.h"
38 #include "v3d_screen.h"
39 #include "v3d_context.h"
40 #include "v3d_resource.h"
/* The packets used here are the same across V3D versions. */
42 #include "broadcom/cle/v3d_packet_v42_pack.h"
43
44 static void
v3d_debug_resource_layout(struct v3d_resource * rsc,const char * caller)45 v3d_debug_resource_layout(struct v3d_resource *rsc, const char *caller)
46 {
47 if (!V3D_DBG(SURFACE))
48 return;
49
50 struct pipe_resource *prsc = &rsc->base;
51
52 if (prsc->target == PIPE_BUFFER) {
53 fprintf(stderr,
54 "rsc %s %p (format %s), %dx%d buffer @0x%08x-0x%08x\n",
55 caller, rsc,
56 util_format_short_name(prsc->format),
57 prsc->width0, prsc->height0,
58 rsc->bo->offset,
59 rsc->bo->offset + rsc->bo->size - 1);
60 return;
61 }
62
63 static const char *const tiling_descriptions[] = {
64 [V3D_TILING_RASTER] = "R",
65 [V3D_TILING_LINEARTILE] = "LT",
66 [V3D_TILING_UBLINEAR_1_COLUMN] = "UB1",
67 [V3D_TILING_UBLINEAR_2_COLUMN] = "UB2",
68 [V3D_TILING_UIF_NO_XOR] = "UIF",
69 [V3D_TILING_UIF_XOR] = "UIF^",
70 };
71
72 for (int i = 0; i <= prsc->last_level; i++) {
73 struct v3d_resource_slice *slice = &rsc->slices[i];
74
75 int level_width = slice->stride / rsc->cpp;
76 int level_height = slice->padded_height;
77 int level_depth =
78 u_minify(util_next_power_of_two(prsc->depth0), i);
79
80 fprintf(stderr,
81 "rsc %s %p (format %s), %dx%d: "
82 "level %d (%s) %dx%dx%d -> %dx%dx%d, stride %d@0x%08x\n",
83 caller, rsc,
84 util_format_short_name(prsc->format),
85 prsc->width0, prsc->height0,
86 i, tiling_descriptions[slice->tiling],
87 u_minify(prsc->width0, i),
88 u_minify(prsc->height0, i),
89 u_minify(prsc->depth0, i),
90 level_width,
91 level_height,
92 level_depth,
93 slice->stride,
94 rsc->bo->offset + slice->offset);
95 }
96 }
97
98 static bool
v3d_resource_bo_alloc(struct v3d_resource * rsc)99 v3d_resource_bo_alloc(struct v3d_resource *rsc)
100 {
101 struct pipe_resource *prsc = &rsc->base;
102 struct pipe_screen *pscreen = prsc->screen;
103 struct v3d_bo *bo;
104
105 /* Buffers may be read using ldunifa, which prefetches the next
106 * 4 bytes after a read. If the buffer's size is exactly a multiple
107 * of a page size and the shader reads the last 4 bytes with ldunifa
108 * the prefetching would read out of bounds and cause an MMU error,
109 * so we allocate extra space to avoid kernel error spamming.
110 */
111 uint32_t size = rsc->size;
112 if (rsc->base.target == PIPE_BUFFER && (size % 4096 == 0))
113 size += 4;
114
115 bo = v3d_bo_alloc(v3d_screen(pscreen), size, "resource");
116 if (bo) {
117 v3d_bo_unreference(&rsc->bo);
118 rsc->bo = bo;
119 rsc->serial_id++;
120 v3d_debug_resource_layout(rsc, "alloc");
121 return true;
122 } else {
123 return false;
124 }
125 }
126
127 static void
v3d_resource_transfer_unmap(struct pipe_context * pctx,struct pipe_transfer * ptrans)128 v3d_resource_transfer_unmap(struct pipe_context *pctx,
129 struct pipe_transfer *ptrans)
130 {
131 struct v3d_context *v3d = v3d_context(pctx);
132 struct v3d_transfer *trans = v3d_transfer(ptrans);
133
134 if (trans->map) {
135 struct v3d_resource *rsc = v3d_resource(ptrans->resource);
136 struct v3d_resource_slice *slice = &rsc->slices[ptrans->level];
137
138 if (ptrans->usage & PIPE_MAP_WRITE) {
139 for (int z = 0; z < ptrans->box.depth; z++) {
140 void *dst = rsc->bo->map +
141 v3d_layer_offset(&rsc->base,
142 ptrans->level,
143 ptrans->box.z + z);
144 v3d_store_tiled_image(dst,
145 slice->stride,
146 (trans->map +
147 ptrans->stride *
148 ptrans->box.height * z),
149 ptrans->stride,
150 slice->tiling, rsc->cpp,
151 slice->padded_height,
152 &ptrans->box);
153 }
154 }
155 free(trans->map);
156 }
157
158 pipe_resource_reference(&ptrans->resource, NULL);
159 slab_free(&v3d->transfer_pool, ptrans);
160 }
161
162 static void
rebind_sampler_views(struct v3d_context * v3d,struct v3d_resource * rsc)163 rebind_sampler_views(struct v3d_context *v3d,
164 struct v3d_resource *rsc)
165 {
166 for (int st = 0; st < PIPE_SHADER_TYPES; st++) {
167 struct v3d_texture_stateobj *tex = v3d->tex + st;
168
169 for (unsigned i = 0; i < tex->num_textures; i++) {
170 struct pipe_sampler_view *psview = tex->textures[i];
171
172 if (psview->texture != &rsc->base)
173 continue;
174
175 struct v3d_sampler_view *sview =
176 v3d_sampler_view(psview);
177
178 if (sview->serial_id == rsc->serial_id)
179 continue;
180
181 struct v3d_device_info *devinfo =
182 &v3d->screen->devinfo;
183
184 v3d_X(devinfo, create_texture_shader_state_bo)(v3d, sview);
185
186 v3d_flag_dirty_sampler_state(v3d, st);
187 }
188 }
189 }
190
/* Prepares a resource for an upcoming CPU map with the given usage
 * flags.
 *
 * For whole-resource-discard maps, the BO is swapped for a fresh one
 * (avoiding any sync against GPU work on the old BO), and any state
 * that cached the old BO (vertex buffers, UBOs, sampler views) is
 * flagged for re-emission.  For synchronized maps, jobs that conflict
 * with the map are flushed first.  Write maps are counted so imported
 * or shadowed copies know the contents changed.
 */
static void
v3d_map_usage_prep(struct pipe_context *pctx,
                   struct pipe_resource *prsc,
                   unsigned usage)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_resource *rsc = v3d_resource(prsc);

        MESA_TRACE_FUNC();

        if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
                if (v3d_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers
                         * or UBOs, make sure we re-emit vertex buffer state
                         * or uniforms.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
                                v3d->dirty |= V3D_DIRTY_VTXBUF;
                        if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
                                v3d->dirty |= V3D_DIRTY_CONSTBUF;
                        /* Since we are changing the texture BO we need to
                         * update any bound samplers to point to the new
                         * BO. Notice we can have samplers that are not
                         * currently bound to the state that won't be
                         * updated. These will be fixed when they are bound in
                         * v3d_set_sampler_views.
                         */
                        if (prsc->bind & PIPE_BIND_SAMPLER_VIEW)
                                rebind_sampler_views(v3d, rsc);
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
                        v3d_flush_jobs_reading_resource(v3d, prsc,
                                                        V3D_FLUSH_DEFAULT,
                                                        false);
                }
        } else if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * have to flush the CL first. If we're only reading, we need
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_MAP_WRITE) {
                        v3d_flush_jobs_reading_resource(v3d, prsc,
                                                        V3D_FLUSH_ALWAYS,
                                                        false);
                } else {
                        v3d_flush_jobs_writing_resource(v3d, prsc,
                                                        V3D_FLUSH_ALWAYS,
                                                        false);
                }
        }

        if (usage & PIPE_MAP_WRITE) {
                /* Track the CPU write: bumps the write counter used by
                 * shadow-texture updates and marks all buffers initialized.
                 */
                rsc->writes++;
                rsc->graphics_written = true;
                rsc->initialized_buffers = ~0;
        }
}
250
251 static void *
v3d_resource_transfer_map(struct pipe_context * pctx,struct pipe_resource * prsc,unsigned level,unsigned usage,const struct pipe_box * box,struct pipe_transfer ** pptrans)252 v3d_resource_transfer_map(struct pipe_context *pctx,
253 struct pipe_resource *prsc,
254 unsigned level, unsigned usage,
255 const struct pipe_box *box,
256 struct pipe_transfer **pptrans)
257 {
258 struct v3d_context *v3d = v3d_context(pctx);
259 struct v3d_resource *rsc = v3d_resource(prsc);
260 struct v3d_transfer *trans;
261 struct pipe_transfer *ptrans;
262 enum pipe_format format = prsc->format;
263 char *buf;
264
265 /* MSAA maps should have been handled by u_transfer_helper. */
266 assert(prsc->nr_samples <= 1);
267
268 /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
269 * being mapped.
270 */
271 if ((usage & PIPE_MAP_DISCARD_RANGE) &&
272 !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
273 !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) &&
274 prsc->last_level == 0 &&
275 prsc->width0 == box->width &&
276 prsc->height0 == box->height &&
277 prsc->depth0 == box->depth &&
278 prsc->array_size == 1 &&
279 rsc->bo->private) {
280 usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
281 }
282
283 v3d_map_usage_prep(pctx, prsc, usage);
284
285 trans = slab_zalloc(&v3d->transfer_pool);
286 if (!trans)
287 return NULL;
288
289 /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */
290
291 ptrans = &trans->base;
292
293 pipe_resource_reference(&ptrans->resource, prsc);
294 ptrans->level = level;
295 ptrans->usage = usage;
296 ptrans->box = *box;
297
298 /* Note that the current kernel implementation is synchronous, so no
299 * need to do syncing stuff here yet.
300 */
301
302 if (usage & PIPE_MAP_UNSYNCHRONIZED)
303 buf = v3d_bo_map_unsynchronized(rsc->bo);
304 else
305 buf = v3d_bo_map(rsc->bo);
306 if (!buf) {
307 fprintf(stderr, "Failed to map bo\n");
308 goto fail;
309 }
310
311 *pptrans = ptrans;
312
313 /* Our load/store routines work on entire compressed blocks. */
314 u_box_pixels_to_blocks(&ptrans->box, &ptrans->box, format);
315
316 struct v3d_resource_slice *slice = &rsc->slices[level];
317 if (rsc->tiled) {
318 /* No direct mappings of tiled, since we need to manually
319 * tile/untile.
320 */
321 if (usage & PIPE_MAP_DIRECTLY)
322 return NULL;
323
324 ptrans->stride = ptrans->box.width * rsc->cpp;
325 ptrans->layer_stride = ptrans->stride * ptrans->box.height;
326
327 trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);
328
329 if (usage & PIPE_MAP_READ) {
330 for (int z = 0; z < ptrans->box.depth; z++) {
331 void *src = rsc->bo->map +
332 v3d_layer_offset(&rsc->base,
333 ptrans->level,
334 ptrans->box.z + z);
335 v3d_load_tiled_image((trans->map +
336 ptrans->stride *
337 ptrans->box.height * z),
338 ptrans->stride,
339 src,
340 slice->stride,
341 slice->tiling, rsc->cpp,
342 slice->padded_height,
343 &ptrans->box);
344 }
345 }
346 return trans->map;
347 } else {
348 ptrans->stride = slice->stride;
349 ptrans->layer_stride = rsc->cube_map_stride;
350
351 return buf + slice->offset +
352 ptrans->box.y * ptrans->stride +
353 ptrans->box.x * rsc->cpp +
354 ptrans->box.z * rsc->cube_map_stride;
355 }
356
357
358 fail:
359 v3d_resource_transfer_unmap(pctx, ptrans);
360 return NULL;
361 }
362
363 static void
v3d_texture_subdata(struct pipe_context * pctx,struct pipe_resource * prsc,unsigned level,unsigned usage,const struct pipe_box * box,const void * data,unsigned stride,uintptr_t layer_stride)364 v3d_texture_subdata(struct pipe_context *pctx,
365 struct pipe_resource *prsc,
366 unsigned level,
367 unsigned usage,
368 const struct pipe_box *box,
369 const void *data,
370 unsigned stride,
371 uintptr_t layer_stride)
372 {
373 struct v3d_resource *rsc = v3d_resource(prsc);
374 struct v3d_resource_slice *slice = &rsc->slices[level];
375
376 /* For a direct mapping, we can just take the u_transfer path. */
377 if (!rsc->tiled) {
378 return u_default_texture_subdata(pctx, prsc, level, usage, box,
379 data, stride, layer_stride);
380 }
381
382 /* Otherwise, map and store the texture data directly into the tiled
383 * texture. Note that gallium's texture_subdata may be called with
384 * obvious usage flags missing!
385 */
386 v3d_map_usage_prep(pctx, prsc, usage | (PIPE_MAP_WRITE |
387 PIPE_MAP_DISCARD_RANGE));
388
389 void *buf;
390 if (usage & PIPE_MAP_UNSYNCHRONIZED)
391 buf = v3d_bo_map_unsynchronized(rsc->bo);
392 else
393 buf = v3d_bo_map(rsc->bo);
394
395 for (int i = 0; i < box->depth; i++) {
396 v3d_store_tiled_image(buf +
397 v3d_layer_offset(&rsc->base,
398 level,
399 box->z + i),
400 slice->stride,
401 (void *)data + layer_stride * i,
402 stride,
403 slice->tiling, rsc->cpp, slice->padded_height,
404 box);
405 }
406 }
407
408 static void
v3d_resource_destroy(struct pipe_screen * pscreen,struct pipe_resource * prsc)409 v3d_resource_destroy(struct pipe_screen *pscreen,
410 struct pipe_resource *prsc)
411 {
412 struct v3d_screen *screen = v3d_screen(pscreen);
413 struct v3d_resource *rsc = v3d_resource(prsc);
414
415 if (rsc->scanout)
416 renderonly_scanout_destroy(rsc->scanout, screen->ro);
417
418 v3d_bo_unreference(&rsc->bo);
419 free(rsc);
420 }
421
422 static uint64_t
v3d_resource_modifier(struct v3d_resource * rsc)423 v3d_resource_modifier(struct v3d_resource *rsc)
424 {
425 if (rsc->tiled) {
426 /* A shared tiled buffer should always be allocated as UIF,
427 * not UBLINEAR or LT.
428 */
429 assert(rsc->slices[0].tiling == V3D_TILING_UIF_XOR ||
430 rsc->slices[0].tiling == V3D_TILING_UIF_NO_XOR);
431 return DRM_FORMAT_MOD_BROADCOM_UIF;
432 } else {
433 return DRM_FORMAT_MOD_LINEAR;
434 }
435 }
436
437 static bool
v3d_resource_get_handle(struct pipe_screen * pscreen,struct pipe_context * pctx,struct pipe_resource * prsc,struct winsys_handle * whandle,unsigned usage)438 v3d_resource_get_handle(struct pipe_screen *pscreen,
439 struct pipe_context *pctx,
440 struct pipe_resource *prsc,
441 struct winsys_handle *whandle,
442 unsigned usage)
443 {
444 struct v3d_screen *screen = v3d_screen(pscreen);
445 struct v3d_resource *rsc = v3d_resource(prsc);
446 struct v3d_bo *bo = rsc->bo;
447
448 whandle->stride = rsc->slices[0].stride;
449 whandle->offset = 0;
450 whandle->modifier = v3d_resource_modifier(rsc);
451
452 /* If we're passing some reference to our BO out to some other part of
453 * the system, then we can't do any optimizations about only us being
454 * the ones seeing it (like BO caching).
455 */
456 bo->private = false;
457
458 switch (whandle->type) {
459 case WINSYS_HANDLE_TYPE_SHARED:
460 return v3d_bo_flink(bo, &whandle->handle);
461 case WINSYS_HANDLE_TYPE_KMS:
462 if (screen->ro) {
463 if (renderonly_get_handle(rsc->scanout, whandle)) {
464 whandle->stride = rsc->slices[0].stride;
465 return true;
466 }
467 return false;
468 }
469 whandle->handle = bo->handle;
470 return true;
471 case WINSYS_HANDLE_TYPE_FD:
472 whandle->handle = v3d_bo_get_dmabuf(bo);
473 return whandle->handle != -1;
474 }
475
476 return false;
477 }
478
479 static bool
v3d_resource_get_param(struct pipe_screen * pscreen,struct pipe_context * pctx,struct pipe_resource * prsc,unsigned plane,unsigned layer,unsigned level,enum pipe_resource_param param,unsigned usage,uint64_t * value)480 v3d_resource_get_param(struct pipe_screen *pscreen,
481 struct pipe_context *pctx, struct pipe_resource *prsc,
482 unsigned plane, unsigned layer, unsigned level,
483 enum pipe_resource_param param,
484 unsigned usage, uint64_t *value)
485 {
486 struct v3d_resource *rsc =
487 (struct v3d_resource *)util_resource_at_index(prsc, plane);
488
489 switch (param) {
490 case PIPE_RESOURCE_PARAM_STRIDE:
491 *value = rsc->slices[level].stride;
492 return true;
493 case PIPE_RESOURCE_PARAM_OFFSET:
494 *value = rsc->slices[level].offset;
495 return true;
496 case PIPE_RESOURCE_PARAM_MODIFIER:
497 *value = v3d_resource_modifier(rsc);
498 return true;
499 case PIPE_RESOURCE_PARAM_NPLANES:
500 *value = util_resource_num(prsc);
501 return true;
502 default:
503 return false;
504 }
505 }
506
/* UIF-block rows per UIF page. */
#define PAGE_UB_ROWS (V3D_UIFCFG_PAGE_SIZE / V3D_UIFBLOCK_ROW_SIZE)
/* 1.5 pages worth of UIF-block rows (the half-page offset target used
 * by the UB padding heuristic below).
 */
#define PAGE_UB_ROWS_TIMES_1_5 ((PAGE_UB_ROWS * 3) >> 1)
/* UIF-block rows that fit in the whole page cache. */
#define PAGE_CACHE_UB_ROWS (V3D_PAGE_CACHE_SIZE / V3D_UIFBLOCK_ROW_SIZE)
/* Page-cache rows minus 1.5 pages of rows. */
#define PAGE_CACHE_MINUS_1_5_UB_ROWS (PAGE_CACHE_UB_ROWS - PAGE_UB_ROWS_TIMES_1_5)
511
512 /**
513 * Computes the HW's UIFblock padding for a given height/cpp.
514 *
515 * The goal of the padding is to keep pages of the same color (bank number) at
516 * least half a page away from each other vertically when crossing between
517 * between columns of UIF blocks.
518 */
519 static uint32_t
v3d_get_ub_pad(struct v3d_resource * rsc,uint32_t height)520 v3d_get_ub_pad(struct v3d_resource *rsc, uint32_t height)
521 {
522 uint32_t utile_h = v3d_utile_height(rsc->cpp);
523 uint32_t uif_block_h = utile_h * 2;
524 uint32_t height_ub = height / uif_block_h;
525
526 uint32_t height_offset_in_pc = height_ub % PAGE_CACHE_UB_ROWS;
527
528 /* For the perfectly-aligned-for-UIF-XOR case, don't add any pad. */
529 if (height_offset_in_pc == 0)
530 return 0;
531
532 /* Try padding up to where we're offset by at least half a page. */
533 if (height_offset_in_pc < PAGE_UB_ROWS_TIMES_1_5) {
534 /* If we fit entirely in the page cache, don't pad. */
535 if (height_ub < PAGE_CACHE_UB_ROWS)
536 return 0;
537 else
538 return PAGE_UB_ROWS_TIMES_1_5 - height_offset_in_pc;
539 }
540
541 /* If we're close to being aligned to page cache size, then round up
542 * and rely on XOR.
543 */
544 if (height_offset_in_pc > PAGE_CACHE_MINUS_1_5_UB_ROWS)
545 return PAGE_CACHE_UB_ROWS - height_offset_in_pc;
546
547 /* Otherwise, we're far enough away (top and bottom) to not need any
548 * padding.
549 */
550 return 0;
551 }
552
/**
 * Computes the dimension with required padding for mip levels.
 *
 * This padding is required for width and height dimensions when the mip
 * level is greater than 1, and for the depth dimension when the mip level
 * is greater than 0. This function expects to be passed a mip level >= 1.
 *
 * Note: Hardware documentation seems to suggest that the third argument
 * should be the utile dimensions, but through testing it was found that
 * the block dimension should be used instead.
 */
static uint32_t
v3d_get_dimension_mpad(uint32_t dimension, uint32_t level, uint32_t block_dimension)
{
        assert(level >= 1);

        /* Round the level-1 size, measured in blocks, up to a power of
         * two, then minify that padded size down to the requested level.
         */
        uint32_t level1_blocks =
                util_next_power_of_two(DIV_ROUND_UP(u_minify(dimension, 1),
                                                    block_dimension));
        return u_minify(block_dimension * level1_blocks, level - 1);
}
573
/* Lays out every miplevel slice of a resource: picks a tiling mode per
 * level, computes padded dimensions, stride and offset for each slice,
 * and fills in rsc->size and rsc->cube_map_stride.
 *
 * winsys_stride, when non-zero, overrides each slice's computed stride
 * (used for imported linear buffers).  uif_top forces level 0 into a
 * UIF layout even when its size would otherwise pick LT/UBLINEAR.
 */
static void
v3d_setup_slices(struct v3d_resource *rsc, uint32_t winsys_stride,
                 bool uif_top)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        uint32_t height = prsc->height0;
        uint32_t depth = prsc->depth0;
        uint32_t offset = 0;
        uint32_t utile_w = v3d_utile_width(rsc->cpp);
        uint32_t utile_h = v3d_utile_height(rsc->cpp);
        uint32_t uif_block_w = utile_w * 2;
        uint32_t uif_block_h = utile_h * 2;
        uint32_t block_width = util_format_get_blockwidth(prsc->format);
        uint32_t block_height = util_format_get_blockheight(prsc->format);

        /* Note that power-of-two padding is based on level 1.  These are not
         * equivalent to just util_next_power_of_two(dimension), because at a
         * level 0 dimension of 9, the level 1 power-of-two padded value is 4,
         * not 8. Additionally the pot padding is based on the block size.
         */
        uint32_t pot_width = 2 * v3d_get_dimension_mpad(width,
                                                        1,
                                                        block_width);
        uint32_t pot_height = 2 * v3d_get_dimension_mpad(height,
                                                         1,
                                                         block_height);
        uint32_t pot_depth = 2 * v3d_get_dimension_mpad(depth,
                                                        1,
                                                        1);
        bool msaa = prsc->nr_samples > 1;

        /* MSAA textures/renderbuffers are always laid out as single-level
         * UIF.
         */
        uif_top |= msaa;

        /* Check some easy mistakes to make in a resource_create() call that
         * will break our setup.
         */
        assert(prsc->array_size != 0);
        assert(prsc->depth0 != 0);

        /* Slices are laid out from the smallest level up, so walk the
         * levels from last to first accumulating offsets.
         */
        for (int i = prsc->last_level; i >= 0; i--) {
                struct v3d_resource_slice *slice = &rsc->slices[i];

                uint32_t level_width, level_height, level_depth;
                /* Levels >= 2 use the power-of-two-padded width/height;
                 * levels >= 1 use the padded depth.
                 */
                if (i < 2) {
                        level_width = u_minify(width, i);
                        level_height = u_minify(height, i);
                } else {
                        level_width = u_minify(pot_width, i);
                        level_height = u_minify(pot_height, i);
                }
                if (i < 1)
                        level_depth = u_minify(depth, i);
                else
                        level_depth = u_minify(pot_depth, i);

                /* MSAA surfaces store samples at 2x2 the pixel size. */
                if (msaa) {
                        level_width *= 2;
                        level_height *= 2;
                }

                /* Convert from texels to compressed-format blocks. */
                level_width = DIV_ROUND_UP(level_width, block_width);
                level_height = DIV_ROUND_UP(level_height, block_height);

                if (!rsc->tiled) {
                        slice->tiling = V3D_TILING_RASTER;
                        if (prsc->target == PIPE_TEXTURE_1D ||
                            prsc->target == PIPE_TEXTURE_1D_ARRAY)
                                level_width = align(level_width, 64 / rsc->cpp);
                } else {
                        /* Pick the smallest tiling mode that fits this
                         * level, unless uif_top pins level 0 to UIF.
                         */
                        if ((i != 0 || !uif_top) &&
                            (level_width <= utile_w ||
                             level_height <= utile_h)) {
                                slice->tiling = V3D_TILING_LINEARTILE;
                                level_width = align(level_width, utile_w);
                                level_height = align(level_height, utile_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= uif_block_w) {
                                slice->tiling = V3D_TILING_UBLINEAR_1_COLUMN;
                                level_width = align(level_width, uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else if ((i != 0 || !uif_top) &&
                                   level_width <= 2 * uif_block_w) {
                                slice->tiling = V3D_TILING_UBLINEAR_2_COLUMN;
                                level_width = align(level_width, 2 * uif_block_w);
                                level_height = align(level_height, uif_block_h);
                        } else {
                                /* We align the width to a 4-block column of
                                 * UIF blocks, but we only align height to UIF
                                 * blocks.
                                 */
                                level_width = align(level_width,
                                                    4 * uif_block_w);
                                level_height = align(level_height,
                                                     uif_block_h);

                                slice->ub_pad = v3d_get_ub_pad(rsc,
                                                               level_height);
                                level_height += slice->ub_pad * uif_block_h;

                                /* If the padding set us to be aligned to
                                 * the page cache size, then the HW will use
                                 * the XOR bit on odd columns to get us
                                 * perfectly misaligned
                                 */
                                if ((level_height / uif_block_h) %
                                    (V3D_PAGE_CACHE_SIZE /
                                     V3D_UIFBLOCK_ROW_SIZE) == 0) {
                                        slice->tiling = V3D_TILING_UIF_XOR;
                                } else {
                                        slice->tiling = V3D_TILING_UIF_NO_XOR;
                                }
                        }
                }

                slice->offset = offset;
                if (winsys_stride)
                        slice->stride = winsys_stride;
                else
                        slice->stride = level_width * rsc->cpp;
                slice->padded_height = level_height;
                slice->size = level_height * slice->stride;

                uint32_t slice_total_size = slice->size * level_depth;

                /* The HW aligns level 1's base to a page if any of level 1 or
                 * below could be UIF XOR. The lower levels then inherit the
                 * alignment for as long as necessary, thanks to being power of
                 * two aligned.
                 */
                if (i == 1 &&
                    level_width > 4 * uif_block_w &&
                    level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
                        slice_total_size = align(slice_total_size,
                                                 V3D_UIFCFG_PAGE_SIZE);
                }

                offset += slice_total_size;

        }
        rsc->size = offset;

        /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
         * needs to be aligned to utile boundaries. Since tiles are laid out
         * from small to big in memory, we need to align the later UIF slices
         * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
         * slices.
         *
         * We additionally align to 4k, which improves UIF XOR performance.
         */
        uint32_t page_align_offset = (align(rsc->slices[0].offset, 4096) -
                                      rsc->slices[0].offset);
        if (page_align_offset) {
                rsc->size += page_align_offset;
                for (int i = 0; i <= prsc->last_level; i++)
                        rsc->slices[i].offset += page_align_offset;
        }

        /* Arrays and cube textures have a stride which is the distance from
         * one full mipmap tree to the next (64b aligned). For 3D textures,
         * we need to program the stride between slices of miplevel 0.
         */
        if (prsc->target != PIPE_TEXTURE_3D) {
                rsc->cube_map_stride = align(rsc->slices[0].offset +
                                             rsc->slices[0].size, 64);
                rsc->size += rsc->cube_map_stride * (prsc->array_size - 1);
        } else {
                rsc->cube_map_stride = rsc->slices[0].size;
        }
}
747
748 uint32_t
v3d_layer_offset(struct pipe_resource * prsc,uint32_t level,uint32_t layer)749 v3d_layer_offset(struct pipe_resource *prsc, uint32_t level, uint32_t layer)
750 {
751 struct v3d_resource *rsc = v3d_resource(prsc);
752 struct v3d_resource_slice *slice = &rsc->slices[level];
753
754 if (prsc->target == PIPE_TEXTURE_3D)
755 return slice->offset + layer * slice->size;
756 else
757 return slice->offset + layer * rsc->cube_map_stride;
758 }
759
760 static struct v3d_resource *
v3d_resource_setup(struct pipe_screen * pscreen,const struct pipe_resource * tmpl)761 v3d_resource_setup(struct pipe_screen *pscreen,
762 const struct pipe_resource *tmpl)
763 {
764 struct v3d_resource *rsc = CALLOC_STRUCT(v3d_resource);
765
766 if (!rsc)
767 return NULL;
768 struct pipe_resource *prsc = &rsc->base;
769
770 *prsc = *tmpl;
771
772 pipe_reference_init(&prsc->reference, 1);
773 prsc->screen = pscreen;
774
775 rsc->cpp = util_format_get_blocksize(prsc->format);
776 rsc->serial_id++;
777
778 assert(rsc->cpp);
779
780 return rsc;
781 }
782
783 static struct pipe_resource *
v3d_resource_create_with_modifiers(struct pipe_screen * pscreen,const struct pipe_resource * tmpl,const uint64_t * modifiers,int count)784 v3d_resource_create_with_modifiers(struct pipe_screen *pscreen,
785 const struct pipe_resource *tmpl,
786 const uint64_t *modifiers,
787 int count)
788 {
789 struct v3d_screen *screen = v3d_screen(pscreen);
790
791 struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);
792 struct pipe_resource *prsc = &rsc->base;
793 /* Use a tiled layout if we can, for better 3D performance. */
794 bool should_tile = true;
795
796 assert(tmpl->target != PIPE_BUFFER ||
797 (tmpl->format == PIPE_FORMAT_NONE ||
798 util_format_get_blocksize(tmpl->format) == 1));
799
800 /* VBOs/PBOs/Texture Buffer Objects are untiled (and 1 height). */
801 if (tmpl->target == PIPE_BUFFER)
802 should_tile = false;
803
804 /* Cursors are always linear, and the user can request linear as well.
805 */
806 if (tmpl->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
807 should_tile = false;
808
809 /* 1D and 1D_ARRAY textures are always raster-order. */
810 if (tmpl->target == PIPE_TEXTURE_1D ||
811 tmpl->target == PIPE_TEXTURE_1D_ARRAY)
812 should_tile = false;
813
814 /* Scanout BOs for simulator need to be linear for interaction with
815 * i965.
816 */
817 #if USE_V3D_SIMULATOR
818 if (tmpl->bind & PIPE_BIND_SHARED)
819 should_tile = false;
820 #endif
821
822 /* If using the old-school SCANOUT flag, we don't know what the screen
823 * might support other than linear. Just force linear.
824 */
825 if (tmpl->bind & PIPE_BIND_SCANOUT)
826 should_tile = false;
827
828 /* No user-specified modifier; determine our own. */
829 if (count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
830 rsc->tiled = should_tile;
831 } else if (should_tile &&
832 drm_find_modifier(DRM_FORMAT_MOD_BROADCOM_UIF,
833 modifiers, count)) {
834 rsc->tiled = true;
835 } else if (drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count)) {
836 rsc->tiled = false;
837 } else {
838 fprintf(stderr, "Unsupported modifier requested\n");
839 goto fail;
840 }
841
842 rsc->internal_format = prsc->format;
843
844 v3d_setup_slices(rsc, 0, tmpl->bind & PIPE_BIND_SHARED);
845
846 if (screen->ro && (tmpl->bind & PIPE_BIND_SCANOUT)) {
847 assert(!rsc->tiled);
848 struct winsys_handle handle;
849 struct pipe_resource scanout_tmpl = {
850 .target = prsc->target,
851 .format = PIPE_FORMAT_RGBA8888_UNORM,
852 .width0 = 1024, /* one page */
853 .height0 = align(rsc->size, 4096) / 4096,
854 .depth0 = 1,
855 .array_size = 1,
856 };
857
858 rsc->scanout =
859 renderonly_scanout_for_resource(&scanout_tmpl,
860 screen->ro,
861 &handle);
862
863 if (!rsc->scanout) {
864 fprintf(stderr, "Failed to create scanout resource\n");
865 goto fail;
866 }
867 assert(handle.type == WINSYS_HANDLE_TYPE_FD);
868 rsc->bo = v3d_bo_open_dmabuf(screen, handle.handle);
869 close(handle.handle);
870
871 if (!rsc->bo)
872 goto fail;
873
874 v3d_debug_resource_layout(rsc, "renderonly");
875
876 return prsc;
877 } else {
878 if (!v3d_resource_bo_alloc(rsc))
879 goto fail;
880 }
881
882 return prsc;
883 fail:
884 v3d_resource_destroy(pscreen, prsc);
885 return NULL;
886 }
887
888 struct pipe_resource *
v3d_resource_create(struct pipe_screen * pscreen,const struct pipe_resource * tmpl)889 v3d_resource_create(struct pipe_screen *pscreen,
890 const struct pipe_resource *tmpl)
891 {
892 const uint64_t mod = DRM_FORMAT_MOD_INVALID;
893 return v3d_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
894 }
895
896 static struct pipe_resource *
v3d_resource_from_handle(struct pipe_screen * pscreen,const struct pipe_resource * tmpl,struct winsys_handle * whandle,unsigned usage)897 v3d_resource_from_handle(struct pipe_screen *pscreen,
898 const struct pipe_resource *tmpl,
899 struct winsys_handle *whandle,
900 unsigned usage)
901 {
902 struct v3d_screen *screen = v3d_screen(pscreen);
903 struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);
904 struct pipe_resource *prsc = &rsc->base;
905 struct v3d_resource_slice *slice = &rsc->slices[0];
906
907 if (!rsc)
908 return NULL;
909
910 switch (whandle->modifier) {
911 case DRM_FORMAT_MOD_LINEAR:
912 rsc->tiled = false;
913 break;
914 case DRM_FORMAT_MOD_BROADCOM_UIF:
915 rsc->tiled = true;
916 break;
917 case DRM_FORMAT_MOD_INVALID:
918 rsc->tiled = false;
919 break;
920 case DRM_FORMAT_MOD_BROADCOM_SAND128:
921 rsc->tiled = false;
922 rsc->sand_col128_stride = whandle->stride;
923 break;
924 default:
925 switch(fourcc_mod_broadcom_mod(whandle->modifier)) {
926 case DRM_FORMAT_MOD_BROADCOM_SAND128:
927 rsc->tiled = false;
928 rsc->sand_col128_stride =
929 fourcc_mod_broadcom_param(whandle->modifier);
930 break;
931 default:
932 fprintf(stderr,
933 "Attempt to import unsupported modifier 0x%llx\n",
934 (long long)whandle->modifier);
935 goto fail;
936 }
937 }
938
939 switch (whandle->type) {
940 case WINSYS_HANDLE_TYPE_SHARED:
941 rsc->bo = v3d_bo_open_name(screen, whandle->handle);
942 break;
943 case WINSYS_HANDLE_TYPE_FD:
944 rsc->bo = v3d_bo_open_dmabuf(screen, whandle->handle);
945 break;
946 default:
947 fprintf(stderr,
948 "Attempt to import unsupported handle type %d\n",
949 whandle->type);
950 goto fail;
951 }
952
953 if (!rsc->bo)
954 goto fail;
955
956 rsc->internal_format = prsc->format;
957
958 v3d_setup_slices(rsc, whandle->stride, true);
959 v3d_debug_resource_layout(rsc, "import");
960
961 if (whandle->offset != 0) {
962 if (rsc->tiled) {
963 fprintf(stderr,
964 "Attempt to import unsupported winsys offset %u\n",
965 whandle->offset);
966 goto fail;
967 }
968 rsc->slices[0].offset += whandle->offset;
969
970 if (rsc->slices[0].offset + rsc->slices[0].size >
971 rsc->bo->size) {
972 fprintf(stderr, "Attempt to import "
973 "with overflowing offset (%d + %d > %d)\n",
974 whandle->offset,
975 rsc->slices[0].size,
976 rsc->bo->size);
977 goto fail;
978 }
979 }
980
981 if (screen->ro) {
982 /* Make sure that renderonly has a handle to our buffer in the
983 * display's fd, so that a later renderonly_get_handle()
984 * returns correct handles or GEM names.
985 */
986 rsc->scanout =
987 renderonly_create_gpu_import_for_resource(prsc,
988 screen->ro,
989 NULL);
990 }
991
992 if (rsc->tiled && whandle->stride != slice->stride) {
993 static bool warned = false;
994 if (!warned) {
995 warned = true;
996 fprintf(stderr,
997 "Attempting to import %dx%d %s with "
998 "unsupported stride %d instead of %d\n",
999 prsc->width0, prsc->height0,
1000 util_format_short_name(prsc->format),
1001 whandle->stride,
1002 slice->stride);
1003 }
1004 goto fail;
1005 } else if (!rsc->tiled) {
1006 slice->stride = whandle->stride;
1007 }
1008
1009 /* Prevent implicit clearing of the imported buffer contents. */
1010 rsc->writes = 1;
1011
1012 return prsc;
1013
1014 fail:
1015 v3d_resource_destroy(pscreen, prsc);
1016 return NULL;
1017 }
1018
1019 void
v3d_update_shadow_texture(struct pipe_context * pctx,struct pipe_sampler_view * pview)1020 v3d_update_shadow_texture(struct pipe_context *pctx,
1021 struct pipe_sampler_view *pview)
1022 {
1023 struct v3d_context *v3d = v3d_context(pctx);
1024 struct v3d_sampler_view *view = v3d_sampler_view(pview);
1025 struct v3d_resource *shadow = v3d_resource(view->texture);
1026 struct v3d_resource *orig = v3d_resource(pview->texture);
1027
1028 assert(view->texture != pview->texture);
1029
1030 if (shadow->writes == orig->writes && orig->bo->private)
1031 return;
1032
1033 perf_debug("Updating %dx%d@%d shadow for linear texture\n",
1034 orig->base.width0, orig->base.height0,
1035 pview->u.tex.first_level);
1036
1037 for (int i = 0; i <= shadow->base.last_level; i++) {
1038 unsigned width = u_minify(shadow->base.width0, i);
1039 unsigned height = u_minify(shadow->base.height0, i);
1040 struct pipe_blit_info info = {
1041 .dst = {
1042 .resource = &shadow->base,
1043 .level = i,
1044 .box = {
1045 .x = 0,
1046 .y = 0,
1047 .z = 0,
1048 .width = width,
1049 .height = height,
1050 .depth = 1,
1051 },
1052 .format = shadow->base.format,
1053 },
1054 .src = {
1055 .resource = &orig->base,
1056 .level = pview->u.tex.first_level + i,
1057 .box = {
1058 .x = 0,
1059 .y = 0,
1060 .z = 0,
1061 .width = width,
1062 .height = height,
1063 .depth = 1,
1064 },
1065 .format = orig->base.format,
1066 },
1067 .mask = util_format_get_mask(orig->base.format),
1068 };
1069 pctx->blit(pctx, &info);
1070 }
1071
1072 shadow->writes = orig->writes;
1073 }
1074
/* Creates a pipe_surface wrapping one mip level / layer range of a resource,
 * precomputing the derived state (byte offset, tiling mode, hardware
 * render-target format, internal type/bpp, UIF padded height) consumed when
 * rendering to the surface.
 */
static struct pipe_surface *
v3d_create_surface(struct pipe_context *pctx,
                   struct pipe_resource *ptex,
                   const struct pipe_surface *surf_tmpl)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_screen *screen = v3d->screen;
        struct v3d_device_info *devinfo = &screen->devinfo;
        struct v3d_surface *surface = CALLOC_STRUCT(v3d_surface);
        struct v3d_resource *rsc = v3d_resource(ptex);

        if (!surface)
                return NULL;

        struct pipe_surface *psurf = &surface->base;
        unsigned level = surf_tmpl->u.tex.level;
        struct v3d_resource_slice *slice = &rsc->slices[level];

        pipe_reference_init(&psurf->reference, 1);
        pipe_resource_reference(&psurf->texture, ptex);

        psurf->context = pctx;
        psurf->format = surf_tmpl->format;
        psurf->width = u_minify(ptex->width0, level);
        psurf->height = u_minify(ptex->height0, level);
        psurf->u.tex.level = level;
        psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
        psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;

        /* Byte offset of the first layer of this level within the BO. */
        surface->offset = v3d_layer_offset(ptex, level,
                                           psurf->u.tex.first_layer);
        surface->tiling = slice->tiling;

        surface->format = v3d_get_rt_format(devinfo, psurf->format);

        const struct util_format_description *desc =
                util_format_description(psurf->format);

        /* Red channel sourced from the third component means a BGRA-style
         * ordering that needs R/B swapped on output; B5G6R5 is exempt.
         */
        surface->swap_rb = (desc->swizzle[0] == PIPE_SWIZZLE_Z &&
                            psurf->format != PIPE_FORMAT_B5G6R5_UNORM);

        if (util_format_is_depth_or_stencil(psurf->format)) {
                /* Depth/stencil internal types are fixed by format. */
                switch (psurf->format) {
                case PIPE_FORMAT_Z16_UNORM:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_16;
                        break;
                case PIPE_FORMAT_Z32_FLOAT:
                case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_32F;
                        break;
                default:
                        surface->internal_type = V3D_INTERNAL_TYPE_DEPTH_24;
                }
        } else {
                /* Color: derive internal type/bpp from the RT format via
                 * the per-version hardware table.
                 */
                uint32_t bpp, type;
                v3d_X(devinfo, get_internal_type_bpp_for_output_format)
                        (surface->format, &type, &bpp);
                surface->internal_type = type;
                surface->internal_bpp = bpp;
        }

        if (surface->tiling == V3D_TILING_UIF_NO_XOR ||
            surface->tiling == V3D_TILING_UIF_XOR) {
                /* UIF blocks are two utiles tall, hence the division by
                 * 2 * utile height.
                 */
                surface->padded_height_of_output_image_in_uif_blocks =
                        (slice->padded_height /
                         (2 * v3d_utile_height(rsc->cpp)));
        }

        if (rsc->separate_stencil) {
                /* Recursively build the matching view of the separate
                 * stencil resource.
                 */
                surface->separate_stencil =
                        v3d_create_surface(pctx, &rsc->separate_stencil->base,
                                           surf_tmpl);
        }

        return &surface->base;
}
1151
1152 static void
v3d_surface_destroy(struct pipe_context * pctx,struct pipe_surface * psurf)1153 v3d_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
1154 {
1155 struct v3d_surface *surf = v3d_surface(psurf);
1156
1157 if (surf->separate_stencil)
1158 pipe_surface_reference(&surf->separate_stencil, NULL);
1159
1160 pipe_resource_reference(&psurf->texture, NULL);
1161 FREE(psurf);
1162 }
1163
1164 static void
v3d_flush_resource(struct pipe_context * pctx,struct pipe_resource * prsc)1165 v3d_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
1166 {
1167 /* All calls to flush_resource are followed by a flush of the context,
1168 * so there's nothing to do. Still, if the resource is going to be
1169 * shared and it is tiled, only UIF format is valid, so we need to
1170 * convert it.
1171 */
1172 struct v3d_resource *rsc = v3d_resource(prsc);
1173 if (rsc->tiled &&
1174 rsc->slices[0].tiling != V3D_TILING_UIF_XOR &&
1175 rsc->slices[0].tiling != V3D_TILING_UIF_NO_XOR) {
1176 /* Shared resources must be not mipmapped */
1177 assert(prsc->last_level == 0);
1178 /* Shared resources must not be multisampled */
1179 assert(prsc->nr_samples <= 1);
1180
1181 struct pipe_resource ptmpl = *prsc;
1182 ptmpl.bind |= PIPE_BIND_SHARED;
1183 struct v3d_resource *new_rsc =
1184 v3d_resource(pctx->screen->resource_create(pctx->screen, &ptmpl));
1185 assert(new_rsc);
1186
1187 struct pipe_blit_info blit = { 0 };
1188 u_box_3d(0, 0, 0,
1189 prsc->width0, prsc->height0, prsc->depth0,
1190 &blit.dst.box);
1191 blit.src.box = blit.dst.box;
1192 blit.dst.resource = &new_rsc->base;
1193 blit.dst.format = new_rsc->base.format;
1194 blit.dst.level = 0;
1195 blit.src.resource = prsc;
1196 blit.src.format = prsc->format;
1197 blit.src.level = 0 ;
1198 blit.mask = util_format_get_mask(blit.src.format);
1199 blit.filter = PIPE_TEX_FILTER_NEAREST;
1200
1201 v3d_blit(pctx, &blit);
1202
1203 rsc->base.bind = new_rsc->base.bind;
1204 /* Swap the BOs */
1205 struct v3d_bo *old_bo = rsc->bo;
1206 rsc->bo = new_rsc->bo;
1207 rsc->serial_id++;
1208 new_rsc->bo = old_bo;
1209
1210 /* Copy the affected fields */
1211 rsc->slices[0] = new_rsc->slices[0];
1212 rsc->cube_map_stride = new_rsc->cube_map_stride;
1213 rsc->sand_col128_stride = new_rsc->sand_col128_stride;
1214 rsc->size = new_rsc->size;
1215 rsc->tiled = new_rsc->tiled;
1216
1217 struct pipe_resource *new_prsc = (struct pipe_resource *)&new_rsc;
1218 pipe_resource_reference(&new_prsc, NULL);
1219 }
1220 }
1221
1222 static enum pipe_format
v3d_resource_get_internal_format(struct pipe_resource * prsc)1223 v3d_resource_get_internal_format(struct pipe_resource *prsc)
1224 {
1225 return v3d_resource(prsc)->internal_format;
1226 }
1227
1228 static void
v3d_resource_set_stencil(struct pipe_resource * prsc,struct pipe_resource * stencil)1229 v3d_resource_set_stencil(struct pipe_resource *prsc,
1230 struct pipe_resource *stencil)
1231 {
1232 v3d_resource(prsc)->separate_stencil = v3d_resource(stencil);
1233 }
1234
1235 static struct pipe_resource *
v3d_resource_get_stencil(struct pipe_resource * prsc)1236 v3d_resource_get_stencil(struct pipe_resource *prsc)
1237 {
1238 struct v3d_resource *rsc = v3d_resource(prsc);
1239
1240 return rsc->separate_stencil ? &rsc->separate_stencil->base : NULL;
1241 }
1242
/* Driver hooks handed to u_transfer_helper, which layers separate-Z32F/S8
 * and MSAA-map handling on top of them (see v3d_resource_screen_init).
 */
static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create          = v3d_resource_create,
        .resource_destroy         = v3d_resource_destroy,
        .transfer_map             = v3d_resource_transfer_map,
        .transfer_unmap           = v3d_resource_transfer_unmap,
        .transfer_flush_region    = u_default_transfer_flush_region,
        .get_internal_format      = v3d_resource_get_internal_format,
        .set_stencil              = v3d_resource_set_stencil,
        .get_stencil              = v3d_resource_get_stencil,
};
1253
1254 void
v3d_resource_screen_init(struct pipe_screen * pscreen)1255 v3d_resource_screen_init(struct pipe_screen *pscreen)
1256 {
1257 pscreen->resource_create_with_modifiers =
1258 v3d_resource_create_with_modifiers;
1259 pscreen->resource_create = u_transfer_helper_resource_create;
1260 pscreen->resource_from_handle = v3d_resource_from_handle;
1261 pscreen->resource_get_handle = v3d_resource_get_handle;
1262 pscreen->resource_get_param = v3d_resource_get_param;
1263 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
1264 pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
1265 U_TRANSFER_HELPER_SEPARATE_Z32S8 |
1266 U_TRANSFER_HELPER_MSAA_MAP);
1267 }
1268
1269 void
v3d_resource_context_init(struct pipe_context * pctx)1270 v3d_resource_context_init(struct pipe_context *pctx)
1271 {
1272 pctx->buffer_map = u_transfer_helper_transfer_map;
1273 pctx->texture_map = u_transfer_helper_transfer_map;
1274 pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1275 pctx->buffer_unmap = u_transfer_helper_transfer_unmap;
1276 pctx->texture_unmap = u_transfer_helper_transfer_unmap;
1277 pctx->buffer_subdata = u_default_buffer_subdata;
1278 pctx->texture_subdata = v3d_texture_subdata;
1279 pctx->create_surface = v3d_create_surface;
1280 pctx->surface_destroy = v3d_surface_destroy;
1281 pctx->resource_copy_region = util_resource_copy_region;
1282 pctx->blit = v3d_blit;
1283 pctx->generate_mipmap = v3d_generate_mipmap;
1284 pctx->flush_resource = v3d_flush_resource;
1285 }
1286