/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir/pipe_nir.h"
#include "util/format/u_format.h"
#include "util/perf/cpu_trace.h"
#include "util/u_surface.h"
#include "util/u_blitter.h"
#include "compiler/nir/nir_builder.h"
#include "vc4_context.h"

static struct pipe_surface *
vc4_get_blit_surface(struct pipe_context *pctx,
                     struct pipe_resource *prsc, unsigned level,
                     unsigned layer)
{
        struct pipe_surface tmpl;

        memset(&tmpl, 0, sizeof(tmpl));
        tmpl.format = prsc->format;
        tmpl.u.tex.level = level;
        tmpl.u.tex.first_layer = tmpl.u.tex.last_layer = layer;

        return pctx->create_surface(pctx, prsc, &tmpl);
}

static bool
is_tile_unaligned(unsigned size, unsigned tile_size)
{
        return size & (tile_size - 1);
}

static void
vc4_tile_blit(struct pipe_context *pctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        bool msaa = (info->src.resource->nr_samples > 1 ||
                     info->dst.resource->nr_samples > 1);
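        /* The RCL works in whole tiles: 64x64 pixels normally, 32x32 when
         * MSAA is enabled and the samples take up the extra tile buffer
         * space.
         */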
        int tile_width = msaa ? 32 : 64;
        int tile_height = msaa ? 32 : 64;

        if (!info->mask)
                return;

        bool is_color_blit = info->mask & PIPE_MASK_RGBA;
        bool is_depth_blit = info->mask & PIPE_MASK_Z;
        bool is_stencil_blit = info->mask & PIPE_MASK_S;

        /* We receive either a depth/stencil blit or a color blit, but not
         * both.
         */
        assert((is_color_blit && !(is_depth_blit || is_stencil_blit)) ||
               (!is_color_blit && (is_depth_blit || is_stencil_blit)));

        if (info->scissor_enable || info->swizzle_enable)
                return;

        if (info->dst.box.x != info->src.box.x ||
            info->dst.box.y != info->src.box.y ||
            info->dst.box.width != info->src.box.width ||
            info->dst.box.height != info->src.box.height ||
            info->dst.box.depth != info->src.box.depth ||
            info->dst.box.depth != 1) {
                return;
        }

        if (is_color_blit &&
            util_format_is_depth_or_stencil(info->dst.format))
                return;

        if ((is_depth_blit || is_stencil_blit) &&
            !util_format_is_depth_or_stencil(info->dst.format))
                return;

        int dst_surface_width = u_minify(info->dst.resource->width0,
                                         info->dst.level);
        int dst_surface_height = u_minify(info->dst.resource->height0,
                                          info->dst.level);
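        /* The RCL can only load and store whole tiles, so the destination box
         * must start on a tile boundary and either cover whole tiles or run
         * all the way to the edge of the miplevel.
         */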
        if (is_tile_unaligned(info->dst.box.x, tile_width) ||
            is_tile_unaligned(info->dst.box.y, tile_height) ||
            (is_tile_unaligned(info->dst.box.width, tile_width) &&
             info->dst.box.x + info->dst.box.width != dst_surface_width) ||
            (is_tile_unaligned(info->dst.box.height, tile_height) &&
             info->dst.box.y + info->dst.box.height != dst_surface_height)) {
                return;
        }

        /* VC4_PACKET_LOAD_TILE_BUFFER_GENERAL uses the
         * VC4_PACKET_TILE_RENDERING_MODE_CONFIG's width (determined by our
         * destination surface) to determine the stride. This may be wrong
         * when reading from texture miplevels > 0, which are stored in
         * POT-sized areas. For MSAA, the tile addresses are computed
         * explicitly by the RCL, but still use the destination width to
         * determine the stride (which could be fixed by explicitly supplying
         * it in the ABI).
         */
        struct vc4_resource *rsc = vc4_resource(info->src.resource);

        uint32_t stride;

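        /* Recompute the stride the RCL will implicitly use for the source
         * (based on the destination width) and bail if it doesn't match the
         * source slice's actual stride.
         */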
        if (info->src.resource->nr_samples > 1)
                stride = align(dst_surface_width, 32) * 4 * rsc->cpp;
        else if (rsc->slices[info->src.level].tiling == VC4_TILING_FORMAT_T)
                stride = align(dst_surface_width * rsc->cpp, 128);
        else
                stride = align(dst_surface_width * rsc->cpp, 16);

        if (stride != rsc->slices[info->src.level].stride)
                return;

        if (info->dst.resource->format != info->src.resource->format)
                return;

        if (false) {
                fprintf(stderr, "RCL blit from %d,%d to %d,%d (%d,%d)\n",
                        info->src.box.x,
                        info->src.box.y,
                        info->dst.box.x,
                        info->dst.box.y,
                        info->dst.box.width,
                        info->dst.box.height);
        }

        struct pipe_surface *dst_surf =
                vc4_get_blit_surface(pctx, info->dst.resource, info->dst.level,
                                     info->dst.box.z);
        struct pipe_surface *src_surf =
                vc4_get_blit_surface(pctx, info->src.resource, info->src.level,
                                     info->src.box.z);

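        /* Flush any jobs already reading the source, then build a job that
         * loads the source into the tile buffer and stores it out to the
         * destination.
         */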
        vc4_flush_jobs_reading_resource(vc4, info->src.resource);

        struct vc4_job *job;
        if (is_color_blit) {
                job = vc4_get_job(vc4, dst_surf, NULL);
                pipe_surface_reference(&job->color_read, src_surf);
        } else {
                job = vc4_get_job(vc4, NULL, dst_surf);
                pipe_surface_reference(&job->zs_read, src_surf);
        }

        job->draw_min_x = info->dst.box.x;
        job->draw_min_y = info->dst.box.y;
        job->draw_max_x = info->dst.box.x + info->dst.box.width;
        job->draw_max_y = info->dst.box.y + info->dst.box.height;
        job->draw_width = dst_surf->width;
        job->draw_height = dst_surf->height;

        job->tile_width = tile_width;
        job->tile_height = tile_height;
        job->msaa = msaa;
        job->needs_flush = true;

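        /* The resolve bits make the RCL store the corresponding buffer at the
         * end of the job; clearing the bits in info->mask tells the later
         * blit paths that those planes are already handled.
         */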
        if (is_color_blit) {
                job->resolve |= PIPE_CLEAR_COLOR;
                info->mask &= ~PIPE_MASK_RGBA;
        }

        if (is_depth_blit) {
                job->resolve |= PIPE_CLEAR_DEPTH;
                info->mask &= ~PIPE_MASK_Z;
        }

        if (is_stencil_blit) {
                job->resolve |= PIPE_CLEAR_STENCIL;
                info->mask &= ~PIPE_MASK_S;
        }

        vc4_job_submit(vc4, job);

        pipe_surface_reference(&dst_surf, NULL);
        pipe_surface_reference(&src_surf, NULL);
}

void
vc4_blitter_save(struct vc4_context *vc4)
{
        util_blitter_save_fragment_constant_buffer_slot(vc4->blitter,
                        vc4->constbuf[PIPE_SHADER_FRAGMENT].cb);
        util_blitter_save_vertex_buffers(vc4->blitter, vc4->vertexbuf.vb,
                                         vc4->vertexbuf.count);
        util_blitter_save_vertex_elements(vc4->blitter, vc4->vtx);
        util_blitter_save_vertex_shader(vc4->blitter, vc4->prog.bind_vs);
        util_blitter_save_rasterizer(vc4->blitter, vc4->rasterizer);
        util_blitter_save_viewport(vc4->blitter, &vc4->viewport);
        util_blitter_save_scissor(vc4->blitter, &vc4->scissor);
        util_blitter_save_fragment_shader(vc4->blitter, vc4->prog.bind_fs);
        util_blitter_save_blend(vc4->blitter, vc4->blend);
        util_blitter_save_depth_stencil_alpha(vc4->blitter, vc4->zsa);
        util_blitter_save_stencil_ref(vc4->blitter, &vc4->stencil_ref);
        util_blitter_save_sample_mask(vc4->blitter, vc4->sample_mask, 0);
        util_blitter_save_framebuffer(vc4->blitter, &vc4->framebuffer);
        util_blitter_save_fragment_sampler_states(vc4->blitter,
                        vc4->fragtex.num_samplers,
                        (void **)vc4->fragtex.samplers);
        util_blitter_save_fragment_sampler_views(vc4->blitter,
                        vc4->fragtex.num_textures, vc4->fragtex.textures);
}

static void *vc4_get_yuv_vs(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_screen *pscreen = pctx->screen;

        if (vc4->yuv_linear_blit_vs)
                return vc4->yuv_linear_blit_vs;

        const struct nir_shader_compiler_options *options =
                pscreen->get_compiler_options(pscreen,
                                              PIPE_SHADER_IR_NIR,
                                              PIPE_SHADER_VERTEX);

        nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_VERTEX, options,
                                                       "linear_blit_vs");

        const struct glsl_type *vec4 = glsl_vec4_type();
        nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                   vec4, "pos");

        nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                    vec4, "gl_Position");
        pos_out->data.location = VARYING_SLOT_POS;

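        /* Pass the blitter's quad vertices straight through; they are already
         * in clip space.
         */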
        nir_store_var(&b, pos_out, nir_load_var(&b, pos_in), 0xf);

        vc4->yuv_linear_blit_vs = pipe_shader_from_nir(pctx, b.shader);

        return vc4->yuv_linear_blit_vs;
}

static void *vc4_get_yuv_fs(struct pipe_context *pctx, int cpp)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_screen *pscreen = pctx->screen;
        struct pipe_shader_state **cached_shader;
        const char *name;

        if (cpp == 1) {
                cached_shader = &vc4->yuv_linear_blit_fs_8bit;
                name = "linear_blit_8bit_fs";
        } else {
                cached_shader = &vc4->yuv_linear_blit_fs_16bit;
                name = "linear_blit_16bit_fs";
        }

        if (*cached_shader)
                return *cached_shader;

        const struct nir_shader_compiler_options *options =
                pscreen->get_compiler_options(pscreen,
                                              PIPE_SHADER_IR_NIR,
                                              PIPE_SHADER_FRAGMENT);

        nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
                                                       options, "%s", name);

        const struct glsl_type *vec4 = glsl_vec4_type();
        const struct glsl_type *glsl_int = glsl_int_type();

        nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                      vec4, "f_color");
        color_out->data.location = FRAG_RESULT_COLOR;

        nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                   vec4, "pos");
        pos_in->data.location = VARYING_SLOT_POS;
        nir_def *pos = nir_load_var(&b, pos_in);

        nir_def *one = nir_imm_int(&b, 1);
        nir_def *two = nir_imm_int(&b, 2);

        nir_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
        nir_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));

        nir_variable *stride_in = nir_variable_create(b.shader, nir_var_uniform,
                                                      glsl_int, "stride");
        nir_def *stride = nir_load_var(&b, stride_in);

        nir_def *x_offset;
        nir_def *y_offset;
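        /* Each output RGBA texel packs 4 source bytes. For 8bpp sources the
         * swizzling below remaps destination (x, y) to the byte offset those
         * four bytes occupy in the raster-order source, accounting for the
         * difference between the 8bpp and 32bpp microtile layouts of the
         * tiled destination.
         */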
        if (cpp == 1) {
                nir_def *intra_utile_x_offset =
                        nir_ishl(&b, nir_iand(&b, x, one), two);
                nir_def *inter_utile_x_offset =
                        nir_ishl(&b, nir_iand(&b, x, nir_imm_int(&b, ~3)), one);

                x_offset = nir_iadd(&b,
                                    intra_utile_x_offset,
                                    inter_utile_x_offset);
                y_offset = nir_imul(&b,
                                    nir_iadd(&b,
                                             nir_ishl(&b, y, one),
                                             nir_ushr(&b, nir_iand(&b, x, two), one)),
                                    stride);
        } else {
                x_offset = nir_ishl(&b, x, two);
                y_offset = nir_imul(&b, y, stride);
        }

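        /* The source plane is bound as constant buffer 1 by vc4_yuv_blit, so
         * fetch the 4 bytes with a single 32-bit UBO load at the computed
         * byte offset.
         */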
        nir_def *load =
                nir_load_ubo(&b, 1, 32, one, nir_iadd(&b, x_offset, y_offset),
                             .align_mul = 4,
                             .align_offset = 0,
                             .range_base = 0,
                             .range = ~0);

        nir_store_var(&b, color_out,
                      nir_unpack_unorm_4x8(&b, load),
                      0xf);

        *cached_shader = pipe_shader_from_nir(pctx, b.shader);

        return *cached_shader;
}

static void
vc4_yuv_blit(struct pipe_context *pctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_resource *src = vc4_resource(info->src.resource);
        struct vc4_resource *dst = vc4_resource(info->dst.resource);
        bool ok;

        if (!(info->mask & PIPE_MASK_RGBA))
                return;

        if (info->swizzle_enable)
                return;
        if (src->tiled)
                return;

        if (src->base.format != PIPE_FORMAT_R8_UNORM &&
            src->base.format != PIPE_FORMAT_R8G8_UNORM)
                return;

        /* YUV blits always turn raster-order to tiled */
        assert(dst->base.format == src->base.format);
        assert(dst->tiled);

        /* Always 1:1 and at the origin */
        assert(info->src.box.x == 0 && info->dst.box.x == 0);
        assert(info->src.box.y == 0 && info->dst.box.y == 0);
        assert(info->src.box.width == info->dst.box.width);
        assert(info->src.box.height == info->dst.box.height);

        if ((src->slices[info->src.level].offset & 3) ||
            (src->slices[info->src.level].stride & 3)) {
                perf_debug("YUV-blit src texture offset/stride misaligned: 0x%08x/%d\n",
                           src->slices[info->src.level].offset,
                           src->slices[info->src.level].stride);
                goto fallback;
        }

        vc4_blitter_save(vc4);

        /* Create a renderable surface mapping the T-tiled shadow buffer.
         */
        struct pipe_surface dst_tmpl;
        util_blitter_default_dst_texture(&dst_tmpl, info->dst.resource,
                                         info->dst.level, info->dst.box.z);
        dst_tmpl.format = PIPE_FORMAT_RGBA8888_UNORM;
        struct pipe_surface *dst_surf =
                pctx->create_surface(pctx, info->dst.resource, &dst_tmpl);
        if (!dst_surf) {
                fprintf(stderr, "Failed to create YUV dst surface\n");
                util_blitter_unset_running_flag(vc4->blitter);
                return;
        }
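        /* We render the shadow as 32bpp, so halve the width to keep the bytes
         * per row matching the source plane; for 8bpp sources also halve the
         * height and let the fragment shader fold two source rows into each
         * destination row.
         */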
        dst_surf->width = align(dst_surf->width, 8) / 2;
        if (dst->cpp == 1)
                dst_surf->height /= 2;

        /* Set the constant buffer. */
        uint32_t stride = src->slices[info->src.level].stride;
        struct pipe_constant_buffer cb_uniforms = {
                .user_buffer = &stride,
                .buffer_size = sizeof(stride),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, false, &cb_uniforms);
        struct pipe_constant_buffer cb_src = {
                .buffer = info->src.resource,
                .buffer_offset = src->slices[info->src.level].offset,
                .buffer_size = (src->bo->size -
                                src->slices[info->src.level].offset),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, false, &cb_src);

        /* Unbind the textures, to make sure we don't try to recurse into the
         * shadow blit.
         */
        pctx->set_sampler_views(pctx, PIPE_SHADER_FRAGMENT, 0, 0, 0, false, NULL);
        pctx->bind_sampler_states(pctx, PIPE_SHADER_FRAGMENT, 0, 0, NULL);

        util_blitter_custom_shader(vc4->blitter, dst_surf,
                                   vc4_get_yuv_vs(pctx),
                                   vc4_get_yuv_fs(pctx, src->cpp));

        util_blitter_restore_textures(vc4->blitter);
        util_blitter_restore_constant_buffer_state(vc4->blitter);
        /* Restore cb1 (util_blitter doesn't handle this one). */
        struct pipe_constant_buffer cb_disabled = { 0 };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, false, &cb_disabled);

        pipe_surface_reference(&dst_surf, NULL);

        info->mask &= ~PIPE_MASK_RGBA;

        return;

fallback:
        /* Do an immediate SW fallback, since the render blit path
         * would just recurse.
         */
        ok = util_try_blit_via_copy_region(pctx, info, false);
        assert(ok); (void)ok;

        info->mask &= ~PIPE_MASK_RGBA;
}

static void
vc4_render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(ctx);

        if (!info->mask)
                return;

        if (!util_blitter_is_blit_supported(vc4->blitter, info)) {
                fprintf(stderr, "blit unsupported %s -> %s\n",
                        util_format_short_name(info->src.resource->format),
                        util_format_short_name(info->dst.resource->format));
                return;
        }

        /* Enable the scissor, so we get a minimal set of tiles rendered. */
        if (!info->scissor_enable) {
                info->scissor_enable = true;
                info->scissor.minx = info->dst.box.x;
                info->scissor.miny = info->dst.box.y;
                info->scissor.maxx = info->dst.box.x + info->dst.box.width;
                info->scissor.maxy = info->dst.box.y + info->dst.box.height;
        }

        vc4_blitter_save(vc4);
        util_blitter_blit(vc4->blitter, info, NULL);

        info->mask = 0;
}

/* Implement stencil and stencil/depth blit by reinterpreting stencil data as
 * an RGBA8888 texture.
 */
static void
vc4_stencil_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(ctx);
        struct vc4_resource *src = vc4_resource(info->src.resource);
        struct vc4_resource *dst = vc4_resource(info->dst.resource);
        enum pipe_format src_format, dst_format;

        if ((info->mask & PIPE_MASK_S) == 0)
                return;

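        /* Pick color formats to reinterpret the stencil data with:
         * RGBA8888_UINT for combined depth/stencil blits, R8_UINT for
         * stencil-only ones.
         */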
        src_format = (info->mask & PIPE_MASK_ZS) ?
                PIPE_FORMAT_RGBA8888_UINT :
                PIPE_FORMAT_R8_UINT;

        dst_format = (info->mask & PIPE_MASK_ZS) ?
                PIPE_FORMAT_RGBA8888_UINT :
                PIPE_FORMAT_R8_UINT;

        /* Initialize the surface */
        struct pipe_surface dst_tmpl = {
                .u.tex = {
                        .level = info->dst.level,
                        .first_layer = info->dst.box.z,
                        .last_layer = info->dst.box.z,
                },
                .format = dst_format,
        };
        struct pipe_surface *dst_surf =
                ctx->create_surface(ctx, &dst->base, &dst_tmpl);

        /* Initialize the sampler view */
        struct pipe_sampler_view src_tmpl = {
                .target = (src->base.target == PIPE_TEXTURE_CUBE_ARRAY) ?
                          PIPE_TEXTURE_2D_ARRAY :
                          src->base.target,
                .format = src_format,
                .u.tex = {
                        .first_level = info->src.level,
                        .last_level = info->src.level,
                        .first_layer = 0,
                        /* 3D textures expose their depth as layers; everything
                         * else uses array_size.
                         */
                        .last_layer = (src->base.target == PIPE_TEXTURE_3D ?
                                       u_minify(src->base.depth0,
                                                info->src.level) - 1 :
                                       src->base.array_size - 1),
                },
                .swizzle_r = PIPE_SWIZZLE_X,
                .swizzle_g = PIPE_SWIZZLE_Y,
                .swizzle_b = PIPE_SWIZZLE_Z,
                .swizzle_a = PIPE_SWIZZLE_W,
        };
        struct pipe_sampler_view *src_view =
                ctx->create_sampler_view(ctx, &src->base, &src_tmpl);

        vc4_blitter_save(vc4);
        util_blitter_blit_generic(vc4->blitter, dst_surf, &info->dst.box,
                                  src_view, &info->src.box,
                                  src->base.width0, src->base.height0,
                                  (info->mask & PIPE_MASK_ZS) ?
                                  PIPE_MASK_RGBA : PIPE_MASK_R,
                                  PIPE_TEX_FILTER_NEAREST,
                                  info->scissor_enable ? &info->scissor : NULL,
                                  info->alpha_blend, false, 0, NULL);

        pipe_surface_reference(&dst_surf, NULL);
        pipe_sampler_view_reference(&src_view, NULL);

        info->mask &= ~PIPE_MASK_ZS;
}

/* Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
void
vc4_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
        struct pipe_blit_info info = *blit_info;

        MESA_TRACE_FUNC();

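        /* Each path below handles whatever it can and clears the
         * corresponding bits in info.mask; anything still set at the end is
         * unsupported.
         */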
        vc4_yuv_blit(pctx, &info);

        vc4_tile_blit(pctx, &info);

        if (info.mask &&
            util_try_blit_via_copy_region(pctx, &info, false))
                return;

        vc4_stencil_blit(pctx, &info);

        vc4_render_blit(pctx, &info);

        if (info.mask)
                fprintf(stderr, "Unsupported blit\n");
}