1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "util/u_sampler.h"
29
30 #include "vl_compositor_gfx.h"
31 #include "vl_compositor_cs.h"
32
33 static bool
init_shaders(struct vl_compositor * c)34 init_shaders(struct vl_compositor *c)
35 {
36 assert(c);
37
38 if (c->shaders_initialized)
39 return true;
40
41 if (c->pipe_cs_composit_supported) {
42 if (!vl_compositor_cs_init_shaders(c))
43 return false;
44
45 } else if (c->pipe_gfx_supported) {
46 c->fs_video_buffer = create_frag_shader_video_buffer(c);
47 if (!c->fs_video_buffer) {
48 debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
49 return false;
50 }
51
52 c->fs_weave_rgb = create_frag_shader_weave_rgb(c);
53 if (!c->fs_weave_rgb) {
54 debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
55 return false;
56 }
57
58 c->fs_yuv.weave.y = create_frag_shader_deint_yuv(c, true, true);
59 c->fs_yuv.weave.uv = create_frag_shader_deint_yuv(c, false, true);
60 c->fs_yuv.bob.y = create_frag_shader_deint_yuv(c, true, false);
61 c->fs_yuv.bob.uv = create_frag_shader_deint_yuv(c, false, false);
62 if (!c->fs_yuv.weave.y || !c->fs_yuv.weave.uv ||
63 !c->fs_yuv.bob.y || !c->fs_yuv.bob.uv) {
64 debug_printf("Unable to create YCbCr i-to-YCbCr p deint fragment shader.\n");
65 return false;
66 }
67
68 c->fs_rgb_yuv.y = create_frag_shader_rgb_yuv(c, true);
69 c->fs_rgb_yuv.uv = create_frag_shader_rgb_yuv(c, false);
70 if (!c->fs_rgb_yuv.y || !c->fs_rgb_yuv.uv) {
71 debug_printf("Unable to create RGB-to-YUV fragment shader.\n");
72 return false;
73 }
74 }
75
76 if (c->pipe_gfx_supported) {
77 c->vs = create_vert_shader(c);
78 if (!c->vs) {
79 debug_printf("Unable to create vertex shader.\n");
80 return false;
81 }
82
83 c->fs_palette.yuv = create_frag_shader_palette(c, true);
84 if (!c->fs_palette.yuv) {
85 debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
86 return false;
87 }
88
89 c->fs_palette.rgb = create_frag_shader_palette(c, false);
90 if (!c->fs_palette.rgb) {
91 debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
92 return false;
93 }
94
95 c->fs_rgba = create_frag_shader_rgba(c);
96 if (!c->fs_rgba) {
97 debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
98 return false;
99 }
100 }
101
102 c->shaders_initialized = true;
103
104 return true;
105 }
106
cleanup_shaders(struct vl_compositor * c)107 static void cleanup_shaders(struct vl_compositor *c)
108 {
109 assert(c);
110
111 if (!c->shaders_initialized)
112 return;
113
114 if (c->pipe_cs_composit_supported) {
115 vl_compositor_cs_cleanup_shaders(c);
116 } else if (c->pipe_gfx_supported) {
117 c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
118 c->pipe->delete_fs_state(c->pipe, c->fs_weave_rgb);
119 c->pipe->delete_fs_state(c->pipe, c->fs_yuv.weave.y);
120 c->pipe->delete_fs_state(c->pipe, c->fs_yuv.weave.uv);
121 c->pipe->delete_fs_state(c->pipe, c->fs_yuv.bob.y);
122 c->pipe->delete_fs_state(c->pipe, c->fs_yuv.bob.uv);
123 c->pipe->delete_fs_state(c->pipe, c->fs_rgb_yuv.y);
124 c->pipe->delete_fs_state(c->pipe, c->fs_rgb_yuv.uv);
125 }
126
127 if (c->pipe_gfx_supported) {
128 c->pipe->delete_vs_state(c->pipe, c->vs);
129 c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
130 c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
131 c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
132 }
133 }
134
135 static bool
init_pipe_state(struct vl_compositor * c)136 init_pipe_state(struct vl_compositor *c)
137 {
138 struct pipe_rasterizer_state rast;
139 struct pipe_sampler_state sampler;
140 struct pipe_blend_state blend;
141 struct pipe_depth_stencil_alpha_state dsa;
142 unsigned i;
143
144 assert(c);
145
146 c->fb_state.nr_cbufs = 1;
147 c->fb_state.zsbuf = NULL;
148
149 memset(&sampler, 0, sizeof(sampler));
150 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
151 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
152 sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
153 sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
154 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
155 sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
156 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
157 sampler.compare_func = PIPE_FUNC_ALWAYS;
158 c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);
159
160 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
161 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
162 c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);
163
164 if (c->pipe_gfx_supported) {
165 memset(&blend, 0, sizeof blend);
166 blend.independent_blend_enable = 0;
167 blend.rt[0].blend_enable = 0;
168 blend.logicop_enable = 0;
169 blend.logicop_func = PIPE_LOGICOP_CLEAR;
170 blend.rt[0].colormask = PIPE_MASK_RGBA;
171 blend.dither = 0;
172 c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);
173
174 blend.rt[0].blend_enable = 1;
175 blend.rt[0].rgb_func = PIPE_BLEND_ADD;
176 blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
177 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
178 blend.rt[0].alpha_func = PIPE_BLEND_ADD;
179 blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
180 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
181 c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);
182
183 memset(&rast, 0, sizeof rast);
184 rast.flatshade = 0;
185 rast.front_ccw = 1;
186 rast.cull_face = PIPE_FACE_NONE;
187 rast.fill_back = PIPE_POLYGON_MODE_FILL;
188 rast.fill_front = PIPE_POLYGON_MODE_FILL;
189 rast.scissor = 1;
190 rast.line_width = 1;
191 rast.point_size_per_vertex = 1;
192 rast.offset_units = 1;
193 rast.offset_scale = 1;
194 rast.half_pixel_center = 1;
195 rast.bottom_edge_rule = 1;
196 rast.depth_clip_near = 1;
197 rast.depth_clip_far = 1;
198
199 c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);
200
201 memset(&dsa, 0, sizeof dsa);
202 dsa.depth_enabled = 0;
203 dsa.depth_writemask = 0;
204 dsa.depth_func = PIPE_FUNC_ALWAYS;
205 for (i = 0; i < 2; ++i) {
206 dsa.stencil[i].enabled = 0;
207 dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
208 dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
209 dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
210 dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
211 dsa.stencil[i].valuemask = 0;
212 dsa.stencil[i].writemask = 0;
213 }
214 dsa.alpha_enabled = 0;
215 dsa.alpha_func = PIPE_FUNC_ALWAYS;
216 dsa.alpha_ref_value = 0;
217 c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
218 c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);
219 }
220
221 return true;
222 }
223
cleanup_pipe_state(struct vl_compositor * c)224 static void cleanup_pipe_state(struct vl_compositor *c)
225 {
226 assert(c);
227
228 if (c->pipe_gfx_supported) {
229 /* Asserted in softpipe_delete_fs_state() for some reason */
230 c->pipe->bind_vs_state(c->pipe, NULL);
231 c->pipe->bind_fs_state(c->pipe, NULL);
232
233 c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
234 c->pipe->delete_blend_state(c->pipe, c->blend_clear);
235 c->pipe->delete_blend_state(c->pipe, c->blend_add);
236 c->pipe->delete_rasterizer_state(c->pipe, c->rast);
237 }
238 if (c->sampler_linear)
239 c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
240 if (c->sampler_nearest)
241 c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
242 }
243
244 static bool
init_buffers(struct vl_compositor * c)245 init_buffers(struct vl_compositor *c)
246 {
247 struct pipe_vertex_element vertex_elems[3];
248 memset(vertex_elems, 0, sizeof(vertex_elems));
249
250 assert(c);
251
252 /*
253 * Create our vertex buffer and vertex buffer elements
254 */
255 c->vertex_buf.buffer_offset = 0;
256 c->vertex_buf.buffer.resource = NULL;
257 c->vertex_buf.is_user_buffer = false;
258
259 if (c->pipe_gfx_supported) {
260 vertex_elems[0].src_offset = 0;
261 vertex_elems[0].src_stride = VL_COMPOSITOR_VB_STRIDE;
262 vertex_elems[0].instance_divisor = 0;
263 vertex_elems[0].vertex_buffer_index = 0;
264 vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
265 vertex_elems[1].src_offset = sizeof(struct vertex2f);
266 vertex_elems[1].src_stride = VL_COMPOSITOR_VB_STRIDE;
267 vertex_elems[1].instance_divisor = 0;
268 vertex_elems[1].vertex_buffer_index = 0;
269 vertex_elems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
270 vertex_elems[2].src_offset = sizeof(struct vertex2f) + sizeof(struct vertex4f);
271 vertex_elems[2].src_stride = VL_COMPOSITOR_VB_STRIDE;
272 vertex_elems[2].instance_divisor = 0;
273 vertex_elems[2].vertex_buffer_index = 0;
274 vertex_elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
275 c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 3, vertex_elems);
276 }
277
278 return true;
279 }
280
281 static void
cleanup_buffers(struct vl_compositor * c)282 cleanup_buffers(struct vl_compositor *c)
283 {
284 assert(c);
285
286 if (c->pipe_gfx_supported) {
287 c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
288 }
289 pipe_resource_reference(&c->vertex_buf.buffer.resource, NULL);
290 }
291
292 static inline struct u_rect
default_rect(struct vl_compositor_layer * layer)293 default_rect(struct vl_compositor_layer *layer)
294 {
295 struct pipe_resource *res = layer->sampler_views[0]->texture;
296 struct u_rect rect = { 0, res->width0, 0, res->height0 * res->array_size };
297 return rect;
298 }
299
300 static inline struct vertex2f
calc_topleft(struct vertex2f size,struct u_rect rect)301 calc_topleft(struct vertex2f size, struct u_rect rect)
302 {
303 struct vertex2f res = { rect.x0 / size.x, rect.y0 / size.y };
304 return res;
305 }
306
307 static inline struct vertex2f
calc_bottomright(struct vertex2f size,struct u_rect rect)308 calc_bottomright(struct vertex2f size, struct u_rect rect)
309 {
310 struct vertex2f res = { rect.x1 / size.x, rect.y1 / size.y };
311 return res;
312 }
313
314 static inline void
calc_src_and_dst(struct vl_compositor_layer * layer,unsigned width,unsigned height,struct u_rect src,struct u_rect dst)315 calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
316 struct u_rect src, struct u_rect dst)
317 {
318 struct vertex2f size = { width, height };
319
320 layer->src.tl = calc_topleft(size, src);
321 layer->src.br = calc_bottomright(size, src);
322 layer->dst.tl = calc_topleft(size, dst);
323 layer->dst.br = calc_bottomright(size, dst);
324 layer->zw.x = 0.0f;
325 layer->zw.y = size.y;
326 }
327
/**
 * Configure a layer to read one plane of a YUV video buffer, applying
 * the requested deinterlace mode, for YUV-to-YUV deint/copy output.
 *
 * Picks the compute or fragment shader matching (plane, deinterlace).
 * For the bob modes, the src rect is shifted by half a source line and
 * zw.x selects the field (0 = top, 1 = bottom).
 */
static void
set_yuv_layer(struct vl_compositor_state *s, struct vl_compositor *c,
              unsigned layer, struct pipe_video_buffer *buffer,
              struct u_rect *src_rect, struct u_rect *dst_rect,
              enum vl_compositor_plane plane,
              enum vl_compositor_deinterlace deinterlace)
{
   struct pipe_sampler_view **sampler_views;
   float half_a_line;
   unsigned i;
   bool y = plane == VL_COMPOSITOR_PLANE_Y;

   assert(s && c && buffer);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   /* Shaders are created lazily; silently skip if creation fails. */
   if (!init_shaders(c))
      return;

   s->used_layers |= 1 << layer;
   sampler_views = buffer->get_sampler_view_components(buffer);
   for (i = 0; i < 3; ++i) {
      s->layers[layer].samplers[i] = c->sampler_linear;
      pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
   }

   calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));

   /* zw.y was set to the layer height by calc_src_and_dst() above. */
   half_a_line = 0.5f / s->layers[layer].zw.y;

   switch(deinterlace) {
   case VL_COMPOSITOR_BOB_TOP:
      /* Sample the top field only; shift src down by half a line. */
      s->layers[layer].zw.x = 0.0f;
      s->layers[layer].src.tl.y += half_a_line;
      s->layers[layer].src.br.y += half_a_line;
      if (c->pipe_gfx_supported)
          s->layers[layer].fs = (y) ? c->fs_yuv.bob.y : c->fs_yuv.bob.uv;
      if (c->pipe_cs_composit_supported)
          s->layers[layer].cs = (y) ? c->cs_yuv.progressive.y : c->cs_yuv.progressive.uv;
      break;

   case VL_COMPOSITOR_BOB_BOTTOM:
      /* Sample the bottom field only; shift src up by half a line. */
      s->layers[layer].zw.x = 1.0f;
      s->layers[layer].src.tl.y -= half_a_line;
      s->layers[layer].src.br.y -= half_a_line;
      if (c->pipe_gfx_supported)
          s->layers[layer].fs = (y) ? c->fs_yuv.bob.y : c->fs_yuv.bob.uv;
      if (c->pipe_cs_composit_supported)
          s->layers[layer].cs = (y) ? c->cs_yuv.progressive.y : c->cs_yuv.progressive.uv;
      break;

   case VL_COMPOSITOR_NONE:
      /* Progressive content: the CS path has per-plane shaders (Y/U/V/UV).
       * Without CS support, fall through to the weave shaders below. */
      if (c->pipe_cs_composit_supported) {
         if (plane == VL_COMPOSITOR_PLANE_Y)
            s->layers[layer].cs = c->cs_yuv.progressive.y;
         else if (plane == VL_COMPOSITOR_PLANE_U)
            s->layers[layer].cs = c->cs_yuv.progressive.u;
         else if (plane == VL_COMPOSITOR_PLANE_V)
            s->layers[layer].cs = c->cs_yuv.progressive.v;
         else if (plane == VL_COMPOSITOR_PLANE_UV)
            s->layers[layer].cs = c->cs_yuv.progressive.uv;
         break;
      }
      FALLTHROUGH;

   default:
      /* Weave both fields together (also used for WEAVE and
       * MOTION_ADAPTIVE modes). */
      if (c->pipe_gfx_supported)
          s->layers[layer].fs = (y) ? c->fs_yuv.weave.y : c->fs_yuv.weave.uv;
      if (c->pipe_cs_composit_supported)
          s->layers[layer].cs = (y) ? c->cs_yuv.weave.y : c->cs_yuv.weave.uv;
      break;
   }
}
403
404 static void
set_rgb_to_yuv_layer(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_sampler_view * v,struct u_rect * src_rect,struct u_rect * dst_rect,enum vl_compositor_plane plane)405 set_rgb_to_yuv_layer(struct vl_compositor_state *s, struct vl_compositor *c,
406 unsigned layer, struct pipe_sampler_view *v,
407 struct u_rect *src_rect, struct u_rect *dst_rect,
408 enum vl_compositor_plane plane)
409 {
410 bool y = plane == VL_COMPOSITOR_PLANE_Y;
411
412 assert(s && c && v);
413 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
414
415 if (!init_shaders(c))
416 return;
417
418 s->used_layers |= 1 << layer;
419
420 if (c->pipe_cs_composit_supported) {
421 if (plane == VL_COMPOSITOR_PLANE_Y)
422 s->layers[layer].cs = c->cs_rgb_yuv.y;
423 else if (plane == VL_COMPOSITOR_PLANE_U)
424 s->layers[layer].cs = c->cs_rgb_yuv.u;
425 else if (plane == VL_COMPOSITOR_PLANE_V)
426 s->layers[layer].cs = c->cs_rgb_yuv.v;
427 else if (plane == VL_COMPOSITOR_PLANE_UV)
428 s->layers[layer].cs = c->cs_rgb_yuv.uv;
429 } else if (c->pipe_gfx_supported)
430 s->layers[layer].fs = y ? c->fs_rgb_yuv.y : c->fs_rgb_yuv.uv;
431
432 s->layers[layer].samplers[0] = c->sampler_linear;
433 s->layers[layer].samplers[1] = NULL;
434 s->layers[layer].samplers[2] = NULL;
435
436 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], v);
437 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
438 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
439
440 calc_src_and_dst(&s->layers[layer], v->texture->width0, v->texture->height0,
441 src_rect ? *src_rect : default_rect(&s->layers[layer]),
442 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
443 }
444
445 void
vl_compositor_reset_dirty_area(struct u_rect * dirty)446 vl_compositor_reset_dirty_area(struct u_rect *dirty)
447 {
448 assert(dirty);
449
450 dirty->x0 = dirty->y0 = VL_COMPOSITOR_MIN_DIRTY;
451 dirty->x1 = dirty->y1 = VL_COMPOSITOR_MAX_DIRTY;
452 }
453
454 void
vl_compositor_set_clear_color(struct vl_compositor_state * s,union pipe_color_union * color)455 vl_compositor_set_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
456 {
457 assert(s);
458 assert(color);
459
460 s->clear_color = *color;
461 }
462
463 void
vl_compositor_get_clear_color(struct vl_compositor_state * s,union pipe_color_union * color)464 vl_compositor_get_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
465 {
466 assert(s);
467 assert(color);
468
469 *color = s->clear_color;
470 }
471
472 void
vl_compositor_clear_layers(struct vl_compositor_state * s)473 vl_compositor_clear_layers(struct vl_compositor_state *s)
474 {
475 unsigned i, j;
476
477 assert(s);
478 s->used_layers = 0;
479 for ( i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
480 struct vertex4f v_one = { 1.0f, 1.0f, 1.0f, 1.0f };
481 s->layers[i].clearing = i ? false : true;
482 s->layers[i].blend = NULL;
483 s->layers[i].fs = NULL;
484 s->layers[i].cs = NULL;
485 s->layers[i].viewport.scale[2] = 1;
486 s->layers[i].viewport.translate[2] = 0;
487 s->layers[i].viewport.swizzle_x = PIPE_VIEWPORT_SWIZZLE_POSITIVE_X;
488 s->layers[i].viewport.swizzle_y = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Y;
489 s->layers[i].viewport.swizzle_z = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Z;
490 s->layers[i].viewport.swizzle_w = PIPE_VIEWPORT_SWIZZLE_POSITIVE_W;
491 s->layers[i].rotate = VL_COMPOSITOR_ROTATE_0;
492
493 for ( j = 0; j < 3; j++)
494 pipe_sampler_view_reference(&s->layers[i].sampler_views[j], NULL);
495 for ( j = 0; j < 4; ++j)
496 s->layers[i].colors[j] = v_one;
497 }
498 }
499
/**
 * Tear down a compositor created with vl_compositor_init().
 *
 * Teardown runs in reverse order of initialization: buffers, then
 * shaders, then the pipe state objects; keep this order.
 */
void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}
509
510 bool
vl_compositor_set_csc_matrix(struct vl_compositor_state * s,vl_csc_matrix const * matrix,float luma_min,float luma_max)511 vl_compositor_set_csc_matrix(struct vl_compositor_state *s,
512 vl_csc_matrix const *matrix,
513 float luma_min, float luma_max)
514 {
515 assert(s);
516
517 memcpy(&s->csc_matrix, matrix, sizeof(vl_csc_matrix));
518 s->luma_min = luma_min;
519 s->luma_max = luma_max;
520
521 return true;
522 }
523
524 void
vl_compositor_set_dst_clip(struct vl_compositor_state * s,struct u_rect * dst_clip)525 vl_compositor_set_dst_clip(struct vl_compositor_state *s, struct u_rect *dst_clip)
526 {
527 assert(s);
528
529 s->scissor_valid = dst_clip != NULL;
530 if (dst_clip) {
531 s->scissor.minx = dst_clip->x0;
532 s->scissor.miny = dst_clip->y0;
533 s->scissor.maxx = dst_clip->x1;
534 s->scissor.maxy = dst_clip->y1;
535 }
536 }
537
538 void
vl_compositor_set_layer_blend(struct vl_compositor_state * s,unsigned layer,void * blend,bool is_clearing)539 vl_compositor_set_layer_blend(struct vl_compositor_state *s,
540 unsigned layer, void *blend,
541 bool is_clearing)
542 {
543 assert(s && blend);
544
545 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
546
547 s->layers[layer].clearing = is_clearing;
548 s->layers[layer].blend = blend;
549 }
550
551 void
vl_compositor_set_layer_dst_area(struct vl_compositor_state * s,unsigned layer,struct u_rect * dst_area)552 vl_compositor_set_layer_dst_area(struct vl_compositor_state *s,
553 unsigned layer, struct u_rect *dst_area)
554 {
555 assert(s);
556
557 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
558
559 s->layers[layer].viewport_valid = dst_area != NULL;
560 if (dst_area) {
561 s->layers[layer].viewport.scale[0] = dst_area->x1 - dst_area->x0;
562 s->layers[layer].viewport.scale[1] = dst_area->y1 - dst_area->y0;
563 s->layers[layer].viewport.translate[0] = dst_area->x0;
564 s->layers[layer].viewport.translate[1] = dst_area->y0;
565 }
566 }
567
/**
 * Configure a layer to composite a whole YUV video buffer (converted to
 * RGB by the shader), honoring the requested deinterlace mode when the
 * buffer is interlaced.
 *
 * For the bob modes the src rect is shifted by half a source line and
 * zw.x selects the field (0 = top, 1 = bottom); the weave shader is
 * used for NONE / MOTION_ADAPTIVE / WEAVE on interlaced content.
 */
void
vl_compositor_set_buffer_layer(struct vl_compositor_state *s,
                               struct vl_compositor *c,
                               unsigned layer,
                               struct pipe_video_buffer *buffer,
                               struct u_rect *src_rect,
                               struct u_rect *dst_rect,
                               enum vl_compositor_deinterlace deinterlace)
{
   struct pipe_sampler_view **sampler_views;
   unsigned i;

   assert(s && c && buffer);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   /* Shaders are created lazily; silently skip if creation fails. */
   if (!init_shaders(c))
      return;

   s->used_layers |= 1 << layer;
   sampler_views = buffer->get_sampler_view_components(buffer);
   for (i = 0; i < 3; ++i) {
      s->layers[layer].samplers[i] = c->sampler_linear;
      pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
   }

   calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));

   if (buffer->interlaced) {
      /* zw.y was set to the layer height by calc_src_and_dst() above. */
      float half_a_line = 0.5f / s->layers[layer].zw.y;
      switch(deinterlace) {
      case VL_COMPOSITOR_NONE:
      case VL_COMPOSITOR_MOTION_ADAPTIVE:
      case VL_COMPOSITOR_WEAVE:
         /* Weave both fields back together. */
         if (c->pipe_cs_composit_supported)
            s->layers[layer].cs = c->cs_weave_rgb;
         else if (c->pipe_gfx_supported)
            s->layers[layer].fs = c->fs_weave_rgb;
         break;

      case VL_COMPOSITOR_BOB_TOP:
         /* Top field only; shift src down by half a line. */
         s->layers[layer].zw.x = 0.0f;
         s->layers[layer].src.tl.y += half_a_line;
         s->layers[layer].src.br.y += half_a_line;
         if (c->pipe_cs_composit_supported)
            s->layers[layer].cs = c->cs_video_buffer;
         else if (c->pipe_gfx_supported)
            s->layers[layer].fs = c->fs_video_buffer;
         break;

      case VL_COMPOSITOR_BOB_BOTTOM:
         /* Bottom field only; shift src up by half a line. */
         s->layers[layer].zw.x = 1.0f;
         s->layers[layer].src.tl.y -= half_a_line;
         s->layers[layer].src.br.y -= half_a_line;
         if (c->pipe_cs_composit_supported)
            s->layers[layer].cs = c->cs_video_buffer;
         else if (c->pipe_gfx_supported)
            s->layers[layer].fs = c->fs_video_buffer;
         break;
      }

   } else {
      /* Progressive content: plain video buffer shader. */
      if (c->pipe_cs_composit_supported)
         s->layers[layer].cs = c->cs_video_buffer;
      else if (c->pipe_gfx_supported)
         s->layers[layer].fs = c->fs_video_buffer;
   }
}
638
639 void
vl_compositor_set_palette_layer(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_sampler_view * indexes,struct pipe_sampler_view * palette,struct u_rect * src_rect,struct u_rect * dst_rect,bool include_color_conversion)640 vl_compositor_set_palette_layer(struct vl_compositor_state *s,
641 struct vl_compositor *c,
642 unsigned layer,
643 struct pipe_sampler_view *indexes,
644 struct pipe_sampler_view *palette,
645 struct u_rect *src_rect,
646 struct u_rect *dst_rect,
647 bool include_color_conversion)
648 {
649 assert(s && c && indexes && palette);
650
651 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
652
653 if (!init_shaders(c))
654 return;
655
656 s->used_layers |= 1 << layer;
657
658 s->layers[layer].fs = include_color_conversion ?
659 c->fs_palette.yuv : c->fs_palette.rgb;
660
661 s->layers[layer].samplers[0] = c->sampler_linear;
662 s->layers[layer].samplers[1] = c->sampler_nearest;
663 s->layers[layer].samplers[2] = NULL;
664 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], indexes);
665 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], palette);
666 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
667 calc_src_and_dst(&s->layers[layer], indexes->texture->width0, indexes->texture->height0,
668 src_rect ? *src_rect : default_rect(&s->layers[layer]),
669 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
670 }
671
672 void
vl_compositor_set_rgba_layer(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_sampler_view * rgba,struct u_rect * src_rect,struct u_rect * dst_rect,struct vertex4f * colors)673 vl_compositor_set_rgba_layer(struct vl_compositor_state *s,
674 struct vl_compositor *c,
675 unsigned layer,
676 struct pipe_sampler_view *rgba,
677 struct u_rect *src_rect,
678 struct u_rect *dst_rect,
679 struct vertex4f *colors)
680 {
681 unsigned i;
682
683 assert(s && c && rgba);
684
685 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
686
687 if (!init_shaders(c))
688 return;
689
690 s->used_layers |= 1 << layer;
691 if (c->fs_rgba)
692 s->layers[layer].fs = c->fs_rgba;
693 else if (c->cs_rgba)
694 s->layers[layer].cs = c->cs_rgba;
695 s->layers[layer].samplers[0] = c->sampler_linear;
696 s->layers[layer].samplers[1] = NULL;
697 s->layers[layer].samplers[2] = NULL;
698 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], rgba);
699 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
700 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
701 calc_src_and_dst(&s->layers[layer], rgba->texture->width0, rgba->texture->height0,
702 src_rect ? *src_rect : default_rect(&s->layers[layer]),
703 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
704
705 if (colors)
706 for (i = 0; i < 4; ++i)
707 s->layers[layer].colors[i] = colors[i];
708 }
709
710 void
vl_compositor_set_layer_rotation(struct vl_compositor_state * s,unsigned layer,enum vl_compositor_rotation rotate)711 vl_compositor_set_layer_rotation(struct vl_compositor_state *s,
712 unsigned layer,
713 enum vl_compositor_rotation rotate)
714 {
715 assert(s);
716 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
717 s->layers[layer].rotate = rotate;
718 }
719
720 void
vl_compositor_yuv_deint_full(struct vl_compositor_state * s,struct vl_compositor * c,struct pipe_video_buffer * src,struct pipe_video_buffer * dst,struct u_rect * src_rect,struct u_rect * dst_rect,enum vl_compositor_deinterlace deinterlace)721 vl_compositor_yuv_deint_full(struct vl_compositor_state *s,
722 struct vl_compositor *c,
723 struct pipe_video_buffer *src,
724 struct pipe_video_buffer *dst,
725 struct u_rect *src_rect,
726 struct u_rect *dst_rect,
727 enum vl_compositor_deinterlace deinterlace)
728 {
729 struct pipe_surface **dst_surfaces;
730
731 dst_surfaces = dst->get_surfaces(dst);
732 vl_compositor_clear_layers(s);
733
734 set_yuv_layer(s, c, 0, src, src_rect, NULL, VL_COMPOSITOR_PLANE_Y, deinterlace);
735 vl_compositor_set_layer_dst_area(s, 0, dst_rect);
736 vl_compositor_render(s, c, dst_surfaces[0], NULL, false);
737
738 if (dst_surfaces[1]) {
739 dst_rect->x0 = util_format_get_plane_width(dst->buffer_format, 1, dst_rect->x0);
740 dst_rect->x1 = util_format_get_plane_width(dst->buffer_format, 1, dst_rect->x1);
741 dst_rect->y0 = util_format_get_plane_height(dst->buffer_format, 1, dst_rect->y0);
742 dst_rect->y1 = util_format_get_plane_height(dst->buffer_format, 1, dst_rect->y1);
743 set_yuv_layer(s, c, 0, src, src_rect, NULL, dst_surfaces[2] ? VL_COMPOSITOR_PLANE_U :
744 VL_COMPOSITOR_PLANE_UV, deinterlace);
745 vl_compositor_set_layer_dst_area(s, 0, dst_rect);
746 vl_compositor_render(s, c, dst_surfaces[1], NULL, false);
747
748 if (dst_surfaces[2]) {
749 set_yuv_layer(s, c, 0, src, src_rect, NULL, VL_COMPOSITOR_PLANE_V, deinterlace);
750 vl_compositor_set_layer_dst_area(s, 0, dst_rect);
751 vl_compositor_render(s, c, dst_surfaces[2], NULL, false);
752 }
753 }
754
755 s->pipe->flush(s->pipe, NULL, 0);
756 }
757
758 void
vl_compositor_convert_rgb_to_yuv(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_resource * src_res,struct pipe_video_buffer * dst,struct u_rect * src_rect,struct u_rect * dst_rect)759 vl_compositor_convert_rgb_to_yuv(struct vl_compositor_state *s,
760 struct vl_compositor *c,
761 unsigned layer,
762 struct pipe_resource *src_res,
763 struct pipe_video_buffer *dst,
764 struct u_rect *src_rect,
765 struct u_rect *dst_rect)
766 {
767 struct pipe_sampler_view *sv, sv_templ;
768 struct pipe_surface **dst_surfaces;
769
770 dst_surfaces = dst->get_surfaces(dst);
771
772 memset(&sv_templ, 0, sizeof(sv_templ));
773 u_sampler_view_default_template(&sv_templ, src_res, src_res->format);
774 sv = s->pipe->create_sampler_view(s->pipe, src_res, &sv_templ);
775
776 vl_compositor_clear_layers(s);
777
778 set_rgb_to_yuv_layer(s, c, 0, sv, src_rect, NULL, VL_COMPOSITOR_PLANE_Y);
779 vl_compositor_set_layer_dst_area(s, 0, dst_rect);
780 vl_compositor_render(s, c, dst_surfaces[0], NULL, false);
781
782 if (dst_surfaces[1]) {
783 dst_rect->x0 = util_format_get_plane_width(dst->buffer_format, 1, dst_rect->x0);
784 dst_rect->x1 = util_format_get_plane_width(dst->buffer_format, 1, dst_rect->x1);
785 dst_rect->y0 = util_format_get_plane_height(dst->buffer_format, 1, dst_rect->y0);
786 dst_rect->y1 = util_format_get_plane_height(dst->buffer_format, 1, dst_rect->y1);
787 set_rgb_to_yuv_layer(s, c, 0, sv, src_rect, NULL, dst_surfaces[2] ? VL_COMPOSITOR_PLANE_U :
788 VL_COMPOSITOR_PLANE_UV);
789 vl_compositor_set_layer_dst_area(s, 0, dst_rect);
790 vl_compositor_render(s, c, dst_surfaces[1], NULL, false);
791
792 if (dst_surfaces[2]) {
793 set_rgb_to_yuv_layer(s, c, 0, sv, src_rect, NULL, VL_COMPOSITOR_PLANE_V);
794 vl_compositor_set_layer_dst_area(s, 0, dst_rect);
795 vl_compositor_render(s, c, dst_surfaces[2], NULL, false);
796 }
797 }
798
799 pipe_sampler_view_reference(&sv, NULL);
800
801 s->pipe->flush(s->pipe, NULL, 0);
802 }
803
804 void
vl_compositor_render(struct vl_compositor_state * s,struct vl_compositor * c,struct pipe_surface * dst_surface,struct u_rect * dirty_area,bool clear_dirty)805 vl_compositor_render(struct vl_compositor_state *s,
806 struct vl_compositor *c,
807 struct pipe_surface *dst_surface,
808 struct u_rect *dirty_area,
809 bool clear_dirty)
810 {
811 assert(s);
812
813 if (s->layers->cs)
814 vl_compositor_cs_render(s, c, dst_surface, dirty_area, clear_dirty);
815 else if (s->layers->fs)
816 vl_compositor_gfx_render(s, c, dst_surface, dirty_area, clear_dirty);
817 else
818 debug_warning("Hardware don't support.\n");;
819 }
820
821 bool
vl_compositor_init(struct vl_compositor * c,struct pipe_context * pipe,bool compute_only)822 vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe, bool compute_only)
823 {
824 assert(c);
825
826 memset(c, 0, sizeof(*c));
827
828 c->pipe_cs_composit_supported = compute_only || pipe->screen->caps.prefer_compute_for_multimedia;
829 c->pipe_gfx_supported = !compute_only && pipe->screen->caps.graphics;
830 c->pipe = pipe;
831
832 c->deinterlace = VL_COMPOSITOR_NONE;
833
834 if (!init_pipe_state(c)) {
835 return false;
836 }
837
838 if (!init_buffers(c)) {
839 cleanup_shaders(c);
840 cleanup_pipe_state(c);
841 return false;
842 }
843
844 return true;
845 }
846
847 bool
vl_compositor_init_state(struct vl_compositor_state * s,struct pipe_context * pipe)848 vl_compositor_init_state(struct vl_compositor_state *s, struct pipe_context *pipe)
849 {
850 vl_csc_matrix csc_matrix;
851
852 assert(s);
853
854 memset(s, 0, sizeof(*s));
855
856 s->pipe = pipe;
857
858 s->clear_color.f[0] = s->clear_color.f[1] = 0.0f;
859 s->clear_color.f[2] = s->clear_color.f[3] = 0.0f;
860
861 /*
862 * Create our fragment shader's constant buffer
863 * Const buffer contains the color conversion matrix and bias vectors
864 */
865 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
866 s->shader_params = pipe_buffer_create_const0
867 (
868 pipe->screen,
869 PIPE_BIND_CONSTANT_BUFFER,
870 PIPE_USAGE_DEFAULT,
871 sizeof(csc_matrix) + 16*sizeof(float) + 2*sizeof(int)
872 );
873
874 if (!s->shader_params)
875 return false;
876
877 vl_compositor_clear_layers(s);
878
879 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, &csc_matrix);
880 if (!vl_compositor_set_csc_matrix(s, (const vl_csc_matrix *)&csc_matrix, 1.0f, 0.0f))
881 return false;
882
883 return true;
884 }
885
/**
 * Tear down a state created with vl_compositor_init_state().
 *
 * Clearing the layers first drops all sampler view references before
 * the shader parameter buffer is released.
 */
void
vl_compositor_cleanup_state(struct vl_compositor_state *s)
{
   assert(s);

   vl_compositor_clear_layers(s);
   pipe_resource_reference(&s->shader_params, NULL);
}
894