1 /**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <assert.h>
29
30 #include "pipe/p_compiler.h"
31 #include "pipe/p_context.h"
32
33 #include "util/u_memory.h"
34 #include "util/u_draw.h"
35 #include "util/u_surface.h"
36 #include "util/u_upload_mgr.h"
37
38 #include "tgsi/tgsi_ureg.h"
39
40 #include "vl_csc.h"
41 #include "vl_types.h"
42 #include "vl_compositor.h"
43
44 #define MIN_DIRTY (0)
45 #define MAX_DIRTY (1 << 15)
46
/* Vertex shader output slots.
 *
 * VS_O_VPOS, VS_O_COLOR and VS_O_VTEX are all 0 on purpose: each is the
 * semantic *index* within a different TGSI semantic name (POSITION, COLOR
 * and GENERIC respectively), so they never clash.  VS_O_VTOP (1) and
 * VS_O_VBOTTOM (2) continue the GENERIC indices and carry the per-field
 * texture coordinates used by the weave deinterlacing shaders.
 */
enum VS_OUTPUT
{
   VS_O_VPOS = 0,
   VS_O_COLOR = 0,
   VS_O_VTEX = 0,
   VS_O_VTOP,     /* GENERIC index 1: top-field texcoords */
   VS_O_VBOTTOM,  /* GENERIC index 2: bottom-field texcoords */
};
55
56 static void *
create_vert_shader(struct vl_compositor * c)57 create_vert_shader(struct vl_compositor *c)
58 {
59 struct ureg_program *shader;
60 struct ureg_src vpos, vtex, color;
61 struct ureg_dst tmp;
62 struct ureg_dst o_vpos, o_vtex, o_color;
63 struct ureg_dst o_vtop, o_vbottom;
64
65 shader = ureg_create(PIPE_SHADER_VERTEX);
66 if (!shader)
67 return false;
68
69 vpos = ureg_DECL_vs_input(shader, 0);
70 vtex = ureg_DECL_vs_input(shader, 1);
71 color = ureg_DECL_vs_input(shader, 2);
72 tmp = ureg_DECL_temporary(shader);
73 o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
74 o_color = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR);
75 o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX);
76 o_vtop = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
77 o_vbottom = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);
78
79 /*
80 * o_vpos = vpos
81 * o_vtex = vtex
82 * o_color = color
83 */
84 ureg_MOV(shader, o_vpos, vpos);
85 ureg_MOV(shader, o_vtex, vtex);
86 ureg_MOV(shader, o_color, color);
87
88 /*
89 * tmp.x = vtex.w / 2
90 * tmp.y = vtex.w / 4
91 *
92 * o_vtop.x = vtex.x
93 * o_vtop.y = vtex.y * tmp.x + 0.25f
94 * o_vtop.z = vtex.y * tmp.y + 0.25f
95 * o_vtop.w = 1 / tmp.x
96 *
97 * o_vbottom.x = vtex.x
98 * o_vbottom.y = vtex.y * tmp.x - 0.25f
99 * o_vbottom.z = vtex.y * tmp.y - 0.25f
100 * o_vbottom.w = 1 / tmp.y
101 */
102 ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X),
103 ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.5f));
104 ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
105 ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.25f));
106
107 ureg_MOV(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_X), vtex);
108 ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
109 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, 0.25f));
110 ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
111 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.25f));
112 ureg_RCP(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_W),
113 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
114
115 ureg_MOV(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_X), vtex);
116 ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
117 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, -0.25f));
118 ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
119 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, -0.25f));
120 ureg_RCP(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_W),
121 ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
122
123 ureg_END(shader);
124
125 return ureg_create_shader_and_destroy(shader, c->pipe);
126 }
127
/**
 * Emit fragment shader code that weaves the two interlaced fields back
 * into a progressive value, written to 'fragment'.
 *
 * The VS_O_VTOP / VS_O_VBOTTOM inputs carry per-field texture coordinates
 * precomputed by create_vert_shader; samplers 0..2 are the three video
 * planes as 2D arrays (presumably Y, Cb, Cr — confirm against callers).
 * The result is a linear interpolation between the texels fetched from the
 * top and bottom field.
 */
static void
create_frag_shader_weave(struct ureg_program *shader, struct ureg_dst fragment)
{
   struct ureg_src i_tc[2];      /* [0] = top-field tc, [1] = bottom-field tc */
   struct ureg_src sampler[3];   /* one sampler per plane */
   struct ureg_dst t_tc[2];
   struct ureg_dst t_texel[2];
   unsigned i, j;

   i_tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   i_tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   for (i = 0; i < 3; ++i)
      sampler[i] = ureg_DECL_sampler(shader, i);

   for (i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      t_texel[i] = ureg_DECL_temporary(shader);
   }

   /* calculate the texture offsets
    * t_tc.x = i_tc.x
    * t_tc.y = (round(i_tc.y - 0.5) + 0.5) / height * 2
    * t_tc.w selects the field / array layer: 0 = top, 1 = bottom
    */
   for (i = 0; i < 2; ++i) {
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_X), i_tc[i]);
      ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               i_tc[i], ureg_imm1f(shader, -0.5f));
      ureg_ROUND(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ), ureg_src(t_tc[i]));
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_W),
               ureg_imm1f(shader, i ? 1.0f : 0.0f));
      ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               ureg_src(t_tc[i]), ureg_imm1f(shader, 0.5f));
      /* i_tc[0].w / i_tc[1].w hold 1/height for luma resp. chroma,
       * computed in the vertex shader (the RCP on o_vtop.w / o_vbottom.w). */
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Y),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[0], TGSI_SWIZZLE_W));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Z),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[1], TGSI_SWIZZLE_W));
   }

   /* fetch the texels
    * texel[0..1].x = tex(t_tc[0..1][0])
    * texel[0..1].y = tex(t_tc[0..1][1])
    * texel[0..1].z = tex(t_tc[0..1][2])
    */
   for (i = 0; i < 2; ++i)
      for (j = 0; j < 3; ++j) {
         /* y coordinate comes from .y for plane 0 and .z for planes 1/2
          * (chroma rows); .w (the field) becomes the array-layer coord. */
         struct ureg_src src = ureg_swizzle(ureg_src(t_tc[i]),
            TGSI_SWIZZLE_X, j ? TGSI_SWIZZLE_Z : TGSI_SWIZZLE_Y, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);

         ureg_TEX(shader, ureg_writemask(t_texel[i], TGSI_WRITEMASK_X << j),
                  TGSI_TEXTURE_2D_ARRAY, src, sampler[j]);
      }

   /* calculate linear interpolation factor
    * factor = |round(i_tc.y) - i_tc.y| * 2
    */
   ureg_ROUND(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ), i_tc[0]);
   ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_src(t_tc[0]), ureg_negate(i_tc[0]));
   ureg_MUL(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_abs(ureg_src(t_tc[0])), ureg_imm1f(shader, 2.0f));
   ureg_LRP(shader, fragment, ureg_swizzle(ureg_src(t_tc[0]),
            TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z),
            ureg_src(t_texel[0]), ureg_src(t_texel[1]));

   for (i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_texel[i]);
      ureg_release_temporary(shader, t_tc[i]);
   }
}
198
/**
 * Append color-space conversion code:
 *   fragment.xyz = csc_matrix * texel     (matrix rows in constants 0..2)
 *   fragment.w   = luma-key alpha          (bounds in constant 3: x=min, y=max)
 *
 * texel.w is forced to 1.0 first so the 4th matrix column acts as a bias.
 * The alpha is 1.0 when the keyed value lies outside (min, max] — i.e. the
 * pixel stays opaque — and 0.0 when it falls inside the keyed range.
 *
 * NOTE(review): the key value is taken from texel.z; confirm which plane
 * ends up in the .z channel for all callers.
 */
static void
create_frag_shader_csc(struct ureg_program *shader, struct ureg_dst texel,
                       struct ureg_dst fragment)
{
   struct ureg_src csc[3];
   struct ureg_src lumakey;
   struct ureg_dst temp[2];
   unsigned i;

   for (i = 0; i < 3; ++i)
      csc[i] = ureg_DECL_constant(shader, i);

   lumakey = ureg_DECL_constant(shader, 3);

   for (i = 0; i < 2; ++i)
      temp[i] = ureg_DECL_temporary(shader);

   /* Homogeneous coordinate for the 4x4 matrix multiply. */
   ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W),
            ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i],
               ureg_src(texel));

   /* fragment.w = (texel.z <= lumakey.x) || (texel.z > lumakey.y) */
   ureg_MOV(shader, ureg_writemask(temp[0], TGSI_WRITEMASK_W),
            ureg_scalar(ureg_src(texel), TGSI_SWIZZLE_Z));
   ureg_SLE(shader, ureg_writemask(temp[1],TGSI_WRITEMASK_W),
            ureg_src(temp[0]), ureg_scalar(lumakey, TGSI_SWIZZLE_X));
   ureg_SGT(shader, ureg_writemask(temp[0],TGSI_WRITEMASK_W),
            ureg_src(temp[0]), ureg_scalar(lumakey, TGSI_SWIZZLE_Y));
   ureg_MAX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
            ureg_src(temp[0]), ureg_src(temp[1]));

   for (i = 0; i < 2; ++i)
      ureg_release_temporary(shader, temp[i]);
}
235
236 static void *
create_frag_shader_video_buffer(struct vl_compositor * c)237 create_frag_shader_video_buffer(struct vl_compositor *c)
238 {
239 struct ureg_program *shader;
240 struct ureg_src tc;
241 struct ureg_src sampler[3];
242 struct ureg_dst texel;
243 struct ureg_dst fragment;
244 unsigned i;
245
246 shader = ureg_create(PIPE_SHADER_FRAGMENT);
247 if (!shader)
248 return false;
249
250 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
251 for (i = 0; i < 3; ++i)
252 sampler[i] = ureg_DECL_sampler(shader, i);
253
254 texel = ureg_DECL_temporary(shader);
255 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
256
257 /*
258 * texel.xyz = tex(tc, sampler[i])
259 * fragment = csc * texel
260 */
261 for (i = 0; i < 3; ++i)
262 ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D_ARRAY, tc, sampler[i]);
263
264 create_frag_shader_csc(shader, texel, fragment);
265
266 ureg_release_temporary(shader, texel);
267 ureg_END(shader);
268
269 return ureg_create_shader_and_destroy(shader, c->pipe);
270 }
271
272 static void *
create_frag_shader_weave_rgb(struct vl_compositor * c)273 create_frag_shader_weave_rgb(struct vl_compositor *c)
274 {
275 struct ureg_program *shader;
276 struct ureg_dst texel, fragment;
277
278 shader = ureg_create(PIPE_SHADER_FRAGMENT);
279 if (!shader)
280 return false;
281
282 texel = ureg_DECL_temporary(shader);
283 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
284
285 create_frag_shader_weave(shader, texel);
286 create_frag_shader_csc(shader, texel, fragment);
287
288 ureg_release_temporary(shader, texel);
289
290 ureg_END(shader);
291
292 return ureg_create_shader_and_destroy(shader, c->pipe);
293 }
294
295 static void *
create_frag_shader_weave_yuv(struct vl_compositor * c,bool y)296 create_frag_shader_weave_yuv(struct vl_compositor *c, bool y)
297 {
298 struct ureg_program *shader;
299 struct ureg_dst texel, fragment;
300
301 shader = ureg_create(PIPE_SHADER_FRAGMENT);
302 if (!shader)
303 return false;
304
305 texel = ureg_DECL_temporary(shader);
306 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
307
308 create_frag_shader_weave(shader, texel);
309
310 if (y)
311 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), ureg_src(texel));
312 else
313 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XY),
314 ureg_swizzle(ureg_src(texel), TGSI_SWIZZLE_Y,
315 TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W));
316
317 ureg_release_temporary(shader, texel);
318
319 ureg_END(shader);
320
321 return ureg_create_shader_and_destroy(shader, c->pipe);
322 }
323
324 static void *
create_frag_shader_palette(struct vl_compositor * c,bool include_cc)325 create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
326 {
327 struct ureg_program *shader;
328 struct ureg_src csc[3];
329 struct ureg_src tc;
330 struct ureg_src sampler;
331 struct ureg_src palette;
332 struct ureg_dst texel;
333 struct ureg_dst fragment;
334 unsigned i;
335
336 shader = ureg_create(PIPE_SHADER_FRAGMENT);
337 if (!shader)
338 return false;
339
340 for (i = 0; include_cc && i < 3; ++i)
341 csc[i] = ureg_DECL_constant(shader, i);
342
343 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
344 sampler = ureg_DECL_sampler(shader, 0);
345 palette = ureg_DECL_sampler(shader, 1);
346
347 texel = ureg_DECL_temporary(shader);
348 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
349
350 /*
351 * texel = tex(tc, sampler)
352 * fragment.xyz = tex(texel, palette) * csc
353 * fragment.a = texel.a
354 */
355 ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
356 ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));
357
358 if (include_cc) {
359 ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
360 for (i = 0; i < 3; ++i)
361 ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
362 } else {
363 ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
364 TGSI_TEXTURE_1D, ureg_src(texel), palette);
365 }
366
367 ureg_release_temporary(shader, texel);
368 ureg_END(shader);
369
370 return ureg_create_shader_and_destroy(shader, c->pipe);
371 }
372
373 static void *
create_frag_shader_rgba(struct vl_compositor * c)374 create_frag_shader_rgba(struct vl_compositor *c)
375 {
376 struct ureg_program *shader;
377 struct ureg_src tc, color, sampler;
378 struct ureg_dst texel, fragment;
379
380 shader = ureg_create(PIPE_SHADER_FRAGMENT);
381 if (!shader)
382 return false;
383
384 tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
385 color = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR, TGSI_INTERPOLATE_LINEAR);
386 sampler = ureg_DECL_sampler(shader, 0);
387 texel = ureg_DECL_temporary(shader);
388 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
389
390 /*
391 * fragment = tex(tc, sampler)
392 */
393 ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
394 ureg_MUL(shader, fragment, ureg_src(texel), color);
395 ureg_END(shader);
396
397 return ureg_create_shader_and_destroy(shader, c->pipe);
398 }
399
400 static bool
init_shaders(struct vl_compositor * c)401 init_shaders(struct vl_compositor *c)
402 {
403 assert(c);
404
405 c->vs = create_vert_shader(c);
406 if (!c->vs) {
407 debug_printf("Unable to create vertex shader.\n");
408 return false;
409 }
410
411 c->fs_video_buffer = create_frag_shader_video_buffer(c);
412 if (!c->fs_video_buffer) {
413 debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
414 return false;
415 }
416
417 c->fs_weave_rgb = create_frag_shader_weave_rgb(c);
418 if (!c->fs_weave_rgb) {
419 debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
420 return false;
421 }
422
423 c->fs_weave_yuv.y = create_frag_shader_weave_yuv(c, true);
424 c->fs_weave_yuv.uv = create_frag_shader_weave_yuv(c, false);
425 if (!c->fs_weave_yuv.y || !c->fs_weave_yuv.uv) {
426 debug_printf("Unable to create YCbCr i-to-YCbCr p weave fragment shader.\n");
427 return false;
428 }
429
430 c->fs_palette.yuv = create_frag_shader_palette(c, true);
431 if (!c->fs_palette.yuv) {
432 debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
433 return false;
434 }
435
436 c->fs_palette.rgb = create_frag_shader_palette(c, false);
437 if (!c->fs_palette.rgb) {
438 debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
439 return false;
440 }
441
442 c->fs_rgba = create_frag_shader_rgba(c);
443 if (!c->fs_rgba) {
444 debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
445 return false;
446 }
447
448 return true;
449 }
450
cleanup_shaders(struct vl_compositor * c)451 static void cleanup_shaders(struct vl_compositor *c)
452 {
453 assert(c);
454
455 c->pipe->delete_vs_state(c->pipe, c->vs);
456 c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
457 c->pipe->delete_fs_state(c->pipe, c->fs_weave_rgb);
458 c->pipe->delete_fs_state(c->pipe, c->fs_weave_yuv.y);
459 c->pipe->delete_fs_state(c->pipe, c->fs_weave_yuv.uv);
460 c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
461 c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
462 c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
463 }
464
465 static bool
init_pipe_state(struct vl_compositor * c)466 init_pipe_state(struct vl_compositor *c)
467 {
468 struct pipe_rasterizer_state rast;
469 struct pipe_sampler_state sampler;
470 struct pipe_blend_state blend;
471 struct pipe_depth_stencil_alpha_state dsa;
472 unsigned i;
473
474 assert(c);
475
476 c->fb_state.nr_cbufs = 1;
477 c->fb_state.zsbuf = NULL;
478
479 memset(&sampler, 0, sizeof(sampler));
480 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
481 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
482 sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
483 sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
484 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
485 sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
486 sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
487 sampler.compare_func = PIPE_FUNC_ALWAYS;
488 sampler.normalized_coords = 1;
489
490 c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);
491
492 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
493 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
494 c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);
495
496 memset(&blend, 0, sizeof blend);
497 blend.independent_blend_enable = 0;
498 blend.rt[0].blend_enable = 0;
499 blend.logicop_enable = 0;
500 blend.logicop_func = PIPE_LOGICOP_CLEAR;
501 blend.rt[0].colormask = PIPE_MASK_RGBA;
502 blend.dither = 0;
503 c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);
504
505 blend.rt[0].blend_enable = 1;
506 blend.rt[0].rgb_func = PIPE_BLEND_ADD;
507 blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
508 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
509 blend.rt[0].alpha_func = PIPE_BLEND_ADD;
510 blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
511 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
512 c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);
513
514 memset(&rast, 0, sizeof rast);
515 rast.flatshade = 0;
516 rast.front_ccw = 1;
517 rast.cull_face = PIPE_FACE_NONE;
518 rast.fill_back = PIPE_POLYGON_MODE_FILL;
519 rast.fill_front = PIPE_POLYGON_MODE_FILL;
520 rast.scissor = 1;
521 rast.line_width = 1;
522 rast.point_size_per_vertex = 1;
523 rast.offset_units = 1;
524 rast.offset_scale = 1;
525 rast.half_pixel_center = 1;
526 rast.bottom_edge_rule = 1;
527 rast.depth_clip = 1;
528
529 c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);
530
531 memset(&dsa, 0, sizeof dsa);
532 dsa.depth.enabled = 0;
533 dsa.depth.writemask = 0;
534 dsa.depth.func = PIPE_FUNC_ALWAYS;
535 for (i = 0; i < 2; ++i) {
536 dsa.stencil[i].enabled = 0;
537 dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
538 dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
539 dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
540 dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
541 dsa.stencil[i].valuemask = 0;
542 dsa.stencil[i].writemask = 0;
543 }
544 dsa.alpha.enabled = 0;
545 dsa.alpha.func = PIPE_FUNC_ALWAYS;
546 dsa.alpha.ref_value = 0;
547 c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
548 c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);
549
550 return true;
551 }
552
cleanup_pipe_state(struct vl_compositor * c)553 static void cleanup_pipe_state(struct vl_compositor *c)
554 {
555 assert(c);
556
557 /* Asserted in softpipe_delete_fs_state() for some reason */
558 c->pipe->bind_vs_state(c->pipe, NULL);
559 c->pipe->bind_fs_state(c->pipe, NULL);
560
561 c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
562 c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
563 c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
564 c->pipe->delete_blend_state(c->pipe, c->blend_clear);
565 c->pipe->delete_blend_state(c->pipe, c->blend_add);
566 c->pipe->delete_rasterizer_state(c->pipe, c->rast);
567 }
568
569 static bool
init_buffers(struct vl_compositor * c)570 init_buffers(struct vl_compositor *c)
571 {
572 struct pipe_vertex_element vertex_elems[3];
573
574 assert(c);
575
576 /*
577 * Create our vertex buffer and vertex buffer elements
578 */
579 c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
580 c->vertex_buf.buffer_offset = 0;
581 c->vertex_buf.buffer = NULL;
582
583 vertex_elems[0].src_offset = 0;
584 vertex_elems[0].instance_divisor = 0;
585 vertex_elems[0].vertex_buffer_index = 0;
586 vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
587 vertex_elems[1].src_offset = sizeof(struct vertex2f);
588 vertex_elems[1].instance_divisor = 0;
589 vertex_elems[1].vertex_buffer_index = 0;
590 vertex_elems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
591 vertex_elems[2].src_offset = sizeof(struct vertex2f) + sizeof(struct vertex4f);
592 vertex_elems[2].instance_divisor = 0;
593 vertex_elems[2].vertex_buffer_index = 0;
594 vertex_elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
595 c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 3, vertex_elems);
596
597 return true;
598 }
599
/* Release the vertex-elements CSO and drop our vertex buffer reference. */
static void
cleanup_buffers(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
}
608
609 static inline struct u_rect
default_rect(struct vl_compositor_layer * layer)610 default_rect(struct vl_compositor_layer *layer)
611 {
612 struct pipe_resource *res = layer->sampler_views[0]->texture;
613 struct u_rect rect = { 0, res->width0, 0, res->height0 * res->array_size };
614 return rect;
615 }
616
617 static inline struct vertex2f
calc_topleft(struct vertex2f size,struct u_rect rect)618 calc_topleft(struct vertex2f size, struct u_rect rect)
619 {
620 struct vertex2f res = { rect.x0 / size.x, rect.y0 / size.y };
621 return res;
622 }
623
624 static inline struct vertex2f
calc_bottomright(struct vertex2f size,struct u_rect rect)625 calc_bottomright(struct vertex2f size, struct u_rect rect)
626 {
627 struct vertex2f res = { rect.x1 / size.x, rect.y1 / size.y };
628 return res;
629 }
630
631 static inline void
calc_src_and_dst(struct vl_compositor_layer * layer,unsigned width,unsigned height,struct u_rect src,struct u_rect dst)632 calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
633 struct u_rect src, struct u_rect dst)
634 {
635 struct vertex2f size = { width, height };
636
637 layer->src.tl = calc_topleft(size, src);
638 layer->src.br = calc_bottomright(size, src);
639 layer->dst.tl = calc_topleft(size, dst);
640 layer->dst.br = calc_bottomright(size, dst);
641 layer->zw.x = 0.0f;
642 layer->zw.y = size.y;
643 }
644
645 static void
gen_rect_verts(struct vertex2f * vb,struct vl_compositor_layer * layer)646 gen_rect_verts(struct vertex2f *vb, struct vl_compositor_layer *layer)
647 {
648 struct vertex2f tl, tr, br, bl;
649
650 assert(vb && layer);
651
652 switch (layer->rotate) {
653 default:
654 case VL_COMPOSITOR_ROTATE_0:
655 tl = layer->dst.tl;
656 tr.x = layer->dst.br.x;
657 tr.y = layer->dst.tl.y;
658 br = layer->dst.br;
659 bl.x = layer->dst.tl.x;
660 bl.y = layer->dst.br.y;
661 break;
662 case VL_COMPOSITOR_ROTATE_90:
663 tl.x = layer->dst.br.x;
664 tl.y = layer->dst.tl.y;
665 tr = layer->dst.br;
666 br.x = layer->dst.tl.x;
667 br.y = layer->dst.br.y;
668 bl = layer->dst.tl;
669 break;
670 case VL_COMPOSITOR_ROTATE_180:
671 tl = layer->dst.br;
672 tr.x = layer->dst.tl.x;
673 tr.y = layer->dst.br.y;
674 br = layer->dst.tl;
675 bl.x = layer->dst.br.x;
676 bl.y = layer->dst.tl.y;
677 break;
678 case VL_COMPOSITOR_ROTATE_270:
679 tl.x = layer->dst.tl.x;
680 tl.y = layer->dst.br.y;
681 tr = layer->dst.tl;
682 br.x = layer->dst.br.x;
683 br.y = layer->dst.tl.y;
684 bl = layer->dst.br;
685 break;
686 }
687
688 vb[ 0].x = tl.x;
689 vb[ 0].y = tl.y;
690 vb[ 1].x = layer->src.tl.x;
691 vb[ 1].y = layer->src.tl.y;
692 vb[ 2] = layer->zw;
693 vb[ 3].x = layer->colors[0].x;
694 vb[ 3].y = layer->colors[0].y;
695 vb[ 4].x = layer->colors[0].z;
696 vb[ 4].y = layer->colors[0].w;
697
698 vb[ 5].x = tr.x;
699 vb[ 5].y = tr.y;
700 vb[ 6].x = layer->src.br.x;
701 vb[ 6].y = layer->src.tl.y;
702 vb[ 7] = layer->zw;
703 vb[ 8].x = layer->colors[1].x;
704 vb[ 8].y = layer->colors[1].y;
705 vb[ 9].x = layer->colors[1].z;
706 vb[ 9].y = layer->colors[1].w;
707
708 vb[10].x = br.x;
709 vb[10].y = br.y;
710 vb[11].x = layer->src.br.x;
711 vb[11].y = layer->src.br.y;
712 vb[12] = layer->zw;
713 vb[13].x = layer->colors[2].x;
714 vb[13].y = layer->colors[2].y;
715 vb[14].x = layer->colors[2].z;
716 vb[14].y = layer->colors[2].w;
717
718 vb[15].x = bl.x;
719 vb[15].y = bl.y;
720 vb[16].x = layer->src.tl.x;
721 vb[16].y = layer->src.br.y;
722 vb[17] = layer->zw;
723 vb[18].x = layer->colors[3].x;
724 vb[18].y = layer->colors[3].y;
725 vb[19].x = layer->colors[3].z;
726 vb[19].y = layer->colors[3].w;
727 }
728
729 static inline struct u_rect
calc_drawn_area(struct vl_compositor_state * s,struct vl_compositor_layer * layer)730 calc_drawn_area(struct vl_compositor_state *s, struct vl_compositor_layer *layer)
731 {
732 struct vertex2f tl, br;
733 struct u_rect result;
734
735 assert(s && layer);
736
737 // rotate
738 switch (layer->rotate) {
739 default:
740 case VL_COMPOSITOR_ROTATE_0:
741 tl = layer->dst.tl;
742 br = layer->dst.br;
743 break;
744 case VL_COMPOSITOR_ROTATE_90:
745 tl.x = layer->dst.br.x;
746 tl.y = layer->dst.tl.y;
747 br.x = layer->dst.tl.x;
748 br.y = layer->dst.br.y;
749 break;
750 case VL_COMPOSITOR_ROTATE_180:
751 tl = layer->dst.br;
752 br = layer->dst.tl;
753 break;
754 case VL_COMPOSITOR_ROTATE_270:
755 tl.x = layer->dst.tl.x;
756 tl.y = layer->dst.br.y;
757 br.x = layer->dst.br.x;
758 br.y = layer->dst.tl.y;
759 break;
760 }
761
762 // scale
763 result.x0 = tl.x * layer->viewport.scale[0] + layer->viewport.translate[0];
764 result.y0 = tl.y * layer->viewport.scale[1] + layer->viewport.translate[1];
765 result.x1 = br.x * layer->viewport.scale[0] + layer->viewport.translate[0];
766 result.y1 = br.y * layer->viewport.scale[1] + layer->viewport.translate[1];
767
768 // and clip
769 result.x0 = MAX2(result.x0, s->scissor.minx);
770 result.y0 = MAX2(result.y0, s->scissor.miny);
771 result.x1 = MIN2(result.x1, s->scissor.maxx);
772 result.y1 = MIN2(result.y1, s->scissor.maxy);
773 return result;
774 }
775
776 static void
gen_vertex_data(struct vl_compositor * c,struct vl_compositor_state * s,struct u_rect * dirty)777 gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
778 {
779 struct vertex2f *vb;
780 unsigned i;
781
782 assert(c);
783
784 /* Allocate new memory for vertices. */
785 u_upload_alloc(c->upload, 0,
786 c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
787 4, /* alignment */
788 &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer,
789 (void**)&vb);
790
791 for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
792 if (s->used_layers & (1 << i)) {
793 struct vl_compositor_layer *layer = &s->layers[i];
794 gen_rect_verts(vb, layer);
795 vb += 20;
796
797 if (!layer->viewport_valid) {
798 layer->viewport.scale[0] = c->fb_state.width;
799 layer->viewport.scale[1] = c->fb_state.height;
800 layer->viewport.translate[0] = 0;
801 layer->viewport.translate[1] = 0;
802 }
803
804 if (dirty && layer->clearing) {
805 struct u_rect drawn = calc_drawn_area(s, layer);
806 if (
807 dirty->x0 >= drawn.x0 &&
808 dirty->y0 >= drawn.y0 &&
809 dirty->x1 <= drawn.x1 &&
810 dirty->y1 <= drawn.y1) {
811
812 // We clear the dirty area anyway, no need for clear_render_target
813 dirty->x0 = dirty->y0 = MAX_DIRTY;
814 dirty->x1 = dirty->y1 = MIN_DIRTY;
815 }
816 }
817 }
818 }
819
820 u_upload_unmap(c->upload);
821 }
822
/* Bind per-layer state and draw one quad per active layer, accumulating
 * the covered screen area into 'dirty' (if given). */
static void
draw_layers(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   unsigned vb_index, i;

   assert(c);

   for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         struct pipe_sampler_view **samplers = &layer->sampler_views[0];
         /* Count the leading non-NULL views: 1..3 planes. */
         unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;
         /* Per-layer blend override if set; otherwise the bottom layer
          * overwrites (blend_clear) and upper layers alpha-blend on top. */
         void *blend = layer->blend ? layer->blend : i ? c->blend_add : c->blend_clear;

         c->pipe->bind_blend_state(c->pipe, blend);
         c->pipe->set_viewport_states(c->pipe, 0, 1, &layer->viewport);
         c->pipe->bind_fs_state(c->pipe, layer->fs);
         c->pipe->bind_sampler_states(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                      num_sampler_views, layer->samplers);
         c->pipe->set_sampler_views(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                    num_sampler_views, samplers);

         /* Each active layer occupies 4 consecutive vertices (one quad)
          * in the buffer filled by gen_vertex_data(). */
         util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
         vb_index++;

         if (dirty) {
            // Remember the currently drawn area as dirty for the next draw command
            struct u_rect drawn = calc_drawn_area(s, layer);
            dirty->x0 = MIN2(drawn.x0, dirty->x0);
            dirty->y0 = MIN2(drawn.y0, dirty->y0);
            dirty->x1 = MAX2(drawn.x1, dirty->x1);
            dirty->y1 = MAX2(drawn.y1, dirty->y1);
         }
      }
   }
}
859
860 void
vl_compositor_reset_dirty_area(struct u_rect * dirty)861 vl_compositor_reset_dirty_area(struct u_rect *dirty)
862 {
863 assert(dirty);
864
865 dirty->x0 = dirty->y0 = MIN_DIRTY;
866 dirty->x1 = dirty->y1 = MAX_DIRTY;
867 }
868
/* Set the color used to clear the render target (copied by value). */
void
vl_compositor_set_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
{
   assert(s);
   assert(color);

   s->clear_color = *color;
}
877
/* Retrieve the current clear color into *color. */
void
vl_compositor_get_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
{
   assert(s);
   assert(color);

   *color = s->clear_color;
}
886
887 void
vl_compositor_clear_layers(struct vl_compositor_state * s)888 vl_compositor_clear_layers(struct vl_compositor_state *s)
889 {
890 unsigned i, j;
891
892 assert(s);
893
894 s->used_layers = 0;
895 for ( i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
896 struct vertex4f v_one = { 1.0f, 1.0f, 1.0f, 1.0f };
897 s->layers[i].clearing = i ? false : true;
898 s->layers[i].blend = NULL;
899 s->layers[i].fs = NULL;
900 s->layers[i].viewport.scale[2] = 1;
901 s->layers[i].viewport.translate[2] = 0;
902 s->layers[i].rotate = VL_COMPOSITOR_ROTATE_0;
903
904 for ( j = 0; j < 3; j++)
905 pipe_sampler_view_reference(&s->layers[i].sampler_views[j], NULL);
906 for ( j = 0; j < 4; ++j)
907 s->layers[i].colors[j] = v_one;
908 }
909 }
910
/* Tear down everything created by the compositor init path:
 * upload manager first, then buffers, shaders and pipe state. */
void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   u_upload_destroy(c->upload);
   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}
921
922 bool
vl_compositor_set_csc_matrix(struct vl_compositor_state * s,vl_csc_matrix const * matrix,float luma_min,float luma_max)923 vl_compositor_set_csc_matrix(struct vl_compositor_state *s,
924 vl_csc_matrix const *matrix,
925 float luma_min, float luma_max)
926 {
927 struct pipe_transfer *buf_transfer;
928
929 assert(s);
930
931 float *ptr = pipe_buffer_map(s->pipe, s->csc_matrix,
932 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
933 &buf_transfer);
934
935 if (!ptr)
936 return false;
937
938 memcpy(ptr, matrix, sizeof(vl_csc_matrix));
939
940 ptr += sizeof(vl_csc_matrix)/sizeof(float);
941 ptr[0] = luma_min;
942 ptr[1] = luma_max;
943
944 pipe_buffer_unmap(s->pipe, buf_transfer);
945
946 return true;
947 }
948
949 void
vl_compositor_set_dst_clip(struct vl_compositor_state * s,struct u_rect * dst_clip)950 vl_compositor_set_dst_clip(struct vl_compositor_state *s, struct u_rect *dst_clip)
951 {
952 assert(s);
953
954 s->scissor_valid = dst_clip != NULL;
955 if (dst_clip) {
956 s->scissor.minx = dst_clip->x0;
957 s->scissor.miny = dst_clip->y0;
958 s->scissor.maxx = dst_clip->x1;
959 s->scissor.maxy = dst_clip->y1;
960 }
961 }
962
963 void
vl_compositor_set_layer_blend(struct vl_compositor_state * s,unsigned layer,void * blend,bool is_clearing)964 vl_compositor_set_layer_blend(struct vl_compositor_state *s,
965 unsigned layer, void *blend,
966 bool is_clearing)
967 {
968 assert(s && blend);
969
970 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
971
972 s->layers[layer].clearing = is_clearing;
973 s->layers[layer].blend = blend;
974 }
975
976 void
vl_compositor_set_layer_dst_area(struct vl_compositor_state * s,unsigned layer,struct u_rect * dst_area)977 vl_compositor_set_layer_dst_area(struct vl_compositor_state *s,
978 unsigned layer, struct u_rect *dst_area)
979 {
980 assert(s);
981
982 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
983
984 s->layers[layer].viewport_valid = dst_area != NULL;
985 if (dst_area) {
986 s->layers[layer].viewport.scale[0] = dst_area->x1 - dst_area->x0;
987 s->layers[layer].viewport.scale[1] = dst_area->y1 - dst_area->y0;
988 s->layers[layer].viewport.translate[0] = dst_area->x0;
989 s->layers[layer].viewport.translate[1] = dst_area->y0;
990 }
991 }
992
993 void
vl_compositor_set_buffer_layer(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_video_buffer * buffer,struct u_rect * src_rect,struct u_rect * dst_rect,enum vl_compositor_deinterlace deinterlace)994 vl_compositor_set_buffer_layer(struct vl_compositor_state *s,
995 struct vl_compositor *c,
996 unsigned layer,
997 struct pipe_video_buffer *buffer,
998 struct u_rect *src_rect,
999 struct u_rect *dst_rect,
1000 enum vl_compositor_deinterlace deinterlace)
1001 {
1002 struct pipe_sampler_view **sampler_views;
1003 unsigned i;
1004
1005 assert(s && c && buffer);
1006
1007 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1008
1009 s->used_layers |= 1 << layer;
1010 sampler_views = buffer->get_sampler_view_components(buffer);
1011 for (i = 0; i < 3; ++i) {
1012 s->layers[layer].samplers[i] = c->sampler_linear;
1013 pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
1014 }
1015
1016 calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
1017 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1018 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1019
1020 if (buffer->interlaced) {
1021 float half_a_line = 0.5f / s->layers[layer].zw.y;
1022 switch(deinterlace) {
1023 case VL_COMPOSITOR_WEAVE:
1024 s->layers[layer].fs = c->fs_weave_rgb;
1025 break;
1026
1027 case VL_COMPOSITOR_BOB_TOP:
1028 s->layers[layer].zw.x = 0.0f;
1029 s->layers[layer].src.tl.y += half_a_line;
1030 s->layers[layer].src.br.y += half_a_line;
1031 s->layers[layer].fs = c->fs_video_buffer;
1032 break;
1033
1034 case VL_COMPOSITOR_BOB_BOTTOM:
1035 s->layers[layer].zw.x = 1.0f;
1036 s->layers[layer].src.tl.y -= half_a_line;
1037 s->layers[layer].src.br.y -= half_a_line;
1038 s->layers[layer].fs = c->fs_video_buffer;
1039 break;
1040 }
1041
1042 } else
1043 s->layers[layer].fs = c->fs_video_buffer;
1044 }
1045
1046 void
vl_compositor_set_palette_layer(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_sampler_view * indexes,struct pipe_sampler_view * palette,struct u_rect * src_rect,struct u_rect * dst_rect,bool include_color_conversion)1047 vl_compositor_set_palette_layer(struct vl_compositor_state *s,
1048 struct vl_compositor *c,
1049 unsigned layer,
1050 struct pipe_sampler_view *indexes,
1051 struct pipe_sampler_view *palette,
1052 struct u_rect *src_rect,
1053 struct u_rect *dst_rect,
1054 bool include_color_conversion)
1055 {
1056 assert(s && c && indexes && palette);
1057
1058 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1059
1060 s->used_layers |= 1 << layer;
1061
1062 s->layers[layer].fs = include_color_conversion ?
1063 c->fs_palette.yuv : c->fs_palette.rgb;
1064
1065 s->layers[layer].samplers[0] = c->sampler_linear;
1066 s->layers[layer].samplers[1] = c->sampler_nearest;
1067 s->layers[layer].samplers[2] = NULL;
1068 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], indexes);
1069 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], palette);
1070 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
1071 calc_src_and_dst(&s->layers[layer], indexes->texture->width0, indexes->texture->height0,
1072 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1073 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1074 }
1075
1076 void
vl_compositor_set_rgba_layer(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_sampler_view * rgba,struct u_rect * src_rect,struct u_rect * dst_rect,struct vertex4f * colors)1077 vl_compositor_set_rgba_layer(struct vl_compositor_state *s,
1078 struct vl_compositor *c,
1079 unsigned layer,
1080 struct pipe_sampler_view *rgba,
1081 struct u_rect *src_rect,
1082 struct u_rect *dst_rect,
1083 struct vertex4f *colors)
1084 {
1085 unsigned i;
1086
1087 assert(s && c && rgba);
1088
1089 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1090
1091 s->used_layers |= 1 << layer;
1092 s->layers[layer].fs = c->fs_rgba;
1093 s->layers[layer].samplers[0] = c->sampler_linear;
1094 s->layers[layer].samplers[1] = NULL;
1095 s->layers[layer].samplers[2] = NULL;
1096 pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], rgba);
1097 pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
1098 pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
1099 calc_src_and_dst(&s->layers[layer], rgba->texture->width0, rgba->texture->height0,
1100 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1101 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1102
1103 if (colors)
1104 for (i = 0; i < 4; ++i)
1105 s->layers[layer].colors[i] = colors[i];
1106 }
1107
1108 void
vl_compositor_set_layer_rotation(struct vl_compositor_state * s,unsigned layer,enum vl_compositor_rotation rotate)1109 vl_compositor_set_layer_rotation(struct vl_compositor_state *s,
1110 unsigned layer,
1111 enum vl_compositor_rotation rotate)
1112 {
1113 assert(s);
1114 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1115 s->layers[layer].rotate = rotate;
1116 }
1117
1118 void
vl_compositor_set_yuv_layer(struct vl_compositor_state * s,struct vl_compositor * c,unsigned layer,struct pipe_video_buffer * buffer,struct u_rect * src_rect,struct u_rect * dst_rect,bool y)1119 vl_compositor_set_yuv_layer(struct vl_compositor_state *s,
1120 struct vl_compositor *c,
1121 unsigned layer,
1122 struct pipe_video_buffer *buffer,
1123 struct u_rect *src_rect,
1124 struct u_rect *dst_rect,
1125 bool y)
1126 {
1127 struct pipe_sampler_view **sampler_views;
1128 unsigned i;
1129
1130 assert(s && c && buffer);
1131
1132 assert(layer < VL_COMPOSITOR_MAX_LAYERS);
1133
1134 s->used_layers |= 1 << layer;
1135 sampler_views = buffer->get_sampler_view_components(buffer);
1136 for (i = 0; i < 3; ++i) {
1137 s->layers[layer].samplers[i] = c->sampler_linear;
1138 pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
1139 }
1140
1141 calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
1142 src_rect ? *src_rect : default_rect(&s->layers[layer]),
1143 dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
1144
1145 s->layers[layer].fs = (y) ? c->fs_weave_yuv.y : c->fs_weave_yuv.uv;
1146 }
1147
1148 void
vl_compositor_render(struct vl_compositor_state * s,struct vl_compositor * c,struct pipe_surface * dst_surface,struct u_rect * dirty_area,bool clear_dirty)1149 vl_compositor_render(struct vl_compositor_state *s,
1150 struct vl_compositor *c,
1151 struct pipe_surface *dst_surface,
1152 struct u_rect *dirty_area,
1153 bool clear_dirty)
1154 {
1155 assert(c);
1156 assert(dst_surface);
1157
1158 c->fb_state.width = dst_surface->width;
1159 c->fb_state.height = dst_surface->height;
1160 c->fb_state.cbufs[0] = dst_surface;
1161
1162 if (!s->scissor_valid) {
1163 s->scissor.minx = 0;
1164 s->scissor.miny = 0;
1165 s->scissor.maxx = dst_surface->width;
1166 s->scissor.maxy = dst_surface->height;
1167 }
1168 c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor);
1169
1170 gen_vertex_data(c, s, dirty_area);
1171
1172 if (clear_dirty && dirty_area &&
1173 (dirty_area->x0 < dirty_area->x1 || dirty_area->y0 < dirty_area->y1)) {
1174
1175 c->pipe->clear_render_target(c->pipe, dst_surface, &s->clear_color,
1176 0, 0, dst_surface->width, dst_surface->height, false);
1177 dirty_area->x0 = dirty_area->y0 = MAX_DIRTY;
1178 dirty_area->x1 = dirty_area->y1 = MIN_DIRTY;
1179 }
1180
1181 c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
1182 c->pipe->bind_vs_state(c->pipe, c->vs);
1183 c->pipe->set_vertex_buffers(c->pipe, 0, 1, &c->vertex_buf);
1184 c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
1185 pipe_set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, s->csc_matrix);
1186 c->pipe->bind_rasterizer_state(c->pipe, c->rast);
1187
1188 draw_layers(c, s, dirty_area);
1189 }
1190
1191 bool
vl_compositor_init(struct vl_compositor * c,struct pipe_context * pipe)1192 vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
1193 {
1194 assert(c);
1195
1196 memset(c, 0, sizeof(*c));
1197
1198 c->pipe = pipe;
1199
1200 c->upload = u_upload_create(pipe, 128 * 1024, PIPE_BIND_VERTEX_BUFFER,
1201 PIPE_USAGE_STREAM);
1202
1203 if (!c->upload)
1204 return false;
1205
1206 if (!init_pipe_state(c)) {
1207 u_upload_destroy(c->upload);
1208 return false;
1209 }
1210
1211 if (!init_shaders(c)) {
1212 u_upload_destroy(c->upload);
1213 cleanup_pipe_state(c);
1214 return false;
1215 }
1216
1217 if (!init_buffers(c)) {
1218 u_upload_destroy(c->upload);
1219 cleanup_shaders(c);
1220 cleanup_pipe_state(c);
1221 return false;
1222 }
1223
1224 return true;
1225 }
1226
1227 bool
vl_compositor_init_state(struct vl_compositor_state * s,struct pipe_context * pipe)1228 vl_compositor_init_state(struct vl_compositor_state *s, struct pipe_context *pipe)
1229 {
1230 vl_csc_matrix csc_matrix;
1231
1232 assert(s);
1233
1234 memset(s, 0, sizeof(*s));
1235
1236 s->pipe = pipe;
1237
1238 s->clear_color.f[0] = s->clear_color.f[1] = 0.0f;
1239 s->clear_color.f[2] = s->clear_color.f[3] = 0.0f;
1240
1241 /*
1242 * Create our fragment shader's constant buffer
1243 * Const buffer contains the color conversion matrix and bias vectors
1244 */
1245 /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
1246 s->csc_matrix = pipe_buffer_create
1247 (
1248 pipe->screen,
1249 PIPE_BIND_CONSTANT_BUFFER,
1250 PIPE_USAGE_DEFAULT,
1251 sizeof(csc_matrix) + 2*sizeof(float)
1252 );
1253
1254 if (!s->csc_matrix)
1255 return false;
1256
1257 vl_compositor_clear_layers(s);
1258
1259 vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, &csc_matrix);
1260 if (!vl_compositor_set_csc_matrix(s, (const vl_csc_matrix *)&csc_matrix, 1.0f, 0.0f))
1261 return false;
1262
1263 return true;
1264 }
1265
1266 void
vl_compositor_cleanup_state(struct vl_compositor_state * s)1267 vl_compositor_cleanup_state(struct vl_compositor_state *s)
1268 {
1269 assert(s);
1270
1271 vl_compositor_clear_layers(s);
1272 pipe_resource_reference(&s->csc_matrix, NULL);
1273 }
1274