/**********************************************************
 * Copyright 2009-2011 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *********************************************************
 * Authors:
 * Zack Rusin <zackr-at-vmware-dot-com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "xa_composite.h"
#include "xa_context.h"
#include "xa_priv.h"
#include "cso_cache/cso_context.h"
#include "util/u_sampler.h"
#include "util/u_inlines.h"


/* XXX also in Xrender.h, but including it here breaks compilation */
#define XFixedToDouble(f)    (((double) (f)) / 65536.)

struct xa_composite_blend {
    unsigned op : 8;

    unsigned alpha_dst : 4;
    unsigned alpha_src : 4;

    unsigned rgb_src : 8;    /**< PIPE_BLENDFACTOR_x */
    unsigned rgb_dst : 8;    /**< PIPE_BLENDFACTOR_x */
};

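/*
 * Table mapping each XA composite operator to the corresponding gallium
 * blend factors. The alpha_dst / alpha_src bits flag whether the factors
 * reference destination or source alpha, which matters when either
 * channel is missing or needs special handling below. XA_BLEND_OP_OVER
 * is the index of the xa_op_over entry, used as a fallback when an
 * operator is not found in the table.
 */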
#define XA_BLEND_OP_OVER 3
static const struct xa_composite_blend xa_blends[] = {
    { xa_op_clear,
      0, 0, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_ZERO},
    { xa_op_src,
      0, 0, PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_ZERO},
    { xa_op_dst,
      0, 0, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_ONE},
    { xa_op_over,
      0, 1, PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_over_reverse,
      1, 0, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_ONE},
    { xa_op_in,
      1, 0, PIPE_BLENDFACTOR_DST_ALPHA, PIPE_BLENDFACTOR_ZERO},
    { xa_op_in_reverse,
      0, 1, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_SRC_ALPHA},
    { xa_op_out,
      1, 0, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_ZERO},
    { xa_op_out_reverse,
      0, 1, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_atop,
      1, 1, PIPE_BLENDFACTOR_DST_ALPHA, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_atop_reverse,
      1, 1, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_SRC_ALPHA},
    { xa_op_xor,
      1, 1, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_add,
      0, 0, PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_ONE},
};

/*
 * The alpha value stored in an L8 texture is read by the
 * hardware as color, and R8 is read as red. The source alpha value
 * at the end of the fragment shader is stored in all color channels,
 * so the correct approach is to blend using DST_COLOR instead of
 * DST_ALPHA and then output any color channel (L8) or the red channel (R8).
 */
static unsigned
xa_convert_blend_for_luminance(unsigned factor)
{
    switch (factor) {
    case PIPE_BLENDFACTOR_DST_ALPHA:
        return PIPE_BLENDFACTOR_DST_COLOR;
    case PIPE_BLENDFACTOR_INV_DST_ALPHA:
        return PIPE_BLENDFACTOR_INV_DST_COLOR;
    default:
        break;
    }
    return factor;
}

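/**
 * blend_for_op - Look up and adjust the blend state for a composite op
 *
 * Picks the blend entry matching the operator from xa_blends (defaulting
 * to OVER), rejects component-alpha masks that need source alpha in the
 * blend equation, converts DST_ALPHA factors to DST_COLOR for L8/R8
 * destinations and treats destination alpha as 1 when the destination
 * picture format has no alpha channel.
 *
 * \returns TRUE if the operator is supported, FALSE otherwise.
 */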
static boolean
blend_for_op(struct xa_composite_blend *blend,
             enum xa_composite_op op,
             struct xa_picture *src_pic,
             struct xa_picture *mask_pic,
             struct xa_picture *dst_pic)
{
    const int num_blends =
        sizeof(xa_blends) / sizeof(struct xa_composite_blend);
    int i;
    boolean supported = FALSE;

    /*
     * Our default in case something goes wrong.
     */
    *blend = xa_blends[XA_BLEND_OP_OVER];

    for (i = 0; i < num_blends; ++i) {
        if (xa_blends[i].op == op) {
            *blend = xa_blends[i];
            supported = TRUE;
            break;
        }
    }

    /*
     * No component alpha yet.
     */
    if (mask_pic && mask_pic->component_alpha && blend->alpha_src)
        return FALSE;

    if (!dst_pic->srf)
        return supported;

    if ((dst_pic->srf->tex->format == PIPE_FORMAT_L8_UNORM ||
         dst_pic->srf->tex->format == PIPE_FORMAT_R8_UNORM)) {
        blend->rgb_src = xa_convert_blend_for_luminance(blend->rgb_src);
        blend->rgb_dst = xa_convert_blend_for_luminance(blend->rgb_dst);
    }

    /*
     * If there's no dst alpha channel, adjust the blend op so that we'll treat
     * it as always 1.
     */
    if (xa_format_a(dst_pic->pict_format) == 0 && blend->alpha_dst) {
        if (blend->rgb_src == PIPE_BLENDFACTOR_DST_ALPHA)
            blend->rgb_src = PIPE_BLENDFACTOR_ONE;
        else if (blend->rgb_src == PIPE_BLENDFACTOR_INV_DST_ALPHA)
            blend->rgb_src = PIPE_BLENDFACTOR_ZERO;
    }

    return supported;
}


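/**
 * xa_repeat_to_gallium - Translate an xa wrap mode to a gallium
 * PIPE_TEX_WRAP_x value, defaulting to PIPE_TEX_WRAP_REPEAT.
 */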
static inline int
xa_repeat_to_gallium(int mode)
{
    switch (mode) {
    case xa_wrap_clamp_to_border:
        return PIPE_TEX_WRAP_CLAMP_TO_BORDER;
    case xa_wrap_repeat:
        return PIPE_TEX_WRAP_REPEAT;
    case xa_wrap_mirror_repeat:
        return PIPE_TEX_WRAP_MIRROR_REPEAT;
    case xa_wrap_clamp_to_edge:
        return PIPE_TEX_WRAP_CLAMP_TO_EDGE;
    default:
        break;
    }
    return PIPE_TEX_WRAP_REPEAT;
}

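/**
 * xa_filter_to_gallium - Translate an xa filter to a gallium
 * PIPE_TEX_FILTER_x value.
 *
 * \returns TRUE if the filter is supported, FALSE otherwise, in which
 * case *out_filter falls back to PIPE_TEX_FILTER_NEAREST.
 */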
static inline boolean
xa_filter_to_gallium(int xrender_filter, int *out_filter)
{
    switch (xrender_filter) {
    case xa_filter_nearest:
        *out_filter = PIPE_TEX_FILTER_NEAREST;
        break;
    case xa_filter_linear:
        *out_filter = PIPE_TEX_FILTER_LINEAR;
        break;
    default:
        *out_filter = PIPE_TEX_FILTER_NEAREST;
        return FALSE;
    }
    return TRUE;
}

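/**
 * xa_is_filter_accelerated - Check whether a picture's filter can be
 * translated to a gallium filter. A NULL picture is considered accelerated.
 */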
static int
xa_is_filter_accelerated(struct xa_picture *pic)
{
    int filter;

    if (pic && !xa_filter_to_gallium(pic->filter, &filter))
        return 0;
    return 1;
}

/**
 * xa_src_pict_is_accelerated - Check whether we support acceleration
 * of the given src_pict type
 *
 * \param src_pic[in]: Pointer to a union xa_source_pict to check.
 *
 * \returns TRUE if accelerated, FALSE otherwise.
 */
static boolean
xa_src_pict_is_accelerated(const union xa_source_pict *src_pic)
{
    if (!src_pic)
        return TRUE;

    if (src_pic->type == xa_src_pict_solid_fill ||
        src_pic->type == xa_src_pict_float_solid_fill)
        return TRUE;

    return FALSE;
}

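/**
 * xa_composite_check_accelerated - Check whether the given composite
 * operation can be accelerated by this implementation
 *
 * \param comp[in]: Pointer to the xa_composite to check.
 *
 * \returns XA_ERR_NONE if accelerated, -XA_ERR_INVAL otherwise.
 *
 * The operation is rejected if either filter is unsupported, if a source
 * picture type other than a solid fill is used, if the blend operator is
 * unknown, or if a component-alpha mask would require source alpha in the
 * blend equation.
 */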
XA_EXPORT int
xa_composite_check_accelerated(const struct xa_composite *comp)
{
    struct xa_picture *src_pic = comp->src;
    struct xa_picture *mask_pic = comp->mask;
    struct xa_composite_blend blend;

    if (!xa_is_filter_accelerated(src_pic) ||
        !xa_is_filter_accelerated(comp->mask)) {
        return -XA_ERR_INVAL;
    }

    if (!xa_src_pict_is_accelerated(src_pic->src_pict) ||
        (mask_pic && !xa_src_pict_is_accelerated(mask_pic->src_pict)))
        return -XA_ERR_INVAL;

    if (!blend_for_op(&blend, comp->op, comp->src, comp->mask, comp->dst))
        return -XA_ERR_INVAL;

    /*
     * No component alpha yet.
     */
    if (mask_pic && mask_pic->component_alpha && blend.alpha_src)
        return -XA_ERR_INVAL;

    return XA_ERR_NONE;
}

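/**
 * bind_composite_blend_state - Set up and bind the gallium blend state
 * for a composite operation on render target 0.
 *
 * \returns XA_ERR_NONE on success, -XA_ERR_INVAL if the blend operator
 * is not supported.
 */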
static int
bind_composite_blend_state(struct xa_context *ctx,
                           const struct xa_composite *comp)
{
    struct xa_composite_blend blend_opt;
    struct pipe_blend_state blend;

    if (!blend_for_op(&blend_opt, comp->op, comp->src, comp->mask, comp->dst))
        return -XA_ERR_INVAL;

    memset(&blend, 0, sizeof(struct pipe_blend_state));
    blend.rt[0].blend_enable = 1;
    blend.rt[0].colormask = PIPE_MASK_RGBA;

    blend.rt[0].rgb_src_factor   = blend_opt.rgb_src;
    blend.rt[0].alpha_src_factor = blend_opt.rgb_src;
    blend.rt[0].rgb_dst_factor   = blend_opt.rgb_dst;
    blend.rt[0].alpha_dst_factor = blend_opt.rgb_dst;

    cso_set_blend(ctx->cso, &blend);
    return XA_ERR_NONE;
}

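/**
 * picture_format_fixups - Determine the fragment shader fixups needed to
 * reconcile a picture's XRender format with the underlying surface format
 *
 * \param src_pic[in]: Pointer to the picture to inspect.
 * \param mask[in]: Nonzero if the picture is used as a mask.
 *
 * \returns A bitmask of FS_SRC_x / FS_MASK_x shader traits: forcing alpha
 * to 1 when the picture format lacks an alpha channel, sampling luminance
 * for L8/R8 surfaces, and swizzling RGB when the ARGB and ABGR component
 * orders differ.
 */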
static unsigned int
picture_format_fixups(struct xa_picture *src_pic,
                      int mask)
{
    boolean set_alpha = FALSE;
    boolean swizzle = FALSE;
    unsigned ret = 0;
    struct xa_surface *src = src_pic->srf;
    enum xa_formats src_hw_format, src_pic_format;
    enum xa_surface_type src_hw_type, src_pic_type;

    if (!src)
        return 0;

    src_hw_format = xa_surface_format(src);
    src_pic_format = src_pic->pict_format;

    set_alpha = (xa_format_type_is_color(src_hw_format) &&
                 xa_format_a(src_pic_format) == 0);

    if (set_alpha)
        ret |= mask ? FS_MASK_SET_ALPHA : FS_SRC_SET_ALPHA;

    if (src_hw_format == src_pic_format) {
        if (src->tex->format == PIPE_FORMAT_L8_UNORM ||
            src->tex->format == PIPE_FORMAT_R8_UNORM)
            return ((mask) ? FS_MASK_LUMINANCE : FS_SRC_LUMINANCE);

        return ret;
    }

    src_hw_type = xa_format_type(src_hw_format);
    src_pic_type = xa_format_type(src_pic_format);

    swizzle = ((src_hw_type == xa_type_argb &&
                src_pic_type == xa_type_abgr) ||
               ((src_hw_type == xa_type_abgr &&
                 src_pic_type == xa_type_argb)));

    if (!swizzle && (src_hw_type != src_pic_type))
        return ret;

    if (swizzle)
        ret |= mask ? FS_MASK_SWIZZLE_RGB : FS_SRC_SWIZZLE_RGB;

    return ret;
}

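/**
 * xa_src_in_mask - Multiply a solid source color by the alpha channel of
 * a solid mask color, implementing the IN operation for the case where
 * both source and mask are solid fills.
 */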
static void
xa_src_in_mask(float src[4], const float mask[4])
{
    src[0] *= mask[3];
    src[1] *= mask[3];
    src[2] *= mask[3];
    src[3] *= mask[3];
}

/**
 * xa_handle_src_pict - Set up xa_context state and fragment shader
 * input based on src_pict type
 *
 * \param ctx[in, out]: Pointer to the xa context.
 * \param src_pict[in]: Pointer to the union xa_source_pict to consider.
 * \param is_mask[in]: Whether we're considering a mask picture.
 *
 * \returns TRUE if successful, FALSE otherwise.
 *
 * This function computes some xa_context state used to determine whether
 * to upload the solid color and also the solid color itself used as an input
 * to the fragment shader.
 */
static boolean
xa_handle_src_pict(struct xa_context *ctx,
                   const union xa_source_pict *src_pict,
                   boolean is_mask)
{
    float solid_color[4];

    switch (src_pict->type) {
    case xa_src_pict_solid_fill:
        xa_pixel_to_float4(src_pict->solid_fill.color, solid_color);
        break;
    case xa_src_pict_float_solid_fill:
        memcpy(solid_color, src_pict->float_solid_fill.color,
               sizeof(solid_color));
        break;
    default:
        return FALSE;
    }

    if (is_mask && ctx->has_solid_src)
        xa_src_in_mask(ctx->solid_color, solid_color);
    else
        memcpy(ctx->solid_color, solid_color, sizeof(solid_color));

    if (is_mask)
        ctx->has_solid_mask = TRUE;
    else
        ctx->has_solid_src = TRUE;

    return TRUE;
}

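/**
 * bind_shaders - Select and bind the vertex and fragment shaders for a
 * composite operation
 *
 * Accumulates VS_x and FS_x shader traits from the source, mask and
 * destination pictures (solid fills, repeat modes, format fixups,
 * component alpha and luminance destinations), fetches the matching
 * shader pair from the shader cache and binds it.
 *
 * \returns XA_ERR_NONE on success, -XA_ERR_INVAL if the destination
 * picture and surface formats are of different types or a source picture
 * type is unsupported.
 */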
static int
bind_shaders(struct xa_context *ctx, const struct xa_composite *comp)
{
    unsigned vs_traits = 0, fs_traits = 0;
    struct xa_shader shader;
    struct xa_picture *src_pic = comp->src;
    struct xa_picture *mask_pic = comp->mask;
    struct xa_picture *dst_pic = comp->dst;

    ctx->has_solid_src = FALSE;
    ctx->has_solid_mask = FALSE;

    if (dst_pic && xa_format_type(dst_pic->pict_format) !=
        xa_format_type(xa_surface_format(dst_pic->srf)))
        return -XA_ERR_INVAL;

    if (src_pic) {
        if (src_pic->wrap == xa_wrap_clamp_to_border && src_pic->has_transform)
            fs_traits |= FS_SRC_REPEAT_NONE;

        fs_traits |= FS_COMPOSITE;
        vs_traits |= VS_COMPOSITE;

        if (src_pic->src_pict) {
            if (!xa_handle_src_pict(ctx, src_pic->src_pict, false))
                return -XA_ERR_INVAL;
            fs_traits |= FS_SRC_SRC;
            vs_traits |= VS_SRC_SRC;
        } else
            fs_traits |= picture_format_fixups(src_pic, 0);
    }

    if (mask_pic) {
        vs_traits |= VS_MASK;
        fs_traits |= FS_MASK;
        if (mask_pic->component_alpha)
            fs_traits |= FS_CA;
        if (mask_pic->src_pict) {
            if (!xa_handle_src_pict(ctx, mask_pic->src_pict, true))
                return -XA_ERR_INVAL;

            if (ctx->has_solid_src) {
                vs_traits &= ~VS_MASK;
                fs_traits &= ~FS_MASK;
            } else {
                vs_traits |= VS_MASK_SRC;
                fs_traits |= FS_MASK_SRC;
            }
        } else {
            if (mask_pic->wrap == xa_wrap_clamp_to_border &&
                mask_pic->has_transform)
                fs_traits |= FS_MASK_REPEAT_NONE;

            fs_traits |= picture_format_fixups(mask_pic, 1);
        }
    }

    if (ctx->srf->format == PIPE_FORMAT_L8_UNORM ||
        ctx->srf->format == PIPE_FORMAT_R8_UNORM)
        fs_traits |= FS_DST_LUMINANCE;

    shader = xa_shaders_get(ctx->shaders, vs_traits, fs_traits);
    cso_set_vertex_shader_handle(ctx->cso, shader.vs);
    cso_set_fragment_shader_handle(ctx->cso, shader.fs);
    return XA_ERR_NONE;
}

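/**
 * bind_samplers - Set up and bind the fragment samplers and sampler views
 * for the source and mask textures of a composite operation
 *
 * Solid-fill sources and masks don't need a texture and are skipped.
 * The number of bound samplers is recorded in ctx->num_bound_samplers so
 * that xa_composite_prepare and xa_composite_rect can distinguish the
 * solid-fill path from the textured path.
 */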
static void
bind_samplers(struct xa_context *ctx,
              const struct xa_composite *comp)
{
    struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
    struct pipe_sampler_state src_sampler, mask_sampler;
    struct pipe_sampler_view view_templ;
    struct pipe_sampler_view *src_view;
    struct pipe_context *pipe = ctx->pipe;
    struct xa_picture *src_pic = comp->src;
    struct xa_picture *mask_pic = comp->mask;
    int num_samplers = 0;

    xa_ctx_sampler_views_destroy(ctx);
    memset(&src_sampler, 0, sizeof(struct pipe_sampler_state));
    memset(&mask_sampler, 0, sizeof(struct pipe_sampler_state));

    if (src_pic && !ctx->has_solid_src) {
        unsigned src_wrap = xa_repeat_to_gallium(src_pic->wrap);
        int filter;

        (void) xa_filter_to_gallium(src_pic->filter, &filter);

        src_sampler.wrap_s = src_wrap;
        src_sampler.wrap_t = src_wrap;
        src_sampler.min_img_filter = filter;
        src_sampler.mag_img_filter = filter;
        src_sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST;
        src_sampler.normalized_coords = 1;
        samplers[0] = &src_sampler;
        u_sampler_view_default_template(&view_templ,
                                        src_pic->srf->tex,
                                        src_pic->srf->tex->format);
        src_view = pipe->create_sampler_view(pipe, src_pic->srf->tex,
                                             &view_templ);
        ctx->bound_sampler_views[0] = src_view;
        num_samplers++;
    }

    if (mask_pic && !ctx->has_solid_mask) {
        unsigned mask_wrap = xa_repeat_to_gallium(mask_pic->wrap);
        int filter;

        (void) xa_filter_to_gallium(mask_pic->filter, &filter);

        mask_sampler.wrap_s = mask_wrap;
        mask_sampler.wrap_t = mask_wrap;
        mask_sampler.min_img_filter = filter;
        mask_sampler.mag_img_filter = filter;
        mask_sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST;
        mask_sampler.normalized_coords = 1;
        samplers[num_samplers] = &mask_sampler;
        u_sampler_view_default_template(&view_templ,
                                        mask_pic->srf->tex,
                                        mask_pic->srf->tex->format);
        src_view = pipe->create_sampler_view(pipe, mask_pic->srf->tex,
                                             &view_templ);
        ctx->bound_sampler_views[num_samplers] = src_view;
        num_samplers++;
    }

    cso_set_samplers(ctx->cso, PIPE_SHADER_FRAGMENT, num_samplers,
                     (const struct pipe_sampler_state **)samplers);
    pipe->set_sampler_views(pipe, PIPE_SHADER_FRAGMENT, 0, num_samplers, 0,
                            false, ctx->bound_sampler_views);
    ctx->num_bound_samplers = num_samplers;
}

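/**
 * xa_composite_prepare - Prepare the context for a series of composite
 * operations sharing the same state
 *
 * Binds the destination surface, blend state, shaders and samplers, and
 * starts either the solid or the textured renderer depending on whether
 * any samplers were bound.
 *
 * \returns XA_ERR_NONE on success, or an error code from the state setup.
 *
 * A minimal usage sketch (error handling omitted):
 *
 *   if (xa_composite_check_accelerated(comp) == XA_ERR_NONE &&
 *       xa_composite_prepare(ctx, comp) == XA_ERR_NONE) {
 *       xa_composite_rect(ctx, srcX, srcY, maskX, maskY,
 *                         dstX, dstY, width, height);
 *       xa_composite_done(ctx);
 *   }
 */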
XA_EXPORT int
xa_composite_prepare(struct xa_context *ctx,
                     const struct xa_composite *comp)
{
    struct xa_surface *dst_srf = comp->dst->srf;
    int ret;

    ret = xa_ctx_srf_create(ctx, dst_srf);
    if (ret != XA_ERR_NONE)
        return ret;

    ctx->dst = dst_srf;
    renderer_bind_destination(ctx, ctx->srf);

    ret = bind_composite_blend_state(ctx, comp);
    if (ret != XA_ERR_NONE)
        return ret;
    ret = bind_shaders(ctx, comp);
    if (ret != XA_ERR_NONE)
        return ret;
    bind_samplers(ctx, comp);

    if (ctx->num_bound_samplers == 0) { /* solid fill */
        renderer_begin_solid(ctx);
    } else {
        renderer_begin_textures(ctx);
        ctx->comp = comp;
    }

    xa_ctx_srf_destroy(ctx);
    return XA_ERR_NONE;
}

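/**
 * xa_composite_rect - Emit one composite rectangle using the state set up
 * by xa_composite_prepare
 *
 * Updates the scissor box and draws either a solid quad or a textured
 * quad with the source and mask transforms, if any.
 */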
XA_EXPORT void
xa_composite_rect(struct xa_context *ctx,
                  int srcX, int srcY, int maskX, int maskY,
                  int dstX, int dstY, int width, int height)
{
    if (ctx->num_bound_samplers == 0) { /* solid fill */
        xa_scissor_update(ctx, dstX, dstY, dstX + width, dstY + height);
        renderer_solid(ctx, dstX, dstY, dstX + width, dstY + height);
    } else {
        const struct xa_composite *comp = ctx->comp;
        int pos[6] = {srcX, srcY, maskX, maskY, dstX, dstY};
        const float *src_matrix = NULL;
        const float *mask_matrix = NULL;

        xa_scissor_update(ctx, dstX, dstY, dstX + width, dstY + height);

        if (comp->src->has_transform)
            src_matrix = comp->src->transform;
        if (comp->mask && comp->mask->has_transform)
            mask_matrix = comp->mask->transform;

        renderer_texture(ctx, pos, width, height,
                         src_matrix, mask_matrix);
    }
}

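/**
 * xa_composite_done - Flush pending composite draws and release the
 * sampler views and solid-fill state set up by xa_composite_prepare.
 */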
XA_EXPORT void
xa_composite_done(struct xa_context *ctx)
{
    renderer_draw_flush(ctx);

    ctx->comp = NULL;
    ctx->has_solid_src = FALSE;
    ctx->has_solid_mask = FALSE;
    xa_ctx_sampler_views_destroy(ctx);
}

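/*
 * Sizes of the composite-related structures, exported so that library
 * users can allocate them without knowing their layout.
 */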
static const struct xa_composite_allocation a = {
    .xa_composite_size = sizeof(struct xa_composite),
    .xa_picture_size = sizeof(struct xa_picture),
    .xa_source_pict_size = sizeof(union xa_source_pict),
};

XA_EXPORT const struct xa_composite_allocation *
xa_composite_allocation(void)
{
    return &a;
}