1 /*
2  * Copyright © 2012 Intel Corporation
3  * Copyright © 2015,2019 Collabora, Ltd.
4  * Copyright © 2016 NVIDIA Corporation
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining
7  * a copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sublicense, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial
16  * portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21  * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
22  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
23  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
24  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25  * SOFTWARE.
26  */
27 
28 #include "config.h"
29 
30 #include <GLES2/gl2.h>
31 #include <GLES2/gl2ext.h>
32 
33 #include <stdbool.h>
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <ctype.h>
38 #include <float.h>
39 #include <assert.h>
40 #include <linux/input.h>
41 #include <drm_fourcc.h>
42 #include <unistd.h>
43 #include <xf86drm.h>
44 
45 #include "linux-sync-file.h"
46 #include "timeline.h"
47 
48 #include "gl-renderer.h"
49 #include "gl-renderer-internal.h"
50 #include "vertex-clipping.h"
51 #include "linux-dmabuf.h"
52 #include "linux-dmabuf-unstable-v1-server-protocol.h"
53 #include "linux-explicit-synchronization.h"
54 #include "pixel-formats.h"
55 
56 #include <fcntl.h>
57 #include "shared/fd-util.h"
58 #include "shared/helpers.h"
59 #include "shared/platform.h"
60 #include "shared/timespec-util.h"
61 #include "shared/weston-egl-ext.h"
62 
63 #define GR_GL_VERSION(major, minor) \
64 	(((uint32_t)(major) << 16) | (uint32_t)(minor))
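/* For example, GR_GL_VERSION(3, 0) evaluates to 0x00030000, so packed
 * versions can be compared with ordinary integer comparisons. */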
65 
66 #define GR_GL_VERSION_INVALID \
67 	GR_GL_VERSION(0, 0)
68 
69 #define BUFFER_DAMAGE_COUNT 2
70 #define GBM_DEVICE_PATH "/dev/dri/card0"
71 #define GENERAL_ATTRIBS  3
72 #define PLANE_ATTRIBS  5
73 #define ENTRIES_PER_ATTRIB  2
74 #define MAX_BUFFER_PLANES  4
75 
76 enum gl_border_status {
77 	BORDER_STATUS_CLEAN = 0,
78 	BORDER_TOP_DIRTY = 1 << GL_RENDERER_BORDER_TOP,
79 	BORDER_LEFT_DIRTY = 1 << GL_RENDERER_BORDER_LEFT,
80 	BORDER_RIGHT_DIRTY = 1 << GL_RENDERER_BORDER_RIGHT,
81 	BORDER_BOTTOM_DIRTY = 1 << GL_RENDERER_BORDER_BOTTOM,
82 	BORDER_ALL_DIRTY = 0xf,
83 	BORDER_SIZE_CHANGED = 0x10
84 };
85 
86 struct gl_border_image {
87 	GLuint tex;
88 	int32_t width, height;
89 	int32_t tex_width;
90 	void *data;
91 };
92 
93 struct gl_output_state {
94 	EGLSurface egl_surface;
95 	pixman_region32_t buffer_damage[BUFFER_DAMAGE_COUNT];
96 	int buffer_damage_index;
97 	enum gl_border_status border_damage[BUFFER_DAMAGE_COUNT];
98 	struct gl_border_image borders[4];
99 	enum gl_border_status border_status;
100 
101 	struct weston_matrix output_matrix;
102 
103 	EGLSyncKHR begin_render_sync, end_render_sync;
104 
105 	/* struct timeline_render_point::link */
106 	struct wl_list timeline_render_point_list;
107 
108 	// OHOS hdi-backend
109 	struct gl_fbo fbo[GL_RENDERER_FRMAEBUFFER_SIZE];
110 	int current_fbo_index;
111 	bool use_fbo;
112 };
113 
114 enum buffer_type {
115 	BUFFER_TYPE_NULL,
116 	BUFFER_TYPE_SOLID, /* internal solid color surfaces without a buffer */
117 	BUFFER_TYPE_SHM,
118 	BUFFER_TYPE_EGL
119 };
120 
121 struct gl_renderer;
122 
123 struct egl_image {
124 	struct gl_renderer *renderer;
125 	EGLImageKHR image;
126 	int refcount;
127 };
128 
129 enum import_type {
130 	IMPORT_TYPE_INVALID,
131 	IMPORT_TYPE_DIRECT,
132 	IMPORT_TYPE_GL_CONVERSION
133 };
134 
135 struct dmabuf_image {
136 	struct linux_dmabuf_buffer *dmabuf;
137 	int num_images;
138 	struct egl_image *images[3];
139 	struct wl_list link;
140 
141 	enum import_type import_type;
142 	GLenum target;
143 	struct gl_shader *shader;
144 };
145 
146 struct dmabuf_format {
147 	uint32_t format;
148 	struct wl_list link;
149 
150 	uint64_t *modifiers;
151 	unsigned *external_only;
152 	int num_modifiers;
153 };
154 
155 struct yuv_plane_descriptor {
156 	int width_divisor;
157 	int height_divisor;
158 	uint32_t format;
159 	int plane_index;
160 };
161 
162 enum texture_type {
163 	TEXTURE_Y_XUXV_WL,
164 	TEXTURE_Y_UV_WL,
165 	TEXTURE_Y_U_V_WL,
166 	TEXTURE_XYUV_WL
167 };
168 
169 struct yuv_format_descriptor {
170 	uint32_t format;
171 	int input_planes;
172 	int output_planes;
173 	enum texture_type texture_type;
174 	struct yuv_plane_descriptor plane[4];
175 };
176 
177 struct gl_surface_state {
178 	GLfloat color[4];
179 	struct gl_shader *shader;
180 
181 	GLuint textures[3];
182 	int num_textures;
183 	bool needs_full_upload;
184 	pixman_region32_t texture_damage;
185 
186 	/* These are only used by SHM surfaces to detect when we need
187 	 * to do a full upload to specify a new internal texture
188 	 * format */
189 	GLenum gl_format[3];
190 	GLenum gl_pixel_type;
191 
192 	struct egl_image* images[3];
193 	GLenum target;
194 	int num_images;
195 
196 	struct weston_buffer_reference buffer_ref;
197 	struct weston_buffer_release_reference buffer_release_ref;
198 	enum buffer_type buffer_type;
199 	int pitch; /* in pixels */
200 	int height; /* in pixels */
201 	bool y_inverted;
202 	bool direct_display;
203 
204 	/* Extension needed for SHM YUV texture */
205 	int offset[3]; /* offset per plane */
206 	int hsub[3];  /* horizontal subsampling per plane */
207 	int vsub[3];  /* vertical subsampling per plane */
208 
209 	struct weston_surface *surface;
210 
211 	/* Whether this surface was used in the current output repaint.
212 	   Used only in the context of a gl_renderer_repaint_output call. */
213 	bool used_in_output_repaint;
214 
215 	struct wl_listener surface_destroy_listener;
216 	struct wl_listener renderer_destroy_listener;
217 };
218 
219 enum timeline_render_point_type {
220 	TIMELINE_RENDER_POINT_TYPE_BEGIN,
221 	TIMELINE_RENDER_POINT_TYPE_END
222 };
223 
224 struct timeline_render_point {
225 	struct wl_list link; /* gl_output_state::timeline_render_point_list */
226 
227 	enum timeline_render_point_type type;
228 	int fd;
229 	struct weston_output *output;
230 	struct wl_event_source *event_source;
231 };
232 
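/* Write the fourcc 'format' as four ASCII characters into 'out' (not
 * NUL-terminated); on big-endian hosts the value is byte-swapped first so
 * the characters appear in the same order as on little-endian. */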
233 static inline const char *
234 dump_format(uint32_t format, char out[4])
235 {
236 #if BYTE_ORDER == BIG_ENDIAN
237 	format = __builtin_bswap32(format);
238 #endif
239 	memcpy(out, &format, 4);
240 	return out;
241 }
242 
243 static inline struct gl_output_state *
244 get_output_state(struct weston_output *output)
245 {
246 	return (struct gl_output_state *)output->gpu_renderer_state;
247 }
248 
249 static int
250 gl_renderer_create_surface(struct weston_surface *surface);
251 
252 static inline struct gl_surface_state *
253 get_surface_state(struct weston_surface *surface)
254 {
255 	if (!surface->gpu_renderer_state)
256 		gl_renderer_create_surface(surface);
257 	return (struct gl_surface_state *)surface->gpu_renderer_state;
258 }
259 
260 static void
261 timeline_render_point_destroy(struct timeline_render_point *trp)
262 {
263 	wl_list_remove(&trp->link);
264 	wl_event_source_remove(trp->event_source);
265 	close(trp->fd);
266 	free(trp);
267 }
268 
269 static int
270 timeline_render_point_handler(int fd, uint32_t mask, void *data)
271 {
272 	struct timeline_render_point *trp = data;
273 	const char *tp_name = trp->type == TIMELINE_RENDER_POINT_TYPE_BEGIN ?
274 			      "renderer_gpu_begin" : "renderer_gpu_end";
275 
276 	if (mask & WL_EVENT_READABLE) {
277 		struct timespec tspec = { 0 };
278 
279 		if (weston_linux_sync_file_read_timestamp(trp->fd,
280 							  &tspec) == 0) {
281 			TL_POINT(trp->output->compositor, tp_name, TLP_GPU(&tspec),
282 				 TLP_OUTPUT(trp->output), TLP_END);
283 		}
284 	}
285 
286 	timeline_render_point_destroy(trp);
287 
288 	return 0;
289 }
290 
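/* Create an EGL native fence sync object (EGL_ANDROID_native_fence_sync);
 * a sync file fd can later be extracted from it (see dup_native_fence_fd)
 * to timestamp when the GPU work surrounding it completes. */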
291 static EGLSyncKHR
292 create_render_sync(struct gl_renderer *gr)
293 {
294 	static const EGLint attribs[] = { EGL_NONE };
295 
296 	if (!gr->has_native_fence_sync)
297 		return EGL_NO_SYNC_KHR;
298 
299 	return gr->create_sync(gr->egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID,
300 			       attribs);
301 }
302 
303 static void
304 timeline_submit_render_sync(struct gl_renderer *gr,
305 			    struct weston_compositor *ec,
306 			    struct weston_output *output,
307 			    EGLSyncKHR sync,
308 			    enum timeline_render_point_type type)
309 {
310 	struct gl_output_state *go;
311 	struct wl_event_loop *loop;
312 	int fd;
313 	struct timeline_render_point *trp;
314 
315 // OHOS remove timeline
316 //	if (!weston_log_scope_is_enabled(ec->timeline) ||
317 	if (true ||
318 	    !gr->has_native_fence_sync ||
319 	    sync == EGL_NO_SYNC_KHR)
320 		return;
321 
322 	go = get_output_state(output);
323 	loop = wl_display_get_event_loop(ec->wl_display);
324 
325 	fd = gr->dup_native_fence_fd(gr->egl_display, sync);
326 	if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID)
327 		return;
328 
329 	trp = zalloc(sizeof *trp);
330 	if (trp == NULL) {
331 		close(fd);
332 		return;
333 	}
334 
335 	trp->type = type;
336 	trp->fd = fd;
337 	trp->output = output;
338 	trp->event_source = wl_event_loop_add_fd(loop, fd,
339 						 WL_EVENT_READABLE,
340 						 timeline_render_point_handler,
341 						 trp);
342 
343 	wl_list_insert(&go->timeline_render_point_list, &trp->link);
344 }
345 
346 static struct egl_image*
347 egl_image_create(struct gl_renderer *gr, EGLenum target,
348 		 EGLClientBuffer buffer, const EGLint *attribs)
349 {
350 	struct egl_image *img;
351 
352 	img = zalloc(sizeof *img);
353 	img->renderer = gr;
354 	img->refcount = 1;
355 	img->image = gr->create_image(gr->egl_display, EGL_NO_CONTEXT,
356 				      target, buffer, attribs);
357 
358 	if (img->image == EGL_NO_IMAGE_KHR) {
359 		free(img);
360 		return NULL;
361 	}
362 
363 	return img;
364 }
365 
366 static struct egl_image*
367 egl_image_ref(struct egl_image *image)
368 {
369 	image->refcount++;
370 
371 	return image;
372 }
373 
374 static int
375 egl_image_unref(struct egl_image *image)
376 {
377 	struct gl_renderer *gr = image->renderer;
378 
379 	assert(image->refcount > 0);
380 
381 	image->refcount--;
382 	if (image->refcount > 0)
383 		return image->refcount;
384 
385 	gr->destroy_image(gr->egl_display, image->image);
386 	free(image);
387 
388 	return 0;
389 }
390 
391 static struct dmabuf_image*
392 dmabuf_image_create(void)
393 {
394 	struct dmabuf_image *img;
395 
396 	img = zalloc(sizeof *img);
397 	wl_list_init(&img->link);
398 
399 	return img;
400 }
401 
402 static void
403 dmabuf_image_destroy(struct dmabuf_image *image)
404 {
405 	int i;
406 
407 	for (i = 0; i < image->num_images; ++i)
408 		egl_image_unref(image->images[i]);
409 
410 	if (image->dmabuf)
411 		linux_dmabuf_buffer_set_user_data(image->dmabuf, NULL, NULL);
412 
413 	wl_list_remove(&image->link);
414 	free(image);
415 }
416 
417 #define max(a, b) (((a) > (b)) ? (a) : (b))
418 #define min(a, b) (((a) > (b)) ? (b) : (a))
419 
420 /*
421  * Compute the boundary vertices of the intersection of the global coordinate
422  * aligned rectangle 'rect', and an arbitrary quadrilateral produced from
423  * 'surf_rect' when transformed from surface coordinates into global coordinates.
424  * The vertices are written to 'ex' and 'ey', and the return value is the
425  * number of vertices. Vertices are produced in clockwise winding order.
426  * Guarantees to produce either zero vertices, or 3-8 vertices with non-zero
427  * polygon area.
428  */
429 static int
430 calculate_edges(struct weston_view *ev, pixman_box32_t *rect,
431 		pixman_box32_t *surf_rect, GLfloat *ex, GLfloat *ey)
432 {
433 
434 	struct clip_context ctx;
435 	int i, n;
436 	GLfloat min_x, max_x, min_y, max_y;
437 	struct polygon8 surf = {
438 		{ surf_rect->x1, surf_rect->x2, surf_rect->x2, surf_rect->x1 },
439 		{ surf_rect->y1, surf_rect->y1, surf_rect->y2, surf_rect->y2 },
440 		4
441 	};
442 
443 	ctx.clip.x1 = rect->x1;
444 	ctx.clip.y1 = rect->y1;
445 	ctx.clip.x2 = rect->x2;
446 	ctx.clip.y2 = rect->y2;
447 
448 	/* transform surface to screen space: */
449 	for (i = 0; i < surf.n; i++)
450 		weston_view_to_global_float(ev, surf.x[i], surf.y[i],
451 					    &surf.x[i], &surf.y[i]);
452 
453 	/* find bounding box: */
454 	min_x = max_x = surf.x[0];
455 	min_y = max_y = surf.y[0];
456 
457 	for (i = 1; i < surf.n; i++) {
458 		min_x = min(min_x, surf.x[i]);
459 		max_x = max(max_x, surf.x[i]);
460 		min_y = min(min_y, surf.y[i]);
461 		max_y = max(max_y, surf.y[i]);
462 	}
463 
464 	/* First, simple bounding box check to discard early transformed
465 	 * surface rects that do not intersect with the clip region:
466 	 */
467 	if ((min_x >= ctx.clip.x2) || (max_x <= ctx.clip.x1) ||
468 	    (min_y >= ctx.clip.y2) || (max_y <= ctx.clip.y1))
469 		return 0;
470 
471 	/* Simple case, bounding box edges are parallel to surface edges,
472 	 * there will be only four edges.  We just need to clip the surface
473 	 * vertices to the clip rect bounds:
474 	 */
475 	if (!ev->transform.enabled)
476 		return clip_simple(&ctx, &surf, ex, ey);
477 
478 	/* Transformed case: use a general polygon clipping algorithm to
479 	 * clip the surface rectangle with each side of 'rect'.
480 	 * The algorithm is Sutherland-Hodgman, as explained in
481 	 * http://www.codeguru.com/cpp/misc/misc/graphics/article.php/c8965/Polygon-Clipping.htm
482 	 * but without looking at any of that code.
483 	 */
484 	n = clip_transformed(&ctx, &surf, ex, ey);
485 
486 	if (n < 3)
487 		return 0;
488 
489 	return n;
490 }
491 
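/* Merge rectangle 'b' into 'a' when both span the same columns and 'b' sits
 * directly above 'a'; on success the combined rectangle is written to
 * 'merge' and true is returned. */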
492 static bool
493 merge_down(pixman_box32_t *a, pixman_box32_t *b, pixman_box32_t *merge)
494 {
495 	if (a->x1 == b->x1 && a->x2 == b->x2 && a->y1 == b->y2) {
496 		merge->x1 = a->x1;
497 		merge->x2 = a->x2;
498 		merge->y1 = b->y1;
499 		merge->y2 = a->y2;
500 		return true;
501 	}
502 	return false;
503 }
504 
505 static int
506 compress_bands(pixman_box32_t *inrects, int nrects,
507 		   pixman_box32_t **outrects)
508 {
509 	bool merged = false;
510 	pixman_box32_t *out, merge_rect;
511 	int i, j, nout;
512 
513 	if (!nrects) {
514 		*outrects = NULL;
515 		return 0;
516 	}
517 
518 	/* nrects is an upper bound - we're not too worried about
519 	 * allocating a little extra
520 	 */
521 	out = malloc(sizeof(pixman_box32_t) * nrects);
522 	out[0] = inrects[0];
523 	nout = 1;
524 	for (i = 1; i < nrects; i++) {
525 		for (j = 0; j < nout; j++) {
526 			merged = merge_down(&inrects[i], &out[j], &merge_rect);
527 			if (merged) {
528 				out[j] = merge_rect;
529 				break;
530 			}
531 		}
532 		if (!merged) {
533 			out[nout] = inrects[i];
534 			nout++;
535 		}
536 	}
537 	*outrects = out;
538 	return nout;
539 }
540 
541 static int
542 texture_region(struct weston_view *ev, pixman_region32_t *region,
543 		pixman_region32_t *surf_region)
544 {
545 	struct gl_surface_state *gs = get_surface_state(ev->surface);
546 	struct weston_compositor *ec = ev->surface->compositor;
547 	struct gl_renderer *gr = get_renderer(ec);
548 	GLfloat *v, inv_width, inv_height;
549 	unsigned int *vtxcnt, nvtx = 0;
550 	pixman_box32_t *rects, *surf_rects;
551 	pixman_box32_t *raw_rects;
552 	int i, j, k, nrects, nsurf, raw_nrects;
553 	bool used_band_compression;
554 	raw_rects = pixman_region32_rectangles(region, &raw_nrects);
555 	surf_rects = pixman_region32_rectangles(surf_region, &nsurf);
556 
557 	if (raw_nrects < 4) {
558 		used_band_compression = false;
559 		nrects = raw_nrects;
560 		rects = raw_rects;
561 	} else {
562 		nrects = compress_bands(raw_rects, raw_nrects, &rects);
563 		used_band_compression = true;
564 	}
565 	/* worst case we can have 8 vertices per rect (ie. clipped into
566 	 * an octagon):
567 	 */
568 	v = wl_array_add(&gr->vertices, nrects * nsurf * 8 * 4 * sizeof *v);
569 	vtxcnt = wl_array_add(&gr->vtxcnt, nrects * nsurf * sizeof *vtxcnt);
570 
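	/* Texture coordinates are normalised to [0, 1]: x by the buffer pitch
	 * (pixels per row) and y by the buffer height, flipping y when the
	 * buffer is not already y-inverted. */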
571 	inv_width = 1.0 / gs->pitch;
572 	inv_height = 1.0 / gs->height;
573 
574 	for (i = 0; i < nrects; i++) {
575 		pixman_box32_t *rect = &rects[i];
576 		for (j = 0; j < nsurf; j++) {
577 			pixman_box32_t *surf_rect = &surf_rects[j];
578 			GLfloat sx, sy, bx, by;
579 			GLfloat ex[8], ey[8];          /* edge points in screen space */
580 			int n;
581 
582 			/* The transformed surface, after clipping to the clip region,
583 			 * can have as many as eight sides, emitted as a triangle-fan.
584 			 * The first vertex in the triangle fan can be chosen arbitrarily,
585 			 * since the area is guaranteed to be convex.
586 			 *
587 			 * If a corner of the transformed surface falls outside of the
588 			 * clip region, instead of emitting one vertex for the corner
589 			 * of the surface, up to two are emitted for two corresponding
590 			 * intersection point(s) between the surface and the clip region.
591 			 *
592 			 * To do this, we first calculate the (up to eight) points that
593 			 * form the intersection of the clip rect and the transformed
594 			 * surface.
595 			 */
596 			n = calculate_edges(ev, rect, surf_rect, ex, ey);
597 			if (n < 3)
598 				continue;
599 
600 			/* emit edge points: */
601 			for (k = 0; k < n; k++) {
602 				weston_view_from_global_float(ev, ex[k], ey[k],
603 							      &sx, &sy);
604 				/* position: */
605 				*(v++) = ex[k];
606 				*(v++) = ey[k];
607 				/* texcoord: */
608 				weston_surface_to_buffer_float(ev->surface,
609 							       sx, sy,
610 							       &bx, &by);
611 				*(v++) = bx * inv_width;
612 				if (gs->y_inverted) {
613 					*(v++) = by * inv_height;
614 				} else {
615 					*(v++) = (gs->height - by) * inv_height;
616 				}
617 			}
618 
619 			vtxcnt[nvtx++] = n;
620 		}
621 	}
622 
623 	if (used_band_compression)
624 		free(rects);
625 	return nvtx;
626 }
627 
628 static void
629 triangle_fan_debug(struct weston_view *view, int first, int count)
630 {
631 	struct weston_compositor *compositor = view->surface->compositor;
632 	struct gl_renderer *gr = get_renderer(compositor);
633 	int i;
634 	GLushort *buffer;
635 	GLushort *index;
636 	int nelems;
637 	static int color_idx = 0;
638 	static const GLfloat color[][4] = {
639 			{ 1.0, 0.0, 0.0, 1.0 },
640 			{ 0.0, 1.0, 0.0, 1.0 },
641 			{ 0.0, 0.0, 1.0, 1.0 },
642 			{ 1.0, 1.0, 1.0, 1.0 },
643 	};
644 
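	/* Outline the fan with GL_LINES: (count - 1) spokes from the centre
	 * vertex plus (count - 2) edges along the rim, two indices each. */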
645 	nelems = (count - 1 + count - 2) * 2;
646 
647 	buffer = malloc(sizeof(GLushort) * nelems);
648 	index = buffer;
649 
650 	for (i = 1; i < count; i++) {
651 		*index++ = first;
652 		*index++ = first + i;
653 	}
654 
655 	for (i = 2; i < count; i++) {
656 		*index++ = first + i - 1;
657 		*index++ = first + i;
658 	}
659 
660 	glUseProgram(gr->solid_shader.program);
661 	glUniform4fv(gr->solid_shader.color_uniform, 1,
662 			color[color_idx++ % ARRAY_LENGTH(color)]);
663 	glDrawElements(GL_LINES, nelems, GL_UNSIGNED_SHORT, buffer);
664 	glUseProgram(gr->current_shader->program);
665 	free(buffer);
666 }
667 
668 static void
669 repaint_region(struct weston_view *ev, pixman_region32_t *region,
670 		pixman_region32_t *surf_region)
671 {
672 	struct weston_compositor *ec = ev->surface->compositor;
673 	struct gl_renderer *gr = get_renderer(ec);
674 	GLfloat *v;
675 	unsigned int *vtxcnt;
676 	int i, first, nfans;
677 
678 	/* The final region to be painted is the intersection of
679 	 * 'region' and 'surf_region'. However, 'region' is in the global
680 	 * coordinates, and 'surf_region' is in the surface-local
681 	 * coordinates. texture_region() will iterate over all pairs of
682 	 * rectangles from both regions, compute the intersection
683 	 * polygon for each pair, and store it as a triangle fan if
684 	 * it has a non-zero area (at least 3 vertices, actually).
685 	 */
686 	nfans = texture_region(ev, region, surf_region);
687 
688 	v = gr->vertices.data;
689 	vtxcnt = gr->vtxcnt.data;
690 
691 	/* position: */
692 	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof *v, &v[0]);
693 	glEnableVertexAttribArray(0);
694 
695 	/* texcoord: */
696 	glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof *v, &v[2]);
697 	glEnableVertexAttribArray(1);
698 
699 	for (i = 0, first = 0; i < nfans; i++) {
700 		glDrawArrays(GL_TRIANGLE_FAN, first, vtxcnt[i]);
701 		if (gr->fan_debug)
702 			triangle_fan_debug(ev, first, vtxcnt[i]);
703 		first += vtxcnt[i];
704 	}
705 
706 	glDisableVertexAttribArray(1);
707 	glDisableVertexAttribArray(0);
708 
709 	gr->vertices.size = 0;
710 	gr->vtxcnt.size = 0;
711 }
712 
713 static int
714 use_output(struct weston_output *output)
715 {
716 	static int errored;
717 	struct gl_output_state *go = get_output_state(output);
718 	struct gl_renderer *gr = get_renderer(output->compositor);
719 	EGLBoolean ret;
720 
721 	ret = eglMakeCurrent(gr->egl_display, EGL_NO_SURFACE,
722 			     EGL_NO_SURFACE, gr->egl_context);
723 
724 	if (ret == EGL_FALSE) {
725 		if (errored)
726 			return -1;
727 		errored = 1;
728 		weston_log("Failed to make EGL context current.\n");
729 		gl_renderer_print_egl_error_state();
730 		return -1;
731 	}
732 
733 	return 0;
734 }
735 
736 static int
737 shader_init(struct gl_shader *shader, struct gl_renderer *gr,
738 		   const char *vertex_source, const char *fragment_source);
739 
740 static void
741 use_shader(struct gl_renderer *gr, struct gl_shader *shader)
742 {
743 	if (!shader->program) {
744 		int ret;
745 
746 		ret =  shader_init(shader, gr,
747 				   shader->vertex_source,
748 				   shader->fragment_source);
749 
750 		if (ret < 0)
751 			weston_log("warning: failed to compile shader\n");
752 	}
753 
754 	if (gr->current_shader == shader)
755 		return;
756 	glUseProgram(shader->program);
757 	gr->current_shader = shader;
758 }
759 
760 static void
761 shader_uniforms(struct gl_shader *shader,
762 		struct weston_view *view,
763 		struct weston_output *output)
764 {
765 	int i;
766 	struct gl_surface_state *gs = get_surface_state(view->surface);
767 	struct gl_output_state *go = get_output_state(output);
768 
769 	glUniformMatrix4fv(shader->proj_uniform,
770 			   1, GL_FALSE, go->output_matrix.d);
771 	glUniform4fv(shader->color_uniform, 1, gs->color);
772 	glUniform1f(shader->alpha_uniform, view->alpha);
773 
774 	for (i = 0; i < gs->num_textures; i++)
775 		glUniform1i(shader->tex_uniforms[i], i);
776 }
777 
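/* If the client attached an explicit-synchronization acquire fence to this
 * surface, import it as an EGLSyncKHR and make the GPU wait on it before
 * sampling from the buffer. Returns 0 on success, -1 on failure. */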
778 static int
779 ensure_surface_buffer_is_ready(struct gl_renderer *gr,
780 			       struct gl_surface_state *gs)
781 {
782 	EGLint attribs[] = {
783 		EGL_SYNC_NATIVE_FENCE_FD_ANDROID,
784 		-1,
785 		EGL_NONE
786 	};
787 	struct weston_surface *surface = gs->surface;
788 	struct weston_buffer *buffer = gs->buffer_ref.buffer;
789 	EGLSyncKHR sync;
790 	EGLint wait_ret;
791 	EGLint destroy_ret;
792 
793 	if (!buffer)
794 		return 0;
795 
796 	if (surface->acquire_fence_fd < 0)
797 		return 0;
798 
799 	/* We should only get a fence if we support EGLSyncKHR, since
800 	 * we don't advertise the explicit sync protocol otherwise. */
801 	assert(gr->has_native_fence_sync);
802 	/* We should only get a fence for non-SHM buffers, since surface
803 	 * commit would have failed otherwise. */
804 	assert(wl_shm_buffer_get(buffer->resource) == NULL);
805 
806 	attribs[1] = dup(surface->acquire_fence_fd);
807 	if (attribs[1] == -1) {
808 		linux_explicit_synchronization_send_server_error(
809 			gs->surface->synchronization_resource,
810 			"Failed to dup acquire fence");
811 		return -1;
812 	}
813 
814 	sync = gr->create_sync(gr->egl_display,
815 			       EGL_SYNC_NATIVE_FENCE_ANDROID,
816 			       attribs);
817 	if (sync == EGL_NO_SYNC_KHR) {
818 		linux_explicit_synchronization_send_server_error(
819 			gs->surface->synchronization_resource,
820 			"Failed to create EGLSyncKHR object");
821 		close(attribs[1]);
822 		return -1;
823 	}
824 
825 	wait_ret = gr->wait_sync(gr->egl_display, sync, 0);
826 	if (wait_ret == EGL_FALSE) {
827 		linux_explicit_synchronization_send_server_error(
828 			gs->surface->synchronization_resource,
829 			"Failed to wait on EGLSyncKHR object");
830 		/* Continue to try to destroy the sync object. */
831 	}
832 
833 
834 	destroy_ret = gr->destroy_sync(gr->egl_display, sync);
835 	if (destroy_ret == EGL_FALSE) {
836 		linux_explicit_synchronization_send_server_error(
837 			gs->surface->synchronization_resource,
838 			"Failed to destroy EGLSyncKHR object");
839 	}
840 
841 	return (wait_ret == EGL_TRUE && destroy_ret == EGL_TRUE) ? 0 : -1;
842 }
843 
844 
845  /* Check whether a view needs to be censored on an output.
846   * Two kinds of censoring are handled:
847   * - recording_censor: censor a protected view while the output
848   *   is being captured (planes disabled).
849   * - unprotected_censor: censor protected views displayed on an
850   *   output with a lower protection capability.
851   * Returns the originally stored gl_shader if content censoring is
852   * required, NULL otherwise.
853   */
854 static struct gl_shader *
855 setup_censor_overrides(struct weston_output *output,
856 		       struct weston_view *ev)
857 {
858 	struct gl_shader *replaced_shader = NULL;
859 	struct weston_compositor *ec = ev->surface->compositor;
860 	struct gl_renderer *gr = get_renderer(ec);
861 	struct gl_surface_state *gs = get_surface_state(ev->surface);
862 	bool recording_censor =
863 		(output->disable_planes > 0) &&
864 		(ev->surface->desired_protection > WESTON_HDCP_DISABLE);
865 
866 	bool unprotected_censor =
867 		(ev->surface->desired_protection > output->current_protection);
868 
869 	if (gs->direct_display) {
870 		gs->color[0] = 0.40;
871 		gs->color[1] = 0.0;
872 		gs->color[2] = 0.0;
873 		gs->color[3] = 1.0;
874 		gs->shader = &gr->solid_shader;
875 		return gs->shader;
876 	}
877 
878 	/* When not in enforced mode, the client is notified of the protection
879 	 * change, so content censoring is not required. */
880 	if (ev->surface->protection_mode !=
881 	    WESTON_SURFACE_PROTECTION_MODE_ENFORCED)
882 		return NULL;
883 
884 	if (recording_censor || unprotected_censor) {
885 		replaced_shader = gs->shader;
886 		gs->color[0] = 0.40;
887 		gs->color[1] = 0.0;
888 		gs->color[2] = 0.0;
889 		gs->color[3] = 1.0;
890 		gs->shader = &gr->solid_shader;
891 	}
892 
893 	return replaced_shader;
894 }
895 
896 static void
897 draw_view(struct weston_view *ev, struct weston_output *output,
898 	  pixman_region32_t *damage) /* in global coordinates */
899 {
900 	struct weston_compositor *ec = ev->surface->compositor;
901 	struct gl_renderer *gr = get_renderer(ec);
902 	struct gl_surface_state *gs = get_surface_state(ev->surface);
903 	/* repaint bounding region in global coordinates: */
904 	pixman_region32_t repaint;
905 	/* opaque region in surface coordinates: */
906 	pixman_region32_t surface_opaque;
907 	/* non-opaque region in surface coordinates: */
908 	pixman_region32_t surface_blend;
909 	GLint filter;
910 	int i;
911 	struct gl_shader *replaced_shader = NULL;
912 
913 	/* In case of a runtime switch of renderers, we may not have received
914 	 * an attach for this surface since the switch. In that case we don't
915 	 * have a valid buffer or a proper shader set up so skip rendering. */
916 	if (!gs->shader && !gs->direct_display)
917 		return;
918 
919 	pixman_region32_init(&repaint);
920 	pixman_region32_intersect(&repaint,
921 				  &ev->transform.boundingbox, damage);
922 	pixman_region32_subtract(&repaint, &repaint, &ev->clip);
923 
924 	if (!pixman_region32_not_empty(&repaint))
925 		goto out;
926 
927 	if (ensure_surface_buffer_is_ready(gr, gs) < 0)
928 		goto out;
929 
930 	replaced_shader = setup_censor_overrides(output, ev);
931 
932 	glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
933 
934 	if (gr->fan_debug) {
935 		use_shader(gr, &gr->solid_shader);
936 		shader_uniforms(&gr->solid_shader, ev, output);
937 	}
938 
939 	use_shader(gr, gs->shader);
940 	shader_uniforms(gs->shader, ev, output);
941 
942 	if (ev->transform.enabled || output->zoom.active ||
943 	    output->current_scale != ev->surface->buffer_viewport.buffer.scale)
944 		filter = GL_LINEAR;
945 	else
946 		filter = GL_NEAREST;
947 
948 	for (i = 0; i < gs->num_textures; i++) {
949 		glActiveTexture(GL_TEXTURE0 + i);
950 		glBindTexture(gs->target, gs->textures[i]);
951 		glTexParameteri(gs->target, GL_TEXTURE_MIN_FILTER, filter);
952 		glTexParameteri(gs->target, GL_TEXTURE_MAG_FILTER, filter);
953 	}
954 
955 	/* blended region is whole surface minus opaque region: */
956 	pixman_region32_init_rect(&surface_blend, 0, 0,
957 				  ev->surface->width, ev->surface->height);
958 	if (ev->geometry.scissor_enabled)
959 		pixman_region32_intersect(&surface_blend, &surface_blend,
960 					  &ev->geometry.scissor);
961 	pixman_region32_subtract(&surface_blend, &surface_blend,
962 				 &ev->surface->opaque);
963 
964 	/* XXX: Should we be using ev->transform.opaque here? */
965 	pixman_region32_init(&surface_opaque);
966 	if (ev->geometry.scissor_enabled)
967 		pixman_region32_intersect(&surface_opaque,
968 					  &ev->surface->opaque,
969 					  &ev->geometry.scissor);
970 	else
971 		pixman_region32_copy(&surface_opaque, &ev->surface->opaque);
972 
973 	if (pixman_region32_not_empty(&surface_opaque)) {
974 		if (gs->shader == &gr->texture_shader_rgba) {
975 			/* Special case for RGBA textures with possibly
976 			 * bad data in alpha channel: use the shader
977 			 * that forces texture alpha = 1.0.
978 			 * Xwayland surfaces need this.
979 			 */
980 			use_shader(gr, &gr->texture_shader_rgbx);
981 			shader_uniforms(&gr->texture_shader_rgbx, ev, output);
982 		}
983 
984 		if (ev->alpha < 1.0)
985 			glEnable(GL_BLEND);
986 		else
987 			glDisable(GL_BLEND);
988 
989 		repaint_region(ev, &repaint, &surface_opaque);
990 		gs->used_in_output_repaint = true;
991 	}
992 
993 	if (pixman_region32_not_empty(&surface_blend)) {
994 		use_shader(gr, gs->shader);
995 		glEnable(GL_BLEND);
996 		repaint_region(ev, &repaint, &surface_blend);
997 		gs->used_in_output_repaint = true;
998 	}
999 
1000 	pixman_region32_fini(&surface_blend);
1001 	pixman_region32_fini(&surface_opaque);
1002 
1003 out:
1004 	pixman_region32_fini(&repaint);
1005 
1006 	if (replaced_shader)
1007 		gs->shader = replaced_shader;
1008 }
1009 
1010 static void
1011 repaint_views(struct weston_output *output, pixman_region32_t *damage)
1012 {
1013 	struct weston_compositor *compositor = output->compositor;
1014 	struct weston_view *view;
1015 
1016 	wl_list_for_each_reverse(view, &compositor->view_list, link) {
1017 		if (view->plane == &compositor->primary_plane
1018 			&& view->renderer_type == WESTON_RENDERER_TYPE_GPU) {
1019 			draw_view(view, output, damage);
1020 		}
1021 	}
1022 }
1023 
1024 static int
1025 gl_renderer_create_fence_fd(struct weston_output *output);
1026 
1027 /* Updates the release fences of surfaces that were used in the current output
1028  * repaint. Should only be used from gl_renderer_repaint_output, so that the
1029  * information in gl_surface_state.used_in_output_repaint is accurate.
1030  */
1031 static void
1032 update_buffer_release_fences(struct weston_compositor *compositor,
1033 			     struct weston_output *output)
1034 {
1035 	struct weston_view *view;
1036 
1037 	wl_list_for_each_reverse(view, &compositor->view_list, link) {
1038 		struct gl_surface_state *gs;
1039 		struct weston_buffer_release *buffer_release;
1040 		int fence_fd;
1041 
1042 		if (view->plane != &compositor->primary_plane
1043 			|| view->renderer_type != WESTON_RENDERER_TYPE_GPU)
1044 			continue;
1045 
1046 		gs = get_surface_state(view->surface);
1047 		buffer_release = gs->buffer_release_ref.buffer_release;
1048 
1049 		if (!gs->used_in_output_repaint || !buffer_release)
1050 			continue;
1051 
1052 		fence_fd = gl_renderer_create_fence_fd(output);
1053 
1054 		/* If we have a buffer_release then it means we support fences,
1055 		 * and we should be able to create the release fence. If we
1056 		 * can't, something has gone horribly wrong, so disconnect the
1057 		 * client.
1058 		 */
1059 		if (fence_fd == -1) {
1060 			linux_explicit_synchronization_send_server_error(
1061 				buffer_release->resource,
1062 				"Failed to create release fence");
1063 			fd_clear(&buffer_release->fence_fd);
1064 			continue;
1065 		}
1066 
1067 		/* At the moment it is safe to just replace the fence_fd,
1068 		 * discarding the previous one:
1069 		 *
1070 		 * 1. If the previous fence fd represents a sync fence from
1071 		 *    a previous repaint cycle, that fence fd is now not
1072 		 *    sufficient to provide the release guarantee and should
1073 		 *    be replaced.
1074 		 *
1075 		 * 2. If the fence fd represents a sync fence from another
1076 		 *    output in the same repaint cycle, it's fine to replace
1077 		 *    it since we are rendering to all outputs using the same
1078 		 *    EGL context, so a fence issued for a later output rendering
1079 		 *    is guaranteed to signal after fences for previous output
1080 		 *    renderings.
1081 		 *
1082 		 * Note that the above is only valid if the buffer_release
1083 		 * fences only originate from the GL renderer, which guarantees
1084 		 * a total order of operations and fences.  If we introduce
1085 		 * fences from other sources (e.g., plane out-fences), we will
1086 		 * need to merge fences instead.
1087 		 */
1088 		fd_update(&buffer_release->fence_fd, fence_fd);
1089 	}
1090 }
1091 
1092 static void
1093 draw_output_border_texture(struct gl_output_state *go,
1094 			   enum gl_renderer_border_side side,
1095 			   int32_t x, int32_t y,
1096 			   int32_t width, int32_t height)
1097 {
1098 	struct gl_border_image *img = &go->borders[side];
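	/* Two triangles covering the border quad: vertices are emitted below
	 * as (x,y), (x+w,y), (x+w,y+h), (x,y+h), so (0,1,3) and (3,1,2)
	 * tile the whole rectangle. */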
1099 	static GLushort indices [] = { 0, 1, 3, 3, 1, 2 };
1100 
1101 	if (!img->data) {
1102 		if (img->tex) {
1103 			glDeleteTextures(1, &img->tex);
1104 			img->tex = 0;
1105 		}
1106 
1107 		return;
1108 	}
1109 
1110 	if (!img->tex) {
1111 		glGenTextures(1, &img->tex);
1112 		glBindTexture(GL_TEXTURE_2D, img->tex);
1113 
1114 		glTexParameteri(GL_TEXTURE_2D,
1115 				GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
1116 		glTexParameteri(GL_TEXTURE_2D,
1117 				GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
1118 		glTexParameteri(GL_TEXTURE_2D,
1119 				GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1120 		glTexParameteri(GL_TEXTURE_2D,
1121 				GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1122 	} else {
1123 		glBindTexture(GL_TEXTURE_2D, img->tex);
1124 	}
1125 
1126 	if (go->border_status & (1 << side)) {
1127 		glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 0);
1128 		glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0);
1129 		glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0);
1130 		glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT,
1131 			     img->tex_width, img->height, 0,
1132 			     GL_BGRA_EXT, GL_UNSIGNED_BYTE, img->data);
1133 	}
1134 
1135 	GLfloat texcoord[] = {
1136 		0.0f, 0.0f,
1137 		(GLfloat)img->width / (GLfloat)img->tex_width, 0.0f,
1138 		(GLfloat)img->width / (GLfloat)img->tex_width, 1.0f,
1139 		0.0f, 1.0f,
1140 	};
1141 
1142 	GLfloat verts[] = {
1143 		x, y,
1144 		x + width, y,
1145 		x + width, y + height,
1146 		x, y + height
1147 	};
1148 
1149 	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts);
1150 	glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texcoord);
1151 	glEnableVertexAttribArray(0);
1152 	glEnableVertexAttribArray(1);
1153 
1154 	glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
1155 
1156 	glDisableVertexAttribArray(1);
1157 	glDisableVertexAttribArray(0);
1158 }
1159 
1160 static int
1161 output_has_borders(struct weston_output *output)
1162 {
1163 	struct gl_output_state *go = get_output_state(output);
1164 
1165 	return go->borders[GL_RENDERER_BORDER_TOP].data ||
1166 	       go->borders[GL_RENDERER_BORDER_RIGHT].data ||
1167 	       go->borders[GL_RENDERER_BORDER_BOTTOM].data ||
1168 	       go->borders[GL_RENDERER_BORDER_LEFT].data;
1169 }
1170 
1171 static void
1172 draw_output_borders(struct weston_output *output,
1173 		    enum gl_border_status border_status)
1174 {
1175 	struct gl_output_state *go = get_output_state(output);
1176 	struct gl_renderer *gr = get_renderer(output->compositor);
1177 	struct gl_shader *shader = &gr->texture_shader_rgba;
1178 	struct gl_border_image *top, *bottom, *left, *right;
1179 	struct weston_matrix matrix;
1180 	int full_width, full_height;
1181 
1182 	if (border_status == BORDER_STATUS_CLEAN)
1183 		return; /* Clean. Nothing to do. */
1184 
1185 	top = &go->borders[GL_RENDERER_BORDER_TOP];
1186 	bottom = &go->borders[GL_RENDERER_BORDER_BOTTOM];
1187 	left = &go->borders[GL_RENDERER_BORDER_LEFT];
1188 	right = &go->borders[GL_RENDERER_BORDER_RIGHT];
1189 
1190 	full_width = output->current_mode->width + left->width + right->width;
1191 	full_height = output->current_mode->height + top->height + bottom->height;
1192 
1193 	glDisable(GL_BLEND);
1194 	use_shader(gr, shader);
1195 
1196 	glViewport(0, 0, full_width, full_height);
1197 
1198 	weston_matrix_init(&matrix);
1199 	weston_matrix_translate(&matrix, -full_width/2.0, -full_height/2.0, 0);
1200 	weston_matrix_scale(&matrix, 2.0/full_width, -2.0/full_height, 1);
1201 	glUniformMatrix4fv(shader->proj_uniform, 1, GL_FALSE, matrix.d);
1202 
1203 	glUniform1i(shader->tex_uniforms[0], 0);
1204 	glUniform1f(shader->alpha_uniform, 1);
1205 	glActiveTexture(GL_TEXTURE0);
1206 
1207 	if (border_status & BORDER_TOP_DIRTY)
1208 		draw_output_border_texture(go, GL_RENDERER_BORDER_TOP,
1209 					   0, 0,
1210 					   full_width, top->height);
1211 	if (border_status & BORDER_LEFT_DIRTY)
1212 		draw_output_border_texture(go, GL_RENDERER_BORDER_LEFT,
1213 					   0, top->height,
1214 					   left->width, output->current_mode->height);
1215 	if (border_status & BORDER_RIGHT_DIRTY)
1216 		draw_output_border_texture(go, GL_RENDERER_BORDER_RIGHT,
1217 					   full_width - right->width, top->height,
1218 					   right->width, output->current_mode->height);
1219 	if (border_status & BORDER_BOTTOM_DIRTY)
1220 		draw_output_border_texture(go, GL_RENDERER_BORDER_BOTTOM,
1221 					   0, full_height - bottom->height,
1222 					   full_width, bottom->height);
1223 }
1224 
1225 static void
1226 output_get_border_damage(struct weston_output *output,
1227 			 enum gl_border_status border_status,
1228 			 pixman_region32_t *damage)
1229 {
1230 	struct gl_output_state *go = get_output_state(output);
1231 	struct gl_border_image *top, *bottom, *left, *right;
1232 	int full_width, full_height;
1233 
1234 	if (border_status == BORDER_STATUS_CLEAN)
1235 		return; /* Clean. Nothing to do. */
1236 
1237 	top = &go->borders[GL_RENDERER_BORDER_TOP];
1238 	bottom = &go->borders[GL_RENDERER_BORDER_BOTTOM];
1239 	left = &go->borders[GL_RENDERER_BORDER_LEFT];
1240 	right = &go->borders[GL_RENDERER_BORDER_RIGHT];
1241 
1242 	full_width = output->current_mode->width + left->width + right->width;
1243 	full_height = output->current_mode->height + top->height + bottom->height;
1244 	if (border_status & BORDER_TOP_DIRTY)
1245 		pixman_region32_union_rect(damage, damage,
1246 					   0, 0,
1247 					   full_width, top->height);
1248 	if (border_status & BORDER_LEFT_DIRTY)
1249 		pixman_region32_union_rect(damage, damage,
1250 					   0, top->height,
1251 					   left->width, output->current_mode->height);
1252 	if (border_status & BORDER_RIGHT_DIRTY)
1253 		pixman_region32_union_rect(damage, damage,
1254 					   full_width - right->width, top->height,
1255 					   right->width, output->current_mode->height);
1256 	if (border_status & BORDER_BOTTOM_DIRTY)
1257 		pixman_region32_union_rect(damage, damage,
1258 					   0, full_height - bottom->height,
1259 					   full_width, bottom->height);
1260 }
1261 
1262 static void
1263 output_get_damage(struct weston_output *output,
1264 		  pixman_region32_t *buffer_damage, uint32_t *border_damage)
1265 {
1266 	struct gl_output_state *go = get_output_state(output);
1267 	struct gl_renderer *gr = get_renderer(output->compositor);
1268 	EGLint buffer_age = 0;
1269 	EGLBoolean ret;
1270 	int i;
1271 
1272 	if (gr->has_egl_buffer_age) {
1273 		ret = eglQuerySurface(gr->egl_display, go->egl_surface,
1274 				      EGL_BUFFER_AGE_EXT, &buffer_age);
1275 		if (ret == EGL_FALSE) {
1276 			weston_log("buffer age query failed.\n");
1277 			gl_renderer_print_egl_error_state();
1278 		}
1279 	}
1280 
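	/* EGL_BUFFER_AGE_EXT reports how many frames ago this buffer was last
	 * rendered (0 means undefined contents). With an age of N we must also
	 * repaint whatever was damaged in the previous N - 1 frames; if the age
	 * is unknown or exceeds what we track, fall back to a full repaint. */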
1281 	if (buffer_age == 0 || buffer_age - 1 > BUFFER_DAMAGE_COUNT) {
1282 		pixman_region32_copy(buffer_damage, &output->region);
1283 		*border_damage = BORDER_ALL_DIRTY;
1284 	} else {
1285 		for (i = 0; i < buffer_age - 1; i++)
1286 			*border_damage |= go->border_damage[(go->buffer_damage_index + i) % BUFFER_DAMAGE_COUNT];
1287 
1288 		if (*border_damage & BORDER_SIZE_CHANGED) {
1289 			/* If we've had a resize, we have to do a full
1290 			 * repaint. */
1291 			*border_damage |= BORDER_ALL_DIRTY;
1292 			pixman_region32_copy(buffer_damage, &output->region);
1293 		} else {
1294 			for (i = 0; i < buffer_age - 1; i++)
1295 				pixman_region32_union(buffer_damage,
1296 						      buffer_damage,
1297 						      &go->buffer_damage[(go->buffer_damage_index + i) % BUFFER_DAMAGE_COUNT]);
1298 		}
1299 	}
1300 }
1301 
1302 static void
1303 output_rotate_damage(struct weston_output *output,
1304 		     pixman_region32_t *output_damage,
1305 		     enum gl_border_status border_status)
1306 {
1307 	struct gl_output_state *go = get_output_state(output);
1308 	struct gl_renderer *gr = get_renderer(output->compositor);
1309 
1310 	if (!gr->has_egl_buffer_age)
1311 		return;
1312 
1313 	go->buffer_damage_index += BUFFER_DAMAGE_COUNT - 1;
1314 	go->buffer_damage_index %= BUFFER_DAMAGE_COUNT;
1315 
1316 	pixman_region32_copy(&go->buffer_damage[go->buffer_damage_index], output_damage);
1317 	go->border_damage[go->buffer_damage_index] = border_status;
1318 }
1319 
1320 /**
1321  * Given a region in Weston's (top-left-origin) global co-ordinate space,
1322  * translate it to the co-ordinate space used by GL for our output
1323  * rendering. This requires shifting it into output co-ordinate space:
1324  * translating for output offset within the global co-ordinate space,
1325  * multiplying by output scale to get buffer rather than logical size.
1326  *
1327  * Finally, if borders are drawn around the output, we translate the area
1328  * to account for the border region around the outside, and add any
1329  * damage if the borders have been redrawn.
1330  *
1331  * @param output The output whose co-ordinate space we are after
1332  * @param global_region The affected region in global co-ordinate space
1333  * @param[out] rects Y-inverted quads in {x,y,w,h} order; caller must free
1334  * @param[out] nrects Number of quads (4x number of co-ordinates)
1335  */
1336 static void
1337 pixman_region_to_egl_y_invert(struct weston_output *output,
1338 			      struct pixman_region32 *global_region,
1339 			      EGLint **rects,
1340 			      EGLint *nrects)
1341 {
1342 	struct gl_output_state *go = get_output_state(output);
1343 	pixman_region32_t transformed;
1344 	struct pixman_box32 *box;
1345 	int buffer_height;
1346 	EGLint *d;
1347 	int i;
1348 
1349 	/* Translate from global to output co-ordinate space. */
1350 	pixman_region32_init(&transformed);
1351 	pixman_region32_copy(&transformed, global_region);
1352 	pixman_region32_translate(&transformed, -output->x, -output->y);
1353 	weston_transformed_region(output->width, output->height,
1354 				  output->transform,
1355 				  output->current_scale,
1356 				  &transformed, &transformed);
1357 
1358 	/* If we have borders drawn around the output, shift our output damage
1359 	 * to account for borders being drawn around the outside, adding any
1360 	 * damage resulting from borders being redrawn. */
1361 	if (output_has_borders(output)) {
1362 		pixman_region32_translate(&transformed,
1363 					  go->borders[GL_RENDERER_BORDER_LEFT].width,
1364 					  go->borders[GL_RENDERER_BORDER_TOP].height);
1365 		output_get_border_damage(output, go->border_status,
1366 					 &transformed);
1367 	}
1368 
1369 	/* Convert from a Pixman region into {x,y,w,h} quads, flipping in the
1370 	 * Y axis to account for GL's lower-left-origin co-ordinate space. */
1371 	box = pixman_region32_rectangles(&transformed, nrects);
1372 	*rects = malloc(*nrects * 4 * sizeof(EGLint));
1373 
1374 	buffer_height = go->borders[GL_RENDERER_BORDER_TOP].height +
1375 			output->current_mode->height +
1376 			go->borders[GL_RENDERER_BORDER_BOTTOM].height;
1377 
1378 	d = *rects;
1379 	for (i = 0; i < *nrects; ++i) {
1380 		*d++ = box[i].x1;
1381 		*d++ = buffer_height - box[i].y2;
1382 		*d++ = box[i].x2 - box[i].x1;
1383 		*d++ = box[i].y2 - box[i].y1;
1384 	}
1385 
1386 	pixman_region32_fini(&transformed);
1387 }
1388 
1389 /* NOTE: We now allow falling back to ARGB gl visuals when XRGB is
1390  * unavailable, so we're assuming the background has no transparency
1391  * and that everything with a blend, like drop shadows, will have something
1392  * opaque (like the background) drawn underneath it.
1393  *
1394  * Depending on the underlying hardware, violating that assumption could
1395  * result in seeing through to another display plane.
1396  */
1397 static void
1398 gl_renderer_repaint_output(struct weston_output *output,
1399 			      pixman_region32_t *output_damage)
1400 {
1401 	struct gl_output_state *go = get_output_state(output);
1402 	struct weston_compositor *compositor = output->compositor;
1403 	struct gl_renderer *gr = get_renderer(compositor);
1404 	EGLBoolean ret;
1405 	static int errored;
1406 	/* areas we've damaged since we last used this buffer */
1407 	pixman_region32_t previous_damage;
1408 	/* total area we need to repaint this time */
1409 	pixman_region32_t total_damage;
1410 	enum gl_border_status border_status = BORDER_STATUS_CLEAN;
1411 	struct weston_view *view;
1412 
1413 	if (use_output(output) < 0)
1414 		return;
1415 
1416 	// OHOS hdi-backend
1417 	if (go->use_fbo) {
1418 		glBindFramebuffer(GL_FRAMEBUFFER, go->fbo[go->current_fbo_index].fbo);
1419 	}
1420 
1421 	/* Clear the used_in_output_repaint flag, so that we can properly track
1422 	 * which surfaces were used in this output repaint. */
1423 	wl_list_for_each_reverse(view, &compositor->view_list, link) {
1424 		if (view->plane == &compositor->primary_plane
1425 			&& view->renderer_type == WESTON_RENDERER_TYPE_GPU) {
1426 			struct gl_surface_state *gs =
1427 				get_surface_state(view->surface);
1428 			gs->used_in_output_repaint = false;
1429 		}
1430 	}
1431 
1432 	// if (go->begin_render_sync != EGL_NO_SYNC_KHR)
1433 	// 	gr->destroy_sync(gr->egl_display, go->begin_render_sync);
1434 	// if (go->end_render_sync != EGL_NO_SYNC_KHR)
1435 	// 	gr->destroy_sync(gr->egl_display, go->end_render_sync);
1436 
1437 	// go->begin_render_sync = create_render_sync(gr);
1438 
1439 	/* Calculate the viewport */
1440 	glViewport(go->borders[GL_RENDERER_BORDER_LEFT].width,
1441 		   go->borders[GL_RENDERER_BORDER_BOTTOM].height,
1442 		   output->current_mode->width,
1443 		   output->current_mode->height);
1444 
1445 	/* Calculate the global GL matrix */
1446 	go->output_matrix = output->matrix;
1447 	weston_matrix_translate(&go->output_matrix,
1448 				-(output->current_mode->width / 2.0),
1449 				-(output->current_mode->height / 2.0), 0);
1450 	weston_matrix_scale(&go->output_matrix,
1451 			    2.0 / output->current_mode->width,
1452 			    -2.0 / output->current_mode->height, 1);
1453 	weston_matrix_scale(&go->output_matrix, 1, -1, 1);
1454 
1455 	/* In fan debug mode, redraw everything to make sure that we clear any
1456 	 * fans left over from previous draws on this buffer.
1457 	 * This precludes the use of EGL_EXT_swap_buffers_with_damage and
1458 	 * EGL_KHR_partial_update, since we damage the whole area. */
1459 	if (gr->fan_debug) {
1460 		pixman_region32_t undamaged;
1461 		pixman_region32_init(&undamaged);
1462 		pixman_region32_subtract(&undamaged, &output->region,
1463 					 output_damage);
1464 		gr->fan_debug = false;
1465 		repaint_views(output, &undamaged);
1466 		gr->fan_debug = true;
1467 		pixman_region32_fini(&undamaged);
1468 	}
1469 
1470 	/* previous_damage covers regions damaged in previous paints since we
1471 	 * last used this buffer */
1472 	pixman_region32_init(&previous_damage);
1473 	pixman_region32_init(&total_damage); /* total area to redraw */
1474 
1475 	/* Update previous_damage using buffer_age (if available), and store
1476 	 * current damaged region for future use. */
1477 	output_get_damage(output, &previous_damage, &border_status);
1478 	output_rotate_damage(output, output_damage, go->border_status);
1479 
1480 	/* Redraw both areas which have changed since we last used this buffer,
1481 	 * as well as the areas we now want to repaint, to make sure the
1482 	 * buffer is up to date. */
1483 	pixman_region32_union(&total_damage, &previous_damage, output_damage);
1484 	border_status |= go->border_status;
1485 
1486 	if (gr->has_egl_partial_update && !gr->fan_debug) {
1487 		int n_egl_rects;
1488 		EGLint *egl_rects;
1489 
1490 		/* For partial_update, we need to pass the region which has
1491 		 * changed since we last rendered into this specific buffer;
1492 		 * this is total_damage. */
1493 		pixman_region_to_egl_y_invert(output, &total_damage,
1494 					      &egl_rects, &n_egl_rects);
1495 		gr->set_damage_region(gr->egl_display, go->egl_surface,
1496 				      egl_rects, n_egl_rects);
1497 		free(egl_rects);
1498 	}
1499 
1500 	repaint_views(output, &total_damage);
1501 
1502 	pixman_region32_fini(&total_damage);
1503 	pixman_region32_fini(&previous_damage);
1504 
1505 	//draw_output_borders(output, border_status);
1506 
1507 	// wl_signal_emit(&output->frame_signal, output_damage);
1508 
1509 	// go->end_render_sync = create_render_sync(gr);
1510 
1511 	// OHOS hdi-backend
1512 	if (go->use_fbo) {
1513 		glFinish();
1514 		go->current_fbo_index = (go->current_fbo_index + 1) % 2;
1515 		return;
1516 	}
1517 
1518 	if (gr->swap_buffers_with_damage && !gr->fan_debug) {
1519 		int n_egl_rects;
1520 		EGLint *egl_rects;
1521 
1522 		/* For swap_buffers_with_damage, we need to pass the region
1523 		 * which has changed since the previous SwapBuffers on this
1524 		 * surface - this is output_damage. */
1525 		pixman_region_to_egl_y_invert(output, output_damage,
1526 					      &egl_rects, &n_egl_rects);
1527 		ret = gr->swap_buffers_with_damage(gr->egl_display,
1528 						   go->egl_surface,
1529 						   egl_rects, n_egl_rects);
1530 		free(egl_rects);
1531 	} else {
1532 		ret = eglSwapBuffers(gr->egl_display, go->egl_surface);
1533 	}
1534 
1535 	if (ret == EGL_FALSE && !errored) {
1536 		errored = 1;
1537 		weston_log("Failed in eglSwapBuffers.\n");
1538 		gl_renderer_print_egl_error_state();
1539 	}
1540 
1541 	go->border_status = BORDER_STATUS_CLEAN;
1542 
1543 	/* We have to submit the render sync objects after swap buffers, since
1544 	 * the objects get assigned a valid sync file fd only after a gl flush.
1545 	 */
1546 	timeline_submit_render_sync(gr, compositor, output,
1547 				    go->begin_render_sync,
1548 				    TIMELINE_RENDER_POINT_TYPE_BEGIN);
1549 	timeline_submit_render_sync(gr, compositor, output, go->end_render_sync,
1550 				    TIMELINE_RENDER_POINT_TYPE_END);
1551 
1552 	update_buffer_release_fences(compositor, output);
1553 }
1554 
1555 static int
1556 gl_renderer_read_pixels(struct weston_output *output,
1557 			       pixman_format_code_t format, void *pixels,
1558 			       uint32_t x, uint32_t y,
1559 			       uint32_t width, uint32_t height)
1560 {
1561 	GLenum gl_format;
1562 	struct gl_output_state *go = get_output_state(output);
1563 
1564 	x += go->borders[GL_RENDERER_BORDER_LEFT].width;
1565 	y += go->borders[GL_RENDERER_BORDER_BOTTOM].height;
1566 
1567 	switch (format) {
1568 	case PIXMAN_a8r8g8b8:
1569 		gl_format = GL_BGRA_EXT;
1570 		break;
1571 	case PIXMAN_a8b8g8r8:
1572 		gl_format = GL_RGBA;
1573 		break;
1574 	default:
1575 		return -1;
1576 	}
1577 
1578 	if (use_output(output) < 0)
1579 		return -1;
1580 
1581 	glPixelStorei(GL_PACK_ALIGNMENT, 1);
1582 	glReadPixels(x, y, width, height, gl_format,
1583 		     GL_UNSIGNED_BYTE, pixels);
1584 
1585 	return 0;
1586 }
1587 
1588 static GLenum gl_format_from_internal(GLenum internal_format)
1589 {
1590 	switch (internal_format) {
1591 	case GL_R8_EXT:
1592 		return GL_RED_EXT;
1593 	case GL_RG8_EXT:
1594 		return GL_RG_EXT;
1595 	default:
1596 		return internal_format;
1597 	}
1598 }
1599 
1600 static void
1601 gl_renderer_flush_damage(struct weston_surface *surface)
1602 {
1603 	struct gl_renderer *gr = get_renderer(surface->compositor);
1604 	struct gl_surface_state *gs = get_surface_state(surface);
1605 	struct weston_buffer *buffer = gs->buffer_ref.buffer;
1606 	struct weston_view *view;
1607 	bool texture_used;
1608 	pixman_box32_t *rectangles;
1609 	uint8_t *data;
1610 	int i, j, n;
1611 
1612 	pixman_region32_union(&gs->texture_damage,
1613 			      &gs->texture_damage, &surface->damage);
1614 
1615 	if (!buffer)
1616 		return;
1617 
1618 	/* Avoid upload, if the texture won't be used this time.
1619 	 * We still accumulate the damage in texture_damage, and
1620 	 * hold the reference to the buffer, in case the surface
1621 	 * migrates back to the primary plane.
1622 	 */
1623 	texture_used = false;
1624 	wl_list_for_each(view, &surface->views, surface_link) {
1625 		if (view->plane == &surface->compositor->primary_plane) {
1626 			texture_used = true;
1627 			break;
1628 		}
1629 	}
1630 	if (!texture_used)
1631 		return;
1632 
1633 	if (!pixman_region32_not_empty(&gs->texture_damage) &&
1634 	    !gs->needs_full_upload)
1635 		goto done;
1636 
1637 	data = wl_shm_buffer_get_data(buffer->shm_buffer);
1638 
1639 	if (!gr->has_unpack_subimage) {
1640 		wl_shm_buffer_begin_access(buffer->shm_buffer);
1641 		for (j = 0; j < gs->num_textures; j++) {
1642 			glBindTexture(GL_TEXTURE_2D, gs->textures[j]);
1643 			glTexImage2D(GL_TEXTURE_2D, 0,
1644 				     gs->gl_format[j],
1645 				     gs->pitch / gs->hsub[j],
1646 				     buffer->height / gs->vsub[j],
1647 				     0,
1648 				     gl_format_from_internal(gs->gl_format[j]),
1649 				     gs->gl_pixel_type,
1650 				     data + gs->offset[j]);
1651 		}
1652 		wl_shm_buffer_end_access(buffer->shm_buffer);
1653 
1654 		goto done;
1655 	}
1656 
1657 	if (gs->needs_full_upload) {
1658 		glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0);
1659 		glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0);
1660 		wl_shm_buffer_begin_access(buffer->shm_buffer);
1661 		for (j = 0; j < gs->num_textures; j++) {
1662 			glBindTexture(GL_TEXTURE_2D, gs->textures[j]);
1663 			glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
1664 				      gs->pitch / gs->hsub[j]);
1665 			glTexImage2D(GL_TEXTURE_2D, 0,
1666 				     gs->gl_format[j],
1667 				     gs->pitch / gs->hsub[j],
1668 				     buffer->height / gs->vsub[j],
1669 				     0,
1670 				     gl_format_from_internal(gs->gl_format[j]),
1671 				     gs->gl_pixel_type,
1672 				     data + gs->offset[j]);
1673 		}
1674 		wl_shm_buffer_end_access(buffer->shm_buffer);
1675 		goto done;
1676 	}
1677 
1678 	rectangles = pixman_region32_rectangles(&gs->texture_damage, &n);
1679 	wl_shm_buffer_begin_access(buffer->shm_buffer);
1680 	for (i = 0; i < n; i++) {
1681 		pixman_box32_t r;
1682 
1683 		r = weston_surface_to_buffer_rect(surface, rectangles[i]);
1684 
1685 		for (j = 0; j < gs->num_textures; j++) {
1686 			glBindTexture(GL_TEXTURE_2D, gs->textures[j]);
1687 			glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
1688 				      gs->pitch / gs->hsub[j]);
1689 			glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT,
1690 				      r.x1 / gs->hsub[j]);
1691 			glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT,
1692 				      r.y1 / gs->hsub[j]);
1693 			glTexSubImage2D(GL_TEXTURE_2D, 0,
1694 					r.x1 / gs->hsub[j],
1695 					r.y1 / gs->vsub[j],
1696 					(r.x2 - r.x1) / gs->hsub[j],
1697 					(r.y2 - r.y1) / gs->vsub[j],
1698 					gl_format_from_internal(gs->gl_format[j]),
1699 					gs->gl_pixel_type,
1700 					data + gs->offset[j]);
1701 		}
1702 	}
1703 	wl_shm_buffer_end_access(buffer->shm_buffer);
1704 
1705 done:
1706 	pixman_region32_fini(&gs->texture_damage);
1707 	pixman_region32_init(&gs->texture_damage);
1708 	gs->needs_full_upload = false;
1709 
1710 	weston_buffer_reference(&gs->buffer_ref, NULL);
1711 	weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
1712 }
1713 
1714 static void
1715 ensure_textures(struct gl_surface_state *gs, int num_textures)
1716 {
1717 	int i;
1718 
1719 	if (num_textures <= gs->num_textures)
1720 		return;
1721 
1722 	for (i = gs->num_textures; i < num_textures; i++) {
1723 		glGenTextures(1, &gs->textures[i]);
1724 		glBindTexture(gs->target, gs->textures[i]);
1725 		glTexParameteri(gs->target,
1726 				GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
1727 		glTexParameteri(gs->target,
1728 				GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
1729 	}
1730 	gs->num_textures = num_textures;
1731 	glBindTexture(gs->target, 0);
1732 }
1733 
1734 static void
1735 gl_renderer_attach_shm(struct weston_surface *es, struct weston_buffer *buffer,
1736 		       struct wl_shm_buffer *shm_buffer)
1737 {
1738 	struct weston_compositor *ec = es->compositor;
1739 	struct gl_renderer *gr = get_renderer(ec);
1740 	struct gl_surface_state *gs = get_surface_state(es);
1741 	GLenum gl_format[3] = {0, 0, 0};
1742 	GLenum gl_pixel_type;
1743 	int pitch;
1744 	int num_planes;
1745 
1746 	buffer->shm_buffer = shm_buffer;
1747 	buffer->width = wl_shm_buffer_get_width(shm_buffer);
1748 	buffer->height = wl_shm_buffer_get_height(shm_buffer);
1749 
1750 	num_planes = 1;
1751 	gs->offset[0] = 0;
1752 	gs->hsub[0] = 1;
1753 	gs->vsub[0] = 1;
1754 
1755 	switch (wl_shm_buffer_get_format(shm_buffer)) {
1756 	case WL_SHM_FORMAT_XRGB8888:
1757 		gs->shader = &gr->texture_shader_rgbx;
1758 		pitch = wl_shm_buffer_get_stride(shm_buffer) / 4;
1759 		gl_format[0] = GL_BGRA_EXT;
1760 		gl_pixel_type = GL_UNSIGNED_BYTE;
1761 		es->is_opaque = true;
1762 		break;
1763 	case WL_SHM_FORMAT_ARGB8888:
1764 		gs->shader = &gr->texture_shader_rgba;
1765 		pitch = wl_shm_buffer_get_stride(shm_buffer) / 4;
1766 		gl_format[0] = GL_BGRA_EXT;
1767 		gl_pixel_type = GL_UNSIGNED_BYTE;
1768 		es->is_opaque = false;
1769 		break;
1770 	case WL_SHM_FORMAT_RGB565:
1771 		gs->shader = &gr->texture_shader_rgbx;
1772 		pitch = wl_shm_buffer_get_stride(shm_buffer) / 2;
1773 		gl_format[0] = GL_RGB;
1774 		gl_pixel_type = GL_UNSIGNED_SHORT_5_6_5;
1775 		es->is_opaque = true;
1776 		break;
1777 	case WL_SHM_FORMAT_YUV420:
1778 		gs->shader = &gr->texture_shader_y_u_v;
1779 		pitch = wl_shm_buffer_get_stride(shm_buffer);
1780 		gl_pixel_type = GL_UNSIGNED_BYTE;
1781 		num_planes = 3;
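		/* Planar 4:2:0: the U and V planes follow the Y plane in the
		 * buffer and are subsampled by two both horizontally and
		 * vertically. */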
1782 		gs->offset[1] = gs->offset[0] + (pitch / gs->hsub[0]) *
1783 				(buffer->height / gs->vsub[0]);
1784 		gs->hsub[1] = 2;
1785 		gs->vsub[1] = 2;
1786 		gs->offset[2] = gs->offset[1] + (pitch / gs->hsub[1]) *
1787 				(buffer->height / gs->vsub[1]);
1788 		gs->hsub[2] = 2;
1789 		gs->vsub[2] = 2;
1790 		if (gr->has_gl_texture_rg) {
1791 			gl_format[0] = GL_R8_EXT;
1792 			gl_format[1] = GL_R8_EXT;
1793 			gl_format[2] = GL_R8_EXT;
1794 		} else {
1795 			gl_format[0] = GL_LUMINANCE;
1796 			gl_format[1] = GL_LUMINANCE;
1797 			gl_format[2] = GL_LUMINANCE;
1798 		}
1799 		es->is_opaque = true;
1800 		break;
1801 	case WL_SHM_FORMAT_NV12:
1802 		pitch = wl_shm_buffer_get_stride(shm_buffer);
1803 		gl_pixel_type = GL_UNSIGNED_BYTE;
1804 		num_planes = 2;
1805 		gs->offset[1] = gs->offset[0] + (pitch / gs->hsub[0]) *
1806 				(buffer->height / gs->vsub[0]);
1807 		gs->hsub[1] = 2;
1808 		gs->vsub[1] = 2;
1809 		if (gr->has_gl_texture_rg) {
1810 			gs->shader = &gr->texture_shader_y_uv;
1811 			gl_format[0] = GL_R8_EXT;
1812 			gl_format[1] = GL_RG8_EXT;
1813 		} else {
1814 			gs->shader = &gr->texture_shader_y_xuxv;
1815 			gl_format[0] = GL_LUMINANCE;
1816 			gl_format[1] = GL_LUMINANCE_ALPHA;
1817 		}
1818 		es->is_opaque = true;
1819 		break;
1820 	case WL_SHM_FORMAT_YUYV:
1821 		gs->shader = &gr->texture_shader_y_xuxv;
1822 		pitch = wl_shm_buffer_get_stride(shm_buffer) / 2;
1823 		gl_pixel_type = GL_UNSIGNED_BYTE;
1824 		num_planes = 2;
1825 		gs->offset[1] = 0;
1826 		gs->hsub[1] = 2;
1827 		gs->vsub[1] = 1;
1828 		if (gr->has_gl_texture_rg)
1829 			gl_format[0] = GL_RG8_EXT;
1830 		else
1831 			gl_format[0] = GL_LUMINANCE_ALPHA;
1832 		gl_format[1] = GL_BGRA_EXT;
1833 		es->is_opaque = true;
1834 		break;
1835 	default:
1836 		weston_log("warning: unknown shm buffer format: %08x\n",
1837 			   wl_shm_buffer_get_format(shm_buffer));
1838 		return;
1839 	}
1840 
1841 	/* Only allocate a texture if it doesn't match the existing one.
1842 	 * If a switch from a DRM-allocated buffer to an SHM buffer is
1843 	 * happening, we need to allocate a new texture buffer. */
1844 	if (pitch != gs->pitch ||
1845 	    buffer->height != gs->height ||
1846 	    gl_format[0] != gs->gl_format[0] ||
1847 	    gl_format[1] != gs->gl_format[1] ||
1848 	    gl_format[2] != gs->gl_format[2] ||
1849 	    gl_pixel_type != gs->gl_pixel_type ||
1850 	    gs->buffer_type != BUFFER_TYPE_SHM) {
1851 		gs->pitch = pitch;
1852 		gs->height = buffer->height;
1853 		gs->target = GL_TEXTURE_2D;
1854 		gs->gl_format[0] = gl_format[0];
1855 		gs->gl_format[1] = gl_format[1];
1856 		gs->gl_format[2] = gl_format[2];
1857 		gs->gl_pixel_type = gl_pixel_type;
1858 		gs->buffer_type = BUFFER_TYPE_SHM;
1859 		gs->needs_full_upload = true;
1860 		gs->y_inverted = true;
1861 		gs->direct_display = false;
1862 
1863 		gs->surface = es;
1864 
1865 		ensure_textures(gs, num_planes);
1866 	}
1867 }
1868 
1869 static void
1870 gl_renderer_attach_egl(struct weston_surface *es, struct weston_buffer *buffer,
1871 		       uint32_t format)
1872 {
1873 	struct weston_compositor *ec = es->compositor;
1874 	struct gl_renderer *gr = get_renderer(ec);
1875 	struct gl_surface_state *gs = get_surface_state(es);
1876 	EGLint attribs[3];
1877 	int i, num_planes;
1878 
1879 	buffer->legacy_buffer = (struct wl_buffer *)buffer->resource;
1880 	gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
1881 			 EGL_WIDTH, &buffer->width);
1882 	gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
1883 			 EGL_HEIGHT, &buffer->height);
1884 	gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
1885 			 EGL_WAYLAND_Y_INVERTED_WL, &buffer->y_inverted);
1886 
1887 	for (i = 0; i < gs->num_images; i++) {
1888 		egl_image_unref(gs->images[i]);
1889 		gs->images[i] = NULL;
1890 	}
1891 	gs->num_images = 0;
1892 	gs->target = GL_TEXTURE_2D;
1893 	es->is_opaque = false;
1894 	switch (format) {
1895 	case EGL_TEXTURE_RGB:
1896 		es->is_opaque = true;
1897 		/* fallthrough */
1898 	case EGL_TEXTURE_RGBA:
1899 	default:
1900 		num_planes = 1;
1901 		gs->shader = &gr->texture_shader_rgba;
1902 		break;
1903 	case EGL_TEXTURE_EXTERNAL_WL:
1904 		num_planes = 1;
1905 		gs->target = GL_TEXTURE_EXTERNAL_OES;
1906 		gs->shader = &gr->texture_shader_egl_external;
1907 		break;
1908 	case EGL_TEXTURE_Y_UV_WL:
1909 		num_planes = 2;
1910 		gs->shader = &gr->texture_shader_y_uv;
1911 		es->is_opaque = true;
1912 		break;
1913 	case EGL_TEXTURE_Y_U_V_WL:
1914 		num_planes = 3;
1915 		gs->shader = &gr->texture_shader_y_u_v;
1916 		es->is_opaque = true;
1917 		break;
1918 	case EGL_TEXTURE_Y_XUXV_WL:
1919 		num_planes = 2;
1920 		gs->shader = &gr->texture_shader_y_xuxv;
1921 		es->is_opaque = true;
1922 		break;
1923 	}
1924 
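	/* Create one EGLImage per plane of the EGL buffer and bind each one
	 * to its own texture unit so the selected shader can sample it. */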
1925 	ensure_textures(gs, num_planes);
1926 	for (i = 0; i < num_planes; i++) {
1927 		attribs[0] = EGL_WAYLAND_PLANE_WL;
1928 		attribs[1] = i;
1929 		attribs[2] = EGL_NONE;
1930 		gs->images[i] = egl_image_create(gr,
1931 						 EGL_WAYLAND_BUFFER_WL,
1932 						 buffer->legacy_buffer,
1933 						 attribs);
1934 		if (!gs->images[i]) {
1935 			weston_log("failed to create img for plane %d\n", i);
1936 			continue;
1937 		}
1938 		gs->num_images++;
1939 
1940 		glActiveTexture(GL_TEXTURE0 + i);
1941 		glBindTexture(gs->target, gs->textures[i]);
1942 		gr->image_target_texture_2d(gs->target,
1943 					    gs->images[i]->image);
1944 	}
1945 
1946 	gs->pitch = buffer->width;
1947 	gs->height = buffer->height;
1948 	gs->buffer_type = BUFFER_TYPE_EGL;
1949 	gs->y_inverted = buffer->y_inverted;
1950 }
1951 
1952 static void
1953 gl_renderer_destroy_dmabuf(struct linux_dmabuf_buffer *dmabuf)
1954 {
1955 	struct dmabuf_image *image = linux_dmabuf_buffer_get_user_data(dmabuf);
1956 
1957 	dmabuf_image_destroy(image);
1958 }
1959 
1960 static struct egl_image *
1961 import_simple_dmabuf(struct gl_renderer *gr,
1962                      struct dmabuf_attributes *attributes)
1963 {
1964 	struct egl_image *image;
1965 	EGLint attribs[50];
1966 	int atti = 0;
1967 	bool has_modifier;
1968 
1969 	/* This requires the Mesa commit in
1970 	 * Mesa 10.3 (08264e5dad4df448e7718e782ad9077902089a07) or
1971 	 * Mesa 10.2.7 (55d28925e6109a4afd61f109e845a8a51bd17652).
1972 	 * Otherwise Mesa closes the fd behind our back and re-importing
1973 	 * will fail.
1974 	 * https://bugs.freedesktop.org/show_bug.cgi?id=76188
1975 	 */
1976 
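	/* Build the EGL_LINUX_DMA_BUF_EXT attribute list: width, height and
	 * DRM fourcc first, then fd/offset/pitch for each plane, plus the
	 * 64-bit format modifier split into lo/hi words when one is given. */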
1977 	attribs[atti++] = EGL_WIDTH;
1978 	attribs[atti++] = attributes->width;
1979 	attribs[atti++] = EGL_HEIGHT;
1980 	attribs[atti++] = attributes->height;
1981 	attribs[atti++] = EGL_LINUX_DRM_FOURCC_EXT;
1982 	attribs[atti++] = attributes->format;
1983 
1984 	if (attributes->modifier[0] != DRM_FORMAT_MOD_INVALID) {
1985 		if (!gr->has_dmabuf_import_modifiers)
1986 			return NULL;
1987 		has_modifier = true;
1988 	} else {
1989 		has_modifier = false;
1990 	}
1991 
1992 	if (attributes->n_planes > 0) {
1993 		attribs[atti++] = EGL_DMA_BUF_PLANE0_FD_EXT;
1994 		attribs[atti++] = attributes->fd[0];
1995 		attribs[atti++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT;
1996 		attribs[atti++] = attributes->offset[0];
1997 		attribs[atti++] = EGL_DMA_BUF_PLANE0_PITCH_EXT;
1998 		attribs[atti++] = attributes->stride[0];
1999 		if (has_modifier) {
2000 			attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
2001 			attribs[atti++] = attributes->modifier[0] & 0xFFFFFFFF;
2002 			attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
2003 			attribs[atti++] = attributes->modifier[0] >> 32;
2004 		}
2005 	}
2006 
2007 	if (attributes->n_planes > 1) {
2008 		attribs[atti++] = EGL_DMA_BUF_PLANE1_FD_EXT;
2009 		attribs[atti++] = attributes->fd[1];
2010 		attribs[atti++] = EGL_DMA_BUF_PLANE1_OFFSET_EXT;
2011 		attribs[atti++] = attributes->offset[1];
2012 		attribs[atti++] = EGL_DMA_BUF_PLANE1_PITCH_EXT;
2013 		attribs[atti++] = attributes->stride[1];
2014 		if (has_modifier) {
2015 			attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT;
2016 			attribs[atti++] = attributes->modifier[1] & 0xFFFFFFFF;
2017 			attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT;
2018 			attribs[atti++] = attributes->modifier[1] >> 32;
2019 		}
2020 	}
2021 
2022 	if (attributes->n_planes > 2) {
2023 		attribs[atti++] = EGL_DMA_BUF_PLANE2_FD_EXT;
2024 		attribs[atti++] = attributes->fd[2];
2025 		attribs[atti++] = EGL_DMA_BUF_PLANE2_OFFSET_EXT;
2026 		attribs[atti++] = attributes->offset[2];
2027 		attribs[atti++] = EGL_DMA_BUF_PLANE2_PITCH_EXT;
2028 		attribs[atti++] = attributes->stride[2];
2029 		if (has_modifier) {
2030 			attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT;
2031 			attribs[atti++] = attributes->modifier[2] & 0xFFFFFFFF;
2032 			attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT;
2033 			attribs[atti++] = attributes->modifier[2] >> 32;
2034 		}
2035 	}
2036 
2037 	if (gr->has_dmabuf_import_modifiers) {
2038 		if (attributes->n_planes > 3) {
2039 			attribs[atti++] = EGL_DMA_BUF_PLANE3_FD_EXT;
2040 			attribs[atti++] = attributes->fd[3];
2041 			attribs[atti++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
2042 			attribs[atti++] = attributes->offset[3];
2043 			attribs[atti++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
2044 			attribs[atti++] = attributes->stride[3];
2045 			attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT;
2046 			attribs[atti++] = attributes->modifier[3] & 0xFFFFFFFF;
2047 			attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT;
2048 			attribs[atti++] = attributes->modifier[3] >> 32;
2049 		}
2050 	}
2051 
2052 	attribs[atti++] = EGL_NONE;
2053 
2054 	image = egl_image_create(gr, EGL_LINUX_DMA_BUF_EXT, NULL,
2055 				 attribs);
2056 
2057 	return image;
2058 }
2059 
2060 /* The kernel header drm_fourcc.h defines the DRM formats below.  We duplicate
2061  * some of the definitions here so that building Weston won't require
2062  * bleeding-edge kernel headers.
2063  */
2064 #ifndef DRM_FORMAT_R8
2065 #define DRM_FORMAT_R8            fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
2066 #endif
2067 
2068 #ifndef DRM_FORMAT_GR88
2069 #define DRM_FORMAT_GR88          fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
2070 #endif
2071 
2072 #ifndef DRM_FORMAT_XYUV8888
2073 #define DRM_FORMAT_XYUV8888      fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
2074 #endif
2075 
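/* Table of YUV formats that can still be handled when EGL refuses a direct
 * dmabuf import: each entry describes how to re-import the buffer as one or
 * more single-plane images (R8/GR88/RGBA views with the listed subsampling
 * divisors) that the YUV conversion shaders can sample. */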
2076 struct yuv_format_descriptor yuv_formats[] = {
2077 	{
2078 		.format = DRM_FORMAT_YUYV,
2079 		.input_planes = 1,
2080 		.output_planes = 2,
2081 		.texture_type = TEXTURE_Y_XUXV_WL,
2082 		{{
2083 			.width_divisor = 1,
2084 			.height_divisor = 1,
2085 			.format = DRM_FORMAT_GR88,
2086 			.plane_index = 0
2087 		}, {
2088 			.width_divisor = 2,
2089 			.height_divisor = 1,
2090 			.format = DRM_FORMAT_ARGB8888,
2091 			.plane_index = 0
2092 		}}
2093 	}, {
2094 		.format = DRM_FORMAT_NV12,
2095 		.input_planes = 2,
2096 		.output_planes = 2,
2097 		.texture_type = TEXTURE_Y_UV_WL,
2098 		{{
2099 			.width_divisor = 1,
2100 			.height_divisor = 1,
2101 			.format = DRM_FORMAT_R8,
2102 			.plane_index = 0
2103 		}, {
2104 			.width_divisor = 2,
2105 			.height_divisor = 2,
2106 			.format = DRM_FORMAT_GR88,
2107 			.plane_index = 1
2108 		}}
2109 	}, {
2110 		.format = DRM_FORMAT_YUV420,
2111 		.input_planes = 3,
2112 		.output_planes = 3,
2113 		.texture_type = TEXTURE_Y_U_V_WL,
2114 		{{
2115 			.width_divisor = 1,
2116 			.height_divisor = 1,
2117 			.format = DRM_FORMAT_R8,
2118 			.plane_index = 0
2119 		}, {
2120 			.width_divisor = 2,
2121 			.height_divisor = 2,
2122 			.format = DRM_FORMAT_R8,
2123 			.plane_index = 1
2124 		}, {
2125 			.width_divisor = 2,
2126 			.height_divisor = 2,
2127 			.format = DRM_FORMAT_R8,
2128 			.plane_index = 2
2129 		}}
2130 	}, {
2131 		.format = DRM_FORMAT_YUV444,
2132 		.input_planes = 3,
2133 		.output_planes = 3,
2134 		.texture_type = TEXTURE_Y_U_V_WL,
2135 		{{
2136 			.width_divisor = 1,
2137 			.height_divisor = 1,
2138 			.format = DRM_FORMAT_R8,
2139 			.plane_index = 0
2140 		}, {
2141 			.width_divisor = 1,
2142 			.height_divisor = 1,
2143 			.format = DRM_FORMAT_R8,
2144 			.plane_index = 1
2145 		}, {
2146 			.width_divisor = 1,
2147 			.height_divisor = 1,
2148 			.format = DRM_FORMAT_R8,
2149 			.plane_index = 2
2150 		}}
2151 	}, {
2152 		.format = DRM_FORMAT_XYUV8888,
2153 		.input_planes = 1,
2154 		.output_planes = 1,
2155 		.texture_type = TEXTURE_XYUV_WL,
2156 		{{
2157 			.width_divisor = 1,
2158 			.height_divisor = 1,
2159 			.format = DRM_FORMAT_XBGR8888,
2160 			.plane_index = 0
2161 		}}
2162 	}
2163 };
2164 
2165 static struct egl_image *
2166 import_dmabuf_single_plane(struct gl_renderer *gr,
2167                            const struct dmabuf_attributes *attributes,
2168                            struct yuv_plane_descriptor *descriptor)
2169 {
2170 	struct dmabuf_attributes plane;
2171 	struct egl_image *image;
2172 	char fmt[4];
2173 
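	/* Synthesize single-plane dmabuf attributes for this one plane, using
	 * the per-plane format and size divisors from the descriptor table. */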
2174 	plane.width = attributes->width / descriptor->width_divisor;
2175 	plane.height = attributes->height / descriptor->height_divisor;
2176 	plane.format = descriptor->format;
2177 	plane.n_planes = 1;
2178 	plane.fd[0] = attributes->fd[descriptor->plane_index];
2179 	plane.offset[0] = attributes->offset[descriptor->plane_index];
2180 	plane.stride[0] = attributes->stride[descriptor->plane_index];
2181 	plane.modifier[0] = attributes->modifier[descriptor->plane_index];
2182 
2183 	image = import_simple_dmabuf(gr, &plane);
2184 	if (!image) {
2185 		weston_log("Failed to import plane %d as %.4s\n",
2186 		           descriptor->plane_index,
2187 		           dump_format(descriptor->format, fmt));
2188 		return NULL;
2189 	}
2190 
2191 	return image;
2192 }
2193 
2194 static bool
2195 import_yuv_dmabuf(struct gl_renderer *gr,
2196                   struct dmabuf_image *image)
2197 {
2198 	unsigned i;
2199 	int j;
2200 	int ret;
2201 	struct yuv_format_descriptor *format = NULL;
2202 	struct dmabuf_attributes *attributes = &image->dmabuf->attributes;
2203 	char fmt[4];
2204 
2205 	for (i = 0; i < ARRAY_LENGTH(yuv_formats); ++i) {
2206 		if (yuv_formats[i].format == attributes->format) {
2207 			format = &yuv_formats[i];
2208 			break;
2209 		}
2210 	}
2211 
2212 	if (!format) {
2213 		weston_log("Error during import, and no known conversion for format "
2214 		           "%.4s in the renderer\n",
2215 		           dump_format(attributes->format, fmt));
2216 		return false;
2217 	}
2218 
2219 	if (attributes->n_planes != format->input_planes) {
2220 		weston_log("%.4s dmabuf must contain %d plane%s (%d provided)\n",
2221 		           dump_format(format->format, fmt),
2222 		           format->input_planes,
2223 		           (format->input_planes > 1) ? "s" : "",
2224 		           attributes->n_planes);
2225 		return false;
2226 	}
2227 
2228 	for (j = 0; j < format->output_planes; ++j) {
2229 		image->images[j] = import_dmabuf_single_plane(gr, attributes,
2230 		                                              &format->plane[j]);
2231 		if (!image->images[j]) {
2232 			while (j) {
2233 				ret = egl_image_unref(image->images[--j]);
2234 				assert(ret == 0);
2235 			}
2236 			return false;
2237 		}
2238 	}
2239 
2240 	image->num_images = format->output_planes;
2241 
2242 	switch (format->texture_type) {
2243 	case TEXTURE_Y_XUXV_WL:
2244 		image->shader = &gr->texture_shader_y_xuxv;
2245 		break;
2246 	case TEXTURE_Y_UV_WL:
2247 		image->shader = &gr->texture_shader_y_uv;
2248 		break;
2249 	case TEXTURE_Y_U_V_WL:
2250 		image->shader = &gr->texture_shader_y_u_v;
2251 		break;
2252 	case TEXTURE_XYUV_WL:
2253 		image->shader = &gr->texture_shader_xyuv;
2254 		break;
2255 	default:
2256 		assert(false);
2257 	}
2258 
2259 	return true;
2260 }
2261 
2262 static void
2263 gl_renderer_query_dmabuf_modifiers_full(struct gl_renderer *gr, int format,
2264 					uint64_t **modifiers,
2265 					unsigned **external_only,
2266 					int *num_modifiers);
2267 
2268 static struct dmabuf_format*
2269 dmabuf_format_create(struct gl_renderer *gr, uint32_t format)
2270 {
2271 	struct dmabuf_format *dmabuf_format;
2272 
2273 	dmabuf_format = calloc(1, sizeof(struct dmabuf_format));
2274 	if (!dmabuf_format)
2275 		return NULL;
2276 
2277 	dmabuf_format->format = format;
2278 
2279 	gl_renderer_query_dmabuf_modifiers_full(gr, format,
2280 			&dmabuf_format->modifiers,
2281 			&dmabuf_format->external_only,
2282 			&dmabuf_format->num_modifiers);
2283 
2284 	if (dmabuf_format->num_modifiers == 0) {
2285 		free(dmabuf_format);
2286 		return NULL;
2287 	}
2288 
2289 	wl_list_insert(&gr->dmabuf_formats, &dmabuf_format->link);
2290 	return dmabuf_format;
2291 }
2292 
2293 static void
2294 dmabuf_format_destroy(struct dmabuf_format *format)
2295 {
2296 	free(format->modifiers);
2297 	free(format->external_only);
2298 	wl_list_remove(&format->link);
2299 	free(format);
2300 }
2301 
2302 static GLenum
2303 choose_texture_target(struct gl_renderer *gr,
2304 		      struct dmabuf_attributes *attributes)
2305 {
2306 	struct dmabuf_format *tmp, *format = NULL;
2307 
2308 	wl_list_for_each(tmp, &gr->dmabuf_formats, link) {
2309 		if (tmp->format == attributes->format) {
2310 			format = tmp;
2311 			break;
2312 		}
2313 	}
2314 
2315 	if (!format)
2316 		format = dmabuf_format_create(gr, attributes->format);
2317 
2318 	if (format) {
2319 		int i;
2320 
2321 		for (i = 0; i < format->num_modifiers; ++i) {
2322 			if (format->modifiers[i] == attributes->modifier[0]) {
2323 				if (format->external_only[i])
2324 					return GL_TEXTURE_EXTERNAL_OES;
2325 				else
2326 					return GL_TEXTURE_2D;
2327 			}
2328 		}
2329 	}
2330 
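	/* No modifier information for this format: fall back to heuristics.
	 * Multi-planar and packed YUV buffers need the external-image
	 * sampler, everything else can be sampled as a plain 2D texture. */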
2331 	if (attributes->n_planes > 1)
2332 		return GL_TEXTURE_EXTERNAL_OES;
2333 
2334 	switch (attributes->format & ~DRM_FORMAT_BIG_ENDIAN) {
2335 	case DRM_FORMAT_YUYV:
2336 	case DRM_FORMAT_YVYU:
2337 	case DRM_FORMAT_UYVY:
2338 	case DRM_FORMAT_VYUY:
2339 	case DRM_FORMAT_AYUV:
2340 	case DRM_FORMAT_XYUV8888:
2341 		return GL_TEXTURE_EXTERNAL_OES;
2342 	default:
2343 		return GL_TEXTURE_2D;
2344 	}
2345 }
2346 
2347 static struct dmabuf_image *
2348 import_dmabuf(struct gl_renderer *gr,
2349 	      struct linux_dmabuf_buffer *dmabuf)
2350 {
2351 	struct egl_image *egl_image;
2352 	struct dmabuf_image *image;
2353 
2354 	image = dmabuf_image_create();
2355 	image->dmabuf = dmabuf;
2356 
2357 	egl_image = import_simple_dmabuf(gr, &dmabuf->attributes);
2358 	if (egl_image) {
2359 		image->num_images = 1;
2360 		image->images[0] = egl_image;
2361 		image->import_type = IMPORT_TYPE_DIRECT;
2362 		image->target = choose_texture_target(gr, &dmabuf->attributes);
2363 
2364 		switch (image->target) {
2365 		case GL_TEXTURE_2D:
2366 			image->shader = &gr->texture_shader_rgba;
2367 			break;
2368 		default:
2369 			image->shader = &gr->texture_shader_egl_external;
2370 		}
2371 	} else {
2372 		if (!import_yuv_dmabuf(gr, image)) {
2373 			dmabuf_image_destroy(image);
2374 			return NULL;
2375 		}
2376 		image->import_type = IMPORT_TYPE_GL_CONVERSION;
2377 		image->target = GL_TEXTURE_2D;
2378 	}
2379 
2380 	return image;
2381 }
2382 
2383 static void
2384 gl_renderer_query_dmabuf_formats(struct weston_compositor *wc,
2385 				int **formats, int *num_formats)
2386 {
2387 	struct gl_renderer *gr = get_renderer(wc);
2388 	static const int fallback_formats[] = {
2389 		DRM_FORMAT_ARGB8888,
2390 		DRM_FORMAT_XRGB8888,
2391 		DRM_FORMAT_YUYV,
2392 		DRM_FORMAT_NV12,
2393 		DRM_FORMAT_YUV420,
2394 		DRM_FORMAT_YUV444,
2395 		DRM_FORMAT_XYUV8888,
2396 	};
2397 	bool fallback = false;
2398 	EGLint num;
2399 
2400 	assert(gr->has_dmabuf_import);
2401 
2402 	if (!gr->has_dmabuf_import_modifiers ||
2403 	    !gr->query_dmabuf_formats(gr->egl_display, 0, NULL, &num)) {
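		/* Modifier queries are unavailable, so fall back to a static
		 * list; without GL_EXT_texture_rg only the first two (RGB)
		 * entries are advertised, as the YUV paths rely on R8/GR88
		 * textures. */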
2404 		num = gr->has_gl_texture_rg ? ARRAY_LENGTH(fallback_formats) : 2;
2405 		fallback = true;
2406 	}
2407 
2408 	*formats = calloc(num, sizeof(int));
2409 	if (*formats == NULL) {
2410 		*num_formats = 0;
2411 		return;
2412 	}
2413 
2414 	if (fallback) {
2415 		memcpy(*formats, fallback_formats, num * sizeof(int));
2416 		*num_formats = num;
2417 		return;
2418 	}
2419 
2420 	if (!gr->query_dmabuf_formats(gr->egl_display, num, *formats, &num)) {
2421 		*num_formats = 0;
2422 		free(*formats);
2423 		return;
2424 	}
2425 
2426 	*num_formats = num;
2427 }
2428 
2429 static void
2430 gl_renderer_query_dmabuf_modifiers_full(struct gl_renderer *gr, int format,
2431 					uint64_t **modifiers,
2432 					unsigned **external_only,
2433 					int *num_modifiers)
2434 {
2435 	int num;
2436 
2437 	assert(gr->has_dmabuf_import);
2438 
2439 	if (!gr->has_dmabuf_import_modifiers ||
2440 		!gr->query_dmabuf_modifiers(gr->egl_display, format, 0, NULL,
2441 					    NULL, &num) ||
2442 		num == 0) {
2443 		*num_modifiers = 0;
2444 		return;
2445 	}
2446 
2447 	*modifiers = calloc(num, sizeof(uint64_t));
2448 	if (*modifiers == NULL) {
2449 		*num_modifiers = 0;
2450 		return;
2451 	}
2452 	if (external_only) {
2453 		*external_only = calloc(num, sizeof(unsigned));
2454 		if (*external_only == NULL) {
2455 			*num_modifiers = 0;
2456 			free(*modifiers);
2457 			return;
2458 		}
2459 	}
2460 	if (!gr->query_dmabuf_modifiers(gr->egl_display, format,
2461 				num, *modifiers, external_only ?
2462 				*external_only : NULL, &num)) {
2463 		*num_modifiers = 0;
2464 		free(*modifiers);
2465 		if (external_only)
2466 			free(*external_only);
2467 		return;
2468 	}
2469 
2470 	*num_modifiers = num;
2471 }
2472 
2473 static void
2474 gl_renderer_query_dmabuf_modifiers(struct weston_compositor *wc, int format,
2475 					uint64_t **modifiers,
2476 					int *num_modifiers)
2477 {
2478 	struct gl_renderer *gr = get_renderer(wc);
2479 
2480 	gl_renderer_query_dmabuf_modifiers_full(gr, format, modifiers, NULL,
2481 			num_modifiers);
2482 }
2483 
2484 static bool
2485 gl_renderer_import_dmabuf(struct weston_compositor *ec,
2486 			  struct linux_dmabuf_buffer *dmabuf)
2487 {
2488 	struct gl_renderer *gr = get_renderer(ec);
2489 	struct dmabuf_image *image;
2490 	int i;
2491 
2492 	assert(gr->has_dmabuf_import);
2493 
2494 	for (i = 0; i < dmabuf->attributes.n_planes; i++) {
2495 		/* return if EGL doesn't support import modifiers */
2496 		if (dmabuf->attributes.modifier[i] != DRM_FORMAT_MOD_INVALID)
2497 			if (!gr->has_dmabuf_import_modifiers)
2498 				return false;
2499 
2500 		/* return if the modifiers are not all equal */
2501 		if (dmabuf->attributes.modifier[i] !=
2502 		    dmabuf->attributes.modifier[0])
2503 			return false;
2504 	}
2505 
2506 	/* reject all flags we do not recognize or handle */
2507 	if (dmabuf->attributes.flags & ~ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT)
2508 		return false;
2509 
2510 	image = import_dmabuf(gr, dmabuf);
2511 	if (!image)
2512 		return false;
2513 
2514 	wl_list_insert(&gr->dmabuf_images, &image->link);
2515 	linux_dmabuf_buffer_set_user_data(dmabuf, image,
2516 		gl_renderer_destroy_dmabuf);
2517 
2518 	return true;
2519 }
2520 
2521 static bool
2522 import_known_dmabuf(struct gl_renderer *gr,
2523                     struct dmabuf_image *image)
2524 {
2525 	switch (image->import_type) {
2526 	case IMPORT_TYPE_DIRECT:
2527 		image->images[0] = import_simple_dmabuf(gr, &image->dmabuf->attributes);
2528 		if (!image->images[0])
2529 			return false;
2530 		image->num_images = 1;
2531 		break;
2532 
2533 	case IMPORT_TYPE_GL_CONVERSION:
2534 		if (!import_yuv_dmabuf(gr, image))
2535 			return false;
2536 		break;
2537 
2538 	default:
2539 		weston_log("Invalid import type for dmabuf\n");
2540 		return false;
2541 	}
2542 
2543 	return true;
2544 }
2545 
2546 static bool
2547 dmabuf_is_opaque(struct linux_dmabuf_buffer *dmabuf)
2548 {
2549 	const struct pixel_format_info *info;
2550 
2551 	info = pixel_format_get_info(dmabuf->attributes.format &
2552 				     ~DRM_FORMAT_BIG_ENDIAN);
2553 	if (!info)
2554 		return false;
2555 
2556 	return pixel_format_is_opaque(info);
2557 }
2558 
2559 static void
2560 gl_renderer_attach_dmabuf(struct weston_surface *surface,
2561 			  struct weston_buffer *buffer,
2562 			  struct linux_dmabuf_buffer *dmabuf)
2563 {
2564 	struct gl_renderer *gr = get_renderer(surface->compositor);
2565 	struct gl_surface_state *gs = get_surface_state(surface);
2566 	struct dmabuf_image *image;
2567 	int i;
2568 
2569 	if (!gr->has_dmabuf_import) {
2570 		linux_dmabuf_buffer_send_server_error(dmabuf,
2571 				"EGL dmabuf import not supported");
2572 		return;
2573 	}
2574 
2575 	buffer->width = dmabuf->attributes.width;
2576 	buffer->height = dmabuf->attributes.height;
2577 
2578 	/*
2579 	 * GL-renderer uses the OpenGL convention of texture coordinates, where
2580 	 * the origin is at bottom-left. Because dmabuf buffers have the origin
2581 	 * at top-left, we must invert the Y_INVERT flag to get the image right.
2582 	 */
2583 	buffer->y_inverted =
2584 		!(dmabuf->attributes.flags & ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT);
2585 
2586 	for (i = 0; i < gs->num_images; i++)
2587 		egl_image_unref(gs->images[i]);
2588 	gs->num_images = 0;
2589 
2590 	gs->pitch = buffer->width;
2591 	gs->height = buffer->height;
2592 	gs->buffer_type = BUFFER_TYPE_EGL;
2593 	gs->y_inverted = buffer->y_inverted;
2594 	gs->direct_display = dmabuf->direct_display;
2595 	surface->is_opaque = dmabuf_is_opaque(dmabuf);
2596 
2597 	/*
2598 	 * We try to always hold an imported EGLImage from the dmabuf
2599 	 * so the client cannot make later re-imports fail. But we also
2600 	 * need to re-import every time the contents may change, because
2601 	 * the GL driver's caching may need flushing.
2602 	 *
2603 	 * Here we release the cache reference which has to be final.
2604 	 */
2605 	if (dmabuf->direct_display)
2606 		return;
2607 
2608 	image = linux_dmabuf_buffer_get_user_data(dmabuf);
2609 
2610 	/* The dmabuf_image should have been created during the import */
2611 	assert(image != NULL);
2612 
2613 	for (i = 0; i < image->num_images; ++i)
2614 		egl_image_unref(image->images[i]);
2615 
2616 	if (!import_known_dmabuf(gr, image)) {
2617 		linux_dmabuf_buffer_send_server_error(dmabuf, "EGL dmabuf import failed");
2618 		return;
2619 	}
2620 
2621 	gs->num_images = image->num_images;
2622 	for (i = 0; i < gs->num_images; ++i)
2623 		gs->images[i] = egl_image_ref(image->images[i]);
2624 
2625 	gs->target = image->target;
2626 	ensure_textures(gs, gs->num_images);
2627 	for (i = 0; i < gs->num_images; ++i) {
2628 		glActiveTexture(GL_TEXTURE0 + i);
2629 		glBindTexture(gs->target, gs->textures[i]);
2630 		gr->image_target_texture_2d(gs->target, gs->images[i]->image);
2631 	}
2632 
2633 	gs->shader = image->shader;
2634 }
2635 
2636 static void
2637 gl_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer)
2638 {
2639 	struct weston_compositor *ec = es->compositor;
2640 	struct gl_renderer *gr = get_renderer(ec);
2641 	struct gl_surface_state *gs = get_surface_state(es);
2642 	struct wl_shm_buffer *shm_buffer;
2643 	struct linux_dmabuf_buffer *dmabuf;
2644 	EGLint format;
2645 	int i;
2646 
2647 	weston_buffer_reference(&gs->buffer_ref, buffer);
2648 	weston_buffer_release_reference(&gs->buffer_release_ref,
2649 					es->buffer_release_ref.buffer_release);
2650 
2651 	if (!buffer) {
2652 		for (i = 0; i < gs->num_images; i++) {
2653 			egl_image_unref(gs->images[i]);
2654 			gs->images[i] = NULL;
2655 		}
2656 		gs->num_images = 0;
2657 		glDeleteTextures(gs->num_textures, gs->textures);
2658 		gs->num_textures = 0;
2659 		gs->buffer_type = BUFFER_TYPE_NULL;
2660 		gs->y_inverted = true;
2661 		gs->direct_display = false;
2662 		es->is_opaque = false;
2663 		return;
2664 	}
2665 
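	/* Dispatch on the buffer type: wl_shm buffers first, then EGL
	 * buffers that eglQueryWaylandBufferWL recognizes, and finally
	 * linux-dmabuf buffers. */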
2666 	shm_buffer = wl_shm_buffer_get(buffer->resource);
2667 
2668 	if (shm_buffer)
2669 		gl_renderer_attach_shm(es, buffer, shm_buffer);
2670 	else if (gr->has_bind_display &&
2671 		 gr->query_buffer(gr->egl_display, (void *)buffer->resource,
2672 				  EGL_TEXTURE_FORMAT, &format))
2673 		gl_renderer_attach_egl(es, buffer, format);
2674 	else if ((dmabuf = linux_dmabuf_buffer_get(buffer->resource)))
2675 		gl_renderer_attach_dmabuf(es, buffer, dmabuf);
2676 	else {
2677 		weston_log("unhandled buffer type!\n");
2678 		if (gr->has_bind_display) {
2679 			weston_log("eglQueryWaylandBufferWL failed\n");
2680 			gl_renderer_print_egl_error_state();
2681 		}
2682 		weston_buffer_reference(&gs->buffer_ref, NULL);
2683 		weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
2684 		gs->buffer_type = BUFFER_TYPE_NULL;
2685 		gs->y_inverted = true;
2686 		es->is_opaque = false;
2687 		weston_buffer_send_server_error(buffer,
2688 			"disconnecting due to unhandled buffer type");
2689 	}
2690 }
2691 
2692 static void
2693 gl_renderer_surface_set_color(struct weston_surface *surface,
2694 		 float red, float green, float blue, float alpha)
2695 {
2696 	struct gl_surface_state *gs = get_surface_state(surface);
2697 	struct gl_renderer *gr = get_renderer(surface->compositor);
2698 
2699 	gs->color[0] = red;
2700 	gs->color[1] = green;
2701 	gs->color[2] = blue;
2702 	gs->color[3] = alpha;
2703 	gs->buffer_type = BUFFER_TYPE_SOLID;
2704 	gs->pitch = 1;
2705 	gs->height = 1;
2706 
2707 	gs->shader = &gr->solid_shader;
2708 }
2709 
2710 static void
2711 gl_renderer_surface_get_content_size(struct weston_surface *surface,
2712 				     int *width, int *height)
2713 {
2714 	struct gl_surface_state *gs = get_surface_state(surface);
2715 
2716 	if (gs->buffer_type == BUFFER_TYPE_NULL) {
2717 		*width = 0;
2718 		*height = 0;
2719 	} else {
2720 		*width = gs->pitch;
2721 		*height = gs->height;
2722 	}
2723 }
2724 
2725 static uint32_t
2726 pack_color(pixman_format_code_t format, float *c)
2727 {
2728 	uint8_t r = round(c[0] * 255.0f);
2729 	uint8_t g = round(c[1] * 255.0f);
2730 	uint8_t b = round(c[2] * 255.0f);
2731 	uint8_t a = round(c[3] * 255.0f);
2732 
2733 	switch (format) {
2734 	case PIXMAN_a8b8g8r8:
2735 		return (a << 24) | (b << 16) | (g << 8) | r;
2736 	default:
2737 		assert(0);
2738 		return 0;
2739 	}
2740 }
2741 
2742 static int
2743 gl_renderer_surface_copy_content(struct weston_surface *surface,
2744 				 void *target, size_t size,
2745 				 int src_x, int src_y,
2746 				 int width, int height)
2747 {
2748 	static const GLfloat verts[4 * 2] = {
2749 		0.0f, 0.0f,
2750 		1.0f, 0.0f,
2751 		1.0f, 1.0f,
2752 		0.0f, 1.0f
2753 	};
2754 	static const GLfloat projmat_normal[16] = { /* transpose */
2755 		 2.0f,  0.0f, 0.0f, 0.0f,
2756 		 0.0f,  2.0f, 0.0f, 0.0f,
2757 		 0.0f,  0.0f, 1.0f, 0.0f,
2758 		-1.0f, -1.0f, 0.0f, 1.0f
2759 	};
2760 	static const GLfloat projmat_yinvert[16] = { /* transpose */
2761 		 2.0f,  0.0f, 0.0f, 0.0f,
2762 		 0.0f, -2.0f, 0.0f, 0.0f,
2763 		 0.0f,  0.0f, 1.0f, 0.0f,
2764 		-1.0f,  1.0f, 0.0f, 1.0f
2765 	};
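	/* Both projections map the unit quad to the full [-1, 1] clip space;
	 * the second additionally flips the Y axis for buffers whose
	 * contents are not y-inverted. */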
2766 	const pixman_format_code_t format = PIXMAN_a8b8g8r8;
2767 	const size_t bytespp = 4; /* PIXMAN_a8b8g8r8 */
2768 	const GLenum gl_format = GL_RGBA; /* PIXMAN_a8b8g8r8 little-endian */
2769 	struct gl_renderer *gr = get_renderer(surface->compositor);
2770 	struct gl_surface_state *gs = get_surface_state(surface);
2771 	int cw, ch;
2772 	GLuint fbo;
2773 	GLuint tex;
2774 	GLenum status;
2775 	const GLfloat *proj;
2776 	int i;
2777 
2778 	gl_renderer_surface_get_content_size(surface, &cw, &ch);
2779 
2780 	switch (gs->buffer_type) {
2781 	case BUFFER_TYPE_NULL:
2782 		return -1;
2783 	case BUFFER_TYPE_SOLID:
2784 		*(uint32_t *)target = pack_color(format, gs->color);
2785 		return 0;
2786 	case BUFFER_TYPE_SHM:
2787 		gl_renderer_flush_damage(surface);
2788 		/* fall through */
2789 	case BUFFER_TYPE_EGL:
2790 		break;
2791 	}
2792 
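	/* Draw the surface once into a temporary texture-backed FBO and read
	 * the requested rectangle back as PIXMAN_a8b8g8r8. */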
2793 	glGenTextures(1, &tex);
2794 	glBindTexture(GL_TEXTURE_2D, tex);
2795 	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, cw, ch,
2796 		     0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
2797 	glBindTexture(GL_TEXTURE_2D, 0);
2798 
2799 	glGenFramebuffers(1, &fbo);
2800 	glBindFramebuffer(GL_FRAMEBUFFER, fbo);
2801 	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
2802 			       GL_TEXTURE_2D, tex, 0);
2803 
2804 	status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
2805 	if (status != GL_FRAMEBUFFER_COMPLETE) {
2806 		weston_log("%s: fbo error: %#x\n", __func__, status);
2807 		glDeleteFramebuffers(1, &fbo);
2808 		glDeleteTextures(1, &tex);
2809 		return -1;
2810 	}
2811 
2812 	glViewport(0, 0, cw, ch);
2813 	glDisable(GL_BLEND);
2814 	use_shader(gr, gs->shader);
2815 	if (gs->y_inverted)
2816 		proj = projmat_normal;
2817 	else
2818 		proj = projmat_yinvert;
2819 
2820 	glUniformMatrix4fv(gs->shader->proj_uniform, 1, GL_FALSE, proj);
2821 	glUniform1f(gs->shader->alpha_uniform, 1.0f);
2822 
2823 	for (i = 0; i < gs->num_textures; i++) {
2824 		glUniform1i(gs->shader->tex_uniforms[i], i);
2825 
2826 		glActiveTexture(GL_TEXTURE0 + i);
2827 		glBindTexture(gs->target, gs->textures[i]);
2828 		glTexParameteri(gs->target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
2829 		glTexParameteri(gs->target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
2830 	}
2831 
2832 	/* position: */
2833 	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts);
2834 	glEnableVertexAttribArray(0);
2835 
2836 	/* texcoord: */
2837 	glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, verts);
2838 	glEnableVertexAttribArray(1);
2839 
2840 	glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
2841 
2842 	glDisableVertexAttribArray(1);
2843 	glDisableVertexAttribArray(0);
2844 
2845 	glPixelStorei(GL_PACK_ALIGNMENT, bytespp);
2846 	glReadPixels(src_x, src_y, width, height, gl_format,
2847 		     GL_UNSIGNED_BYTE, target);
2848 
2849 	glDeleteFramebuffers(1, &fbo);
2850 	glDeleteTextures(1, &tex);
2851 
2852 	return 0;
2853 }
2854 
2855 static void
2856 surface_state_destroy(struct gl_surface_state *gs, struct gl_renderer *gr)
2857 {
2858 	int i;
2859 
2860 	wl_list_remove(&gs->surface_destroy_listener.link);
2861 	wl_list_remove(&gs->renderer_destroy_listener.link);
2862 
2863 	gs->surface->gpu_renderer_state = NULL;
2864 
2865 	glDeleteTextures(gs->num_textures, gs->textures);
2866 
2867 	for (i = 0; i < gs->num_images; i++)
2868 		egl_image_unref(gs->images[i]);
2869 
2870 	weston_buffer_reference(&gs->buffer_ref, NULL);
2871 	weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
2872 	pixman_region32_fini(&gs->texture_damage);
2873 	free(gs);
2874 }
2875 
2876 static void
2877 surface_state_handle_surface_destroy(struct wl_listener *listener, void *data)
2878 {
2879 	struct gl_surface_state *gs;
2880 	struct gl_renderer *gr;
2881 
2882 	gs = container_of(listener, struct gl_surface_state,
2883 			  surface_destroy_listener);
2884 
2885 	gr = get_renderer(gs->surface->compositor);
2886 
2887 	surface_state_destroy(gs, gr);
2888 }
2889 
2890 static void
2891 surface_state_handle_renderer_destroy(struct wl_listener *listener, void *data)
2892 {
2893 	struct gl_surface_state *gs;
2894 	struct gl_renderer *gr;
2895 
2896 	gr = data;
2897 
2898 	gs = container_of(listener, struct gl_surface_state,
2899 			  renderer_destroy_listener);
2900 
2901 	surface_state_destroy(gs, gr);
2902 }
2903 
2904 static int
2905 gl_renderer_create_surface(struct weston_surface *surface)
2906 {
2907 	struct gl_surface_state *gs;
2908 	struct gl_renderer *gr = get_renderer(surface->compositor);
2909 
2910 	gs = zalloc(sizeof *gs);
2911 	if (gs == NULL)
2912 		return -1;
2913 
2914 	/* A buffer is never attached to solid color surfaces, yet
2915 	 * they still go through texcoord computations. Do not divide
2916 	 * by zero there.
2917 	 */
2918 	gs->pitch = 1;
2919 	gs->y_inverted = true;
2920 	gs->direct_display = false;
2921 
2922 	gs->surface = surface;
2923 
2924 	pixman_region32_init(&gs->texture_damage);
2925 	surface->gpu_renderer_state = gs;
2926 
2927 	gs->surface_destroy_listener.notify =
2928 		surface_state_handle_surface_destroy;
2929 	wl_signal_add(&surface->destroy_signal,
2930 		      &gs->surface_destroy_listener);
2931 
2932 	gs->renderer_destroy_listener.notify =
2933 		surface_state_handle_renderer_destroy;
2934 	wl_signal_add(&gr->destroy_signal,
2935 		      &gs->renderer_destroy_listener);
2936 
2937 	if (surface->buffer_ref.buffer) {
2938 		gl_renderer_attach(surface, surface->buffer_ref.buffer);
2939 		gl_renderer_flush_damage(surface);
2940 	}
2941 
2942 	return 0;
2943 }
2944 
2945 static const char vertex_shader[] =
2946 	"uniform mat4 proj;\n"
2947 	"attribute vec2 position;\n"
2948 	"attribute vec2 texcoord;\n"
2949 	"varying vec2 v_texcoord;\n"
2950 	"void main()\n"
2951 	"{\n"
2952 	"   gl_Position = proj * vec4(position, 0.0, 1.0);\n"
2953 	"   v_texcoord = texcoord;\n"
2954 	"}\n";
2955 
2956 /* Shared tail of the YUV fragment shaders: convert YUV to premultiplied RGBA */
2957 #define FRAGMENT_CONVERT_YUV						\
2958 	"  y *= alpha;\n"						\
2959 	"  u *= alpha;\n"						\
2960 	"  v *= alpha;\n"						\
2961 	"  gl_FragColor.r = y + 1.59602678 * v;\n"			\
2962 	"  gl_FragColor.g = y - 0.39176229 * u - 0.81296764 * v;\n"	\
2963 	"  gl_FragColor.b = y + 2.01723214 * u;\n"			\
2964 	"  gl_FragColor.a = alpha;\n"
2965 
2966 static const char fragment_debug[] =
2967 	"  gl_FragColor = vec4(0.0, 0.3, 0.0, 0.2) + gl_FragColor * 0.8;\n";
2968 
2969 static const char fragment_brace[] =
2970 	"}\n";
2971 
2972 static const char texture_fragment_shader_rgba[] =
2973 	"precision mediump float;\n"
2974 	"varying vec2 v_texcoord;\n"
2975 	"uniform sampler2D tex;\n"
2976 	"uniform float alpha;\n"
2977 	"void main()\n"
2978 	"{\n"
2979 	"   gl_FragColor.argb = alpha * texture2D(tex, v_texcoord).rgba\n;"
2980 	;
2981 
2982 static const char texture_fragment_shader_rgbx[] =
2983 	"precision mediump float;\n"
2984 	"varying vec2 v_texcoord;\n"
2985 	"uniform sampler2D tex;\n"
2986 	"uniform float alpha;\n"
2987 	"void main()\n"
2988 	"{\n"
2989 	"   gl_FragColor.rgb = alpha * texture2D(tex, v_texcoord).rgb\n;"
2990 	"   gl_FragColor.a = alpha;\n"
2991 	;
2992 
2993 static const char texture_fragment_shader_egl_external[] =
2994 	"#extension GL_OES_EGL_image_external : require\n"
2995 	"precision mediump float;\n"
2996 	"varying vec2 v_texcoord;\n"
2997 	"uniform samplerExternalOES tex;\n"
2998 	"uniform float alpha;\n"
2999 	"void main()\n"
3000 	"{\n"
3001 	"   gl_FragColor = alpha * texture2D(tex, v_texcoord)\n;"
3002 	;
3003 
3004 static const char texture_fragment_shader_y_uv[] =
3005 	"precision mediump float;\n"
3006 	"uniform sampler2D tex;\n"
3007 	"uniform sampler2D tex1;\n"
3008 	"varying vec2 v_texcoord;\n"
3009 	"uniform float alpha;\n"
3010 	"void main() {\n"
3011 	"  float y = 1.16438356 * (texture2D(tex, v_texcoord).x - 0.0625);\n"
3012 	"  float u = texture2D(tex1, v_texcoord).r - 0.5;\n"
3013 	"  float v = texture2D(tex1, v_texcoord).g - 0.5;\n"
3014 	FRAGMENT_CONVERT_YUV
3015 	;
3016 
3017 static const char texture_fragment_shader_y_u_v[] =
3018 	"precision mediump float;\n"
3019 	"uniform sampler2D tex;\n"
3020 	"uniform sampler2D tex1;\n"
3021 	"uniform sampler2D tex2;\n"
3022 	"varying vec2 v_texcoord;\n"
3023 	"uniform float alpha;\n"
3024 	"void main() {\n"
3025 	"  float y = 1.16438356 * (texture2D(tex, v_texcoord).x - 0.0625);\n"
3026 	"  float u = texture2D(tex1, v_texcoord).x - 0.5;\n"
3027 	"  float v = texture2D(tex2, v_texcoord).x - 0.5;\n"
3028 	FRAGMENT_CONVERT_YUV
3029 	;
3030 
3031 static const char texture_fragment_shader_y_xuxv[] =
3032 	"precision mediump float;\n"
3033 	"uniform sampler2D tex;\n"
3034 	"uniform sampler2D tex1;\n"
3035 	"varying vec2 v_texcoord;\n"
3036 	"uniform float alpha;\n"
3037 	"void main() {\n"
3038 	"  float y = 1.16438356 * (texture2D(tex, v_texcoord).x - 0.0625);\n"
3039 	"  float u = texture2D(tex1, v_texcoord).g - 0.5;\n"
3040 	"  float v = texture2D(tex1, v_texcoord).a - 0.5;\n"
3041 	FRAGMENT_CONVERT_YUV
3042 	;
3043 
3044 static const char texture_fragment_shader_xyuv[] =
3045 	"precision mediump float;\n"
3046 	"uniform sampler2D tex;\n"
3047 	"varying vec2 v_texcoord;\n"
3048 	"uniform float alpha;\n"
3049 	"void main() {\n"
3050 	"  float y = 1.16438356 * (texture2D(tex, v_texcoord).b - 0.0625);\n"
3051 	"  float u = texture2D(tex, v_texcoord).g - 0.5;\n"
3052 	"  float v = texture2D(tex, v_texcoord).r - 0.5;\n"
3053 	FRAGMENT_CONVERT_YUV
3054 	;
3055 
3056 static const char solid_fragment_shader[] =
3057 	"precision mediump float;\n"
3058 	"uniform vec4 color;\n"
3059 	"uniform float alpha;\n"
3060 	"void main()\n"
3061 	"{\n"
3062 	"   gl_FragColor = alpha * color\n;"
3063 	;
3064 
3065 static int
3066 compile_shader(GLenum type, int count, const char **sources)
3067 {
3068 	GLuint s;
3069 	char msg[512];
3070 	GLint status;
3071 
3072 	s = glCreateShader(type);
3073 	glShaderSource(s, count, sources, NULL);
3074 	glCompileShader(s);
3075 	glGetShaderiv(s, GL_COMPILE_STATUS, &status);
3076 	if (!status) {
3077 		glGetShaderInfoLog(s, sizeof msg, NULL, msg);
3078 		weston_log("shader info: %s\n", msg);
3079 		return GL_NONE;
3080 	}
3081 
3082 	return s;
3083 }
3084 
3085 static int
3086 shader_init(struct gl_shader *shader, struct gl_renderer *renderer,
3087 		   const char *vertex_source, const char *fragment_source)
3088 {
3089 	char msg[512];
3090 	GLint status;
3091 	int count;
3092 	const char *sources[3];
3093 
3094 	shader->vertex_shader =
3095 		compile_shader(GL_VERTEX_SHADER, 1, &vertex_source);
3096 	if (shader->vertex_shader == GL_NONE)
3097 		return -1;
3098 
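	/* The fragment shader sources above deliberately omit the closing
	 * brace of main() so that the debug tint can be appended before
	 * fragment_brace finally terminates the function. */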
3099 	if (renderer->fragment_shader_debug) {
3100 		sources[0] = fragment_source;
3101 		sources[1] = fragment_debug;
3102 		sources[2] = fragment_brace;
3103 		count = 3;
3104 	} else {
3105 		sources[0] = fragment_source;
3106 		sources[1] = fragment_brace;
3107 		count = 2;
3108 	}
3109 
3110 	shader->fragment_shader =
3111 		compile_shader(GL_FRAGMENT_SHADER, count, sources);
3112 	if (shader->fragment_shader == GL_NONE)
3113 		return -1;
3114 
3115 	shader->program = glCreateProgram();
3116 	glAttachShader(shader->program, shader->vertex_shader);
3117 	glAttachShader(shader->program, shader->fragment_shader);
3118 	glBindAttribLocation(shader->program, 0, "position");
3119 	glBindAttribLocation(shader->program, 1, "texcoord");
3120 
3121 	glLinkProgram(shader->program);
3122 	glGetProgramiv(shader->program, GL_LINK_STATUS, &status);
3123 	if (!status) {
3124 		glGetProgramInfoLog(shader->program, sizeof msg, NULL, msg);
3125 		weston_log("link info: %s\n", msg);
3126 		return -1;
3127 	}
3128 
3129 	shader->proj_uniform = glGetUniformLocation(shader->program, "proj");
3130 	shader->tex_uniforms[0] = glGetUniformLocation(shader->program, "tex");
3131 	shader->tex_uniforms[1] = glGetUniformLocation(shader->program, "tex1");
3132 	shader->tex_uniforms[2] = glGetUniformLocation(shader->program, "tex2");
3133 	shader->alpha_uniform = glGetUniformLocation(shader->program, "alpha");
3134 	shader->color_uniform = glGetUniformLocation(shader->program, "color");
3135 
3136 	return 0;
3137 }
3138 
3139 static void
3140 shader_release(struct gl_shader *shader)
3141 {
3142 	glDeleteShader(shader->vertex_shader);
3143 	glDeleteShader(shader->fragment_shader);
3144 	glDeleteProgram(shader->program);
3145 
3146 	shader->vertex_shader = 0;
3147 	shader->fragment_shader = 0;
3148 	shader->program = 0;
3149 }
3150 
3151 void
3152 gl_renderer_log_extensions(const char *name, const char *extensions)
3153 {
3154 // OHOS
3155 //	const char *p, *end;
3156 //	int l;
3157 //	int len;
3158 //
3159 //	l = weston_log("%s:", name);
3160 //	p = extensions;
3161 //	while (*p) {
3162 //		end = strchrnul(p, ' ');
3163 //		len = end - p;
3164 //		if (l + len > 78)
3165 //			l = weston_log_continue("\n" STAMP_SPACE "%.*s",
3166 //						len, p);
3167 //		else
3168 //			l += weston_log_continue(" %.*s", len, p);
3169 //		for (p = end; isspace(*p); p++)
3170 //			;
3171 //	}
3172 //	weston_log_continue("\n");
3173 	weston_log("%s:\n", name);
3174 	weston_log("%s\n", extensions);
3175 }
3176 
3177 static void
3178 log_egl_info(EGLDisplay egldpy)
3179 {
3180 	const char *str;
3181 
3182 	str = eglQueryString(egldpy, EGL_VERSION);
3183 	weston_log("EGL version: %s\n", str ? str : "(null)");
3184 
3185 	str = eglQueryString(egldpy, EGL_VENDOR);
3186 	weston_log("EGL vendor: %s\n", str ? str : "(null)");
3187 
3188 	str = eglQueryString(egldpy, EGL_CLIENT_APIS);
3189 	weston_log("EGL client APIs: %s\n", str ? str : "(null)");
3190 
3191 	str = eglQueryString(egldpy, EGL_EXTENSIONS);
3192 	gl_renderer_log_extensions("EGL extensions", str ? str : "(null)");
3193 }
3194 
3195 static void
3196 log_gl_info(void)
3197 {
3198 	const char *str;
3199 
3200 	str = (char *)glGetString(GL_VERSION);
3201 	weston_log("GL version: %s\n", str ? str : "(null)");
3202 
3203 	str = (char *)glGetString(GL_SHADING_LANGUAGE_VERSION);
3204 	weston_log("GLSL version: %s\n", str ? str : "(null)");
3205 
3206 	str = (char *)glGetString(GL_VENDOR);
3207 	weston_log("GL vendor: %s\n", str ? str : "(null)");
3208 
3209 	str = (char *)glGetString(GL_RENDERER);
3210 	weston_log("GL renderer: %s\n", str ? str : "(null)");
3211 
3212 	str = (char *)glGetString(GL_EXTENSIONS);
3213 	gl_renderer_log_extensions("GL extensions", str ? str : "(null)");
3214 }
3215 
3216 static void
3217 gl_renderer_output_set_border(struct weston_output *output,
3218 			      enum gl_renderer_border_side side,
3219 			      int32_t width, int32_t height,
3220 			      int32_t tex_width, unsigned char *data)
3221 {
3222 	struct gl_output_state *go = get_output_state(output);
3223 
3224 	if (go->borders[side].width != width ||
3225 	    go->borders[side].height != height)
3226 		/* In this case, we have to invalidate everything and do a full
3227 		 * repaint. */
3228 		go->border_status |= BORDER_SIZE_CHANGED | BORDER_ALL_DIRTY;
3229 
3230 	if (data == NULL) {
3231 		width = 0;
3232 		height = 0;
3233 	}
3234 
3235 	go->borders[side].width = width;
3236 	go->borders[side].height = height;
3237 	go->borders[side].tex_width = tex_width;
3238 	go->borders[side].data = data;
3239 	go->border_status |= 1 << side;
3240 }
3241 
3242 static int
3243 gl_renderer_setup(struct weston_compositor *ec, EGLSurface egl_surface);
3244 
3245 static EGLSurface
3246 gl_renderer_create_window_surface(struct gl_renderer *gr,
3247 				  EGLNativeWindowType window_for_legacy,
3248 				  void *window_for_platform,
3249 				  const uint32_t *drm_formats,
3250 				  unsigned drm_formats_count)
3251 {
3252 	EGLSurface egl_surface = EGL_NO_SURFACE;
3253 	EGLConfig egl_config;
3254 
3255 	egl_config = gl_renderer_get_egl_config(gr, EGL_WINDOW_BIT,
3256 						drm_formats, drm_formats_count);
3257 	if (egl_config == EGL_NO_CONFIG_KHR)
3258 		return EGL_NO_SURFACE;
3259 
3260 	log_egl_config_info(gr->egl_display, egl_config);
3261 
3262 	if (gr->create_platform_window)
3263 		egl_surface = gr->create_platform_window(gr->egl_display,
3264 							 egl_config,
3265 							 window_for_platform,
3266 							 NULL);
3267 	else
3268 		egl_surface = eglCreateWindowSurface(gr->egl_display,
3269 						     egl_config,
3270 						     window_for_legacy, NULL);
3271 
3272 	return egl_surface;
3273 }
3274 
3275 static int
3276 gl_renderer_output_create(struct weston_output *output,
3277 			  EGLSurface surface)
3278 {
3279 	struct gl_output_state *go;
3280 	int i;
3281 
3282 	go = zalloc(sizeof *go);
3283 	if (go == NULL)
3284 		return -1;
3285 
3286 	go->egl_surface = surface;
3287 
3288 	for (i = 0; i < BUFFER_DAMAGE_COUNT; i++)
3289 		pixman_region32_init(&go->buffer_damage[i]);
3290 
3291 	wl_list_init(&go->timeline_render_point_list);
3292 
3293 	go->begin_render_sync = EGL_NO_SYNC_KHR;
3294 	go->end_render_sync = EGL_NO_SYNC_KHR;
3295 
3296 	output->gpu_renderer_state = go;
3297 
3298 	return 0;
3299 }
3300 
3301 static int
3302 gl_renderer_output_window_create(struct weston_output *output,
3303 				 const struct gl_renderer_output_options *options)
3304 {
3305 	struct weston_compositor *ec = output->compositor;
3306 	struct gl_renderer *gr = get_renderer(ec);
3307 	EGLSurface egl_surface = EGL_NO_SURFACE;
3308 	int ret = 0;
3309 
3310 	egl_surface = gl_renderer_create_window_surface(gr,
3311 							options->window_for_legacy,
3312 							options->window_for_platform,
3313 							options->drm_formats,
3314 							options->drm_formats_count);
3315 	if (egl_surface == EGL_NO_SURFACE) {
3316 		weston_log("failed to create egl surface\n");
3317 		return -1;
3318 	}
3319 
3320 	ret = gl_renderer_output_create(output, egl_surface);
3321 	if (ret < 0)
3322 		weston_platform_destroy_egl_surface(gr->egl_display, egl_surface);
3323 
3324 	return ret;
3325 }
3326 
3327 static int
3328 gl_renderer_output_pbuffer_create(struct weston_output *output,
3329 				  const struct gl_renderer_pbuffer_options *options)
3330 {
3331 	struct gl_renderer *gr = get_renderer(output->compositor);
3332 	EGLConfig pbuffer_config;
3333 	EGLSurface egl_surface;
3334 	int ret;
3335 	EGLint pbuffer_attribs[] = {
3336 		EGL_WIDTH, options->width,
3337 		EGL_HEIGHT, options->height,
3338 		EGL_NONE
3339 	};
3340 
3341 	pbuffer_config = gl_renderer_get_egl_config(gr, EGL_PBUFFER_BIT,
3342 						    options->drm_formats,
3343 						    options->drm_formats_count);
3344 	if (pbuffer_config == EGL_NO_CONFIG_KHR) {
3345 		weston_log("failed to choose EGL config for PbufferSurface\n");
3346 		return -1;
3347 	}
3348 
3349 	log_egl_config_info(gr->egl_display, pbuffer_config);
3350 
3351 	egl_surface = eglCreatePbufferSurface(gr->egl_display, pbuffer_config,
3352 					      pbuffer_attribs);
3353 	if (egl_surface == EGL_NO_SURFACE) {
3354 		weston_log("failed to create egl surface\n");
3355 		gl_renderer_print_egl_error_state();
3356 		return -1;
3357 	}
3358 
3359 	ret = gl_renderer_output_create(output, egl_surface);
3360 	if (ret < 0)
3361 		eglDestroySurface(gr->egl_display, egl_surface);
3362 
3363 	return ret;
3364 }
3365 
3366 // OHOS hdi-backend
3367 static void gl_renderer_output_destroy(struct weston_output *output);
3368 static int
3369 gl_renderer_output_fbo_create(struct weston_output *output,
3370 				 const struct gl_renderer_fbo_options *options)
3371 {
3372 	struct weston_compositor *ec = output->compositor;
3373 	struct gl_renderer *gr = get_renderer(ec);
3374 
3375 	if (eglMakeCurrent(gr->egl_display, EGL_NO_SURFACE,
3376 			   EGL_NO_SURFACE, gr->egl_context) == EGL_FALSE) {
3377 		weston_log("Failed to make EGL context current.\n");
3378 		return -1;
3379 	}
3380 
3381 	int ret = gl_renderer_output_create(output, EGL_NO_SURFACE);
3382 	if (ret) {
3383 		return ret;
3384 	}
3385 
3386 	struct gl_output_state *go = get_output_state(output);
3387 	int i;
3388 	for (i = 0; i < GL_RENDERER_FRMAEBUFFER_SIZE; i++) {
3389 		assert(options->handle[i] != NULL || !"invalid gl_renderer_fbo_options");
3390 
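		/* Wrap the backend-provided dmabuf (single-plane ARGB8888:
		 * fd, zero offset, stride) as an EGLImage so the renderer can
		 * draw straight into the hdi-backend's buffer. */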
3391 		EGLint attribs[] = {
3392 			EGL_WIDTH, options->handle[i]->width,
3393 			EGL_HEIGHT, options->handle[i]->height,
3394 			EGL_LINUX_DRM_FOURCC_EXT, DRM_FORMAT_ARGB8888,
3395 			EGL_DMA_BUF_PLANE0_FD_EXT, options->handle[i]->fd,
3396 			EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
3397 			EGL_DMA_BUF_PLANE0_PITCH_EXT, options->handle[i]->stride,
3398 			EGL_NONE,
3399 		};
3400 
3401 		go->fbo[i].image = gr->create_image(gr->egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
3402 		if (go->fbo[i].image == EGL_NO_IMAGE_KHR) {
3403 			weston_log("create_image failed.\n");
3404 			break;
3405 		}
3406 
3407 		glGenTextures(1, &go->fbo[i].tex);
3408 		glBindTexture(GL_TEXTURE_2D, go->fbo[i].tex);
3409 		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
3410 		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
3411 		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
3412 		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
3413 
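		/* glEGLImageTargetTexture2DOES makes the EGLImage the texture's
		 * storage; attaching that texture to an FBO below turns the
		 * dmabuf into a render target. */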
3414 		gr->image_target_texture_2d(GL_TEXTURE_2D, go->fbo[i].image);
3415 
3416 		glGenFramebuffers(1, &go->fbo[i].fbo);
3417 		glBindFramebuffer(GL_FRAMEBUFFER, go->fbo[i].fbo);
3418 		glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, go->fbo[i].tex, 0);
3419 		if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
3420 			weston_log("glCheckFramebufferStatus failed\n");
3421 			glDeleteFramebuffers(1, &go->fbo[i].fbo);
3422 			glDeleteTextures(1, &go->fbo[i].tex);
3423 			gr->destroy_image(gr->egl_display, go->fbo[i].image);
3424 			break;
3425 		}
3426 	}
3427 
3428 	if (i != GL_RENDERER_FRMAEBUFFER_SIZE) {
3429 		gl_renderer_output_destroy(output);
3430 		return -1;
3431 	}
3432 
3433 	go->current_fbo_index = 0;
3434 	go->use_fbo = true;
3435 
3436 	return 0;
3437 }
3438 
3439 static void
3440 gl_renderer_output_destroy(struct weston_output *output)
3441 {
3442 	struct gl_renderer *gr = get_renderer(output->compositor);
3443 	struct gl_output_state *go = get_output_state(output);
3444 	struct timeline_render_point *trp, *tmp;
3445 	int i;
3446 
3447 	for (i = 0; i < BUFFER_DAMAGE_COUNT; i++)
3448 		pixman_region32_fini(&go->buffer_damage[i]);
3449 
3450 	eglMakeCurrent(gr->egl_display,
3451 		       EGL_NO_SURFACE, EGL_NO_SURFACE,
3452 		       EGL_NO_CONTEXT);
3453 
3454 	weston_platform_destroy_egl_surface(gr->egl_display, go->egl_surface);
3455 
3456 	if (!wl_list_empty(&go->timeline_render_point_list))
3457 		weston_log("warning: discarding pending timeline render "
3458 			   "objects at output destruction\n");
3459 
3460 	wl_list_for_each_safe(trp, tmp, &go->timeline_render_point_list, link)
3461 		timeline_render_point_destroy(trp);
3462 
3463 	if (go->begin_render_sync != EGL_NO_SYNC_KHR)
3464 		gr->destroy_sync(gr->egl_display, go->begin_render_sync);
3465 	if (go->end_render_sync != EGL_NO_SYNC_KHR)
3466 		gr->destroy_sync(gr->egl_display, go->end_render_sync);
3467 
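	/* OHOS hdi-backend: release the per-buffer FBO, texture and EGLImage
	 * created by gl_renderer_output_fbo_create(). */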
3468 	for (int32_t i = 0; i < GL_RENDERER_FRMAEBUFFER_SIZE; i++) {
3469 		if (go->fbo[i].fbo) {
3470 			glDeleteFramebuffers(1, &go->fbo[i].fbo);
3471 		}
3472 
3473 		if (go->fbo[i].tex) {
3474 			glDeleteTextures(1, &go->fbo[i].tex);
3475 		}
3476 
3477 		if (go->fbo[i].image != EGL_NO_IMAGE_KHR) {
3478 			gr->destroy_image(gr->egl_display, go->fbo[i].image);
3479 		}
3480 	}
3481 
3482 	free(go);
3483 }
3484 
3485 // OHOS hdi-backend
3486 static int
3487 gl_renderer_output_get_current_fbo_index(struct weston_output *output)
3488 {
3489 	struct gl_output_state *go = get_output_state(output);
3490 	return go->current_fbo_index;
3491 }
3492 
3493 static int
3494 gl_renderer_create_fence_fd(struct weston_output *output)
3495 {
3496 	struct gl_output_state *go = get_output_state(output);
3497 	struct gl_renderer *gr = get_renderer(output->compositor);
3498 	int fd;
3499 
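	/* end_render_sync is an EGL_ANDROID_native_fence sync issued at the
	 * end of repaint; exporting it yields a sync_file fd the backend can
	 * wait on for explicit synchronization. */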
3500 	if (go->end_render_sync == EGL_NO_SYNC_KHR)
3501 		return -1;
3502 
3503 	fd = gr->dup_native_fence_fd(gr->egl_display, go->end_render_sync);
3504 	if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID)
3505 		return -1;
3506 
3507 	return fd;
3508 }
3509 
3510 static void
3511 gl_renderer_destroy(struct weston_compositor *ec)
3512 {
3513 	struct gl_renderer *gr = get_renderer(ec);
3514 	struct dmabuf_image *image, *next;
3515 	struct dmabuf_format *format, *next_format;
3516 
3517 	wl_signal_emit(&gr->destroy_signal, gr);
3518 
3519 	if (gr->has_bind_display)
3520 		gr->unbind_display(gr->egl_display, ec->wl_display);
3521 
3522 	/* Work around crash in egl_dri2.c's dri2_make_current() by unbinding the current context first. */
3523 	eglMakeCurrent(gr->egl_display,
3524 		       EGL_NO_SURFACE, EGL_NO_SURFACE,
3525 		       EGL_NO_CONTEXT);
3526 
3527 
3528 	wl_list_for_each_safe(image, next, &gr->dmabuf_images, link)
3529 		dmabuf_image_destroy(image);
3530 
3531 	wl_list_for_each_safe(format, next_format, &gr->dmabuf_formats, link)
3532 		dmabuf_format_destroy(format);
3533 
3534 	if (gr->dummy_surface != EGL_NO_SURFACE)
3535 		weston_platform_destroy_egl_surface(gr->egl_display,
3536 						    gr->dummy_surface);
3537 
3538 	eglTerminate(gr->egl_display);
3539 	eglReleaseThread();
3540 
3541 	wl_list_remove(&gr->output_destroy_listener.link);
3542 
3543 	wl_array_release(&gr->vertices);
3544 	wl_array_release(&gr->vtxcnt);
3545 
3546 	if (gr->fragment_binding)
3547 		weston_binding_destroy(gr->fragment_binding);
3548 	if (gr->fan_binding)
3549 		weston_binding_destroy(gr->fan_binding);
3550 
3551 	// OHOS hdi-backend
3552 	gbm_device_destroy(gr->device);
3553 	close(gr->gbm_fd);
3554 
3555 	free(gr);
3556 }
3557 
3558 static void
3559 output_handle_destroy(struct wl_listener *listener, void *data)
3560 {
3561 	struct gl_renderer *gr;
3562 	struct weston_output *output = data;
3563 
3564 	gr = container_of(listener, struct gl_renderer,
3565 			  output_destroy_listener);
3566 
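	/* Once the last output is gone there is no EGLSurface left to keep
	 * current, so rebind the dummy surface to keep the context usable. */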
3567 	if (wl_list_empty(&output->compositor->output_list))
3568 		eglMakeCurrent(gr->egl_display, gr->dummy_surface,
3569 			       gr->dummy_surface, gr->egl_context);
3570 }
3571 
3572 static int
3573 gl_renderer_create_pbuffer_surface(struct gl_renderer *gr) {
3574 	EGLConfig pbuffer_config;
3575 	static const EGLint pbuffer_attribs[] = {
3576 		EGL_WIDTH, 10,
3577 		EGL_HEIGHT, 10,
3578 		EGL_NONE
3579 	};
3580 
3581 	pbuffer_config = gr->egl_config;
3582 	if (pbuffer_config == EGL_NO_CONFIG_KHR) {
3583 		pbuffer_config =
3584 			gl_renderer_get_egl_config(gr, EGL_PBUFFER_BIT,
3585 						   NULL, 0);
3586 	}
3587 	if (pbuffer_config == EGL_NO_CONFIG_KHR) {
3588 		weston_log("failed to choose EGL config for PbufferSurface\n");
3589 		return -1;
3590 	}
3591 
3592 	gr->dummy_surface = eglCreatePbufferSurface(gr->egl_display,
3593 						    pbuffer_config,
3594 						    pbuffer_attribs);
3595 
3596 	if (gr->dummy_surface == EGL_NO_SURFACE) {
3597 		weston_log("failed to create PbufferSurface\n");
3598 		return -1;
3599 	}
3600 
3601 	return 0;
3602 }
3603 
3604 static int
3605 gl_renderer_display_create(struct weston_compositor *ec,
3606 			   const struct gl_renderer_display_options *options)
3607 {
3608 	struct gl_renderer *gr;
3609 
3610 	gr = zalloc(sizeof *gr);
3611 	if (gr == NULL)
3612 		return -1;
3613 
3614 	gr->platform = options->egl_platform;
3615 
3616 	// OHOS hdi-backend
3617 	gr->gbm_fd = open(GBM_DEVICE_PATH, O_RDWR);
3618 	if (gr->gbm_fd < 0) {
3619 		weston_log("failed to open %s.\n", GBM_DEVICE_PATH);
3620 		goto fail;
3621 	}
3622 	drmDropMaster(gr->gbm_fd);
3623 
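	/* Wrap the DRM fd in a GBM device so EGL can be initialized on top of
	 * it; DRM master was dropped above since the renderer only needs the
	 * fd for buffer allocation, not for modesetting. */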
3624 	gr->device = gbm_create_device(gr->gbm_fd);
3625 	if (gr->device == NULL) {
3626 		weston_log("failed to create gbm device.\n");
3627 		goto fail_fd;
3628 	}
3629 
3630 	if (gl_renderer_setup_egl_client_extensions(gr) < 0)
3631 		goto fail_device;
3632 
3633 	gr->base.read_pixels = gl_renderer_read_pixels;
3634 	gr->base.repaint_output = gl_renderer_repaint_output;
3635 	gr->base.flush_damage = gl_renderer_flush_damage;
3636 	gr->base.attach = gl_renderer_attach;
3637 	gr->base.surface_set_color = gl_renderer_surface_set_color;
3638 	gr->base.destroy = gl_renderer_destroy;
3639 	gr->base.surface_get_content_size =
3640 		gl_renderer_surface_get_content_size;
3641 	gr->base.surface_copy_content = gl_renderer_surface_copy_content;
3642 
3643 	if (gl_renderer_setup_egl_display(gr, gr->device) < 0)
3644 		goto fail_device;
3645 
3646 	log_egl_info(gr->egl_display);
3647 
3648 	ec->gpu_renderer = &gr->base;
3649 
3650 	if (gl_renderer_setup_egl_extensions(ec) < 0)
3651 		goto fail_with_error;
3652 
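	/* Without EGL_KHR_no_config_context / EGL_MESA_configless_context the
	 * context must be created against a concrete EGLConfig, so pick one
	 * that also matches the surface type the backend will use. */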
3653 	if (!gr->has_configless_context) {
3654 		EGLint egl_surface_type = options->egl_surface_type;
3655 
3656 		if (!gr->has_surfaceless_context)
3657 			egl_surface_type |= EGL_PBUFFER_BIT;
3658 
3659 		gr->egl_config =
3660 			gl_renderer_get_egl_config(gr,
3661 						   egl_surface_type,
3662 						   options->drm_formats,
3663 						   options->drm_formats_count);
3664 		if (gr->egl_config == EGL_NO_CONFIG_KHR) {
3665 			weston_log("failed to choose EGL config\n");
3666 			goto fail_terminate;
3667 		}
3668 	}
3669 
3670 	ec->capabilities |= WESTON_CAP_ROTATION_ANY;
3671 	ec->capabilities |= WESTON_CAP_CAPTURE_YFLIP;
3672 	ec->capabilities |= WESTON_CAP_VIEW_CLIP_MASK;
3673 	if (gr->has_native_fence_sync && gr->has_wait_sync)
3674 		ec->capabilities |= WESTON_CAP_EXPLICIT_SYNC;
3675 
3676 	wl_list_init(&gr->dmabuf_images);
3677 	if (gr->has_dmabuf_import) {
3678 		gr->base.import_dmabuf = gl_renderer_import_dmabuf;
3679 		gr->base.query_dmabuf_formats =
3680 			gl_renderer_query_dmabuf_formats;
3681 		gr->base.query_dmabuf_modifiers =
3682 			gl_renderer_query_dmabuf_modifiers;
3683 	}
3684 	wl_list_init(&gr->dmabuf_formats);
3685 
3686 	if (gr->has_surfaceless_context) {
3687 		weston_log("EGL_KHR_surfaceless_context available\n");
3688 		gr->dummy_surface = EGL_NO_SURFACE;
3689 	} else {
3690 		weston_log("EGL_KHR_surfaceless_context unavailable. "
3691 			   "Trying PbufferSurface\n");
3692 
3693 		if (gl_renderer_create_pbuffer_surface(gr) < 0)
3694 			goto fail_with_error;
3695 	}
3696 
3697 	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_RGB565);
3698 	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUV420);
3699 	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_NV12);
3700 	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUYV);
3701 
3702 	wl_signal_init(&gr->destroy_signal);
3703 
3704 	if (gl_renderer_setup(ec, gr->dummy_surface) < 0) {
3705 		if (gr->dummy_surface != EGL_NO_SURFACE)
3706 			weston_platform_destroy_egl_surface(gr->egl_display,
3707 							    gr->dummy_surface);
3708 		goto fail_with_error;
3709 	}
3710 
3711 	return 0;
3712 
3713 fail_with_error:
3714 	gl_renderer_print_egl_error_state();
3715 fail_terminate:
3716 	eglTerminate(gr->egl_display);
3717 
3718 // OHOS hdi-backend
3719 fail_device:
3720 	gbm_device_destroy(gr->device);
3721 fail_fd:
3722 	close(gr->gbm_fd);
3723 
3724 fail:
3725 	free(gr);
3726 	ec->gpu_renderer = NULL;
3727 	return -1;
3728 }
3729 
3730 static int
3731 compile_shaders(struct weston_compositor *ec)
3732 {
3733 	struct gl_renderer *gr = get_renderer(ec);
3734 
3735 	gr->texture_shader_rgba.vertex_source = vertex_shader;
3736 	gr->texture_shader_rgba.fragment_source = texture_fragment_shader_rgba;
3737 
3738 	gr->texture_shader_rgbx.vertex_source = vertex_shader;
3739 	gr->texture_shader_rgbx.fragment_source = texture_fragment_shader_rgbx;
3740 
3741 	gr->texture_shader_egl_external.vertex_source = vertex_shader;
3742 	gr->texture_shader_egl_external.fragment_source =
3743 		texture_fragment_shader_egl_external;
3744 
3745 	gr->texture_shader_y_uv.vertex_source = vertex_shader;
3746 	gr->texture_shader_y_uv.fragment_source = texture_fragment_shader_y_uv;
3747 
3748 	gr->texture_shader_y_u_v.vertex_source = vertex_shader;
3749 	gr->texture_shader_y_u_v.fragment_source =
3750 		texture_fragment_shader_y_u_v;
3751 
3752 	gr->texture_shader_y_xuxv.vertex_source = vertex_shader;
3753 	gr->texture_shader_y_xuxv.fragment_source =
3754 		texture_fragment_shader_y_xuxv;
3755 
3756 	gr->texture_shader_xyuv.vertex_source = vertex_shader;
3757 	gr->texture_shader_xyuv.fragment_source = texture_fragment_shader_xyuv;
3758 
3759 	gr->solid_shader.vertex_source = vertex_shader;
3760 	gr->solid_shader.fragment_source = solid_fragment_shader;
3761 
3762 	return 0;
3763 }
3764 
3765 static void
3766 fragment_debug_binding(struct weston_keyboard *keyboard,
3767 		       const struct timespec *time,
3768 		       uint32_t key, void *data)
3769 {
3770 	struct weston_compositor *ec = data;
3771 	struct gl_renderer *gr = get_renderer(ec);
3772 	struct weston_output *output;
3773 
3774 	gr->fragment_shader_debug = !gr->fragment_shader_debug;
3775 
3776 	shader_release(&gr->texture_shader_rgba);
3777 	shader_release(&gr->texture_shader_rgbx);
3778 	shader_release(&gr->texture_shader_egl_external);
3779 	shader_release(&gr->texture_shader_y_uv);
3780 	shader_release(&gr->texture_shader_y_u_v);
3781 	shader_release(&gr->texture_shader_y_xuxv);
3782 	shader_release(&gr->texture_shader_xyuv);
3783 	shader_release(&gr->solid_shader);
3784 
3785 	/* Force use_shader() to call glUseProgram(), since we need to use
3786 	 * the recompiled version of the shader. */
3787 	gr->current_shader = NULL;
3788 
3789 	wl_list_for_each(output, &ec->output_list, link)
3790 		weston_output_damage(output);
3791 }
3792 
3793 static void
3794 fan_debug_repaint_binding(struct weston_keyboard *keyboard,
3795 			  const struct timespec *time,
3796 			  uint32_t key, void *data)
3797 {
3798 	struct weston_compositor *compositor = data;
3799 	struct gl_renderer *gr = get_renderer(compositor);
3800 
3801 	gr->fan_debug = !gr->fan_debug;
3802 	weston_compositor_damage_all(compositor);
3803 }
3804 
3805 static uint32_t
3806 get_gl_version(void)
3807 {
3808 	const char *version;
3809 	int major, minor;
3810 
3811 	version = (const char *) glGetString(GL_VERSION);
3812 	if (version &&
3813 	    (sscanf(version, "%d.%d", &major, &minor) == 2 ||
3814 	     sscanf(version, "OpenGL ES %d.%d", &major, &minor) == 2)) {
3815 		return GR_GL_VERSION(major, minor);
3816 	}
3817 
3818 	return GR_GL_VERSION_INVALID;
3819 }
3820 
3821 static int
3822 gl_renderer_setup(struct weston_compositor *ec, EGLSurface egl_surface)
3823 {
3824 	struct gl_renderer *gr = get_renderer(ec);
3825 	const char *extensions;
3826 	EGLBoolean ret;
3827 
3828 	EGLint context_attribs[16] = {
3829 		EGL_CONTEXT_CLIENT_VERSION, 0,
3830 	};
3831 	unsigned int nattr = 2;
3832 
3833 	if (!eglBindAPI(EGL_OPENGL_ES_API)) {
3834 		weston_log("failed to bind EGL_OPENGL_ES_API\n");
3835 		gl_renderer_print_egl_error_state();
3836 		return -1;
3837 	}
3838 
3839 	/*
3840 	 * Being the compositor we require minimum output latency,
3841 	 * so request a high priority context for ourselves - that should
3842 	 * reschedule all of our rendering and its dependencies to be completed
3843 	 * first. If the driver doesn't permit us to create a high priority
3844 	 * context, it will fall back to the default priority (MEDIUM).
3845 	 */
3846 	if (gr->has_context_priority) {
3847 		context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_LEVEL_IMG;
3848 		context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_HIGH_IMG;
3849 	}
3850 
3851 	assert(nattr < ARRAY_LENGTH(context_attribs));
3852 	context_attribs[nattr] = EGL_NONE;
3853 
3854 	/* try to create an OpenGLES 3 context first */
3855 	context_attribs[1] = 3;
3856 	gr->egl_context = eglCreateContext(gr->egl_display, gr->egl_config,
3857 					   EGL_NO_CONTEXT, context_attribs);
3858 	if (gr->egl_context == NULL) {
3859 		/* and then fallback to OpenGLES 2 */
3860 		context_attribs[1] = 2;
3861 		gr->egl_context = eglCreateContext(gr->egl_display,
3862 						   gr->egl_config,
3863 						   EGL_NO_CONTEXT,
3864 						   context_attribs);
3865 		if (gr->egl_context == NULL) {
3866 			weston_log("failed to create context\n");
3867 			gl_renderer_print_egl_error_state();
3868 			return -1;
3869 		}
3870 	}
3871 
3872 	if (gr->has_context_priority) {
3873 		EGLint value = EGL_CONTEXT_PRIORITY_MEDIUM_IMG;
3874 
3875 		eglQueryContext(gr->egl_display, gr->egl_context,
3876 				EGL_CONTEXT_PRIORITY_LEVEL_IMG, &value);
3877 
3878 		if (value != EGL_CONTEXT_PRIORITY_HIGH_IMG) {
3879 			weston_log("Failed to obtain a high priority context.\n");
3880 			/* Not an error, continue on as normal */
3881 		}
3882 	}
3883 
3884 	ret = eglMakeCurrent(gr->egl_display, EGL_NO_SURFACE,
3885 			     EGL_NO_SURFACE, gr->egl_context);
3886 	if (ret == EGL_FALSE) {
3887 		weston_log("Failed to make EGL context current.\n");
3888 		gl_renderer_print_egl_error_state();
3889 		return -1;
3890 	}
3891 
3892 	gr->gl_version = get_gl_version();
3893 	if (gr->gl_version == GR_GL_VERSION_INVALID) {
3894 		weston_log("warning: failed to detect GLES version, "
3895 			   "defaulting to 2.0.\n");
3896 		gr->gl_version = GR_GL_VERSION(2, 0);
3897 	}
3898 
3899 	log_gl_info();
3900 
3901 	gr->image_target_texture_2d =
3902 		(void *) eglGetProcAddress("glEGLImageTargetTexture2DOES");
3903 
3904 	extensions = (const char *) glGetString(GL_EXTENSIONS);
3905 	if (!extensions) {
3906 		weston_log("Retrieving GL extension string failed.\n");
3907 		return -1;
3908 	}
3909 
3910 	if (!weston_check_egl_extension(extensions, "GL_EXT_texture_format_BGRA8888")) {
3911 		weston_log("GL_EXT_texture_format_BGRA8888 not available\n");
3912 		return -1;
3913 	}
3914 
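	/* GLES only guarantees GL_RGBA read-back; with GL_EXT_read_format_bgra
	 * the BGRA byte order matches PIXMAN_a8r8g8b8 on little-endian, so
	 * screenshots and capture avoid a swizzle. */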
3915 	if (weston_check_egl_extension(extensions, "GL_EXT_read_format_bgra"))
3916 		ec->read_format = PIXMAN_a8r8g8b8;
3917 	else
3918 		ec->read_format = PIXMAN_a8b8g8r8;
3919 
3920 	if (gr->gl_version >= GR_GL_VERSION(3, 0) ||
3921 	    weston_check_egl_extension(extensions, "GL_EXT_unpack_subimage"))
3922 		gr->has_unpack_subimage = true;
3923 
3924 	if (gr->gl_version >= GR_GL_VERSION(3, 0) ||
3925 	    weston_check_egl_extension(extensions, "GL_EXT_texture_rg"))
3926 		gr->has_gl_texture_rg = true;
3927 
3928 	if (weston_check_egl_extension(extensions, "GL_OES_EGL_image_external"))
3929 		gr->has_egl_image_external = true;
3930 
3931 	glActiveTexture(GL_TEXTURE0);
3932 
3933 	if (compile_shaders(ec))
3934 		return -1;
3935 
3936 // OHOS remove debugger
3937 //	gr->fragment_binding =
3938 //		weston_compositor_add_debug_binding(ec, KEY_S,
3939 //						    fragment_debug_binding,
3940 //						    ec);
3941 //	gr->fan_binding =
3942 //		weston_compositor_add_debug_binding(ec, KEY_F,
3943 //						    fan_debug_repaint_binding,
3944 //						    ec);
3945 
3946 	gr->output_destroy_listener.notify = output_handle_destroy;
3947 	wl_signal_add(&ec->output_destroyed_signal,
3948 		      &gr->output_destroy_listener);
3949 
3950 	weston_log("GL ES 2 renderer features:\n");
3951 	weston_log_continue(STAMP_SPACE "read-back format: %s\n",
3952 		ec->read_format == PIXMAN_a8r8g8b8 ? "BGRA" : "RGBA");
3953 	weston_log_continue(STAMP_SPACE "wl_shm sub-image to texture: %s\n",
3954 			    gr->has_unpack_subimage ? "yes" : "no");
3955 	weston_log_continue(STAMP_SPACE "EGL Wayland extension: %s\n",
3956 			    gr->has_bind_display ? "yes" : "no");
3957 
3958 
3959 	return 0;
3960 }
3961 
3962 WL_EXPORT struct gl_renderer_interface gl_renderer_interface = {
3963 	.display_create = gl_renderer_display_create,
3964 	.output_window_create = gl_renderer_output_window_create,
3965 	.output_pbuffer_create = gl_renderer_output_pbuffer_create,
3966 	.output_destroy = gl_renderer_output_destroy,
3967 	.output_set_border = gl_renderer_output_set_border,
3968 	.create_fence_fd = gl_renderer_create_fence_fd,
3969 	// OHOS hdi-backend
3970 	.output_fbo_create = gl_renderer_output_fbo_create,
3971 	.output_get_current_fbo_index = gl_renderer_output_get_current_fbo_index,
3972 };
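
/*
 * Usage sketch (illustrative only, not part of the original file): a backend
 * drives the renderer through the exported gl_renderer_interface, roughly as
 * below. weston_load_module() and the option fields follow the upstream
 * Weston pattern; the hdi-backend's exact option structs may differ, and
 * fbo_options stands in for backend-filled buffer handles.
 *
 *	struct gl_renderer_interface *glri;
 *	struct gl_renderer_display_options display_options = {
 *		.egl_platform = EGL_PLATFORM_GBM_KHR,
 *		.drm_formats = formats,
 *		.drm_formats_count = n_formats,
 *	};
 *
 *	glri = weston_load_module("gl-renderer.so", "gl_renderer_interface");
 *	if (!glri || glri->display_create(compositor, &display_options) < 0)
 *		return -1;
 *
 *	if (glri->output_fbo_create(output, &fbo_options) < 0)
 *		return -1;
 *	int idx = glri->output_get_current_fbo_index(output);
 *	int fence_fd = glri->create_fence_fd(output);
 */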
3973