1 /*
2 * Copyright (c) 2021 Huawei Device Co., Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
20 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
21 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include "hdi_renderer.h"
27
#include <assert.h>
#include <string.h>
#include <sys/time.h>

#include <chrono>
#include <cinttypes>
#include <map>
#include <new>
#include <set>
#include <sstream>
#include <vector>
37
38 #include "hdi_backend.h"
39 #include "hdi_head.h"
40 #include "hdi_output.h"
41
42 // C header adapter
43 extern "C" {
44 #include "libweston/libweston.h"
45 #include "libweston/libweston-internal.h"
46 #include "libweston/linux-dmabuf.h"
47 #include "shared/helpers.h"
48 }
49
50 #include "libweston/trace.h"
51 DEFINE_LOG_LABEL("HdiRenderer");
52
// Renderer instance registered with the compositor; carries only the
// weston_renderer hook table, no extra per-renderer state.
struct hdi_renderer {
    struct weston_renderer base;
};
56
struct hdi_output_state;

// Per-surface renderer state, stored in weston_surface::hdi_renderer_state.
// Created lazily on first attach, freed by the surface destroy listener.
struct hdi_surface_state {
    // basic attribute
    struct weston_compositor *compositor;        // cached surface->compositor
    struct weston_surface *surface;
    struct wl_listener surface_destroy_listener; // releases this state on surface destroy
    struct weston_buffer_reference buffer_ref;   // currently attached client buffer

    // hdi cache attribute (persists across frames)
    std::map<uint32_t, uint32_t> layer_ids; // device_id: layer_id
    std::map<uint32_t, struct hdi_output_state *> hos; // device_id: ho

    // hdi once attribute (recomputed for each repaint)
    LayerInfo layer_info;
    IRect dst_rect;            // destination rect in output coordinates
    IRect src_rect;            // source crop rect in buffer coordinates
    uint32_t zorder;
    BlendType blend_type;
    CompositionType comp_type;
    TransformType rotate_type; // derived from the view transform each repaint
    BufferHandle *bh;          // mapped buffer handle (non-video surfaces only)
};
79
// Per-output renderer state, stored in weston_output::hdi_renderer_state.
struct hdi_output_state {
    std::set<struct hdi_surface_state *> layers; // surfaces composited on this output this frame
    uint32_t gpu_layer_id; // HDI layer for the GPU-composited framebuffer; -1 (wrapped) when absent
};
84
get_output_state(struct weston_output * output)85 struct hdi_output_state * get_output_state(struct weston_output *output)
86 {
87 return reinterpret_cast<struct hdi_output_state *>(output->hdi_renderer_state);
88 }
89
get_surface_state(struct weston_surface * surface)90 struct hdi_surface_state * get_surface_state(struct weston_surface *surface)
91 {
92 if (surface->hdi_renderer_state == nullptr) {
93 return nullptr;
94 }
95 return reinterpret_cast<struct hdi_surface_state *>(surface->hdi_renderer_state);
96 }
97
// Push all per-frame attributes of one HDI layer to the layer interface and
// record them for debug dumping. `buffer` may be NULL, in which case only the
// attributes (alpha, rects, z-order, blend/composition/rotate) are updated.
// Each HDI call result is traced via LOG_CORE.
void hdi_renderer_layer_operation(struct hdi_backend *b, int32_t device_id, int32_t layer_id,
    BufferHandle *buffer, int32_t fence,
    LayerAlpha *alpha,
    IRect *dst,
    IRect *src,
    uint32_t zorder,
    BlendType blend_type,
    CompositionType comp_type,
    TransformType rotate_type)
{
    // Snapshot the requested state for the dump facility, keyed by device/layer.
    LayerDumpInfo dump_info = {
        .alpha = *alpha,
        .src = *src,
        .dst = *dst,
        .zorder = zorder,
        .blend_type = blend_type,
        .comp_type = comp_type,
        .rotate_type = rotate_type,
    };
    b->layer_dump_info_pending[device_id][layer_id] = dump_info;

    LOG_CORE("LayerOperation device_id=%d layer_id=%d", device_id, layer_id);
    // Buffer is optional: attribute-only updates skip SetLayerBuffer.
    if (buffer != nullptr) {
        auto ret = b->layer_funcs->SetLayerBuffer(device_id, layer_id, buffer, fence);
        LOG_CORE("LayerFuncs.SetLayerBuffer return %d", ret);
    }

    auto ret = b->layer_funcs->SetLayerAlpha(device_id, layer_id, alpha);
    LOG_CORE("[ret=%d] LayerFuncs.SetLayerAlpha", ret);

    ret = b->layer_funcs->SetLayerSize(device_id, layer_id, dst);
    LOG_CORE("[ret=%d] LayerFuncs.SetLayerSize (%d, %d) %dx%d", ret, dst->x, dst->y, dst->w, dst->h);

    ret = b->layer_funcs->SetLayerCrop(device_id, layer_id, src);
    LOG_CORE("[ret=%d] LayerFuncs.SetLayerCrop (%d, %d) %dx%d", ret, src->x, src->y, src->w, src->h);

    ret = b->layer_funcs->SetLayerZorder(device_id, layer_id, zorder);
    LOG_CORE("[ret=%d] LayerFuncs.SetLayerZorder %d", ret, zorder);

    ret = b->layer_funcs->SetLayerBlendType(device_id, layer_id, blend_type);
    LOG_CORE("[ret=%d] LayerFuncs.SetLayerBlendType %d", ret, blend_type);

    ret = b->layer_funcs->SetLayerCompositionType(device_id, layer_id, comp_type);
    LOG_CORE("[ret=%d] LayerFuncs.SetLayerCompositionType %d", ret, comp_type);

    ret = b->layer_funcs->SetTransformMode(device_id, layer_id, rotate_type);
    LOG_CORE("[ret=%d] LayerFuncs.SetTransformMode %d", ret, rotate_type);
}
146
hdi_renderer_layer_close(struct hdi_backend * b,int32_t device_id,int32_t layer_id)147 void hdi_renderer_layer_close(struct hdi_backend *b, int32_t device_id, int32_t layer_id)
148 {
149 int ret = b->layer_funcs->CloseLayer(device_id, layer_id);
150 LOG_CORE("[ret=%d] LayerFuncs.CloseLayer device_id: %d, layer_id: %d", ret, device_id, layer_id);
151 }
152
hdi_renderer_surface_state_mmap(struct hdi_surface_state * hss)153 BufferHandle * hdi_renderer_surface_state_mmap(struct hdi_surface_state *hss)
154 {
155 if (hss == NULL || hss->surface == NULL) {
156 return NULL;
157 }
158
159 struct weston_buffer *buffer = hss->buffer_ref.buffer;
160 if (buffer == NULL) {
161 return NULL;
162 }
163
164 struct linux_dmabuf_buffer *dmabuf = linux_dmabuf_buffer_get(buffer->resource);
165 if (dmabuf == NULL) {
166 return NULL;
167 }
168
169 BufferHandle *bh = dmabuf->attributes.buffer_handle;
170 if (bh == NULL) {
171 return NULL;
172 }
173
174 if (bh->virAddr == NULL) {
175 struct hdi_backend *b = to_hdi_backend(hss->surface->compositor);
176 void *ptr = b->display_gralloc->Mmap(*bh);
177 LOG_CORE("GrallocFuncs.Mmap fd=%d return ptr=%p", bh->fd, ptr);
178 }
179 return bh;
180 }
181
hdi_renderer_surface_state_unmap(struct hdi_surface_state * hss)182 void hdi_renderer_surface_state_unmap(struct hdi_surface_state *hss)
183 {
184 if (hss == NULL || hss->surface == NULL) {
185 return;
186 }
187
188 struct weston_buffer *buffer = hss->buffer_ref.buffer;
189 if (buffer == NULL) {
190 return;
191 }
192
193 struct linux_dmabuf_buffer *dmabuf = linux_dmabuf_buffer_get(buffer->resource);
194 if (dmabuf == NULL) {
195 return;
196 }
197
198 BufferHandle *bh = dmabuf->attributes.buffer_handle;
199 if (bh == NULL) {
200 return;
201 }
202
203 if (bh->virAddr != NULL) {
204 struct hdi_backend *b = to_hdi_backend(hss->compositor);
205 auto fd = bh->fd;
206 auto ptr = bh->virAddr;
207 auto ret = b->display_gralloc->Unmap(*bh);
208 LOG_CORE("GrallocFuncs.Unmap fd=%d ptr=%p return %d", fd, ptr, ret);
209 }
210 }
211
hdi_renderer_surface_state_on_destroy(struct wl_listener * listener,void * data)212 void hdi_renderer_surface_state_on_destroy(struct wl_listener *listener,
213 void *data)
214 {
215 LOG_PASS();
216 struct hdi_surface_state *hss = container_of(listener,
217 struct hdi_surface_state,
218 surface_destroy_listener);
219 struct hdi_backend *b = to_hdi_backend(hss->compositor);
220 for (const auto &[device_id, layer_id] : hss->layer_ids) {
221 hdi_renderer_layer_close(b, device_id, layer_id);
222
223 // delete old layers in ho's cache
224 auto it = hss->hos.find(device_id);
225 if (it != hss->hos.end()) {
226 it->second->layers.erase(hss);
227 }
228 }
229
230 hdi_renderer_surface_state_unmap(hss);
231 weston_buffer_reference(&hss->buffer_ref, NULL);
232
233 free(hss);
234 }
235
hdi_renderer_create_surface_state(struct weston_surface * surface)236 int hdi_renderer_create_surface_state(struct weston_surface *surface)
237 {
238 LOG_PASS();
239 // life time
240 auto hss = new struct hdi_surface_state();
241 if (hss == NULL) {
242 return -1;
243 }
244
245 surface->hdi_renderer_state = hss;
246 hss->surface = surface;
247 hss->compositor = surface->compositor;
248
249 hss->surface_destroy_listener.notify =
250 hdi_renderer_surface_state_on_destroy;
251 wl_signal_add(&surface->destroy_signal,
252 &hss->surface_destroy_listener);
253 return 0;
254 }
255
hdi_renderer_attach(struct weston_surface * surface,struct weston_buffer * buffer)256 void hdi_renderer_attach(struct weston_surface *surface,
257 struct weston_buffer *buffer)
258 {
259 LOG_SCOPE();
260 assert(surface && !"hdi_renderer_attach surface is NULL");
261 assert(buffer && !"hdi_renderer_attach buffer is NULL");
262 if (surface->hdi_renderer_state == NULL) {
263 hdi_renderer_create_surface_state(surface);
264 }
265
266 auto hss = get_surface_state(surface);
267 struct linux_dmabuf_buffer *dmabuf = linux_dmabuf_buffer_get(buffer->resource);
268 if (dmabuf != NULL) {
269 LOG_INFO("dmabuf");
270 hdi_renderer_surface_state_unmap(hss);
271 weston_buffer_reference(&hss->buffer_ref, buffer);
272 buffer->width = dmabuf->attributes.width;
273 buffer->height = dmabuf->attributes.height;
274 return;
275 }
276
277 struct wl_shm_buffer *shmbuf = wl_shm_buffer_get(buffer->resource);
278 if (shmbuf != NULL) {
279 LOG_INFO("shmbuf");
280 hdi_renderer_surface_state_unmap(hss);
281 weston_buffer_reference(&hss->buffer_ref, buffer);
282 buffer->width = wl_shm_buffer_get_width(shmbuf);
283 buffer->height = wl_shm_buffer_get_height(shmbuf);
284 return;
285 }
286
287 LOG_ERROR("cannot attach buffer");
288 }
289
hdi_renderer_destroy(struct weston_compositor * compositor)290 void hdi_renderer_destroy(struct weston_compositor *compositor)
291 {
292 LOG_PASS();
293 struct hdi_renderer *renderer = (struct hdi_renderer *)compositor->hdi_renderer;
294 compositor->hdi_renderer = NULL;
295 free(renderer);
296 }
297
// weston_renderer::flush_damage — intentionally a no-op: this renderer hands
// client buffers to HDI layers directly, so there is no renderer-side copy
// to flush.
void hdi_renderer_flush_damage(struct weston_surface *surface)
{
}
301
// weston_renderer::import_dmabuf — accept every dmabuf unconditionally;
// the buffer handle is validated later at attach/mmap time.
bool hdi_renderer_import_dmabuf(struct weston_compositor *compositor,
    struct linux_dmabuf_buffer *buffer)
{
    return true;
}
307
hdi_renderer_query_dmabuf_formats(struct weston_compositor * compositor,int ** formats,int * num_formats)308 void hdi_renderer_query_dmabuf_formats(struct weston_compositor *compositor,
309 int **formats, int *num_formats)
310 {
311 *num_formats = 0;
312 *formats = NULL;
313 }
314
hdi_renderer_query_dmabuf_modifiers(struct weston_compositor * compositorc,int format,uint64_t ** modifiers,int * num_modifiers)315 void hdi_renderer_query_dmabuf_modifiers(struct weston_compositor *compositorc,
316 int format,
317 uint64_t **modifiers,
318 int *num_modifiers)
319 {
320 *num_modifiers = 0;
321 *modifiers = NULL;
322 }
323
hdi_renderer_read_pixels(struct weston_output * output,pixman_format_code_t format,void * pixels,uint32_t x,uint32_t y,uint32_t width,uint32_t height)324 int hdi_renderer_read_pixels(struct weston_output *output,
325 pixman_format_code_t format, void *pixels,
326 uint32_t x, uint32_t y,
327 uint32_t width, uint32_t height)
328 {
329 BufferHandle *bh = hdi_output_get_framebuffer(output);
330 int32_t bpp = bh->stride / bh->width;
331 int32_t stride = bh->stride;
332 int32_t offset = 0;
333
334 if (output->compositor->capabilities & WESTON_CAP_CAPTURE_YFLIP) {
335 for (int32_t j = y + height - 1; j >= (int32_t)y; j--) {
336 memcpy((uint8_t *)pixels + offset,
337 (uint8_t *)bh->virAddr + j * stride + x * bpp, width * bpp);
338 offset += width * bpp;
339 }
340 } else {
341 if (x == 0 && width == bh->width) {
342 memcpy(pixels, (uint8_t *)bh->virAddr + y * stride, height * stride);
343 return 0;
344 }
345
346 for (int32_t j = y; j < y + height; j++) {
347 memcpy((uint8_t *)pixels + offset,
348 (uint8_t *)bh->virAddr + j * stride + x * bpp, width * bpp);
349 offset += width * bpp;
350 }
351 }
352 return 0;
353 }
354
355 #define min(a, b) ((a) < (b) ? (a) : (b))
356 #define max(a, b) ((a) > (b) ? (a) : (b))
357
358 typedef void(*weston_view_compute_global_region_func)(struct weston_view *view,
359 float x, float y, float *vx, float *vy);
360
weston_view_compute_global_region(struct weston_view * view,pixman_region32_t * outr,pixman_region32_t * inr,weston_view_compute_global_region_func fn)361 void weston_view_compute_global_region(struct weston_view *view,
362 pixman_region32_t *outr,
363 pixman_region32_t *inr,
364 weston_view_compute_global_region_func fn)
365 {
366 float min_x = HUGE_VALF, min_y = HUGE_VALF;
367 float max_x = -HUGE_VALF, max_y = -HUGE_VALF;
368 pixman_box32_t *inbox = pixman_region32_extents(inr);
369 int32_t vs[4][2] = {
370 { inbox->x1, inbox->y1 },
371 { inbox->x1, inbox->y2 },
372 { inbox->x2, inbox->y1 },
373 { inbox->x2, inbox->y2 },
374 };
375
376 if (inbox->x1 == inbox->x2 || inbox->y1 == inbox->y2) {
377 pixman_region32_init(outr);
378 return;
379 }
380
381 for (int i = 0; i < 4; i++) {
382 float x, y;
383 fn(view, vs[i][0], vs[i][1], &x, &y);
384 min_x = min(min_x, x);
385 max_x = max(max_x, x);
386 min_y = min(min_y, y);
387 max_y = max(max_y, y);
388 }
389
390 float int_x = floorf(min_x);
391 float int_y = floorf(min_y);
392 pixman_region32_init_rect(outr, int_x, int_y,
393 ceilf(max_x) - int_x, ceilf(max_y) - int_y);
394 }
395
396 #undef min
397 #undef max
398
// Map a view-local region into global compositor space (bounding box of the
// transformed corners).
void weston_view_to_global_region(struct weston_view *view,
    pixman_region32_t *outr,
    pixman_region32_t *inr)
{
    weston_view_compute_global_region(view, outr, inr, weston_view_to_global_float);
}
405
// Map a global-space region into view-local coordinates (bounding box of the
// transformed corners).
void weston_view_from_global_region(struct weston_view *view,
    pixman_region32_t *outr,
    pixman_region32_t *inr)
{
    weston_view_compute_global_region(view, outr, inr, weston_view_from_global_float);
}
412
// Compute the repaint regions for one view on one output:
//  - global_repaint_region: the view's bounding box clipped to output_damage,
//    finally translated into output-local coordinates;
//  - buffer_repaint_region: the same area mapped into buffer coordinates and
//    clipped to the buffer bounds.
// As a side effect, derives hss->rotate_type from the combined
// output-inverse * view-inverse * buffer-to-surface matrix.
void hdi_renderer_repaint_output_calc_region(pixman_region32_t *global_repaint_region,
    pixman_region32_t *buffer_repaint_region,
    pixman_region32_t *output_damage,
    struct weston_output *output,
    struct weston_view *view)
{
    // Build the matrix that maps buffer coordinates all the way to output space.
    struct weston_matrix matrix = output->inverse_matrix;
    if (view->transform.enabled) {
        weston_matrix_multiply(&matrix, &view->transform.inverse);
        LOG_INFO("transform enabled");
    } else {
        weston_matrix_translate(&matrix,
                                -view->geometry.x, -view->geometry.y, 0);
        LOG_INFO("transform disabled");
    }
    weston_matrix_multiply(&matrix, &view->surface->buffer_to_surface_matrix);

    auto hss = get_surface_state(view->surface);
    // Classify the rotation from the matrix elements. Assumes weston's 4x4
    // column-major layout where d[0]/d[5] are the X/Y scale terms and
    // d[1]/d[4] the cross terms: zero scales mean a 90/270 rotation, and the
    // signs select the quadrant / mirroring. TODO(review): confirm against
    // weston_matrix documentation.
    if (matrix.d[0] == matrix.d[5] && matrix.d[0] == 0) {
        if (matrix.d[4] > 0 && matrix.d[1] > 0) {
            LOG_INFO("Transform: 90 mirror");
            hss->rotate_type = ROTATE_90;
        } else if (matrix.d[4] < 0 && matrix.d[1] > 0) {
            LOG_INFO("Transform: 90");
            hss->rotate_type = ROTATE_90;
        } else if (matrix.d[4] < 0 && matrix.d[1] < 0) {
            LOG_INFO("Transform: 270 mirror");
            hss->rotate_type = ROTATE_270;
        } else if (matrix.d[4] > 0 && matrix.d[1] < 0) {
            LOG_INFO("Transform: 270");
            hss->rotate_type = ROTATE_270;
        }
    } else {
        if (matrix.d[0] > 0 && matrix.d[5] > 0) {
            LOG_INFO("Transform: 0");
            hss->rotate_type = ROTATE_NONE;
        } else if (matrix.d[0] < 0 && matrix.d[5] < 0) {
            LOG_INFO("Transform: 180");
            hss->rotate_type = ROTATE_180;
        } else if (matrix.d[0] < 0 && matrix.d[5] > 0) {
            LOG_INFO("Transform: 0 mirror");
            hss->rotate_type = ROTATE_NONE;
        } else if (matrix.d[0] > 0 && matrix.d[5] < 0) {
            LOG_INFO("Transform: 180 mirror");
            hss->rotate_type = ROTATE_180;
        }
    }

    LOG_MATRIX(&matrix);
    LOG_INFO("%d %d", view->surface->width, view->surface->height);

    pixman_region32_t buffer_region;
    pixman_region32_t surface_repaint_region;
    struct weston_buffer *buffer = view->surface->buffer_ref.buffer;
    pixman_region32_init_rect(&buffer_region, 0, 0, buffer->width, buffer->height);
    pixman_region32_init(&surface_repaint_region);

    LOG_REGION(1, &buffer_region);
    LOG_REGION(2, &view->transform.boundingbox);
    // Global region = view bounding box intersected with the damaged area.
    pixman_region32_intersect(global_repaint_region, &view->transform.boundingbox, output_damage);
    LOG_REGION(3, global_repaint_region);

    // Map the damaged area back through the view transform into surface space…
    weston_matrix_transform_region(&surface_repaint_region, &view->transform.inverse, global_repaint_region);
    LOG_REGION(4, &surface_repaint_region);

    // …then from surface space into buffer coordinates.
    weston_surface_to_buffer_region(view->surface, &surface_repaint_region, buffer_repaint_region);
    LOG_REGION(5, buffer_repaint_region);

    // Never crop outside the actual buffer.
    pixman_region32_intersect(buffer_repaint_region, buffer_repaint_region, &buffer_region);
    LOG_REGION(6, buffer_repaint_region);

    // Convert the global region to output-local coordinates.
    pixman_region32_translate(global_repaint_region, -output->x, -output->y);
    LOG_REGION(7, global_repaint_region);

    pixman_region32_fini(&surface_repaint_region);
    pixman_region32_fini(&buffer_region);
}
490
hdi_renderer_surface_state_calc_rect(struct hdi_surface_state * hss,pixman_region32_t * output_damage,struct weston_output * output,struct weston_view * view)491 void hdi_renderer_surface_state_calc_rect(struct hdi_surface_state *hss,
492 pixman_region32_t *output_damage, struct weston_output *output, struct weston_view *view)
493 {
494 pixman_region32_t global_repaint_region;
495 pixman_region32_t buffer_repaint_region;
496 pixman_region32_init(&global_repaint_region);
497 pixman_region32_init(&buffer_repaint_region);
498
499 hdi_renderer_repaint_output_calc_region(&global_repaint_region,
500 &buffer_repaint_region,
501 output_damage,
502 output, view);
503
504 pixman_box32_t *global_box = pixman_region32_extents(&global_repaint_region);
505 hss->dst_rect.x = global_box->x1;
506 hss->dst_rect.y = global_box->y1;
507 hss->dst_rect.w = global_box->x2 - global_box->x1;
508 hss->dst_rect.h = global_box->y2 - global_box->y1;
509
510 pixman_box32_t *buffer_box = pixman_region32_extents(&buffer_repaint_region);
511 hss->src_rect.x = buffer_box->x1;
512 hss->src_rect.y = buffer_box->y1;
513 hss->src_rect.w = buffer_box->x2 - buffer_box->x1;
514 hss->src_rect.h = buffer_box->y2 - buffer_box->y1;
515
516 pixman_region32_fini(&global_repaint_region);
517 pixman_region32_fini(&buffer_repaint_region);
518 }
519
hdi_renderer_surface_state_create_layer(struct hdi_surface_state * hss,struct hdi_backend * b,struct weston_output * output)520 int hdi_renderer_surface_state_create_layer(struct hdi_surface_state *hss,
521 struct hdi_backend *b, struct weston_output *output)
522 {
523 struct weston_mode *mode = output->current_mode;
524 struct weston_head *whead = weston_output_get_first_head(output);
525 auto device_id = hdi_head_get_device_id(whead);
526 auto it = hss->layer_ids.find(device_id);
527 if (it == hss->layer_ids.end()) {
528 hss->layer_info.width = mode->width;
529 hss->layer_info.height = mode->height;
530 if (hss->surface->type == WL_SURFACE_TYPE_VIDEO) {
531 // video
532 } else {
533 // other
534 BufferHandle *bh = hdi_renderer_surface_state_mmap(hss);
535 hss->layer_info.bpp = bh->stride * 0x8 / bh->width;
536 hss->layer_info.pixFormat = (PixelFormat)bh->format;
537 hss->bh = bh;
538 }
539 hss->layer_info.type = LAYER_TYPE_GRAPHIC;
540 int ret = b->layer_funcs->CreateLayer(device_id,
541 &hss->layer_info, &hss->layer_ids[device_id]);
542 LOG_CORE("LayerFuncs.CreateLayer return %d", ret);
543 if (ret != DISPLAY_SUCCESS) {
544 LOG_ERROR("create layer failed");
545 hss->layer_ids.erase(device_id);
546 return -1;
547 }
548 LOG_IMPORTANT("create layer: {%d:%d}", device_id, hss->layer_ids[device_id]);
549 } else {
550 LOG_IMPORTANT("use layer: {%d:%d}", device_id, it->second);
551 }
552 return 0;
553 }
554
hdi_renderer_repaint_output(struct weston_output * output,pixman_region32_t * output_damage)555 void hdi_renderer_repaint_output(struct weston_output *output,
556 pixman_region32_t *output_damage)
557 {
558 LOG_SCOPE();
559 struct weston_compositor *compositor = output->compositor;
560 struct hdi_backend *b = to_hdi_backend(compositor);
561 struct weston_head *whead = weston_output_get_first_head(output);
562 uint32_t device_id = hdi_head_get_device_id(whead);
563 auto ho = get_output_state(output);
564 auto old_layers = ho->layers;
565 ho->layers.clear();
566
567 int32_t zorder = 2;
568 struct weston_view *view;
569 pixman_region32_t repaint;
570 wl_list_for_each_reverse(view, &compositor->view_list, link) {
571 if (view->renderer_type != WESTON_RENDERER_TYPE_HDI) {
572 continue;
573 }
574 pixman_region32_init(&repaint);
575 pixman_region32_intersect(&repaint,
576 &view->transform.boundingbox, output_damage);
577 pixman_region32_subtract(&repaint, &repaint, &view->clip);
578
579 if (!pixman_region32_not_empty(&repaint)) {
580 continue;
581 }
582
583 auto hss = get_surface_state(view->surface);
584 if (hss == NULL) {
585 continue;
586 }
587
588 if (hdi_renderer_surface_state_create_layer(hss, b, output) != 0) {
589 continue;
590 }
591
592 ho->layers.insert(hss);
593 hss->hos[device_id] = ho;
594
595 hdi_renderer_surface_state_calc_rect(hss, output_damage, output, view);
596 hss->zorder = zorder++;
597 hss->blend_type = BLEND_SRCOVER;
598 if (hss->surface->type == WL_SURFACE_TYPE_VIDEO) {
599 hss->comp_type = COMPOSITION_VIDEO;
600 hss->zorder += 100;
601 } else {
602 hss->comp_type = COMPOSITION_DEVICE;
603 }
604 }
605
606 // close not composite layer
607 for (auto &hss : old_layers) {
608 if (ho->layers.find(hss) == ho->layers.end()) {
609 hdi_renderer_layer_close(b, device_id, hss->layer_ids[device_id]);
610 hss->layer_ids.erase(device_id);
611 }
612 }
613
614 wl_list_for_each_reverse(view, &compositor->view_list, link) {
615 if (view->renderer_type != WESTON_RENDERER_TYPE_HDI) {
616 continue;
617 }
618 pixman_region32_init(&repaint);
619 pixman_region32_intersect(&repaint,
620 &view->transform.boundingbox, output_damage);
621 pixman_region32_subtract(&repaint, &repaint, &view->clip);
622
623 if (!pixman_region32_not_empty(&repaint)) {
624 continue;
625 }
626
627 LOG_INFO("LayerOperation: %p", view);
628 auto hss = get_surface_state(view->surface);
629 if (hss == NULL) {
630 continue;
631 }
632
633 if (hdi_renderer_surface_state_create_layer(hss, b, output) != 0) {
634 continue;
635 }
636
637 b->layer_dump_info_pending[device_id][hss->layer_ids[device_id]].view = view;
638 BufferHandle *bh = nullptr;
639 if (hss->surface->type != WL_SURFACE_TYPE_VIDEO) {
640 bh = hdi_renderer_surface_state_mmap(hss);
641 }
642
643 LayerAlpha alpha = { .enPixelAlpha = true };
644 hdi_renderer_layer_operation(b, device_id, hss->layer_ids[device_id],
645 bh, -1,
646 &alpha,
647 &hss->dst_rect,
648 &hss->src_rect,
649 hss->zorder,
650 hss->blend_type,
651 hss->comp_type,
652 hss->rotate_type);
653 }
654 pixman_region32_fini(&repaint);
655 }
656
// weston_renderer::surface_set_color — intentionally a no-op: solid-color
// surfaces are not rendered by this HDI path.
void hdi_renderer_surface_set_color(struct weston_surface *surface,
    float red, float green,
    float blue, float alpha)
{
}
662
hdi_renderer_surface_get_content_size(struct weston_surface * surface,int * width,int * height)663 void hdi_renderer_surface_get_content_size(struct weston_surface *surface,
664 int *width, int *height)
665 {
666 auto hss = get_surface_state(surface);
667 if (hss == NULL) {
668 LOG_ERROR("hdi_renderer_state is null\n");
669 *width = 0;
670 *height = 0;
671 return;
672 }
673 BufferHandle *bh = hdi_renderer_surface_state_mmap(hss);
674 if (bh == NULL) {
675 LOG_ERROR("hdi_renderer_surface_state_mmap error\n");
676 *width = 0;
677 *height = 0;
678 return;
679 }
680
681 *width = bh->width;
682 *height = bh->height;
683 return;
684 }
685
hdi_renderer_surface_copy_content(struct weston_surface * surface,void * target,size_t size,int src_x,int src_y,int width,int height)686 int hdi_renderer_surface_copy_content(struct weston_surface *surface,
687 void *target, size_t size,
688 int src_x, int src_y, int width, int height)
689 {
690 auto hss = get_surface_state(surface);
691 if (hss == NULL) {
692 LOG_ERROR("hdi_renderer_state is null\n");
693 return -1;
694 }
695
696
697 BufferHandle *bh = hdi_renderer_surface_state_mmap(hss);
698 if (bh == NULL) {
699 LOG_ERROR("hdi_renderer_surface_state_mmap error\n");
700 return -1;
701 }
702
703 memcpy(target, bh->virAddr, size);
704 return 0;
705 }
706
hdi_renderer_init(struct weston_compositor * compositor)707 int hdi_renderer_init(struct weston_compositor *compositor)
708 {
709 LOG_PASS();
710 struct hdi_renderer *renderer = (struct hdi_renderer *)zalloc(sizeof *renderer);
711
712 renderer->base.attach = hdi_renderer_attach;
713 renderer->base.destroy = hdi_renderer_destroy;
714 renderer->base.flush_damage = hdi_renderer_flush_damage;
715 renderer->base.import_dmabuf = hdi_renderer_import_dmabuf;
716 renderer->base.query_dmabuf_formats = hdi_renderer_query_dmabuf_formats;
717 renderer->base.query_dmabuf_modifiers = hdi_renderer_query_dmabuf_modifiers;
718 renderer->base.read_pixels = hdi_renderer_read_pixels;
719 renderer->base.repaint_output = hdi_renderer_repaint_output;
720 renderer->base.surface_set_color = hdi_renderer_surface_set_color;
721 renderer->base.surface_copy_content = hdi_renderer_surface_copy_content;
722 renderer->base.surface_get_content_size = hdi_renderer_surface_get_content_size;
723
724 compositor->hdi_renderer = &renderer->base;
725 return 0;
726 }
727
// Allocate per-output renderer state. gpu_layer_id is unsigned, so the -1
// assignment wraps to UINT32_MAX and serves as the "no GPU layer" sentinel
// (see hdi_renderer_output_set_gpu_buffer). `options` is currently unused.
int hdi_renderer_output_create(struct weston_output *output,
    const struct hdi_renderer_output_options *options)
{
    LOG_SCOPE();
    auto ho = new struct hdi_output_state();
    ho->gpu_layer_id = -1;
    output->hdi_renderer_state = ho;
    return 0;
}
737
hdi_renderer_output_destroy(struct weston_output * output)738 void hdi_renderer_output_destroy(struct weston_output *output)
739 {
740 LOG_SCOPE();
741 auto ho = (struct hdi_output_state *)output->hdi_renderer_state;
742 if (ho->gpu_layer_id == DISPLAY_SUCCESS) {
743 struct hdi_backend *b = to_hdi_backend(output->compositor);
744 struct weston_head *whead = weston_output_get_first_head(output);
745 uint32_t device_id = hdi_head_get_device_id(whead);
746 hdi_renderer_layer_close(b, device_id, ho->gpu_layer_id);
747 }
748
749 delete ho;
750 }
751
hdi_renderer_output_set_gpu_buffer(struct weston_output * output,BufferHandle * buffer)752 void hdi_renderer_output_set_gpu_buffer(struct weston_output *output, BufferHandle *buffer)
753 {
754 LOG_SCOPE();
755 struct hdi_backend *b = to_hdi_backend(output->compositor);
756 struct hdi_output_state *ho =
757 (struct hdi_output_state *)output->hdi_renderer_state;
758 struct weston_head *whead = weston_output_get_first_head(output);
759 int32_t device_id = hdi_head_get_device_id(whead);
760
761 // close last gpu layer
762 if (ho->gpu_layer_id != -1) {
763 hdi_renderer_layer_close(b, device_id, ho->gpu_layer_id);
764 }
765
766 // create layer
767 LayerInfo layer_info = {
768 .width = buffer->width,
769 .height = buffer->height,
770 .type = LAYER_TYPE_GRAPHIC,
771 .bpp = buffer->stride * 0x8 / buffer->width,
772 .pixFormat = (PixelFormat)buffer->format,
773 };
774 int ret = b->layer_funcs->CreateLayer(device_id, &layer_info, &ho->gpu_layer_id);
775 LOG_CORE("LayerFuncs.CreateLayer GPU return %d", ret);
776 if (ret != DISPLAY_SUCCESS) {
777 LOG_ERROR("create layer failed");
778 ho->gpu_layer_id = -1;
779 return;
780 }
781 LOG_INFO("create layer GPU {%d:%d}", device_id, ho->gpu_layer_id);
782
783 // param
784 LayerAlpha alpha = { .enPixelAlpha = true };
785 int32_t fence = -1;
786 IRect dst_rect = { .w = buffer->width, .h = buffer->height, };
787 IRect src_rect = dst_rect;
788
789 // layer operation
790 hdi_renderer_layer_operation(b, device_id, ho->gpu_layer_id,
791 buffer, -1,
792 &alpha,
793 &dst_rect,
794 &src_rect,
795 1, // 1 for gpu
796 BLEND_SRC,
797 COMPOSITION_DEVICE,
798 ROTATE_NONE);
799 }
800