/*
 * Copyright © 2008-2011 Kristian Høgsberg
 * Copyright © 2011 Intel Corporation
 * Copyright © 2017, 2018 Collabora, Ltd.
 * Copyright © 2017, 2018 General Electric Company
 * Copyright (c) 2018 DisplayLink (UK) Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "config.h"

#include <stdint.h>

#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

#include <libweston/libweston.h>
#include <libweston/backend-drm.h>
#include "shared/helpers.h"
#include "drm-internal.h"
#include "pixel-formats.h"
#include "presentation-time-server-protocol.h"

#include "libweston/trace.h"
DEFINE_LOG_LABEL("KMS");

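/* Older drm_fourcc.h headers do not define DRM_FORMAT_MOD_LINEAR; provide a
 * fallback so the code below still builds. The canonical value of the linear
 * (no tiling) modifier is 0. */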
#ifndef DRM_FORMAT_MOD_LINEAR
#define DRM_FORMAT_MOD_LINEAR 0
#endif

struct drm_property_enum_info plane_type_enums[] = {
	[WDRM_PLANE_TYPE_PRIMARY] = {
		.name = "Primary",
	},
	[WDRM_PLANE_TYPE_OVERLAY] = {
		.name = "Overlay",
	},
	[WDRM_PLANE_TYPE_CURSOR] = {
		.name = "Cursor",
	},
};

const struct drm_property_info plane_props[] = {
	[WDRM_PLANE_TYPE] = {
		.name = "type",
		.enum_values = plane_type_enums,
		.num_enum_values = WDRM_PLANE_TYPE__COUNT,
	},
	[WDRM_PLANE_SRC_X] = { .name = "SRC_X", },
	[WDRM_PLANE_SRC_Y] = { .name = "SRC_Y", },
	[WDRM_PLANE_SRC_W] = { .name = "SRC_W", },
	[WDRM_PLANE_SRC_H] = { .name = "SRC_H", },
	[WDRM_PLANE_CRTC_X] = { .name = "CRTC_X", },
	[WDRM_PLANE_CRTC_Y] = { .name = "CRTC_Y", },
	[WDRM_PLANE_CRTC_W] = { .name = "CRTC_W", },
	[WDRM_PLANE_CRTC_H] = { .name = "CRTC_H", },
	[WDRM_PLANE_FB_ID] = { .name = "FB_ID", },
	[WDRM_PLANE_CRTC_ID] = { .name = "CRTC_ID", },
	[WDRM_PLANE_IN_FORMATS] = { .name = "IN_FORMATS" },
	[WDRM_PLANE_IN_FENCE_FD] = { .name = "IN_FENCE_FD" },
	[WDRM_PLANE_FB_DAMAGE_CLIPS] = { .name = "FB_DAMAGE_CLIPS" },
	[WDRM_PLANE_ZPOS] = { .name = "zpos" },
};

struct drm_property_enum_info dpms_state_enums[] = {
	[WDRM_DPMS_STATE_OFF] = {
		.name = "Off",
	},
	[WDRM_DPMS_STATE_ON] = {
		.name = "On",
	},
	[WDRM_DPMS_STATE_STANDBY] = {
		.name = "Standby",
	},
	[WDRM_DPMS_STATE_SUSPEND] = {
		.name = "Suspend",
	},
};

struct drm_property_enum_info content_protection_enums[] = {
	[WDRM_CONTENT_PROTECTION_UNDESIRED] = {
		.name = "Undesired",
	},
	[WDRM_CONTENT_PROTECTION_DESIRED] = {
		.name = "Desired",
	},
	[WDRM_CONTENT_PROTECTION_ENABLED] = {
		.name = "Enabled",
	},
};

struct drm_property_enum_info hdcp_content_type_enums[] = {
	[WDRM_HDCP_CONTENT_TYPE0] = {
		.name = "HDCP Type0",
	},
	[WDRM_HDCP_CONTENT_TYPE1] = {
		.name = "HDCP Type1",
	},
};

struct drm_property_enum_info panel_orientation_enums[] = {
	[WDRM_PANEL_ORIENTATION_NORMAL] = { .name = "Normal", },
	[WDRM_PANEL_ORIENTATION_UPSIDE_DOWN] = { .name = "Upside Down", },
	[WDRM_PANEL_ORIENTATION_LEFT_SIDE_UP] = { .name = "Left Side Up", },
	[WDRM_PANEL_ORIENTATION_RIGHT_SIDE_UP] = { .name = "Right Side Up", },
};

const struct drm_property_info connector_props[] = {
	[WDRM_CONNECTOR_EDID] = { .name = "EDID" },
	[WDRM_CONNECTOR_DPMS] = {
		.name = "DPMS",
		.enum_values = dpms_state_enums,
		.num_enum_values = WDRM_DPMS_STATE__COUNT,
	},
	[WDRM_CONNECTOR_CRTC_ID] = { .name = "CRTC_ID", },
	[WDRM_CONNECTOR_NON_DESKTOP] = { .name = "non-desktop", },
	[WDRM_CONNECTOR_CONTENT_PROTECTION] = {
		.name = "Content Protection",
		.enum_values = content_protection_enums,
		.num_enum_values = WDRM_CONTENT_PROTECTION__COUNT,
	},
	[WDRM_CONNECTOR_HDCP_CONTENT_TYPE] = {
		.name = "HDCP Content Type",
		.enum_values = hdcp_content_type_enums,
		.num_enum_values = WDRM_HDCP_CONTENT_TYPE__COUNT,
	},
	[WDRM_CONNECTOR_PANEL_ORIENTATION] = {
		.name = "panel orientation",
		.enum_values = panel_orientation_enums,
		.num_enum_values = WDRM_PANEL_ORIENTATION__COUNT,
	},
};

const struct drm_property_info crtc_props[] = {
	[WDRM_CRTC_MODE_ID] = { .name = "MODE_ID", },
	[WDRM_CRTC_ACTIVE] = { .name = "ACTIVE", },
};


/**
 * Mode for drm_pending_state_apply and co.
 */
enum drm_state_apply_mode {
	DRM_STATE_APPLY_SYNC, /**< state fully processed */
	DRM_STATE_APPLY_ASYNC, /**< state pending event delivery */
	DRM_STATE_TEST_ONLY, /**< test if the state can be applied */
};
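/* How the apply modes translate into KMS commits (see
 * drm_pending_state_apply_atomic() below): DRM_STATE_TEST_ONLY maps to a
 * commit with DRM_MODE_ATOMIC_TEST_ONLY, DRM_STATE_APPLY_ASYNC to
 * DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK, and
 * DRM_STATE_APPLY_SYNC to a blocking commit with no flags. On the legacy
 * path, SYNC/ASYNC are handled by drm_output_apply_state_legacy(), and
 * testing always reports success because the API offers no way to test. */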

/**
 * Get the current value of a KMS property
 *
 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
 * for the target property, return the current value of that property,
 * with an optional default. If the property is a KMS enum type, the return
 * value will be translated into the appropriate internal enum.
 *
 * If the property is not present, the default value will be returned.
 *
 * @param info Internal structure for property to look up
 * @param props Raw KMS properties for the target object
 * @param def Value to return if property is not found
 */
uint64_t
drm_property_get_value(struct drm_property_info *info,
		       const drmModeObjectProperties *props,
		       uint64_t def)
{
	unsigned int i;

	if (info->prop_id == 0)
		return def;

	for (i = 0; i < props->count_props; i++) {
		unsigned int j;

		if (props->props[i] != info->prop_id)
			continue;

		/* Simple (non-enum) types can return the value directly */
		if (info->num_enum_values == 0)
			return props->prop_values[i];

		/* Map from raw value to enum value */
		for (j = 0; j < info->num_enum_values; j++) {
			if (!info->enum_values[j].valid)
				continue;
			if (info->enum_values[j].value != props->prop_values[i])
				continue;

			return j;
		}

		/* We don't have a mapping for this enum; return default. */
		break;
	}

	return def;
}
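/* Usage sketch: drm_plane_populate_formats() below reads the IN_FORMATS
 * blob ID for a plane this way, treating the default of 0 as "no blob":
 *
 *	blob_id = drm_property_get_value(&plane->props[WDRM_PLANE_IN_FORMATS],
 *					 props, 0);
 *	if (blob_id == 0)
 *		goto fallback;
 */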

/**
 * Get the current range values of a KMS property
 *
 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
 * for the target property, return the current range values of that property.
 *
 * If the property is not present, or it is not a range property, then
 * NULL will be returned.
 *
 * @param info Internal structure for property to look up
 * @param props Raw KMS properties for the target object
 */
uint64_t *
drm_property_get_range_values(struct drm_property_info *info,
			      const drmModeObjectProperties *props)
{
	unsigned int i;

	if (info->prop_id == 0)
		return NULL;

	for (i = 0; i < props->count_props; i++) {

		if (props->props[i] != info->prop_id)
			continue;

		if (!(info->flags & DRM_MODE_PROP_RANGE) &&
		    !(info->flags & DRM_MODE_PROP_SIGNED_RANGE))
			continue;

		return info->range_values;
	}

	return NULL;
}
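/* For RANGE and SIGNED_RANGE properties the kernel reports exactly two
 * values, the minimum and the maximum, so the range_values[] cached by
 * drm_property_info_populate() hold the property's limits (for example the
 * legal "zpos" interval that ends up compared via zpos_min/zpos_max when
 * building atomic plane state below). */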

/**
 * Cache DRM property values
 *
 * Update a per-object array of drm_property_info structures, given the
 * DRM properties of the object.
 *
 * Call this every time an object newly appears (note that only connectors
 * can be hotplugged), the first time it is seen, or when its status changes
 * in a way which invalidates the potential property values (currently, the
 * only case for this is connector hotplug).
 *
 * This updates the property IDs and enum values within the drm_property_info
 * array.
 *
 * DRM property enum values are dynamic at runtime; the user must query the
 * property to find out the desired runtime value for a requested string
 * name. Using the 'type' field on planes as an example, there is no single
 * hardcoded constant for primary plane types; instead, the property must be
 * queried at runtime to find the value associated with the string "Primary".
 *
 * This helper queries and caches the enum values, to allow us to use a set
 * of compile-time-constant enums portably across various implementations.
 * The values given in enum_names are searched for, and stored in the
 * same-indexed field of the map array.
 *
 * @param b DRM backend object
 * @param src DRM property info array to source from
 * @param info DRM property info array to copy into
 * @param num_infos Number of entries in the source array
 * @param props DRM object properties for the object
 */
void
drm_property_info_populate(struct drm_backend *b,
			   const struct drm_property_info *src,
			   struct drm_property_info *info,
			   unsigned int num_infos,
			   drmModeObjectProperties *props)
{
	drmModePropertyRes *prop;
	unsigned i, j;

	for (i = 0; i < num_infos; i++) {
		unsigned int j;

		info[i].name = src[i].name;
		info[i].prop_id = 0;
		info[i].num_enum_values = src[i].num_enum_values;

		if (src[i].num_enum_values == 0)
			continue;

		info[i].enum_values =
			malloc(src[i].num_enum_values *
			       sizeof(*info[i].enum_values));
		assert(info[i].enum_values);
		for (j = 0; j < info[i].num_enum_values; j++) {
			info[i].enum_values[j].name = src[i].enum_values[j].name;
			info[i].enum_values[j].valid = false;
		}
	}

	for (i = 0; i < props->count_props; i++) {
		unsigned int k;

		prop = drmModeGetProperty(b->drm.fd, props->props[i]);
		if (!prop)
			continue;

		for (j = 0; j < num_infos; j++) {
			if (!strcmp(prop->name, info[j].name))
				break;
		}

		/* We don't know/care about this property. */
		if (j == num_infos) {
#ifdef DEBUG
			weston_log("DRM debug: unrecognized property %u '%s'\n",
				   prop->prop_id, prop->name);
#endif
			drmModeFreeProperty(prop);
			continue;
		}

		if (info[j].num_enum_values == 0 &&
		    (prop->flags & DRM_MODE_PROP_ENUM)) {
			weston_log("DRM: expected property %s to not be an"
				   " enum, but it is; ignoring\n", prop->name);
			drmModeFreeProperty(prop);
			continue;
		}

		info[j].prop_id = props->props[i];
		info[j].flags = prop->flags;

		if (prop->flags & DRM_MODE_PROP_RANGE ||
		    prop->flags & DRM_MODE_PROP_SIGNED_RANGE) {
			info[j].num_range_values = prop->count_values;
			for (int i = 0; i < prop->count_values; i++)
				info[j].range_values[i] = prop->values[i];
		}


		if (info[j].num_enum_values == 0) {
			drmModeFreeProperty(prop);
			continue;
		}

		if (!(prop->flags & DRM_MODE_PROP_ENUM)) {
			weston_log("DRM: expected property %s to be an enum,"
				   " but it is not; ignoring\n", prop->name);
			drmModeFreeProperty(prop);
			info[j].prop_id = 0;
			continue;
		}

		for (k = 0; k < info[j].num_enum_values; k++) {
			int l;

			for (l = 0; l < prop->count_enums; l++) {
				if (!strcmp(prop->enums[l].name,
					    info[j].enum_values[k].name))
					break;
			}

			if (l == prop->count_enums)
				continue;

			info[j].enum_values[k].valid = true;
			info[j].enum_values[k].value = prop->enums[l].value;
		}

		drmModeFreeProperty(prop);
	}

#ifdef DEBUG
	for (i = 0; i < num_infos; i++) {
		if (info[i].prop_id == 0)
			weston_log("DRM warning: property '%s' missing\n",
				   info[i].name);
	}
#endif
}
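/* Once populated, code elsewhere can compare KMS state against the
 * compile-time enums directly; for instance the plane 'type' read through
 * drm_property_get_value() is compared against WDRM_PLANE_TYPE_PRIMARY and
 * WDRM_PLANE_TYPE_OVERLAY in drm_output_assign_state() below, regardless of
 * which raw values this particular driver assigned to those enum names. */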

/**
 * Free DRM property information
 *
 * Frees all memory associated with a DRM property info array and zeroes
 * it out, leaving it usable for a further drm_property_info_update() or
 * drm_property_info_free().
 *
 * @param info DRM property info array
 * @param num_props Number of entries in array to free
 */
void
drm_property_info_free(struct drm_property_info *info, int num_props)
{
	int i;

	for (i = 0; i < num_props; i++)
		free(info[i].enum_values);

	memset(info, 0, sizeof(*info) * num_props);
}

static inline uint32_t *
formats_ptr(struct drm_format_modifier_blob *blob)
{
	return (uint32_t *)(((char *)blob) + blob->formats_offset);
}

static inline struct drm_format_modifier *
modifiers_ptr(struct drm_format_modifier_blob *blob)
{
	return (struct drm_format_modifier *)
		(((char *)blob) + blob->modifiers_offset);
}
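/* Layout of the IN_FORMATS blob (struct drm_format_modifier_blob from
 * drm_mode.h): a header carrying count_formats/count_modifiers, followed at
 * formats_offset by a packed uint32_t FourCC array and at modifiers_offset
 * by an array of struct drm_format_modifier. The two helpers above simply
 * resolve those offsets relative to the start of the blob. */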

/**
 * Populates the plane's formats array, using either the IN_FORMATS blob
 * property (if available), or the plane's format list if not.
 */
int
drm_plane_populate_formats(struct drm_plane *plane, const drmModePlane *kplane,
			   const drmModeObjectProperties *props,
			   const bool use_modifiers)
{
	unsigned i;
	drmModePropertyBlobRes *blob;
	struct drm_format_modifier_blob *fmt_mod_blob;
	struct drm_format_modifier *blob_modifiers;
	uint32_t *blob_formats;
	uint32_t blob_id;

	if (!use_modifiers)
		goto fallback;

	blob_id = drm_property_get_value(&plane->props[WDRM_PLANE_IN_FORMATS],
					 props,
					 0);
	if (blob_id == 0)
		goto fallback;

	blob = drmModeGetPropertyBlob(plane->backend->drm.fd, blob_id);
	if (!blob)
		goto fallback;

	fmt_mod_blob = blob->data;
	blob_formats = formats_ptr(fmt_mod_blob);
	blob_modifiers = modifiers_ptr(fmt_mod_blob);

	if (plane->count_formats != fmt_mod_blob->count_formats) {
		weston_log("DRM backend: format count differs between "
			   "plane (%d) and IN_FORMATS (%d)\n",
			   plane->count_formats,
			   fmt_mod_blob->count_formats);
		weston_log("This represents a kernel bug; Weston is "
			   "unable to continue.\n");
		abort();
	}

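	/* Each drm_format_modifier entry describes one modifier and a window
	 * of up to 64 consecutive formats starting at 'offset': bit
	 * (i - offset) of the 64-bit 'formats' mask is set when the modifier
	 * is valid for format index i. The loop below collects, per format,
	 * every modifier whose window covers it and whose bit is set. */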
	for (i = 0; i < fmt_mod_blob->count_formats; i++) {
		uint32_t count_modifiers = 0;
		uint64_t *modifiers = NULL;
		unsigned j;

		for (j = 0; j < fmt_mod_blob->count_modifiers; j++) {
			struct drm_format_modifier *mod = &blob_modifiers[j];

			if ((i < mod->offset) || (i > mod->offset + 63))
				continue;
			/* Use a 64-bit constant: the mask is 64 bits wide and
			 * (i - mod->offset) may exceed 31. */
			if (!(mod->formats & (1ULL << (i - mod->offset))))
				continue;

			modifiers = realloc(modifiers,
					    (count_modifiers + 1) *
					    sizeof(modifiers[0]));
			assert(modifiers);
			modifiers[count_modifiers++] = mod->modifier;
		}

		if (count_modifiers == 0) {
			modifiers = malloc(sizeof(*modifiers));
			*modifiers = DRM_FORMAT_MOD_LINEAR;
			count_modifiers = 1;
		}

		plane->formats[i].format = blob_formats[i];
		plane->formats[i].modifiers = modifiers;
		plane->formats[i].count_modifiers = count_modifiers;
	}

	drmModeFreePropertyBlob(blob);

	return 0;

fallback:
	/* No IN_FORMATS blob available, so just use the plane's format list. */
	assert(plane->count_formats == kplane->count_formats);
	for (i = 0; i < kplane->count_formats; i++) {
		plane->formats[i].format = kplane->formats[i];
		plane->formats[i].modifiers = malloc(sizeof(uint64_t));
		plane->formats[i].modifiers[0] = DRM_FORMAT_MOD_LINEAR;
		plane->formats[i].count_modifiers = 1;
	}

	return 0;
}

void
drm_output_set_gamma(struct weston_output *output_base,
		     uint16_t size, uint16_t *r, uint16_t *g, uint16_t *b)
{
	int rc;
	struct drm_output *output = to_drm_output(output_base);
	struct drm_backend *backend =
		to_drm_backend(output->base.compositor);

	/* Reject tables that do not match the CRTC's gamma size. */
	if (output_base->gamma_size != size)
		return;

	rc = drmModeCrtcSetGamma(backend->drm.fd,
				 output->crtc_id,
				 size, r, g, b);
	if (rc)
		weston_log("set gamma failed: %s\n", strerror(errno));
}

/**
 * Mark an output state as current on the output, i.e. it has been
 * submitted to the kernel. The mode argument determines whether this
 * update will be applied synchronously (e.g. when calling drmModeSetCrtc),
 * or asynchronously (in which case we wait for events to complete).
 */
static void
drm_output_assign_state(struct drm_output_state *state,
			enum drm_state_apply_mode mode)
{
	struct drm_output *output = state->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	struct drm_plane_state *plane_state;
	struct drm_head *head;

	assert(!output->state_last);

	if (mode == DRM_STATE_APPLY_ASYNC)
		output->state_last = output->state_cur;
	else
		drm_output_state_free(output->state_cur);

	wl_list_remove(&state->link);
	wl_list_init(&state->link);
	state->pending_state = NULL;

	output->state_cur = state;

	if (b->atomic_modeset && mode == DRM_STATE_APPLY_ASYNC) {
		drm_debug(b, "\t[CRTC:%u] setting pending flip\n", output->crtc_id);
		output->atomic_complete_pending = true;
	}

	if (b->atomic_modeset &&
	    state->protection == WESTON_HDCP_DISABLE)
		wl_list_for_each(head, &output->base.head_list, base.output_link)
			weston_head_set_content_protection_status(&head->base,
								  WESTON_HDCP_DISABLE);

	/* Replace state_cur on each affected plane with the new state, being
	 * careful to dispose of orphaned (but only orphaned) previous state.
	 * If the previous state is not orphaned (still has an output_state
	 * attached), it will be disposed of by freeing the output_state. */
	wl_list_for_each(plane_state, &state->plane_list, link) {
		struct drm_plane *plane = plane_state->plane;

		if (plane->state_cur && !plane->state_cur->output_state)
			drm_plane_state_free(plane->state_cur, true);
		plane->state_cur = plane_state;

		if (mode != DRM_STATE_APPLY_ASYNC) {
			plane_state->complete = true;
			continue;
		}

		if (b->atomic_modeset)
			continue;

		assert(plane->type != WDRM_PLANE_TYPE_OVERLAY);
		if (plane->type == WDRM_PLANE_TYPE_PRIMARY)
			output->page_flip_pending = true;
	}
}

static void
drm_output_set_cursor(struct drm_output_state *output_state)
{
	struct drm_output *output = output_state->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	struct drm_plane *plane = output->cursor_plane;
	struct drm_plane_state *state;
	uint32_t handle;

	if (!plane)
		return;

	state = drm_output_state_get_existing_plane(output_state, plane);
	if (!state)
		return;

	if (!state->fb) {
		pixman_region32_fini(&plane->base.damage);
		pixman_region32_init(&plane->base.damage);
		drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
		return;
	}

	assert(state->fb == output->gbm_cursor_fb[output->current_cursor]);
	assert(!plane->state_cur->output || plane->state_cur->output == output);

	handle = output->gbm_cursor_handle[output->current_cursor];
	if (plane->state_cur->fb != state->fb) {
		if (drmModeSetCursor(b->drm.fd, output->crtc_id, handle,
				     b->cursor_width, b->cursor_height)) {
			weston_log("failed to set cursor: %s\n",
				   strerror(errno));
			goto err;
		}
	}

	pixman_region32_fini(&plane->base.damage);
	pixman_region32_init(&plane->base.damage);

	if (drmModeMoveCursor(b->drm.fd, output->crtc_id,
			      state->dest_x, state->dest_y)) {
		weston_log("failed to move cursor: %s\n", strerror(errno));
		goto err;
	}

	return;

err:
	b->cursors_are_broken = true;
	drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
}

static int
drm_output_apply_state_legacy(struct drm_output_state *state)
{
	struct drm_output *output = state->output;
	struct drm_backend *backend = to_drm_backend(output->base.compositor);
	struct drm_plane *scanout_plane = output->scanout_plane;
	struct drm_property_info *dpms_prop;
	struct drm_plane_state *scanout_state;
	struct drm_mode *mode;
	struct drm_head *head;
	const struct pixel_format_info *pinfo = NULL;
	uint32_t connectors[MAX_CLONED_CONNECTORS];
	int n_conn = 0;
	struct timespec now;
	int ret = 0;

	wl_list_for_each(head, &output->base.head_list, base.output_link) {
		assert(n_conn < MAX_CLONED_CONNECTORS);
		connectors[n_conn++] = head->connector_id;
	}

	/* If disable_planes is set then assign_planes() wasn't
	 * called for this render, so we could still have a stale
	 * cursor plane set up.
	 */
	if (output->base.disable_planes) {
		output->cursor_view = NULL;
		if (output->cursor_plane) {
			output->cursor_plane->base.x = INT32_MIN;
			output->cursor_plane->base.y = INT32_MIN;
		}
	}

	if (state->dpms != WESTON_DPMS_ON) {
		if (output->cursor_plane) {
			ret = drmModeSetCursor(backend->drm.fd, output->crtc_id,
					       0, 0, 0);
			if (ret)
				weston_log("drmModeSetCursor failed disable: %s\n",
					   strerror(errno));
		}

		ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id, 0, 0, 0,
				     NULL, 0, NULL);
		if (ret)
			weston_log("drmModeSetCrtc failed disabling: %s\n",
				   strerror(errno));

		drm_output_assign_state(state, DRM_STATE_APPLY_SYNC);
		weston_compositor_read_presentation_clock(output->base.compositor, &now);
		LOG_PASS();
		drm_output_update_complete(output,
					   WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION,
					   now.tv_sec, now.tv_nsec / 1000);

		return 0;
	}

	scanout_state =
		drm_output_state_get_existing_plane(state, scanout_plane);

	/* The legacy SetCrtc API doesn't allow us to do scaling, and the
	 * legacy PageFlip API doesn't allow us to do clipping either. */
	assert(scanout_state->src_x == 0);
	assert(scanout_state->src_y == 0);
	assert(scanout_state->src_w ==
		(unsigned) (output->base.current_mode->width << 16));
	assert(scanout_state->src_h ==
		(unsigned) (output->base.current_mode->height << 16));
	assert(scanout_state->dest_x == 0);
	assert(scanout_state->dest_y == 0);
	assert(scanout_state->dest_w == scanout_state->src_w >> 16);
	assert(scanout_state->dest_h == scanout_state->src_h >> 16);
	/* The legacy SetCrtc API doesn't support fences */
	assert(scanout_state->in_fence_fd == -1);

	mode = to_drm_mode(output->base.current_mode);
	if (backend->state_invalid ||
	    !scanout_plane->state_cur->fb ||
	    scanout_plane->state_cur->fb->strides[0] !=
	    scanout_state->fb->strides[0]) {

		ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id,
				     scanout_state->fb->fb_id,
				     0, 0,
				     connectors, n_conn,
				     &mode->mode_info);
		if (ret) {
			weston_log("set mode failed: %s\n", strerror(errno));
			goto err;
		}
	}

	pinfo = scanout_state->fb->format;
	drm_debug(backend, "\t[CRTC:%u, PLANE:%u] FORMAT: %s\n",
		  output->crtc_id, scanout_state->plane->plane_id,
		  pinfo ? pinfo->drm_format_name : "UNKNOWN");

	if (drmModePageFlip(backend->drm.fd, output->crtc_id,
			    scanout_state->fb->fb_id,
			    DRM_MODE_PAGE_FLIP_EVENT, output) < 0) {
		weston_log("queueing pageflip failed: %s\n", strerror(errno));
		goto err;
	}

	assert(!output->page_flip_pending);

	if (output->pageflip_timer)
		wl_event_source_timer_update(output->pageflip_timer,
					     backend->pageflip_timeout);

	drm_output_set_cursor(state);

	if (state->dpms != output->state_cur->dpms) {
		wl_list_for_each(head, &output->base.head_list, base.output_link) {
			dpms_prop = &head->props_conn[WDRM_CONNECTOR_DPMS];
			if (dpms_prop->prop_id == 0)
				continue;

			ret = drmModeConnectorSetProperty(backend->drm.fd,
							  head->connector_id,
							  dpms_prop->prop_id,
							  state->dpms);
			if (ret) {
				weston_log("DRM: DPMS: failed property set for %s\n",
					   head->base.name);
			}
		}
	}

	drm_output_assign_state(state, DRM_STATE_APPLY_ASYNC);

	return 0;

err:
	output->cursor_view = NULL;
	drm_output_state_free(state);
	return -1;
}

static int
crtc_add_prop(drmModeAtomicReq *req, struct drm_output *output,
	      enum wdrm_crtc_property prop, uint64_t val)
{
	struct drm_property_info *info = &output->props_crtc[prop];
	int ret;

	if (info->prop_id == 0)
		return -1;

	ret = drmModeAtomicAddProperty(req, output->crtc_id, info->prop_id,
				       val);
	drm_debug(output->backend, "\t\t\t[CRTC:%lu] %lu (%s) -> %llu (0x%llx)\n",
		  (unsigned long) output->crtc_id,
		  (unsigned long) info->prop_id, info->name,
		  (unsigned long long) val, (unsigned long long) val);
	return (ret <= 0) ? -1 : 0;
}

static int
connector_add_prop(drmModeAtomicReq *req, struct drm_head *head,
		   enum wdrm_connector_property prop, uint64_t val)
{
	struct drm_property_info *info = &head->props_conn[prop];
	int ret;

	if (info->prop_id == 0)
		return -1;

	ret = drmModeAtomicAddProperty(req, head->connector_id,
				       info->prop_id, val);
	drm_debug(head->backend, "\t\t\t[CONN:%lu] %lu (%s) -> %llu (0x%llx)\n",
		  (unsigned long) head->connector_id,
		  (unsigned long) info->prop_id, info->name,
		  (unsigned long long) val, (unsigned long long) val);
	return (ret <= 0) ? -1 : 0;
}

static int
plane_add_prop(drmModeAtomicReq *req, struct drm_plane *plane,
	       enum wdrm_plane_property prop, uint64_t val)
{
	struct drm_property_info *info = &plane->props[prop];
	int ret;

	if (info->prop_id == 0)
		return -1;

	ret = drmModeAtomicAddProperty(req, plane->plane_id, info->prop_id,
				       val);
	drm_debug(plane->backend, "\t\t\t[PLANE:%lu] %lu (%s) -> %llu (0x%llx)\n",
		  (unsigned long) plane->plane_id,
		  (unsigned long) info->prop_id, info->name,
		  (unsigned long long) val, (unsigned long long) val);
	return (ret <= 0) ? -1 : 0;
}

static bool
drm_head_has_prop(struct drm_head *head,
		  enum wdrm_connector_property prop)
{
	if (head && head->props_conn[prop].prop_id != 0)
		return true;

	return false;
}

/*
 * This function converts protection requests from weston_hdcp_protection
 * into the corresponding DRM values. These values can be set in the
 * "Content Protection" & "HDCP Content Type" connector properties.
 */
static void
get_drm_protection_from_weston(enum weston_hdcp_protection weston_protection,
			       enum wdrm_content_protection_state *drm_protection,
			       enum wdrm_hdcp_content_type *drm_cp_type)
{

	switch (weston_protection) {
	case WESTON_HDCP_DISABLE:
		*drm_protection = WDRM_CONTENT_PROTECTION_UNDESIRED;
		*drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
		break;
	case WESTON_HDCP_ENABLE_TYPE_0:
		*drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
		*drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
		break;
	case WESTON_HDCP_ENABLE_TYPE_1:
		*drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
		*drm_cp_type = WDRM_HDCP_CONTENT_TYPE1;
		break;
	default:
		assert(0 && "bad weston_hdcp_protection");
	}
}

static void
drm_head_set_hdcp_property(struct drm_head *head,
			   enum weston_hdcp_protection protection,
			   drmModeAtomicReq *req)
{
	int ret;
	enum wdrm_content_protection_state drm_protection;
	enum wdrm_hdcp_content_type drm_cp_type;
	struct drm_property_enum_info *enum_info;
	uint64_t prop_val;

	get_drm_protection_from_weston(protection, &drm_protection,
				       &drm_cp_type);

	if (!drm_head_has_prop(head, WDRM_CONNECTOR_CONTENT_PROTECTION))
		return;

	/*
	 * The content-type property is not exposed on platforms that do not
	 * support HDCP 2.2, so type-1 content cannot be supported there.
	 * Type-0 content can still be supported as long as the
	 * content-protection property is exposed.
	 */
	if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE) &&
	    drm_cp_type != WDRM_HDCP_CONTENT_TYPE0)
		return;

	enum_info = head->props_conn[WDRM_CONNECTOR_CONTENT_PROTECTION].enum_values;
	prop_val = enum_info[drm_protection].value;
	ret = connector_add_prop(req, head, WDRM_CONNECTOR_CONTENT_PROTECTION,
				 prop_val);
	assert(ret == 0);

	if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE))
		return;

	enum_info = head->props_conn[WDRM_CONNECTOR_HDCP_CONTENT_TYPE].enum_values;
	prop_val = enum_info[drm_cp_type].value;
	ret = connector_add_prop(req, head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE,
				 prop_val);
	assert(ret == 0);
}

static int
drm_output_apply_state_atomic(struct drm_output_state *state,
			      drmModeAtomicReq *req,
			      uint32_t *flags)
{
	struct drm_output *output = state->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	struct drm_plane_state *plane_state;
	struct drm_mode *current_mode = to_drm_mode(output->base.current_mode);
	struct drm_head *head;
	int ret = 0;

	drm_debug(b, "\t\t[atomic] %s output %lu (%s) state\n",
		  (*flags & DRM_MODE_ATOMIC_TEST_ONLY) ? "testing" : "applying",
		  (unsigned long) output->base.id, output->base.name);

	if (state->dpms != output->state_cur->dpms) {
		drm_debug(b, "\t\t\t[atomic] DPMS state differs, modeset OK\n");
		*flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
	}

	if (state->dpms == WESTON_DPMS_ON) {
		ret = drm_mode_ensure_blob(b, current_mode);
		if (ret != 0)
			return ret;

		ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID,
				     current_mode->blob_id);
		ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 1);

		/* No need for the DPMS property, since it is implicit in
		 * routing and CRTC activity. */
		wl_list_for_each(head, &output->base.head_list, base.output_link) {
			ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID,
						  output->crtc_id);
		}
	} else {
		ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID, 0);
		ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 0);

		/* No need for the DPMS property, since it is implicit in
		 * routing and CRTC activity. */
		wl_list_for_each(head, &output->base.head_list, base.output_link)
			ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID, 0);
	}

	wl_list_for_each(head, &output->base.head_list, base.output_link)
		drm_head_set_hdcp_property(head, state->protection, req);

	if (ret != 0) {
		weston_log("couldn't set atomic CRTC/connector state\n");
		return ret;
	}

	wl_list_for_each(plane_state, &state->plane_list, link) {
		struct drm_plane *plane = plane_state->plane;
		const struct pixel_format_info *pinfo = NULL;

		ret |= plane_add_prop(req, plane, WDRM_PLANE_FB_ID,
				      plane_state->fb ? plane_state->fb->fb_id : 0);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID,
				      plane_state->fb ? output->crtc_id : 0);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_X,
				      plane_state->src_x);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_Y,
				      plane_state->src_y);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_W,
				      plane_state->src_w);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_H,
				      plane_state->src_h);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_X,
				      plane_state->dest_x);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_Y,
				      plane_state->dest_y);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_W,
				      plane_state->dest_w);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_H,
				      plane_state->dest_h);
		if (plane->props[WDRM_PLANE_FB_DAMAGE_CLIPS].prop_id != 0)
			ret |= plane_add_prop(req, plane, WDRM_PLANE_FB_DAMAGE_CLIPS,
					      plane_state->damage_blob_id);

		if (plane_state->fb && plane_state->fb->format)
			pinfo = plane_state->fb->format;

		drm_debug(plane->backend, "\t\t\t[PLANE:%lu] FORMAT: %s\n",
			  (unsigned long) plane->plane_id,
			  pinfo ? pinfo->drm_format_name : "UNKNOWN");

		if (plane_state->in_fence_fd >= 0) {
			ret |= plane_add_prop(req, plane,
					      WDRM_PLANE_IN_FENCE_FD,
					      plane_state->in_fence_fd);
		}

		/* Note: 'invented' zpos values are exposed as immutable
		 * (zpos_min == zpos_max), so they are not set here. */
		if (plane_state->zpos != DRM_PLANE_ZPOS_INVALID_PLANE &&
		    plane_state->plane->zpos_min != plane_state->plane->zpos_max)
			ret |= plane_add_prop(req, plane,
					      WDRM_PLANE_ZPOS,
					      plane_state->zpos);

		if (ret != 0) {
			weston_log("couldn't set plane state\n");
			return ret;
		}
	}

	return 0;
}

/**
 * Helper function used only by drm_pending_state_apply, with the same
 * guarantees and constraints as that function.
 */
static int
drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
			       enum drm_state_apply_mode mode)
{
	struct drm_backend *b = pending_state->backend;
	struct drm_output_state *output_state, *tmp;
	struct drm_plane *plane;
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	uint32_t flags;
	int ret = 0;

	if (!req)
		return -1;

	switch (mode) {
	case DRM_STATE_APPLY_SYNC:
		flags = 0;
		break;
	case DRM_STATE_APPLY_ASYNC:
		flags = DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
		break;
	case DRM_STATE_TEST_ONLY:
		flags = DRM_MODE_ATOMIC_TEST_ONLY;
		break;
	}

	if (b->state_invalid) {
		struct weston_head *head_base;
		struct drm_head *head;
		uint32_t *unused;
		int err;

		drm_debug(b, "\t\t[atomic] previous state invalid; "
			     "starting with fresh state\n");

		/* If we need to reset all our state (e.g. because we've
		 * just started, or just been VT-switched in), explicitly
		 * disable all the CRTCs and connectors we aren't using. */
		wl_list_for_each(head_base,
				 &b->compositor->head_list, compositor_link) {
			struct drm_property_info *info;

			if (weston_head_is_enabled(head_base))
				continue;

			head = to_drm_head(head_base);

			drm_debug(b, "\t\t[atomic] disabling inactive head %s\n",
				  head_base->name);

			info = &head->props_conn[WDRM_CONNECTOR_CRTC_ID];
			err = drmModeAtomicAddProperty(req, head->connector_id,
						       info->prop_id, 0);
			drm_debug(b, "\t\t\t[CONN:%lu] %lu (%s) -> 0\n",
				  (unsigned long) head->connector_id,
				  (unsigned long) info->prop_id,
				  info->name);
			if (err <= 0)
				ret = -1;
		}

		wl_array_for_each(unused, &b->unused_crtcs) {
			struct drm_property_info infos[WDRM_CRTC__COUNT];
			struct drm_property_info *info;
			drmModeObjectProperties *props;
			uint64_t active;

			memset(infos, 0, sizeof(infos));

			/* We can't emit a disable on a CRTC that's already
			 * off, as the kernel will refuse to generate an event
			 * for an off->off state and fail the commit.
			 */
			props = drmModeObjectGetProperties(b->drm.fd,
							   *unused,
							   DRM_MODE_OBJECT_CRTC);
			if (!props) {
				ret = -1;
				continue;
			}

			drm_property_info_populate(b, crtc_props, infos,
						   WDRM_CRTC__COUNT,
						   props);

			info = &infos[WDRM_CRTC_ACTIVE];
			active = drm_property_get_value(info, props, 0);
			drmModeFreeObjectProperties(props);
			if (active == 0) {
				drm_property_info_free(infos, WDRM_CRTC__COUNT);
				continue;
			}

			drm_debug(b, "\t\t[atomic] disabling unused CRTC %lu\n",
				  (unsigned long) *unused);

			drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
				  (unsigned long) *unused,
				  (unsigned long) info->prop_id, info->name);
			err = drmModeAtomicAddProperty(req, *unused,
						       info->prop_id, 0);
			if (err <= 0)
				ret = -1;

			info = &infos[WDRM_CRTC_MODE_ID];
			drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
				  (unsigned long) *unused,
				  (unsigned long) info->prop_id, info->name);
			err = drmModeAtomicAddProperty(req, *unused,
						       info->prop_id, 0);
			if (err <= 0)
				ret = -1;

			drm_property_info_free(infos, WDRM_CRTC__COUNT);
		}

		/* Disable all the planes; planes which are being used will
		 * override this state in the output-state application. */
		// OHOS: planes cannot be disabled here or the commit fails,
		// so the loop below is kept commented out.
		// wl_list_for_each(plane, &b->plane_list, link) {
		//	drm_debug(b, "\t\t[atomic] starting with plane %lu disabled\n",
		//		  (unsigned long) plane->plane_id);
		//	plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
		//	plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0);
		// }

		flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
	}

	wl_list_for_each(output_state, &pending_state->output_list, link) {
		if (output_state->output->virtual)
			continue;
		if (mode == DRM_STATE_APPLY_SYNC)
			assert(output_state->dpms == WESTON_DPMS_OFF);
		ret |= drm_output_apply_state_atomic(output_state, req, &flags);
	}

	if (ret != 0) {
		weston_log("atomic: couldn't compile atomic state\n");
		goto out;
	}

	ret = drmModeAtomicCommit(b->drm.fd, req, flags, b);
	drm_debug(b, "[atomic] drmModeAtomicCommit\n");

	/* Test commits do not take ownership of the state; return
	 * without freeing here. */
	if (mode == DRM_STATE_TEST_ONLY) {
		drmModeAtomicFree(req);
		return ret;
	}

	if (ret != 0) {
		weston_log("atomic: couldn't commit new state: %s\n",
			   strerror(errno));
		goto out;
	}

	wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
			      link)
		drm_output_assign_state(output_state, mode);

	b->state_invalid = false;

	assert(wl_list_empty(&pending_state->output_list));

out:
	drmModeAtomicFree(req);
	drm_pending_state_free(pending_state);
	return ret;
}

/**
 * Tests a pending state, to see if the kernel will accept the update as
 * constructed.
 *
 * Using atomic modesetting, the kernel performs the same checks as it would
 * on a real commit, returning success or failure without actually modifying
 * the running state. It does not return -EBUSY if there are pending updates
 * in flight, so states may be tested at any point; however, this means a
 * state which passed testing may fail on a real commit if the timing is not
 * respected (e.g. committing before the previous commit has completed).
 *
 * Without atomic modesetting, we have no way to check, so we optimistically
 * claim it will work.
 *
 * Unlike drm_pending_state_apply() and drm_pending_state_apply_sync(), this
 * function does _not_ take ownership of pending_state, nor does it clear
 * state_invalid.
 */
int
drm_pending_state_test(struct drm_pending_state *pending_state)
{
	struct drm_backend *b = pending_state->backend;

	if (b->atomic_modeset)
		return drm_pending_state_apply_atomic(pending_state,
						      DRM_STATE_TEST_ONLY);

	/* We have no way to test state before application on the legacy
	 * modesetting API, so just claim it succeeded. */
	return 0;
}

/**
 * Applies all of a pending_state asynchronously: the primary entry point for
 * applying KMS state to a device. Updates the state for all outputs in the
 * pending_state, as well as disabling any unclaimed outputs.
 *
 * Unconditionally takes ownership of pending_state, and clears state_invalid.
 */
int
drm_pending_state_apply(struct drm_pending_state *pending_state)
{
	struct drm_backend *b = pending_state->backend;
	struct drm_output_state *output_state, *tmp;
	uint32_t *unused;

	if (b->atomic_modeset)
		return drm_pending_state_apply_atomic(pending_state,
						      DRM_STATE_APPLY_ASYNC);

	if (b->state_invalid) {
		/* If we need to reset all our state (e.g. because we've
		 * just started, or just been VT-switched in), explicitly
		 * disable all the CRTCs we aren't using. This also disables
		 * all connectors on these CRTCs, so we don't need to do that
		 * separately with the pre-atomic API. */
		wl_array_for_each(unused, &b->unused_crtcs)
			drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
				       NULL);
	}

	wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
			      link) {
		struct drm_output *output = output_state->output;
		int ret;

		if (output->virtual) {
			drm_output_assign_state(output_state,
						DRM_STATE_APPLY_ASYNC);
			continue;
		}

		ret = drm_output_apply_state_legacy(output_state);
		if (ret != 0) {
			weston_log("Couldn't apply state for output %s\n",
				   output->base.name);
		}
	}

	b->state_invalid = false;

	assert(wl_list_empty(&pending_state->output_list));

	drm_pending_state_free(pending_state);

	return 0;
}

/**
 * The synchronous version of drm_pending_state_apply. May only be used to
 * disable outputs. Does so synchronously: the request is guaranteed to have
 * completed on return, and the output will not be touched afterwards.
 *
 * Unconditionally takes ownership of pending_state, and clears state_invalid.
 */
int
drm_pending_state_apply_sync(struct drm_pending_state *pending_state)
{
	struct drm_backend *b = pending_state->backend;
	struct drm_output_state *output_state, *tmp;
	uint32_t *unused;

	if (b->atomic_modeset)
		return drm_pending_state_apply_atomic(pending_state,
						      DRM_STATE_APPLY_SYNC);

	if (b->state_invalid) {
		/* If we need to reset all our state (e.g. because we've
		 * just started, or just been VT-switched in), explicitly
		 * disable all the CRTCs we aren't using. This also disables
		 * all connectors on these CRTCs, so we don't need to do that
		 * separately with the pre-atomic API. */
		wl_array_for_each(unused, &b->unused_crtcs)
			drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
				       NULL);
	}

	wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
			      link) {
		int ret;

		assert(output_state->dpms == WESTON_DPMS_OFF);
		ret = drm_output_apply_state_legacy(output_state);
		if (ret != 0) {
			weston_log("Couldn't apply state for output %s\n",
				   output_state->output->base.name);
		}
	}

	b->state_invalid = false;

	assert(wl_list_empty(&pending_state->output_list));

	drm_pending_state_free(pending_state);

	return 0;
}
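/* The KMS vblank sequence number delivered with flip events is only 32 bits
 * wide, while weston_output::msc is 64 bits. drm_output_update_msc() below
 * extends the counter by detecting wrap-around: when the new sequence is
 * smaller than the low 32 bits of the current MSC, the high word is bumped.
 * For example, msc = 0x1fffffff0 followed by seq = 5 yields 0x200000005. */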
void
drm_output_update_msc(struct drm_output *output, unsigned int seq)
{
	uint64_t msc_hi = output->base.msc >> 32;

	if (seq < (output->base.msc & 0xffffffff))
		msc_hi++;

	output->base.msc = (msc_hi << 32) + seq;
}

static void
page_flip_handler(int fd, unsigned int frame,
		  unsigned int sec, unsigned int usec, void *data)
{
	struct drm_output *output = data;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
			 WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
			 WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;

	drm_output_update_msc(output, frame);

	assert(!b->atomic_modeset);
	assert(output->page_flip_pending);
	output->page_flip_pending = false;

	LOG_PASS();
	drm_output_update_complete(output, flags, sec, usec);
}

static void
atomic_flip_handler(int fd, unsigned int frame, unsigned int sec,
		    unsigned int usec, unsigned int crtc_id, void *data)
{
	struct drm_backend *b = data;
	struct drm_output *output = drm_output_find_by_crtc(b, crtc_id);
	uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
			 WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
			 WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;

	/* During the initial modeset, we can disable CRTCs which we don't
	 * actually handle during normal operation; this will give us events
	 * for unknown outputs. Ignore them. */
	if (!output || !output->base.enabled)
		return;

	drm_output_update_msc(output, frame);

	drm_debug(b, "[atomic][CRTC:%u] flip processing started\n", crtc_id);
	assert(b->atomic_modeset);
	assert(output->atomic_complete_pending);
	output->atomic_complete_pending = false;

	LOG_PASS();
	drm_output_update_complete(output, flags, sec, usec);
	drm_debug(b, "[atomic][CRTC:%u] flip processing completed\n", crtc_id);
}
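/* The atomic path registers page_flip_handler2, which additionally reports
 * the CRTC the event belongs to; that callback slot only exists from
 * drmEventContext version 3 onwards, which is why on_drm_input() sets
 * evctx.version = 3 before calling drmHandleEvent(). */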
int
on_drm_input(int fd, uint32_t mask, void *data)
{
	struct drm_backend *b = data;
	drmEventContext evctx;

	memset(&evctx, 0, sizeof evctx);
	evctx.version = 3;
	if (b->atomic_modeset)
		evctx.page_flip_handler2 = atomic_flip_handler;
	else
		evctx.page_flip_handler = page_flip_handler;
	drmHandleEvent(fd, &evctx);

	return 1;
}

int
init_kms_caps(struct drm_backend *b)
{
	uint64_t cap;
	int ret;
	clockid_t clk_id;

	weston_log("using %s\n", b->drm.filename);

	ret = drmGetCap(b->drm.fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap);
	if (ret == 0 && cap == 1)
		clk_id = CLOCK_MONOTONIC;
	else
		clk_id = CLOCK_REALTIME;

	if (weston_compositor_set_presentation_clock(b->compositor, clk_id) < 0) {
		weston_log("Error: failed to set presentation clock %d.\n",
			   clk_id);
		return -1;
	}

	ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_WIDTH, &cap);
	if (ret == 0)
		b->cursor_width = cap;
	else
		b->cursor_width = 64;

	ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_HEIGHT, &cap);
	if (ret == 0)
		b->cursor_height = cap;
	else
		b->cursor_height = 64;

	if (!getenv("WESTON_DISABLE_UNIVERSAL_PLANES")) {
		ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
		b->universal_planes = (ret == 0);
	}

	/* OHOS fix: disable universal planes when TDE is not in use. */
	if (!b->use_tde) {
		b->universal_planes = 0;
	}
	weston_log("b->universal_planes: %{public}d\n", b->universal_planes);

	if (b->universal_planes && !getenv("WESTON_DISABLE_ATOMIC")) {
		ret = drmGetCap(b->drm.fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap);
		if (ret != 0)
			cap = 0;
		ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ATOMIC, 1);
		b->atomic_modeset = ((ret == 0) && (cap == 1));
	}
	weston_log("DRM: %s atomic modesetting\n",
		   b->atomic_modeset ? "supports" : "does not support");

	if (!getenv("WESTON_DISABLE_GBM_MODIFIERS")) {
		ret = drmGetCap(b->drm.fd, DRM_CAP_ADDFB2_MODIFIERS, &cap);
		if (ret == 0)
			b->fb_modifiers = cap;
	}
	weston_log("DRM: %s GBM modifiers\n",
		   b->fb_modifiers ? "supports" : "does not support");

	/*
	 * KMS support for hardware planes cannot properly synchronize
	 * without nuclear page flip. Without nuclear/atomic, hw plane
	 * and cursor plane updates would either tear or cause extra
	 * waits for vblanks which means dropping the compositor framerate
	 * to a fraction. For cursors, it's not so bad, so they are
	 * enabled.
	 */
	if (!b->atomic_modeset || getenv("WESTON_FORCE_RENDERER"))
		b->sprites_are_broken = true;

	ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);
	b->aspect_ratio_supported = (ret == 0);
	weston_log("DRM: %s picture aspect ratio\n",
		   b->aspect_ratio_supported ? "supports" : "does not support");

	return 0;
}