1 /*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_plane_helper.h>
31 #include <drm/drm_crtc_helper.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_writeback.h>
34 #include <linux/dma-fence.h>
35
36 #include "drm_crtc_helper_internal.h"
37 #include "drm_crtc_internal.h"
38
39 /**
40 * DOC: overview
41 *
42 * This helper library provides implementations of check and commit functions on
43 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
44 * also provides convenience implementations for the atomic state handling
45 * callbacks for drivers which don't need to subclass the drm core structures to
46 * add their own additional internal state.
47 *
48 * This library also provides default implementations for the check callback in
49 * drm_atomic_helper_check() and for the commit callback with
50 * drm_atomic_helper_commit(). But the individual stages and callbacks are
51 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
52 * together with a driver private modeset implementation.
53 *
54 * This library also provides implementations for all the legacy driver
55 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
56 * drm_atomic_helper_disable_plane() and the
57 * various functions to implement set_property callbacks. New drivers must not
58 * implement these functions themselves but must use the provided helpers.
59 *
60 * The atomic helper uses the same function table structures as all other
61 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
62 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
63 * also shares the &struct drm_plane_helper_funcs function table with the plane
64 * helpers.
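 *
 * As a rough sketch (the foo_ prefix is a made-up driver name), a driver that
 * needs no additional global checking can wire these helpers straight into its
 * &drm_mode_config_funcs::
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 *
 * drm_gem_fb_create() here assumes a GEM based driver; drivers with their own
 * framebuffer handling plug in their own &drm_mode_config_funcs.fb_create.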
65 */
66 static void
67 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
68 struct drm_plane_state *old_plane_state,
69 struct drm_plane_state *plane_state,
70 struct drm_plane *plane)
71 {
72 struct drm_crtc_state *crtc_state;
73
74 if (old_plane_state->crtc) {
75 crtc_state = drm_atomic_get_new_crtc_state(state,
76 old_plane_state->crtc);
77
78 if (WARN_ON(!crtc_state))
79 return;
80
81 crtc_state->planes_changed = true;
82 }
83
84 if (plane_state->crtc) {
85 crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
86
87 if (WARN_ON(!crtc_state))
88 return;
89
90 crtc_state->planes_changed = true;
91 }
92 }
93
94 static int handle_conflicting_encoders(struct drm_atomic_state *state,
95 bool disable_conflicting_encoders)
96 {
97 struct drm_connector_state *new_conn_state;
98 struct drm_connector *connector;
99 struct drm_connector_list_iter conn_iter;
100 struct drm_encoder *encoder;
101 unsigned encoder_mask = 0;
102 int i, ret = 0;
103
104 /*
105 * First loop, find all newly assigned encoders from the connectors
106 * part of the state. If the same encoder is assigned to multiple
107 * connectors bail out.
108 */
109 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
110 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
111 struct drm_encoder *new_encoder;
112
113 if (!new_conn_state->crtc)
114 continue;
115
116 if (funcs->atomic_best_encoder)
117 new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
118 else if (funcs->best_encoder)
119 new_encoder = funcs->best_encoder(connector);
120 else
121 new_encoder = drm_atomic_helper_best_encoder(connector);
122
123 if (new_encoder) {
124 if (encoder_mask & drm_encoder_mask(new_encoder)) {
125 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
126 new_encoder->base.id, new_encoder->name,
127 connector->base.id, connector->name);
128
129 return -EINVAL;
130 }
131
132 encoder_mask |= drm_encoder_mask(new_encoder);
133 }
134 }
135
136 if (!encoder_mask)
137 return 0;
138
139 /*
140 * Second loop, iterate over all connectors not part of the state.
141 *
142 * If a conflicting encoder is found and disable_conflicting_encoders
143 * is not set, an error is returned. Userspace can provide a solution
144 * through the atomic ioctl.
145 *
146 * If the flag is set conflicting connectors are removed from the crtc
147 * and the crtc is disabled if no encoder is left. This preserves
148 * compatibility with the legacy set_config behavior.
149 */
150 drm_connector_list_iter_begin(state->dev, &conn_iter);
151 drm_for_each_connector_iter(connector, &conn_iter) {
152 struct drm_crtc_state *crtc_state;
153
154 if (drm_atomic_get_new_connector_state(state, connector))
155 continue;
156
157 encoder = connector->state->best_encoder;
158 if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
159 continue;
160
161 if (!disable_conflicting_encoders) {
162 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
163 encoder->base.id, encoder->name,
164 connector->state->crtc->base.id,
165 connector->state->crtc->name,
166 connector->base.id, connector->name);
167 ret = -EINVAL;
168 goto out;
169 }
170
171 new_conn_state = drm_atomic_get_connector_state(state, connector);
172 if (IS_ERR(new_conn_state)) {
173 ret = PTR_ERR(new_conn_state);
174 goto out;
175 }
176
177 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
178 encoder->base.id, encoder->name,
179 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
180 connector->base.id, connector->name);
181
182 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
183
184 ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
185 if (ret)
186 goto out;
187
188 if (!crtc_state->connector_mask) {
189 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
190 NULL);
191 if (ret < 0)
192 goto out;
193
194 crtc_state->active = false;
195 }
196 }
197 out:
198 drm_connector_list_iter_end(&conn_iter);
199
200 return ret;
201 }
202
203 static void
204 set_best_encoder(struct drm_atomic_state *state,
205 struct drm_connector_state *conn_state,
206 struct drm_encoder *encoder)
207 {
208 struct drm_crtc_state *crtc_state;
209 struct drm_crtc *crtc;
210
211 if (conn_state->best_encoder) {
212 /* Unset the encoder_mask in the old crtc state. */
213 crtc = conn_state->connector->state->crtc;
214
215 /* A NULL crtc is an error here because we should have
216 * duplicated a NULL best_encoder when crtc was NULL.
217 * As an exception restoring duplicated atomic state
218 * during resume is allowed, so don't warn when
219 * best_encoder is equal to the encoder we intend to set.
220 */
221 WARN_ON(!crtc && encoder != conn_state->best_encoder);
222 if (crtc) {
223 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
224
225 crtc_state->encoder_mask &=
226 ~drm_encoder_mask(conn_state->best_encoder);
227 }
228 }
229
230 if (encoder) {
231 crtc = conn_state->crtc;
232 WARN_ON(!crtc);
233 if (crtc) {
234 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
235
236 crtc_state->encoder_mask |=
237 drm_encoder_mask(encoder);
238 }
239 }
240
241 conn_state->best_encoder = encoder;
242 }
243
244 static void
245 steal_encoder(struct drm_atomic_state *state,
246 struct drm_encoder *encoder)
247 {
248 struct drm_crtc_state *crtc_state;
249 struct drm_connector *connector;
250 struct drm_connector_state *old_connector_state, *new_connector_state;
251 int i;
252
253 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
254 struct drm_crtc *encoder_crtc;
255
256 if (new_connector_state->best_encoder != encoder)
257 continue;
258
259 encoder_crtc = old_connector_state->crtc;
260
261 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
262 encoder->base.id, encoder->name,
263 encoder_crtc->base.id, encoder_crtc->name);
264
265 set_best_encoder(state, new_connector_state, NULL);
266
267 crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
268 crtc_state->connectors_changed = true;
269
270 return;
271 }
272 }
273
274 static int
275 update_connector_routing(struct drm_atomic_state *state,
276 struct drm_connector *connector,
277 struct drm_connector_state *old_connector_state,
278 struct drm_connector_state *new_connector_state)
279 {
280 const struct drm_connector_helper_funcs *funcs;
281 struct drm_encoder *new_encoder;
282 struct drm_crtc_state *crtc_state;
283
284 DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
285 connector->base.id,
286 connector->name);
287
288 if (old_connector_state->crtc != new_connector_state->crtc) {
289 if (old_connector_state->crtc) {
290 crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
291 crtc_state->connectors_changed = true;
292 }
293
294 if (new_connector_state->crtc) {
295 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
296 crtc_state->connectors_changed = true;
297 }
298 }
299
300 if (!new_connector_state->crtc) {
301 DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
302 connector->base.id,
303 connector->name);
304
305 set_best_encoder(state, new_connector_state, NULL);
306
307 return 0;
308 }
309
310 funcs = connector->helper_private;
311
312 if (funcs->atomic_best_encoder)
313 new_encoder = funcs->atomic_best_encoder(connector,
314 new_connector_state);
315 else if (funcs->best_encoder)
316 new_encoder = funcs->best_encoder(connector);
317 else
318 new_encoder = drm_atomic_helper_best_encoder(connector);
319
320 if (!new_encoder) {
321 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
322 connector->base.id,
323 connector->name);
324 return -EINVAL;
325 }
326
327 if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
328 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
329 new_encoder->base.id,
330 new_encoder->name,
331 new_connector_state->crtc->base.id,
332 new_connector_state->crtc->name);
333 return -EINVAL;
334 }
335
336 if (new_encoder == new_connector_state->best_encoder) {
337 set_best_encoder(state, new_connector_state, new_encoder);
338
339 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
340 connector->base.id,
341 connector->name,
342 new_encoder->base.id,
343 new_encoder->name,
344 new_connector_state->crtc->base.id,
345 new_connector_state->crtc->name);
346
347 return 0;
348 }
349
350 steal_encoder(state, new_encoder);
351
352 set_best_encoder(state, new_connector_state, new_encoder);
353
354 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
355 crtc_state->connectors_changed = true;
356
357 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
358 connector->base.id,
359 connector->name,
360 new_encoder->base.id,
361 new_encoder->name,
362 new_connector_state->crtc->base.id,
363 new_connector_state->crtc->name);
364
365 return 0;
366 }
367
368 static int
369 mode_fixup(struct drm_atomic_state *state)
370 {
371 struct drm_crtc *crtc;
372 struct drm_crtc_state *new_crtc_state;
373 struct drm_connector *connector;
374 struct drm_connector_state *new_conn_state;
375 int i;
376 int ret;
377
378 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
379 if (!new_crtc_state->mode_changed &&
380 !new_crtc_state->connectors_changed)
381 continue;
382
383 drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
384 }
385
386 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
387 const struct drm_encoder_helper_funcs *funcs;
388 struct drm_encoder *encoder;
389
390 WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
391
392 if (!new_conn_state->crtc || !new_conn_state->best_encoder)
393 continue;
394
395 new_crtc_state =
396 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
397
398 /*
399 * Each encoder has at most one connector (since we always steal
400 * it away), so we won't call ->mode_fixup twice.
401 */
402 encoder = new_conn_state->best_encoder;
403 funcs = encoder->helper_private;
404
405 ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
406 &new_crtc_state->adjusted_mode);
407 if (!ret) {
408 DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
409 return -EINVAL;
410 }
411
412 if (funcs && funcs->atomic_check) {
413 ret = funcs->atomic_check(encoder, new_crtc_state,
414 new_conn_state);
415 if (ret) {
416 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
417 encoder->base.id, encoder->name);
418 return ret;
419 }
420 } else if (funcs && funcs->mode_fixup) {
421 ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
422 &new_crtc_state->adjusted_mode);
423 if (!ret) {
424 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
425 encoder->base.id, encoder->name);
426 return -EINVAL;
427 }
428 }
429 }
430
431 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
432 const struct drm_crtc_helper_funcs *funcs;
433
434 if (!new_crtc_state->enable)
435 continue;
436
437 if (!new_crtc_state->mode_changed &&
438 !new_crtc_state->connectors_changed)
439 continue;
440
441 funcs = crtc->helper_private;
442 if (!funcs->mode_fixup)
443 continue;
444
445 ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
446 &new_crtc_state->adjusted_mode);
447 if (!ret) {
448 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
449 crtc->base.id, crtc->name);
450 return -EINVAL;
451 }
452 }
453
454 return 0;
455 }
456
457 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
458 struct drm_encoder *encoder,
459 struct drm_crtc *crtc,
460 struct drm_display_mode *mode)
461 {
462 enum drm_mode_status ret;
463
464 ret = drm_encoder_mode_valid(encoder, mode);
465 if (ret != MODE_OK) {
466 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
467 encoder->base.id, encoder->name);
468 return ret;
469 }
470
471 ret = drm_bridge_mode_valid(encoder->bridge, mode);
472 if (ret != MODE_OK) {
473 DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
474 return ret;
475 }
476
477 ret = drm_crtc_mode_valid(crtc, mode);
478 if (ret != MODE_OK) {
479 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
480 crtc->base.id, crtc->name);
481 return ret;
482 }
483
484 return ret;
485 }
486
487 static int
488 mode_valid(struct drm_atomic_state *state)
489 {
490 struct drm_connector_state *conn_state;
491 struct drm_connector *connector;
492 int i;
493
494 for_each_new_connector_in_state(state, connector, conn_state, i) {
495 struct drm_encoder *encoder = conn_state->best_encoder;
496 struct drm_crtc *crtc = conn_state->crtc;
497 struct drm_crtc_state *crtc_state;
498 enum drm_mode_status mode_status;
499 struct drm_display_mode *mode;
500
501 if (!crtc || !encoder)
502 continue;
503
504 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
505 if (!crtc_state)
506 continue;
507 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
508 continue;
509
510 mode = &crtc_state->mode;
511
512 mode_status = mode_valid_path(connector, encoder, crtc, mode);
513 if (mode_status != MODE_OK)
514 return -EINVAL;
515 }
516
517 return 0;
518 }
519
520 /**
521 * drm_atomic_helper_check_modeset - validate state object for modeset changes
522 * @dev: DRM device
523 * @state: the driver state object
524 *
525 * Check the state object to see if the requested state is physically possible.
526 * This does all the crtc and connector related computations for an atomic
527 * update and adds any additional connectors needed for full modesets. It calls
528 * the various per-object callbacks in the following order:
529 *
530 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
531 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
532 * 3. If it's determined a modeset is needed then all connectors on the affected
533 *    crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
534 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
535 * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
536 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
537 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
538 * This function is only called when the encoder will be part of a configured crtc,
539 * it must not be used for implementing connector property validation.
540 * If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
541 * instead.
542 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
543 *
544 * &drm_crtc_state.mode_changed is set when the input mode is changed.
545 * &drm_crtc_state.connectors_changed is set when a connector is added or
546 * removed from the crtc. &drm_crtc_state.active_changed is set when
547 * &drm_crtc_state.active changes, which is used for DPMS.
548 * See also: drm_atomic_crtc_needs_modeset()
549 *
550 * IMPORTANT:
551 *
552 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
553 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
554 * without a full modeset) _must_ call this function after that
555 * change. It is permitted to call this function multiple times for the same
556 * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
557 * upon the adjusted dotclock for fifo space allocation and watermark
558 * computation.
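 *
 * As a sketch (the foo_ name is hypothetical), a driver whose plane checks can
 * upgrade a plane update to a full modeset would re-run the modeset checks
 * from its &drm_mode_config_funcs.atomic_check hook::
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_atomic_helper_check_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_helper_check_modeset(dev, state);
 *	}
 *
 * The second drm_atomic_helper_check_modeset() call picks up any
 * &drm_crtc_state.mode_changed flags set by the plane checks.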
559 *
560 * RETURNS:
561 * Zero for success or -errno
562 */
563 int
564 drm_atomic_helper_check_modeset(struct drm_device *dev,
565 struct drm_atomic_state *state)
566 {
567 struct drm_crtc *crtc;
568 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
569 struct drm_connector *connector;
570 struct drm_connector_state *old_connector_state, *new_connector_state;
571 int i, ret;
572 unsigned connectors_mask = 0;
573
574 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
575 bool has_connectors =
576 !!new_crtc_state->connector_mask;
577
578 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
579
580 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
581 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
582 crtc->base.id, crtc->name);
583 new_crtc_state->mode_changed = true;
584 }
585
586 if (old_crtc_state->enable != new_crtc_state->enable) {
587 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
588 crtc->base.id, crtc->name);
589
590 /*
591 * For clarity this assignment is done here, but
592 * enable == 0 is only true when there are no
593 * connectors and a NULL mode.
594 *
595 * The other way around is true as well. enable != 0
596 * iff connectors are attached and a mode is set.
597 */
598 new_crtc_state->mode_changed = true;
599 new_crtc_state->connectors_changed = true;
600 }
601
602 if (old_crtc_state->active != new_crtc_state->active) {
603 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
604 crtc->base.id, crtc->name);
605 new_crtc_state->active_changed = true;
606 }
607
608 if (new_crtc_state->enable != has_connectors) {
609 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
610 crtc->base.id, crtc->name);
611
612 return -EINVAL;
613 }
614 }
615
616 ret = handle_conflicting_encoders(state, false);
617 if (ret)
618 return ret;
619
620 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
621 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
622
623 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
624
625 /*
626 * This only sets crtc->connectors_changed for routing changes;
627 * drivers must set crtc->connectors_changed themselves when
628 * connector properties need to be updated.
629 */
630 ret = update_connector_routing(state, connector,
631 old_connector_state,
632 new_connector_state);
633 if (ret)
634 return ret;
635 if (old_connector_state->crtc) {
636 new_crtc_state = drm_atomic_get_new_crtc_state(state,
637 old_connector_state->crtc);
638 if (old_connector_state->link_status !=
639 new_connector_state->link_status)
640 new_crtc_state->connectors_changed = true;
641 }
642
643 if (funcs->atomic_check)
644 ret = funcs->atomic_check(connector, new_connector_state);
645 if (ret)
646 return ret;
647
648 connectors_mask |= BIT(i);
649 }
650
651 /*
652 * After all the routing has been prepared we need to add in any
653 * connector which is itself unchanged, but whose crtc changes its
654 * configuration. This must be done before calling mode_fixup in case a
655 * crtc only changed its mode but has the same set of connectors.
656 */
657 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
658 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
659 continue;
660
661 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
662 crtc->base.id, crtc->name,
663 new_crtc_state->enable ? 'y' : 'n',
664 new_crtc_state->active ? 'y' : 'n');
665
666 ret = drm_atomic_add_affected_connectors(state, crtc);
667 if (ret != 0)
668 return ret;
669
670 ret = drm_atomic_add_affected_planes(state, crtc);
671 if (ret != 0)
672 return ret;
673 }
674
675 /*
676 * Iterate over all connectors again, to make sure atomic_check()
677 * has been called on them when a modeset is forced.
678 */
679 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
680 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
681
682 if (connectors_mask & BIT(i))
683 continue;
684
685 if (funcs->atomic_check)
686 ret = funcs->atomic_check(connector, new_connector_state);
687 if (ret)
688 return ret;
689 }
690
691 ret = mode_valid(state);
692 if (ret)
693 return ret;
694
695 return mode_fixup(state);
696 }
697 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
698
699 /**
700 * drm_atomic_helper_check_plane_state() - Check plane state for validity
701 * @plane_state: plane state to check
702 * @crtc_state: crtc state to check
703 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
704 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
705 * @can_position: is it legal to position the plane such that it
706 * doesn't cover the entire crtc? This will generally
707 * only be false for primary planes.
708 * @can_update_disabled: can the plane be updated while the crtc
709 * is disabled?
710 *
711 * Checks that a desired plane update is valid, and updates various
712 * bits of derived state (clipped coordinates etc.). Drivers that provide
713 * their own plane handling rather than helper-provided implementations may
714 * still wish to call this function to avoid duplication of error checking
715 * code.
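 *
 * A minimal sketch of a &drm_plane_helper_funcs.atomic_check hook using this
 * function for a primary-style plane that must cover the whole CRTC (the foo_
 * name is hypothetical, the scaling limits come from drm_plane_helper.h)::
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_plane_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(state->state,
 *							   state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(state, crtc_state,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   false, true);
 *	}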
716 *
717 * RETURNS:
718 * Zero if update appears valid, error code on failure
719 */
720 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
721 const struct drm_crtc_state *crtc_state,
722 int min_scale,
723 int max_scale,
724 bool can_position,
725 bool can_update_disabled)
726 {
727 struct drm_framebuffer *fb = plane_state->fb;
728 struct drm_rect *src = &plane_state->src;
729 struct drm_rect *dst = &plane_state->dst;
730 unsigned int rotation = plane_state->rotation;
731 struct drm_rect clip = {};
732 int hscale, vscale;
733
734 WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
735
736 *src = drm_plane_state_src(plane_state);
737 *dst = drm_plane_state_dest(plane_state);
738
739 if (!fb) {
740 plane_state->visible = false;
741 return 0;
742 }
743
744 /* crtc should only be NULL when disabling (i.e., !fb) */
745 if (WARN_ON(!plane_state->crtc)) {
746 plane_state->visible = false;
747 return 0;
748 }
749
750 if (!crtc_state->enable && !can_update_disabled) {
751 DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
752 return -EINVAL;
753 }
754
755 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
756
757 /* Check scaling */
758 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
759 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
760 if (hscale < 0 || vscale < 0) {
761 DRM_DEBUG_KMS("Invalid scaling of plane\n");
762 drm_rect_debug_print("src: ", &plane_state->src, true);
763 drm_rect_debug_print("dst: ", &plane_state->dst, false);
764 return -ERANGE;
765 }
766
767 if (crtc_state->enable)
768 drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
769
770 plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
771
772 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
773
774 if (!plane_state->visible)
775 /*
776 * Plane isn't visible; some drivers can handle this
777 * so we just return success here. Drivers that can't
778 * (including those that use the primary plane helper's
779 * update function) will return an error from their
780 * update_plane handler.
781 */
782 return 0;
783
784 if (!can_position && !drm_rect_equals(dst, &clip)) {
785 DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
786 drm_rect_debug_print("dst: ", dst, false);
787 drm_rect_debug_print("clip: ", &clip, false);
788 return -EINVAL;
789 }
790
791 return 0;
792 }
793 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
794
795 /**
796 * drm_atomic_helper_check_planes - validate state object for planes changes
797 * @dev: DRM device
798 * @state: the driver state object
799 *
800 * Check the state object to see if the requested state is physically possible.
801 * This does all the plane update related checks by calling into the
802 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
803 * hooks provided by the driver.
804 *
805 * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
806 * updated planes.
807 *
808 * RETURNS:
809 * Zero for success or -errno
810 */
811 int
812 drm_atomic_helper_check_planes(struct drm_device *dev,
813 struct drm_atomic_state *state)
814 {
815 struct drm_crtc *crtc;
816 struct drm_crtc_state *new_crtc_state;
817 struct drm_plane *plane;
818 struct drm_plane_state *new_plane_state, *old_plane_state;
819 int i, ret = 0;
820
821 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
822 const struct drm_plane_helper_funcs *funcs;
823
824 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
825
826 funcs = plane->helper_private;
827
828 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
829
830 if (!funcs || !funcs->atomic_check)
831 continue;
832
833 ret = funcs->atomic_check(plane, new_plane_state);
834 if (ret) {
835 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
836 plane->base.id, plane->name);
837 return ret;
838 }
839 }
840
841 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
842 const struct drm_crtc_helper_funcs *funcs;
843
844 funcs = crtc->helper_private;
845
846 if (!funcs || !funcs->atomic_check)
847 continue;
848
849 ret = funcs->atomic_check(crtc, new_crtc_state);
850 if (ret) {
851 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
852 crtc->base.id, crtc->name);
853 return ret;
854 }
855 }
856
857 return ret;
858 }
859 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
860
861 /**
862 * drm_atomic_helper_check - validate state object
863 * @dev: DRM device
864 * @state: the driver state object
865 *
866 * Check the state object to see if the requested state is physically possible.
867 * Only crtcs and planes have check callbacks, so for any additional (global)
868 * checking that a driver needs it can simply wrap that around this function.
869 * Drivers without such needs can directly use this as their
870 * &drm_mode_config_funcs.atomic_check callback.
871 *
872 * This just wraps the two parts of the state checking for planes and modeset
873 * state in the default order: First it calls drm_atomic_helper_check_modeset()
874 * and then drm_atomic_helper_check_planes(). The assumption is that the
875 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
876 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
877 * watermarks.
878 *
879 * Note that zpos normalization will add all enabled planes to the state, which
880 * might not be desired for some drivers.
881 * For example, enabling or disabling a cursor plane with a fixed zpos value
882 * would force all other enabled planes to be part of the state change.
883 *
884 * RETURNS:
885 * Zero for success or -errno
886 */
887 int drm_atomic_helper_check(struct drm_device *dev,
888 struct drm_atomic_state *state)
889 {
890 int ret;
891
892 ret = drm_atomic_helper_check_modeset(dev, state);
893 if (ret)
894 return ret;
895
896 if (dev->mode_config.normalize_zpos) {
897 ret = drm_atomic_normalize_zpos(dev, state);
898 if (ret)
899 return ret;
900 }
901
902 ret = drm_atomic_helper_check_planes(dev, state);
903 if (ret)
904 return ret;
905
906 if (state->legacy_cursor_update)
907 state->async_update = !drm_atomic_helper_async_check(dev, state);
908
909 return ret;
910 }
911 EXPORT_SYMBOL(drm_atomic_helper_check);
912
913 static void
914 disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
915 {
916 struct drm_connector *connector;
917 struct drm_connector_state *old_conn_state, *new_conn_state;
918 struct drm_crtc *crtc;
919 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
920 int i;
921
922 for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
923 const struct drm_encoder_helper_funcs *funcs;
924 struct drm_encoder *encoder;
925
926 /* Shut down everything that's in the changeset and currently
927 * still on. So we need to check the old, saved state. */
928 if (!old_conn_state->crtc)
929 continue;
930
931 old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
932
933 if (!old_crtc_state->active ||
934 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
935 continue;
936
937 encoder = old_conn_state->best_encoder;
938
939 /* We shouldn't get this far if we didn't previously have
940 * an encoder.. but WARN_ON() rather than explode.
941 */
942 if (WARN_ON(!encoder))
943 continue;
944
945 funcs = encoder->helper_private;
946
947 DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
948 encoder->base.id, encoder->name);
949
950 /*
951 * Each encoder has at most one connector (since we always steal
952 * it away), so we won't call disable hooks twice.
953 */
954 drm_bridge_disable(encoder->bridge);
955
956 /* Right function depends upon target state. */
957 if (funcs) {
958 if (new_conn_state->crtc && funcs->prepare)
959 funcs->prepare(encoder);
960 else if (funcs->disable)
961 funcs->disable(encoder);
962 else if (funcs->dpms)
963 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
964 }
965
966 drm_bridge_post_disable(encoder->bridge);
967 }
968
969 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
970 const struct drm_crtc_helper_funcs *funcs;
971 int ret;
972
973 /* Shut down everything that needs a full modeset. */
974 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
975 continue;
976
977 if (!old_crtc_state->active)
978 continue;
979
980 funcs = crtc->helper_private;
981
982 DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
983 crtc->base.id, crtc->name);
984
985
986 /* Right function depends upon target state. */
987 if (new_crtc_state->enable && funcs->prepare)
988 funcs->prepare(crtc);
989 else if (funcs->atomic_disable)
990 funcs->atomic_disable(crtc, old_crtc_state);
991 else if (funcs->disable)
992 funcs->disable(crtc);
993 else
994 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
995
996 if (!(dev->irq_enabled && dev->num_crtcs))
997 continue;
998
999 ret = drm_crtc_vblank_get(crtc);
1000 WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
1001 if (ret == 0)
1002 drm_crtc_vblank_put(crtc);
1003 }
1004 }
1005
1006 /**
1007 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1008 * @dev: DRM device
1009 * @old_state: atomic state object with old state structures
1010 *
1011 * This function updates all the various legacy modeset state pointers in
1012 * connectors, encoders and crtcs. It also updates the timestamping constants
1013 * used for precise vblank timestamps by calling
1014 * drm_calc_timestamping_constants().
1015 *
1016 * Drivers can use this for building their own atomic commit if they don't have
1017 * a pure helper-based modeset implementation.
1018 *
1019 * Since these updates are not synchronized with any locking, only code paths
1020 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1021 * legacy state filled out by this helper. In practice this means this helper and
1022 * the legacy state pointers are only really useful for transitioning an
1023 * existing driver to the atomic world.
1024 */
1025 void
1026 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1027 struct drm_atomic_state *old_state)
1028 {
1029 struct drm_connector *connector;
1030 struct drm_connector_state *old_conn_state, *new_conn_state;
1031 struct drm_crtc *crtc;
1032 struct drm_crtc_state *new_crtc_state;
1033 int i;
1034
1035 /* clear out existing links and update dpms */
1036 for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1037 if (connector->encoder) {
1038 WARN_ON(!connector->encoder->crtc);
1039
1040 connector->encoder->crtc = NULL;
1041 connector->encoder = NULL;
1042 }
1043
1044 crtc = new_conn_state->crtc;
1045 if ((!crtc && old_conn_state->crtc) ||
1046 (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1047 int mode = DRM_MODE_DPMS_OFF;
1048
1049 if (crtc && crtc->state->active)
1050 mode = DRM_MODE_DPMS_ON;
1051
1052 connector->dpms = mode;
1053 }
1054 }
1055
1056 /* set new links */
1057 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1058 if (!new_conn_state->crtc)
1059 continue;
1060
1061 if (WARN_ON(!new_conn_state->best_encoder))
1062 continue;
1063
1064 connector->encoder = new_conn_state->best_encoder;
1065 connector->encoder->crtc = new_conn_state->crtc;
1066 }
1067
1068 /* set legacy state in the crtc structure */
1069 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1070 struct drm_plane *primary = crtc->primary;
1071 struct drm_plane_state *new_plane_state;
1072
1073 crtc->mode = new_crtc_state->mode;
1074 crtc->enabled = new_crtc_state->enable;
1075
1076 new_plane_state =
1077 drm_atomic_get_new_plane_state(old_state, primary);
1078
1079 if (new_plane_state && new_plane_state->crtc == crtc) {
1080 crtc->x = new_plane_state->src_x >> 16;
1081 crtc->y = new_plane_state->src_y >> 16;
1082 }
1083
1084 if (new_crtc_state->enable)
1085 drm_calc_timestamping_constants(crtc,
1086 &new_crtc_state->adjusted_mode);
1087 }
1088 }
1089 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1090
1091 static void
1092 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
1093 {
1094 struct drm_crtc *crtc;
1095 struct drm_crtc_state *new_crtc_state;
1096 struct drm_connector *connector;
1097 struct drm_connector_state *new_conn_state;
1098 int i;
1099
1100 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1101 const struct drm_crtc_helper_funcs *funcs;
1102
1103 if (!new_crtc_state->mode_changed)
1104 continue;
1105
1106 funcs = crtc->helper_private;
1107
1108 if (new_crtc_state->enable && funcs->mode_set_nofb) {
1109 DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
1110 crtc->base.id, crtc->name);
1111
1112 funcs->mode_set_nofb(crtc);
1113 }
1114 }
1115
1116 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1117 const struct drm_encoder_helper_funcs *funcs;
1118 struct drm_encoder *encoder;
1119 struct drm_display_mode *mode, *adjusted_mode;
1120
1121 if (!new_conn_state->best_encoder)
1122 continue;
1123
1124 encoder = new_conn_state->best_encoder;
1125 funcs = encoder->helper_private;
1126 new_crtc_state = new_conn_state->crtc->state;
1127 mode = &new_crtc_state->mode;
1128 adjusted_mode = &new_crtc_state->adjusted_mode;
1129
1130 if (!new_crtc_state->mode_changed)
1131 continue;
1132
1133 DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
1134 encoder->base.id, encoder->name);
1135
1136 /*
1137 * Each encoder has at most one connector (since we always steal
1138 * it away), so we won't call mode_set hooks twice.
1139 */
1140 if (funcs && funcs->atomic_mode_set) {
1141 funcs->atomic_mode_set(encoder, new_crtc_state,
1142 new_conn_state);
1143 } else if (funcs && funcs->mode_set) {
1144 funcs->mode_set(encoder, mode, adjusted_mode);
1145 }
1146
1147 drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
1148 }
1149 }
1150
1151 /**
1152 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1153 * @dev: DRM device
1154 * @old_state: atomic state object with old state structures
1155 *
1156 * This function shuts down all the outputs that need to be shut down and
1157 * prepares them (if required) with the new mode.
1158 *
1159 * For compatibility with legacy crtc helpers this should be called before
1160 * drm_atomic_helper_commit_planes(), which is what the default commit function
1161 * does. But drivers with different needs can group the modeset commits together
1162 * and do the plane commits at the end. This is useful for drivers doing runtime
1163 * PM since plane updates then only happen when the CRTC is actually enabled.
1164 */
1165 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1166 struct drm_atomic_state *old_state)
1167 {
1168 disable_outputs(dev, old_state);
1169
1170 drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
1171
1172 crtc_set_mode(dev, old_state);
1173 }
1174 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1175
1176 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1177 struct drm_atomic_state *old_state)
1178 {
1179 struct drm_connector *connector;
1180 struct drm_connector_state *new_conn_state;
1181 int i;
1182
1183 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1184 const struct drm_connector_helper_funcs *funcs;
1185
1186 funcs = connector->helper_private;
1187 if (!funcs->atomic_commit)
1188 continue;
1189
1190 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1191 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1192 funcs->atomic_commit(connector, new_conn_state);
1193 }
1194 }
1195 }
1196
1197 /**
1198 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1199 * @dev: DRM device
1200 * @old_state: atomic state object with old state structures
1201 *
1202 * This function enables all the outputs with the new configuration which had to
1203 * be turned off for the update.
1204 *
1205 * For compatibility with legacy crtc helpers this should be called after
1206 * drm_atomic_helper_commit_planes(), which is what the default commit function
1207 * does. But drivers with different needs can group the modeset commits together
1208 * and do the plane commits at the end. This is useful for drivers doing runtime
1209 * PM since plane updates then only happen when the CRTC is actually enabled.
1210 */
1211 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1212 struct drm_atomic_state *old_state)
1213 {
1214 struct drm_crtc *crtc;
1215 struct drm_crtc_state *old_crtc_state;
1216 struct drm_crtc_state *new_crtc_state;
1217 struct drm_connector *connector;
1218 struct drm_connector_state *new_conn_state;
1219 int i;
1220
1221 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1222 const struct drm_crtc_helper_funcs *funcs;
1223
1224 /* Need to filter out CRTCs where only planes change. */
1225 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1226 continue;
1227
1228 if (!new_crtc_state->active)
1229 continue;
1230
1231 funcs = crtc->helper_private;
1232
1233 if (new_crtc_state->enable) {
1234 DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
1235 crtc->base.id, crtc->name);
1236
1237 if (funcs->atomic_enable)
1238 funcs->atomic_enable(crtc, old_crtc_state);
1239 else
1240 funcs->commit(crtc);
1241 }
1242 }
1243
1244 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1245 const struct drm_encoder_helper_funcs *funcs;
1246 struct drm_encoder *encoder;
1247
1248 if (!new_conn_state->best_encoder)
1249 continue;
1250
1251 if (!new_conn_state->crtc->state->active ||
1252 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1253 continue;
1254
1255 encoder = new_conn_state->best_encoder;
1256 funcs = encoder->helper_private;
1257
1258 DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
1259 encoder->base.id, encoder->name);
1260
1261 /*
1262 * Each encoder has at most one connector (since we always steal
1263 * it away), so we won't call enable hooks twice.
1264 */
1265 drm_bridge_pre_enable(encoder->bridge);
1266
1267 if (funcs) {
1268 if (funcs->enable)
1269 funcs->enable(encoder);
1270 else if (funcs->commit)
1271 funcs->commit(encoder);
1272 }
1273
1274 drm_bridge_enable(encoder->bridge);
1275 }
1276
1277 drm_atomic_helper_commit_writebacks(dev, old_state);
1278 }
1279 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1280
1281 /**
1282 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1283 * @dev: DRM device
1284 * @state: atomic state object with old state structures
1285 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1286 * Otherwise @state is the old state.
1287 *
1288 * For implicit sync, drivers should fish the exclusive fence out from the
1289 * incoming fb's and stash it in the drm_plane_state. This is called after
1290 * drm_atomic_helper_swap_state() so it uses the current plane state (and
1291 * just uses the atomic state to find the changed planes)
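 *
 * For GEM based drivers that stashing can be a one-liner in the plane's
 * &drm_plane_helper_funcs.prepare_fb hook, using the helper from
 * drm_gem_framebuffer_helper.h (a sketch, the foo_ name is hypothetical)::
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *state)
 *	{
 *		return drm_gem_fb_prepare_fb(plane, state);
 *	}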
1292 *
1293 * Note that @pre_swap is needed since the point where we block for fences moves
1294 * around depending upon whether an atomic commit is blocking or
1295 * non-blocking. For non-blocking commit all waiting needs to happen after
1296 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1297 * to wait **before** we do anything that can't be easily rolled back. That is
1298 * before we call drm_atomic_helper_swap_state().
1299 *
1300 * Returns zero if success or < 0 if dma_fence_wait() fails.
1301 */
1302 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1303 struct drm_atomic_state *state,
1304 bool pre_swap)
1305 {
1306 struct drm_plane *plane;
1307 struct drm_plane_state *new_plane_state;
1308 int i, ret;
1309
1310 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1311 if (!new_plane_state->fence)
1312 continue;
1313
1314 WARN_ON(!new_plane_state->fb);
1315
1316 /*
1317 * If waiting for fences pre-swap (ie: nonblock), userspace can
1318 * still interrupt the operation. Instead of blocking until the
1319 * timer expires, make the wait interruptible.
1320 */
1321 ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1322 if (ret)
1323 return ret;
1324
1325 dma_fence_put(new_plane_state->fence);
1326 new_plane_state->fence = NULL;
1327 }
1328
1329 return 0;
1330 }
1331 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
1332
1333 /**
1334 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
1335 * @dev: DRM device
1336 * @old_state: atomic state object with old state structures
1337 *
1338 * Helper to, after atomic commit, wait for vblanks on all affected
1339 * crtcs (ie. before cleaning up old framebuffers using
1340 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1341 * framebuffers have actually changed to optimize for the legacy cursor and
1342 * plane update use-case.
1343 *
1344 * Drivers using the nonblocking commit tracking support initialized by calling
1345 * drm_atomic_helper_setup_commit() should look at
1346 * drm_atomic_helper_wait_for_flip_done() as an alternative.
1347 */
1348 void
1349 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1350 struct drm_atomic_state *old_state)
1351 {
1352 struct drm_crtc *crtc;
1353 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1354 int i, ret;
1355 unsigned crtc_mask = 0;
1356
1357 /*
1358 * Legacy cursor ioctls are completely unsynced, and userspace
1359 * relies on that (by doing tons of cursor updates).
1360 */
1361 if (old_state->legacy_cursor_update)
1362 return;
1363
1364 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1365 if (!new_crtc_state->active)
1366 continue;
1367
1368 ret = drm_crtc_vblank_get(crtc);
1369 if (ret != 0)
1370 continue;
1371
1372 crtc_mask |= drm_crtc_mask(crtc);
1373 old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1374 }
1375
1376 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1377 if (!(crtc_mask & drm_crtc_mask(crtc)))
1378 continue;
1379
1380 ret = wait_event_timeout(dev->vblank[i].queue,
1381 old_state->crtcs[i].last_vblank_count !=
1382 drm_crtc_vblank_count(crtc),
1383 msecs_to_jiffies(50));
1384
1385 WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1386 crtc->base.id, crtc->name);
1387
1388 drm_crtc_vblank_put(crtc);
1389 }
1390 }
1391 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1392
1393 /**
1394 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1395 * @dev: DRM device
1396 * @old_state: atomic state object with old state structures
1397 *
1398 * Helper to, after atomic commit, wait for page flips on all affected
1399 * crtcs (ie. before cleaning up old framebuffers using
1400 * drm_atomic_helper_cleanup_planes()). Compared to
1401 * drm_atomic_helper_wait_for_vblanks() this waits for the flip completion on all
1402 * CRTCs, assuming that cursor-only updates are signalling their completion
1403 * immediately (or using a different path).
1404 *
1405 * This requires that drivers use the nonblocking commit tracking support
1406 * initialized using drm_atomic_helper_setup_commit().
1407 */
1408 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1409 struct drm_atomic_state *old_state)
1410 {
1411 struct drm_crtc *crtc;
1412 int i;
1413
1414 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1415 struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1416 int ret;
1417
1418 crtc = old_state->crtcs[i].ptr;
1419
1420 if (!crtc || !commit)
1421 continue;
1422
1423 ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1424 if (ret == 0)
1425 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1426 crtc->base.id, crtc->name);
1427 }
1428
1429 if (old_state->fake_commit)
1430 complete_all(&old_state->fake_commit->flip_done);
1431 }
1432 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1433
1434 /**
1435 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1436 * @old_state: atomic state object with old state structures
1437 *
1438 * This is the default implementation for the
1439 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1440 * that do not support runtime_pm or do not need the CRTC to be
1441 * enabled to perform a commit. Otherwise, see
1442 * drm_atomic_helper_commit_tail_rpm().
1443 *
1444 * Note that the default ordering of how the various stages are called is
1445 * chosen to match the legacy modeset helper library as closely as possible.
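 *
 * Drivers that need a different ordering can supply their own tail through
 * &drm_mode_config_helper_funcs.atomic_commit_tail. A rough sketch (the foo_
 * names are hypothetical) relying on the flip-done tracking set up by
 * drm_atomic_helper_setup_commit() could look like this::
 *
 *	static void foo_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_flip_done(dev, old_state);
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *	}
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = foo_commit_tail,
 *	};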
1446 */
1447 void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
1448 {
1449 struct drm_device *dev = old_state->dev;
1450
1451 drm_atomic_helper_commit_modeset_disables(dev, old_state);
1452
1453 drm_atomic_helper_commit_planes(dev, old_state, 0);
1454
1455 drm_atomic_helper_commit_modeset_enables(dev, old_state);
1456
1457 drm_atomic_helper_fake_vblank(old_state);
1458
1459 drm_atomic_helper_commit_hw_done(old_state);
1460
1461 drm_atomic_helper_wait_for_vblanks(dev, old_state);
1462
1463 drm_atomic_helper_cleanup_planes(dev, old_state);
1464 }
1465 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1466
1467 /**
1468 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1469 * @old_state: new modeset state to be committed
1470 *
1471 * This is an alternative implementation for the
1472 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1473 * that support runtime_pm or need the CRTC to be enabled to perform a
1474 * commit. Otherwise, one should use the default implementation
1475 * drm_atomic_helper_commit_tail().
1476 */
1477 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
1478 {
1479 struct drm_device *dev = old_state->dev;
1480
1481 drm_atomic_helper_commit_modeset_disables(dev, old_state);
1482
1483 drm_atomic_helper_commit_modeset_enables(dev, old_state);
1484
1485 drm_atomic_helper_commit_planes(dev, old_state,
1486 DRM_PLANE_COMMIT_ACTIVE_ONLY);
1487
1488 drm_atomic_helper_fake_vblank(old_state);
1489
1490 drm_atomic_helper_commit_hw_done(old_state);
1491
1492 drm_atomic_helper_wait_for_vblanks(dev, old_state);
1493
1494 drm_atomic_helper_cleanup_planes(dev, old_state);
1495 }
1496 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
1497
1498 static void commit_tail(struct drm_atomic_state *old_state)
1499 {
1500 struct drm_device *dev = old_state->dev;
1501 const struct drm_mode_config_helper_funcs *funcs;
1502
1503 funcs = dev->mode_config.helper_private;
1504
1505 drm_atomic_helper_wait_for_fences(dev, old_state, false);
1506
1507 drm_atomic_helper_wait_for_dependencies(old_state);
1508
1509 if (funcs && funcs->atomic_commit_tail)
1510 funcs->atomic_commit_tail(old_state);
1511 else
1512 drm_atomic_helper_commit_tail(old_state);
1513
1514 drm_atomic_helper_commit_cleanup_done(old_state);
1515
1516 drm_atomic_state_put(old_state);
1517 }
1518
1519 static void commit_work(struct work_struct *work)
1520 {
1521 struct drm_atomic_state *state = container_of(work,
1522 struct drm_atomic_state,
1523 commit_work);
1524 commit_tail(state);
1525 }
1526
1527 /**
1528 * drm_atomic_helper_async_check - check if state can be committed asynchronously
1529 * @dev: DRM device
1530 * @state: the driver state object
1531 *
1532 * This helper will check if it is possible to commit the state asynchronously.
1533 * Async commits are not supposed to swap the states like normal sync commits
1534 * but just do in-place changes on the current state.
1535 *
1536 * It will return 0 if the commit can happen in an asynchronous fashion or error
1537 * if not. Note that an error just means it can't be committed asynchronously;
1538 * if it fails, the commit should be treated like a normal synchronous commit.
1539 */
1540 int drm_atomic_helper_async_check(struct drm_device *dev,
1541 struct drm_atomic_state *state)
1542 {
1543 struct drm_crtc *crtc;
1544 struct drm_crtc_state *crtc_state;
1545 struct drm_plane *plane = NULL;
1546 struct drm_plane_state *old_plane_state = NULL;
1547 struct drm_plane_state *new_plane_state = NULL;
1548 const struct drm_plane_helper_funcs *funcs;
1549 int i, n_planes = 0;
1550
1551 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1552 if (drm_atomic_crtc_needs_modeset(crtc_state))
1553 return -EINVAL;
1554 }
1555
1556 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1557 n_planes++;
1558
1559 /* FIXME: we support only single plane updates for now */
1560 if (n_planes != 1)
1561 return -EINVAL;
1562
1563 if (!new_plane_state->crtc ||
1564 old_plane_state->crtc != new_plane_state->crtc)
1565 return -EINVAL;
1566
1567 /*
1568 * FIXME: Since prepare_fb and cleanup_fb are always called on
1569 * the new_plane_state for async updates we need to block framebuffer
1570 * changes. This prevents use of a fb that's been cleaned up and
1571 * double cleanups from occurring.
1572 */
1573 if (old_plane_state->fb != new_plane_state->fb)
1574 return -EINVAL;
1575
1576 funcs = plane->helper_private;
1577 if (!funcs->atomic_async_update)
1578 return -EINVAL;
1579
1580 if (new_plane_state->fence)
1581 return -EINVAL;
1582
1583 /*
1584 * Don't do an async update if there is an outstanding commit modifying
1585 * the plane. This prevents our async update's changes from getting
1586 * overridden by a previous synchronous update's state.
1587 */
1588 if (old_plane_state->commit &&
1589 !try_wait_for_completion(&old_plane_state->commit->hw_done))
1590 return -EBUSY;
1591
1592 return funcs->atomic_async_check(plane, new_plane_state);
1593 }
1594 EXPORT_SYMBOL(drm_atomic_helper_async_check);
1595
1596 /**
1597 * drm_atomic_helper_async_commit - commit state asynchronously
1598 * @dev: DRM device
1599 * @state: the driver state object
1600 *
1601 * This function commits a state asynchronously, i.e., not vblank
1602 * synchronized. It should be used on a state only when
1603 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
1604 * the states like normal sync commits, but just do in-place changes on the
1605 * current state.
1606 *
1607 * TODO: Implement full swap instead of doing in-place changes.
1608 */
1609 void drm_atomic_helper_async_commit(struct drm_device *dev,
1610 struct drm_atomic_state *state)
1611 {
1612 struct drm_plane *plane;
1613 struct drm_plane_state *plane_state;
1614 const struct drm_plane_helper_funcs *funcs;
1615 int i;
1616
1617 for_each_new_plane_in_state(state, plane, plane_state, i) {
1618 struct drm_framebuffer *new_fb = plane_state->fb;
1619 struct drm_framebuffer *old_fb = plane->state->fb;
1620
1621 funcs = plane->helper_private;
1622 funcs->atomic_async_update(plane, plane_state);
1623
1624 /*
1625 * ->atomic_async_update() is supposed to update the
1626 * plane->state in place; make sure that at least the common
1627 * properties have been properly updated.
1628 */
1629 WARN_ON_ONCE(plane->state->fb != new_fb);
1630 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1631 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1632 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1633 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1634
1635 /*
1636 * Make sure the FBs have been swapped so that the cleanup done on
1637 * new_state actually cleans up the old FB.
1638 */
1639 WARN_ON_ONCE(plane_state->fb != old_fb);
1640 }
1641 }
1642 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
1643
1644 /**
1645 * drm_atomic_helper_commit - commit validated state object
1646 * @dev: DRM device
1647 * @state: the driver state object
1648 * @nonblock: whether nonblocking behavior is requested.
1649 *
1650 * This function commits a state object that has been pre-validated with
1651 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. This
1652 * function implements nonblocking commits, using
1653 * drm_atomic_helper_setup_commit() and related functions.
1654 *
1655 * Committing the actual hardware state is done through the
1656 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
1657 * implementation drm_atomic_helper_commit_tail().
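 *
 * For most drivers, wiring this up is a matter of pointing the mode config
 * function table at the helpers, e.g. (using the GEM framebuffer helper for
 * fb_create here, which may not fit every driver)::
 *
 *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *             .fb_create = drm_gem_fb_create,
 *             .atomic_check = drm_atomic_helper_check,
 *             .atomic_commit = drm_atomic_helper_commit,
 *     };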
1658 *
1659 * RETURNS:
1660 * Zero for success or -errno.
1661 */
1662 int drm_atomic_helper_commit(struct drm_device *dev,
1663 struct drm_atomic_state *state,
1664 bool nonblock)
1665 {
1666 int ret;
1667
1668 if (state->async_update) {
1669 ret = drm_atomic_helper_prepare_planes(dev, state);
1670 if (ret)
1671 return ret;
1672
1673 drm_atomic_helper_async_commit(dev, state);
1674 drm_atomic_helper_cleanup_planes(dev, state);
1675
1676 return 0;
1677 }
1678
1679 ret = drm_atomic_helper_setup_commit(state, nonblock);
1680 if (ret)
1681 return ret;
1682
1683 INIT_WORK(&state->commit_work, commit_work);
1684
1685 ret = drm_atomic_helper_prepare_planes(dev, state);
1686 if (ret)
1687 return ret;
1688
1689 if (!nonblock) {
1690 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1691 if (ret)
1692 goto err;
1693 }
1694
1695 /*
1696 * This is the point of no return - everything below never fails except
1697 * when the hardware is hopelessly wedged. Which means we can commit the new state on
1698 * the software side now.
1699 */
1700
1701 ret = drm_atomic_helper_swap_state(state, true);
1702 if (ret)
1703 goto err;
1704
1705 /*
1706 * Everything below can be run asynchronously without the need to grab
1707 * any modeset locks at all under one condition: It must be guaranteed
1708 * that the asynchronous work has either been cancelled (if the driver
1709 * supports it, which at least requires that the framebuffers get
1710 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
1711 * before the new state gets committed on the software side with
1712 * drm_atomic_helper_swap_state().
1713 *
1714 * This scheme allows new atomic state updates to be prepared and
1715 * checked in parallel to the asynchronous completion of the previous
1716 * update. Which is important since compositors need to figure out the
1717 * composition of the next frame right after having submitted the
1718 * current layout.
1719 *
1720 * NOTE: Commit work has multiple phases, first hardware commit, then
1721 * cleanup. We want them to overlap, hence need system_unbound_wq to
1722 * make sure work items don't artificially stall on each other.
1723 */
1724
1725 drm_atomic_state_get(state);
1726 if (nonblock)
1727 queue_work(system_unbound_wq, &state->commit_work);
1728 else
1729 commit_tail(state);
1730
1731 return 0;
1732
1733 err:
1734 drm_atomic_helper_cleanup_planes(dev, state);
1735 return ret;
1736 }
1737 EXPORT_SYMBOL(drm_atomic_helper_commit);
1738
1739 /**
1740 * DOC: implementing nonblocking commit
1741 *
1742 * Nonblocking atomic commits have to be implemented in the following sequence:
1743 *
1744 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
1745 * which commit needs to call which can fail, so we want to run it first and
1746 * synchronously.
1747 *
1748 * 2. Synchronize with any outstanding nonblocking commit worker threads which
1749 * might be affected by the new state update. This can be done by either cancelling
1750 * or flushing the work items, depending upon whether the driver can deal with
1751 * cancelled updates. Note that it is important to ensure that the framebuffer
1752 * cleanup is still done when cancelling.
1753 *
1754 * Asynchronous workers need to have sufficient parallelism to be able to run
1755 * different atomic commits on different CRTCs in parallel. The simplest way to
1756 * achieve this is by running them on the &system_unbound_wq work queue. Note
1757 * that drivers are not required to split up atomic commits and run an
1758 * individual commit in parallel - userspace is supposed to do that if it cares.
1759 * But it might be beneficial to do that for modesets, since those necessarily
1760 * must be done as one global operation, and enabling or disabling a CRTC can
1761 * take a long time. But even that is not required.
1762 *
1763 * 3. The software state is updated synchronously with
1764 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1765 * locks means concurrent callers never see inconsistent state. And doing this
1766 * while it's guaranteed that no relevant nonblocking worker runs means that
1767 * nonblocking workers do not need to grab any locks. In fact they must not grab
1768 * locks, since otherwise the work flushing would deadlock.
1769 *
1770 * 4. Schedule a work item to do all subsequent steps, using the split-out
1771 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1772 * then cleaning up the framebuffers after the old framebuffer is no longer
1773 * being displayed.
1774 *
1775 * The above scheme is implemented in the atomic helper libraries in
1776 * drm_atomic_helper_commit() using a bunch of helper functions. See
1777 * drm_atomic_helper_setup_commit() for a starting point.
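 *
 * A rough sketch of what the nonblocking worker ends up running when
 * drm_atomic_helper_commit() is used together with the default
 * drm_atomic_helper_commit_tail() (the function name here is purely
 * illustrative; drivers with custom needs replace the commit-tail portion)::
 *
 *     static void example_commit_work_tail(struct drm_atomic_state *old_state)
 *     {
 *             struct drm_device *dev = old_state->dev;
 *
 *             drm_atomic_helper_wait_for_dependencies(old_state);
 *
 *             drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *             drm_atomic_helper_commit_planes(dev, old_state, 0);
 *             drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *
 *             drm_atomic_helper_fake_vblank(old_state);
 *             drm_atomic_helper_commit_hw_done(old_state);
 *
 *             drm_atomic_helper_wait_for_vblanks(dev, old_state);
 *             drm_atomic_helper_cleanup_planes(dev, old_state);
 *
 *             drm_atomic_helper_commit_cleanup_done(old_state);
 *             drm_atomic_state_put(old_state);
 *     }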
1778 */
1779
1780 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1781 {
1782 struct drm_crtc_commit *commit, *stall_commit = NULL;
1783 bool completed = true;
1784 int i;
1785 long ret = 0;
1786
1787 spin_lock(&crtc->commit_lock);
1788 i = 0;
1789 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1790 if (i == 0) {
1791 completed = try_wait_for_completion(&commit->flip_done);
1792 /* Userspace is not allowed to get ahead of the previous
1793 * commit with nonblocking ones. */
1794 if (!completed && nonblock) {
1795 spin_unlock(&crtc->commit_lock);
1796 return -EBUSY;
1797 }
1798 } else if (i == 1) {
1799 stall_commit = drm_crtc_commit_get(commit);
1800 break;
1801 }
1802
1803 i++;
1804 }
1805 spin_unlock(&crtc->commit_lock);
1806
1807 if (!stall_commit)
1808 return 0;
1809
1810 /* We don't want to let commits get ahead of cleanup work too much,
1811 * stalling on 2nd previous commit means triple-buffer won't ever stall.
1812 */
1813 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1814 10*HZ);
1815 if (ret == 0)
1816 DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1817 crtc->base.id, crtc->name);
1818
1819 drm_crtc_commit_put(stall_commit);
1820
1821 return ret < 0 ? ret : 0;
1822 }
1823
1824 static void release_crtc_commit(struct completion *completion)
1825 {
1826 struct drm_crtc_commit *commit = container_of(completion,
1827 typeof(*commit),
1828 flip_done);
1829
1830 drm_crtc_commit_put(commit);
1831 }
1832
1833 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
1834 {
1835 init_completion(&commit->flip_done);
1836 init_completion(&commit->hw_done);
1837 init_completion(&commit->cleanup_done);
1838 INIT_LIST_HEAD(&commit->commit_entry);
1839 kref_init(&commit->ref);
1840 commit->crtc = crtc;
1841 }
1842
1843 static struct drm_crtc_commit *
1844 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
1845 {
1846 if (crtc) {
1847 struct drm_crtc_state *new_crtc_state;
1848
1849 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1850
1851 return new_crtc_state->commit;
1852 }
1853
1854 if (!state->fake_commit) {
1855 state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
1856 if (!state->fake_commit)
1857 return NULL;
1858
1859 init_commit(state->fake_commit, NULL);
1860 }
1861
1862 return state->fake_commit;
1863 }
1864
1865 /**
1866 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
1867 * @state: new modeset state to be committed
1868 * @nonblock: whether nonblocking behavior is requested.
1869 *
1870 * This function prepares @state to be used by the atomic helper's support for
1871 * nonblocking commits. Drivers using the nonblocking commit infrastructure
1872 * should always call this function from their
1873 * &drm_mode_config_funcs.atomic_commit hook.
1874 *
1875 * To be able to use this support drivers need to use a few more helper
1876 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
1877 * actually committing the hardware state, and for nonblocking commits this call
1878 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1879 * and its stall parameter, for when a driver's commit hooks look at the
1880 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
1881 *
1882 * Completion of the hardware commit step must be signalled using
1883 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
1884 * to read or change any permanent software or hardware modeset state. The only
1885 * exception is state protected by other means than &drm_modeset_lock locks.
1886 * Only the free standing @state with pointers to the old state structures can
1887 * be inspected, e.g. to clean up old buffers using
1888 * drm_atomic_helper_cleanup_planes().
1889 *
1890 * At the very end, before cleaning up @state drivers must call
1891 * drm_atomic_helper_commit_cleanup_done().
1892 *
1893 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
1894 * complete and easy-to-use default implementation of the atomic_commit() hook.
1895 *
1896 * The tracking of asynchronously executed and still pending commits is done
1897 * using the core structure &drm_crtc_commit.
1898 *
1899 * By default there's no need to clean up resources allocated by this function
1900 * explicitly: drm_atomic_state_default_clear() will take care of that
1901 * automatically.
1902 *
1903 * Returns:
1904 *
1905 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
1906 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
1907 */
1908 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
1909 bool nonblock)
1910 {
1911 struct drm_crtc *crtc;
1912 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1913 struct drm_connector *conn;
1914 struct drm_connector_state *old_conn_state, *new_conn_state;
1915 struct drm_plane *plane;
1916 struct drm_plane_state *old_plane_state, *new_plane_state;
1917 struct drm_crtc_commit *commit;
1918 int i, ret;
1919
1920 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1921 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
1922 if (!commit)
1923 return -ENOMEM;
1924
1925 init_commit(commit, crtc);
1926
1927 new_crtc_state->commit = commit;
1928
1929 ret = stall_checks(crtc, nonblock);
1930 if (ret)
1931 return ret;
1932
1933 /* Drivers only send out events when at least either the current or the
1934 * new CRTC state is active. Complete right away if everything
1935 * stays off. */
1936 if (!old_crtc_state->active && !new_crtc_state->active) {
1937 complete_all(&commit->flip_done);
1938 continue;
1939 }
1940
1941 /* Legacy cursor updates are fully unsynced. */
1942 if (state->legacy_cursor_update) {
1943 complete_all(&commit->flip_done);
1944 continue;
1945 }
1946
1947 if (!new_crtc_state->event) {
1948 commit->event = kzalloc(sizeof(*commit->event),
1949 GFP_KERNEL);
1950 if (!commit->event)
1951 return -ENOMEM;
1952
1953 new_crtc_state->event = commit->event;
1954 }
1955
1956 new_crtc_state->event->base.completion = &commit->flip_done;
1957 new_crtc_state->event->base.completion_release = release_crtc_commit;
1958 drm_crtc_commit_get(commit);
1959
1960 commit->abort_completion = true;
1961
1962 state->crtcs[i].commit = commit;
1963 drm_crtc_commit_get(commit);
1964 }
1965
1966 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
1967 /* Userspace is not allowed to get ahead of the previous
1968 * commit with nonblocking ones. */
1969 if (nonblock && old_conn_state->commit &&
1970 !try_wait_for_completion(&old_conn_state->commit->flip_done))
1971 return -EBUSY;
1972
1973 /* Always track connectors explicitly for e.g. link retraining. */
1974 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
1975 if (!commit)
1976 return -ENOMEM;
1977
1978 new_conn_state->commit = drm_crtc_commit_get(commit);
1979 }
1980
1981 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
1982 /* Userspace is not allowed to get ahead of the previous
1983 * commit with nonblocking ones. */
1984 if (nonblock && old_plane_state->commit &&
1985 !try_wait_for_completion(&old_plane_state->commit->flip_done))
1986 return -EBUSY;
1987
1988 /* Always track planes explicitly for async pageflip support. */
1989 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
1990 if (!commit)
1991 return -ENOMEM;
1992
1993 new_plane_state->commit = drm_crtc_commit_get(commit);
1994 }
1995
1996 return 0;
1997 }
1998 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
1999
2000 /**
2001 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2002 * @old_state: atomic state object with old state structures
2003 *
2004 * This function waits for all preceding commits that touch the same CRTC as
2005 * @old_state to both be committed to the hardware (as signalled by
2006 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
2007 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2008 *
2009 * This is part of the atomic helper support for nonblocking commits, see
2010 * drm_atomic_helper_setup_commit() for an overview.
2011 */
2012 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
2013 {
2014 struct drm_crtc *crtc;
2015 struct drm_crtc_state *old_crtc_state;
2016 struct drm_plane *plane;
2017 struct drm_plane_state *old_plane_state;
2018 struct drm_connector *conn;
2019 struct drm_connector_state *old_conn_state;
2020 struct drm_crtc_commit *commit;
2021 int i;
2022 long ret;
2023
2024 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2025 commit = old_crtc_state->commit;
2026
2027 if (!commit)
2028 continue;
2029
2030 ret = wait_for_completion_timeout(&commit->hw_done,
2031 10*HZ);
2032 if (ret == 0)
2033 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
2034 crtc->base.id, crtc->name);
2035
2036 /* Currently no support for overwriting flips, hence
2037 * stall for previous one to execute completely. */
2038 ret = wait_for_completion_timeout(&commit->flip_done,
2039 10*HZ);
2040 if (ret == 0)
2041 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
2042 crtc->base.id, crtc->name);
2043 }
2044
2045 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
2046 commit = old_conn_state->commit;
2047
2048 if (!commit)
2049 continue;
2050
2051 ret = wait_for_completion_timeout(&commit->hw_done,
2052 10*HZ);
2053 if (ret == 0)
2054 DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
2055 conn->base.id, conn->name);
2056
2057 /* Currently no support for overwriting flips, hence
2058 * stall for previous one to execute completely. */
2059 ret = wait_for_completion_timeout(&commit->flip_done,
2060 10*HZ);
2061 if (ret == 0)
2062 DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
2063 conn->base.id, conn->name);
2064 }
2065
2066 for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
2067 commit = old_plane_state->commit;
2068
2069 if (!commit)
2070 continue;
2071
2072 ret = wait_for_completion_timeout(&commit->hw_done,
2073 10*HZ);
2074 if (ret == 0)
2075 DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
2076 plane->base.id, plane->name);
2077
2078 /* Currently no support for overwriting flips, hence
2079 * stall for previous one to execute completely. */
2080 ret = wait_for_completion_timeout(&commit->flip_done,
2081 10*HZ);
2082 if (ret == 0)
2083 DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
2084 plane->base.id, plane->name);
2085 }
2086 }
2087 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2088
2089 /**
2090 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2091 * @old_state: atomic state object with old state structures
2092 *
2093 * This function walks all CRTCs and fakes VBLANK events on those with
2094 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2095 * The primary use of this function is writeback connectors working in oneshot
2096 * mode and faking VBLANK events. In this case they only fake the VBLANK event
2097 * when a job is queued, and any change to the pipeline that does not touch the
2098 * connector would otherwise lead to timeouts when calling
2099 * drm_atomic_helper_wait_for_vblanks() or
2100 * drm_atomic_helper_wait_for_flip_done().
2101 *
2102 * This is part of the atomic helper support for nonblocking commits, see
2103 * drm_atomic_helper_setup_commit() for an overview.
2104 */
2105 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2106 {
2107 struct drm_crtc_state *new_crtc_state;
2108 struct drm_crtc *crtc;
2109 int i;
2110
2111 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
2112 unsigned long flags;
2113
2114 if (!new_crtc_state->no_vblank)
2115 continue;
2116
2117 spin_lock_irqsave(&old_state->dev->event_lock, flags);
2118 if (new_crtc_state->event) {
2119 drm_crtc_send_vblank_event(crtc,
2120 new_crtc_state->event);
2121 new_crtc_state->event = NULL;
2122 }
2123 spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2124 }
2125 }
2126 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2127
2128 /**
2129 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2130 * @old_state: atomic state object with old state structures
2131 *
2132 * This function is used to signal completion of the hardware commit step. After
2133 * this step the driver is not allowed to read or change any permanent software
2134 * or hardware modeset state. The only exception is state protected by other
2135 * means than &drm_modeset_lock locks.
2136 *
2137 * Drivers should try to postpone any expensive or delayed cleanup work until
2138 * after this function has been called.
2139 *
2140 * This is part of the atomic helper support for nonblocking commits, see
2141 * drm_atomic_helper_setup_commit() for an overview.
2142 */
2143 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2144 {
2145 struct drm_crtc *crtc;
2146 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2147 struct drm_crtc_commit *commit;
2148 int i;
2149
2150 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2151 commit = new_crtc_state->commit;
2152 if (!commit)
2153 continue;
2154
2155 /*
2156 * copy new_crtc_state->commit to old_crtc_state->commit,
2157 * it's unsafe to touch new_crtc_state after hw_done,
2158 * but we still need to do so in cleanup_done().
2159 */
2160 if (old_crtc_state->commit)
2161 drm_crtc_commit_put(old_crtc_state->commit);
2162
2163 old_crtc_state->commit = drm_crtc_commit_get(commit);
2164
2165 /* backend must have consumed any event by now */
2166 WARN_ON(new_crtc_state->event);
2167 complete_all(&commit->hw_done);
2168 }
2169
2170 if (old_state->fake_commit) {
2171 complete_all(&old_state->fake_commit->hw_done);
2172 complete_all(&old_state->fake_commit->flip_done);
2173 }
2174 }
2175 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2176
2177 /**
2178 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2179 * @old_state: atomic state object with old state structures
2180 *
2181 * This signals completion of the atomic update @old_state, including any
2182 * cleanup work. If used, it must be called right before calling
2183 * drm_atomic_state_put().
2184 *
2185 * This is part of the atomic helper support for nonblocking commits, see
2186 * drm_atomic_helper_setup_commit() for an overview.
2187 */
2188 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2189 {
2190 struct drm_crtc *crtc;
2191 struct drm_crtc_state *old_crtc_state;
2192 struct drm_crtc_commit *commit;
2193 int i;
2194
2195 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2196 commit = old_crtc_state->commit;
2197 if (WARN_ON(!commit))
2198 continue;
2199
2200 complete_all(&commit->cleanup_done);
2201 WARN_ON(!try_wait_for_completion(&commit->hw_done));
2202
2203 spin_lock(&crtc->commit_lock);
2204 list_del(&commit->commit_entry);
2205 spin_unlock(&crtc->commit_lock);
2206 }
2207
2208 if (old_state->fake_commit)
2209 complete_all(&old_state->fake_commit->cleanup_done);
2210 }
2211 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2212
2213 /**
2214 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2215 * @dev: DRM device
2216 * @state: atomic state object with new state structures
2217 *
2218 * This function prepares plane state, specifically framebuffers, for the new
2219 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2220 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2221 * any already successfully prepared framebuffer.
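 *
 * For GEM-backed drivers the prepare_fb step is often just fence bookkeeping;
 * such a driver might simply use the GEM framebuffer helper (shown here as one
 * possible choice, with hypothetical foo_plane_atomic_check()/
 * foo_plane_atomic_update() callbacks)::
 *
 *     static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *             .prepare_fb = drm_gem_fb_prepare_fb,
 *             .atomic_check = foo_plane_atomic_check,
 *             .atomic_update = foo_plane_atomic_update,
 *     };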
2222 *
2223 * Returns:
2224 * 0 on success, negative error code on failure.
2225 */
2226 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2227 struct drm_atomic_state *state)
2228 {
2229 struct drm_plane *plane;
2230 struct drm_plane_state *new_plane_state;
2231 int ret, i, j;
2232
2233 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2234 const struct drm_plane_helper_funcs *funcs;
2235
2236 funcs = plane->helper_private;
2237
2238 if (funcs->prepare_fb) {
2239 ret = funcs->prepare_fb(plane, new_plane_state);
2240 if (ret)
2241 goto fail;
2242 }
2243 }
2244
2245 return 0;
2246
2247 fail:
2248 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2249 const struct drm_plane_helper_funcs *funcs;
2250
2251 if (j >= i)
2252 continue;
2253
2254 funcs = plane->helper_private;
2255
2256 if (funcs->cleanup_fb)
2257 funcs->cleanup_fb(plane, new_plane_state);
2258 }
2259
2260 return ret;
2261 }
2262 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2263
2264 static bool plane_crtc_active(const struct drm_plane_state *state)
2265 {
2266 return state->crtc && state->crtc->state->active;
2267 }
2268
2269 /**
2270 * drm_atomic_helper_commit_planes - commit plane state
2271 * @dev: DRM device
2272 * @old_state: atomic state object with old state structures
2273 * @flags: flags for committing plane state
2274 *
2275 * This function commits the new plane state using the plane and atomic helper
2276 * functions for planes and crtcs. It assumes that the atomic state has already
2277 * been pushed into the relevant object state pointers, since this step can no
2278 * longer fail.
2279 *
2280 * It still requires the global state object @old_state to know which planes and
2281 * crtcs need to be updated though.
2282 *
2283 * Note that this function does all plane updates across all CRTCs in one step.
2284 * If the hardware can't support this approach look at
2285 * drm_atomic_helper_commit_planes_on_crtc() instead.
2286 *
2287 * Plane parameters can be updated by applications while the associated CRTC is
2288 * disabled. The DRM/KMS core will store the parameters in the plane state,
2289 * which will be available to the driver when the CRTC is turned on. As a result
2290 * most drivers don't need to be immediately notified of plane updates for a
2291 * disabled CRTC.
2292 *
2293 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2294 * @flags in order not to receive plane update notifications related to a
2295 * disabled CRTC. This avoids the need to manually ignore plane updates in
2296 * driver code when the driver and/or hardware can't or just don't need to deal
2297 * with updates on disabled CRTCs, for example when supporting runtime PM.
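 *
 * A driver's commit tail would then simply pass the flag, for instance::
 *
 *     drm_atomic_helper_commit_planes(dev, old_state,
 *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);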
2298 *
2299 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2300 * display controllers require a CRTC's planes to be disabled when the CRTC itself
2301 * is disabled. This function then skips the &drm_plane_helper_funcs.atomic_disable
2302 * call for a plane if the CRTC of the old plane state needs a modesetting
2303 * operation. Of course, the drivers need to disable the planes in their CRTC
2304 * disable callbacks since no one else would do that.
2305 *
2306 * The drm_atomic_helper_commit() default implementation doesn't set the
2307 * ACTIVE_ONLY flag, in order to most closely match the behaviour of the legacy helpers.
2308 * This should not be copied blindly by drivers.
2309 */
2310 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2311 struct drm_atomic_state *old_state,
2312 uint32_t flags)
2313 {
2314 struct drm_crtc *crtc;
2315 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2316 struct drm_plane *plane;
2317 struct drm_plane_state *old_plane_state, *new_plane_state;
2318 int i;
2319 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2320 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2321
2322 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2323 const struct drm_crtc_helper_funcs *funcs;
2324
2325 funcs = crtc->helper_private;
2326
2327 if (!funcs || !funcs->atomic_begin)
2328 continue;
2329
2330 if (active_only && !new_crtc_state->active)
2331 continue;
2332
2333 funcs->atomic_begin(crtc, old_crtc_state);
2334 }
2335
2336 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2337 const struct drm_plane_helper_funcs *funcs;
2338 bool disabling;
2339
2340 funcs = plane->helper_private;
2341
2342 if (!funcs)
2343 continue;
2344
2345 disabling = drm_atomic_plane_disabling(old_plane_state,
2346 new_plane_state);
2347
2348 if (active_only) {
2349 /*
2350 * Skip planes related to inactive CRTCs. If the plane
2351 * is enabled use the state of the current CRTC. If the
2352 * plane is being disabled use the state of the old
2353 * CRTC to avoid skipping planes being disabled on an
2354 * active CRTC.
2355 */
2356 if (!disabling && !plane_crtc_active(new_plane_state))
2357 continue;
2358 if (disabling && !plane_crtc_active(old_plane_state))
2359 continue;
2360 }
2361
2362 /*
2363 * Special-case disabling the plane if drivers support it.
2364 */
2365 if (disabling && funcs->atomic_disable) {
2366 struct drm_crtc_state *crtc_state;
2367
2368 crtc_state = old_plane_state->crtc->state;
2369
2370 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2371 no_disable)
2372 continue;
2373
2374 funcs->atomic_disable(plane, old_plane_state);
2375 } else if (new_plane_state->crtc || disabling) {
2376 funcs->atomic_update(plane, old_plane_state);
2377 }
2378 }
2379
2380 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2381 const struct drm_crtc_helper_funcs *funcs;
2382
2383 funcs = crtc->helper_private;
2384
2385 if (!funcs || !funcs->atomic_flush)
2386 continue;
2387
2388 if (active_only && !new_crtc_state->active)
2389 continue;
2390
2391 funcs->atomic_flush(crtc, old_crtc_state);
2392 }
2393 }
2394 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2395
2396 /**
2397 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
2398 * @old_crtc_state: atomic state object with the old crtc state
2399 *
2400 * This function commits the new plane state using the plane and atomic helper
2401 * functions for planes on the specific crtc. It assumes that the atomic state
2402 * has already been pushed into the relevant object state pointers, since this
2403 * step can no longer fail.
2404 *
2405 * This function is useful when plane updates should be done crtc-by-crtc
2406 * instead of one global step like drm_atomic_helper_commit_planes() does.
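 *
 * A custom commit tail could, for example, iterate over the CRTCs in the
 * commit and update each one's planes individually (a sketch, ignoring any
 * inter-CRTC ordering the hardware might need)::
 *
 *     for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 *             drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);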
2407 *
2408 * This function can only be used safely when planes are not allowed to move
2409 * between different CRTCs, because this function doesn't handle inter-CRTC
2410 * dependencies. Callers need to ensure that either no such dependencies exist,
2411 * or resolve them through ordering of commit calls or through some other means.
2412 */
2413 void
2414 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2415 {
2416 const struct drm_crtc_helper_funcs *crtc_funcs;
2417 struct drm_crtc *crtc = old_crtc_state->crtc;
2418 struct drm_atomic_state *old_state = old_crtc_state->state;
2419 struct drm_crtc_state *new_crtc_state =
2420 drm_atomic_get_new_crtc_state(old_state, crtc);
2421 struct drm_plane *plane;
2422 unsigned plane_mask;
2423
2424 plane_mask = old_crtc_state->plane_mask;
2425 plane_mask |= new_crtc_state->plane_mask;
2426
2427 crtc_funcs = crtc->helper_private;
2428 if (crtc_funcs && crtc_funcs->atomic_begin)
2429 crtc_funcs->atomic_begin(crtc, old_crtc_state);
2430
2431 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2432 struct drm_plane_state *old_plane_state =
2433 drm_atomic_get_old_plane_state(old_state, plane);
2434 struct drm_plane_state *new_plane_state =
2435 drm_atomic_get_new_plane_state(old_state, plane);
2436 const struct drm_plane_helper_funcs *plane_funcs;
2437
2438 plane_funcs = plane->helper_private;
2439
2440 if (!old_plane_state || !plane_funcs)
2441 continue;
2442
2443 WARN_ON(new_plane_state->crtc &&
2444 new_plane_state->crtc != crtc);
2445
2446 if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
2447 plane_funcs->atomic_disable)
2448 plane_funcs->atomic_disable(plane, old_plane_state);
2449 else if (new_plane_state->crtc ||
2450 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
2451 plane_funcs->atomic_update(plane, old_plane_state);
2452 }
2453
2454 if (crtc_funcs && crtc_funcs->atomic_flush)
2455 crtc_funcs->atomic_flush(crtc, old_crtc_state);
2456 }
2457 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
2458
2459 /**
2460 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2461 * @old_crtc_state: atomic state object with the old CRTC state
2462 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2463 *
2464 * Disables all planes associated with the given CRTC. This can be
2465 * used for instance in the CRTC helper atomic_disable callback to disable
2466 * all planes.
2467 *
2468 * If the atomic-parameter is set the function calls the CRTC's
2469 * atomic_begin hook before and atomic_flush hook after disabling the
2470 * planes.
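 *
 * For example, a CRTC disable hook might do (a sketch, assuming a hypothetical
 * foo driver with a foo_crtc_power_off() helper)::
 *
 *     static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *                                         struct drm_crtc_state *old_state)
 *     {
 *             drm_atomic_helper_disable_planes_on_crtc(old_state, false);
 *             foo_crtc_power_off(crtc);
 *     }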
2471 *
2472 * It is a bug to call this function without having implemented the
2473 * &drm_plane_helper_funcs.atomic_disable plane hook.
2474 */
2475 void
2476 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2477 bool atomic)
2478 {
2479 struct drm_crtc *crtc = old_crtc_state->crtc;
2480 const struct drm_crtc_helper_funcs *crtc_funcs =
2481 crtc->helper_private;
2482 struct drm_plane *plane;
2483
2484 if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2485 crtc_funcs->atomic_begin(crtc, NULL);
2486
2487 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2488 const struct drm_plane_helper_funcs *plane_funcs =
2489 plane->helper_private;
2490
2491 if (!plane_funcs)
2492 continue;
2493
2494 WARN_ON(!plane_funcs->atomic_disable);
2495 if (plane_funcs->atomic_disable)
2496 plane_funcs->atomic_disable(plane, NULL);
2497 }
2498
2499 if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2500 crtc_funcs->atomic_flush(crtc, NULL);
2501 }
2502 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
2503
2504 /**
2505 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2506 * @dev: DRM device
2507 * @old_state: atomic state object with old state structures
2508 *
2509 * This function cleans up plane state, specifically framebuffers, from the old
2510 * configuration. Hence the old configuration must be preserved in @old_state to
2511 * be able to call this function.
2512 *
2513 * This function must also be called on the new state when the atomic update
2514 * fails at any point after calling drm_atomic_helper_prepare_planes().
2515 */
2516 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
2517 struct drm_atomic_state *old_state)
2518 {
2519 struct drm_plane *plane;
2520 struct drm_plane_state *old_plane_state, *new_plane_state;
2521 int i;
2522
2523 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2524 const struct drm_plane_helper_funcs *funcs;
2525 struct drm_plane_state *plane_state;
2526
2527 /*
2528 * This might be called before swapping when commit is aborted,
2529 * in which case we have to cleanup the new state.
2530 */
2531 if (old_plane_state == plane->state)
2532 plane_state = new_plane_state;
2533 else
2534 plane_state = old_plane_state;
2535
2536 funcs = plane->helper_private;
2537
2538 if (funcs->cleanup_fb)
2539 funcs->cleanup_fb(plane, plane_state);
2540 }
2541 }
2542 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2543
2544 /**
2545 * drm_atomic_helper_swap_state - store atomic state into current sw state
2546 * @state: atomic state
2547 * @stall: stall for preceding commits
2548 *
2549 * This function stores the atomic state into the current state pointers in all
2550 * driver objects. It should be called after all steps which might fail have been
2551 * done and have succeeded, but before the actual hardware state is committed.
2552 *
2553 * For cleanup and error recovery the current state for all changed objects will
2554 * be swapped into @state.
2555 *
2556 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2557 *
2558 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2559 *
2560 * 2. Do any other steps that might fail.
2561 *
2562 * 3. Put the staged state into the current state pointers with this function.
2563 *
2564 * 4. Actually commit the hardware state.
2565 *
2566 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2567 * contains the old state. Also do any other cleanup required with that state.
2568 *
2569 * @stall must be set when nonblocking commits for this driver directly access
2570 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2571 * the current atomic helpers this is almost always the case, since the helpers
2572 * don't pass the right state structures to the callbacks.
2573 *
2574 * Returns:
2575 *
2576 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
2577 * waiting for the previous commits has been interrupted.
2578 */
2579 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
2580 bool stall)
2581 {
2582 int i, ret;
2583 struct drm_connector *connector;
2584 struct drm_connector_state *old_conn_state, *new_conn_state;
2585 struct drm_crtc *crtc;
2586 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2587 struct drm_plane *plane;
2588 struct drm_plane_state *old_plane_state, *new_plane_state;
2589 struct drm_crtc_commit *commit;
2590 struct drm_private_obj *obj;
2591 struct drm_private_state *old_obj_state, *new_obj_state;
2592
2593 if (stall) {
2594 /*
2595 * We have to stall for hw_done here before
2596 * drm_atomic_helper_wait_for_dependencies() because flip
2597 * depth > 1 is not yet supported by all drivers. As long as
2598 * obj->state is directly dereferenced anywhere in the driver's
2599 * atomic_commit_tail function, then it's unsafe to swap state
2600 * before drm_atomic_helper_commit_hw_done() is called.
2601 */
2602
2603 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2604 commit = old_crtc_state->commit;
2605
2606 if (!commit)
2607 continue;
2608
2609 ret = wait_for_completion_interruptible(&commit->hw_done);
2610 if (ret)
2611 return ret;
2612 }
2613
2614 for_each_old_connector_in_state(state, connector, old_conn_state, i) {
2615 commit = old_conn_state->commit;
2616
2617 if (!commit)
2618 continue;
2619
2620 ret = wait_for_completion_interruptible(&commit->hw_done);
2621 if (ret)
2622 return ret;
2623 }
2624
2625 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2626 commit = old_plane_state->commit;
2627
2628 if (!commit)
2629 continue;
2630
2631 ret = wait_for_completion_interruptible(&commit->hw_done);
2632 if (ret)
2633 return ret;
2634 }
2635 }
2636
2637 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
2638 WARN_ON(connector->state != old_conn_state);
2639
2640 old_conn_state->state = state;
2641 new_conn_state->state = NULL;
2642
2643 state->connectors[i].state = old_conn_state;
2644 connector->state = new_conn_state;
2645 }
2646
2647 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2648 WARN_ON(crtc->state != old_crtc_state);
2649
2650 old_crtc_state->state = state;
2651 new_crtc_state->state = NULL;
2652
2653 state->crtcs[i].state = old_crtc_state;
2654 crtc->state = new_crtc_state;
2655
2656 if (new_crtc_state->commit) {
2657 spin_lock(&crtc->commit_lock);
2658 list_add(&new_crtc_state->commit->commit_entry,
2659 &crtc->commit_list);
2660 spin_unlock(&crtc->commit_lock);
2661
2662 new_crtc_state->commit->event = NULL;
2663 }
2664 }
2665
2666 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2667 WARN_ON(plane->state != old_plane_state);
2668
2669 old_plane_state->state = state;
2670 new_plane_state->state = NULL;
2671
2672 state->planes[i].state = old_plane_state;
2673 plane->state = new_plane_state;
2674 }
2675
2676 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
2677 WARN_ON(obj->state != old_obj_state);
2678
2679 old_obj_state->state = state;
2680 new_obj_state->state = NULL;
2681
2682 state->private_objs[i].state = old_obj_state;
2683 obj->state = new_obj_state;
2684 }
2685
2686 return 0;
2687 }
2688 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
2689
2690 /**
2691 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
2692 * @plane: plane object to update
2693 * @crtc: owning CRTC of owning plane
2694 * @fb: framebuffer to flip onto plane
2695 * @crtc_x: x offset of primary plane on crtc
2696 * @crtc_y: y offset of primary plane on crtc
2697 * @crtc_w: width of primary plane rectangle on crtc
2698 * @crtc_h: height of primary plane rectangle on crtc
2699 * @src_x: x offset of @fb for panning
2700 * @src_y: y offset of @fb for panning
2701 * @src_w: width of source rectangle in @fb
2702 * @src_h: height of source rectangle in @fb
2703 * @ctx: lock acquire context
2704 *
2705 * Provides a default plane update handler using the atomic driver interface.
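 *
 * Typically this is hooked up directly in the plane's function table together
 * with the other atomic helpers, e.g.::
 *
 *     static const struct drm_plane_funcs foo_plane_funcs = {
 *             .update_plane = drm_atomic_helper_update_plane,
 *             .disable_plane = drm_atomic_helper_disable_plane,
 *             .destroy = drm_plane_cleanup,
 *             .reset = drm_atomic_helper_plane_reset,
 *             .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 *             .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 *     };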
2706 *
2707 * RETURNS:
2708 * Zero on success, error code on failure
2709 */
2710 int drm_atomic_helper_update_plane(struct drm_plane *plane,
2711 struct drm_crtc *crtc,
2712 struct drm_framebuffer *fb,
2713 int crtc_x, int crtc_y,
2714 unsigned int crtc_w, unsigned int crtc_h,
2715 uint32_t src_x, uint32_t src_y,
2716 uint32_t src_w, uint32_t src_h,
2717 struct drm_modeset_acquire_ctx *ctx)
2718 {
2719 struct drm_atomic_state *state;
2720 struct drm_plane_state *plane_state;
2721 int ret = 0;
2722
2723 state = drm_atomic_state_alloc(plane->dev);
2724 if (!state)
2725 return -ENOMEM;
2726
2727 state->acquire_ctx = ctx;
2728 plane_state = drm_atomic_get_plane_state(state, plane);
2729 if (IS_ERR(plane_state)) {
2730 ret = PTR_ERR(plane_state);
2731 goto fail;
2732 }
2733
2734 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
2735 if (ret != 0)
2736 goto fail;
2737 drm_atomic_set_fb_for_plane(plane_state, fb);
2738 plane_state->crtc_x = crtc_x;
2739 plane_state->crtc_y = crtc_y;
2740 plane_state->crtc_w = crtc_w;
2741 plane_state->crtc_h = crtc_h;
2742 plane_state->src_x = src_x;
2743 plane_state->src_y = src_y;
2744 plane_state->src_w = src_w;
2745 plane_state->src_h = src_h;
2746
2747 if (plane == crtc->cursor)
2748 state->legacy_cursor_update = true;
2749
2750 ret = drm_atomic_commit(state);
2751 fail:
2752 drm_atomic_state_put(state);
2753 return ret;
2754 }
2755 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
2756
2757 /**
2758 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
2759 * @plane: plane to disable
2760 * @ctx: lock acquire context
2761 *
2762 * Provides a default plane disable handler using the atomic driver interface.
2763 *
2764 * RETURNS:
2765 * Zero on success, error code on failure
2766 */
2767 int drm_atomic_helper_disable_plane(struct drm_plane *plane,
2768 struct drm_modeset_acquire_ctx *ctx)
2769 {
2770 struct drm_atomic_state *state;
2771 struct drm_plane_state *plane_state;
2772 int ret = 0;
2773
2774 state = drm_atomic_state_alloc(plane->dev);
2775 if (!state)
2776 return -ENOMEM;
2777
2778 state->acquire_ctx = ctx;
2779 plane_state = drm_atomic_get_plane_state(state, plane);
2780 if (IS_ERR(plane_state)) {
2781 ret = PTR_ERR(plane_state);
2782 goto fail;
2783 }
2784
2785 if (plane_state->crtc && plane_state->crtc->cursor == plane)
2786 plane_state->state->legacy_cursor_update = true;
2787
2788 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
2789 if (ret != 0)
2790 goto fail;
2791
2792 ret = drm_atomic_commit(state);
2793 fail:
2794 drm_atomic_state_put(state);
2795 return ret;
2796 }
2797 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
2798
2799 /* just used from fb-helper and atomic-helper: */
2800 int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
2801 struct drm_plane_state *plane_state)
2802 {
2803 int ret;
2804
2805 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
2806 if (ret != 0)
2807 return ret;
2808
2809 drm_atomic_set_fb_for_plane(plane_state, NULL);
2810 plane_state->crtc_x = 0;
2811 plane_state->crtc_y = 0;
2812 plane_state->crtc_w = 0;
2813 plane_state->crtc_h = 0;
2814 plane_state->src_x = 0;
2815 plane_state->src_y = 0;
2816 plane_state->src_w = 0;
2817 plane_state->src_h = 0;
2818
2819 return 0;
2820 }
2821
2822 static int update_output_state(struct drm_atomic_state *state,
2823 struct drm_mode_set *set)
2824 {
2825 struct drm_device *dev = set->crtc->dev;
2826 struct drm_crtc *crtc;
2827 struct drm_crtc_state *new_crtc_state;
2828 struct drm_connector *connector;
2829 struct drm_connector_state *new_conn_state;
2830 int ret, i;
2831
2832 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2833 state->acquire_ctx);
2834 if (ret)
2835 return ret;
2836
2837 /* First disable all connectors on the target crtc. */
2838 ret = drm_atomic_add_affected_connectors(state, set->crtc);
2839 if (ret)
2840 return ret;
2841
2842 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2843 if (new_conn_state->crtc == set->crtc) {
2844 ret = drm_atomic_set_crtc_for_connector(new_conn_state,
2845 NULL);
2846 if (ret)
2847 return ret;
2848
2849 /* Make sure legacy setCrtc always re-trains */
2850 new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
2851 }
2852 }
2853
2854 /* Then set all connectors from set->connectors on the target crtc */
2855 for (i = 0; i < set->num_connectors; i++) {
2856 new_conn_state = drm_atomic_get_connector_state(state,
2857 set->connectors[i]);
2858 if (IS_ERR(new_conn_state))
2859 return PTR_ERR(new_conn_state);
2860
2861 ret = drm_atomic_set_crtc_for_connector(new_conn_state,
2862 set->crtc);
2863 if (ret)
2864 return ret;
2865 }
2866
2867 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2868 /* Don't update ->enable for the CRTC in the set_config request,
2869 * since a mismatch would indicate a bug in the upper layers.
2870 * The actual modeset code later on will catch any
2871 * inconsistencies here. */
2872 if (crtc == set->crtc)
2873 continue;
2874
2875 if (!new_crtc_state->connector_mask) {
2876 ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
2877 NULL);
2878 if (ret < 0)
2879 return ret;
2880
2881 new_crtc_state->active = false;
2882 }
2883 }
2884
2885 return 0;
2886 }
2887
2888 /**
2889 * drm_atomic_helper_set_config - set a new config from userspace
2890 * @set: mode set configuration
2891 * @ctx: lock acquisition context
2892 *
2893 * Provides a default crtc set_config handler using the atomic driver interface.
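 *
 * Like the other legacy entry points this is normally just plugged into the
 * CRTC's function table, e.g.::
 *
 *     static const struct drm_crtc_funcs foo_crtc_funcs = {
 *             .set_config = drm_atomic_helper_set_config,
 *             .page_flip = drm_atomic_helper_page_flip,
 *             .destroy = drm_crtc_cleanup,
 *             .reset = drm_atomic_helper_crtc_reset,
 *             .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *             .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *     };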
2894 *
2895 * NOTE: For backwards compatibility with old userspace this automatically
2896 * resets the "link-status" property to GOOD, to force any link
2897 * re-training. The SETCRTC ioctl does not define whether an update needs
2898 * a full modeset or just a plane update, hence we're allowed to do
2899 * that. See also drm_connector_set_link_status_property().
2900 *
2901 * Returns:
2902 * Returns 0 on success, negative errno numbers on failure.
2903 */
2904 int drm_atomic_helper_set_config(struct drm_mode_set *set,
2905 struct drm_modeset_acquire_ctx *ctx)
2906 {
2907 struct drm_atomic_state *state;
2908 struct drm_crtc *crtc = set->crtc;
2909 int ret = 0;
2910
2911 state = drm_atomic_state_alloc(crtc->dev);
2912 if (!state)
2913 return -ENOMEM;
2914
2915 state->acquire_ctx = ctx;
2916 ret = __drm_atomic_helper_set_config(set, state);
2917 if (ret != 0)
2918 goto fail;
2919
2920 ret = handle_conflicting_encoders(state, true);
2921 if (ret)
2922 goto fail;
2923
2924 ret = drm_atomic_commit(state);
2925
2926 fail:
2927 drm_atomic_state_put(state);
2928 return ret;
2929 }
2930 EXPORT_SYMBOL(drm_atomic_helper_set_config);
2931
2932 /* just used from fb-helper and atomic-helper: */
2933 int __drm_atomic_helper_set_config(struct drm_mode_set *set,
2934 struct drm_atomic_state *state)
2935 {
2936 struct drm_crtc_state *crtc_state;
2937 struct drm_plane_state *primary_state;
2938 struct drm_crtc *crtc = set->crtc;
2939 int hdisplay, vdisplay;
2940 int ret;
2941
2942 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2943 if (IS_ERR(crtc_state))
2944 return PTR_ERR(crtc_state);
2945
2946 primary_state = drm_atomic_get_plane_state(state, crtc->primary);
2947 if (IS_ERR(primary_state))
2948 return PTR_ERR(primary_state);
2949
2950 if (!set->mode) {
2951 WARN_ON(set->fb);
2952 WARN_ON(set->num_connectors);
2953
2954 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
2955 if (ret != 0)
2956 return ret;
2957
2958 crtc_state->active = false;
2959
2960 ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
2961 if (ret != 0)
2962 return ret;
2963
2964 drm_atomic_set_fb_for_plane(primary_state, NULL);
2965
2966 goto commit;
2967 }
2968
2969 WARN_ON(!set->fb);
2970 WARN_ON(!set->num_connectors);
2971
2972 ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
2973 if (ret != 0)
2974 return ret;
2975
2976 crtc_state->active = true;
2977
2978 ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
2979 if (ret != 0)
2980 return ret;
2981
2982 drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
2983
2984 drm_atomic_set_fb_for_plane(primary_state, set->fb);
2985 primary_state->crtc_x = 0;
2986 primary_state->crtc_y = 0;
2987 primary_state->crtc_w = hdisplay;
2988 primary_state->crtc_h = vdisplay;
2989 primary_state->src_x = set->x << 16;
2990 primary_state->src_y = set->y << 16;
2991 if (drm_rotation_90_or_270(primary_state->rotation)) {
2992 primary_state->src_w = vdisplay << 16;
2993 primary_state->src_h = hdisplay << 16;
2994 } else {
2995 primary_state->src_w = hdisplay << 16;
2996 primary_state->src_h = vdisplay << 16;
2997 }
2998
2999 commit:
3000 ret = update_output_state(state, set);
3001 if (ret)
3002 return ret;
3003
3004 return 0;
3005 }
3006
3007 static int __drm_atomic_helper_disable_all(struct drm_device *dev,
3008 struct drm_modeset_acquire_ctx *ctx,
3009 bool clean_old_fbs)
3010 {
3011 struct drm_atomic_state *state;
3012 struct drm_connector_state *conn_state;
3013 struct drm_connector *conn;
3014 struct drm_plane_state *plane_state;
3015 struct drm_plane *plane;
3016 struct drm_crtc_state *crtc_state;
3017 struct drm_crtc *crtc;
3018 int ret, i;
3019
3020 state = drm_atomic_state_alloc(dev);
3021 if (!state)
3022 return -ENOMEM;
3023
3024 state->acquire_ctx = ctx;
3025
3026 drm_for_each_crtc(crtc, dev) {
3027 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3028 if (IS_ERR(crtc_state)) {
3029 ret = PTR_ERR(crtc_state);
3030 goto free;
3031 }
3032
3033 crtc_state->active = false;
3034
3035 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3036 if (ret < 0)
3037 goto free;
3038
3039 ret = drm_atomic_add_affected_planes(state, crtc);
3040 if (ret < 0)
3041 goto free;
3042
3043 ret = drm_atomic_add_affected_connectors(state, crtc);
3044 if (ret < 0)
3045 goto free;
3046 }
3047
3048 for_each_new_connector_in_state(state, conn, conn_state, i) {
3049 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3050 if (ret < 0)
3051 goto free;
3052 }
3053
3054 for_each_new_plane_in_state(state, plane, plane_state, i) {
3055 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3056 if (ret < 0)
3057 goto free;
3058
3059 drm_atomic_set_fb_for_plane(plane_state, NULL);
3060 }
3061
3062 ret = drm_atomic_commit(state);
3063 free:
3064 drm_atomic_state_put(state);
3065 return ret;
3066 }
3067
3068 /**
3069 * drm_atomic_helper_disable_all - disable all currently active outputs
3070 * @dev: DRM device
3071 * @ctx: lock acquisition context
3072 *
3073 * Loops through all connectors, finding those that aren't turned off and then
3074 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3075 * that they are connected to.
3076 *
3077 * This is used for example in suspend/resume to disable all currently active
3078 * functions when suspending. If you just want to shut down everything at e.g.
3079 * driver unload, look at drm_atomic_helper_shutdown().
3080 *
3081 * Note that if callers haven't already acquired all modeset locks this might
3082 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3083 *
3084 * Returns:
3085 * 0 on success or a negative error code on failure.
3086 *
3087 * See also:
3088 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3089 * drm_atomic_helper_shutdown().
3090 */
3091 int drm_atomic_helper_disable_all(struct drm_device *dev,
3092 struct drm_modeset_acquire_ctx *ctx)
3093 {
3094 return __drm_atomic_helper_disable_all(dev, ctx, false);
3095 }
3096 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3097
3098 /**
3099 * drm_atomic_helper_shutdown - shutdown all CRTCs
3100 * @dev: DRM device
3101 *
3102 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
3103 * suspend should instead be handled with drm_atomic_helper_suspend(), since
3104 * that also takes a snapshot of the modeset state to be restored on resume.
3105 *
3106 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3107 * and it is the atomic version of drm_crtc_force_disable_all().
3108 */
3109 void drm_atomic_helper_shutdown(struct drm_device *dev)
3110 {
3111 struct drm_modeset_acquire_ctx ctx;
3112 int ret;
3113
3114 drm_modeset_acquire_init(&ctx, 0);
3115 while (1) {
3116 ret = drm_modeset_lock_all_ctx(dev, &ctx);
3117 if (!ret)
3118 ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
3119
3120 if (ret != -EDEADLK)
3121 break;
3122
3123 drm_modeset_backoff(&ctx);
3124 }
3125
3126 if (ret)
3127 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
3128
3129 drm_modeset_drop_locks(&ctx);
3130 drm_modeset_acquire_fini(&ctx);
3131 }
3132 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
3133
3134 /**
3135 * drm_atomic_helper_suspend - subsystem-level suspend helper
3136 * @dev: DRM device
3137 *
3138 * Duplicates the current atomic state, disables all active outputs and then
3139 * returns a pointer to the original atomic state to the caller. Drivers can
3140 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3141 * restore the output configuration that was active at the time the system
3142 * entered suspend.
3143 *
3144 * Note that it is potentially unsafe to use this. The atomic state object
3145 * returned by this function is assumed to be persistent. Drivers must ensure
3146 * that this holds true. Before calling this function, drivers must make sure
3147 * to suspend fbdev emulation so that nothing can be using the device.
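 *
 * A system-suspend hook could use it roughly like this (a sketch, assuming a
 * hypothetical foo driver that stores the state for resume)::
 *
 *     static int foo_pm_suspend(struct device *dev)
 *     {
 *             struct drm_device *drm = dev_get_drvdata(dev);
 *             struct foo_device *foo = to_foo_device(drm);
 *
 *             foo->suspend_state = drm_atomic_helper_suspend(drm);
 *             if (IS_ERR(foo->suspend_state))
 *                     return PTR_ERR(foo->suspend_state);
 *
 *             return 0;
 *     }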
3148 *
3149 * Returns:
3150 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3151 * encoded error code on failure. Drivers should store the returned atomic
3152 * state object and pass it to the drm_atomic_helper_resume() helper upon
3153 * resume.
3154 *
3155 * See also:
3156 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3157 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3158 */
3159 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3160 {
3161 struct drm_modeset_acquire_ctx ctx;
3162 struct drm_atomic_state *state;
3163 int err;
3164
3165 drm_modeset_acquire_init(&ctx, 0);
3166
3167 retry:
3168 err = drm_modeset_lock_all_ctx(dev, &ctx);
3169 if (err < 0) {
3170 state = ERR_PTR(err);
3171 goto unlock;
3172 }
3173
3174 state = drm_atomic_helper_duplicate_state(dev, &ctx);
3175 if (IS_ERR(state))
3176 goto unlock;
3177
3178 err = drm_atomic_helper_disable_all(dev, &ctx);
3179 if (err < 0) {
3180 drm_atomic_state_put(state);
3181 state = ERR_PTR(err);
3182 goto unlock;
3183 }
3184
3185 unlock:
3186 if (PTR_ERR(state) == -EDEADLK) {
3187 drm_modeset_backoff(&ctx);
3188 goto retry;
3189 }
3190
3191 drm_modeset_drop_locks(&ctx);
3192 drm_modeset_acquire_fini(&ctx);
3193 return state;
3194 }
3195 EXPORT_SYMBOL(drm_atomic_helper_suspend);
3196
3197 /**
3198 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3199 * @state: duplicated atomic state to commit
3200 * @ctx: pointer to acquire_ctx to use for commit.
3201 *
3202 * The state returned by drm_atomic_helper_duplicate_state() and
3203 * drm_atomic_helper_suspend() is partially invalid, and needs to
3204 * be fixed up before commit.
3205 *
3206 * Returns:
3207 * 0 on success or a negative error code on failure.
3208 *
3209 * See also:
3210 * drm_atomic_helper_suspend()
3211 */
3212 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3213 struct drm_modeset_acquire_ctx *ctx)
3214 {
3215 int i, ret;
3216 struct drm_plane *plane;
3217 struct drm_plane_state *new_plane_state;
3218 struct drm_connector *connector;
3219 struct drm_connector_state *new_conn_state;
3220 struct drm_crtc *crtc;
3221 struct drm_crtc_state *new_crtc_state;
3222
3223 state->acquire_ctx = ctx;
3224
3225 for_each_new_plane_in_state(state, plane, new_plane_state, i)
3226 state->planes[i].old_state = plane->state;
3227
3228 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3229 state->crtcs[i].old_state = crtc->state;
3230
3231 for_each_new_connector_in_state(state, connector, new_conn_state, i)
3232 state->connectors[i].old_state = connector->state;
3233
3234 ret = drm_atomic_commit(state);
3235
3236 state->acquire_ctx = NULL;
3237
3238 return ret;
3239 }
3240 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3241
3242 /**
3243 * drm_atomic_helper_resume - subsystem-level resume helper
3244 * @dev: DRM device
3245 * @state: atomic state to resume to
3246 *
3247 * Calls drm_mode_config_reset() to synchronize hardware and software states,
3248 * grabs all modeset locks and commits the atomic state object. This can be
3249 * used in conjunction with the drm_atomic_helper_suspend() helper to
3250 * implement suspend/resume for drivers that support atomic mode-setting.
3251 *
3252 * Returns:
3253 * 0 on success or a negative error code on failure.
3254 *
3255 * See also:
3256 * drm_atomic_helper_suspend()
3257 */
3258 int drm_atomic_helper_resume(struct drm_device *dev,
3259 struct drm_atomic_state *state)
3260 {
3261 struct drm_modeset_acquire_ctx ctx;
3262 int err;
3263
3264 drm_mode_config_reset(dev);
3265
3266 drm_modeset_acquire_init(&ctx, 0);
3267 while (1) {
3268 err = drm_modeset_lock_all_ctx(dev, &ctx);
3269 if (err)
3270 goto out;
3271
3272 err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3273 out:
3274 if (err != -EDEADLK)
3275 break;
3276
3277 drm_modeset_backoff(&ctx);
3278 }
3279
3280 drm_atomic_state_put(state);
3281 drm_modeset_drop_locks(&ctx);
3282 drm_modeset_acquire_fini(&ctx);
3283
3284 return err;
3285 }
3286 EXPORT_SYMBOL(drm_atomic_helper_resume);
3287
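/*
 * A minimal sketch of system suspend/resume built on drm_atomic_helper_suspend()
 * and drm_atomic_helper_resume(); struct foo_device, foo->drm and the
 * saved_state member are illustrative. As the drm_atomic_helper_suspend()
 * documentation notes, drivers typically also suspend fbdev emulation around
 * this:
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *		struct drm_atomic_state *state;
 *
 *		state = drm_atomic_helper_suspend(foo->drm);
 *		if (IS_ERR(state))
 *			return PTR_ERR(state);
 *
 *		// keep the snapshot around until resume
 *		foo->saved_state = state;
 *		return 0;
 *	}
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		// consumes the reference to the saved state
 *		return drm_atomic_helper_resume(foo->drm, foo->saved_state);
 *	}
 */
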
3288 static int page_flip_common(struct drm_atomic_state *state,
3289 struct drm_crtc *crtc,
3290 struct drm_framebuffer *fb,
3291 struct drm_pending_vblank_event *event,
3292 uint32_t flags)
3293 {
3294 struct drm_plane *plane = crtc->primary;
3295 struct drm_plane_state *plane_state;
3296 struct drm_crtc_state *crtc_state;
3297 int ret = 0;
3298
3299 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3300 if (IS_ERR(crtc_state))
3301 return PTR_ERR(crtc_state);
3302
3303 crtc_state->event = event;
3304 crtc_state->pageflip_flags = flags;
3305
3306 plane_state = drm_atomic_get_plane_state(state, plane);
3307 if (IS_ERR(plane_state))
3308 return PTR_ERR(plane_state);
3309
3310 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3311 if (ret != 0)
3312 return ret;
3313 drm_atomic_set_fb_for_plane(plane_state, fb);
3314
3315 /* Make sure we don't accidentally do a full modeset. */
3316 state->allow_modeset = false;
3317 if (!crtc_state->active) {
3318 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
3319 crtc->base.id, crtc->name);
3320 return -EINVAL;
3321 }
3322
3323 return ret;
3324 }
3325
3326 /**
3327 * drm_atomic_helper_page_flip - execute a legacy page flip
3328 * @crtc: DRM crtc
3329 * @fb: DRM framebuffer
3330 * @event: optional DRM event to signal upon completion
3331 * @flags: flip flags for non-vblank sync'ed updates
3332 * @ctx: lock acquisition context
3333 *
3334 * Provides a default &drm_crtc_funcs.page_flip implementation
3335 * using the atomic driver interface.
3336 *
3337 * Returns:
3338 * Returns 0 on success, negative errno numbers on failure.
3339 *
3340 * See also:
3341 * drm_atomic_helper_page_flip_target()
3342 */
3343 int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
3344 struct drm_framebuffer *fb,
3345 struct drm_pending_vblank_event *event,
3346 uint32_t flags,
3347 struct drm_modeset_acquire_ctx *ctx)
3348 {
3349 struct drm_plane *plane = crtc->primary;
3350 struct drm_atomic_state *state;
3351 int ret = 0;
3352
3353 state = drm_atomic_state_alloc(plane->dev);
3354 if (!state)
3355 return -ENOMEM;
3356
3357 state->acquire_ctx = ctx;
3358
3359 ret = page_flip_common(state, crtc, fb, event, flags);
3360 if (ret != 0)
3361 goto fail;
3362
3363 ret = drm_atomic_nonblocking_commit(state);
3364 fail:
3365 drm_atomic_state_put(state);
3366 return ret;
3367 }
3368 EXPORT_SYMBOL(drm_atomic_helper_page_flip);
3369
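/*
 * A minimal sketch of plugging this helper into a driver's &drm_crtc_funcs;
 * foo_crtc_funcs and foo_crtc_destroy are illustrative, and a real driver
 * usually fills in more hooks (vblank handling, gamma, ...):
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.destroy = foo_crtc_destroy,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};
 */
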
3370 /**
3371 * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
3372 * @crtc: DRM crtc
3373 * @fb: DRM framebuffer
3374 * @event: optional DRM event to signal upon completion
3375 * @flags: flip flags for non-vblank sync'ed updates
3376 * @target: the target vblank period in which the flip should take effect
3377 * @ctx: lock acquisition context
3378 *
3379 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
3380 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter to
3381 * specify the target vblank period in which the flip should take effect.
3382 *
3383 * Returns:
3384 * Returns 0 on success, negative errno numbers on failure.
3385 */
3386 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
3387 struct drm_framebuffer *fb,
3388 struct drm_pending_vblank_event *event,
3389 uint32_t flags,
3390 uint32_t target,
3391 struct drm_modeset_acquire_ctx *ctx)
3392 {
3393 struct drm_plane *plane = crtc->primary;
3394 struct drm_atomic_state *state;
3395 struct drm_crtc_state *crtc_state;
3396 int ret = 0;
3397
3398 state = drm_atomic_state_alloc(plane->dev);
3399 if (!state)
3400 return -ENOMEM;
3401
3402 state->acquire_ctx = ctx;
3403
3404 ret = page_flip_common(state, crtc, fb, event, flags);
3405 if (ret != 0)
3406 goto fail;
3407
3408 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3409 if (WARN_ON(!crtc_state)) {
3410 ret = -EINVAL;
3411 goto fail;
3412 }
3413 crtc_state->target_vblank = target;
3414
3415 ret = drm_atomic_nonblocking_commit(state);
3416 fail:
3417 drm_atomic_state_put(state);
3418 return ret;
3419 }
3420 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
3421
3422 /**
3423 * drm_atomic_helper_best_encoder - Helper for
3424 * &drm_connector_helper_funcs.best_encoder callback
3425 * @connector: Connector control structure
3426 *
3427 * This is a &drm_connector_helper_funcs.best_encoder callback helper for
3428 * connectors that support exactly 1 encoder, statically determined at driver
3429 * init time.
3430 */
3431 struct drm_encoder *
3432 drm_atomic_helper_best_encoder(struct drm_connector *connector)
3433 {
3434 WARN_ON(connector->encoder_ids[1]);
3435 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
3436 }
3437 EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
3438
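/*
 * A minimal sketch of using this helper for a connector with a single, fixed
 * encoder; foo_connector_get_modes is an illustrative driver callback:
 *
 *	static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
 *		.get_modes = foo_connector_get_modes,
 *		.best_encoder = drm_atomic_helper_best_encoder,
 *	};
 */
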
3439 /**
3440 * DOC: atomic state reset and initialization
3441 *
3442 * Both the drm core and the atomic helpers assume that there is always the full
3443 * and correct atomic software state for all connectors, CRTCs and planes
3444 * available, which is a bit of a problem on driver load and also after system
3445 * suspend. One way to solve this is to have a hardware state read-out
3446 * infrastructure which reconstructs the full software state (e.g. the i915
3447 * driver).
3448 *
3449 * The simpler solution is to just reset the software state to everything off,
3450 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
3451 * the atomic helpers provide default reset implementations for all hooks.
3452 *
3453 * On the upside, the precise state tracking of atomic simplifies system suspend
3454 * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
3455 * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
3456 * For other drivers the building blocks are split out, see the documentation
3457 * for these functions.
3458 */
3459
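/*
 * For drivers relying on the default reset hooks, the usual load-time sequence
 * is roughly the following sketch (error handling omitted, foo_modeset_init()
 * is an illustrative driver function):
 *
 *	// create CRTCs, planes, encoders and connectors first
 *	foo_modeset_init(dev);
 *
 *	// then reset all software state to "everything off"
 *	drm_mode_config_reset(dev);
 *
 *	drm_dev_register(dev, 0);
 */
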
3460 /**
3461 * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
3462 * @crtc: drm CRTC
3463 *
3464 * Resets the atomic state for @crtc by freeing the state pointer (which might
3465 * be NULL, e.g. at driver load time) and allocating a new empty state object.
3466 */
3467 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
3468 {
3469 if (crtc->state)
3470 __drm_atomic_helper_crtc_destroy_state(crtc->state);
3471
3472 kfree(crtc->state);
3473 crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
3474
3475 if (crtc->state)
3476 crtc->state->crtc = crtc;
3477 }
3478 EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
3479
3480 /**
3481 * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
3482 * @crtc: CRTC object
3483 * @state: atomic CRTC state
3484 *
3485 * Copies atomic state from a CRTC's current state and resets inferred values.
3486 * This is useful for drivers that subclass the CRTC state.
3487 */
3488 void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
3489 struct drm_crtc_state *state)
3490 {
3491 memcpy(state, crtc->state, sizeof(*state));
3492
3493 if (state->mode_blob)
3494 drm_property_blob_get(state->mode_blob);
3495 if (state->degamma_lut)
3496 drm_property_blob_get(state->degamma_lut);
3497 if (state->ctm)
3498 drm_property_blob_get(state->ctm);
3499 if (state->gamma_lut)
3500 drm_property_blob_get(state->gamma_lut);
3501 state->mode_changed = false;
3502 state->active_changed = false;
3503 state->planes_changed = false;
3504 state->connectors_changed = false;
3505 state->color_mgmt_changed = false;
3506 state->zpos_changed = false;
3507 state->commit = NULL;
3508 state->event = NULL;
3509 state->pageflip_flags = 0;
3510 }
3511 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
3512
3513 /**
3514 * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
3515 * @crtc: drm CRTC
3516 *
3517 * Default CRTC state duplicate hook for drivers which don't have their own
3518 * subclassed CRTC state structure.
3519 */
3520 struct drm_crtc_state *
3521 drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
3522 {
3523 struct drm_crtc_state *state;
3524
3525 if (WARN_ON(!crtc->state))
3526 return NULL;
3527
3528 state = kmalloc(sizeof(*state), GFP_KERNEL);
3529 if (state)
3530 __drm_atomic_helper_crtc_duplicate_state(crtc, state);
3531
3532 return state;
3533 }
3534 EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
3535
3536 /**
3537 * __drm_atomic_helper_crtc_destroy_state - release CRTC state
3538 * @state: CRTC state object to release
3539 *
3540 * Releases all resources stored in the CRTC state without actually freeing
3541 * the memory of the CRTC state. This is useful for drivers that subclass the
3542 * CRTC state.
3543 */
3544 void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
3545 {
3546 if (state->commit) {
3547 /*
3548 * In the event that a non-blocking commit returns
3549 * -ERESTARTSYS before the commit_tail work is queued, we will
3550 * have an extra reference to the commit object. Release it, if
3551 * the event has not been consumed by the worker.
3552 *
3553 * state->event may be freed, so we can't directly look at
3554 * state->event->base.completion.
3555 */
3556 if (state->event && state->commit->abort_completion)
3557 drm_crtc_commit_put(state->commit);
3558
3559 kfree(state->commit->event);
3560 state->commit->event = NULL;
3561
3562 drm_crtc_commit_put(state->commit);
3563 }
3564
3565 drm_property_blob_put(state->mode_blob);
3566 drm_property_blob_put(state->degamma_lut);
3567 drm_property_blob_put(state->ctm);
3568 drm_property_blob_put(state->gamma_lut);
3569 }
3570 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
3571
3572 /**
3573 * drm_atomic_helper_crtc_destroy_state - default state destroy hook
3574 * @crtc: drm CRTC
3575 * @state: CRTC state object to release
3576 *
3577 * Default CRTC state destroy hook for drivers which don't have their own
3578 * subclassed CRTC state structure.
3579 */
3580 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
3581 struct drm_crtc_state *state)
3582 {
3583 __drm_atomic_helper_crtc_destroy_state(state);
3584 kfree(state);
3585 }
3586 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
3587
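/*
 * A rough sketch of how a driver with subclassed CRTC state can build its
 * duplicate and destroy hooks on top of the __drm_atomic_helper_crtc_*()
 * functions above; struct foo_crtc_state and its dither_mode member are
 * illustrative:
 *
 *	struct foo_crtc_state {
 *		struct drm_crtc_state base;
 *		u32 dither_mode;	// driver-private member
 *	};
 *
 *	#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)
 *
 *	static struct drm_crtc_state *
 *	foo_crtc_duplicate_state(struct drm_crtc *crtc)
 *	{
 *		struct foo_crtc_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return NULL;
 *
 *		// copies the base struct only, driver members are copied by hand
 *		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 *		state->dither_mode = to_foo_crtc_state(crtc->state)->dither_mode;
 *
 *		return &state->base;
 *	}
 *
 *	static void foo_crtc_destroy_state(struct drm_crtc *crtc,
 *					   struct drm_crtc_state *state)
 *	{
 *		__drm_atomic_helper_crtc_destroy_state(state);
 *		kfree(to_foo_crtc_state(state));
 *	}
 */
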
3588 /**
3589 * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
3590 * @plane: drm plane
3591 *
3592 * Resets the atomic state for @plane by freeing the state pointer (which might
3593 * be NULL, e.g. at driver load time) and allocating a new empty state object.
3594 */
3595 void drm_atomic_helper_plane_reset(struct drm_plane *plane)
3596 {
3597 if (plane->state)
3598 __drm_atomic_helper_plane_destroy_state(plane->state);
3599
3600 kfree(plane->state);
3601 plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
3602
3603 if (plane->state) {
3604 plane->state->plane = plane;
3605 plane->state->rotation = DRM_MODE_ROTATE_0;
3606
3607 /* Reset the alpha value to fully opaque if it matters */
3608 if (plane->alpha_property)
3609 plane->state->alpha = plane->alpha_property->values[1];
3610 }
3611 }
3612 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
3613
3614 /**
3615 * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
3616 * @plane: plane object
3617 * @state: atomic plane state
3618 *
3619 * Copies atomic state from a plane's current state. This is useful for
3620 * drivers that subclass the plane state.
3621 */
3622 void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
3623 struct drm_plane_state *state)
3624 {
3625 memcpy(state, plane->state, sizeof(*state));
3626
3627 if (state->fb)
3628 drm_framebuffer_get(state->fb);
3629
3630 state->fence = NULL;
3631 state->commit = NULL;
3632 }
3633 EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
3634
3635 /**
3636 * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
3637 * @plane: drm plane
3638 *
3639 * Default plane state duplicate hook for drivers which don't have their own
3640 * subclassed plane state structure.
3641 */
3642 struct drm_plane_state *
3643 drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
3644 {
3645 struct drm_plane_state *state;
3646
3647 if (WARN_ON(!plane->state))
3648 return NULL;
3649
3650 state = kmalloc(sizeof(*state), GFP_KERNEL);
3651 if (state)
3652 __drm_atomic_helper_plane_duplicate_state(plane, state);
3653
3654 return state;
3655 }
3656 EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
3657
3658 /**
3659 * __drm_atomic_helper_plane_destroy_state - release plane state
3660 * @state: plane state object to release
3661 *
3662 * Releases all resources stored in the plane state without actually freeing
3663 * the memory of the plane state. This is useful for drivers that subclass the
3664 * plane state.
3665 */
3666 void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
3667 {
3668 if (state->fb)
3669 drm_framebuffer_put(state->fb);
3670
3671 if (state->fence)
3672 dma_fence_put(state->fence);
3673
3674 if (state->commit)
3675 drm_crtc_commit_put(state->commit);
3676 }
3677 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
3678
3679 /**
3680 * drm_atomic_helper_plane_destroy_state - default state destroy hook
3681 * @plane: drm plane
3682 * @state: plane state object to release
3683 *
3684 * Default plane state destroy hook for drivers which don't have their own
3685 * subclassed plane state structure.
3686 */
3687 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
3688 struct drm_plane_state *state)
3689 {
3690 __drm_atomic_helper_plane_destroy_state(state);
3691 kfree(state);
3692 }
3693 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
3694
3695 /**
3696 * __drm_atomic_helper_connector_reset - reset state on connector
3697 * @connector: drm connector
3698 * @conn_state: connector state to assign
3699 *
3700 * Initializes the newly allocated @conn_state and assigns it to
3701 * the &drm_connector->state pointer of @connector, usually required when
3702 * initializing the drivers or when called from the &drm_connector_funcs.reset
3703 * hook.
3704 *
3705 * This is useful for drivers that subclass the connector state.
3706 */
3707 void
3708 __drm_atomic_helper_connector_reset(struct drm_connector *connector,
3709 struct drm_connector_state *conn_state)
3710 {
3711 if (conn_state)
3712 conn_state->connector = connector;
3713
3714 connector->state = conn_state;
3715 }
3716 EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
3717
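/*
 * A rough sketch of a driver &drm_connector_funcs.reset hook with subclassed
 * connector state; struct foo_connector_state and its bpc member are
 * illustrative, and the kfree() relies on the base struct being the first
 * member:
 *
 *	struct foo_connector_state {
 *		struct drm_connector_state base;
 *		unsigned int bpc;	// driver-private member
 *	};
 *
 *	static void foo_connector_reset(struct drm_connector *connector)
 *	{
 *		struct foo_connector_state *state;
 *
 *		if (connector->state) {
 *			__drm_atomic_helper_connector_destroy_state(connector->state);
 *			kfree(connector->state);
 *		}
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (state)
 *			state->bpc = 8;	// driver-specific default
 *
 *		__drm_atomic_helper_connector_reset(connector,
 *						    state ? &state->base : NULL);
 *	}
 */
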
3718 /**
3719 * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
3720 * @connector: drm connector
3721 *
3722 * Resets the atomic state for @connector by freeing the state pointer (which
3723 * might be NULL, e.g. at driver load time) and allocating a new empty state
3724 * object.
3725 */
3726 void drm_atomic_helper_connector_reset(struct drm_connector *connector)
3727 {
3728 struct drm_connector_state *conn_state =
3729 kzalloc(sizeof(*conn_state), GFP_KERNEL);
3730
3731 if (connector->state)
3732 __drm_atomic_helper_connector_destroy_state(connector->state);
3733
3734 kfree(connector->state);
3735 __drm_atomic_helper_connector_reset(connector, conn_state);
3736 }
3737 EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
3738
3739 /**
3740 * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
3741 * @connector: connector object
3742 * @state: atomic connector state
3743 *
3744 * Copies atomic state from a connector's current state. This is useful for
3745 * drivers that subclass the connector state.
3746 */
3747 void
3748 __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
3749 struct drm_connector_state *state)
3750 {
3751 memcpy(state, connector->state, sizeof(*state));
3752 if (state->crtc)
3753 drm_connector_get(connector);
3754 state->commit = NULL;
3755
3756 /* Don't copy over a writeback job, they are used only once */
3757 state->writeback_job = NULL;
3758 }
3759 EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
3760
3761 /**
3762 * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
3763 * @connector: drm connector
3764 *
3765 * Default connector state duplicate hook for drivers which don't have their own
3766 * subclassed connector state structure.
3767 */
3768 struct drm_connector_state *
3769 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
3770 {
3771 struct drm_connector_state *state;
3772
3773 if (WARN_ON(!connector->state))
3774 return NULL;
3775
3776 state = kmalloc(sizeof(*state), GFP_KERNEL);
3777 if (state)
3778 __drm_atomic_helper_connector_duplicate_state(connector, state);
3779
3780 return state;
3781 }
3782 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
3783
3784 /**
3785 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3786 * @dev: DRM device
3787 * @ctx: lock acquisition context
3788 *
3789 * Makes a copy of the current atomic state by looping over all objects and
3790 * duplicating their respective states. This is used for example by suspend/
3791 * resume support code to save the state prior to suspend such that it can
3792 * be restored upon resume.
3793 *
3794 * Note that this treats atomic state as persistent between save and restore.
3795 * Drivers must make sure that this is possible and won't result in confusion
3796 * or erroneous behaviour.
3797 *
3798 * Note that if callers haven't already acquired all modeset locks this might
3799 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3800 *
3801 * Returns:
3802 * A pointer to the copy of the atomic state object on success or an
3803 * ERR_PTR()-encoded error code on failure.
3804 *
3805 * See also:
3806 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3807 */
3808 struct drm_atomic_state *
3809 drm_atomic_helper_duplicate_state(struct drm_device *dev,
3810 struct drm_modeset_acquire_ctx *ctx)
3811 {
3812 struct drm_atomic_state *state;
3813 struct drm_connector *conn;
3814 struct drm_connector_list_iter conn_iter;
3815 struct drm_plane *plane;
3816 struct drm_crtc *crtc;
3817 int err = 0;
3818
3819 state = drm_atomic_state_alloc(dev);
3820 if (!state)
3821 return ERR_PTR(-ENOMEM);
3822
3823 state->acquire_ctx = ctx;
3824
3825 drm_for_each_crtc(crtc, dev) {
3826 struct drm_crtc_state *crtc_state;
3827
3828 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3829 if (IS_ERR(crtc_state)) {
3830 err = PTR_ERR(crtc_state);
3831 goto free;
3832 }
3833 }
3834
3835 drm_for_each_plane(plane, dev) {
3836 struct drm_plane_state *plane_state;
3837
3838 plane_state = drm_atomic_get_plane_state(state, plane);
3839 if (IS_ERR(plane_state)) {
3840 err = PTR_ERR(plane_state);
3841 goto free;
3842 }
3843 }
3844
3845 drm_connector_list_iter_begin(dev, &conn_iter);
3846 drm_for_each_connector_iter(conn, &conn_iter) {
3847 struct drm_connector_state *conn_state;
3848
3849 conn_state = drm_atomic_get_connector_state(state, conn);
3850 if (IS_ERR(conn_state)) {
3851 err = PTR_ERR(conn_state);
3852 drm_connector_list_iter_end(&conn_iter);
3853 goto free;
3854 }
3855 }
3856 drm_connector_list_iter_end(&conn_iter);
3857
3858 /* clear the acquire context so that it isn't accidentally reused */
3859 state->acquire_ctx = NULL;
3860
3861 free:
3862 if (err < 0) {
3863 drm_atomic_state_put(state);
3864 state = ERR_PTR(err);
3865 }
3866
3867 return state;
3868 }
3869 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3870
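/*
 * A minimal sketch of the deadlock/backoff dance callers need when they do not
 * already hold all modeset locks; foo_snapshot_state is an illustrative name:
 *
 *	static struct drm_atomic_state *foo_snapshot_state(struct drm_device *dev)
 *	{
 *		struct drm_modeset_acquire_ctx ctx;
 *		struct drm_atomic_state *state;
 *
 *		drm_modeset_acquire_init(&ctx, 0);
 *
 *	retry:
 *		state = drm_atomic_helper_duplicate_state(dev, &ctx);
 *		if (PTR_ERR(state) == -EDEADLK) {
 *			drm_modeset_backoff(&ctx);
 *			goto retry;
 *		}
 *
 *		drm_modeset_drop_locks(&ctx);
 *		drm_modeset_acquire_fini(&ctx);
 *
 *		return state;
 *	}
 */
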
3871 /**
3872 * __drm_atomic_helper_connector_destroy_state - release connector state
3873 * @state: connector state object to release
3874 *
3875 * Releases all resources stored in the connector state without actually
3876 * freeing the memory of the connector state. This is useful for drivers that
3877 * subclass the connector state.
3878 */
3879 void
3880 __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
3881 {
3882 if (state->crtc)
3883 drm_connector_put(state->connector);
3884
3885 if (state->commit)
3886 drm_crtc_commit_put(state->commit);
3887 }
3888 EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
3889
3890 /**
3891 * drm_atomic_helper_connector_destroy_state - default state destroy hook
3892 * @connector: drm connector
3893 * @state: connector state object to release
3894 *
3895 * Default connector state destroy hook for drivers which don't have their own
3896 * subclassed connector state structure.
3897 */
3898 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
3899 struct drm_connector_state *state)
3900 {
3901 __drm_atomic_helper_connector_destroy_state(state);
3902 kfree(state);
3903 }
3904 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
3905
3906 /**
3907 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
3908 * @crtc: CRTC object
3909 * @red: red correction table
3910 * @green: green correction table
3911 * @blue: blue correction table
3912 * @size: size of the tables
3913 * @ctx: lock acquire context
3914 *
3915 * Implements support for legacy gamma correction table for drivers
3916 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
3917 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
3918 * how the atomic color management and gamma tables work.
3919 */
3920 int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
3921 u16 *red, u16 *green, u16 *blue,
3922 uint32_t size,
3923 struct drm_modeset_acquire_ctx *ctx)
3924 {
3925 struct drm_device *dev = crtc->dev;
3926 struct drm_atomic_state *state;
3927 struct drm_crtc_state *crtc_state;
3928 struct drm_property_blob *blob = NULL;
3929 struct drm_color_lut *blob_data;
3930 int i, ret = 0;
3931 bool replaced;
3932
3933 state = drm_atomic_state_alloc(crtc->dev);
3934 if (!state)
3935 return -ENOMEM;
3936
3937 blob = drm_property_create_blob(dev,
3938 sizeof(struct drm_color_lut) * size,
3939 NULL);
3940 if (IS_ERR(blob)) {
3941 ret = PTR_ERR(blob);
3942 blob = NULL;
3943 goto fail;
3944 }
3945
3946 /* Prepare GAMMA_LUT with the legacy values. */
3947 blob_data = blob->data;
3948 for (i = 0; i < size; i++) {
3949 blob_data[i].red = red[i];
3950 blob_data[i].green = green[i];
3951 blob_data[i].blue = blue[i];
3952 }
3953
3954 state->acquire_ctx = ctx;
3955 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3956 if (IS_ERR(crtc_state)) {
3957 ret = PTR_ERR(crtc_state);
3958 goto fail;
3959 }
3960
3961 /* Reset DEGAMMA_LUT and CTM properties. */
3962 replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
3963 replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
3964 replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
3965 crtc_state->color_mgmt_changed |= replaced;
3966
3967 ret = drm_atomic_commit(state);
3968
3969 fail:
3970 drm_atomic_state_put(state);
3971 drm_property_blob_put(blob);
3972 return ret;
3973 }
3974 EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
3975
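/*
 * A minimal sketch of wiring this up in a driver (error handling omitted); the
 * gamma size of 256 is illustrative and hardware-dependent, foo_crtc_funcs is
 * an illustrative vtable with its remaining hooks elided:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		// ...
 *		.gamma_set = drm_atomic_helper_legacy_gamma_set,
 *	};
 *
 *	// at CRTC init time, expose GAMMA_LUT and the legacy gamma table
 *	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
 *	drm_mode_crtc_set_gamma_size(crtc, 256);
 */
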
3976 /**
3977 * __drm_atomic_helper_private_duplicate_state - copy atomic private state
3978 * @obj: private object
3979 * @state: new private object state
3980 *
3981 * Copies atomic state from a private object's current state.
3982 * This is useful for drivers that subclass the private state.
3983 */
3984 void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
3985 struct drm_private_state *state)
3986 {
3987 memcpy(state, obj->state, sizeof(*state));
3988 }
3989 EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
3990
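/*
 * A rough sketch of a driver-private state object built on this helper; struct
 * foo_bus_state, its allocated_bw member and the foo_bus_* functions are
 * illustrative:
 *
 *	struct foo_bus_state {
 *		struct drm_private_state base;
 *		unsigned int allocated_bw;	// driver-private member
 *	};
 *
 *	#define to_foo_bus_state(s) container_of(s, struct foo_bus_state, base)
 *
 *	static struct drm_private_state *
 *	foo_bus_duplicate_state(struct drm_private_obj *obj)
 *	{
 *		struct foo_bus_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return NULL;
 *
 *		// copies the base struct, driver members are copied by hand
 *		__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 *		state->allocated_bw = to_foo_bus_state(obj->state)->allocated_bw;
 *
 *		return &state->base;
 *	}
 *
 *	static void foo_bus_destroy_state(struct drm_private_obj *obj,
 *					  struct drm_private_state *state)
 *	{
 *		kfree(to_foo_bus_state(state));
 *	}
 *
 *	static const struct drm_private_state_funcs foo_bus_state_funcs = {
 *		.atomic_duplicate_state = foo_bus_duplicate_state,
 *		.atomic_destroy_state = foo_bus_destroy_state,
 *	};
 */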