1 /*
2  * Copyright (C) 2014 Red Hat
3  * Copyright (C) 2014 Intel Corp.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  * Rob Clark <robdclark@gmail.com>
25  * Daniel Vetter <daniel.vetter@ffwll.ch>
26  */
27 
28 
29 #include <drm/drmP.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_mode.h>
32 #include <drm/drm_plane_helper.h>
33 #include <linux/sync_file.h>
34 
35 #include "drm_crtc_internal.h"
36 
37 static void crtc_commit_free(struct kref *kref)
38 {
39 	struct drm_crtc_commit *commit =
40 		container_of(kref, struct drm_crtc_commit, ref);
41 
42 	kfree(commit);
43 }
44 
45 void drm_crtc_commit_put(struct drm_crtc_commit *commit)
46 {
47 	kref_put(&commit->ref, crtc_commit_free);
48 }
49 EXPORT_SYMBOL(drm_crtc_commit_put);
50 
51 /**
52  * drm_atomic_state_default_release -
53  * release memory initialized by drm_atomic_state_init
54  * @state: atomic state
55  *
56  * Free all the memory allocated by drm_atomic_state_init.
57  * This is useful for drivers that subclass the atomic state.
58  */
59 void drm_atomic_state_default_release(struct drm_atomic_state *state)
60 {
61 	kfree(state->connectors);
62 	kfree(state->crtcs);
63 	kfree(state->planes);
64 }
65 EXPORT_SYMBOL(drm_atomic_state_default_release);
66 
67 /**
68  * drm_atomic_state_init - init new atomic state
69  * @dev: DRM device
70  * @state: atomic state
71  *
72  * Default implementation for filling in a new atomic state.
73  * This is useful for drivers that subclass the atomic state.
74  */
75 int
76 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
77 {
78 	/* TODO legacy paths should maybe do a better job about
79 	 * setting this appropriately?
80 	 */
81 	state->allow_modeset = true;
82 
83 	state->crtcs = kcalloc(dev->mode_config.num_crtc,
84 			       sizeof(*state->crtcs), GFP_KERNEL);
85 	if (!state->crtcs)
86 		goto fail;
87 	state->planes = kcalloc(dev->mode_config.num_total_plane,
88 				sizeof(*state->planes), GFP_KERNEL);
89 	if (!state->planes)
90 		goto fail;
91 
92 	state->dev = dev;
93 
94 	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
95 
96 	return 0;
97 fail:
98 	drm_atomic_state_default_release(state);
99 	return -ENOMEM;
100 }
101 EXPORT_SYMBOL(drm_atomic_state_init);
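/*
 * Illustrative sketch, not part of this file: how a hypothetical driver might
 * subclass the atomic state, reusing drm_atomic_state_init() and
 * drm_atomic_state_default_release() from its mode_config hooks. All foo_*
 * names and the extra member are made up for the example.
 */
struct foo_atomic_state {
	struct drm_atomic_state base;
	bool needs_bandwidth_check;	/* driver-private example member */
};

static struct drm_atomic_state *foo_atomic_state_alloc(struct drm_device *dev)
{
	struct foo_atomic_state *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	if (drm_atomic_state_init(dev, &foo->base) < 0) {
		kfree(foo);
		return NULL;
	}

	return &foo->base;
}

static void foo_atomic_state_free(struct drm_atomic_state *state)
{
	struct foo_atomic_state *foo =
		container_of(state, struct foo_atomic_state, base);

	/* drm_atomic_state_free() has already cleared the state at this point */
	drm_atomic_state_default_release(state);
	kfree(foo);
}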
102 
103 /**
104  * drm_atomic_state_alloc - allocate atomic state
105  * @dev: DRM device
106  *
107  * This allocates an empty atomic state to track updates.
108  */
109 struct drm_atomic_state *
110 drm_atomic_state_alloc(struct drm_device *dev)
111 {
112 	struct drm_mode_config *config = &dev->mode_config;
113 	struct drm_atomic_state *state;
114 
115 	if (!config->funcs->atomic_state_alloc) {
116 		state = kzalloc(sizeof(*state), GFP_KERNEL);
117 		if (!state)
118 			return NULL;
119 		if (drm_atomic_state_init(dev, state) < 0) {
120 			kfree(state);
121 			return NULL;
122 		}
123 		return state;
124 	}
125 
126 	return config->funcs->atomic_state_alloc(dev);
127 }
128 EXPORT_SYMBOL(drm_atomic_state_alloc);
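/*
 * Illustrative sketch, not part of this file: the boilerplate a kernel-internal
 * caller typically goes through before pulling object states into an update.
 * The example_ prefix marks made-up code; drm_modeset_acquire_init() is the
 * regular helper from drm_modeset_lock.h.
 */
static struct drm_atomic_state *
example_begin_update(struct drm_device *dev,
		     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;

	drm_modeset_acquire_init(ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return NULL;

	/* every drm_atomic_get_*_state() call below locks through this ctx */
	state->acquire_ctx = ctx;

	return state;
}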
129 
130 /**
131  * drm_atomic_state_default_clear - clear base atomic state
132  * @state: atomic state
133  *
134  * Default implementation for clearing atomic state.
135  * This is useful for drivers that subclass the atomic state.
136  */
137 void drm_atomic_state_default_clear(struct drm_atomic_state *state)
138 {
139 	struct drm_device *dev = state->dev;
140 	struct drm_mode_config *config = &dev->mode_config;
141 	int i;
142 
143 	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
144 
145 	for (i = 0; i < state->num_connector; i++) {
146 		struct drm_connector *connector = state->connectors[i].ptr;
147 
148 		if (!connector)
149 			continue;
150 
151 		connector->funcs->atomic_destroy_state(connector,
152 						       state->connectors[i].state);
153 		state->connectors[i].ptr = NULL;
154 		state->connectors[i].state = NULL;
155 		drm_connector_unreference(connector);
156 	}
157 
158 	for (i = 0; i < config->num_crtc; i++) {
159 		struct drm_crtc *crtc = state->crtcs[i].ptr;
160 
161 		if (!crtc)
162 			continue;
163 
164 		crtc->funcs->atomic_destroy_state(crtc,
165 						  state->crtcs[i].state);
166 
167 		if (state->crtcs[i].commit) {
168 			kfree(state->crtcs[i].commit->event);
169 			state->crtcs[i].commit->event = NULL;
170 			drm_crtc_commit_put(state->crtcs[i].commit);
171 		}
172 
173 		state->crtcs[i].commit = NULL;
174 		state->crtcs[i].ptr = NULL;
175 		state->crtcs[i].state = NULL;
176 	}
177 
178 	for (i = 0; i < config->num_total_plane; i++) {
179 		struct drm_plane *plane = state->planes[i].ptr;
180 
181 		if (!plane)
182 			continue;
183 
184 		plane->funcs->atomic_destroy_state(plane,
185 						   state->planes[i].state);
186 		state->planes[i].ptr = NULL;
187 		state->planes[i].state = NULL;
188 	}
189 }
190 EXPORT_SYMBOL(drm_atomic_state_default_clear);
191 
192 /**
193  * drm_atomic_state_clear - clear state object
194  * @state: atomic state
195  *
196  * When the w/w mutex algorithm detects a deadlock we need to back off and drop
197  * all locks. So someone else could sneak in and change the current modeset
198  * configuration. Which means that all the state assembled in @state is no
199  * longer an atomic update to the current state, but to some arbitrary earlier
200  * state. Which could break assumptions the driver's ->atomic_check likely
201  * relies on.
202  *
203  * Hence we must clear all cached state and completely start over, using this
204  * function.
205  */
206 void drm_atomic_state_clear(struct drm_atomic_state *state)
207 {
208 	struct drm_device *dev = state->dev;
209 	struct drm_mode_config *config = &dev->mode_config;
210 
211 	if (config->funcs->atomic_state_clear)
212 		config->funcs->atomic_state_clear(state);
213 	else
214 		drm_atomic_state_default_clear(state);
215 }
216 EXPORT_SYMBOL(drm_atomic_state_clear);
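/*
 * Illustrative sketch, not part of this file: the -EDEADLK backoff dance the
 * comment above describes. On deadlock the caller clears the state, backs off
 * and builds the update again from scratch. The example_ prefix marks made-up
 * code; the CRTC tweak merely stands in for a real update.
 */
static int example_check_with_backoff(struct drm_atomic_state *state,
				      struct drm_modeset_acquire_ctx *ctx,
				      struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;
	int ret;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
	} else {
		crtc_state->active = true;
		ret = drm_atomic_check_only(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return ret;
}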
217 
218 /**
219  * drm_atomic_state_free - free all memory for an atomic state
220  * @state: atomic state to deallocate
221  *
222  * This frees all memory associated with an atomic state, including all the
223  * per-object state for planes, crtcs and connectors.
224  */
225 void drm_atomic_state_free(struct drm_atomic_state *state)
226 {
227 	struct drm_device *dev;
228 	struct drm_mode_config *config;
229 
230 	if (!state)
231 		return;
232 
233 	dev = state->dev;
234 	config = &dev->mode_config;
235 
236 	drm_atomic_state_clear(state);
237 
238 	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
239 
240 	if (config->funcs->atomic_state_free) {
241 		config->funcs->atomic_state_free(state);
242 	} else {
243 		drm_atomic_state_default_release(state);
244 		kfree(state);
245 	}
246 }
247 EXPORT_SYMBOL(drm_atomic_state_free);
248 
249 /**
250  * drm_atomic_get_crtc_state - get crtc state
251  * @state: global atomic state object
252  * @crtc: crtc to get state object for
253  *
254  * This function returns the crtc state for the given crtc, allocating it if
255  * needed. It will also grab the relevant crtc lock to make sure that the state
256  * is consistent.
257  *
258  * Returns:
259  *
260  * Either the allocated state or the error code encoded into the pointer. When
261  * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
262  * entire atomic sequence must be restarted. All other errors are fatal.
263  */
264 struct drm_crtc_state *
265 drm_atomic_get_crtc_state(struct drm_atomic_state *state,
266 			  struct drm_crtc *crtc)
267 {
268 	int ret, index = drm_crtc_index(crtc);
269 	struct drm_crtc_state *crtc_state;
270 
271 	WARN_ON(!state->acquire_ctx);
272 
273 	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
274 	if (crtc_state)
275 		return crtc_state;
276 
277 	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
278 	if (ret)
279 		return ERR_PTR(ret);
280 
281 	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
282 	if (!crtc_state)
283 		return ERR_PTR(-ENOMEM);
284 
285 	state->crtcs[index].state = crtc_state;
286 	state->crtcs[index].ptr = crtc;
287 	crtc_state->state = state;
288 
289 	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
290 			 crtc->base.id, crtc->name, crtc_state, state);
291 
292 	return crtc_state;
293 }
294 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
295 
296 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
297 				   struct drm_crtc *crtc, s32 __user *fence_ptr)
298 {
299 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
300 }
301 
302 static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
303 					  struct drm_crtc *crtc)
304 {
305 	s32 __user *fence_ptr;
306 
307 	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
308 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
309 
310 	return fence_ptr;
311 }
312 
313 /**
314  * drm_atomic_set_mode_for_crtc - set mode for CRTC
315  * @state: the CRTC whose incoming state to update
316  * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
317  *
318  * Set a mode (originating from the kernel) on the desired CRTC state. Does
319  * not change any other state properties, including enable, active, or
320  * mode_changed.
321  *
322  * RETURNS:
323  * Zero on success, error code on failure. Cannot return -EDEADLK.
324  */
325 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
326 				 struct drm_display_mode *mode)
327 {
328 	struct drm_mode_modeinfo umode;
329 
330 	/* Early return for no change. */
331 	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
332 		return 0;
333 
334 	drm_property_unreference_blob(state->mode_blob);
335 	state->mode_blob = NULL;
336 
337 	if (mode) {
338 		drm_mode_convert_to_umode(&umode, mode);
339 		state->mode_blob =
340 			drm_property_create_blob(state->crtc->dev,
341 		                                 sizeof(umode),
342 		                                 &umode);
343 		if (IS_ERR(state->mode_blob))
344 			return PTR_ERR(state->mode_blob);
345 
346 		drm_mode_copy(&state->mode, mode);
347 		state->enable = true;
348 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
349 				 mode->name, state);
350 	} else {
351 		memset(&state->mode, 0, sizeof(state->mode));
352 		state->enable = false;
353 		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
354 				 state);
355 	}
356 
357 	return 0;
358 }
359 EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
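/*
 * Illustrative sketch, not part of this file: using the NULL case documented
 * above to shut a CRTC down from kernel code. example_disable_crtc() is a
 * made-up name; a real disable would also have to deal with the planes and
 * connectors on that CRTC.
 */
static int example_disable_crtc(struct drm_atomic_state *state,
				struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* a NULL mode releases the mode blob and clears state->enable */
	ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
	if (ret)
		return ret;

	crtc_state->active = false;

	return 0;
}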
360 
361 /**
362  * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
363  * @state: the CRTC whose incoming state to update
364  * @blob: pointer to blob property to use for mode
365  *
366  * Set a mode (originating from a blob property) on the desired CRTC state.
367  * This function will take a reference on the blob property for the CRTC state,
368  * and release the reference held on the state's existing mode property, if any
369  * was set.
370  *
371  * RETURNS:
372  * Zero on success, error code on failure. Cannot return -EDEADLK.
373  */
374 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
375                                       struct drm_property_blob *blob)
376 {
377 	if (blob == state->mode_blob)
378 		return 0;
379 
380 	drm_property_unreference_blob(state->mode_blob);
381 	state->mode_blob = NULL;
382 
383 	memset(&state->mode, 0, sizeof(state->mode));
384 
385 	if (blob) {
386 		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
387 		    drm_mode_convert_umode(&state->mode,
388 		                           (const struct drm_mode_modeinfo *)
389 		                            blob->data))
390 			return -EINVAL;
391 
392 		state->mode_blob = drm_property_reference_blob(blob);
393 		state->enable = true;
394 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
395 				 state->mode.name, state);
396 	} else {
397 		state->enable = false;
398 		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
399 				 state);
400 	}
401 
402 	return 0;
403 }
404 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
405 
406 /**
407  * drm_atomic_replace_property_blob - replace a blob property
408  * @blob: a pointer to the member blob to be replaced
409  * @new_blob: the new blob to replace with
410  * @replaced: whether the blob has been replaced
411  *
412  * This helper has no return value; it sets *@replaced when the blob
413  * pointer was actually changed.
414  */
415 static void
416 drm_atomic_replace_property_blob(struct drm_property_blob **blob,
417 				 struct drm_property_blob *new_blob,
418 				 bool *replaced)
419 {
420 	struct drm_property_blob *old_blob = *blob;
421 
422 	if (old_blob == new_blob)
423 		return;
424 
425 	drm_property_unreference_blob(old_blob);
426 	if (new_blob)
427 		drm_property_reference_blob(new_blob);
428 	*blob = new_blob;
429 	*replaced = true;
430 
431 	return;
432 }
433 
434 static int
435 drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
436 					 struct drm_property_blob **blob,
437 					 uint64_t blob_id,
438 					 ssize_t expected_size,
439 					 bool *replaced)
440 {
441 	struct drm_property_blob *new_blob = NULL;
442 
443 	if (blob_id != 0) {
444 		new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
445 		if (new_blob == NULL)
446 			return -EINVAL;
447 
448 		if (expected_size > 0 && expected_size != new_blob->length) {
449 			drm_property_unreference_blob(new_blob);
450 			return -EINVAL;
451 		}
452 	}
453 
454 	drm_atomic_replace_property_blob(blob, new_blob, replaced);
455 	drm_property_unreference_blob(new_blob);
456 
457 	return 0;
458 }
459 
460 /**
461  * drm_atomic_crtc_set_property - set property on CRTC
462  * @crtc: the drm CRTC to set a property on
463  * @state: the state object to update with the new property value
464  * @property: the property to set
465  * @val: the new property value
466  *
467  * Use this instead of calling crtc->atomic_set_property directly.
468  * This function handles generic/core properties and calls out to
469  * driver's ->atomic_set_property() for driver properties.  To ensure
470  * consistent behavior you must call this function rather than the
471  * driver hook directly.
472  *
473  * RETURNS:
474  * Zero on success, error code on failure
475  */
476 int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
477 		struct drm_crtc_state *state, struct drm_property *property,
478 		uint64_t val)
479 {
480 	struct drm_device *dev = crtc->dev;
481 	struct drm_mode_config *config = &dev->mode_config;
482 	bool replaced = false;
483 	int ret;
484 
485 	if (property == config->prop_active)
486 		state->active = val;
487 	else if (property == config->prop_mode_id) {
488 		struct drm_property_blob *mode =
489 			drm_property_lookup_blob(dev, val);
490 		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
491 		drm_property_unreference_blob(mode);
492 		return ret;
493 	} else if (property == config->degamma_lut_property) {
494 		ret = drm_atomic_replace_property_blob_from_id(crtc,
495 					&state->degamma_lut,
496 					val,
497 					-1,
498 					&replaced);
499 		state->color_mgmt_changed |= replaced;
500 		return ret;
501 	} else if (property == config->ctm_property) {
502 		ret = drm_atomic_replace_property_blob_from_id(crtc,
503 					&state->ctm,
504 					val,
505 					sizeof(struct drm_color_ctm),
506 					&replaced);
507 		state->color_mgmt_changed |= replaced;
508 		return ret;
509 	} else if (property == config->gamma_lut_property) {
510 		ret = drm_atomic_replace_property_blob_from_id(crtc,
511 					&state->gamma_lut,
512 					val,
513 					-1,
514 					&replaced);
515 		state->color_mgmt_changed |= replaced;
516 		return ret;
517 	} else if (property == config->prop_out_fence_ptr) {
518 		s32 __user *fence_ptr = u64_to_user_ptr(val);
519 
520 		if (!fence_ptr)
521 			return 0;
522 
523 		if (put_user(-1, fence_ptr))
524 			return -EFAULT;
525 
526 		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
527 	} else if (crtc->funcs->atomic_set_property)
528 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
529 	else
530 		return -EINVAL;
531 
532 	return 0;
533 }
534 EXPORT_SYMBOL(drm_atomic_crtc_set_property);
535 
536 /**
537  * drm_atomic_crtc_get_property - get property value from CRTC state
538  * @crtc: the drm CRTC to set a property on
539  * @state: the state object to get the property value from
540  * @property: the property to set
541  * @val: return location for the property value
542  *
543  * This function handles generic/core properties and calls out to
544  * driver's ->atomic_get_property() for driver properties.  To ensure
545  * consistent behavior you must call this function rather than the
546  * driver hook directly.
547  *
548  * RETURNS:
549  * Zero on success, error code on failure
550  */
551 static int
552 drm_atomic_crtc_get_property(struct drm_crtc *crtc,
553 		const struct drm_crtc_state *state,
554 		struct drm_property *property, uint64_t *val)
555 {
556 	struct drm_device *dev = crtc->dev;
557 	struct drm_mode_config *config = &dev->mode_config;
558 
559 	if (property == config->prop_active)
560 		*val = state->active;
561 	else if (property == config->prop_mode_id)
562 		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
563 	else if (property == config->degamma_lut_property)
564 		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
565 	else if (property == config->ctm_property)
566 		*val = (state->ctm) ? state->ctm->base.id : 0;
567 	else if (property == config->gamma_lut_property)
568 		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
569 	else if (property == config->prop_out_fence_ptr)
570 		*val = 0;
571 	else if (crtc->funcs->atomic_get_property)
572 		return crtc->funcs->atomic_get_property(crtc, state, property, val);
573 	else
574 		return -EINVAL;
575 
576 	return 0;
577 }
578 
579 /**
580  * drm_atomic_crtc_check - check crtc state
581  * @crtc: crtc to check
582  * @state: crtc state to check
583  *
584  * Provides core sanity checks for crtc state.
585  *
586  * RETURNS:
587  * Zero on success, error code on failure
588  */
589 static int drm_atomic_crtc_check(struct drm_crtc *crtc,
590 		struct drm_crtc_state *state)
591 {
592 	/* NOTE: we explicitly don't enforce constraints such as primary
593 	 * layer covering entire screen, since that is something we want
594 	 * to allow (on hw that supports it).  For hw that does not, it
595 	 * should be checked in driver's crtc->atomic_check() vfunc.
596 	 *
597 	 * TODO: Add generic modeset state checks once we support those.
598 	 */
599 
600 	if (state->active && !state->enable) {
601 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
602 				 crtc->base.id, crtc->name);
603 		return -EINVAL;
604 	}
605 
606 	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
607 	 * as this is a kernel-internal detail that userspace should never
608 	 * be able to trigger. */
609 	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
610 	    WARN_ON(state->enable && !state->mode_blob)) {
611 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
612 				 crtc->base.id, crtc->name);
613 		return -EINVAL;
614 	}
615 
616 	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
617 	    WARN_ON(!state->enable && state->mode_blob)) {
618 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
619 				 crtc->base.id, crtc->name);
620 		return -EINVAL;
621 	}
622 
623 	/*
624 	 * Reject event generation for when a CRTC is off and stays off.
625 	 * It wouldn't be hard to implement this, but userspace has a track
626  * record of happily burning through 100% cpu (or worse, crashing) when the
627 	 * display pipe is suspended. To avoid all that fun just reject updates
628 	 * that ask for events since likely that indicates a bug in the
629 	 * compositor's drawing loop. This is consistent with the vblank IOCTL
630 	 * and legacy page_flip IOCTL which also reject service on a disabled
631 	 * pipe.
632 	 */
633 	if (state->event && !state->active && !crtc->state->active) {
634 		DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
635 				 crtc->base.id);
636 		return -EINVAL;
637 	}
638 
639 	return 0;
640 }
641 
642 /**
643  * drm_atomic_get_plane_state - get plane state
644  * @state: global atomic state object
645  * @plane: plane to get state object for
646  *
647  * This function returns the plane state for the given plane, allocating it if
648  * needed. It will also grab the relevant plane lock to make sure that the state
649  * is consistent.
650  *
651  * Returns:
652  *
653  * Either the allocated state or the error code encoded into the pointer. When
654  * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
655  * entire atomic sequence must be restarted. All other errors are fatal.
656  */
657 struct drm_plane_state *
658 drm_atomic_get_plane_state(struct drm_atomic_state *state,
659 			  struct drm_plane *plane)
660 {
661 	int ret, index = drm_plane_index(plane);
662 	struct drm_plane_state *plane_state;
663 
664 	WARN_ON(!state->acquire_ctx);
665 
666 	plane_state = drm_atomic_get_existing_plane_state(state, plane);
667 	if (plane_state)
668 		return plane_state;
669 
670 	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
671 	if (ret)
672 		return ERR_PTR(ret);
673 
674 	plane_state = plane->funcs->atomic_duplicate_state(plane);
675 	if (!plane_state)
676 		return ERR_PTR(-ENOMEM);
677 
678 	state->planes[index].state = plane_state;
679 	state->planes[index].ptr = plane;
680 	plane_state->state = state;
681 
682 	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
683 			 plane->base.id, plane->name, plane_state, state);
684 
685 	if (plane_state->crtc) {
686 		struct drm_crtc_state *crtc_state;
687 
688 		crtc_state = drm_atomic_get_crtc_state(state,
689 						       plane_state->crtc);
690 		if (IS_ERR(crtc_state))
691 			return ERR_CAST(crtc_state);
692 	}
693 
694 	return plane_state;
695 }
696 EXPORT_SYMBOL(drm_atomic_get_plane_state);
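/*
 * Illustrative sketch, not part of this file: a typical kernel-internal plane
 * update built on top of drm_atomic_get_plane_state() and the setter helpers
 * further down in this file. example_set_plane() is a made-up name.
 */
static int example_set_plane(struct drm_atomic_state *state,
			     struct drm_plane *plane,
			     struct drm_crtc *crtc,
			     struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* full-surface scanout; src_* coordinates are 16.16 fixed point */
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	return 0;
}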
697 
698 /**
699  * drm_atomic_plane_set_property - set property on plane
700  * @plane: the drm plane to set a property on
701  * @state: the state object to update with the new property value
702  * @property: the property to set
703  * @val: the new property value
704  *
705  * Use this instead of calling plane->atomic_set_property directly.
706  * This function handles generic/core properties and calls out to
707  * driver's ->atomic_set_property() for driver properties.  To ensure
708  * consistent behavior you must call this function rather than the
709  * driver hook directly.
710  *
711  * RETURNS:
712  * Zero on success, error code on failure
713  */
714 int drm_atomic_plane_set_property(struct drm_plane *plane,
715 		struct drm_plane_state *state, struct drm_property *property,
716 		uint64_t val)
717 {
718 	struct drm_device *dev = plane->dev;
719 	struct drm_mode_config *config = &dev->mode_config;
720 
721 	if (property == config->prop_fb_id) {
722 		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
723 		drm_atomic_set_fb_for_plane(state, fb);
724 		if (fb)
725 			drm_framebuffer_unreference(fb);
726 	} else if (property == config->prop_in_fence_fd) {
727 		if (state->fence)
728 			return -EINVAL;
729 
730 		if (U642I64(val) == -1)
731 			return 0;
732 
733 		state->fence = sync_file_get_fence(val);
734 		if (!state->fence)
735 			return -EINVAL;
736 
737 	} else if (property == config->prop_crtc_id) {
738 		struct drm_crtc *crtc = drm_crtc_find(dev, val);
739 		return drm_atomic_set_crtc_for_plane(state, crtc);
740 	} else if (property == config->prop_crtc_x) {
741 		state->crtc_x = U642I64(val);
742 	} else if (property == config->prop_crtc_y) {
743 		state->crtc_y = U642I64(val);
744 	} else if (property == config->prop_crtc_w) {
745 		state->crtc_w = val;
746 	} else if (property == config->prop_crtc_h) {
747 		state->crtc_h = val;
748 	} else if (property == config->prop_src_x) {
749 		state->src_x = val;
750 	} else if (property == config->prop_src_y) {
751 		state->src_y = val;
752 	} else if (property == config->prop_src_w) {
753 		state->src_w = val;
754 	} else if (property == config->prop_src_h) {
755 		state->src_h = val;
756 	} else if (property == config->rotation_property) {
757 		state->rotation = val;
758 	} else if (property == plane->zpos_property) {
759 		state->zpos = val;
760 	} else if (plane->funcs->atomic_set_property) {
761 		return plane->funcs->atomic_set_property(plane, state,
762 				property, val);
763 	} else {
764 		return -EINVAL;
765 	}
766 
767 	return 0;
768 }
769 EXPORT_SYMBOL(drm_atomic_plane_set_property);
770 
771 /**
772  * drm_atomic_plane_get_property - get property value from plane state
773  * @plane: the drm plane to set a property on
774  * @state: the state object to get the property value from
775  * @property: the property to set
776  * @val: return location for the property value
777  *
778  * This function handles generic/core properties and calls out to
779  * driver's ->atomic_get_property() for driver properties.  To ensure
780  * consistent behavior you must call this function rather than the
781  * driver hook directly.
782  *
783  * RETURNS:
784  * Zero on success, error code on failure
785  */
786 static int
787 drm_atomic_plane_get_property(struct drm_plane *plane,
788 		const struct drm_plane_state *state,
789 		struct drm_property *property, uint64_t *val)
790 {
791 	struct drm_device *dev = plane->dev;
792 	struct drm_mode_config *config = &dev->mode_config;
793 
794 	if (property == config->prop_fb_id) {
795 		*val = (state->fb) ? state->fb->base.id : 0;
796 	} else if (property == config->prop_in_fence_fd) {
797 		*val = -1;
798 	} else if (property == config->prop_crtc_id) {
799 		*val = (state->crtc) ? state->crtc->base.id : 0;
800 	} else if (property == config->prop_crtc_x) {
801 		*val = I642U64(state->crtc_x);
802 	} else if (property == config->prop_crtc_y) {
803 		*val = I642U64(state->crtc_y);
804 	} else if (property == config->prop_crtc_w) {
805 		*val = state->crtc_w;
806 	} else if (property == config->prop_crtc_h) {
807 		*val = state->crtc_h;
808 	} else if (property == config->prop_src_x) {
809 		*val = state->src_x;
810 	} else if (property == config->prop_src_y) {
811 		*val = state->src_y;
812 	} else if (property == config->prop_src_w) {
813 		*val = state->src_w;
814 	} else if (property == config->prop_src_h) {
815 		*val = state->src_h;
816 	} else if (property == config->rotation_property) {
817 		*val = state->rotation;
818 	} else if (property == plane->zpos_property) {
819 		*val = state->zpos;
820 	} else if (plane->funcs->atomic_get_property) {
821 		return plane->funcs->atomic_get_property(plane, state, property, val);
822 	} else {
823 		return -EINVAL;
824 	}
825 
826 	return 0;
827 }
828 
829 static bool
830 plane_switching_crtc(struct drm_atomic_state *state,
831 		     struct drm_plane *plane,
832 		     struct drm_plane_state *plane_state)
833 {
834 	if (!plane->state->crtc || !plane_state->crtc)
835 		return false;
836 
837 	if (plane->state->crtc == plane_state->crtc)
838 		return false;
839 
840 	/* This could be refined, but currently there's no helper or driver code
841 	 * to implement direct switching of active planes nor userspace to take
842 	 * advantage of more direct plane switching without the intermediate
843 	 * full OFF state.
844 	 */
845 	return true;
846 }
847 
848 /**
849  * drm_atomic_plane_check - check plane state
850  * @plane: plane to check
851  * @state: plane state to check
852  *
853  * Provides core sanity checks for plane state.
854  *
855  * RETURNS:
856  * Zero on success, error code on failure
857  */
858 static int drm_atomic_plane_check(struct drm_plane *plane,
859 		struct drm_plane_state *state)
860 {
861 	unsigned int fb_width, fb_height;
862 	int ret;
863 
864 	/* either *both* CRTC and FB must be set, or neither */
865 	if (WARN_ON(state->crtc && !state->fb)) {
866 		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
867 		return -EINVAL;
868 	} else if (WARN_ON(state->fb && !state->crtc)) {
869 		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
870 		return -EINVAL;
871 	}
872 
873 	/* if disabled, we don't care about the rest of the state: */
874 	if (!state->crtc)
875 		return 0;
876 
877 	/* Check whether this plane is usable on this CRTC */
878 	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
879 		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
880 		return -EINVAL;
881 	}
882 
883 	/* Check whether this plane supports the fb pixel format. */
884 	ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
885 	if (ret) {
886 		char *format_name = drm_get_format_name(state->fb->pixel_format);
887 		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
888 		kfree(format_name);
889 		return ret;
890 	}
891 
892 	/* Give drivers some help against integer overflows */
893 	if (state->crtc_w > INT_MAX ||
894 	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
895 	    state->crtc_h > INT_MAX ||
896 	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
897 		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
898 				 state->crtc_w, state->crtc_h,
899 				 state->crtc_x, state->crtc_y);
900 		return -ERANGE;
901 	}
902 
903 	fb_width = state->fb->width << 16;
904 	fb_height = state->fb->height << 16;
905 
906 	/* Make sure source coordinates are inside the fb. */
907 	if (state->src_w > fb_width ||
908 	    state->src_x > fb_width - state->src_w ||
909 	    state->src_h > fb_height ||
910 	    state->src_y > fb_height - state->src_h) {
911 		DRM_DEBUG_ATOMIC("Invalid source coordinates "
912 				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
913 				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
914 				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
915 				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
916 				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
917 		return -ENOSPC;
918 	}
919 
920 	if (plane_switching_crtc(state->state, plane, state)) {
921 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
922 				 plane->base.id, plane->name);
923 		return -EINVAL;
924 	}
925 
926 	return 0;
927 }
928 
929 /**
930  * drm_atomic_get_connector_state - get connector state
931  * @state: global atomic state object
932  * @connector: connector to get state object for
933  *
934  * This function returns the connector state for the given connector,
935  * allocating it if needed. It will also grab the relevant connector lock to
936  * make sure that the state is consistent.
937  *
938  * Returns:
939  *
940  * Either the allocated state or the error code encoded into the pointer. When
941  * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
942  * entire atomic sequence must be restarted. All other errors are fatal.
943  */
944 struct drm_connector_state *
945 drm_atomic_get_connector_state(struct drm_atomic_state *state,
946 			  struct drm_connector *connector)
947 {
948 	int ret, index;
949 	struct drm_mode_config *config = &connector->dev->mode_config;
950 	struct drm_connector_state *connector_state;
951 
952 	WARN_ON(!state->acquire_ctx);
953 
954 	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
955 	if (ret)
956 		return ERR_PTR(ret);
957 
958 	index = drm_connector_index(connector);
959 
960 	if (index >= state->num_connector) {
961 		struct __drm_connnectors_state *c;
962 		int alloc = max(index + 1, config->num_connector);
963 
964 		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
965 		if (!c)
966 			return ERR_PTR(-ENOMEM);
967 
968 		state->connectors = c;
969 		memset(&state->connectors[state->num_connector], 0,
970 		       sizeof(*state->connectors) * (alloc - state->num_connector));
971 
972 		state->num_connector = alloc;
973 	}
974 
975 	if (state->connectors[index].state)
976 		return state->connectors[index].state;
977 
978 	connector_state = connector->funcs->atomic_duplicate_state(connector);
979 	if (!connector_state)
980 		return ERR_PTR(-ENOMEM);
981 
982 	drm_connector_reference(connector);
983 	state->connectors[index].state = connector_state;
984 	state->connectors[index].ptr = connector;
985 	connector_state->state = state;
986 
987 	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
988 			 connector->base.id, connector_state, state);
989 
990 	if (connector_state->crtc) {
991 		struct drm_crtc_state *crtc_state;
992 
993 		crtc_state = drm_atomic_get_crtc_state(state,
994 						       connector_state->crtc);
995 		if (IS_ERR(crtc_state))
996 			return ERR_CAST(crtc_state);
997 	}
998 
999 	return connector_state;
1000 }
1001 EXPORT_SYMBOL(drm_atomic_get_connector_state);
1002 
1003 /**
1004  * drm_atomic_connector_set_property - set property on connector.
1005  * @connector: the drm connector to set a property on
1006  * @state: the state object to update with the new property value
1007  * @property: the property to set
1008  * @val: the new property value
1009  *
1010  * Use this instead of calling connector->atomic_set_property directly.
1011  * This function handles generic/core properties and calls out to
1012  * driver's ->atomic_set_property() for driver properties.  To ensure
1013  * consistent behavior you must call this function rather than the
1014  * driver hook directly.
1015  *
1016  * RETURNS:
1017  * Zero on success, error code on failure
1018  */
1019 int drm_atomic_connector_set_property(struct drm_connector *connector,
1020 		struct drm_connector_state *state, struct drm_property *property,
1021 		uint64_t val)
1022 {
1023 	struct drm_device *dev = connector->dev;
1024 	struct drm_mode_config *config = &dev->mode_config;
1025 
1026 	if (property == config->prop_crtc_id) {
1027 		struct drm_crtc *crtc = drm_crtc_find(dev, val);
1028 		return drm_atomic_set_crtc_for_connector(state, crtc);
1029 	} else if (property == config->dpms_property) {
1030 		/* setting DPMS property requires special handling, which
1031 		 * is done in legacy setprop path for us.  Disallow (for
1032 		 * now?) atomic writes to DPMS property:
1033 		 */
1034 		return -EINVAL;
1035 	} else if (connector->funcs->atomic_set_property) {
1036 		return connector->funcs->atomic_set_property(connector,
1037 				state, property, val);
1038 	} else {
1039 		return -EINVAL;
1040 	}
1041 }
1042 EXPORT_SYMBOL(drm_atomic_connector_set_property);
1043 
1044 /**
1045  * drm_atomic_connector_get_property - get property value from connector state
1046  * @connector: the drm connector to set a property on
1047  * @state: the state object to get the property value from
1048  * @property: the property to set
1049  * @val: return location for the property value
1050  *
1051  * This function handles generic/core properties and calls out to
1052  * driver's ->atomic_get_property() for driver properties.  To ensure
1053  * consistent behavior you must call this function rather than the
1054  * driver hook directly.
1055  *
1056  * RETURNS:
1057  * Zero on success, error code on failure
1058  */
1059 static int
1060 drm_atomic_connector_get_property(struct drm_connector *connector,
1061 		const struct drm_connector_state *state,
1062 		struct drm_property *property, uint64_t *val)
1063 {
1064 	struct drm_device *dev = connector->dev;
1065 	struct drm_mode_config *config = &dev->mode_config;
1066 
1067 	if (property == config->prop_crtc_id) {
1068 		*val = (state->crtc) ? state->crtc->base.id : 0;
1069 	} else if (property == config->dpms_property) {
1070 		*val = connector->dpms;
1071 	} else if (connector->funcs->atomic_get_property) {
1072 		return connector->funcs->atomic_get_property(connector,
1073 				state, property, val);
1074 	} else {
1075 		return -EINVAL;
1076 	}
1077 
1078 	return 0;
1079 }
1080 
1081 int drm_atomic_get_property(struct drm_mode_object *obj,
1082 		struct drm_property *property, uint64_t *val)
1083 {
1084 	struct drm_device *dev = property->dev;
1085 	int ret;
1086 
1087 	switch (obj->type) {
1088 	case DRM_MODE_OBJECT_CONNECTOR: {
1089 		struct drm_connector *connector = obj_to_connector(obj);
1090 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1091 		ret = drm_atomic_connector_get_property(connector,
1092 				connector->state, property, val);
1093 		break;
1094 	}
1095 	case DRM_MODE_OBJECT_CRTC: {
1096 		struct drm_crtc *crtc = obj_to_crtc(obj);
1097 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1098 		ret = drm_atomic_crtc_get_property(crtc,
1099 				crtc->state, property, val);
1100 		break;
1101 	}
1102 	case DRM_MODE_OBJECT_PLANE: {
1103 		struct drm_plane *plane = obj_to_plane(obj);
1104 		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
1105 		ret = drm_atomic_plane_get_property(plane,
1106 				plane->state, property, val);
1107 		break;
1108 	}
1109 	default:
1110 		ret = -EINVAL;
1111 		break;
1112 	}
1113 
1114 	return ret;
1115 }
1116 
1117 /**
1118  * drm_atomic_set_crtc_for_plane - set crtc for plane
1119  * @plane_state: the plane whose incoming state to update
1120  * @crtc: crtc to use for the plane
1121  *
1122  * Changing the assigned crtc for a plane requires us to grab the lock and state
1123  * for the new crtc, as needed. This function takes care of all these details
1124  * besides updating the pointer in the state object itself.
1125  *
1126  * Returns:
1127  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1128  * then the w/w mutex code has detected a deadlock and the entire atomic
1129  * sequence must be restarted. All other errors are fatal.
1130  */
1131 int
1132 drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
1133 			      struct drm_crtc *crtc)
1134 {
1135 	struct drm_plane *plane = plane_state->plane;
1136 	struct drm_crtc_state *crtc_state;
1137 
1138 	if (plane_state->crtc) {
1139 		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1140 						       plane_state->crtc);
1141 		if (WARN_ON(IS_ERR(crtc_state)))
1142 			return PTR_ERR(crtc_state);
1143 
1144 		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
1145 	}
1146 
1147 	plane_state->crtc = crtc;
1148 
1149 	if (crtc) {
1150 		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1151 						       crtc);
1152 		if (IS_ERR(crtc_state))
1153 			return PTR_ERR(crtc_state);
1154 		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
1155 	}
1156 
1157 	if (crtc)
1158 		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
1159 				 plane_state, crtc->base.id, crtc->name);
1160 	else
1161 		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
1162 				 plane_state);
1163 
1164 	return 0;
1165 }
1166 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
1167 
1168 /**
1169  * drm_atomic_set_fb_for_plane - set framebuffer for plane
1170  * @plane_state: atomic state object for the plane
1171  * @fb: fb to use for the plane
1172  *
1173  * Changing the assigned framebuffer for a plane requires us to grab a reference
1174  * to the new fb and drop the reference to the old fb, if there is one. This
1175  * function takes care of all these details besides updating the pointer in the
1176  * state object itself.
1177  */
1178 void
1179 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
1180 			    struct drm_framebuffer *fb)
1181 {
1182 	if (plane_state->fb)
1183 		drm_framebuffer_unreference(plane_state->fb);
1184 	if (fb)
1185 		drm_framebuffer_reference(fb);
1186 	plane_state->fb = fb;
1187 
1188 	if (fb)
1189 		DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
1190 				 fb->base.id, plane_state);
1191 	else
1192 		DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
1193 				 plane_state);
1194 }
1195 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
1196 
1197 /**
1198  * drm_atomic_set_fence_for_plane - set fence for plane
1199  * @plane_state: atomic state object for the plane
1200  * @fence: fence to use for the plane
1201  *
1202  * Helper to setup the plane_state fence in case it is not set yet.
1203  * By using this helper drivers don't need to worry about whether the user
1204  * chose implicit or explicit fencing.
1205  *
1206  * This function will not set the fence on the state if it was already set
1207  * via the explicit fencing interfaces on the atomic ioctl. In that case it
1208  * just drops the reference to @fence, since we are not storing it
1209  * anywhere.
1210  *
1211  * Otherwise, if plane_state->fence is not set, this function simply sets
1212  * it to the received implicit fence.
1213  */
1214 void
1215 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
1216 			       struct fence *fence)
1217 {
1218 	if (plane_state->fence) {
1219 		fence_put(fence);
1220 		return;
1221 	}
1222 
1223 	plane_state->fence = fence;
1224 }
1225 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
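/*
 * Illustrative sketch, not part of this file: a hypothetical driver
 * ->prepare_fb() hook handing its implicit fence over to the plane state.
 * foo_fb_get_fence() is a made-up, driver-specific helper; only
 * drm_atomic_set_fence_for_plane() is real.
 */
static struct fence *foo_fb_get_fence(struct drm_framebuffer *fb); /* made up */

static int foo_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct fence *fence;

	if (!new_state->fb)
		return 0;

	fence = foo_fb_get_fence(new_state->fb);

	/* no-op (and drops @fence) if userspace supplied an explicit fence */
	drm_atomic_set_fence_for_plane(new_state, fence);

	return 0;
}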
1226 
1227 /**
1228  * drm_atomic_set_crtc_for_connector - set crtc for connector
1229  * @conn_state: atomic state object for the connector
1230  * @crtc: crtc to use for the connector
1231  *
1232  * Changing the assigned crtc for a connector requires us to grab the lock and
1233  * state for the new crtc, as needed. This function takes care of all these
1234  * details besides updating the pointer in the state object itself.
1235  *
1236  * Returns:
1237  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1238  * then the w/w mutex code has detected a deadlock and the entire atomic
1239  * sequence must be restarted. All other errors are fatal.
1240  */
1241 int
1242 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1243 				  struct drm_crtc *crtc)
1244 {
1245 	struct drm_crtc_state *crtc_state;
1246 
1247 	if (conn_state->crtc == crtc)
1248 		return 0;
1249 
1250 	if (conn_state->crtc) {
1251 		crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
1252 								conn_state->crtc);
1253 
1254 		crtc_state->connector_mask &=
1255 			~(1 << drm_connector_index(conn_state->connector));
1256 
1257 		drm_connector_unreference(conn_state->connector);
1258 		conn_state->crtc = NULL;
1259 	}
1260 
1261 	if (crtc) {
1262 		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1263 		if (IS_ERR(crtc_state))
1264 			return PTR_ERR(crtc_state);
1265 
1266 		crtc_state->connector_mask |=
1267 			1 << drm_connector_index(conn_state->connector);
1268 
1269 		drm_connector_reference(conn_state->connector);
1270 		conn_state->crtc = crtc;
1271 
1272 		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
1273 				 conn_state, crtc->base.id, crtc->name);
1274 	} else {
1275 		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
1276 				 conn_state);
1277 	}
1278 
1279 	return 0;
1280 }
1281 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
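/*
 * Illustrative sketch, not part of this file: routing a connector to a CRTC
 * from kernel code; example_route_connector() is a made-up name. The helper
 * above takes care of the connector_mask bookkeeping on the affected CRTC
 * states.
 */
static int example_route_connector(struct drm_atomic_state *state,
				   struct drm_connector *connector,
				   struct drm_crtc *crtc)
{
	struct drm_connector_state *conn_state;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state))
		return PTR_ERR(conn_state);

	return drm_atomic_set_crtc_for_connector(conn_state, crtc);
}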
1282 
1283 /**
1284  * drm_atomic_add_affected_connectors - add connectors for crtc
1285  * @state: atomic state
1286  * @crtc: DRM crtc
1287  *
1288  * This function walks the current configuration and adds all connectors
1289  * currently using @crtc to the atomic configuration @state. Note that this
1290  * function must acquire the connection mutex. This can potentially cause
1291  * unneeded serialization if the update is just for the planes on one crtc. Hence
1292  * drivers and helpers should only call this when really needed (e.g. when a
1293  * full modeset needs to happen due to some change).
1294  *
1295  * Returns:
1296  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1297  * then the w/w mutex code has detected a deadlock and the entire atomic
1298  * sequence must be restarted. All other errors are fatal.
1299  */
1300 int
1301 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
1302 				   struct drm_crtc *crtc)
1303 {
1304 	struct drm_mode_config *config = &state->dev->mode_config;
1305 	struct drm_connector *connector;
1306 	struct drm_connector_state *conn_state;
1307 	int ret;
1308 
1309 	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
1310 	if (ret)
1311 		return ret;
1312 
1313 	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
1314 			 crtc->base.id, crtc->name, state);
1315 
1316 	/*
1317 	 * Changed connectors are already in @state, so only need to look at the
1318 	 * current configuration.
1319 	 */
1320 	drm_for_each_connector(connector, state->dev) {
1321 		if (connector->state->crtc != crtc)
1322 			continue;
1323 
1324 		conn_state = drm_atomic_get_connector_state(state, connector);
1325 		if (IS_ERR(conn_state))
1326 			return PTR_ERR(conn_state);
1327 	}
1328 
1329 	return 0;
1330 }
1331 EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1332 
1333 /**
1334  * drm_atomic_add_affected_planes - add planes for crtc
1335  * @state: atomic state
1336  * @crtc: DRM crtc
1337  *
1338  * This function walks the current configuration and adds all planes
1339  * currently used by @crtc to the atomic configuration @state. This is useful
1340  * when an atomic commit also needs to check all currently enabled plane on
1341  * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
1342  * to avoid special code to force-enable all planes.
1343  *
1344  * Since acquiring a plane state will always also acquire the w/w mutex of the
1345  * current CRTC for that plane (if there is any), adding all the plane states for
1346  * a CRTC will not reduce parallelism of atomic updates.
1347  *
1348  * Returns:
1349  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1350  * then the w/w mutex code has detected a deadlock and the entire atomic
1351  * sequence must be restarted. All other errors are fatal.
1352  */
1353 int
1354 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1355 			       struct drm_crtc *crtc)
1356 {
1357 	struct drm_plane *plane;
1358 
1359 	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
1360 
1361 	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
1362 		struct drm_plane_state *plane_state =
1363 			drm_atomic_get_plane_state(state, plane);
1364 
1365 		if (IS_ERR(plane_state))
1366 			return PTR_ERR(plane_state);
1367 	}
1368 	return 0;
1369 }
1370 EXPORT_SYMBOL(drm_atomic_add_affected_planes);
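/*
 * Illustrative sketch, not part of this file: a driver atomic_check() step
 * that pulls in all connectors and planes of a CRTC once a full modeset has
 * been detected, using the two helpers above. example_check_modeset() is a
 * made-up name.
 */
static int example_check_modeset(struct drm_atomic_state *state,
				 struct drm_crtc *crtc,
				 struct drm_crtc_state *crtc_state)
{
	int ret;

	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		return ret;

	return drm_atomic_add_affected_planes(state, crtc);
}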
1371 
1372 /**
1373  * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
1374  * @state: atomic state
1375  *
1376  * This function should be used by legacy entry points which don't understand
1377  * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
1378  * the slowpath completed.
1379  */
1380 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1381 {
1382 	struct drm_device *dev = state->dev;
1383 	unsigned crtc_mask = 0;
1384 	struct drm_crtc *crtc;
1385 	int ret;
1386 	bool global = false;
1387 
1388 	drm_for_each_crtc(crtc, dev) {
1389 		if (crtc->acquire_ctx != state->acquire_ctx)
1390 			continue;
1391 
1392 		crtc_mask |= drm_crtc_mask(crtc);
1393 		crtc->acquire_ctx = NULL;
1394 	}
1395 
1396 	if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1397 		global = true;
1398 
1399 		dev->mode_config.acquire_ctx = NULL;
1400 	}
1401 
1402 retry:
1403 	drm_modeset_backoff(state->acquire_ctx);
1404 
1405 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
1406 	if (ret)
1407 		goto retry;
1408 
1409 	drm_for_each_crtc(crtc, dev)
1410 		if (drm_crtc_mask(crtc) & crtc_mask)
1411 			crtc->acquire_ctx = state->acquire_ctx;
1412 
1413 	if (global)
1414 		dev->mode_config.acquire_ctx = state->acquire_ctx;
1415 }
1416 EXPORT_SYMBOL(drm_atomic_legacy_backoff);
1417 
1418 /**
1419  * drm_atomic_check_only - check whether a given config would work
1420  * @state: atomic configuration to check
1421  *
1422  * Note that this function can return -EDEADLK if the driver needed to acquire
1423  * more locks but encountered a deadlock. The caller must then do the usual w/w
1424  * backoff dance and restart. All other errors are fatal.
1425  *
1426  * Returns:
1427  * 0 on success, negative error code on failure.
1428  */
1429 int drm_atomic_check_only(struct drm_atomic_state *state)
1430 {
1431 	struct drm_device *dev = state->dev;
1432 	struct drm_mode_config *config = &dev->mode_config;
1433 	struct drm_plane *plane;
1434 	struct drm_plane_state *plane_state;
1435 	struct drm_crtc *crtc;
1436 	struct drm_crtc_state *crtc_state;
1437 	int i, ret = 0;
1438 
1439 	DRM_DEBUG_ATOMIC("checking %p\n", state);
1440 
1441 	for_each_plane_in_state(state, plane, plane_state, i) {
1442 		ret = drm_atomic_plane_check(plane, plane_state);
1443 		if (ret) {
1444 			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
1445 					 plane->base.id, plane->name);
1446 			return ret;
1447 		}
1448 	}
1449 
1450 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
1451 		ret = drm_atomic_crtc_check(crtc, crtc_state);
1452 		if (ret) {
1453 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
1454 					 crtc->base.id, crtc->name);
1455 			return ret;
1456 		}
1457 	}
1458 
1459 	if (config->funcs->atomic_check)
1460 		ret = config->funcs->atomic_check(state->dev, state);
1461 
1462 	if (ret)
1463 		return ret;
1464 
1465 	if (!state->allow_modeset) {
1466 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
1467 			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
1468 				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
1469 						 crtc->base.id, crtc->name);
1470 				return -EINVAL;
1471 			}
1472 		}
1473 	}
1474 
1475 	return 0;
1476 }
1477 EXPORT_SYMBOL(drm_atomic_check_only);
1478 
1479 /**
1480  * drm_atomic_commit - commit configuration atomically
1481  * @state: atomic configuration to check
1482  *
1483  * Note that this function can return -EDEADLK if the driver needed to acquire
1484  * more locks but encountered a deadlock. The caller must then do the usual w/w
1485  * backoff dance and restart. All other errors are fatal.
1486  *
1487  * Also note that on successful execution ownership of @state is transferred
1488  * from the caller of this function to the function itself. The caller must not
1489  * free or in any other way access @state. If the function fails then the caller
1490  * must clean up @state itself.
1491  *
1492  * Returns:
1493  * 0 on success, negative error code on failure.
1494  */
1495 int drm_atomic_commit(struct drm_atomic_state *state)
1496 {
1497 	struct drm_mode_config *config = &state->dev->mode_config;
1498 	int ret;
1499 
1500 	ret = drm_atomic_check_only(state);
1501 	if (ret)
1502 		return ret;
1503 
1504 	DRM_DEBUG_ATOMIC("committing %p\n", state);
1505 
1506 	return config->funcs->atomic_commit(state->dev, state, false);
1507 }
1508 EXPORT_SYMBOL(drm_atomic_commit);
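/*
 * Illustrative sketch, not part of this file, of the ownership rule spelled
 * out above: @state may only be freed by the caller when the commit failed.
 * example_commit() is a made-up name; a real caller would usually retry on
 * -EDEADLK (see drm_atomic_state_clear()) instead of giving up.
 */
static int example_commit(struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_commit(state);
	if (ret) {
		/* the caller still owns @state on any failure */
		drm_atomic_state_free(state);
		return ret;
	}

	/* success: the commit machinery now owns @state, don't touch it */
	return 0;
}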
1509 
1510 /**
1511  * drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit
1512  * @state: atomic configuration to check
1513  *
1514  * Note that this function can return -EDEADLK if the driver needed to acquire
1515  * more locks but encountered a deadlock. The caller must then do the usual w/w
1516  * backoff dance and restart. All other errors are fatal.
1517  *
1518  * Also note that on successful execution ownership of @state is transferred
1519  * from the caller of this function to the function itself. The caller must not
1520  * free or in any other way access @state. If the function fails then the caller
1521  * must clean up @state itself.
1522  *
1523  * Returns:
1524  * 0 on success, negative error code on failure.
1525  */
1526 int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
1527 {
1528 	struct drm_mode_config *config = &state->dev->mode_config;
1529 	int ret;
1530 
1531 	ret = drm_atomic_check_only(state);
1532 	if (ret)
1533 		return ret;
1534 
1535 	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
1536 
1537 	return config->funcs->atomic_commit(state->dev, state, true);
1538 }
1539 EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
1540 
1541 /*
1542  * The big monster ioctl
1543  */
1544 
1545 static struct drm_pending_vblank_event *create_vblank_event(
1546 		struct drm_device *dev, uint64_t user_data)
1547 {
1548 	struct drm_pending_vblank_event *e = NULL;
1549 
1550 	e = kzalloc(sizeof *e, GFP_KERNEL);
1551 	if (!e)
1552 		return NULL;
1553 
1554 	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
1555 	e->event.base.length = sizeof(e->event);
1556 	e->event.user_data = user_data;
1557 
1558 	return e;
1559 }
1560 
1561 static int atomic_set_prop(struct drm_atomic_state *state,
1562 		struct drm_mode_object *obj, struct drm_property *prop,
1563 		uint64_t prop_value)
1564 {
1565 	struct drm_mode_object *ref;
1566 	int ret;
1567 
1568 	if (!drm_property_change_valid_get(prop, prop_value, &ref))
1569 		return -EINVAL;
1570 
1571 	switch (obj->type) {
1572 	case DRM_MODE_OBJECT_CONNECTOR: {
1573 		struct drm_connector *connector = obj_to_connector(obj);
1574 		struct drm_connector_state *connector_state;
1575 
1576 		connector_state = drm_atomic_get_connector_state(state, connector);
1577 		if (IS_ERR(connector_state)) {
1578 			ret = PTR_ERR(connector_state);
1579 			break;
1580 		}
1581 
1582 		ret = drm_atomic_connector_set_property(connector,
1583 				connector_state, prop, prop_value);
1584 		break;
1585 	}
1586 	case DRM_MODE_OBJECT_CRTC: {
1587 		struct drm_crtc *crtc = obj_to_crtc(obj);
1588 		struct drm_crtc_state *crtc_state;
1589 
1590 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
1591 		if (IS_ERR(crtc_state)) {
1592 			ret = PTR_ERR(crtc_state);
1593 			break;
1594 		}
1595 
1596 		ret = drm_atomic_crtc_set_property(crtc,
1597 				crtc_state, prop, prop_value);
1598 		break;
1599 	}
1600 	case DRM_MODE_OBJECT_PLANE: {
1601 		struct drm_plane *plane = obj_to_plane(obj);
1602 		struct drm_plane_state *plane_state;
1603 
1604 		plane_state = drm_atomic_get_plane_state(state, plane);
1605 		if (IS_ERR(plane_state)) {
1606 			ret = PTR_ERR(plane_state);
1607 			break;
1608 		}
1609 
1610 		ret = drm_atomic_plane_set_property(plane,
1611 				plane_state, prop, prop_value);
1612 		break;
1613 	}
1614 	default:
1615 		ret = -EINVAL;
1616 		break;
1617 	}
1618 
1619 	drm_property_change_valid_put(prop, ref);
1620 	return ret;
1621 }
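/*
 * Editorial sketch, not part of the original file: atomic_set_prop() above
 * routes each property to the per-object core setters, which handle the
 * standard properties and fall back to the object's ->atomic_set_property()
 * hook for driver-private ones. A driver-side hook might look like this;
 * the "example_*" names and the dither property are hypothetical.
 */
static struct drm_property *example_dither_property; /* created at init time */

struct example_crtc_state {
	struct drm_crtc_state base;
	u32 dither;	/* hypothetical driver-private value */
};

static int example_crtc_atomic_set_property(struct drm_crtc *crtc,
					    struct drm_crtc_state *state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct example_crtc_state *ecs =
		container_of(state, struct example_crtc_state, base);

	if (property == example_dither_property) {
		ecs->dither = val;
		return 0;
	}

	/* unknown property: -EINVAL is reported back to userspace */
	return -EINVAL;
}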
1622 
1623 /**
1624  * drm_atomic_clean_old_fb - Unset old_fb pointers and set plane->fb pointers.
1625  *
1626  * @dev: drm device to check.
1627  * @plane_mask: plane mask for planes that were updated.
1628  * @ret: return value, can be -EDEADLK for a retry.
1629  *
1630  * Before doing an update plane->old_fb is set to plane->fb,
1631  * but before dropping the locks old_fb needs to be set to NULL
1632  * and plane->fb updated. This is a common operation for each
1633  * atomic update, so this call is split off as a helper.
1634  */
1635 void drm_atomic_clean_old_fb(struct drm_device *dev,
1636 			     unsigned plane_mask,
1637 			     int ret)
1638 {
1639 	struct drm_plane *plane;
1640 
1641 	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
1642 	 * locks (ie. while it is still safe to deref plane->state).  We
1643 	 * need to do this here because the driver entry points cannot
1644 	 * distinguish between legacy and atomic ioctls.
1645 	 */
1646 	drm_for_each_plane_mask(plane, dev, plane_mask) {
1647 		if (ret == 0) {
1648 			struct drm_framebuffer *new_fb = plane->state->fb;
1649 			if (new_fb)
1650 				drm_framebuffer_reference(new_fb);
1651 			plane->fb = new_fb;
1652 			plane->crtc = plane->state->crtc;
1653 
1654 			if (plane->old_fb)
1655 				drm_framebuffer_unreference(plane->old_fb);
1656 		}
1657 		plane->old_fb = NULL;
1658 	}
1659 }
1660 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
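/*
 * Editorial sketch, not part of the original file: the caller pattern the
 * kernel-doc above describes, in a hypothetical legacy-style helper.
 * plane->old_fb is recorded and the touched planes collected in a mask
 * before the update; the fixup then runs while the locks are still held.
 * drm_mode_atomic_ioctl() below follows the same pattern, including the
 * -EDEADLK retry which is elided here.
 */
static int example_update_one_plane(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	unsigned plane_mask = 0;
	int ret;

	plane->old_fb = plane->fb;
	plane_mask |= 1 << drm_plane_index(plane);

	/* ... fill in the new plane state in @state ... */
	ret = drm_atomic_commit(state);

	/* clears old_fb and, on success, updates plane->fb/plane->crtc */
	drm_atomic_clean_old_fb(plane->dev, plane_mask, ret);

	return ret;
}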
1661 
1662 /**
1663  * DOC: explicit fencing properties
1664  *
1665  * Explicit fencing allows userspace to control the buffer synchronization
1666  * between devices. A fence or a group of fences is transferred to/from
1667  * userspace using Sync File fds, and there are two DRM properties for that:
1668  * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
1669  * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
1670  *
1671  * By contrast, with implicit fencing the kernel keeps track of any
1672  * ongoing rendering, and automatically ensures that the atomic update waits
1673  * for any pending rendering to complete. For shared buffers represented with
1674  * a struct &dma_buf this is tracked in &reservation_object structures.
1675  * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
1676  * whereas explicit fencing is what Android wants.
1677  *
1678  * "IN_FENCE_FD”:
1679  *	Use this property to pass a fence that DRM should wait on before
1680  *	proceeding with the Atomic Commit request and show the framebuffer for
1681  *	the plane on the screen. The fence can be either a normal fence or a
1682  *	merged one; the sync_file framework will handle both cases and use a
1683  *	fence_array if a merged fence is received. Passing -1 here means no
1684  *	fences to wait on.
1685  *
1686  *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag,
1687  *	it will only check whether the Sync File is a valid one.
1688  *
1689  *	On the driver side the fence is stored on the @fence parameter of
1690  *	struct &drm_plane_state. Drivers which also support implicit fencing
1691  *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
1692  *	to make sure there's consistent behaviour between drivers in precedence
1693  *	of implicit vs. explicit fencing.
1694  *
1695  * "OUT_FENCE_PTR”:
1696  *	Use this property to pass a file descriptor pointer to DRM. Once the
1697  *	Atomic Commit request call returns, OUT_FENCE_PTR will be filled with
1698  *	the file descriptor number of a Sync File. This Sync File contains the
1699  *	CRTC fence that will be signaled when all framebuffers present on the
1700  *	Atomic Commit request for that given CRTC are scanned out on the
1701  *	screen.
1702  *
1703  *	The Atomic Commit request fails if an invalid pointer is passed. If the
1704  *	Atomic Commit request fails for any other reason, the out fence fd
1705  *	returned will be -1. On an Atomic Commit with the
1706  *	DRM_MODE_ATOMIC_TEST_ONLY flag, the out fence will also be set to -1.
1707  *
1708  *	Note that out-fences don't have a special interface to drivers and are
1709  *	internally represented by a struct &drm_pending_vblank_event in struct
1710  *	&drm_crtc_state, which is also used by the nonblocking atomic commit
1711  *	helpers and for the DRM event handling for existing userspace.
1712  */
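/*
 * Editorial sketch, not part of the original file: the userspace side of the
 * two properties documented above, using libdrm's atomic request helpers.
 * The property and object IDs are assumed to have been looked up beforehand;
 * all lower-case identifiers are hypothetical.
 *
 *	int32_t out_fence_fd = -1;
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *
 *	// wait for rendering into the new buffer before scanning it out
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, render_fence_fd);
 *	drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id);
 *
 *	// ask for a fence that signals once the new buffer is on screen
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *
 *	ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	// on success out_fence_fd now holds a Sync File fd for this CRTC
 *	drmModeAtomicFree(req);
 */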
1713 
1714 struct drm_out_fence_state {
1715 	s32 __user *out_fence_ptr;
1716 	struct sync_file *sync_file;
1717 	int fd;
1718 };
1719 
1720 static int setup_out_fence(struct drm_out_fence_state *fence_state,
1721 			   struct fence *fence)
1722 {
1723 	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
1724 	if (fence_state->fd < 0)
1725 		return fence_state->fd;
1726 
1727 	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
1728 		return -EFAULT;
1729 
1730 	fence_state->sync_file = sync_file_create(fence);
1731 	if (!fence_state->sync_file)
1732 		return -ENOMEM;
1733 
1734 	return 0;
1735 }
1736 
1737 static int prepare_crtc_signaling(struct drm_device *dev,
1738 				  struct drm_atomic_state *state,
1739 				  struct drm_mode_atomic *arg,
1740 				  struct drm_file *file_priv,
1741 				  struct drm_out_fence_state **fence_state,
1742 				  unsigned int *num_fences)
1743 {
1744 	struct drm_crtc *crtc;
1745 	struct drm_crtc_state *crtc_state;
1746 	int i, ret;
1747 
1748 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
1749 		return 0;
1750 
1751 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
1752 		s32 __user *fence_ptr;
1753 
1754 		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
1755 
1756 		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
1757 			struct drm_pending_vblank_event *e;
1758 
1759 			e = create_vblank_event(dev, arg->user_data);
1760 			if (!e)
1761 				return -ENOMEM;
1762 
1763 			crtc_state->event = e;
1764 		}
1765 
1766 		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
1767 			struct drm_pending_vblank_event *e = crtc_state->event;
1768 
1769 			if (!file_priv)
1770 				continue;
1771 
1772 			ret = drm_event_reserve_init(dev, file_priv, &e->base,
1773 						     &e->event.base);
1774 			if (ret) {
1775 				kfree(e);
1776 				crtc_state->event = NULL;
1777 				return ret;
1778 			}
1779 		}
1780 
1781 		if (fence_ptr) {
1782 			struct fence *fence;
1783 			struct drm_out_fence_state *f;
1784 
1785 			f = krealloc(*fence_state, sizeof(**fence_state) *
1786 				     (*num_fences + 1), GFP_KERNEL);
1787 			if (!f)
1788 				return -ENOMEM;
1789 
1790 			memset(&f[*num_fences], 0, sizeof(*f));
1791 
1792 			f[*num_fences].out_fence_ptr = fence_ptr;
1793 			*fence_state = f;
1794 
1795 			fence = drm_crtc_create_fence(crtc);
1796 			if (!fence)
1797 				return -ENOMEM;
1798 
1799 			ret = setup_out_fence(&f[(*num_fences)++], fence);
1800 			if (ret) {
1801 				fence_put(fence);
1802 				return ret;
1803 			}
1804 
1805 			crtc_state->event->base.fence = fence;
1806 		}
1807 	}
1808 
1809 	return 0;
1810 }
1811 
1812 static void complete_crtc_signaling(struct drm_device *dev,
1813 				    struct drm_atomic_state *state,
1814 				    struct drm_out_fence_state *fence_state,
1815 				    unsigned int num_fences,
1816 				    bool install_fds)
1817 {
1818 	struct drm_crtc *crtc;
1819 	struct drm_crtc_state *crtc_state;
1820 	int i;
1821 
1822 	if (install_fds) {
1823 		for (i = 0; i < num_fences; i++)
1824 			fd_install(fence_state[i].fd,
1825 				   fence_state[i].sync_file->file);
1826 
1827 		kfree(fence_state);
1828 		return;
1829 	}
1830 
1831 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
1832 		struct drm_pending_vblank_event *event = crtc_state->event;
1833 		/*
1834 		 * Free the allocated event. drm_atomic_helper_setup_commit
1835 		 * can allocate an event too, so only free it if it's ours
1836 		 * to prevent a double free in drm_atomic_state_clear.
1837 		 */
1838 		if (event && (event->base.fence || event->base.file_priv)) {
1839 			drm_event_cancel_free(dev, &event->base);
1840 			crtc_state->event = NULL;
1841 		}
1842 	}
1843 
1844 	if (!fence_state)
1845 		return;
1846 
1847 	for (i = 0; i < num_fences; i++) {
1848 		if (fence_state[i].sync_file)
1849 			fput(fence_state[i].sync_file->file);
1850 		if (fence_state[i].fd >= 0)
1851 			put_unused_fd(fence_state[i].fd);
1852 
1853 		/* If this fails there's nothing more we can do, so just log it */
1854 		if (fence_state[i].out_fence_ptr &&
1855 		    put_user(-1, fence_state[i].out_fence_ptr))
1856 			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
1857 	}
1858 
1859 	kfree(fence_state);
1860 }
1861 
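/*
 * Editorial note, not part of the original file: the layout the ioctl below
 * expects from userspace, with hypothetical values for count_objs = 2 (in
 * practice libdrm's drmModeAtomic* helpers build these arrays):
 *
 *	objs_ptr        -> { plane_id,        crtc_id }
 *	count_props_ptr -> { 2,               1       }
 *	props_ptr       -> { FB_ID, CRTC_ID,  ACTIVE  }
 *	prop_values_ptr -> { fb_id, crtc_id,  1       }
 *
 * props_ptr/prop_values_ptr form one flat list that is consumed
 * count_props[i] entries at a time for object i, which is exactly how
 * copied_objs and copied_props advance in the loops below.
 */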
1862 int drm_mode_atomic_ioctl(struct drm_device *dev,
1863 			  void *data, struct drm_file *file_priv)
1864 {
1865 	struct drm_mode_atomic *arg = data;
1866 	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
1867 	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
1868 	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
1869 	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
1870 	unsigned int copied_objs, copied_props;
1871 	struct drm_atomic_state *state;
1872 	struct drm_modeset_acquire_ctx ctx;
1873 	struct drm_plane *plane;
1874 	struct drm_out_fence_state *fence_state;
1875 	unsigned plane_mask;
1876 	int ret = 0;
1877 	unsigned int i, j, num_fences;
1878 
1879 	/* disallow for drivers not supporting atomic: */
1880 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1881 		return -EINVAL;
1882 
1883 	/* disallow for userspace that has not enabled atomic cap (even
1884 	 * though this may be a bit overkill, since legacy userspace
1885 	 * wouldn't know how to call this ioctl)
1886 	 */
1887 	if (!file_priv->atomic)
1888 		return -EINVAL;
1889 
1890 	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
1891 		return -EINVAL;
1892 
1893 	if (arg->reserved)
1894 		return -EINVAL;
1895 
1896 	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
1897 			!dev->mode_config.async_page_flip)
1898 		return -EINVAL;
1899 
1900 	/* can't test and expect an event at the same time. */
1901 	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
1902 			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1903 		return -EINVAL;
1904 
1905 	drm_modeset_acquire_init(&ctx, 0);
1906 
1907 	state = drm_atomic_state_alloc(dev);
1908 	if (!state)
1909 		return -ENOMEM;
1910 
1911 	state->acquire_ctx = &ctx;
1912 	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1913 
1914 retry:
1915 	plane_mask = 0;
1916 	copied_objs = 0;
1917 	copied_props = 0;
1918 	fence_state = NULL;
1919 	num_fences = 0;
1920 
1921 	for (i = 0; i < arg->count_objs; i++) {
1922 		uint32_t obj_id, count_props;
1923 		struct drm_mode_object *obj;
1924 
1925 		if (get_user(obj_id, objs_ptr + copied_objs)) {
1926 			ret = -EFAULT;
1927 			goto out;
1928 		}
1929 
1930 		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
1931 		if (!obj) {
1932 			ret = -ENOENT;
1933 			goto out;
1934 		}
1935 
1936 		if (!obj->properties) {
1937 			drm_mode_object_unreference(obj);
1938 			ret = -ENOENT;
1939 			goto out;
1940 		}
1941 
1942 		if (get_user(count_props, count_props_ptr + copied_objs)) {
1943 			drm_mode_object_unreference(obj);
1944 			ret = -EFAULT;
1945 			goto out;
1946 		}
1947 
1948 		copied_objs++;
1949 
1950 		for (j = 0; j < count_props; j++) {
1951 			uint32_t prop_id;
1952 			uint64_t prop_value;
1953 			struct drm_property *prop;
1954 
1955 			if (get_user(prop_id, props_ptr + copied_props)) {
1956 				drm_mode_object_unreference(obj);
1957 				ret = -EFAULT;
1958 				goto out;
1959 			}
1960 
1961 			prop = drm_mode_obj_find_prop_id(obj, prop_id);
1962 			if (!prop) {
1963 				drm_mode_object_unreference(obj);
1964 				ret = -ENOENT;
1965 				goto out;
1966 			}
1967 
1968 			if (copy_from_user(&prop_value,
1969 					   prop_values_ptr + copied_props,
1970 					   sizeof(prop_value))) {
1971 				drm_mode_object_unreference(obj);
1972 				ret = -EFAULT;
1973 				goto out;
1974 			}
1975 
1976 			ret = atomic_set_prop(state, obj, prop, prop_value);
1977 			if (ret) {
1978 				drm_mode_object_unreference(obj);
1979 				goto out;
1980 			}
1981 
1982 			copied_props++;
1983 		}
1984 
1985 		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
1986 		    !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
1987 			plane = obj_to_plane(obj);
1988 			plane_mask |= (1 << drm_plane_index(plane));
1989 			plane->old_fb = plane->fb;
1990 		}
1991 		drm_mode_object_unreference(obj);
1992 	}
1993 
1994 	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
1995 				     &num_fences);
1996 	if (ret)
1997 		goto out;
1998 
1999 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
2000 		/*
2001 		 * Unlike commit, check_only does not clean up state.
2002 		 * Below we call drm_atomic_state_free for it.
2003 		 */
2004 		ret = drm_atomic_check_only(state);
2005 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
2006 		ret = drm_atomic_nonblocking_commit(state);
2007 	} else {
2008 		ret = drm_atomic_commit(state);
2009 	}
2010 
2011 out:
2012 	drm_atomic_clean_old_fb(dev, plane_mask, ret);
2013 
2014 	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
2015 
2016 	if (ret == -EDEADLK) {
2017 		drm_atomic_state_clear(state);
2018 		drm_modeset_backoff(&ctx);
2019 		goto retry;
2020 	}
2021 
2022 	if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
2023 		drm_atomic_state_free(state);
2024 
2025 	drm_modeset_drop_locks(&ctx);
2026 	drm_modeset_acquire_fini(&ctx);
2027 
2028 	return ret;
2029 }
2030