/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

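/*
 * Illustrative sketch of that conversion (simplified, not the full call
 * path): a userspace atomic commit enters via amdgpu_dm_atomic_commit(),
 * is validated in amdgpu_dm_atomic_check(), and is finally translated
 * into DC stream/plane programming in amdgpu_dm_atomic_commit_tail(),
 * which hands a dc_state to the DC core (e.g. via dc_commit_state()).
 */
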
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
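
/*
 * Illustrative note on the packed reg-format above: the low 16 bits carry
 * the vertical/start value and the high 16 bits the horizontal/end value,
 * so a caller would unpack them as
 *
 *	vpos          = position & 0xffff;
 *	hpos          = position >> 16;
 *	v_blank_start = vbl & 0xffff;
 *	v_blank_end   = vbl >> 16;
 */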

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

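/*
 * Two variants of the same VRR check follow: the _irq form reads the copy
 * of the freesync config kept in dm_irq_params, which is what the
 * interrupt handlers below consult, while the plain form reads the atomic
 * dm_crtc_state used elsewhere in the driver.
 */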
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to look up the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of
		 * front-porch in vrr mode, as vblank timestamping gives valid
		 * results only once scanout is past the front-porch. This also
		 * delivers page-flip completion events that were queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

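/*
 * Division of labor between the three handlers above: dm_pflip_high_irq()
 * completes page flips, dm_crtc_high_irq() performs core vblank handling
 * at the start of vblank in fixed-refresh mode, and dm_vupdate_high_irq()
 * takes over vblank handling after the end of front-porch when VRR is
 * active, since only then are the timestamps valid.
 */
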
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

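/*
 * ELD (EDID-Like Data) callback for the generic DRM/HDA audio component:
 * find the connector whose audio instance matches the requested port and
 * copy its ELD into the caller's buffer.
 */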
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
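
/*
 * Summary of the framebuffer windows touched above: window 0 receives the
 * firmware instruction constants (unless the PSP front-door load already
 * placed them in cw0), window 2 the firmware BSS/data, window 3 a copy of
 * the VBIOS, and windows 4-6 (mailbox, trace buffer, firmware state) are
 * simply zeroed before the regions are handed to dmub_srv_hw_init().
 */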

static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			crtc_state->mode_changed = true;
		}
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	/* Disable vblank IRQs aggressively for power-saving */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* dc may be NULL if we get here from an early amdgpu_dm_init() error */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
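
/*
 * Rough shape of the bring-up sequence implemented above, for reference:
 *
 *	dmub_srv_create()            - register-access callbacks, ASIC id
 *	dmub_srv_calc_region_info()  - region sizes from the firmware header
 *	amdgpu_bo_create_kernel()    - one VRAM buffer covering all regions
 *	dmub_srv_calc_fb_info()      - rebase the regions onto that buffer
 *
 * dm_dmub_hw_init() later consumes adev->dm.dmub_fb_info to copy firmware
 * sections into the windows and program the hardware.
 */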

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create and dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

dm_gpureset_toggle_interrupts(struct amdgpu_device * adev,struct dc_state * state,bool enable)1651 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1652 				 struct dc_state *state, bool enable)
1653 {
1654 	enum dc_irq_source irq_source;
1655 	struct amdgpu_crtc *acrtc;
1656 	int rc = -EBUSY;
1657 	int i = 0;
1658 
1659 	for (i = 0; i < state->stream_count; i++) {
1660 		acrtc = get_crtc_by_otg_inst(
1661 				adev, state->stream_status[i].primary_otg_inst);
1662 
1663 		if (acrtc && state->stream_status[i].plane_count != 0) {
1664 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1665 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1666 			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1667 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1668 			if (rc)
1669 				DRM_WARN("Failed to %s pflip interrupts\n",
1670 					 enable ? "enable" : "disable");
1671 
1672 			if (enable) {
1673 				rc = dm_enable_vblank(&acrtc->base);
1674 				if (rc)
1675 					DRM_WARN("Failed to enable vblank interrupts\n");
1676 			} else {
1677 				dm_disable_vblank(&acrtc->base);
1678 			}
1679 
1680 		}
1681 	}
1682 
1683 }
1684 
1685 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1686 {
1687 	struct dc_state *context = NULL;
1688 	enum dc_status res = DC_ERROR_UNEXPECTED;
1689 	int i;
1690 	struct dc_stream_state *del_streams[MAX_PIPES];
1691 	int del_streams_count = 0;
1692 
1693 	memset(del_streams, 0, sizeof(del_streams));
1694 
1695 	context = dc_create_state(dc);
1696 	if (context == NULL)
1697 		goto context_alloc_fail;
1698 
1699 	dc_resource_state_copy_construct_current(dc, context);
1700 
1701 	/* First, remove all streams from the context */
1702 	for (i = 0; i < context->stream_count; i++) {
1703 		struct dc_stream_state *stream = context->streams[i];
1704 
1705 		del_streams[del_streams_count++] = stream;
1706 	}
1707 
1708 	/* Remove all planes for removed streams and then remove the streams */
1709 	for (i = 0; i < del_streams_count; i++) {
1710 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1711 			res = DC_FAIL_DETACH_SURFACES;
1712 			goto fail;
1713 		}
1714 
1715 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1716 		if (res != DC_OK)
1717 			goto fail;
1718 	}
1719 
1720 
1721 	res = dc_validate_global_state(dc, context, false);
1722 
1723 	if (res != DC_OK) {
1724 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1725 		goto fail;
1726 	}
1727 
1728 	res = dc_commit_state(dc, context);
1729 
1730 fail:
1731 	dc_release_state(context);
1732 
1733 context_alloc_fail:
1734 	return res;
1735 }
1736 
1737 static int dm_suspend(void *handle)
1738 {
1739 	struct amdgpu_device *adev = handle;
1740 	struct amdgpu_display_manager *dm = &adev->dm;
1741 	int ret = 0;
1742 
1743 	if (amdgpu_in_reset(adev)) {
1744 		mutex_lock(&dm->dc_lock);
1745 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1746 
1747 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1748 
1749 		amdgpu_dm_commit_zero_streams(dm->dc);
1750 
1751 		amdgpu_dm_irq_suspend(adev);
1752 
1753 		return ret;
1754 	}
1755 
1756 	WARN_ON(adev->dm.cached_state);
1757 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1758 
1759 	s3_handle_mst(adev_to_drm(adev), true);
1760 
1761 	amdgpu_dm_irq_suspend(adev);
1762 
1763 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1764 
1765 	return 0;
1766 }
1767 
1768 static struct amdgpu_dm_connector *
1769 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1770 					     struct drm_crtc *crtc)
1771 {
1772 	uint32_t i;
1773 	struct drm_connector_state *new_con_state;
1774 	struct drm_connector *connector;
1775 	struct drm_crtc *crtc_from_state;
1776 
1777 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1778 		crtc_from_state = new_con_state->crtc;
1779 
1780 		if (crtc_from_state == crtc)
1781 			return to_amdgpu_dm_connector(connector);
1782 	}
1783 
1784 	return NULL;
1785 }
1786 
1787 static void emulated_link_detect(struct dc_link *link)
1788 {
1789 	struct dc_sink_init_data sink_init_data = { 0 };
1790 	struct display_sink_capability sink_caps = { 0 };
1791 	enum dc_edid_status edid_status;
1792 	struct dc_context *dc_ctx = link->ctx;
1793 	struct dc_sink *sink = NULL;
1794 	struct dc_sink *prev_sink = NULL;
1795 
1796 	link->type = dc_connection_none;
1797 	prev_sink = link->local_sink;
1798 
1799 	if (prev_sink)
1800 		dc_sink_release(prev_sink);
1801 
1802 	switch (link->connector_signal) {
1803 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1804 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1805 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1806 		break;
1807 	}
1808 
1809 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1810 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1811 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1812 		break;
1813 	}
1814 
1815 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1816 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1817 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1818 		break;
1819 	}
1820 
1821 	case SIGNAL_TYPE_LVDS: {
1822 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1823 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1824 		break;
1825 	}
1826 
1827 	case SIGNAL_TYPE_EDP: {
1828 		sink_caps.transaction_type =
1829 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1830 		sink_caps.signal = SIGNAL_TYPE_EDP;
1831 		break;
1832 	}
1833 
1834 	case SIGNAL_TYPE_DISPLAY_PORT: {
1835 		sink_caps.transaction_type =
1836 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1837 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1838 		break;
1839 	}
1840 
1841 	default:
1842 		DC_ERROR("Invalid connector type! signal:%d\n",
1843 			link->connector_signal);
1844 		return;
1845 	}
1846 
1847 	sink_init_data.link = link;
1848 	sink_init_data.sink_signal = sink_caps.signal;
1849 
1850 	sink = dc_sink_create(&sink_init_data);
1851 	if (!sink) {
1852 		DC_ERROR("Failed to create sink!\n");
1853 		return;
1854 	}
1855 
1856 	/* dc_sink_create returns a new reference */
1857 	link->local_sink = sink;
1858 
1859 	edid_status = dm_helpers_read_local_edid(
1860 			link->ctx,
1861 			link,
1862 			sink);
1863 
1864 	if (edid_status != EDID_OK)
1865 		DC_ERROR("Failed to read EDID\n");
1866 
1867 }
1868 
1869 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1870 				     struct amdgpu_display_manager *dm)
1871 {
1872 	struct {
1873 		struct dc_surface_update surface_updates[MAX_SURFACES];
1874 		struct dc_plane_info plane_infos[MAX_SURFACES];
1875 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1876 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1877 		struct dc_stream_update stream_update;
1878 	} *bundle;
1879 	int k, m;
1880 
1881 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1882 
1883 	if (!bundle) {
1884 		dm_error("Failed to allocate update bundle\n");
1885 		goto cleanup;
1886 	}
1887 
1888 	for (k = 0; k < dc_state->stream_count; k++) {
1889 		bundle->stream_update.stream = dc_state->streams[k];
1890 
1891 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1892 			bundle->surface_updates[m].surface =
1893 				dc_state->stream_status->plane_states[m];
1894 			bundle->surface_updates[m].surface->force_full_update =
1895 				true;
1896 		}
1897 		dc_commit_updates_for_stream(
1898 			dm->dc, bundle->surface_updates,
1899 			dc_state->stream_status->plane_count,
1900 			dc_state->streams[k], &bundle->stream_update, dc_state);
1901 	}
1902 
1903 cleanup:
1904 	kfree(bundle);
1905 
1906 	return;
1907 }
1908 
1909 static void dm_set_dpms_off(struct dc_link *link)
1910 {
1911 	struct dc_stream_state *stream_state;
1912 	struct amdgpu_dm_connector *aconnector = link->priv;
1913 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1914 	struct dc_stream_update stream_update;
1915 	bool dpms_off = true;
1916 
1917 	memset(&stream_update, 0, sizeof(stream_update));
1918 	stream_update.dpms_off = &dpms_off;
1919 
1920 	mutex_lock(&adev->dm.dc_lock);
1921 	stream_state = dc_stream_find_from_link(link);
1922 
1923 	if (stream_state == NULL) {
1924 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1925 		mutex_unlock(&adev->dm.dc_lock);
1926 		return;
1927 	}
1928 
1929 	stream_update.stream = stream_state;
1930 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1931 				     stream_state, &stream_update,
1932 				     stream_state->ctx->dc->current_state);
1933 	mutex_unlock(&adev->dm.dc_lock);
1934 }
1935 
1936 static int dm_resume(void *handle)
1937 {
1938 	struct amdgpu_device *adev = handle;
1939 	struct drm_device *ddev = adev_to_drm(adev);
1940 	struct amdgpu_display_manager *dm = &adev->dm;
1941 	struct amdgpu_dm_connector *aconnector;
1942 	struct drm_connector *connector;
1943 	struct drm_connector_list_iter iter;
1944 	struct drm_crtc *crtc;
1945 	struct drm_crtc_state *new_crtc_state;
1946 	struct dm_crtc_state *dm_new_crtc_state;
1947 	struct drm_plane *plane;
1948 	struct drm_plane_state *new_plane_state;
1949 	struct dm_plane_state *dm_new_plane_state;
1950 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1951 	enum dc_connection_type new_connection_type = dc_connection_none;
1952 	struct dc_state *dc_state;
1953 	int i, r, j;
1954 
1955 	if (amdgpu_in_reset(adev)) {
1956 		dc_state = dm->cached_dc_state;
1957 
1958 		r = dm_dmub_hw_init(adev);
1959 		if (r)
1960 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1961 
1962 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1963 		dc_resume(dm->dc);
1964 
1965 		amdgpu_dm_irq_resume_early(adev);
1966 
1967 		for (i = 0; i < dc_state->stream_count; i++) {
1968 			dc_state->streams[i]->mode_changed = true;
1969 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1970 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
1971 					= 0xffffffff;
1972 			}
1973 		}
1974 
1975 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1976 
1977 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1978 
1979 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1980 
1981 		dc_release_state(dm->cached_dc_state);
1982 		dm->cached_dc_state = NULL;
1983 
1984 		amdgpu_dm_irq_resume_late(adev);
1985 
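		/* Drop the dc_lock taken in dm_suspend() during GPU reset. */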
1986 		mutex_unlock(&dm->dc_lock);
1987 
1988 		return 0;
1989 	}
1990 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1991 	dc_release_state(dm_state->context);
1992 	dm_state->context = dc_create_state(dm->dc);
1993 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1994 	dc_resource_state_construct(dm->dc, dm_state->context);
1995 
1996 	/* Before powering on DC we need to re-initialize DMUB. */
1997 	r = dm_dmub_hw_init(adev);
1998 	if (r)
1999 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2000 
2001 	/* power on hardware */
2002 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2003 
2004 	/* program HPD filter */
2005 	dc_resume(dm->dc);
2006 
2007 	/*
2008 	 * Enable HPD Rx IRQ early; this must be done before setting the mode,
2009 	 * since short-pulse interrupts are used for MST.
2010 	 */
2011 	amdgpu_dm_irq_resume_early(adev);
2012 
2013 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2014 	s3_handle_mst(ddev, false);
2015 
2016 	/* Do detection */
2017 	drm_connector_list_iter_begin(ddev, &iter);
2018 	drm_for_each_connector_iter(connector, &iter) {
2019 		aconnector = to_amdgpu_dm_connector(connector);
2020 
2021 		/*
2022 		 * This is the case when traversing through already-created
2023 		 * MST connectors; they should be skipped.
2024 		 */
2025 		if (aconnector->mst_port)
2026 			continue;
2027 
2028 		mutex_lock(&aconnector->hpd_lock);
2029 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2030 			DRM_ERROR("KMS: Failed to detect connector\n");
2031 
2032 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2033 			emulated_link_detect(aconnector->dc_link);
2034 		else
2035 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2036 
2037 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2038 			aconnector->fake_enable = false;
2039 
2040 		if (aconnector->dc_sink)
2041 			dc_sink_release(aconnector->dc_sink);
2042 		aconnector->dc_sink = NULL;
2043 		amdgpu_dm_update_connector_after_detect(aconnector);
2044 		mutex_unlock(&aconnector->hpd_lock);
2045 	}
2046 	drm_connector_list_iter_end(&iter);
2047 
2048 	/* Force mode set in atomic commit */
2049 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2050 		new_crtc_state->active_changed = true;
2051 
2052 	/*
2053 	 * atomic_check is expected to create the dc states. We need to release
2054 	 * them here, since they were duplicated as part of the suspend
2055 	 * procedure.
2056 	 */
2057 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2058 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2059 		if (dm_new_crtc_state->stream) {
2060 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2061 			dc_stream_release(dm_new_crtc_state->stream);
2062 			dm_new_crtc_state->stream = NULL;
2063 		}
2064 	}
2065 
2066 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2067 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2068 		if (dm_new_plane_state->dc_state) {
2069 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2070 			dc_plane_state_release(dm_new_plane_state->dc_state);
2071 			dm_new_plane_state->dc_state = NULL;
2072 		}
2073 	}
2074 
2075 	drm_atomic_helper_resume(ddev, dm->cached_state);
2076 
2077 	dm->cached_state = NULL;
2078 
2079 	amdgpu_dm_irq_resume_late(adev);
2080 
2081 	amdgpu_dm_smu_write_watermarks_table(adev);
2082 
2083 	return 0;
2084 }
2085 
2086 /**
2087  * DOC: DM Lifecycle
2088  *
2089  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2090  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2091  * the base driver's device list to be initialized and torn down accordingly.
2092  *
2093  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2094  */
2095 
2096 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2097 	.name = "dm",
2098 	.early_init = dm_early_init,
2099 	.late_init = dm_late_init,
2100 	.sw_init = dm_sw_init,
2101 	.sw_fini = dm_sw_fini,
2102 	.hw_init = dm_hw_init,
2103 	.hw_fini = dm_hw_fini,
2104 	.suspend = dm_suspend,
2105 	.resume = dm_resume,
2106 	.is_idle = dm_is_idle,
2107 	.wait_for_idle = dm_wait_for_idle,
2108 	.check_soft_reset = dm_check_soft_reset,
2109 	.soft_reset = dm_soft_reset,
2110 	.set_clockgating_state = dm_set_clockgating_state,
2111 	.set_powergating_state = dm_set_powergating_state,
2112 };
2113 
2114 const struct amdgpu_ip_block_version dm_ip_block =
2115 {
2116 	.type = AMD_IP_BLOCK_TYPE_DCE,
2117 	.major = 1,
2118 	.minor = 0,
2119 	.rev = 0,
2120 	.funcs = &amdgpu_dm_funcs,
2121 };
2122 
2123 
2124 /**
2125  * DOC: atomic
2126  *
2127  * *WIP*
2128  */
2129 
2130 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2131 	.fb_create = amdgpu_display_user_framebuffer_create,
2132 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2133 	.atomic_check = amdgpu_dm_atomic_check,
2134 	.atomic_commit = amdgpu_dm_atomic_commit,
2135 };
2136 
2137 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2138 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2139 };
2140 
2141 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2142 {
2143 	u32 max_cll, min_cll, max, min, q, r;
2144 	struct amdgpu_dm_backlight_caps *caps;
2145 	struct amdgpu_display_manager *dm;
2146 	struct drm_connector *conn_base;
2147 	struct amdgpu_device *adev;
2148 	struct dc_link *link = NULL;
2149 	static const u8 pre_computed_values[] = {
2150 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2151 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2152 
2153 	if (!aconnector || !aconnector->dc_link)
2154 		return;
2155 
2156 	link = aconnector->dc_link;
2157 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2158 		return;
2159 
2160 	conn_base = &aconnector->base;
2161 	adev = drm_to_adev(conn_base->dev);
2162 	dm = &adev->dm;
2163 	caps = &dm->backlight_caps;
2164 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2165 	caps->aux_support = false;
2166 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2167 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2168 
2169 	if (caps->ext_caps->bits.oled == 1 /*||
2170 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2171 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2172 		caps->aux_support = true;
2173 
2174 	if (amdgpu_backlight == 0)
2175 		caps->aux_support = false;
2176 	else if (amdgpu_backlight == 1)
2177 		caps->aux_support = true;
2178 
2179 	/* From the specification (CTA-861-G), the maximum luminance is
2180 	 * calculated as:
2181 	 *	Luminance = 50*2**(CV/32)
2182 	 * where CV is a one-byte value.
2183 	 * Evaluating this expression directly would require floating-point
2184 	 * precision; to avoid that complexity, we take advantage of the fact
2185 	 * that CV is divided by a constant. From Euclid's division algorithm,
2186 	 * CV can be written as CV = 32*q + r. Substituting this into the
2187 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2188 	 * to pre-compute the values of 2**(r/32). They were generated with
2189 	 * the following Ruby one-liner:
2190 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2191 	 * and the results can be verified against
2192 	 * pre_computed_values.
2193 	 */
2194 	q = max_cll >> 5;
2195 	r = max_cll % 32;
2196 	max = (1 << q) * pre_computed_values[r];
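	/*
	 * Worked example (hypothetical max_cll = 100, for illustration only):
	 * q = 100 >> 5 = 3, r = 100 % 32 = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits,
	 * matching 50 * 2**(100/32) ~= 436 up to table rounding.
	 */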
2197 
2198 	// min luminance: maxLum * (CV/255)^2 / 100, computed in integer math;
2199 	// the intermediate product stays within 32 bits for all valid inputs
2200 	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2201 
2202 	caps->aux_max_input_signal = max;
2203 	caps->aux_min_input_signal = min;
2204 }
2205 
2206 void amdgpu_dm_update_connector_after_detect(
2207 		struct amdgpu_dm_connector *aconnector)
2208 {
2209 	struct drm_connector *connector = &aconnector->base;
2210 	struct drm_device *dev = connector->dev;
2211 	struct dc_sink *sink;
2212 
2213 	/* MST handled by drm_mst framework */
2214 	if (aconnector->mst_mgr.mst_state)
2215 		return;
2216 
2217 	sink = aconnector->dc_link->local_sink;
2218 	if (sink)
2219 		dc_sink_retain(sink);
2220 
2221 	/*
2222 	 * An EDID-managed connector gets its first update only in the mode_valid
2223 	 * hook; the connector sink is then set to either a fake or a physical sink,
2224 	 * depending on link status. Skip if this was already done during boot.
2225 	 */
2226 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2227 			&& aconnector->dc_em_sink) {
2228 
2229 		/*
2230 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2231 		 * fake a stream, because connector->sink is set to NULL on resume.
2232 		 */
2233 		mutex_lock(&dev->mode_config.mutex);
2234 
2235 		if (sink) {
2236 			if (aconnector->dc_sink) {
2237 				amdgpu_dm_update_freesync_caps(connector, NULL);
2238 				/*
2239 				 * The retain and release below bump the sink's
2240 				 * refcount, because the link no longer points to it
2241 				 * after disconnect; otherwise the next crtc-to-connector
2242 				 * reshuffle by the UMD would trigger an unwanted dc_sink release.
2243 				 */
2244 				dc_sink_release(aconnector->dc_sink);
2245 			}
2246 			aconnector->dc_sink = sink;
2247 			dc_sink_retain(aconnector->dc_sink);
2248 			amdgpu_dm_update_freesync_caps(connector,
2249 					aconnector->edid);
2250 		} else {
2251 			amdgpu_dm_update_freesync_caps(connector, NULL);
2252 			if (!aconnector->dc_sink) {
2253 				aconnector->dc_sink = aconnector->dc_em_sink;
2254 				dc_sink_retain(aconnector->dc_sink);
2255 			}
2256 		}
2257 
2258 		mutex_unlock(&dev->mode_config.mutex);
2259 
2260 		if (sink)
2261 			dc_sink_release(sink);
2262 		return;
2263 	}
2264 
2265 	/*
2266 	 * TODO: temporary guard to look for proper fix
2267 	 * if this sink is MST sink, we should not do anything
2268 	 */
2269 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2270 		dc_sink_release(sink);
2271 		return;
2272 	}
2273 
2274 	if (aconnector->dc_sink == sink) {
2275 		/*
2276 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2277 		 * Do nothing!!
2278 		 */
2279 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2280 				aconnector->connector_id);
2281 		if (sink)
2282 			dc_sink_release(sink);
2283 		return;
2284 	}
2285 
2286 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2287 		aconnector->connector_id, aconnector->dc_sink, sink);
2288 
2289 	mutex_lock(&dev->mode_config.mutex);
2290 
2291 	/*
2292 	 * 1. Update status of the drm connector
2293 	 * 2. Send an event and let userspace tell us what to do
2294 	 */
2295 	if (sink) {
2296 		/*
2297 		 * TODO: check if we still need the S3 mode update workaround.
2298 		 * If yes, put it here.
2299 		 */
2300 		if (aconnector->dc_sink) {
2301 			amdgpu_dm_update_freesync_caps(connector, NULL);
2302 			dc_sink_release(aconnector->dc_sink);
2303 		}
2304 
2305 		aconnector->dc_sink = sink;
2306 		dc_sink_retain(aconnector->dc_sink);
2307 		if (sink->dc_edid.length == 0) {
2308 			aconnector->edid = NULL;
2309 			if (aconnector->dc_link->aux_mode) {
2310 				drm_dp_cec_unset_edid(
2311 					&aconnector->dm_dp_aux.aux);
2312 			}
2313 		} else {
2314 			aconnector->edid =
2315 				(struct edid *)sink->dc_edid.raw_edid;
2316 
2317 			drm_connector_update_edid_property(connector,
2318 							   aconnector->edid);
2319 			if (aconnector->dc_link->aux_mode)
2320 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2321 						    aconnector->edid);
2322 		}
2323 
2324 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2325 		update_connector_ext_caps(aconnector);
2326 	} else {
2327 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2328 		amdgpu_dm_update_freesync_caps(connector, NULL);
2329 		drm_connector_update_edid_property(connector, NULL);
2330 		aconnector->num_modes = 0;
2331 		dc_sink_release(aconnector->dc_sink);
2332 		aconnector->dc_sink = NULL;
2333 		aconnector->edid = NULL;
2334 #ifdef CONFIG_DRM_AMD_DC_HDCP
2335 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2336 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2337 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2338 #endif
2339 	}
2340 
2341 	mutex_unlock(&dev->mode_config.mutex);
2342 
2343 	update_subconnector_property(aconnector);
2344 
2345 	if (sink)
2346 		dc_sink_release(sink);
2347 }
2348 
2349 static void handle_hpd_irq(void *param)
2350 {
2351 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2352 	struct drm_connector *connector = &aconnector->base;
2353 	struct drm_device *dev = connector->dev;
2354 	enum dc_connection_type new_connection_type = dc_connection_none;
2355 #ifdef CONFIG_DRM_AMD_DC_HDCP
2356 	struct amdgpu_device *adev = drm_to_adev(dev);
2357 #endif
2358 
2359 	/*
2360 	 * On failure, or for MST, there is no need to update the connector status
2361 	 * or notify the OS, since MST handles this in its own context.
2362 	 */
2363 	mutex_lock(&aconnector->hpd_lock);
2364 
2365 #ifdef CONFIG_DRM_AMD_DC_HDCP
2366 	if (adev->dm.hdcp_workqueue)
2367 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2368 #endif
2369 	if (aconnector->fake_enable)
2370 		aconnector->fake_enable = false;
2371 
2372 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2373 		DRM_ERROR("KMS: Failed to detect connector\n");
2374 
2375 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2376 		emulated_link_detect(aconnector->dc_link);
2377 
2378 
2379 		drm_modeset_lock_all(dev);
2380 		dm_restore_drm_connector_state(dev, connector);
2381 		drm_modeset_unlock_all(dev);
2382 
2383 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2384 			drm_kms_helper_hotplug_event(dev);
2385 
2386 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2387 		if (new_connection_type == dc_connection_none &&
2388 		    aconnector->dc_link->type == dc_connection_none)
2389 			dm_set_dpms_off(aconnector->dc_link);
2390 
2391 		amdgpu_dm_update_connector_after_detect(aconnector);
2392 
2393 		drm_modeset_lock_all(dev);
2394 		dm_restore_drm_connector_state(dev, connector);
2395 		drm_modeset_unlock_all(dev);
2396 
2397 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2398 			drm_kms_helper_hotplug_event(dev);
2399 	}
2400 	mutex_unlock(&aconnector->hpd_lock);
2401 
2402 }
2403 
2404 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2405 {
2406 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2407 	uint8_t dret;
2408 	bool new_irq_handled = false;
2409 	int dpcd_addr;
2410 	int dpcd_bytes_to_read;
2411 
2412 	const int max_process_count = 30;
2413 	int process_count = 0;
2414 
2415 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2416 
2417 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2418 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2419 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2420 		dpcd_addr = DP_SINK_COUNT;
2421 	} else {
2422 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2423 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2424 		dpcd_addr = DP_SINK_COUNT_ESI;
2425 	}
2426 
2427 	dret = drm_dp_dpcd_read(
2428 		&aconnector->dm_dp_aux.aux,
2429 		dpcd_addr,
2430 		esi,
2431 		dpcd_bytes_to_read);
2432 
2433 	while (dret == dpcd_bytes_to_read &&
2434 		process_count < max_process_count) {
2435 		uint8_t retry;
2436 		dret = 0;
2437 
2438 		process_count++;
2439 
2440 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2441 		/* handle HPD short pulse irq */
2442 		if (aconnector->mst_mgr.mst_state)
2443 			drm_dp_mst_hpd_irq(
2444 				&aconnector->mst_mgr,
2445 				esi,
2446 				&new_irq_handled);
2447 
2448 		if (new_irq_handled) {
2449 			/* ACK at DPCD to notify the downstream device */
2450 			const int ack_dpcd_bytes_to_write =
2451 				dpcd_bytes_to_read - 1;
2452 
2453 			for (retry = 0; retry < 3; retry++) {
2454 				uint8_t wret;
2455 
2456 				wret = drm_dp_dpcd_write(
2457 					&aconnector->dm_dp_aux.aux,
2458 					dpcd_addr + 1,
2459 					&esi[1],
2460 					ack_dpcd_bytes_to_write);
2461 				if (wret == ack_dpcd_bytes_to_write)
2462 					break;
2463 			}
2464 
2465 			/* check if there is new irq to be handled */
2466 			dret = drm_dp_dpcd_read(
2467 				&aconnector->dm_dp_aux.aux,
2468 				dpcd_addr,
2469 				esi,
2470 				dpcd_bytes_to_read);
2471 
2472 			new_irq_handled = false;
2473 		} else {
2474 			break;
2475 		}
2476 	}
2477 
2478 	if (process_count == max_process_count)
2479 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2480 }
2481 
2482 static void handle_hpd_rx_irq(void *param)
2483 {
2484 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2485 	struct drm_connector *connector = &aconnector->base;
2486 	struct drm_device *dev = connector->dev;
2487 	struct dc_link *dc_link = aconnector->dc_link;
2488 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2489 	enum dc_connection_type new_connection_type = dc_connection_none;
2490 #ifdef CONFIG_DRM_AMD_DC_HDCP
2491 	union hpd_irq_data hpd_irq_data;
2492 	struct amdgpu_device *adev = drm_to_adev(dev);
2493 
2494 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2495 #endif
2496 
2497 	/*
2498 	 * TODO: Temporarily take the mutex so the HPD interrupt does not hit a
2499 	 * GPIO conflict; once an i2c helper is implemented, this mutex should
2500 	 * be retired.
2501 	 */
2502 	if (dc_link->type != dc_connection_mst_branch)
2503 		mutex_lock(&aconnector->hpd_lock);
2504 
2505 
2506 #ifdef CONFIG_DRM_AMD_DC_HDCP
2507 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2508 #else
2509 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2510 #endif
2511 			!is_mst_root_connector) {
2512 		/* Downstream Port status changed. */
2513 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2514 			DRM_ERROR("KMS: Failed to detect connector\n");
2515 
2516 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2517 			emulated_link_detect(dc_link);
2518 
2519 			if (aconnector->fake_enable)
2520 				aconnector->fake_enable = false;
2521 
2522 			amdgpu_dm_update_connector_after_detect(aconnector);
2523 
2524 
2525 			drm_modeset_lock_all(dev);
2526 			dm_restore_drm_connector_state(dev, connector);
2527 			drm_modeset_unlock_all(dev);
2528 
2529 			drm_kms_helper_hotplug_event(dev);
2530 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2531 
2532 			if (aconnector->fake_enable)
2533 				aconnector->fake_enable = false;
2534 
2535 			amdgpu_dm_update_connector_after_detect(aconnector);
2536 
2537 
2538 			drm_modeset_lock_all(dev);
2539 			dm_restore_drm_connector_state(dev, connector);
2540 			drm_modeset_unlock_all(dev);
2541 
2542 			drm_kms_helper_hotplug_event(dev);
2543 		}
2544 	}
2545 #ifdef CONFIG_DRM_AMD_DC_HDCP
2546 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2547 		if (adev->dm.hdcp_workqueue)
2548 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2549 	}
2550 #endif
2551 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2552 	    (dc_link->type == dc_connection_mst_branch))
2553 		dm_handle_hpd_rx_irq(aconnector);
2554 
2555 	if (dc_link->type != dc_connection_mst_branch) {
2556 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2557 		mutex_unlock(&aconnector->hpd_lock);
2558 	}
2559 }
2560 
2561 static void register_hpd_handlers(struct amdgpu_device *adev)
2562 {
2563 	struct drm_device *dev = adev_to_drm(adev);
2564 	struct drm_connector *connector;
2565 	struct amdgpu_dm_connector *aconnector;
2566 	const struct dc_link *dc_link;
2567 	struct dc_interrupt_params int_params = {0};
2568 
2569 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2570 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2571 
2572 	list_for_each_entry(connector,
2573 			&dev->mode_config.connector_list, head)	{
2574 
2575 		aconnector = to_amdgpu_dm_connector(connector);
2576 		dc_link = aconnector->dc_link;
2577 
2578 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2579 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2580 			int_params.irq_source = dc_link->irq_source_hpd;
2581 
2582 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2583 					handle_hpd_irq,
2584 					(void *) aconnector);
2585 		}
2586 
2587 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2588 
2589 			/* Also register for DP short pulse (hpd_rx). */
2590 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2591 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2592 
2593 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2594 					handle_hpd_rx_irq,
2595 					(void *) aconnector);
2596 		}
2597 	}
2598 }
2599 
2600 #if defined(CONFIG_DRM_AMD_DC_SI)
2601 /* Register IRQ sources and initialize IRQ callbacks */
2602 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2603 {
2604 	struct dc *dc = adev->dm.dc;
2605 	struct common_irq_params *c_irq_params;
2606 	struct dc_interrupt_params int_params = {0};
2607 	int r;
2608 	int i;
2609 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2610 
2611 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2612 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2613 
2614 	/*
2615 	 * Actions of amdgpu_irq_add_id():
2616 	 * 1. Register a set() function with base driver.
2617 	 *    Base driver will call set() function to enable/disable an
2618 	 *    interrupt in DC hardware.
2619 	 * 2. Register amdgpu_dm_irq_handler().
2620 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2621 	 *    coming from DC hardware.
2622 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2623 	 *    for acknowledging and handling. */
2624 
2625 	/* Use VBLANK interrupt */
2626 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2627 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2628 		if (r) {
2629 			DRM_ERROR("Failed to add crtc irq id!\n");
2630 			return r;
2631 		}
2632 
2633 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2634 		int_params.irq_source =
2635 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2636 
2637 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2638 
2639 		c_irq_params->adev = adev;
2640 		c_irq_params->irq_src = int_params.irq_source;
2641 
2642 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2643 				dm_crtc_high_irq, c_irq_params);
2644 	}
2645 
2646 	/* Use GRPH_PFLIP interrupt */
2647 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2648 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2649 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2650 		if (r) {
2651 			DRM_ERROR("Failed to add page flip irq id!\n");
2652 			return r;
2653 		}
2654 
2655 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2656 		int_params.irq_source =
2657 			dc_interrupt_to_irq_source(dc, i, 0);
2658 
2659 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2660 
2661 		c_irq_params->adev = adev;
2662 		c_irq_params->irq_src = int_params.irq_source;
2663 
2664 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2665 				dm_pflip_high_irq, c_irq_params);
2666 
2667 	}
2668 
2669 	/* HPD */
2670 	r = amdgpu_irq_add_id(adev, client_id,
2671 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2672 	if (r) {
2673 		DRM_ERROR("Failed to add hpd irq id!\n");
2674 		return r;
2675 	}
2676 
2677 	register_hpd_handlers(adev);
2678 
2679 	return 0;
2680 }
2681 #endif
2682 
2683 /* Register IRQ sources and initialize IRQ callbacks */
2684 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2685 {
2686 	struct dc *dc = adev->dm.dc;
2687 	struct common_irq_params *c_irq_params;
2688 	struct dc_interrupt_params int_params = {0};
2689 	int r;
2690 	int i;
2691 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2692 
2693 	if (adev->asic_type >= CHIP_VEGA10)
2694 		client_id = SOC15_IH_CLIENTID_DCE;
2695 
2696 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2697 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2698 
2699 	/*
2700 	 * Actions of amdgpu_irq_add_id():
2701 	 * 1. Register a set() function with base driver.
2702 	 *    Base driver will call set() function to enable/disable an
2703 	 *    interrupt in DC hardware.
2704 	 * 2. Register amdgpu_dm_irq_handler().
2705 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2706 	 *    coming from DC hardware.
2707 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2708 	 *    for acknowledging and handling. */
2709 
2710 	/* Use VBLANK interrupt */
2711 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2712 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2713 		if (r) {
2714 			DRM_ERROR("Failed to add crtc irq id!\n");
2715 			return r;
2716 		}
2717 
2718 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2719 		int_params.irq_source =
2720 			dc_interrupt_to_irq_source(dc, i, 0);
2721 
2722 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2723 
2724 		c_irq_params->adev = adev;
2725 		c_irq_params->irq_src = int_params.irq_source;
2726 
2727 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2728 				dm_crtc_high_irq, c_irq_params);
2729 	}
2730 
2731 	/* Use VUPDATE interrupt */
2732 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2733 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2734 		if (r) {
2735 			DRM_ERROR("Failed to add vupdate irq id!\n");
2736 			return r;
2737 		}
2738 
2739 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2740 		int_params.irq_source =
2741 			dc_interrupt_to_irq_source(dc, i, 0);
2742 
2743 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2744 
2745 		c_irq_params->adev = adev;
2746 		c_irq_params->irq_src = int_params.irq_source;
2747 
2748 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2749 				dm_vupdate_high_irq, c_irq_params);
2750 	}
2751 
2752 	/* Use GRPH_PFLIP interrupt */
2753 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2754 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2755 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2756 		if (r) {
2757 			DRM_ERROR("Failed to add page flip irq id!\n");
2758 			return r;
2759 		}
2760 
2761 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2762 		int_params.irq_source =
2763 			dc_interrupt_to_irq_source(dc, i, 0);
2764 
2765 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2766 
2767 		c_irq_params->adev = adev;
2768 		c_irq_params->irq_src = int_params.irq_source;
2769 
2770 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2771 				dm_pflip_high_irq, c_irq_params);
2772 
2773 	}
2774 
2775 	/* HPD */
2776 	r = amdgpu_irq_add_id(adev, client_id,
2777 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2778 	if (r) {
2779 		DRM_ERROR("Failed to add hpd irq id!\n");
2780 		return r;
2781 	}
2782 
2783 	register_hpd_handlers(adev);
2784 
2785 	return 0;
2786 }
2787 
2788 #if defined(CONFIG_DRM_AMD_DC_DCN)
2789 /* Register IRQ sources and initialize IRQ callbacks */
2790 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2791 {
2792 	struct dc *dc = adev->dm.dc;
2793 	struct common_irq_params *c_irq_params;
2794 	struct dc_interrupt_params int_params = {0};
2795 	int r;
2796 	int i;
2797 
2798 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2799 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2800 
2801 	/*
2802 	 * Actions of amdgpu_irq_add_id():
2803 	 * 1. Register a set() function with base driver.
2804 	 *    Base driver will call set() function to enable/disable an
2805 	 *    interrupt in DC hardware.
2806 	 * 2. Register amdgpu_dm_irq_handler().
2807 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2808 	 *    coming from DC hardware.
2809 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2810 	 *    for acknowledging and handling.
2811 	 */
2812 
2813 	/* Use VSTARTUP interrupt */
2814 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2815 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2816 			i++) {
2817 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2818 
2819 		if (r) {
2820 			DRM_ERROR("Failed to add crtc irq id!\n");
2821 			return r;
2822 		}
2823 
2824 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2825 		int_params.irq_source =
2826 			dc_interrupt_to_irq_source(dc, i, 0);
2827 
2828 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2829 
2830 		c_irq_params->adev = adev;
2831 		c_irq_params->irq_src = int_params.irq_source;
2832 
2833 		amdgpu_dm_irq_register_interrupt(
2834 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2835 	}
2836 
2837 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2838 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2839 	 * to trigger at end of each vblank, regardless of state of the lock,
2840 	 * matching DCE behaviour.
2841 	 */
2842 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2843 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2844 	     i++) {
2845 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2846 
2847 		if (r) {
2848 			DRM_ERROR("Failed to add vupdate irq id!\n");
2849 			return r;
2850 		}
2851 
2852 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2853 		int_params.irq_source =
2854 			dc_interrupt_to_irq_source(dc, i, 0);
2855 
2856 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2857 
2858 		c_irq_params->adev = adev;
2859 		c_irq_params->irq_src = int_params.irq_source;
2860 
2861 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862 				dm_vupdate_high_irq, c_irq_params);
2863 	}
2864 
2865 	/* Use GRPH_PFLIP interrupt */
2866 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2867 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2868 			i++) {
2869 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2870 		if (r) {
2871 			DRM_ERROR("Failed to add page flip irq id!\n");
2872 			return r;
2873 		}
2874 
2875 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2876 		int_params.irq_source =
2877 			dc_interrupt_to_irq_source(dc, i, 0);
2878 
2879 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2880 
2881 		c_irq_params->adev = adev;
2882 		c_irq_params->irq_src = int_params.irq_source;
2883 
2884 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2885 				dm_pflip_high_irq, c_irq_params);
2886 
2887 	}
2888 
2889 	/* HPD */
2890 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2891 			&adev->hpd_irq);
2892 	if (r) {
2893 		DRM_ERROR("Failed to add hpd irq id!\n");
2894 		return r;
2895 	}
2896 
2897 	register_hpd_handlers(adev);
2898 
2899 	return 0;
2900 }
2901 #endif
2902 
2903 /*
2904  * Acquires the lock for the atomic state object and returns
2905  * the new atomic state.
2906  *
2907  * This should only be called during atomic check.
2908  */
2909 static int dm_atomic_get_state(struct drm_atomic_state *state,
2910 			       struct dm_atomic_state **dm_state)
2911 {
2912 	struct drm_device *dev = state->dev;
2913 	struct amdgpu_device *adev = drm_to_adev(dev);
2914 	struct amdgpu_display_manager *dm = &adev->dm;
2915 	struct drm_private_state *priv_state;
2916 
2917 	if (*dm_state)
2918 		return 0;
2919 
2920 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2921 	if (IS_ERR(priv_state))
2922 		return PTR_ERR(priv_state);
2923 
2924 	*dm_state = to_dm_atomic_state(priv_state);
2925 
2926 	return 0;
2927 }
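
/*
 * A minimal usage sketch (hypothetical caller; assumes it runs during
 * atomic check with a dm_state pointer initialized to NULL):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context can now be safely inspected or modified ...
 */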
2928 
2929 static struct dm_atomic_state *
2930 dm_atomic_get_new_state(struct drm_atomic_state *state)
2931 {
2932 	struct drm_device *dev = state->dev;
2933 	struct amdgpu_device *adev = drm_to_adev(dev);
2934 	struct amdgpu_display_manager *dm = &adev->dm;
2935 	struct drm_private_obj *obj;
2936 	struct drm_private_state *new_obj_state;
2937 	int i;
2938 
2939 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2940 		if (obj->funcs == dm->atomic_obj.funcs)
2941 			return to_dm_atomic_state(new_obj_state);
2942 	}
2943 
2944 	return NULL;
2945 }
2946 
2947 static struct drm_private_state *
2948 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2949 {
2950 	struct dm_atomic_state *old_state, *new_state;
2951 
2952 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2953 	if (!new_state)
2954 		return NULL;
2955 
2956 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2957 
2958 	old_state = to_dm_atomic_state(obj->state);
2959 
2960 	if (old_state && old_state->context)
2961 		new_state->context = dc_copy_state(old_state->context);
2962 
2963 	if (!new_state->context) {
2964 		kfree(new_state);
2965 		return NULL;
2966 	}
2967 
2968 	return &new_state->base;
2969 }
2970 
2971 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2972 				    struct drm_private_state *state)
2973 {
2974 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2975 
2976 	if (dm_state && dm_state->context)
2977 		dc_release_state(dm_state->context);
2978 
2979 	kfree(dm_state);
2980 }
2981 
2982 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2983 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2984 	.atomic_destroy_state = dm_atomic_destroy_state,
2985 };
2986 
2987 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2988 {
2989 	struct dm_atomic_state *state;
2990 	int r;
2991 
2992 	adev->mode_info.mode_config_initialized = true;
2993 
2994 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2995 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2996 
2997 	adev_to_drm(adev)->mode_config.max_width = 16384;
2998 	adev_to_drm(adev)->mode_config.max_height = 16384;
2999 
3000 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3001 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3002 	/* indicates support for immediate flip */
3003 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3004 
3005 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3006 
3007 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3008 	if (!state)
3009 		return -ENOMEM;
3010 
3011 	state->context = dc_create_state(adev->dm.dc);
3012 	if (!state->context) {
3013 		kfree(state);
3014 		return -ENOMEM;
3015 	}
3016 
3017 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3018 
3019 	drm_atomic_private_obj_init(adev_to_drm(adev),
3020 				    &adev->dm.atomic_obj,
3021 				    &state->base,
3022 				    &dm_atomic_state_funcs);
3023 
3024 	r = amdgpu_display_modeset_create_props(adev);
3025 	if (r) {
3026 		dc_release_state(state->context);
3027 		kfree(state);
3028 		return r;
3029 	}
3030 
3031 	r = amdgpu_dm_audio_init(adev);
3032 	if (r) {
3033 		dc_release_state(state->context);
3034 		kfree(state);
3035 		return r;
3036 	}
3037 
3038 	return 0;
3039 }
3040 
3041 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3042 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3043 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3044 
3045 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3046 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3047 
3048 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3049 {
3050 #if defined(CONFIG_ACPI)
3051 	struct amdgpu_dm_backlight_caps caps;
3052 
3053 	memset(&caps, 0, sizeof(caps));
3054 
3055 	if (dm->backlight_caps.caps_valid)
3056 		return;
3057 
3058 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3059 	if (caps.caps_valid) {
3060 		dm->backlight_caps.caps_valid = true;
3061 		if (caps.aux_support)
3062 			return;
3063 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3064 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3065 	} else {
3066 		dm->backlight_caps.min_input_signal =
3067 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3068 		dm->backlight_caps.max_input_signal =
3069 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3070 	}
3071 #else
3072 	if (dm->backlight_caps.aux_support)
3073 		return;
3074 
3075 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3076 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3077 #endif
3078 }
3079 
3080 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3081 				unsigned *min, unsigned *max)
3082 {
3083 	if (!caps)
3084 		return 0;
3085 
3086 	if (caps->aux_support) {
3087 		// Firmware limits are in nits, DC API wants millinits.
3088 		*max = 1000 * caps->aux_max_input_signal;
3089 		*min = 1000 * caps->aux_min_input_signal;
3090 	} else {
3091 		// Firmware limits are 8-bit, PWM control is 16-bit.
3092 		*max = 0x101 * caps->max_input_signal;
3093 		*min = 0x101 * caps->min_input_signal;
3094 	}
3095 	return 1;
3096 }
3097 
3098 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3099 					uint32_t brightness)
3100 {
3101 	unsigned min, max;
3102 
3103 	if (!get_brightness_range(caps, &min, &max))
3104 		return brightness;
3105 
3106 	// Rescale 0..255 to min..max
3107 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3108 				       AMDGPU_MAX_BL_LEVEL);
3109 }
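
/*
 * Worked example (assuming the PWM path with the default caps above,
 * min_input_signal = 12 and max_input_signal = 255, and
 * AMDGPU_MAX_BL_LEVEL = 255): min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433.
 */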
3110 
3111 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3112 				      uint32_t brightness)
3113 {
3114 	unsigned min, max;
3115 
3116 	if (!get_brightness_range(caps, &min, &max))
3117 		return brightness;
3118 
3119 	if (brightness < min)
3120 		return 0;
3121 	// Rescale min..max to 0..255
3122 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3123 				 max - min);
3124 }
3125 
3126 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3127 {
3128 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3129 	struct amdgpu_dm_backlight_caps caps;
3130 	struct dc_link *link = NULL;
3131 	u32 brightness;
3132 	bool rc;
3133 
3134 	amdgpu_dm_update_backlight_caps(dm);
3135 	caps = dm->backlight_caps;
3136 
3137 	link = (struct dc_link *)dm->backlight_link;
3138 
3139 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3140 	// Change brightness based on AUX property
3141 	if (caps.aux_support)
3142 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3143 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3144 	else
3145 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3146 
3147 	return rc ? 0 : 1;
3148 }
3149 
3150 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3151 {
3152 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3153 	struct amdgpu_dm_backlight_caps caps;
3154 
3155 	amdgpu_dm_update_backlight_caps(dm);
3156 	caps = dm->backlight_caps;
3157 
3158 	if (caps.aux_support) {
3159 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3160 		u32 avg, peak;
3161 		bool rc;
3162 
3163 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3164 		if (!rc)
3165 			return bd->props.brightness;
3166 		return convert_brightness_to_user(&caps, avg);
3167 	} else {
3168 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3169 
3170 		if (ret == DC_ERROR_UNEXPECTED)
3171 			return bd->props.brightness;
3172 		return convert_brightness_to_user(&caps, ret);
3173 	}
3174 }
3175 
3176 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3177 	.options = BL_CORE_SUSPENDRESUME,
3178 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3179 	.update_status	= amdgpu_dm_backlight_update_status,
3180 };
3181 
3182 static void
3183 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3184 {
3185 	char bl_name[16];
3186 	struct backlight_properties props = { 0 };
3187 
3188 	amdgpu_dm_update_backlight_caps(dm);
3189 
3190 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3191 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3192 	props.type = BACKLIGHT_RAW;
3193 
3194 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3195 		 adev_to_drm(dm->adev)->primary->index);
3196 
3197 	dm->backlight_dev = backlight_device_register(bl_name,
3198 						      adev_to_drm(dm->adev)->dev,
3199 						      dm,
3200 						      &amdgpu_dm_backlight_ops,
3201 						      &props);
3202 
3203 	if (IS_ERR(dm->backlight_dev))
3204 		DRM_ERROR("DM: Backlight registration failed!\n");
3205 	else
3206 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3207 }
3208 
3209 #endif
3210 
3211 static int initialize_plane(struct amdgpu_display_manager *dm,
3212 			    struct amdgpu_mode_info *mode_info, int plane_id,
3213 			    enum drm_plane_type plane_type,
3214 			    const struct dc_plane_cap *plane_cap)
3215 {
3216 	struct drm_plane *plane;
3217 	unsigned long possible_crtcs;
3218 	int ret = 0;
3219 
3220 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3221 	if (!plane) {
3222 		DRM_ERROR("KMS: Failed to allocate plane\n");
3223 		return -ENOMEM;
3224 	}
3225 	plane->type = plane_type;
3226 
3227 	/*
3228 	 * HACK: IGT tests expect that the primary plane for a CRTC
3229 	 * can only have one possible CRTC. Only expose support for
3230 	 * any CRTC if they're not going to be used as a primary plane
3231 	 * for a CRTC - like overlay or underlay planes.
3232 	 */
3233 	possible_crtcs = 1 << plane_id;
3234 	if (plane_id >= dm->dc->caps.max_streams)
3235 		possible_crtcs = 0xff;
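	/*
	 * e.g. plane_id 0 yields possible_crtcs = 0x1 (CRTC 0 only), while an
	 * overlay/underlay plane with plane_id >= max_streams gets 0xff
	 * (any CRTC).
	 */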
3236 
3237 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3238 
3239 	if (ret) {
3240 		DRM_ERROR("KMS: Failed to initialize plane\n");
3241 		kfree(plane);
3242 		return ret;
3243 	}
3244 
3245 	if (mode_info)
3246 		mode_info->planes[plane_id] = plane;
3247 
3248 	return ret;
3249 }
3250 
3251 
3252 static void register_backlight_device(struct amdgpu_display_manager *dm,
3253 				      struct dc_link *link)
3254 {
3255 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3256 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3257 
3258 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3259 	    link->type != dc_connection_none) {
3260 		/*
3261 		 * Even if registration fails, we should continue with
3262 		 * DM initialization, because not having backlight control
3263 		 * is better than a black screen.
3264 		 */
3265 		amdgpu_dm_register_backlight_device(dm);
3266 
3267 		if (dm->backlight_dev)
3268 			dm->backlight_link = link;
3269 	}
3270 #endif
3271 }
3272 
3273 
3274 /*
3275  * In this architecture, the association
3276  * connector -> encoder -> crtc
3277  * is not really required. The crtc and connector will hold the
3278  * display_index as an abstraction to use with the DAL component.
3279  *
3280  * Returns 0 on success
3281  */
3282 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3283 {
3284 	struct amdgpu_display_manager *dm = &adev->dm;
3285 	int32_t i;
3286 	struct amdgpu_dm_connector *aconnector = NULL;
3287 	struct amdgpu_encoder *aencoder = NULL;
3288 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3289 	uint32_t link_cnt;
3290 	int32_t primary_planes;
3291 	enum dc_connection_type new_connection_type = dc_connection_none;
3292 	const struct dc_plane_cap *plane;
3293 
3294 	dm->display_indexes_num = dm->dc->caps.max_streams;
3295 	/* Update the actual number of CRTCs in use */
3296 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3297 
3298 	link_cnt = dm->dc->caps.max_links;
3299 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3300 		DRM_ERROR("DM: Failed to initialize mode config\n");
3301 		return -EINVAL;
3302 	}
3303 
3304 	/* There is one primary plane per CRTC */
3305 	primary_planes = dm->dc->caps.max_streams;
3306 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3307 
3308 	/*
3309 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3310 	 * Order is reversed to match iteration order in atomic check.
3311 	 */
3312 	for (i = (primary_planes - 1); i >= 0; i--) {
3313 		plane = &dm->dc->caps.planes[i];
3314 
3315 		if (initialize_plane(dm, mode_info, i,
3316 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3317 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3318 			goto fail;
3319 		}
3320 	}
3321 
3322 	/*
3323 	 * Initialize overlay planes, index starting after primary planes.
3324 	 * These planes have a higher DRM index than the primary planes since
3325 	 * they should be considered as having a higher z-order.
3326 	 * Order is reversed to match iteration order in atomic check.
3327 	 *
3328 	 * Only support DCN for now, and only expose one so we don't encourage
3329 	 * userspace to use up all the pipes.
3330 	 */
3331 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3332 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3333 
3334 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3335 			continue;
3336 
3337 		if (!plane->blends_with_above || !plane->blends_with_below)
3338 			continue;
3339 
3340 		if (!plane->pixel_format_support.argb8888)
3341 			continue;
3342 
3343 		if (initialize_plane(dm, NULL, primary_planes + i,
3344 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3345 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3346 			goto fail;
3347 		}
3348 
3349 		/* Only create one overlay plane. */
3350 		break;
3351 	}
3352 
3353 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3354 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3355 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3356 			goto fail;
3357 		}
3358 
3359 	/* Loop over all connectors on the board */
3360 	for (i = 0; i < link_cnt; i++) {
3361 		struct dc_link *link = NULL;
3362 
3363 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3364 			DRM_ERROR(
3365 				"KMS: Cannot support more than %d display indexes\n",
3366 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3367 			continue;
3368 		}
3369 
3370 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3371 		if (!aconnector)
3372 			goto fail;
3373 
3374 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3375 		if (!aencoder)
3376 			goto fail;
3377 
3378 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3379 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3380 			goto fail;
3381 		}
3382 
3383 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3384 			DRM_ERROR("KMS: Failed to initialize connector\n");
3385 			goto fail;
3386 		}
3387 
3388 		link = dc_get_link_at_index(dm->dc, i);
3389 
3390 		if (!dc_link_detect_sink(link, &new_connection_type))
3391 			DRM_ERROR("KMS: Failed to detect connector\n");
3392 
3393 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3394 			emulated_link_detect(link);
3395 			amdgpu_dm_update_connector_after_detect(aconnector);
3396 
3397 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3398 			amdgpu_dm_update_connector_after_detect(aconnector);
3399 			register_backlight_device(dm, link);
3400 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3401 				amdgpu_dm_set_psr_caps(link);
3402 		}
3403 
3404 
3405 	}
3406 
3407 	/* Software is initialized. Now we can register interrupt handlers. */
3408 	switch (adev->asic_type) {
3409 #if defined(CONFIG_DRM_AMD_DC_SI)
3410 	case CHIP_TAHITI:
3411 	case CHIP_PITCAIRN:
3412 	case CHIP_VERDE:
3413 	case CHIP_OLAND:
3414 		if (dce60_register_irq_handlers(dm->adev)) {
3415 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3416 			goto fail;
3417 		}
3418 		break;
3419 #endif
3420 	case CHIP_BONAIRE:
3421 	case CHIP_HAWAII:
3422 	case CHIP_KAVERI:
3423 	case CHIP_KABINI:
3424 	case CHIP_MULLINS:
3425 	case CHIP_TONGA:
3426 	case CHIP_FIJI:
3427 	case CHIP_CARRIZO:
3428 	case CHIP_STONEY:
3429 	case CHIP_POLARIS11:
3430 	case CHIP_POLARIS10:
3431 	case CHIP_POLARIS12:
3432 	case CHIP_VEGAM:
3433 	case CHIP_VEGA10:
3434 	case CHIP_VEGA12:
3435 	case CHIP_VEGA20:
3436 		if (dce110_register_irq_handlers(dm->adev)) {
3437 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3438 			goto fail;
3439 		}
3440 		break;
3441 #if defined(CONFIG_DRM_AMD_DC_DCN)
3442 	case CHIP_RAVEN:
3443 	case CHIP_NAVI12:
3444 	case CHIP_NAVI10:
3445 	case CHIP_NAVI14:
3446 	case CHIP_RENOIR:
3447 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3448 	case CHIP_SIENNA_CICHLID:
3449 	case CHIP_NAVY_FLOUNDER:
3450 #endif
3451 		if (dcn10_register_irq_handlers(dm->adev)) {
3452 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3453 			goto fail;
3454 		}
3455 		break;
3456 #endif
3457 	default:
3458 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3459 		goto fail;
3460 	}
3461 
3462 	return 0;
3463 fail:
3464 	kfree(aencoder);
3465 	kfree(aconnector);
3466 
3467 	return -EINVAL;
3468 }
3469 
3470 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3471 {
3472 	drm_mode_config_cleanup(dm->ddev);
3473 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3475 }
3476 
3477 /******************************************************************************
3478  * amdgpu_display_funcs functions
3479  *****************************************************************************/
3480 
3481 /*
3482  * dm_bandwidth_update - program display watermarks
3483  *
3484  * @adev: amdgpu_device pointer
3485  *
3486  * Calculate and program the display watermarks and line buffer allocation.
3487  */
3488 static void dm_bandwidth_update(struct amdgpu_device *adev)
3489 {
3490 	/* TODO: implement later */
3491 }
3492 
3493 static const struct amdgpu_display_funcs dm_display_funcs = {
3494 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3495 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3496 	.backlight_set_level = NULL, /* never called for DC */
3497 	.backlight_get_level = NULL, /* never called for DC */
3498 	.hpd_sense = NULL, /* called unconditionally */
3499 	.hpd_set_polarity = NULL, /* called unconditionally */
3500 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3501 	.page_flip_get_scanoutpos =
3502 		dm_crtc_get_scanoutpos, /* called unconditionally */
3503 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3504 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3505 };
3506 
3507 #if defined(CONFIG_DEBUG_KERNEL_DC)
3508 
3509 static ssize_t s3_debug_store(struct device *device,
3510 			      struct device_attribute *attr,
3511 			      const char *buf,
3512 			      size_t count)
3513 {
3514 	int ret;
3515 	int s3_state;
3516 	struct drm_device *drm_dev = dev_get_drvdata(device);
3517 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3518 
3519 	ret = kstrtoint(buf, 0, &s3_state);
3520 
3521 	if (ret == 0) {
3522 		if (s3_state) {
3523 			dm_resume(adev);
3524 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3525 		} else
3526 			dm_suspend(adev);
3527 	}
3528 
3529 	return ret == 0 ? count : 0;
3530 }
3531 
3532 DEVICE_ATTR_WO(s3_debug);
3533 
3534 #endif
3535 
3536 static int dm_early_init(void *handle)
3537 {
3538 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3539 
3540 	switch (adev->asic_type) {
3541 #if defined(CONFIG_DRM_AMD_DC_SI)
3542 	case CHIP_TAHITI:
3543 	case CHIP_PITCAIRN:
3544 	case CHIP_VERDE:
3545 		adev->mode_info.num_crtc = 6;
3546 		adev->mode_info.num_hpd = 6;
3547 		adev->mode_info.num_dig = 6;
3548 		break;
3549 	case CHIP_OLAND:
3550 		adev->mode_info.num_crtc = 2;
3551 		adev->mode_info.num_hpd = 2;
3552 		adev->mode_info.num_dig = 2;
3553 		break;
3554 #endif
3555 	case CHIP_BONAIRE:
3556 	case CHIP_HAWAII:
3557 		adev->mode_info.num_crtc = 6;
3558 		adev->mode_info.num_hpd = 6;
3559 		adev->mode_info.num_dig = 6;
3560 		break;
3561 	case CHIP_KAVERI:
3562 		adev->mode_info.num_crtc = 4;
3563 		adev->mode_info.num_hpd = 6;
3564 		adev->mode_info.num_dig = 7;
3565 		break;
3566 	case CHIP_KABINI:
3567 	case CHIP_MULLINS:
3568 		adev->mode_info.num_crtc = 2;
3569 		adev->mode_info.num_hpd = 6;
3570 		adev->mode_info.num_dig = 6;
3571 		break;
3572 	case CHIP_FIJI:
3573 	case CHIP_TONGA:
3574 		adev->mode_info.num_crtc = 6;
3575 		adev->mode_info.num_hpd = 6;
3576 		adev->mode_info.num_dig = 7;
3577 		break;
3578 	case CHIP_CARRIZO:
3579 		adev->mode_info.num_crtc = 3;
3580 		adev->mode_info.num_hpd = 6;
3581 		adev->mode_info.num_dig = 9;
3582 		break;
3583 	case CHIP_STONEY:
3584 		adev->mode_info.num_crtc = 2;
3585 		adev->mode_info.num_hpd = 6;
3586 		adev->mode_info.num_dig = 9;
3587 		break;
3588 	case CHIP_POLARIS11:
3589 	case CHIP_POLARIS12:
3590 		adev->mode_info.num_crtc = 5;
3591 		adev->mode_info.num_hpd = 5;
3592 		adev->mode_info.num_dig = 5;
3593 		break;
3594 	case CHIP_POLARIS10:
3595 	case CHIP_VEGAM:
3596 		adev->mode_info.num_crtc = 6;
3597 		adev->mode_info.num_hpd = 6;
3598 		adev->mode_info.num_dig = 6;
3599 		break;
3600 	case CHIP_VEGA10:
3601 	case CHIP_VEGA12:
3602 	case CHIP_VEGA20:
3603 		adev->mode_info.num_crtc = 6;
3604 		adev->mode_info.num_hpd = 6;
3605 		adev->mode_info.num_dig = 6;
3606 		break;
3607 #if defined(CONFIG_DRM_AMD_DC_DCN)
3608 	case CHIP_RAVEN:
3609 		adev->mode_info.num_crtc = 4;
3610 		adev->mode_info.num_hpd = 4;
3611 		adev->mode_info.num_dig = 4;
3612 		break;
3613 #endif
3614 	case CHIP_NAVI10:
3615 	case CHIP_NAVI12:
3616 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3617 	case CHIP_SIENNA_CICHLID:
3618 	case CHIP_NAVY_FLOUNDER:
3619 #endif
3620 		adev->mode_info.num_crtc = 6;
3621 		adev->mode_info.num_hpd = 6;
3622 		adev->mode_info.num_dig = 6;
3623 		break;
3624 	case CHIP_NAVI14:
3625 		adev->mode_info.num_crtc = 5;
3626 		adev->mode_info.num_hpd = 5;
3627 		adev->mode_info.num_dig = 5;
3628 		break;
3629 	case CHIP_RENOIR:
3630 		adev->mode_info.num_crtc = 4;
3631 		adev->mode_info.num_hpd = 4;
3632 		adev->mode_info.num_dig = 4;
3633 		break;
3634 	default:
3635 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3636 		return -EINVAL;
3637 	}
3638 
3639 	amdgpu_dm_set_irq_funcs(adev);
3640 
3641 	if (adev->mode_info.funcs == NULL)
3642 		adev->mode_info.funcs = &dm_display_funcs;
3643 
3644 	/*
3645 	 * Note: Do NOT change adev->audio_endpt_rreg and
3646 	 * adev->audio_endpt_wreg because they are initialised in
3647 	 * amdgpu_device_init()
3648 	 */
3649 #if defined(CONFIG_DEBUG_KERNEL_DC)
3650 	device_create_file(
3651 		adev_to_drm(adev)->dev,
3652 		&dev_attr_s3_debug);
3653 #endif
3654 
3655 	return 0;
3656 }
3657 
3658 static bool modeset_required(struct drm_crtc_state *crtc_state,
3659 			     struct dc_stream_state *new_stream,
3660 			     struct dc_stream_state *old_stream)
3661 {
3662 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3663 }
3664 
3665 static bool modereset_required(struct drm_crtc_state *crtc_state)
3666 {
3667 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3668 }
3669 
3670 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3671 {
3672 	drm_encoder_cleanup(encoder);
3673 	kfree(encoder);
3674 }
3675 
3676 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3677 	.destroy = amdgpu_dm_encoder_destroy,
3678 };
3679 
3680 
3681 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3682 				struct dc_scaling_info *scaling_info)
3683 {
3684 	int scale_w, scale_h;
3685 
3686 	memset(scaling_info, 0, sizeof(*scaling_info));
3687 
3688 	/* Source is fixed 16.16 but we ignore mantissa for now... */
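	/*
	 * e.g. a src_x of (16 << 16) + 0x8000, i.e. 16.5 in 16.16 fixed
	 * point, truncates to 16 here.
	 */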
3689 	scaling_info->src_rect.x = state->src_x >> 16;
3690 	scaling_info->src_rect.y = state->src_y >> 16;
3691 
3692 	/*
3693 	 * For reasons we don't (yet) fully understand, a non-zero
3694 	 * src_y coordinate into an NV12 buffer can cause a
3695 	 * system hang. To avoid hangs (and maybe be overly cautious)
3696 	 * let's reject both non-zero src_x and src_y.
3697 	 *
3698 	 * We currently know of only one use-case to reproduce a
3699 	 * scenario with non-zero src_x and src_y for NV12, which
3700 	 * is to gesture the YouTube Android app into full screen
3701 	 * on ChromeOS.
3702 	 */
3703 	if (state->fb &&
3704 	    state->fb->format->format == DRM_FORMAT_NV12 &&
3705 	    (scaling_info->src_rect.x != 0 ||
3706 	     scaling_info->src_rect.y != 0))
3707 		return -EINVAL;
3708 
3726 	scaling_info->src_rect.width = state->src_w >> 16;
3727 	if (scaling_info->src_rect.width == 0)
3728 		return -EINVAL;
3729 
3730 	scaling_info->src_rect.height = state->src_h >> 16;
3731 	if (scaling_info->src_rect.height == 0)
3732 		return -EINVAL;
3733 
3734 	scaling_info->dst_rect.x = state->crtc_x;
3735 	scaling_info->dst_rect.y = state->crtc_y;
3736 
3737 	if (state->crtc_w == 0)
3738 		return -EINVAL;
3739 
3740 	scaling_info->dst_rect.width = state->crtc_w;
3741 
3742 	if (state->crtc_h == 0)
3743 		return -EINVAL;
3744 
3745 	scaling_info->dst_rect.height = state->crtc_h;
3746 
3747 	/* DRM doesn't specify clipping on destination output. */
3748 	scaling_info->clip_rect = scaling_info->dst_rect;
3749 
3750 	/* TODO: Validate scaling per-format with DC plane caps */
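	/*
	 * Scale factors below are in units of 1/1000th: e.g. a 1920-wide
	 * source shown on a 960-wide destination gives scale_w = 500. The
	 * bounds reject downscaling beyond 4:1 (250) and upscaling beyond
	 * 16x (16000).
	 */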
3751 	scale_w = scaling_info->dst_rect.width * 1000 /
3752 		  scaling_info->src_rect.width;
3753 
3754 	if (scale_w < 250 || scale_w > 16000)
3755 		return -EINVAL;
3756 
3757 	scale_h = scaling_info->dst_rect.height * 1000 /
3758 		  scaling_info->src_rect.height;
3759 
3760 	if (scale_h < 250 || scale_h > 16000)
3761 		return -EINVAL;
3762 
3763 	/*
3764 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3765 	 * assume reasonable defaults based on the format.
3766 	 */
3767 
3768 	return 0;
3769 }
3770 
3771 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3772 		       uint64_t *tiling_flags, bool *tmz_surface)
3773 {
3774 	struct amdgpu_bo *rbo;
3775 	int r;
3776 
3777 	if (!amdgpu_fb) {
3778 		*tiling_flags = 0;
3779 		*tmz_surface = false;
3780 		return 0;
3781 	}
3782 
3783 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3784 	r = amdgpu_bo_reserve(rbo, false);
3785 
3786 	if (unlikely(r)) {
3787 		/* Don't show error message when returning -ERESTARTSYS */
3788 		if (r != -ERESTARTSYS)
3789 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3790 		return r;
3791 	}
3792 
3793 	if (tiling_flags)
3794 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3795 
3796 	if (tmz_surface)
3797 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3798 
3799 	amdgpu_bo_unreserve(rbo);
3800 
3801 	return r;
3802 }
3803 
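/*
 * The DCC metadata offset is packed into the tiling flags in units of
 * 256 bytes: e.g. a DCC_OFFSET_256B value of 4 places the metadata
 * 1024 bytes past the surface base address. A zero offset means DCC is
 * disabled for the buffer, so 0 is returned instead of a bogus address.
 */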
3804 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3805 {
3806 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3807 
3808 	return offset ? (address + offset * 256) : 0;
3809 }
3810 
3811 static int
3812 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3813 			  const struct amdgpu_framebuffer *afb,
3814 			  const enum surface_pixel_format format,
3815 			  const enum dc_rotation_angle rotation,
3816 			  const struct plane_size *plane_size,
3817 			  const union dc_tiling_info *tiling_info,
3818 			  const uint64_t info,
3819 			  struct dc_plane_dcc_param *dcc,
3820 			  struct dc_plane_address *address,
3821 			  bool force_disable_dcc)
3822 {
3823 	struct dc *dc = adev->dm.dc;
3824 	struct dc_dcc_surface_param input;
3825 	struct dc_surface_dcc_cap output;
3826 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3827 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3828 	uint64_t dcc_address;
3829 
3830 	memset(&input, 0, sizeof(input));
3831 	memset(&output, 0, sizeof(output));
3832 
3833 	if (force_disable_dcc)
3834 		return 0;
3835 
3836 	if (!offset)
3837 		return 0;
3838 
3839 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3840 		return 0;
3841 
3842 	if (!dc->cap_funcs.get_dcc_compression_cap)
3843 		return -EINVAL;
3844 
3845 	input.format = format;
3846 	input.surface_size.width = plane_size->surface_size.width;
3847 	input.surface_size.height = plane_size->surface_size.height;
3848 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3849 
3850 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3851 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3852 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3853 		input.scan = SCAN_DIRECTION_VERTICAL;
3854 
3855 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3856 		return -EINVAL;
3857 
3858 	if (!output.capable)
3859 		return -EINVAL;
3860 
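	/*
	 * Reject DCC if the surface was not allocated with independent
	 * 64B blocks but the hardware reports it requires them for this
	 * format.
	 */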
3861 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3862 		return -EINVAL;
3863 
3864 	dcc->enable = 1;
3865 	dcc->meta_pitch =
3866 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3867 	dcc->independent_64b_blks = i64b;
3868 
3869 	dcc_address = get_dcc_address(afb->address, info);
3870 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3871 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3872 
3873 	return 0;
3874 }
3875 
3876 static int
3877 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3878 			     const struct amdgpu_framebuffer *afb,
3879 			     const enum surface_pixel_format format,
3880 			     const enum dc_rotation_angle rotation,
3881 			     const uint64_t tiling_flags,
3882 			     union dc_tiling_info *tiling_info,
3883 			     struct plane_size *plane_size,
3884 			     struct dc_plane_dcc_param *dcc,
3885 			     struct dc_plane_address *address,
3886 			     bool tmz_surface,
3887 			     bool force_disable_dcc)
3888 {
3889 	const struct drm_framebuffer *fb = &afb->base;
3890 	int ret;
3891 
3892 	memset(tiling_info, 0, sizeof(*tiling_info));
3893 	memset(plane_size, 0, sizeof(*plane_size));
3894 	memset(dcc, 0, sizeof(*dcc));
3895 	memset(address, 0, sizeof(*address));
3896 
3897 	address->tmz_surface = tmz_surface;
3898 
3899 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3900 		plane_size->surface_size.x = 0;
3901 		plane_size->surface_size.y = 0;
3902 		plane_size->surface_size.width = fb->width;
3903 		plane_size->surface_size.height = fb->height;
3904 		plane_size->surface_pitch =
3905 			fb->pitches[0] / fb->format->cpp[0];
3906 
3907 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3908 		address->grph.addr.low_part = lower_32_bits(afb->address);
3909 		address->grph.addr.high_part = upper_32_bits(afb->address);
3910 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3911 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3912 
3913 		plane_size->surface_size.x = 0;
3914 		plane_size->surface_size.y = 0;
3915 		plane_size->surface_size.width = fb->width;
3916 		plane_size->surface_size.height = fb->height;
3917 		plane_size->surface_pitch =
3918 			fb->pitches[0] / fb->format->cpp[0];
3919 
3920 		plane_size->chroma_size.x = 0;
3921 		plane_size->chroma_size.y = 0;
3922 		/* TODO: set these based on surface format */
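		/*
		 * Halving width and height matches the 4:2:0 chroma
		 * subsampling of the NV12/NV21/P010 formats that currently
		 * reach this path.
		 */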
3923 		plane_size->chroma_size.width = fb->width / 2;
3924 		plane_size->chroma_size.height = fb->height / 2;
3925 
3926 		plane_size->chroma_pitch =
3927 			fb->pitches[1] / fb->format->cpp[1];
3928 
3929 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3930 		address->video_progressive.luma_addr.low_part =
3931 			lower_32_bits(afb->address);
3932 		address->video_progressive.luma_addr.high_part =
3933 			upper_32_bits(afb->address);
3934 		address->video_progressive.chroma_addr.low_part =
3935 			lower_32_bits(chroma_addr);
3936 		address->video_progressive.chroma_addr.high_part =
3937 			upper_32_bits(chroma_addr);
3938 	}
3939 
3940 	/* Fill GFX8 params */
3941 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3942 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3943 
3944 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3945 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3946 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3947 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3948 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3949 
3950 		/* XXX fix me for VI */
3951 		tiling_info->gfx8.num_banks = num_banks;
3952 		tiling_info->gfx8.array_mode =
3953 				DC_ARRAY_2D_TILED_THIN1;
3954 		tiling_info->gfx8.tile_split = tile_split;
3955 		tiling_info->gfx8.bank_width = bankw;
3956 		tiling_info->gfx8.bank_height = bankh;
3957 		tiling_info->gfx8.tile_aspect = mtaspect;
3958 		tiling_info->gfx8.tile_mode =
3959 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3960 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3961 			== DC_ARRAY_1D_TILED_THIN1) {
3962 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3963 	}
3964 
3965 	tiling_info->gfx8.pipe_config =
3966 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3967 
3968 	if (adev->asic_type == CHIP_VEGA10 ||
3969 	    adev->asic_type == CHIP_VEGA12 ||
3970 	    adev->asic_type == CHIP_VEGA20 ||
3971 	    adev->asic_type == CHIP_NAVI10 ||
3972 	    adev->asic_type == CHIP_NAVI14 ||
3973 	    adev->asic_type == CHIP_NAVI12 ||
3974 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3975 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3976 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3977 #endif
3978 	    adev->asic_type == CHIP_RENOIR ||
3979 	    adev->asic_type == CHIP_RAVEN) {
3980 		/* Fill GFX9 params */
3981 		tiling_info->gfx9.num_pipes =
3982 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3983 		tiling_info->gfx9.num_banks =
3984 			adev->gfx.config.gb_addr_config_fields.num_banks;
3985 		tiling_info->gfx9.pipe_interleave =
3986 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3987 		tiling_info->gfx9.num_shader_engines =
3988 			adev->gfx.config.gb_addr_config_fields.num_se;
3989 		tiling_info->gfx9.max_compressed_frags =
3990 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3991 		tiling_info->gfx9.num_rb_per_se =
3992 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3993 		tiling_info->gfx9.swizzle =
3994 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3995 		tiling_info->gfx9.shaderEnable = 1;
3996 
3997 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3998 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3999 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
4000 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4001 #endif
4002 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4003 						plane_size, tiling_info,
4004 						tiling_flags, dcc, address,
4005 						force_disable_dcc);
4006 		if (ret)
4007 			return ret;
4008 	}
4009 
4010 	return 0;
4011 }
4012 
4013 static void
4014 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4015 			       bool *per_pixel_alpha, bool *global_alpha,
4016 			       int *global_alpha_value)
4017 {
4018 	*per_pixel_alpha = false;
4019 	*global_alpha = false;
4020 	*global_alpha_value = 0xff;
4021 
4022 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4023 		return;
4024 
4025 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4026 		static const uint32_t alpha_formats[] = {
4027 			DRM_FORMAT_ARGB8888,
4028 			DRM_FORMAT_RGBA8888,
4029 			DRM_FORMAT_ABGR8888,
4030 		};
4031 		uint32_t format = plane_state->fb->format->format;
4032 		unsigned int i;
4033 
4034 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4035 			if (format == alpha_formats[i]) {
4036 				*per_pixel_alpha = true;
4037 				break;
4038 			}
4039 		}
4040 	}
4041 
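	/*
	 * The DRM plane "alpha" property is 16 bits wide, with 0xffff
	 * meaning fully opaque; DC takes an 8-bit value, hence the >> 8
	 * truncation below.
	 */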
4042 	if (plane_state->alpha < 0xffff) {
4043 		*global_alpha = true;
4044 		*global_alpha_value = plane_state->alpha >> 8;
4045 	}
4046 }
4047 
4048 static int
4049 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4050 			    const enum surface_pixel_format format,
4051 			    enum dc_color_space *color_space)
4052 {
4053 	bool full_range;
4054 
4055 	*color_space = COLOR_SPACE_SRGB;
4056 
4057 	/* DRM color properties only affect non-RGB formats. */
4058 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4059 		return 0;
4060 
4061 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4062 
4063 	switch (plane_state->color_encoding) {
4064 	case DRM_COLOR_YCBCR_BT601:
4065 		if (full_range)
4066 			*color_space = COLOR_SPACE_YCBCR601;
4067 		else
4068 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4069 		break;
4070 
4071 	case DRM_COLOR_YCBCR_BT709:
4072 		if (full_range)
4073 			*color_space = COLOR_SPACE_YCBCR709;
4074 		else
4075 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4076 		break;
4077 
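	/*
	 * Limited-range BT.2020 has no matching DC color space in this
	 * mapping, so only the full-range variant is accepted.
	 */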
4078 	case DRM_COLOR_YCBCR_BT2020:
4079 		if (full_range)
4080 			*color_space = COLOR_SPACE_2020_YCBCR;
4081 		else
4082 			return -EINVAL;
4083 		break;
4084 
4085 	default:
4086 		return -EINVAL;
4087 	}
4088 
4089 	return 0;
4090 }
4091 
4092 static int
4093 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4094 			    const struct drm_plane_state *plane_state,
4095 			    const uint64_t tiling_flags,
4096 			    struct dc_plane_info *plane_info,
4097 			    struct dc_plane_address *address,
4098 			    bool tmz_surface,
4099 			    bool force_disable_dcc)
4100 {
4101 	const struct drm_framebuffer *fb = plane_state->fb;
4102 	const struct amdgpu_framebuffer *afb =
4103 		to_amdgpu_framebuffer(plane_state->fb);
4104 	struct drm_format_name_buf format_name;
4105 	int ret;
4106 
4107 	memset(plane_info, 0, sizeof(*plane_info));
4108 
4109 	switch (fb->format->format) {
4110 	case DRM_FORMAT_C8:
4111 		plane_info->format =
4112 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4113 		break;
4114 	case DRM_FORMAT_RGB565:
4115 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4116 		break;
4117 	case DRM_FORMAT_XRGB8888:
4118 	case DRM_FORMAT_ARGB8888:
4119 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4120 		break;
4121 	case DRM_FORMAT_XRGB2101010:
4122 	case DRM_FORMAT_ARGB2101010:
4123 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4124 		break;
4125 	case DRM_FORMAT_XBGR2101010:
4126 	case DRM_FORMAT_ABGR2101010:
4127 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4128 		break;
4129 	case DRM_FORMAT_XBGR8888:
4130 	case DRM_FORMAT_ABGR8888:
4131 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4132 		break;
4133 	case DRM_FORMAT_NV21:
4134 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4135 		break;
4136 	case DRM_FORMAT_NV12:
4137 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4138 		break;
4139 	case DRM_FORMAT_P010:
4140 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4141 		break;
4142 	case DRM_FORMAT_XRGB16161616F:
4143 	case DRM_FORMAT_ARGB16161616F:
4144 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4145 		break;
4146 	case DRM_FORMAT_XBGR16161616F:
4147 	case DRM_FORMAT_ABGR16161616F:
4148 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4149 		break;
4150 	default:
4151 		DRM_ERROR(
4152 			"Unsupported screen format %s\n",
4153 			drm_get_format_name(fb->format->format, &format_name));
4154 		return -EINVAL;
4155 	}
4156 
4157 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4158 	case DRM_MODE_ROTATE_0:
4159 		plane_info->rotation = ROTATION_ANGLE_0;
4160 		break;
4161 	case DRM_MODE_ROTATE_90:
4162 		plane_info->rotation = ROTATION_ANGLE_90;
4163 		break;
4164 	case DRM_MODE_ROTATE_180:
4165 		plane_info->rotation = ROTATION_ANGLE_180;
4166 		break;
4167 	case DRM_MODE_ROTATE_270:
4168 		plane_info->rotation = ROTATION_ANGLE_270;
4169 		break;
4170 	default:
4171 		plane_info->rotation = ROTATION_ANGLE_0;
4172 		break;
4173 	}
4174 
4175 	plane_info->visible = true;
4176 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4177 
4178 	plane_info->layer_index = 0;
4179 
4180 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4181 					  &plane_info->color_space);
4182 	if (ret)
4183 		return ret;
4184 
4185 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4186 					   plane_info->rotation, tiling_flags,
4187 					   &plane_info->tiling_info,
4188 					   &plane_info->plane_size,
4189 					   &plane_info->dcc, address, tmz_surface,
4190 					   force_disable_dcc);
4191 	if (ret)
4192 		return ret;
4193 
4194 	fill_blending_from_plane_state(
4195 		plane_state, &plane_info->per_pixel_alpha,
4196 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4197 
4198 	return 0;
4199 }
4200 
4201 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4202 				    struct dc_plane_state *dc_plane_state,
4203 				    struct drm_plane_state *plane_state,
4204 				    struct drm_crtc_state *crtc_state)
4205 {
4206 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4207 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4208 	struct dc_scaling_info scaling_info;
4209 	struct dc_plane_info plane_info;
4210 	int ret;
4211 	bool force_disable_dcc = false;
4212 
4213 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4214 	if (ret)
4215 		return ret;
4216 
4217 	dc_plane_state->src_rect = scaling_info.src_rect;
4218 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4219 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4220 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4221 
4222 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4223 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4224 					  dm_plane_state->tiling_flags,
4225 					  &plane_info,
4226 					  &dc_plane_state->address,
4227 					  dm_plane_state->tmz_surface,
4228 					  force_disable_dcc);
4229 	if (ret)
4230 		return ret;
4231 
4232 	dc_plane_state->format = plane_info.format;
4233 	dc_plane_state->color_space = plane_info.color_space;
4235 	dc_plane_state->plane_size = plane_info.plane_size;
4236 	dc_plane_state->rotation = plane_info.rotation;
4237 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4238 	dc_plane_state->stereo_format = plane_info.stereo_format;
4239 	dc_plane_state->tiling_info = plane_info.tiling_info;
4240 	dc_plane_state->visible = plane_info.visible;
4241 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4242 	dc_plane_state->global_alpha = plane_info.global_alpha;
4243 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4244 	dc_plane_state->dcc = plane_info.dcc;
4245 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4246 
4247 	/*
4248 	 * Always set input transfer function, since plane state is refreshed
4249 	 * every time.
4250 	 */
4251 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4252 	if (ret)
4253 		return ret;
4254 
4255 	return 0;
4256 }
4257 
4258 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4259 					   const struct dm_connector_state *dm_state,
4260 					   struct dc_stream_state *stream)
4261 {
4262 	enum amdgpu_rmx_type rmx_type;
4263 
4264 	struct rect src = { 0 }; /* viewport in composition space */
4265 	struct rect dst = { 0 }; /* stream addressable area */
4266 
4267 	/* no mode. nothing to be done */
4268 	if (!mode)
4269 		return;
4270 
4271 	/* Full screen scaling by default */
4272 	src.width = mode->hdisplay;
4273 	src.height = mode->vdisplay;
4274 	dst.width = stream->timing.h_addressable;
4275 	dst.height = stream->timing.v_addressable;
4276 
4277 	if (dm_state) {
4278 		rmx_type = dm_state->scaling;
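		/*
		 * Worked example: a 1280x1024 viewport on a 1920x1080 stream
		 * with RMX_ASPECT scales to 1350x1080, then the centering
		 * below pillarboxes it (dst.x = (1920 - 1350) / 2 = 285).
		 */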
4279 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4280 			if (src.width * dst.height <
4281 					src.height * dst.width) {
4282 				/* height needs less upscaling/more downscaling */
4283 				dst.width = src.width *
4284 						dst.height / src.height;
4285 			} else {
4286 				/* width needs less upscaling/more downscaling */
4287 				dst.height = src.height *
4288 						dst.width / src.width;
4289 			}
4290 		} else if (rmx_type == RMX_CENTER) {
4291 			dst = src;
4292 		}
4293 
4294 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4295 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4296 
4297 		if (dm_state->underscan_enable) {
4298 			dst.x += dm_state->underscan_hborder / 2;
4299 			dst.y += dm_state->underscan_vborder / 2;
4300 			dst.width -= dm_state->underscan_hborder;
4301 			dst.height -= dm_state->underscan_vborder;
4302 		}
4303 	}
4304 
4305 	stream->src = src;
4306 	stream->dst = dst;
4307 
4308 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4309 			dst.x, dst.y, dst.width, dst.height);
4310 
4311 }
4312 
4313 static enum dc_color_depth
4314 convert_color_depth_from_display_info(const struct drm_connector *connector,
4315 				      bool is_y420, int requested_bpc)
4316 {
4317 	uint8_t bpc;
4318 
4319 	if (is_y420) {
4320 		bpc = 8;
4321 
4322 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4323 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4324 			bpc = 16;
4325 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4326 			bpc = 12;
4327 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4328 			bpc = 10;
4329 	} else {
4330 		bpc = (uint8_t)connector->display_info.bpc;
4331 		/* Assume 8 bpc by default if no bpc is specified. */
4332 		bpc = bpc ? bpc : 8;
4333 	}
4334 
4335 	if (requested_bpc > 0) {
4336 		/*
4337 		 * Cap display bpc based on the user requested value.
4338 		 *
4339 		 * The value for state->max_bpc may not be correctly updated
4340 		 * depending on when the connector gets added to the state
4341 		 * or if this was called outside of atomic check, so it
4342 		 * can't be used directly.
4343 		 */
4344 		bpc = min_t(u8, bpc, requested_bpc);
4345 
4346 		/* Round down to the nearest even number. */
4347 		bpc = bpc - (bpc & 1);
4348 	}
4349 
4350 	switch (bpc) {
4351 	case 0:
4352 		/*
4353 		 * Temporary Work around, DRM doesn't parse color depth for
4354 		 * EDID revision before 1.4
4355 		 * TODO: Fix edid parsing
4356 		 */
4357 		return COLOR_DEPTH_888;
4358 	case 6:
4359 		return COLOR_DEPTH_666;
4360 	case 8:
4361 		return COLOR_DEPTH_888;
4362 	case 10:
4363 		return COLOR_DEPTH_101010;
4364 	case 12:
4365 		return COLOR_DEPTH_121212;
4366 	case 14:
4367 		return COLOR_DEPTH_141414;
4368 	case 16:
4369 		return COLOR_DEPTH_161616;
4370 	default:
4371 		return COLOR_DEPTH_UNDEFINED;
4372 	}
4373 }
4374 
4375 static enum dc_aspect_ratio
4376 get_aspect_ratio(const struct drm_display_mode *mode_in)
4377 {
4378 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4379 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4380 }
4381 
4382 static enum dc_color_space
4383 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4384 {
4385 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4386 
4387 	switch (dc_crtc_timing->pixel_encoding)	{
4388 	case PIXEL_ENCODING_YCBCR422:
4389 	case PIXEL_ENCODING_YCBCR444:
4390 	case PIXEL_ENCODING_YCBCR420:
4391 	{
4392 		/*
4393 		 * 27.03 MHz (270300 in 100 Hz units) is the separation point
4394 		 * between HDTV and SDTV according to the HDMI spec; we use
4395 		 * YCbCr709 and YCbCr601 respectively.
4396 		 */
4397 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4398 			if (dc_crtc_timing->flags.Y_ONLY)
4399 				color_space =
4400 					COLOR_SPACE_YCBCR709_LIMITED;
4401 			else
4402 				color_space = COLOR_SPACE_YCBCR709;
4403 		} else {
4404 			if (dc_crtc_timing->flags.Y_ONLY)
4405 				color_space =
4406 					COLOR_SPACE_YCBCR601_LIMITED;
4407 			else
4408 				color_space = COLOR_SPACE_YCBCR601;
4409 		}
4410 
4411 	}
4412 	break;
4413 	case PIXEL_ENCODING_RGB:
4414 		color_space = COLOR_SPACE_SRGB;
4415 		break;
4416 
4417 	default:
4418 		WARN_ON(1);
4419 		break;
4420 	}
4421 
4422 	return color_space;
4423 }
4424 
4425 static bool adjust_colour_depth_from_display_info(
4426 	struct dc_crtc_timing *timing_out,
4427 	const struct drm_display_info *info)
4428 {
4429 	enum dc_color_depth depth = timing_out->display_color_depth;
4430 	int normalized_clk;
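	/*
	 * Walk down from the requested depth until the normalized clock
	 * fits the sink, e.g. a 594 MHz 4:2:0 mode at 12 bpc gives
	 * 594000 / 2 * 36 / 24 = 445500 kHz, which only passes when the
	 * sink's max_tmds_clock (in kHz) is at least that high.
	 */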
4431 	do {
4432 		normalized_clk = timing_out->pix_clk_100hz / 10;
4433 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4434 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4435 			normalized_clk /= 2;
4436 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
4437 		switch (depth) {
4438 		case COLOR_DEPTH_888:
4439 			break;
4440 		case COLOR_DEPTH_101010:
4441 			normalized_clk = (normalized_clk * 30) / 24;
4442 			break;
4443 		case COLOR_DEPTH_121212:
4444 			normalized_clk = (normalized_clk * 36) / 24;
4445 			break;
4446 		case COLOR_DEPTH_161616:
4447 			normalized_clk = (normalized_clk * 48) / 24;
4448 			break;
4449 		default:
4450 			/* The above depths are the only ones valid for HDMI. */
4451 			return false;
4452 		}
4453 		if (normalized_clk <= info->max_tmds_clock) {
4454 			timing_out->display_color_depth = depth;
4455 			return true;
4456 		}
4457 	} while (--depth > COLOR_DEPTH_666);
4458 	return false;
4459 }
4460 
4461 static void fill_stream_properties_from_drm_display_mode(
4462 	struct dc_stream_state *stream,
4463 	const struct drm_display_mode *mode_in,
4464 	const struct drm_connector *connector,
4465 	const struct drm_connector_state *connector_state,
4466 	const struct dc_stream_state *old_stream,
4467 	int requested_bpc)
4468 {
4469 	struct dc_crtc_timing *timing_out = &stream->timing;
4470 	const struct drm_display_info *info = &connector->display_info;
4471 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4472 	struct hdmi_vendor_infoframe hv_frame;
4473 	struct hdmi_avi_infoframe avi_frame;
4474 
4475 	memset(&hv_frame, 0, sizeof(hv_frame));
4476 	memset(&avi_frame, 0, sizeof(avi_frame));
4477 
4478 	timing_out->h_border_left = 0;
4479 	timing_out->h_border_right = 0;
4480 	timing_out->v_border_top = 0;
4481 	timing_out->v_border_bottom = 0;
4482 	/* TODO: un-hardcode */
4483 	if (drm_mode_is_420_only(info, mode_in)
4484 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4485 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4486 	else if (drm_mode_is_420_also(info, mode_in)
4487 			&& aconnector->force_yuv420_output)
4488 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4489 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4490 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4491 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4492 	else
4493 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4494 
4495 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4496 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4497 		connector,
4498 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4499 		requested_bpc);
4500 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4501 	timing_out->hdmi_vic = 0;
4502 
4503 	if (old_stream) {
4504 		timing_out->vic = old_stream->timing.vic;
4505 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4506 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4507 	} else {
4508 		timing_out->vic = drm_match_cea_mode(mode_in);
4509 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4510 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4511 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4512 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4513 	}
4514 
4515 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4516 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4517 		timing_out->vic = avi_frame.video_code;
4518 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4519 		timing_out->hdmi_vic = hv_frame.vic;
4520 	}
4521 
4522 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4523 	timing_out->h_total = mode_in->crtc_htotal;
4524 	timing_out->h_sync_width =
4525 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4526 	timing_out->h_front_porch =
4527 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4528 	timing_out->v_total = mode_in->crtc_vtotal;
4529 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4530 	timing_out->v_front_porch =
4531 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4532 	timing_out->v_sync_width =
4533 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
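	/* crtc_clock is in kHz; multiplying by 10 yields 100 Hz units. */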
4534 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4535 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4536 
4537 	stream->output_color_space = get_output_color_space(timing_out);
4538 
4539 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4540 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4541 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4542 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4543 		    drm_mode_is_420_also(info, mode_in) &&
4544 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4545 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4546 			adjust_colour_depth_from_display_info(timing_out, info);
4547 		}
4548 	}
4549 }
4550 
4551 static void fill_audio_info(struct audio_info *audio_info,
4552 			    const struct drm_connector *drm_connector,
4553 			    const struct dc_sink *dc_sink)
4554 {
4555 	int i = 0;
4556 	int cea_revision = 0;
4557 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4558 
4559 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4560 	audio_info->product_id = edid_caps->product_id;
4561 
4562 	cea_revision = drm_connector->display_info.cea_rev;
4563 
4564 	strscpy(audio_info->display_name,
4565 		edid_caps->display_name,
4566 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4567 
4568 	if (cea_revision >= 3) {
4569 		audio_info->mode_count = edid_caps->audio_mode_count;
4570 
4571 		for (i = 0; i < audio_info->mode_count; ++i) {
4572 			audio_info->modes[i].format_code =
4573 					(enum audio_format_code)
4574 					(edid_caps->audio_modes[i].format_code);
4575 			audio_info->modes[i].channel_count =
4576 					edid_caps->audio_modes[i].channel_count;
4577 			audio_info->modes[i].sample_rates.all =
4578 					edid_caps->audio_modes[i].sample_rate;
4579 			audio_info->modes[i].sample_size =
4580 					edid_caps->audio_modes[i].sample_size;
4581 		}
4582 	}
4583 
4584 	audio_info->flags.all = edid_caps->speaker_flags;
4585 
4586 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4587 	if (drm_connector->latency_present[0]) {
4588 		audio_info->video_latency = drm_connector->video_latency[0];
4589 		audio_info->audio_latency = drm_connector->audio_latency[0];
4590 	}
4591 
4592 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4593 
4594 }
4595 
4596 static void
4597 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4598 				      struct drm_display_mode *dst_mode)
4599 {
4600 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4601 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4602 	dst_mode->crtc_clock = src_mode->crtc_clock;
4603 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4604 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4605 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4606 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4607 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4608 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4609 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4610 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4611 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4612 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4613 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4614 }
4615 
4616 static void
4617 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4618 					const struct drm_display_mode *native_mode,
4619 					bool scale_enabled)
4620 {
4621 	if (scale_enabled) {
4622 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4623 	} else if (native_mode->clock == drm_mode->clock &&
4624 			native_mode->htotal == drm_mode->htotal &&
4625 			native_mode->vtotal == drm_mode->vtotal) {
4626 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4627 	} else {
4628 		/* neither scaling nor an amdgpu-inserted mode: nothing to patch */
4629 	}
4630 }
4631 
4632 static struct dc_sink *
4633 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4634 {
4635 	struct dc_sink_init_data sink_init_data = { 0 };
4636 	struct dc_sink *sink = NULL;
4637 	sink_init_data.link = aconnector->dc_link;
4638 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4639 
4640 	sink = dc_sink_create(&sink_init_data);
4641 	if (!sink) {
4642 		DRM_ERROR("Failed to create sink!\n");
4643 		return NULL;
4644 	}
4645 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4646 
4647 	return sink;
4648 }
4649 
4650 static void set_multisync_trigger_params(
4651 		struct dc_stream_state *stream)
4652 {
4653 	if (stream->triggered_crtc_reset.enabled) {
4654 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4655 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4656 	}
4657 }
4658 
4659 static void set_master_stream(struct dc_stream_state *stream_set[],
4660 			      int stream_count)
4661 {
4662 	int j, highest_rfr = 0, master_stream = 0;
4663 
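	/*
	 * pix_clk_100hz * 100 converts the clock back to Hz; dividing by
	 * the pixels per frame (h_total * v_total) gives the refresh rate
	 * in Hz. The stream with the highest rate becomes the sync master.
	 */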
4664 	for (j = 0;  j < stream_count; j++) {
4665 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4666 			int refresh_rate = 0;
4667 
4668 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4669 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4670 			if (refresh_rate > highest_rfr) {
4671 				highest_rfr = refresh_rate;
4672 				master_stream = j;
4673 			}
4674 		}
4675 	}
4676 	for (j = 0;  j < stream_count; j++) {
4677 		if (stream_set[j])
4678 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4679 	}
4680 }
4681 
4682 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4683 {
4684 	int i = 0;
4685 
4686 	if (context->stream_count < 2)
4687 		return;
4688 	for (i = 0; i < context->stream_count ; i++) {
4689 		if (!context->streams[i])
4690 			continue;
4691 		/*
4692 		 * TODO: add a function to read AMD VSDB bits and set
4693 		 * crtc_sync_master.multi_sync_enabled flag
4694 		 * For now it's set to false
4695 		 */
4696 		set_multisync_trigger_params(context->streams[i]);
4697 	}
4698 	set_master_stream(context->streams, context->stream_count);
4699 }
4700 
4701 static struct dc_stream_state *
4702 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4703 		       const struct drm_display_mode *drm_mode,
4704 		       const struct dm_connector_state *dm_state,
4705 		       const struct dc_stream_state *old_stream,
4706 		       int requested_bpc)
4707 {
4708 	struct drm_display_mode *preferred_mode = NULL;
4709 	struct drm_connector *drm_connector;
4710 	const struct drm_connector_state *con_state =
4711 		dm_state ? &dm_state->base : NULL;
4712 	struct dc_stream_state *stream = NULL;
4713 	struct drm_display_mode mode = *drm_mode;
4714 	bool native_mode_found = false;
4715 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4716 	int mode_refresh;
4717 	int preferred_refresh = 0;
4718 #if defined(CONFIG_DRM_AMD_DC_DCN)
4719 	struct dsc_dec_dpcd_caps dsc_caps;
4720 #endif
4721 	uint32_t link_bandwidth_kbps;
4722 
4723 	struct dc_sink *sink = NULL;

4724 	if (aconnector == NULL) {
4725 		DRM_ERROR("aconnector is NULL!\n");
4726 		return stream;
4727 	}
4728 
4729 	drm_connector = &aconnector->base;
4730 
4731 	if (!aconnector->dc_sink) {
4732 		sink = create_fake_sink(aconnector);
4733 		if (!sink)
4734 			return stream;
4735 	} else {
4736 		sink = aconnector->dc_sink;
4737 		dc_sink_retain(sink);
4738 	}
4739 
4740 	stream = dc_create_stream_for_sink(sink);
4741 
4742 	if (stream == NULL) {
4743 		DRM_ERROR("Failed to create stream for sink!\n");
4744 		goto finish;
4745 	}
4746 
4747 	stream->dm_stream_context = aconnector;
4748 
4749 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4750 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4751 
4752 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4753 		/* Search for preferred mode */
4754 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4755 			native_mode_found = true;
4756 			break;
4757 		}
4758 	}
4759 	if (!native_mode_found)
4760 		preferred_mode = list_first_entry_or_null(
4761 				&aconnector->base.modes,
4762 				struct drm_display_mode,
4763 				head);
4764 
4765 	mode_refresh = drm_mode_vrefresh(&mode);
4766 
4767 	if (preferred_mode == NULL) {
4768 		/*
4769 		 * This may not be an error, the use case is when we have no
4770 		 * usermode calls to reset and set mode upon hotplug. In this
4771 		 * case, we call set mode ourselves to restore the previous mode
4772 		 * and the modelist may not be filled in in time.
4773 		 */
4774 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4775 	} else {
4776 		decide_crtc_timing_for_drm_display_mode(
4777 				&mode, preferred_mode,
4778 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4779 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4780 	}
4781 
4782 	if (!dm_state)
4783 		drm_mode_set_crtcinfo(&mode, 0);
4784 
4785 	/*
4786 	 * If scaling is enabled and the refresh rate didn't change,
4787 	 * we copy the vic and polarities of the old timings.
4788 	 */
4789 	if (!scale || mode_refresh != preferred_refresh)
4790 		fill_stream_properties_from_drm_display_mode(stream,
4791 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4792 	else
4793 		fill_stream_properties_from_drm_display_mode(stream,
4794 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4795 
4796 	stream->timing.flags.DSC = 0;
4797 
4798 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4799 #if defined(CONFIG_DRM_AMD_DC_DCN)
4800 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4801 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4802 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4803 				      &dsc_caps);
4804 #endif
4805 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4806 							     dc_link_get_link_cap(aconnector->dc_link));
4807 
4808 #if defined(CONFIG_DRM_AMD_DC_DCN)
4809 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4810 			/* Set DSC policy according to dsc_clock_en */
4811 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4812 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4813 
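			/*
			 * dc_dsc_compute_config() succeeds only when a DSC
			 * configuration exists that fits this timing within
			 * the available link bandwidth, so the DSC flag is
			 * set only in that case (or when forced via debugfs).
			 */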
4814 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4815 						  &dsc_caps,
4816 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4817 						  link_bandwidth_kbps,
4818 						  &stream->timing,
4819 						  &stream->timing.dsc_cfg))
4820 				stream->timing.flags.DSC = 1;
4821 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4822 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4823 				stream->timing.flags.DSC = 1;
4824 
4825 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4826 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4827 
4828 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4829 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4830 
4831 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4832 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4833 		}
4834 #endif
4835 	}
4836 
4837 	update_stream_scaling_settings(&mode, dm_state, stream);
4838 
4839 	fill_audio_info(
4840 		&stream->audio_info,
4841 		drm_connector,
4842 		sink);
4843 
4844 	update_stream_signal(stream, sink);
4845 
4846 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4847 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4848 
4849 	if (stream->link->psr_settings.psr_feature_enabled) {
4850 		/*
4851 		 * Decide whether the stream supports the VSC SDP colorimetry
4852 		 * capability before building the VSC info packet.
4853 		 */
4854 		stream->use_vsc_sdp_for_colorimetry = false;
4855 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4856 			stream->use_vsc_sdp_for_colorimetry =
4857 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4858 		} else {
4859 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4860 				stream->use_vsc_sdp_for_colorimetry = true;
4861 		}
4862 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4863 	}
4864 finish:
4865 	dc_sink_release(sink);
4866 
4867 	return stream;
4868 }
4869 
4870 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4871 {
4872 	drm_crtc_cleanup(crtc);
4873 	kfree(crtc);
4874 }
4875 
4876 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4877 				  struct drm_crtc_state *state)
4878 {
4879 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4880 
4881 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4882 	if (cur->stream)
4883 		dc_stream_release(cur->stream);
4884 
4885 
4886 	__drm_atomic_helper_crtc_destroy_state(state);
4887 
4888 
4889 	kfree(state);
4890 }
4891 
4892 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4893 {
4894 	struct dm_crtc_state *state;
4895 
4896 	if (crtc->state)
4897 		dm_crtc_destroy_state(crtc, crtc->state);
4898 
4899 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4900 	if (WARN_ON(!state))
4901 		return;
4902 
4903 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4904 }
4905 
4906 static struct drm_crtc_state *
4907 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4908 {
4909 	struct dm_crtc_state *state, *cur;
4910 
4911 	cur = to_dm_crtc_state(crtc->state);
4912 
4913 	if (WARN_ON(!crtc->state))
4914 		return NULL;
4915 
4916 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4917 	if (!state)
4918 		return NULL;
4919 
4920 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4921 
4922 	if (cur->stream) {
4923 		state->stream = cur->stream;
4924 		dc_stream_retain(state->stream);
4925 	}
4926 
4927 	state->active_planes = cur->active_planes;
4928 	state->vrr_infopacket = cur->vrr_infopacket;
4929 	state->abm_level = cur->abm_level;
4930 	state->vrr_supported = cur->vrr_supported;
4931 	state->freesync_config = cur->freesync_config;
4932 	state->crc_src = cur->crc_src;
4933 	state->cm_has_degamma = cur->cm_has_degamma;
4934 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4935 
4936 	/* TODO: Duplicate dc_stream once the stream object is flattened */
4937 
4938 	return &state->base;
4939 }
4940 
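/*
 * Note (illustrative, not from the original source): VUPDATE is a per-OTG
 * interrupt that fires after the vertical front porch. With variable refresh
 * rate (FreeSync) the front porch is stretched, so vertical-interrupt work is
 * keyed off VUPDATE instead of VBLANK while VRR is active. The irq_source
 * arithmetic below assumes DC lays out the interrupt sources of one type
 * consecutively per OTG instance, e.g.:
 *
 *	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 *	// otg_inst 0 -> DC_IRQ_SOURCE_VUPDATE1, 1 -> VUPDATE2, ...
 */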
4941 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4942 {
4943 	enum dc_irq_source irq_source;
4944 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4945 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4946 	int rc;
4947 
4948 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4949 
4950 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4951 
4952 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4953 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4954 	return rc;
4955 }
4956 
4957 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4958 {
4959 	enum dc_irq_source irq_source;
4960 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4961 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4962 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4963 	int rc = 0;
4964 
4965 	if (enable) {
4966 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4967 		if (amdgpu_dm_vrr_active(acrtc_state))
4968 			rc = dm_set_vupdate_irq(crtc, true);
4969 	} else {
4970 		/* vblank irq off -> vupdate irq off */
4971 		rc = dm_set_vupdate_irq(crtc, false);
4972 	}
4973 
4974 	if (rc)
4975 		return rc;
4976 
4977 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4978 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4979 }
4980 
4981 static int dm_enable_vblank(struct drm_crtc *crtc)
4982 {
4983 	return dm_set_vblank(crtc, true);
4984 }
4985 
4986 static void dm_disable_vblank(struct drm_crtc *crtc)
4987 {
4988 	dm_set_vblank(crtc, false);
4989 }
4990 
4991 /* Only the options currently available to the driver are implemented */
4992 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4993 	.reset = dm_crtc_reset_state,
4994 	.destroy = amdgpu_dm_crtc_destroy,
4995 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4996 	.set_config = drm_atomic_helper_set_config,
4997 	.page_flip = drm_atomic_helper_page_flip,
4998 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4999 	.atomic_destroy_state = dm_crtc_destroy_state,
5000 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5001 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5002 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5003 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5004 	.enable_vblank = dm_enable_vblank,
5005 	.disable_vblank = dm_disable_vblank,
5006 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5007 };
5008 
5009 static enum drm_connector_status
5010 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5011 {
5012 	bool connected;
5013 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5014 
5015 	/*
5016 	 * Notes:
5017 	 * 1. This interface is NOT called in the context of the HPD irq.
5018 	 * 2. This interface *is* called in the context of a user-mode ioctl,
5019 	 * which makes it a bad place for *any* MST-related activity.
5020 	 */
5021 
5022 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5023 	    !aconnector->fake_enable)
5024 		connected = (aconnector->dc_sink != NULL);
5025 	else
5026 		connected = (aconnector->base.force == DRM_FORCE_ON);
5027 
5028 	update_subconnector_property(aconnector);
5029 
5030 	return (connected ? connector_status_connected :
5031 			connector_status_disconnected);
5032 }
5033 
5034 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5035 					    struct drm_connector_state *connector_state,
5036 					    struct drm_property *property,
5037 					    uint64_t val)
5038 {
5039 	struct drm_device *dev = connector->dev;
5040 	struct amdgpu_device *adev = drm_to_adev(dev);
5041 	struct dm_connector_state *dm_old_state =
5042 		to_dm_connector_state(connector->state);
5043 	struct dm_connector_state *dm_new_state =
5044 		to_dm_connector_state(connector_state);
5045 
5046 	int ret = -EINVAL;
5047 
5048 	if (property == dev->mode_config.scaling_mode_property) {
5049 		enum amdgpu_rmx_type rmx_type;
5050 
5051 		switch (val) {
5052 		case DRM_MODE_SCALE_CENTER:
5053 			rmx_type = RMX_CENTER;
5054 			break;
5055 		case DRM_MODE_SCALE_ASPECT:
5056 			rmx_type = RMX_ASPECT;
5057 			break;
5058 		case DRM_MODE_SCALE_FULLSCREEN:
5059 			rmx_type = RMX_FULL;
5060 			break;
5061 		case DRM_MODE_SCALE_NONE:
5062 		default:
5063 			rmx_type = RMX_OFF;
5064 			break;
5065 		}
5066 
5067 		if (dm_old_state->scaling == rmx_type)
5068 			return 0;
5069 
5070 		dm_new_state->scaling = rmx_type;
5071 		ret = 0;
5072 	} else if (property == adev->mode_info.underscan_hborder_property) {
5073 		dm_new_state->underscan_hborder = val;
5074 		ret = 0;
5075 	} else if (property == adev->mode_info.underscan_vborder_property) {
5076 		dm_new_state->underscan_vborder = val;
5077 		ret = 0;
5078 	} else if (property == adev->mode_info.underscan_property) {
5079 		dm_new_state->underscan_enable = val;
5080 		ret = 0;
5081 	} else if (property == adev->mode_info.abm_level_property) {
5082 		dm_new_state->abm_level = val;
5083 		ret = 0;
5084 	}
5085 
5086 	return ret;
5087 }
5088 
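/*
 * Summary table (derived from the switch statements in the two property
 * accessors; no additional driver state is involved):
 *
 *	DRM scaling property		dm_connector_state->scaling
 *	--------------------------	---------------------------
 *	DRM_MODE_SCALE_CENTER		RMX_CENTER
 *	DRM_MODE_SCALE_ASPECT		RMX_ASPECT
 *	DRM_MODE_SCALE_FULLSCREEN	RMX_FULL
 *	DRM_MODE_SCALE_NONE (default)	RMX_OFF
 */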
5089 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5090 					    const struct drm_connector_state *state,
5091 					    struct drm_property *property,
5092 					    uint64_t *val)
5093 {
5094 	struct drm_device *dev = connector->dev;
5095 	struct amdgpu_device *adev = drm_to_adev(dev);
5096 	struct dm_connector_state *dm_state =
5097 		to_dm_connector_state(state);
5098 	int ret = -EINVAL;
5099 
5100 	if (property == dev->mode_config.scaling_mode_property) {
5101 		switch (dm_state->scaling) {
5102 		case RMX_CENTER:
5103 			*val = DRM_MODE_SCALE_CENTER;
5104 			break;
5105 		case RMX_ASPECT:
5106 			*val = DRM_MODE_SCALE_ASPECT;
5107 			break;
5108 		case RMX_FULL:
5109 			*val = DRM_MODE_SCALE_FULLSCREEN;
5110 			break;
5111 		case RMX_OFF:
5112 		default:
5113 			*val = DRM_MODE_SCALE_NONE;
5114 			break;
5115 		}
5116 		ret = 0;
5117 	} else if (property == adev->mode_info.underscan_hborder_property) {
5118 		*val = dm_state->underscan_hborder;
5119 		ret = 0;
5120 	} else if (property == adev->mode_info.underscan_vborder_property) {
5121 		*val = dm_state->underscan_vborder;
5122 		ret = 0;
5123 	} else if (property == adev->mode_info.underscan_property) {
5124 		*val = dm_state->underscan_enable;
5125 		ret = 0;
5126 	} else if (property == adev->mode_info.abm_level_property) {
5127 		*val = dm_state->abm_level;
5128 		ret = 0;
5129 	}
5130 
5131 	return ret;
5132 }
5133 
5134 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5135 {
5136 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5137 
5138 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5139 }
5140 
5141 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5142 {
5143 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5144 	const struct dc_link *link = aconnector->dc_link;
5145 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5146 	struct amdgpu_display_manager *dm = &adev->dm;
5147 
5148 	/*
5149 	 * Call this only if mst_mgr was initialized before, since it's not
5150 	 * done for all connector types.
5151 	 */
5152 	if (aconnector->mst_mgr.dev)
5153 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5154 
5155 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5156 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5157 
5158 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5159 	    link->type != dc_connection_none &&
5160 	    dm->backlight_dev) {
5161 		backlight_device_unregister(dm->backlight_dev);
5162 		dm->backlight_dev = NULL;
5163 	}
5164 #endif
5165 
5166 	if (aconnector->dc_em_sink)
5167 		dc_sink_release(aconnector->dc_em_sink);
5168 	aconnector->dc_em_sink = NULL;
5169 	if (aconnector->dc_sink)
5170 		dc_sink_release(aconnector->dc_sink);
5171 	aconnector->dc_sink = NULL;
5172 
5173 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5174 	drm_connector_unregister(connector);
5175 	drm_connector_cleanup(connector);
5176 	if (aconnector->i2c) {
5177 		i2c_del_adapter(&aconnector->i2c->base);
5178 		kfree(aconnector->i2c);
5179 	}
5180 	kfree(aconnector->dm_dp_aux.aux.name);
5181 
5182 	kfree(connector);
5183 }
5184 
5185 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5186 {
5187 	struct dm_connector_state *state =
5188 		to_dm_connector_state(connector->state);
5189 
5190 	if (connector->state)
5191 		__drm_atomic_helper_connector_destroy_state(connector->state);
5192 
5193 	kfree(state);
5194 
5195 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5196 
5197 	if (state) {
5198 		state->scaling = RMX_OFF;
5199 		state->underscan_enable = false;
5200 		state->underscan_hborder = 0;
5201 		state->underscan_vborder = 0;
5202 		state->base.max_requested_bpc = 8;
5203 		state->vcpi_slots = 0;
5204 		state->pbn = 0;
5205 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5206 			state->abm_level = amdgpu_dm_abm_level;
5207 
5208 		__drm_atomic_helper_connector_reset(connector, &state->base);
5209 	}
5210 }
5211 
5212 struct drm_connector_state *
5213 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5214 {
5215 	struct dm_connector_state *state =
5216 		to_dm_connector_state(connector->state);
5217 
5218 	struct dm_connector_state *new_state =
5219 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5220 
5221 	if (!new_state)
5222 		return NULL;
5223 
5224 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5225 
5226 	new_state->freesync_capable = state->freesync_capable;
5227 	new_state->abm_level = state->abm_level;
5228 	new_state->scaling = state->scaling;
5229 	new_state->underscan_enable = state->underscan_enable;
5230 	new_state->underscan_hborder = state->underscan_hborder;
5231 	new_state->underscan_vborder = state->underscan_vborder;
5232 	new_state->vcpi_slots = state->vcpi_slots;
5233 	new_state->pbn = state->pbn;
5234 	return &new_state->base;
5235 }
5236 
5237 static int
5238 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5239 {
5240 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5241 		to_amdgpu_dm_connector(connector);
5242 	int r;
5243 
5244 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5245 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5246 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5247 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5248 		if (r)
5249 			return r;
5250 	}
5251 
5252 #if defined(CONFIG_DEBUG_FS)
5253 	connector_debugfs_init(amdgpu_dm_connector);
5254 #endif
5255 
5256 	return 0;
5257 }
5258 
5259 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5260 	.reset = amdgpu_dm_connector_funcs_reset,
5261 	.detect = amdgpu_dm_connector_detect,
5262 	.fill_modes = drm_helper_probe_single_connector_modes,
5263 	.destroy = amdgpu_dm_connector_destroy,
5264 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5265 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5266 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5267 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5268 	.late_register = amdgpu_dm_connector_late_register,
5269 	.early_unregister = amdgpu_dm_connector_unregister
5270 };
5271 
5272 static int get_modes(struct drm_connector *connector)
5273 {
5274 	return amdgpu_dm_connector_get_modes(connector);
5275 }
5276 
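/*
 * Note (summary, not in the original source): when a connector is forced on
 * via sysfs or the kernel command line there may be no physically attached
 * sink. create_eml_sink() builds an emulated dc_sink (SIGNAL_TYPE_VIRTUAL)
 * from the EDID blob attached to the connector, so mode enumeration and
 * stream validation still have a sink to work against.
 */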
5277 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5278 {
5279 	struct dc_sink_init_data init_params = {
5280 			.link = aconnector->dc_link,
5281 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5282 	};
5283 	struct edid *edid;
5284 
5285 	if (!aconnector->base.edid_blob_ptr) {
5286 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5287 				aconnector->base.name);
5288 
5289 		aconnector->base.force = DRM_FORCE_OFF;
5290 		aconnector->base.override_edid = false;
5291 		return;
5292 	}
5293 
5294 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5295 
5296 	aconnector->edid = edid;
5297 
5298 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5299 		aconnector->dc_link,
5300 		(uint8_t *)edid,
5301 		(edid->extensions + 1) * EDID_LENGTH,
5302 		&init_params);
5303 
5304 	if (aconnector->base.force == DRM_FORCE_ON) {
5305 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5306 		aconnector->dc_link->local_sink :
5307 		aconnector->dc_em_sink;
5308 		dc_sink_retain(aconnector->dc_sink);
5309 	}
5310 }
5311 
5312 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5313 {
5314 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5315 
5316 	/*
5317 	 * In case of a headless boot with force-on for a DP managed connector,
5318 	 * these settings have to be != 0 to get an initial modeset.
5319 	 */
5320 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5321 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5322 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5323 	}
5324 
5325 
5326 	aconnector->base.override_edid = true;
5327 	create_eml_sink(aconnector);
5328 }
5329 
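/*
 * Retry ladder (illustrative sketch of the loop below, assuming userspace
 * requested max_requested_bpc = 10):
 *
 *	requested_bpc = 10 -> dc_validate_stream() fails (e.g. bandwidth)
 *	requested_bpc = 8  -> fails again
 *	requested_bpc = 6  -> passes, or the loop gives up below 6
 *
 * If validation failed with DC_FAIL_ENC_VALIDATE, one more attempt is made
 * with YCbCr 4:2:0 forced, which roughly halves the required bandwidth.
 */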
5330 static struct dc_stream_state *
5331 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5332 				const struct drm_display_mode *drm_mode,
5333 				const struct dm_connector_state *dm_state,
5334 				const struct dc_stream_state *old_stream)
5335 {
5336 	struct drm_connector *connector = &aconnector->base;
5337 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5338 	struct dc_stream_state *stream;
5339 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5340 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5341 	enum dc_status dc_result = DC_OK;
5342 
5343 	do {
5344 		stream = create_stream_for_sink(aconnector, drm_mode,
5345 						dm_state, old_stream,
5346 						requested_bpc);
5347 		if (stream == NULL) {
5348 			DRM_ERROR("Failed to create stream for sink!\n");
5349 			break;
5350 		}
5351 
5352 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5353 
5354 		if (dc_result != DC_OK) {
5355 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5356 				      drm_mode->hdisplay,
5357 				      drm_mode->vdisplay,
5358 				      drm_mode->clock,
5359 				      dc_result,
5360 				      dc_status_to_str(dc_result));
5361 
5362 			dc_stream_release(stream);
5363 			stream = NULL;
5364 			requested_bpc -= 2; /* lower bpc to retry validation */
5365 		}
5366 
5367 	} while (stream == NULL && requested_bpc >= 6);
5368 
5369 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5370 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5371 
5372 		aconnector->force_yuv420_output = true;
5373 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
5374 						dm_state, old_stream);
5375 		aconnector->force_yuv420_output = false;
5376 	}
5377 
5378 	return stream;
5379 }
5380 
5381 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5382 				   struct drm_display_mode *mode)
5383 {
5384 	int result = MODE_ERROR;
5385 	struct dc_sink *dc_sink;
5386 	/* TODO: Unhardcode stream count */
5387 	struct dc_stream_state *stream;
5388 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5389 
5390 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5391 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5392 		return result;
5393 
5394 	/*
5395 	 * Only run this the first time mode_valid is called, to initialize
5396 	 * EDID management.
5397 	 */
5398 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5399 		!aconnector->dc_em_sink)
5400 		handle_edid_mgmt(aconnector);
5401 
5402 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5403 
5404 	if (dc_sink == NULL) {
5405 		DRM_ERROR("dc_sink is NULL!\n");
5406 		goto fail;
5407 	}
5408 
5409 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5410 	if (stream) {
5411 		dc_stream_release(stream);
5412 		result = MODE_OK;
5413 	}
5414 
5415 fail:
5416 	/* TODO: error handling */
5417 	return result;
5418 }
5419 
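/*
 * Packet layout note (summarized from the code below): the HDR static
 * metadata infoframe packed by hdmi_drm_infoframe_pack_only() is 30 bytes,
 * a 4-byte header plus a 26-byte payload. For HDMI it is sent as-is
 * (type 0x87, version 0x01, length 0x1A); for DP/eDP the same 26-byte
 * payload is wrapped in an SDP whose header carries the packet type in hb1
 * and the SDP version in hb3.
 */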
5420 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5421 				struct dc_info_packet *out)
5422 {
5423 	struct hdmi_drm_infoframe frame;
5424 	unsigned char buf[30]; /* 26 + 4 */
5425 	ssize_t len;
5426 	int ret, i;
5427 
5428 	memset(out, 0, sizeof(*out));
5429 
5430 	if (!state->hdr_output_metadata)
5431 		return 0;
5432 
5433 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5434 	if (ret)
5435 		return ret;
5436 
5437 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5438 	if (len < 0)
5439 		return (int)len;
5440 
5441 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5442 	if (len != 30)
5443 		return -EINVAL;
5444 
5445 	/* Prepare the infopacket for DC. */
5446 	switch (state->connector->connector_type) {
5447 	case DRM_MODE_CONNECTOR_HDMIA:
5448 		out->hb0 = 0x87; /* type */
5449 		out->hb1 = 0x01; /* version */
5450 		out->hb2 = 0x1A; /* length */
5451 		out->sb[0] = buf[3]; /* checksum */
5452 		i = 1;
5453 		break;
5454 
5455 	case DRM_MODE_CONNECTOR_DisplayPort:
5456 	case DRM_MODE_CONNECTOR_eDP:
5457 		out->hb0 = 0x00; /* sdp id, zero */
5458 		out->hb1 = 0x87; /* type */
5459 		out->hb2 = 0x1D; /* payload len - 1 */
5460 		out->hb3 = (0x13 << 2); /* sdp version */
5461 		out->sb[0] = 0x01; /* version */
5462 		out->sb[1] = 0x1A; /* length */
5463 		i = 2;
5464 		break;
5465 
5466 	default:
5467 		return -EINVAL;
5468 	}
5469 
5470 	memcpy(&out->sb[i], &buf[4], 26);
5471 	out->valid = true;
5472 
5473 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5474 		       sizeof(out->sb), false);
5475 
5476 	return 0;
5477 }
5478 
5479 static bool
5480 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5481 			  const struct drm_connector_state *new_state)
5482 {
5483 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5484 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5485 
5486 	if (old_blob != new_blob) {
5487 		if (old_blob && new_blob &&
5488 		    old_blob->length == new_blob->length)
5489 			return memcmp(old_blob->data, new_blob->data,
5490 				      old_blob->length);
5491 
5492 		return true;
5493 	}
5494 
5495 	return false;
5496 }
5497 
5498 static int
5499 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5500 				 struct drm_atomic_state *state)
5501 {
5502 	struct drm_connector_state *new_con_state =
5503 		drm_atomic_get_new_connector_state(state, conn);
5504 	struct drm_connector_state *old_con_state =
5505 		drm_atomic_get_old_connector_state(state, conn);
5506 	struct drm_crtc *crtc = new_con_state->crtc;
5507 	struct drm_crtc_state *new_crtc_state;
5508 	int ret;
5509 
5510 	if (!crtc)
5511 		return 0;
5512 
5513 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5514 		struct dc_info_packet hdr_infopacket;
5515 
5516 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5517 		if (ret)
5518 			return ret;
5519 
5520 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5521 		if (IS_ERR(new_crtc_state))
5522 			return PTR_ERR(new_crtc_state);
5523 
5524 		/*
5525 		 * DC considers the stream backends changed if the
5526 		 * static metadata changes. Forcing the modeset also
5527 		 * gives a simple way for userspace to switch from
5528 		 * 8bpc to 10bpc when setting the metadata to enter
5529 		 * or exit HDR.
5530 		 *
5531 		 * Changing the static metadata after it's been
5532 		 * set is permissible, however. So only force a
5533 		 * modeset if we're entering or exiting HDR.
5534 		 */
5535 		new_crtc_state->mode_changed =
5536 			!old_con_state->hdr_output_metadata ||
5537 			!new_con_state->hdr_output_metadata;
5538 	}
5539 
5540 	return 0;
5541 }
5542 
5543 static const struct drm_connector_helper_funcs
5544 amdgpu_dm_connector_helper_funcs = {
5545 	/*
5546 	 * If hotplugging a second, bigger display in FB console mode, the bigger
5547 	 * resolution modes will be filtered by drm_mode_validate_size(), and those
5548 	 * modes are missing after the user starts lightdm. So we need to renew the
5549 	 * modes list in the get_modes callback, not just return the modes count.
5550 	 */
5551 	.get_modes = get_modes,
5552 	.mode_valid = amdgpu_dm_connector_mode_valid,
5553 	.atomic_check = amdgpu_dm_connector_atomic_check,
5554 };
5555 
5556 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5557 {
5558 }
5559 
5560 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5561 {
5562 	struct drm_atomic_state *state = new_crtc_state->state;
5563 	struct drm_plane *plane;
5564 	int num_active = 0;
5565 
5566 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5567 		struct drm_plane_state *new_plane_state;
5568 
5569 		/* Cursor planes are "fake". */
5570 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5571 			continue;
5572 
5573 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5574 
5575 		if (!new_plane_state) {
5576 			/*
5577 			 * The plane is enabled on the CRTC and hasn't changed
5578 			 * state. This means that it previously passed
5579 			 * validation and is therefore enabled.
5580 			 */
5581 			num_active += 1;
5582 			continue;
5583 		}
5584 
5585 		/* We need a framebuffer to be considered enabled. */
5586 		num_active += (new_plane_state->fb != NULL);
5587 	}
5588 
5589 	return num_active;
5590 }
5591 
5592 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5593 					 struct drm_crtc_state *new_crtc_state)
5594 {
5595 	struct dm_crtc_state *dm_new_crtc_state =
5596 		to_dm_crtc_state(new_crtc_state);
5597 
5598 	dm_new_crtc_state->active_planes = 0;
5599 
5600 	if (!dm_new_crtc_state->stream)
5601 		return;
5602 
5603 	dm_new_crtc_state->active_planes =
5604 		count_crtc_active_planes(new_crtc_state);
5605 }
5606 
5607 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5608 				       struct drm_crtc_state *state)
5609 {
5610 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5611 	struct dc *dc = adev->dm.dc;
5612 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5613 	int ret = -EINVAL;
5614 
5615 	dm_update_crtc_active_planes(crtc, state);
5616 
5617 	if (unlikely(!dm_crtc_state->stream &&
5618 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5619 		WARN_ON(1);
5620 		return ret;
5621 	}
5622 
5623 	/*
5624 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5625 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5626 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5627 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5628 	 */
5629 	if (state->enable &&
5630 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5631 		return -EINVAL;
5632 
5633 	/* In some use cases, like reset, no stream is attached */
5634 	if (!dm_crtc_state->stream)
5635 		return 0;
5636 
5637 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5638 		return 0;
5639 
5640 	return ret;
5641 }
5642 
5643 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5644 				      const struct drm_display_mode *mode,
5645 				      struct drm_display_mode *adjusted_mode)
5646 {
5647 	return true;
5648 }
5649 
5650 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5651 	.disable = dm_crtc_helper_disable,
5652 	.atomic_check = dm_crtc_helper_atomic_check,
5653 	.mode_fixup = dm_crtc_helper_mode_fixup,
5654 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5655 };
5656 
5657 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5658 {
5659 
5660 }
5661 
5662 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5663 {
5664 	switch (display_color_depth) {
5665 		case COLOR_DEPTH_666:
5666 			return 6;
5667 		case COLOR_DEPTH_888:
5668 			return 8;
5669 		case COLOR_DEPTH_101010:
5670 			return 10;
5671 		case COLOR_DEPTH_121212:
5672 			return 12;
5673 		case COLOR_DEPTH_141414:
5674 			return 14;
5675 		case COLOR_DEPTH_161616:
5676 			return 16;
5677 		default:
5678 			break;
5679 		}
5680 	return 0;
5681 }
5682 
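/*
 * Rough PBN math for the MST check below (illustrative, following the
 * drm_dp_calc_pbn_mode() formula of pixel rate in kBps scaled by 64/54
 * plus a ~0.6% margin): a 1920x1080@60 stream (~148500 kHz pixel clock)
 * at 8 bpc RGB (bpp = 24) needs roughly 532 PBN, which
 * drm_dp_atomic_find_vcpi_slots() then converts into timeslots using the
 * link-specific pbn_div.
 */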
5683 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5684 					  struct drm_crtc_state *crtc_state,
5685 					  struct drm_connector_state *conn_state)
5686 {
5687 	struct drm_atomic_state *state = crtc_state->state;
5688 	struct drm_connector *connector = conn_state->connector;
5689 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5690 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5691 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5692 	struct drm_dp_mst_topology_mgr *mst_mgr;
5693 	struct drm_dp_mst_port *mst_port;
5694 	enum dc_color_depth color_depth;
5695 	int clock, bpp = 0;
5696 	bool is_y420 = false;
5697 
5698 	if (!aconnector->port || !aconnector->dc_sink)
5699 		return 0;
5700 
5701 	mst_port = aconnector->port;
5702 	mst_mgr = &aconnector->mst_port->mst_mgr;
5703 
5704 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5705 		return 0;
5706 
5707 	if (!state->duplicated) {
5708 		int max_bpc = conn_state->max_requested_bpc;
5709 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5710 				aconnector->force_yuv420_output;
5711 		color_depth = convert_color_depth_from_display_info(connector,
5712 								    is_y420,
5713 								    max_bpc);
5714 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5715 		clock = adjusted_mode->clock;
5716 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5717 	}
5718 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5719 									   mst_mgr,
5720 									   mst_port,
5721 									   dm_new_connector_state->pbn,
5722 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5723 	if (dm_new_connector_state->vcpi_slots < 0) {
5724 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5725 		return dm_new_connector_state->vcpi_slots;
5726 	}
5727 	return 0;
5728 }
5729 
5730 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5731 	.disable = dm_encoder_helper_disable,
5732 	.atomic_check = dm_encoder_helper_atomic_check
5733 };
5734 
5735 #if defined(CONFIG_DRM_AMD_DC_DCN)
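/*
 * Note (sketch of the recomputation below): once DSC is enabled on an MST
 * stream, its PBN must be recomputed from the compressed rate.
 * stream->timing.dsc_cfg.bits_per_pixel is stored in units of 1/16 bpp,
 * which is why drm_dp_calc_pbn_mode() is called with dsc = true so that it
 * scales the value accordingly.
 */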
5736 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5737 					    struct dc_state *dc_state)
5738 {
5739 	struct dc_stream_state *stream = NULL;
5740 	struct drm_connector *connector;
5741 	struct drm_connector_state *new_con_state, *old_con_state;
5742 	struct amdgpu_dm_connector *aconnector;
5743 	struct dm_connector_state *dm_conn_state;
5744 	int i, j, clock, bpp;
5745 	int vcpi, pbn_div, pbn = 0;
5746 
5747 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5748 
5749 		aconnector = to_amdgpu_dm_connector(connector);
5750 
5751 		if (!aconnector->port)
5752 			continue;
5753 
5754 		if (!new_con_state || !new_con_state->crtc)
5755 			continue;
5756 
5757 		dm_conn_state = to_dm_connector_state(new_con_state);
5758 
5759 		for (j = 0; j < dc_state->stream_count; j++) {
5760 			stream = dc_state->streams[j];
5761 			if (!stream)
5762 				continue;
5763 
5764 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5765 				break;
5766 
5767 			stream = NULL;
5768 		}
5769 
5770 		if (!stream)
5771 			continue;
5772 
5773 		if (stream->timing.flags.DSC != 1) {
5774 			drm_dp_mst_atomic_enable_dsc(state,
5775 						     aconnector->port,
5776 						     dm_conn_state->pbn,
5777 						     0,
5778 						     false);
5779 			continue;
5780 		}
5781 
5782 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5783 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5784 		clock = stream->timing.pix_clk_100hz / 10;
5785 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5786 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5787 						    aconnector->port,
5788 						    pbn, pbn_div,
5789 						    true);
5790 		if (vcpi < 0)
5791 			return vcpi;
5792 
5793 		dm_conn_state->pbn = pbn;
5794 		dm_conn_state->vcpi_slots = vcpi;
5795 	}
5796 	return 0;
5797 }
5798 #endif
5799 
5800 static void dm_drm_plane_reset(struct drm_plane *plane)
5801 {
5802 	struct dm_plane_state *amdgpu_state = NULL;
5803 
5804 	if (plane->state)
5805 		plane->funcs->atomic_destroy_state(plane, plane->state);
5806 
5807 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5808 	WARN_ON(amdgpu_state == NULL);
5809 
5810 	if (amdgpu_state)
5811 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5812 }
5813 
5814 static struct drm_plane_state *
5815 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5816 {
5817 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5818 
5819 	old_dm_plane_state = to_dm_plane_state(plane->state);
5820 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5821 	if (!dm_plane_state)
5822 		return NULL;
5823 
5824 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5825 
5826 	if (old_dm_plane_state->dc_state) {
5827 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5828 		dc_plane_state_retain(dm_plane_state->dc_state);
5829 	}
5830 
5831 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5832 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5833 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5834 
5835 	return &dm_plane_state->base;
5836 }
5837 
5838 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5839 				struct drm_plane_state *state)
5840 {
5841 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5842 
5843 	if (dm_plane_state->dc_state)
5844 		dc_plane_state_release(dm_plane_state->dc_state);
5845 
5846 	drm_atomic_helper_plane_destroy_state(plane, state);
5847 }
5848 
5849 static const struct drm_plane_funcs dm_plane_funcs = {
5850 	.update_plane	= drm_atomic_helper_update_plane,
5851 	.disable_plane	= drm_atomic_helper_disable_plane,
5852 	.destroy	= drm_primary_helper_destroy,
5853 	.reset = dm_drm_plane_reset,
5854 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5855 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5856 };
5857 
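/*
 * prepare_fb pin flow (summary of the function below): reserve the BO via
 * TTM -> pin it (cursors must live in VRAM, other planes may use any
 * display-supported domain) -> bind it into GART so it has a GPU address ->
 * record that address in the amdgpu_framebuffer and take a BO reference
 * that cleanup_fb() drops again.
 */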
5858 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5859 				      struct drm_plane_state *new_state)
5860 {
5861 	struct amdgpu_framebuffer *afb;
5862 	struct drm_gem_object *obj;
5863 	struct amdgpu_device *adev;
5864 	struct amdgpu_bo *rbo;
5865 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5866 	struct list_head list;
5867 	struct ttm_validate_buffer tv;
5868 	struct ww_acquire_ctx ticket;
5869 	uint32_t domain;
5870 	int r;
5871 
5872 	if (!new_state->fb) {
5873 		DRM_DEBUG_DRIVER("No FB bound\n");
5874 		return 0;
5875 	}
5876 
5877 	afb = to_amdgpu_framebuffer(new_state->fb);
5878 	obj = new_state->fb->obj[0];
5879 	rbo = gem_to_amdgpu_bo(obj);
5880 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5881 	INIT_LIST_HEAD(&list);
5882 
5883 	tv.bo = &rbo->tbo;
5884 	tv.num_shared = 1;
5885 	list_add(&tv.head, &list);
5886 
5887 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5888 	if (r) {
5889 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5890 		return r;
5891 	}
5892 
5893 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5894 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5895 	else
5896 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5897 
5898 	r = amdgpu_bo_pin(rbo, domain);
5899 	if (unlikely(r != 0)) {
5900 		if (r != -ERESTARTSYS)
5901 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5902 		ttm_eu_backoff_reservation(&ticket, &list);
5903 		return r;
5904 	}
5905 
5906 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5907 	if (unlikely(r != 0)) {
5908 		amdgpu_bo_unpin(rbo);
5909 		ttm_eu_backoff_reservation(&ticket, &list);
5910 		DRM_ERROR("%p bind failed\n", rbo);
5911 		return r;
5912 	}
5913 
5914 	ttm_eu_backoff_reservation(&ticket, &list);
5915 
5916 	afb->address = amdgpu_bo_gpu_offset(rbo);
5917 
5918 	amdgpu_bo_ref(rbo);
5919 
5920 	/*
5921 	 * We don't do surface updates on planes that have been newly created,
5922 	 * but we also don't have the afb->address during atomic check.
5923 	 *
5924 	 * Fill in buffer attributes depending on the address here, but only on
5925 	 * newly created planes since they're not being used by DC yet and this
5926 	 * won't modify global state.
5927 	 */
5928 	dm_plane_state_old = to_dm_plane_state(plane->state);
5929 	dm_plane_state_new = to_dm_plane_state(new_state);
5930 
5931 	if (dm_plane_state_new->dc_state &&
5932 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5933 		struct dc_plane_state *plane_state =
5934 			dm_plane_state_new->dc_state;
5935 		bool force_disable_dcc = !plane_state->dcc.enable;
5936 
5937 		fill_plane_buffer_attributes(
5938 			adev, afb, plane_state->format, plane_state->rotation,
5939 			dm_plane_state_new->tiling_flags,
5940 			&plane_state->tiling_info, &plane_state->plane_size,
5941 			&plane_state->dcc, &plane_state->address,
5942 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5943 	}
5944 
5945 	return 0;
5946 }
5947 
5948 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5949 				       struct drm_plane_state *old_state)
5950 {
5951 	struct amdgpu_bo *rbo;
5952 	int r;
5953 
5954 	if (!old_state->fb)
5955 		return;
5956 
5957 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5958 	r = amdgpu_bo_reserve(rbo, false);
5959 	if (unlikely(r)) {
5960 		DRM_ERROR("failed to reserve rbo before unpin\n");
5961 		return;
5962 	}
5963 
5964 	amdgpu_bo_unpin(rbo);
5965 	amdgpu_bo_unreserve(rbo);
5966 	amdgpu_bo_unref(&rbo);
5967 }
5968 
5969 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5970 				       struct drm_crtc_state *new_crtc_state)
5971 {
5972 	int max_downscale = 0;
5973 	int max_upscale = INT_MAX;
5974 
5975 	/* TODO: These should be checked against DC plane caps */
5976 	return drm_atomic_helper_check_plane_state(
5977 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5978 }
5979 
5980 static int dm_plane_atomic_check(struct drm_plane *plane,
5981 				 struct drm_plane_state *state)
5982 {
5983 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
5984 	struct dc *dc = adev->dm.dc;
5985 	struct dm_plane_state *dm_plane_state;
5986 	struct dc_scaling_info scaling_info;
5987 	struct drm_crtc_state *new_crtc_state;
5988 	int ret;
5989 
5990 	dm_plane_state = to_dm_plane_state(state);
5991 
5992 	if (!dm_plane_state->dc_state)
5993 		return 0;
5994 
5995 	new_crtc_state =
5996 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5997 	if (!new_crtc_state)
5998 		return -EINVAL;
5999 
6000 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6001 	if (ret)
6002 		return ret;
6003 
6004 	ret = fill_dc_scaling_info(state, &scaling_info);
6005 	if (ret)
6006 		return ret;
6007 
6008 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6009 		return 0;
6010 
6011 	return -EINVAL;
6012 }
6013 
6014 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6015 				       struct drm_plane_state *new_plane_state)
6016 {
6017 	/* Only support async updates on cursor planes. */
6018 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6019 		return -EINVAL;
6020 
6021 	return 0;
6022 }
6023 
6024 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6025 					 struct drm_plane_state *new_state)
6026 {
6027 	struct drm_plane_state *old_state =
6028 		drm_atomic_get_old_plane_state(new_state->state, plane);
6029 
6030 	swap(plane->state->fb, new_state->fb);
6031 
6032 	plane->state->src_x = new_state->src_x;
6033 	plane->state->src_y = new_state->src_y;
6034 	plane->state->src_w = new_state->src_w;
6035 	plane->state->src_h = new_state->src_h;
6036 	plane->state->crtc_x = new_state->crtc_x;
6037 	plane->state->crtc_y = new_state->crtc_y;
6038 	plane->state->crtc_w = new_state->crtc_w;
6039 	plane->state->crtc_h = new_state->crtc_h;
6040 
6041 	handle_cursor_update(plane, old_state);
6042 }
6043 
6044 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6045 	.prepare_fb = dm_plane_helper_prepare_fb,
6046 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6047 	.atomic_check = dm_plane_atomic_check,
6048 	.atomic_async_check = dm_plane_atomic_async_check,
6049 	.atomic_async_update = dm_plane_atomic_async_update
6050 };
6051 
6052 /*
6053  * TODO: these are currently initialized to rgb formats only.
6054  * For future use cases we should either initialize them dynamically based on
6055  * plane capabilities, or initialize this array to all formats, so internal drm
6056  * check will succeed, and let DC implement proper check
6057  */
6058 static const uint32_t rgb_formats[] = {
6059 	DRM_FORMAT_XRGB8888,
6060 	DRM_FORMAT_ARGB8888,
6061 	DRM_FORMAT_RGBA8888,
6062 	DRM_FORMAT_XRGB2101010,
6063 	DRM_FORMAT_XBGR2101010,
6064 	DRM_FORMAT_ARGB2101010,
6065 	DRM_FORMAT_ABGR2101010,
6066 	DRM_FORMAT_XBGR8888,
6067 	DRM_FORMAT_ABGR8888,
6068 	DRM_FORMAT_RGB565,
6069 };
6070 
6071 static const uint32_t overlay_formats[] = {
6072 	DRM_FORMAT_XRGB8888,
6073 	DRM_FORMAT_ARGB8888,
6074 	DRM_FORMAT_RGBA8888,
6075 	DRM_FORMAT_XBGR8888,
6076 	DRM_FORMAT_ABGR8888,
6077 	DRM_FORMAT_RGB565
6078 };
6079 
6080 static const u32 cursor_formats[] = {
6081 	DRM_FORMAT_ARGB8888
6082 };
6083 
6084 static int get_plane_formats(const struct drm_plane *plane,
6085 			     const struct dc_plane_cap *plane_cap,
6086 			     uint32_t *formats, int max_formats)
6087 {
6088 	int i, num_formats = 0;
6089 
6090 	/*
6091 	 * TODO: Query support for each group of formats directly from
6092 	 * DC plane caps. This will require adding more formats to the
6093 	 * caps list.
6094 	 */
6095 
6096 	switch (plane->type) {
6097 	case DRM_PLANE_TYPE_PRIMARY:
6098 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6099 			if (num_formats >= max_formats)
6100 				break;
6101 
6102 			formats[num_formats++] = rgb_formats[i];
6103 		}
6104 
6105 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6106 			formats[num_formats++] = DRM_FORMAT_NV12;
6107 		if (plane_cap && plane_cap->pixel_format_support.p010)
6108 			formats[num_formats++] = DRM_FORMAT_P010;
6109 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6110 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6111 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6112 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6113 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6114 		}
6115 		break;
6116 
6117 	case DRM_PLANE_TYPE_OVERLAY:
6118 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6119 			if (num_formats >= max_formats)
6120 				break;
6121 
6122 			formats[num_formats++] = overlay_formats[i];
6123 		}
6124 		break;
6125 
6126 	case DRM_PLANE_TYPE_CURSOR:
6127 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6128 			if (num_formats >= max_formats)
6129 				break;
6130 
6131 			formats[num_formats++] = cursor_formats[i];
6132 		}
6133 		break;
6134 	}
6135 
6136 	return num_formats;
6137 }
6138 
6139 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6140 				struct drm_plane *plane,
6141 				unsigned long possible_crtcs,
6142 				const struct dc_plane_cap *plane_cap)
6143 {
6144 	uint32_t formats[32];
6145 	int num_formats;
6146 	int res = -EPERM;
6147 	unsigned int supported_rotations;
6148 
6149 	num_formats = get_plane_formats(plane, plane_cap, formats,
6150 					ARRAY_SIZE(formats));
6151 
6152 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6153 				       &dm_plane_funcs, formats, num_formats,
6154 				       NULL, plane->type, NULL);
6155 	if (res)
6156 		return res;
6157 
6158 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6159 	    plane_cap && plane_cap->per_pixel_alpha) {
6160 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6161 					  BIT(DRM_MODE_BLEND_PREMULTI);
6162 
6163 		drm_plane_create_alpha_property(plane);
6164 		drm_plane_create_blend_mode_property(plane, blend_caps);
6165 	}
6166 
6167 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6168 	    plane_cap &&
6169 	    (plane_cap->pixel_format_support.nv12 ||
6170 	     plane_cap->pixel_format_support.p010)) {
6171 		/* This only affects YUV formats. */
6172 		drm_plane_create_color_properties(
6173 			plane,
6174 			BIT(DRM_COLOR_YCBCR_BT601) |
6175 			BIT(DRM_COLOR_YCBCR_BT709) |
6176 			BIT(DRM_COLOR_YCBCR_BT2020),
6177 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6178 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6179 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6180 	}
6181 
6182 	supported_rotations =
6183 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6184 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6185 
6186 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6187 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6188 						   supported_rotations);
6189 
6190 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6191 
6192 	/* Create (reset) the plane state */
6193 	if (plane->funcs->reset)
6194 		plane->funcs->reset(plane);
6195 
6196 	return 0;
6197 }
6198 
6199 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6200 			       struct drm_plane *plane,
6201 			       uint32_t crtc_index)
6202 {
6203 	struct amdgpu_crtc *acrtc = NULL;
6204 	struct drm_plane *cursor_plane;
6205 
6206 	int res = -ENOMEM;
6207 
6208 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6209 	if (!cursor_plane)
6210 		goto fail;
6211 
6212 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6213 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6214 
6215 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6216 	if (!acrtc)
6217 		goto fail;
6218 
6219 	res = drm_crtc_init_with_planes(
6220 			dm->ddev,
6221 			&acrtc->base,
6222 			plane,
6223 			cursor_plane,
6224 			&amdgpu_dm_crtc_funcs, NULL);
6225 
6226 	if (res)
6227 		goto fail;
6228 
6229 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6230 
6231 	/* Create (reset) the crtc state */
6232 	if (acrtc->base.funcs->reset)
6233 		acrtc->base.funcs->reset(&acrtc->base);
6234 
6235 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6236 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6237 
6238 	acrtc->crtc_id = crtc_index;
6239 	acrtc->base.enabled = false;
6240 	acrtc->otg_inst = -1;
6241 
6242 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6243 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6244 				   true, MAX_COLOR_LUT_ENTRIES);
6245 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6246 
6247 	return 0;
6248 
6249 fail:
6250 	kfree(acrtc);
6251 	kfree(cursor_plane);
6252 	return res;
6253 }
6254 
6255 
6256 static int to_drm_connector_type(enum signal_type st)
6257 {
6258 	switch (st) {
6259 	case SIGNAL_TYPE_HDMI_TYPE_A:
6260 		return DRM_MODE_CONNECTOR_HDMIA;
6261 	case SIGNAL_TYPE_EDP:
6262 		return DRM_MODE_CONNECTOR_eDP;
6263 	case SIGNAL_TYPE_LVDS:
6264 		return DRM_MODE_CONNECTOR_LVDS;
6265 	case SIGNAL_TYPE_RGB:
6266 		return DRM_MODE_CONNECTOR_VGA;
6267 	case SIGNAL_TYPE_DISPLAY_PORT:
6268 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6269 		return DRM_MODE_CONNECTOR_DisplayPort;
6270 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6271 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6272 		return DRM_MODE_CONNECTOR_DVID;
6273 	case SIGNAL_TYPE_VIRTUAL:
6274 		return DRM_MODE_CONNECTOR_VIRTUAL;
6275 
6276 	default:
6277 		return DRM_MODE_CONNECTOR_Unknown;
6278 	}
6279 }
6280 
6281 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6282 {
6283 	struct drm_encoder *encoder;
6284 
6285 	/* There is only one encoder per connector */
6286 	drm_connector_for_each_possible_encoder(connector, encoder)
6287 		return encoder;
6288 
6289 	return NULL;
6290 }
6291 
6292 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6293 {
6294 	struct drm_encoder *encoder;
6295 	struct amdgpu_encoder *amdgpu_encoder;
6296 
6297 	encoder = amdgpu_dm_connector_to_encoder(connector);
6298 
6299 	if (encoder == NULL)
6300 		return;
6301 
6302 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6303 
6304 	amdgpu_encoder->native_mode.clock = 0;
6305 
6306 	if (!list_empty(&connector->probed_modes)) {
6307 		struct drm_display_mode *preferred_mode = NULL;
6308 
6309 		list_for_each_entry(preferred_mode,
6310 				    &connector->probed_modes,
6311 				    head) {
6312 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6313 				amdgpu_encoder->native_mode = *preferred_mode;
6314 
6315 			break;
6316 		}
6317 
6318 	}
6319 }
6320 
6321 static struct drm_display_mode *
6322 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6323 			     char *name,
6324 			     int hdisplay, int vdisplay)
6325 {
6326 	struct drm_device *dev = encoder->dev;
6327 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6328 	struct drm_display_mode *mode = NULL;
6329 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6330 
6331 	mode = drm_mode_duplicate(dev, native_mode);
6332 
6333 	if (mode == NULL)
6334 		return NULL;
6335 
6336 	mode->hdisplay = hdisplay;
6337 	mode->vdisplay = vdisplay;
6338 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6339 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6340 
6341 	return mode;
6342 
6343 }
6344 
6345 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6346 						 struct drm_connector *connector)
6347 {
6348 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6349 	struct drm_display_mode *mode = NULL;
6350 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6351 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6352 				to_amdgpu_dm_connector(connector);
6353 	int i;
6354 	int n;
6355 	struct mode_size {
6356 		char name[DRM_DISPLAY_MODE_LEN];
6357 		int w;
6358 		int h;
6359 	} common_modes[] = {
6360 		{  "640x480",  640,  480},
6361 		{  "800x600",  800,  600},
6362 		{ "1024x768", 1024,  768},
6363 		{ "1280x720", 1280,  720},
6364 		{ "1280x800", 1280,  800},
6365 		{"1280x1024", 1280, 1024},
6366 		{ "1440x900", 1440,  900},
6367 		{"1680x1050", 1680, 1050},
6368 		{"1600x1200", 1600, 1200},
6369 		{"1920x1080", 1920, 1080},
6370 		{"1920x1200", 1920, 1200}
6371 	};
6372 
6373 	n = ARRAY_SIZE(common_modes);
6374 
6375 	for (i = 0; i < n; i++) {
6376 		struct drm_display_mode *curmode = NULL;
6377 		bool mode_existed = false;
6378 
6379 		if (common_modes[i].w > native_mode->hdisplay ||
6380 		    common_modes[i].h > native_mode->vdisplay ||
6381 		   (common_modes[i].w == native_mode->hdisplay &&
6382 		    common_modes[i].h == native_mode->vdisplay))
6383 			continue;
6384 
6385 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6386 			if (common_modes[i].w == curmode->hdisplay &&
6387 			    common_modes[i].h == curmode->vdisplay) {
6388 				mode_existed = true;
6389 				break;
6390 			}
6391 		}
6392 
6393 		if (mode_existed)
6394 			continue;
6395 
6396 		mode = amdgpu_dm_create_common_mode(encoder,
6397 				common_modes[i].name, common_modes[i].w,
6398 				common_modes[i].h);
6399 		drm_mode_probed_add(connector, mode);
6400 		amdgpu_dm_connector->num_modes++;
6401 	}
6402 }
6403 
6404 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6405 					      struct edid *edid)
6406 {
6407 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6408 			to_amdgpu_dm_connector(connector);
6409 
6410 	if (edid) {
6411 		/* empty probed_modes */
6412 		INIT_LIST_HEAD(&connector->probed_modes);
6413 		amdgpu_dm_connector->num_modes =
6414 				drm_add_edid_modes(connector, edid);
6415 
6416 		/* Sort the probed modes before calling
6417 		 * amdgpu_dm_get_native_mode(), since an EDID can have
6418 		 * more than one preferred mode. The modes that are
6419 		 * later in the probed mode list could be of higher
6420 		 * and preferred resolution. For example, 3840x2160
6421 		 * resolution in base EDID preferred timing and 4096x2160
6422 		 * preferred resolution in DID extension block later.
6423 		 */
6424 		drm_mode_sort(&connector->probed_modes);
6425 		amdgpu_dm_get_native_mode(connector);
6426 	} else {
6427 		amdgpu_dm_connector->num_modes = 0;
6428 	}
6429 }
6430 
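/*
 * Note (summary): without a valid EDID the connector still advertises the
 * DRM "noedid" fallback modes up to 640x480; with one, the probed modes
 * come from the EDID plus the common-mode list added above.
 */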
6431 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6432 {
6433 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6434 			to_amdgpu_dm_connector(connector);
6435 	struct drm_encoder *encoder;
6436 	struct edid *edid = amdgpu_dm_connector->edid;
6437 
6438 	encoder = amdgpu_dm_connector_to_encoder(connector);
6439 
6440 	if (!edid || !drm_edid_is_valid(edid)) {
6441 		amdgpu_dm_connector->num_modes =
6442 				drm_add_modes_noedid(connector, 640, 480);
6443 	} else {
6444 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6445 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6446 	}
6447 	amdgpu_dm_fbc_init(connector);
6448 
6449 	return amdgpu_dm_connector->num_modes;
6450 }
6451 
6452 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6453 				     struct amdgpu_dm_connector *aconnector,
6454 				     int connector_type,
6455 				     struct dc_link *link,
6456 				     int link_index)
6457 {
6458 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6459 
6460 	/*
6461 	 * Some of the properties below require access to state, like bpc.
6462 	 * Allocate some default initial connector state with our reset helper.
6463 	 */
6464 	if (aconnector->base.funcs->reset)
6465 		aconnector->base.funcs->reset(&aconnector->base);
6466 
6467 	aconnector->connector_id = link_index;
6468 	aconnector->dc_link = link;
6469 	aconnector->base.interlace_allowed = false;
6470 	aconnector->base.doublescan_allowed = false;
6471 	aconnector->base.stereo_allowed = false;
6472 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6473 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6474 	aconnector->audio_inst = -1;
6475 	mutex_init(&aconnector->hpd_lock);
6476 
6477 	/*
6478 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
6479 	 * which means HPD hot plug is not supported.
6480 	 */
6481 	switch (connector_type) {
6482 	case DRM_MODE_CONNECTOR_HDMIA:
6483 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6484 		aconnector->base.ycbcr_420_allowed =
6485 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6486 		break;
6487 	case DRM_MODE_CONNECTOR_DisplayPort:
6488 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6489 		aconnector->base.ycbcr_420_allowed =
6490 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6491 		break;
6492 	case DRM_MODE_CONNECTOR_DVID:
6493 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6494 		break;
6495 	default:
6496 		break;
6497 	}
6498 
6499 	drm_object_attach_property(&aconnector->base.base,
6500 				dm->ddev->mode_config.scaling_mode_property,
6501 				DRM_MODE_SCALE_NONE);
6502 
6503 	drm_object_attach_property(&aconnector->base.base,
6504 				adev->mode_info.underscan_property,
6505 				UNDERSCAN_OFF);
6506 	drm_object_attach_property(&aconnector->base.base,
6507 				adev->mode_info.underscan_hborder_property,
6508 				0);
6509 	drm_object_attach_property(&aconnector->base.base,
6510 				adev->mode_info.underscan_vborder_property,
6511 				0);
6512 
6513 	if (!aconnector->mst_port)
6514 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6515 
6516 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6517 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6518 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6519 
6520 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6521 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6522 		drm_object_attach_property(&aconnector->base.base,
6523 				adev->mode_info.abm_level_property, 0);
6524 	}
6525 
6526 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6527 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6528 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6529 		drm_object_attach_property(
6530 			&aconnector->base.base,
6531 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6532 
6533 		if (!aconnector->mst_port)
6534 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6535 
6536 #ifdef CONFIG_DRM_AMD_DC_HDCP
6537 		if (adev->dm.hdcp_workqueue)
6538 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6539 #endif
6540 	}
6541 }
6542 
6543 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6544 			      struct i2c_msg *msgs, int num)
6545 {
6546 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6547 	struct ddc_service *ddc_service = i2c->ddc_service;
6548 	struct i2c_command cmd;
6549 	int i;
6550 	int result = -EIO;
6551 
6552 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6553 
6554 	if (!cmd.payloads)
6555 		return result;
6556 
6557 	cmd.number_of_payloads = num;
6558 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6559 	cmd.speed = 100;
6560 
6561 	for (i = 0; i < num; i++) {
6562 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6563 		cmd.payloads[i].address = msgs[i].addr;
6564 		cmd.payloads[i].length = msgs[i].len;
6565 		cmd.payloads[i].data = msgs[i].buf;
6566 	}
6567 
6568 	if (dc_submit_i2c(
6569 			ddc_service->ctx->dc,
6570 			ddc_service->ddc_pin->hw_info.ddc_channel,
6571 			&cmd))
6572 		result = num;
6573 
6574 	kfree(cmd.payloads);
6575 	return result;
6576 }
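
/*
 * Illustrative example (not part of the driver): a plain EDID block read
 * reaches amdgpu_dm_i2c_xfer() as two i2c_msg entries and is translated
 * 1:1 into DC payloads for dc_submit_i2c():
 *
 *	u8 off = 0;
 *	u8 edid[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &off },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *	};
 *
 * msgs[0] becomes a write payload (write = true) carrying the EDID offset,
 * msgs[1] a read payload for the 128-byte block. On success the function
 * returns num (here 2), per the i2c_algorithm master_xfer contract.
 */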
6577 
6578 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6579 {
6580 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6581 }
6582 
6583 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6584 	.master_xfer = amdgpu_dm_i2c_xfer,
6585 	.functionality = amdgpu_dm_i2c_func,
6586 };
6587 
6588 static struct amdgpu_i2c_adapter *
6589 create_i2c(struct ddc_service *ddc_service,
6590 	   int link_index,
6591 	   int *res)
6592 {
6593 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6594 	struct amdgpu_i2c_adapter *i2c;
6595 
6596 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6597 	if (!i2c)
6598 		return NULL;
6599 	i2c->base.owner = THIS_MODULE;
6600 	i2c->base.class = I2C_CLASS_DDC;
6601 	i2c->base.dev.parent = &adev->pdev->dev;
6602 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6603 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6604 	i2c_set_adapdata(&i2c->base, i2c);
6605 	i2c->ddc_service = ddc_service;
6606 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6607 
6608 	return i2c;
6609 }
6610 
6611 
6612 /*
6613  * Note: this function assumes that dc_link_detect() was called for the
6614  * dc_link which will be represented by this aconnector.
6615  */
6616 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6617 				    struct amdgpu_dm_connector *aconnector,
6618 				    uint32_t link_index,
6619 				    struct amdgpu_encoder *aencoder)
6620 {
6621 	int res = 0;
6622 	int connector_type;
6623 	struct dc *dc = dm->dc;
6624 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6625 	struct amdgpu_i2c_adapter *i2c;
6626 
6627 	link->priv = aconnector;
6628 
6629 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6630 
6631 	i2c = create_i2c(link->ddc, link->link_index, &res);
6632 	if (!i2c) {
6633 		DRM_ERROR("Failed to create i2c adapter data\n");
6634 		return -ENOMEM;
6635 	}
6636 
6637 	aconnector->i2c = i2c;
6638 	res = i2c_add_adapter(&i2c->base);
6639 
6640 	if (res) {
6641 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6642 		goto out_free;
6643 	}
6644 
6645 	connector_type = to_drm_connector_type(link->connector_signal);
6646 
6647 	res = drm_connector_init_with_ddc(
6648 			dm->ddev,
6649 			&aconnector->base,
6650 			&amdgpu_dm_connector_funcs,
6651 			connector_type,
6652 			&i2c->base);
6653 
6654 	if (res) {
6655 		DRM_ERROR("connector_init failed\n");
6656 		aconnector->connector_id = -1;
6657 		goto out_free;
6658 	}
6659 
6660 	drm_connector_helper_add(
6661 			&aconnector->base,
6662 			&amdgpu_dm_connector_helper_funcs);
6663 
6664 	amdgpu_dm_connector_init_helper(
6665 		dm,
6666 		aconnector,
6667 		connector_type,
6668 		link,
6669 		link_index);
6670 
6671 	drm_connector_attach_encoder(
6672 		&aconnector->base, &aencoder->base);
6673 
6674 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6675 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6676 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6677 
6678 out_free:
6679 	if (res) {
6680 		kfree(i2c);
6681 		aconnector->i2c = NULL;
6682 	}
6683 	return res;
6684 }
6685 
6686 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6687 {
6688 	switch (adev->mode_info.num_crtc) {
6689 	case 1:
6690 		return 0x1;
6691 	case 2:
6692 		return 0x3;
6693 	case 3:
6694 		return 0x7;
6695 	case 4:
6696 		return 0xf;
6697 	case 5:
6698 		return 0x1f;
6699 	case 6:
6700 	default:
6701 		return 0x3f;
6702 	}
6703 }
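
/*
 * Equivalent formulation (a sketch, assuming num_crtc is in the range
 * 1..6; note the switch above also maps any other value to 0x3f):
 *
 *	return GENMASK(min(adev->mode_info.num_crtc, 6) - 1, 0);
 *
 * i.e. a contiguous mask with one bit per CRTC, capped at six CRTCs.
 */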
6704 
6705 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6706 				  struct amdgpu_encoder *aencoder,
6707 				  uint32_t link_index)
6708 {
6709 	struct amdgpu_device *adev = drm_to_adev(dev);
6710 
6711 	int res = drm_encoder_init(dev,
6712 				   &aencoder->base,
6713 				   &amdgpu_dm_encoder_funcs,
6714 				   DRM_MODE_ENCODER_TMDS,
6715 				   NULL);
6716 
6717 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6718 
6719 	if (!res)
6720 		aencoder->encoder_id = link_index;
6721 	else
6722 		aencoder->encoder_id = -1;
6723 
6724 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6725 
6726 	return res;
6727 }
6728 
6729 static void manage_dm_interrupts(struct amdgpu_device *adev,
6730 				 struct amdgpu_crtc *acrtc,
6731 				 bool enable)
6732 {
6733 	/*
6734 	 * We have no guarantee that the frontend index maps to the same
6735 	 * backend index - some even map to more than one.
6736 	 *
6737 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6738 	 */
6739 	int irq_type =
6740 		amdgpu_display_crtc_idx_to_irq_type(
6741 			adev,
6742 			acrtc->crtc_id);
6743 
6744 	if (enable) {
6745 		drm_crtc_vblank_on(&acrtc->base);
6746 		amdgpu_irq_get(
6747 			adev,
6748 			&adev->pageflip_irq,
6749 			irq_type);
6750 	} else {
6751 
6752 		amdgpu_irq_put(
6753 			adev,
6754 			&adev->pageflip_irq,
6755 			irq_type);
6756 		drm_crtc_vblank_off(&acrtc->base);
6757 	}
6758 }
6759 
6760 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6761 				      struct amdgpu_crtc *acrtc)
6762 {
6763 	int irq_type =
6764 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6765 
6766 	/*
6767 	 * Read the current state for the IRQ and force-reapply
6768 	 * the setting to hardware.
6769 	 */
6770 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6771 }
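
/*
 * Note (an assumption, based on the usage here and in the power-gating
 * comment further below): unlike the amdgpu_irq_get()/amdgpu_irq_put()
 * pair in manage_dm_interrupts(), amdgpu_irq_update() does not touch the
 * reference count; it only re-applies the currently computed enable state
 * to the hardware, which is what a pipe needs after leaving power gating.
 */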
6772 
6773 static bool
6774 is_scaling_state_different(const struct dm_connector_state *dm_state,
6775 			   const struct dm_connector_state *old_dm_state)
6776 {
6777 	if (dm_state->scaling != old_dm_state->scaling)
6778 		return true;
6779 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6780 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6781 			return true;
6782 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6783 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6784 			return true;
6785 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6786 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6787 		return true;
6788 	return false;
6789 }
6790 
6791 #ifdef CONFIG_DRM_AMD_DC_HDCP
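/*
 * Illustrative summary of the transitions handled below, written as
 * (old content_protection, new content_protection) -> result:
 *
 *	content type changed, new != UNDESIRED -> force DESIRED, different
 *	(ENABLED, DESIRED)                     -> keep ENABLED, not different
 *	(UNDESIRED, ENABLED)  [S3 resume]      -> demote to DESIRED, then
 *	                                          different if a sink is
 *	                                          connected with DPMS on
 *	(X, X) unchanged                       -> not different
 *	(X, UNDESIRED)                         -> different (disable HDCP)
 */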
6792 static bool is_content_protection_different(struct drm_connector_state *state,
6793 					    const struct drm_connector_state *old_state,
6794 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6795 {
6796 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6797 
6798 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6799 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6800 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6801 		return true;
6802 	}
6803 
6804 	/* CP is being re-enabled; ignore this. */
6805 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6806 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6807 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6808 		return false;
6809 	}
6810 
6811 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED. */
6812 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6813 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6814 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6815 
6816 	/* Check that something is actually connected and enabled; otherwise we
6817 	 * would start HDCP with nothing driving it (hot-plug, headless S3, DPMS).
6818 	 */
6819 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6820 	    aconnector->dc_sink != NULL)
6821 		return true;
6822 
6823 	if (old_state->content_protection == state->content_protection)
6824 		return false;
6825 
6826 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6827 		return true;
6828 
6829 	return false;
6830 }
6831 
6832 #endif
6833 static void remove_stream(struct amdgpu_device *adev,
6834 			  struct amdgpu_crtc *acrtc,
6835 			  struct dc_stream_state *stream)
6836 {
6837 	/* this is the update mode case */
6838 
6839 	acrtc->otg_inst = -1;
6840 	acrtc->enabled = false;
6841 }
6842 
6843 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6844 			       struct dc_cursor_position *position)
6845 {
6846 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6847 	int x, y;
6848 	int xorigin = 0, yorigin = 0;
6849 
6850 	if (!crtc || !plane->state->fb)
6851 		return 0;
6852 
6853 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6854 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6855 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6856 			  __func__,
6857 			  plane->state->crtc_w,
6858 			  plane->state->crtc_h);
6859 		return -EINVAL;
6860 	}
6861 
6862 	x = plane->state->crtc_x;
6863 	y = plane->state->crtc_y;
6864 
6865 	if (x <= -amdgpu_crtc->max_cursor_width ||
6866 	    y <= -amdgpu_crtc->max_cursor_height)
6867 		return 0;
6868 
6869 	if (x < 0) {
6870 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6871 		x = 0;
6872 	}
6873 	if (y < 0) {
6874 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6875 		y = 0;
6876 	}
6877 	position->enable = true;
6878 	position->translate_by_source = true;
6879 	position->x = x;
6880 	position->y = y;
6881 	position->x_hotspot = xorigin;
6882 	position->y_hotspot = yorigin;
6883 
6884 	return 0;
6885 }
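
/*
 * Worked example (illustrative): with max_cursor_width == 128 and the
 * cursor plane at crtc_x == -16, the code above clamps x to 0 and sets
 * x_hotspot to 16, so DC starts scanning the cursor image 16 pixels in
 * and the visible part stays aligned with the underlying plane.
 */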
6886 
6887 static void handle_cursor_update(struct drm_plane *plane,
6888 				 struct drm_plane_state *old_plane_state)
6889 {
6890 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6891 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6892 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6893 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6894 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6895 	uint64_t address = afb ? afb->address : 0;
6896 	struct dc_cursor_position position = {0};
6897 	struct dc_cursor_attributes attributes;
6898 	int ret;
6899 
6900 	if (!plane->state->fb && !old_plane_state->fb)
6901 		return;
6902 
6903 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6904 			 __func__,
6905 			 amdgpu_crtc->crtc_id,
6906 			 plane->state->crtc_w,
6907 			 plane->state->crtc_h);
6908 
6909 	ret = get_cursor_position(plane, crtc, &position);
6910 	if (ret)
6911 		return;
6912 
6913 	if (!position.enable) {
6914 		/* turn off cursor */
6915 		if (crtc_state && crtc_state->stream) {
6916 			mutex_lock(&adev->dm.dc_lock);
6917 			dc_stream_set_cursor_position(crtc_state->stream,
6918 						      &position);
6919 			mutex_unlock(&adev->dm.dc_lock);
6920 		}
6921 		return;
6922 	}
6923 
6924 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6925 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6926 
6927 	memset(&attributes, 0, sizeof(attributes));
6928 	attributes.address.high_part = upper_32_bits(address);
6929 	attributes.address.low_part  = lower_32_bits(address);
6930 	attributes.width             = plane->state->crtc_w;
6931 	attributes.height            = plane->state->crtc_h;
6932 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6933 	attributes.rotation_angle    = 0;
6934 	attributes.attribute_flags.value = 0;
6935 
6936 	attributes.pitch = attributes.width;
6937 
6938 	if (crtc_state->stream) {
6939 		mutex_lock(&adev->dm.dc_lock);
6940 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6941 							 &attributes))
6942 			DRM_ERROR("DC failed to set cursor attributes\n");
6943 
6944 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6945 						   &position))
6946 			DRM_ERROR("DC failed to set cursor position\n");
6947 		mutex_unlock(&adev->dm.dc_lock);
6948 	}
6949 }
6950 
6951 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6952 {
6953 
6954 	assert_spin_locked(&acrtc->base.dev->event_lock);
6955 	WARN_ON(acrtc->event);
6956 
6957 	acrtc->event = acrtc->base.state->event;
6958 
6959 	/* Set the flip status */
6960 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6961 
6962 	/* Mark this event as consumed */
6963 	acrtc->base.state->event = NULL;
6964 
6965 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6966 						 acrtc->crtc_id);
6967 }
6968 
6969 static void update_freesync_state_on_stream(
6970 	struct amdgpu_display_manager *dm,
6971 	struct dm_crtc_state *new_crtc_state,
6972 	struct dc_stream_state *new_stream,
6973 	struct dc_plane_state *surface,
6974 	u32 flip_timestamp_in_us)
6975 {
6976 	struct mod_vrr_params vrr_params;
6977 	struct dc_info_packet vrr_infopacket = {0};
6978 	struct amdgpu_device *adev = dm->adev;
6979 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6980 	unsigned long flags;
6981 
6982 	if (!new_stream)
6983 		return;
6984 
6985 	/*
6986 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6987 	 * For now it's sufficient to just guard against these conditions.
6988 	 */
6989 
6990 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6991 		return;
6992 
6993 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6994 	vrr_params = acrtc->dm_irq_params.vrr_params;
6995 
6996 	if (surface) {
6997 		mod_freesync_handle_preflip(
6998 			dm->freesync_module,
6999 			surface,
7000 			new_stream,
7001 			flip_timestamp_in_us,
7002 			&vrr_params);
7003 
7004 		if (adev->family < AMDGPU_FAMILY_AI &&
7005 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7006 			mod_freesync_handle_v_update(dm->freesync_module,
7007 						     new_stream, &vrr_params);
7008 
7009 			/* Need to call this before the frame ends. */
7010 			dc_stream_adjust_vmin_vmax(dm->dc,
7011 						   new_crtc_state->stream,
7012 						   &vrr_params.adjust);
7013 		}
7014 	}
7015 
7016 	mod_freesync_build_vrr_infopacket(
7017 		dm->freesync_module,
7018 		new_stream,
7019 		&vrr_params,
7020 		PACKET_TYPE_VRR,
7021 		TRANSFER_FUNC_UNKNOWN,
7022 		&vrr_infopacket);
7023 
7024 	new_crtc_state->freesync_timing_changed |=
7025 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7026 			&vrr_params.adjust,
7027 			sizeof(vrr_params.adjust)) != 0);
7028 
7029 	new_crtc_state->freesync_vrr_info_changed |=
7030 		(memcmp(&new_crtc_state->vrr_infopacket,
7031 			&vrr_infopacket,
7032 			sizeof(vrr_infopacket)) != 0);
7033 
7034 	acrtc->dm_irq_params.vrr_params = vrr_params;
7035 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7036 
7037 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7038 	new_stream->vrr_infopacket = vrr_infopacket;
7039 
7040 	if (new_crtc_state->freesync_vrr_info_changed)
7041 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7042 			      new_crtc_state->base.crtc->base.id,
7043 			      (int)new_crtc_state->base.vrr_enabled,
7044 			      (int)vrr_params.state);
7045 
7046 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7047 }
7048 
7049 static void update_stream_irq_parameters(
7050 	struct amdgpu_display_manager *dm,
7051 	struct dm_crtc_state *new_crtc_state)
7052 {
7053 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7054 	struct mod_vrr_params vrr_params;
7055 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7056 	struct amdgpu_device *adev = dm->adev;
7057 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7058 	unsigned long flags;
7059 
7060 	if (!new_stream)
7061 		return;
7062 
7063 	/*
7064 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7065 	 * For now it's sufficient to just guard against these conditions.
7066 	 */
7067 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7068 		return;
7069 
7070 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7071 	vrr_params = acrtc->dm_irq_params.vrr_params;
7072 
7073 	if (new_crtc_state->vrr_supported &&
7074 	    config.min_refresh_in_uhz &&
7075 	    config.max_refresh_in_uhz) {
7076 		config.state = new_crtc_state->base.vrr_enabled ?
7077 			VRR_STATE_ACTIVE_VARIABLE :
7078 			VRR_STATE_INACTIVE;
7079 	} else {
7080 		config.state = VRR_STATE_UNSUPPORTED;
7081 	}
7082 
7083 	mod_freesync_build_vrr_params(dm->freesync_module,
7084 				      new_stream,
7085 				      &config, &vrr_params);
7086 
7087 	new_crtc_state->freesync_timing_changed |=
7088 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7089 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7090 
7091 	new_crtc_state->freesync_config = config;
7092 	/* Copy state for access from DM IRQ handler */
7093 	acrtc->dm_irq_params.freesync_config = config;
7094 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7095 	acrtc->dm_irq_params.vrr_params = vrr_params;
7096 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7097 }
7098 
7099 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7100 					    struct dm_crtc_state *new_state)
7101 {
7102 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7103 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7104 
7105 	if (!old_vrr_active && new_vrr_active) {
7106 		/* Transition VRR inactive -> active:
7107 		 * While VRR is active, we must not disable vblank irq, as a
7108 		 * reenable after disable would compute bogus vblank/pflip
7109 		 * timestamps if it likely happened inside display front-porch.
7110 		 *
7111 		 * We also need vupdate irq for the actual core vblank handling
7112 		 * at end of vblank.
7113 		 */
7114 		dm_set_vupdate_irq(new_state->base.crtc, true);
7115 		drm_crtc_vblank_get(new_state->base.crtc);
7116 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7117 				 __func__, new_state->base.crtc->base.id);
7118 	} else if (old_vrr_active && !new_vrr_active) {
7119 		/* Transition VRR active -> inactive:
7120 		 * Allow vblank irq disable again for fixed refresh rate.
7121 		 */
7122 		dm_set_vupdate_irq(new_state->base.crtc, false);
7123 		drm_crtc_vblank_put(new_state->base.crtc);
7124 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7125 				 __func__, new_state->base.crtc->base.id);
7126 	}
7127 }
7128 
7129 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7130 {
7131 	struct drm_plane *plane;
7132 	struct drm_plane_state *old_plane_state, *new_plane_state;
7133 	int i;
7134 
7135 	/*
7136 	 * TODO: Make this per-stream so we don't issue redundant updates for
7137 	 * commits with multiple streams.
7138 	 */
7139 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7140 				       new_plane_state, i)
7141 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7142 			handle_cursor_update(plane, old_plane_state);
7143 }
7144 
7145 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7146 				    struct dc_state *dc_state,
7147 				    struct drm_device *dev,
7148 				    struct amdgpu_display_manager *dm,
7149 				    struct drm_crtc *pcrtc,
7150 				    bool wait_for_vblank)
7151 {
7152 	uint32_t i;
7153 	uint64_t timestamp_ns;
7154 	struct drm_plane *plane;
7155 	struct drm_plane_state *old_plane_state, *new_plane_state;
7156 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7157 	struct drm_crtc_state *new_pcrtc_state =
7158 			drm_atomic_get_new_crtc_state(state, pcrtc);
7159 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7160 	struct dm_crtc_state *dm_old_crtc_state =
7161 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7162 	int planes_count = 0, vpos, hpos;
7163 	long r;
7164 	unsigned long flags;
7165 	struct amdgpu_bo *abo;
7166 	uint32_t target_vblank, last_flip_vblank;
7167 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7168 	bool pflip_present = false;
7169 	struct {
7170 		struct dc_surface_update surface_updates[MAX_SURFACES];
7171 		struct dc_plane_info plane_infos[MAX_SURFACES];
7172 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7173 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7174 		struct dc_stream_update stream_update;
7175 	} *bundle;
7176 
7177 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7178 
7179 	if (!bundle) {
7180 		dm_error("Failed to allocate update bundle\n");
7181 		goto cleanup;
7182 	}
7183 
7184 	/*
7185 	 * Disable the cursor first if we're disabling all the planes.
7186 	 * It'll remain on the screen after the planes are re-enabled
7187 	 * if we don't.
7188 	 */
7189 	if (acrtc_state->active_planes == 0)
7190 		amdgpu_dm_commit_cursors(state);
7191 
7192 	/* update planes when needed */
7193 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7194 		struct drm_crtc *crtc = new_plane_state->crtc;
7195 		struct drm_crtc_state *new_crtc_state;
7196 		struct drm_framebuffer *fb = new_plane_state->fb;
7197 		bool plane_needs_flip;
7198 		struct dc_plane_state *dc_plane;
7199 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7200 
7201 		/* Cursor plane is handled after stream updates */
7202 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7203 			continue;
7204 
7205 		if (!fb || !crtc || pcrtc != crtc)
7206 			continue;
7207 
7208 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7209 		if (!new_crtc_state->active)
7210 			continue;
7211 
7212 		dc_plane = dm_new_plane_state->dc_state;
7213 
7214 		bundle->surface_updates[planes_count].surface = dc_plane;
7215 		if (new_pcrtc_state->color_mgmt_changed) {
7216 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7217 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7218 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7219 		}
7220 
7221 		fill_dc_scaling_info(new_plane_state,
7222 				     &bundle->scaling_infos[planes_count]);
7223 
7224 		bundle->surface_updates[planes_count].scaling_info =
7225 			&bundle->scaling_infos[planes_count];
7226 
7227 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7228 
7229 		pflip_present = pflip_present || plane_needs_flip;
7230 
7231 		if (!plane_needs_flip) {
7232 			planes_count += 1;
7233 			continue;
7234 		}
7235 
7236 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7237 
7238 		/*
7239 		 * Wait for all fences on this FB. Do limited wait to avoid
7240 		 * deadlock during GPU reset when this fence will not signal
7241 		 * but we hold reservation lock for the BO.
7242 		 */
7243 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7244 							false,
7245 							msecs_to_jiffies(5000));
7246 		if (unlikely(r <= 0))
7247 			DRM_ERROR("Waiting for fences timed out!");
7248 
7249 		fill_dc_plane_info_and_addr(
7250 			dm->adev, new_plane_state,
7251 			dm_new_plane_state->tiling_flags,
7252 			&bundle->plane_infos[planes_count],
7253 			&bundle->flip_addrs[planes_count].address,
7254 			dm_new_plane_state->tmz_surface, false);
7255 
7256 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7257 				 new_plane_state->plane->index,
7258 				 bundle->plane_infos[planes_count].dcc.enable);
7259 
7260 		bundle->surface_updates[planes_count].plane_info =
7261 			&bundle->plane_infos[planes_count];
7262 
7263 		/*
7264 		 * Only allow immediate flips for fast updates that don't
7265 		 * change FB pitch, DCC state, rotation or mirroring.
7266 		 */
7267 		bundle->flip_addrs[planes_count].flip_immediate =
7268 			crtc->state->async_flip &&
7269 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7270 
7271 		timestamp_ns = ktime_get_ns();
7272 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7273 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7274 		bundle->surface_updates[planes_count].surface = dc_plane;
7275 
7276 		if (!bundle->surface_updates[planes_count].surface) {
7277 			DRM_ERROR("No surface for CRTC: id=%d\n",
7278 					acrtc_attach->crtc_id);
7279 			continue;
7280 		}
7281 
7282 		if (plane == pcrtc->primary)
7283 			update_freesync_state_on_stream(
7284 				dm,
7285 				acrtc_state,
7286 				acrtc_state->stream,
7287 				dc_plane,
7288 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7289 
7290 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7291 				 __func__,
7292 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7293 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7294 
7295 		planes_count += 1;
7296 
7297 	}
7298 
7299 	if (pflip_present) {
7300 		if (!vrr_active) {
7301 			/* Use old throttling in non-vrr fixed refresh rate mode
7302 			 * to keep flip scheduling based on target vblank counts
7303 			 * working in a backwards compatible way, e.g., for
7304 			 * clients using the GLX_OML_sync_control extension or
7305 			 * DRI3/Present extension with defined target_msc.
7306 			 */
7307 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7308 		}
7309 		else {
7310 			/* For variable refresh rate mode only:
7311 			 * Get vblank of last completed flip to avoid > 1 vrr
7312 			 * flips per video frame by use of throttling, but allow
7313 			 * flip programming anywhere in the possibly large
7314 			 * variable vrr vblank interval for fine-grained flip
7315 			 * timing control and more opportunity to avoid stutter
7316 			 * on late submission of flips.
7317 			 */
7318 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7319 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7320 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7321 		}
7322 
7323 		target_vblank = last_flip_vblank + wait_for_vblank;
7324 
7325 		/*
7326 		 * Wait until we're out of the vertical blank period before the one
7327 		 * targeted by the flip
7328 		 */
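		/*
		 * Worked example (illustrative): with wait_for_vblank true and
		 * last_flip_vblank == 1000, target_vblank is 1001; the loop
		 * below then sleeps in ~1 ms steps while scanout is still
		 * inside a vblank period and the counter has not yet reached
		 * 1001, i.e. until we have left the vblank preceding the one
		 * targeted by the flip.
		 */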
7329 		while ((acrtc_attach->enabled &&
7330 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7331 							    0, &vpos, &hpos, NULL,
7332 							    NULL, &pcrtc->hwmode)
7333 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7334 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7335 			(int)(target_vblank -
7336 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7337 			usleep_range(1000, 1100);
7338 		}
7339 
7340 		/**
7341 		 * Prepare the flip event for the pageflip interrupt to handle.
7342 		 *
7343 		 * This only works in the case where we've already turned on the
7344 		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7345 		 * from 0 -> n planes we have to skip a hardware generated event
7346 		 * and rely on sending it from software.
7347 		 */
7348 		if (acrtc_attach->base.state->event &&
7349 		    acrtc_state->active_planes > 0) {
7350 			drm_crtc_vblank_get(pcrtc);
7351 
7352 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7353 
7354 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7355 			prepare_flip_isr(acrtc_attach);
7356 
7357 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7358 		}
7359 
7360 		if (acrtc_state->stream) {
7361 			if (acrtc_state->freesync_vrr_info_changed)
7362 				bundle->stream_update.vrr_infopacket =
7363 					&acrtc_state->stream->vrr_infopacket;
7364 		}
7365 	}
7366 
7367 	/* Update the planes if changed or disable if we don't have any. */
7368 	if ((planes_count || acrtc_state->active_planes == 0) &&
7369 		acrtc_state->stream) {
7370 		bundle->stream_update.stream = acrtc_state->stream;
7371 		if (new_pcrtc_state->mode_changed) {
7372 			bundle->stream_update.src = acrtc_state->stream->src;
7373 			bundle->stream_update.dst = acrtc_state->stream->dst;
7374 		}
7375 
7376 		if (new_pcrtc_state->color_mgmt_changed) {
7377 			/*
7378 			 * TODO: This isn't fully correct since we've actually
7379 			 * already modified the stream in place.
7380 			 */
7381 			bundle->stream_update.gamut_remap =
7382 				&acrtc_state->stream->gamut_remap_matrix;
7383 			bundle->stream_update.output_csc_transform =
7384 				&acrtc_state->stream->csc_color_matrix;
7385 			bundle->stream_update.out_transfer_func =
7386 				acrtc_state->stream->out_transfer_func;
7387 		}
7388 
7389 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7390 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7391 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7392 
7393 		/*
7394 		 * If FreeSync state on the stream has changed then we need to
7395 		 * re-adjust the min/max bounds now that DC doesn't handle this
7396 		 * as part of commit.
7397 		 */
7398 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7399 		    amdgpu_dm_vrr_active(acrtc_state)) {
7400 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7401 			dc_stream_adjust_vmin_vmax(
7402 				dm->dc, acrtc_state->stream,
7403 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7404 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7405 		}
7406 		mutex_lock(&dm->dc_lock);
7407 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7408 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7409 			amdgpu_dm_psr_disable(acrtc_state->stream);
7410 
7411 		dc_commit_updates_for_stream(dm->dc,
7412 						     bundle->surface_updates,
7413 						     planes_count,
7414 						     acrtc_state->stream,
7415 						     &bundle->stream_update,
7416 						     dc_state);
7417 
7418 		/**
7419 		 * Enable or disable the interrupts on the backend.
7420 		 *
7421 		 * Most pipes are put into power gating when unused.
7422 		 *
7423 		 * Power gating a pipe loses its interrupt enablement
7424 		 * state, and that state is not restored by ungating alone.
7425 		 *
7426 		 * So we need to update the IRQ control state in hardware
7427 		 * whenever the pipe turns on (since it could be previously
7428 		 * power gated) or off (since some pipes can't be power gated
7429 		 * on some ASICs).
7430 		 */
7431 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7432 			dm_update_pflip_irq_state(drm_to_adev(dev),
7433 						  acrtc_attach);
7434 
7435 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7436 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7437 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7438 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7439 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7440 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7441 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7442 			amdgpu_dm_psr_enable(acrtc_state->stream);
7443 		}
7444 
7445 		mutex_unlock(&dm->dc_lock);
7446 	}
7447 
7448 	/*
7449 	 * Update cursor state *after* programming all the planes.
7450 	 * This avoids redundant programming in the case where we're going
7451 	 * to be disabling a single plane - those pipes are being disabled.
7452 	 */
7453 	if (acrtc_state->active_planes)
7454 		amdgpu_dm_commit_cursors(state);
7455 
7456 cleanup:
7457 	kfree(bundle);
7458 }
7459 
7460 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7461 				   struct drm_atomic_state *state)
7462 {
7463 	struct amdgpu_device *adev = drm_to_adev(dev);
7464 	struct amdgpu_dm_connector *aconnector;
7465 	struct drm_connector *connector;
7466 	struct drm_connector_state *old_con_state, *new_con_state;
7467 	struct drm_crtc_state *new_crtc_state;
7468 	struct dm_crtc_state *new_dm_crtc_state;
7469 	const struct dc_stream_status *status;
7470 	int i, inst;
7471 
7472 	/* Notify device removals. */
7473 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7474 		if (old_con_state->crtc != new_con_state->crtc) {
7475 			/* CRTC changes require notification. */
7476 			goto notify;
7477 		}
7478 
7479 		if (!new_con_state->crtc)
7480 			continue;
7481 
7482 		new_crtc_state = drm_atomic_get_new_crtc_state(
7483 			state, new_con_state->crtc);
7484 
7485 		if (!new_crtc_state)
7486 			continue;
7487 
7488 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7489 			continue;
7490 
7491 	notify:
7492 		aconnector = to_amdgpu_dm_connector(connector);
7493 
7494 		mutex_lock(&adev->dm.audio_lock);
7495 		inst = aconnector->audio_inst;
7496 		aconnector->audio_inst = -1;
7497 		mutex_unlock(&adev->dm.audio_lock);
7498 
7499 		amdgpu_dm_audio_eld_notify(adev, inst);
7500 	}
7501 
7502 	/* Notify audio device additions. */
7503 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7504 		if (!new_con_state->crtc)
7505 			continue;
7506 
7507 		new_crtc_state = drm_atomic_get_new_crtc_state(
7508 			state, new_con_state->crtc);
7509 
7510 		if (!new_crtc_state)
7511 			continue;
7512 
7513 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7514 			continue;
7515 
7516 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7517 		if (!new_dm_crtc_state->stream)
7518 			continue;
7519 
7520 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7521 		if (!status)
7522 			continue;
7523 
7524 		aconnector = to_amdgpu_dm_connector(connector);
7525 
7526 		mutex_lock(&adev->dm.audio_lock);
7527 		inst = status->audio_inst;
7528 		aconnector->audio_inst = inst;
7529 		mutex_unlock(&adev->dm.audio_lock);
7530 
7531 		amdgpu_dm_audio_eld_notify(adev, inst);
7532 	}
7533 }
7534 
7535 /*
7536  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7537  * @crtc_state: the DRM CRTC state
7538  * @stream_state: the DC stream state.
7539  *
7540  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7541  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7542  */
7543 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7544 						struct dc_stream_state *stream_state)
7545 {
7546 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7547 }
7548 
7549 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7550 				   struct drm_atomic_state *state,
7551 				   bool nonblock)
7552 {
7553 	/*
7554 	 * Add a check here for SoCs that support a hardware cursor plane, to
7555 	 * unset legacy_cursor_update.
7556 	 */
7557 
7558 	return drm_atomic_helper_commit(dev, state, nonblock);
7559 
7560 	/* TODO: Handle EINTR, re-enable IRQ */
7561 }
7562 
7563 /**
7564  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7565  * @state: The atomic state to commit
7566  *
7567  * This will tell DC to commit the constructed DC state from atomic_check,
7568  * programming the hardware. Any failures here implies a hardware failure, since
7569  * atomic check should have filtered anything non-kosher.
7570  */
7571 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7572 {
7573 	struct drm_device *dev = state->dev;
7574 	struct amdgpu_device *adev = drm_to_adev(dev);
7575 	struct amdgpu_display_manager *dm = &adev->dm;
7576 	struct dm_atomic_state *dm_state;
7577 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7578 	uint32_t i, j;
7579 	struct drm_crtc *crtc;
7580 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7581 	unsigned long flags;
7582 	bool wait_for_vblank = true;
7583 	struct drm_connector *connector;
7584 	struct drm_connector_state *old_con_state, *new_con_state;
7585 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7586 	int crtc_disable_count = 0;
7587 	bool mode_set_reset_required = false;
7588 
7589 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7590 
7591 	dm_state = dm_atomic_get_new_state(state);
7592 	if (dm_state && dm_state->context) {
7593 		dc_state = dm_state->context;
7594 	} else {
7595 		/* No state changes, retain current state. */
7596 		dc_state_temp = dc_create_state(dm->dc);
7597 		ASSERT(dc_state_temp);
7598 		dc_state = dc_state_temp;
7599 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7600 	}
7601 
7602 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7603 				       new_crtc_state, i) {
7604 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7605 
7606 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7607 
7608 		if (old_crtc_state->active &&
7609 		    (!new_crtc_state->active ||
7610 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7611 			manage_dm_interrupts(adev, acrtc, false);
7612 			dc_stream_release(dm_old_crtc_state->stream);
7613 		}
7614 	}
7615 
7616 	drm_atomic_helper_calc_timestamping_constants(state);
7617 
7618 	/* update changed items */
7619 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7620 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7621 
7622 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7623 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7624 
7625 		DRM_DEBUG_DRIVER(
7626 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7627 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7628 			"connectors_changed:%d\n",
7629 			acrtc->crtc_id,
7630 			new_crtc_state->enable,
7631 			new_crtc_state->active,
7632 			new_crtc_state->planes_changed,
7633 			new_crtc_state->mode_changed,
7634 			new_crtc_state->active_changed,
7635 			new_crtc_state->connectors_changed);
7636 
7637 		/* Copy all transient state flags into dc state */
7638 		if (dm_new_crtc_state->stream) {
7639 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7640 							    dm_new_crtc_state->stream);
7641 		}
7642 
7643 		/* handles headless hotplug case, updating new_state and
7644 		 * aconnector as needed
7645 		 */
7646 
7647 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7648 
7649 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7650 
7651 			if (!dm_new_crtc_state->stream) {
7652 				/*
7653 				 * This can happen because of issues with
7654 				 * delivery of userspace notifications: userspace
7655 				 * tries to set a mode on a display that is in
7656 				 * fact disconnected, so dc_sink is NULL on the
7657 				 * aconnector. We expect a mode-reset call to
7658 				 * arrive soon.
7659 				 *
7660 				 * It can also happen when an unplug occurs
7661 				 * while the resume sequence is in progress.
7662 				 *
7663 				 * In either case, we want to pretend we still
7664 				 * have a sink to keep the pipe running, so that
7665 				 * hw state stays consistent with the sw state.
7666 				 */
7667 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7668 						__func__, acrtc->base.base.id);
7669 				continue;
7670 			}
7671 
7672 			if (dm_old_crtc_state->stream)
7673 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7674 
7675 			pm_runtime_get_noresume(dev->dev);
7676 
7677 			acrtc->enabled = true;
7678 			acrtc->hw_mode = new_crtc_state->mode;
7679 			crtc->hwmode = new_crtc_state->mode;
7680 			mode_set_reset_required = true;
7681 		} else if (modereset_required(new_crtc_state)) {
7682 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7683 			/* i.e. reset mode */
7684 			if (dm_old_crtc_state->stream)
7685 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7686 			mode_set_reset_required = true;
7687 		}
7688 	} /* for_each_crtc_in_state() */
7689 
7690 	if (dc_state) {
7691 		/* If there was a mode set or reset, disable eDP PSR. */
7692 		if (mode_set_reset_required)
7693 			amdgpu_dm_psr_disable_all(dm);
7694 
7695 		dm_enable_per_frame_crtc_master_sync(dc_state);
7696 		mutex_lock(&dm->dc_lock);
7697 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7698 		mutex_unlock(&dm->dc_lock);
7699 	}
7700 
7701 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7702 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7703 
7704 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7705 
7706 		if (dm_new_crtc_state->stream != NULL) {
7707 			const struct dc_stream_status *status =
7708 					dc_stream_get_status(dm_new_crtc_state->stream);
7709 
7710 			if (!status)
7711 				status = dc_stream_get_status_from_state(dc_state,
7712 									 dm_new_crtc_state->stream);
7713 			if (!status)
7714 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7715 			else
7716 				acrtc->otg_inst = status->primary_otg_inst;
7717 		}
7718 	}
7719 #ifdef CONFIG_DRM_AMD_DC_HDCP
7720 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7721 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7722 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7723 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7724 
7725 		new_crtc_state = NULL;
7726 
7727 		if (acrtc)
7728 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7729 
7730 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7731 
7732 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7733 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7734 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7735 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7736 			continue;
7737 		}
7738 
7739 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7740 			hdcp_update_display(
7741 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7742 				new_con_state->hdcp_content_type,
7743 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7744 													 : false);
7745 	}
7746 #endif
7747 
7748 	/* Handle connector state changes */
7749 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7750 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7751 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7752 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7753 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7754 		struct dc_stream_update stream_update;
7755 		struct dc_info_packet hdr_packet;
7756 		struct dc_stream_status *status = NULL;
7757 		bool abm_changed, hdr_changed, scaling_changed;
7758 
7759 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7760 		memset(&stream_update, 0, sizeof(stream_update));
7761 
7762 		if (acrtc) {
7763 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7764 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7765 		}
7766 
7767 		/* Skip any modesets/resets */
7768 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7769 			continue;
7770 
7771 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7772 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7773 
7774 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7775 							     dm_old_con_state);
7776 
7777 		abm_changed = dm_new_crtc_state->abm_level !=
7778 			      dm_old_crtc_state->abm_level;
7779 
7780 		hdr_changed =
7781 			is_hdr_metadata_different(old_con_state, new_con_state);
7782 
7783 		if (!scaling_changed && !abm_changed && !hdr_changed)
7784 			continue;
7785 
7786 		stream_update.stream = dm_new_crtc_state->stream;
7787 		if (scaling_changed) {
7788 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7789 					dm_new_con_state, dm_new_crtc_state->stream);
7790 
7791 			stream_update.src = dm_new_crtc_state->stream->src;
7792 			stream_update.dst = dm_new_crtc_state->stream->dst;
7793 		}
7794 
7795 		if (abm_changed) {
7796 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7797 
7798 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7799 		}
7800 
7801 		if (hdr_changed) {
7802 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7803 			stream_update.hdr_static_metadata = &hdr_packet;
7804 		}
7805 
7806 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7807 		WARN_ON(!status);
7808 		WARN_ON(!status->plane_count);
7809 
7810 		/*
7811 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7812 		 * Here we create an empty update on each plane.
7813 		 * To fix this, DC should permit updating only stream properties.
7814 		 */
7815 		for (j = 0; j < status->plane_count; j++)
7816 			dummy_updates[j].surface = status->plane_states[0];
7817 
7818 
7819 		mutex_lock(&dm->dc_lock);
7820 		dc_commit_updates_for_stream(dm->dc,
7821 						     dummy_updates,
7822 						     status->plane_count,
7823 						     dm_new_crtc_state->stream,
7824 						     &stream_update,
7825 						     dc_state);
7826 		mutex_unlock(&dm->dc_lock);
7827 	}
7828 
7829 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7830 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7831 				      new_crtc_state, i) {
7832 		if (old_crtc_state->active && !new_crtc_state->active)
7833 			crtc_disable_count++;
7834 
7835 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7836 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7837 
7838 		/* For freesync config update on crtc state and params for irq */
7839 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7840 
7841 		/* Handle vrr on->off / off->on transitions */
7842 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7843 						dm_new_crtc_state);
7844 	}
7845 
7846 	/**
7847 	 * Enable interrupts for CRTCs that are newly enabled or went through
7848 	 * a modeset. It was intentionally deferred until after the front end
7849 	 * state was modified to wait until the OTG was on and so the IRQ
7850 	 * handlers didn't access stale or invalid state.
7851 	 */
7852 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7853 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7854 
7855 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7856 
7857 		if (new_crtc_state->active &&
7858 		    (!old_crtc_state->active ||
7859 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7860 			dc_stream_retain(dm_new_crtc_state->stream);
7861 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7862 			manage_dm_interrupts(adev, acrtc, true);
7863 
7864 #ifdef CONFIG_DEBUG_FS
7865 			/**
7866 			 * Frontend may have changed so reapply the CRC capture
7867 			 * settings for the stream.
7868 			 */
7869 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7870 
7871 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7872 				amdgpu_dm_crtc_configure_crc_source(
7873 					crtc, dm_new_crtc_state,
7874 					dm_new_crtc_state->crc_src);
7875 			}
7876 #endif
7877 		}
7878 	}
7879 
7880 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7881 		if (new_crtc_state->async_flip)
7882 			wait_for_vblank = false;
7883 
7884 	/* update planes when needed per crtc*/
7885 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7886 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7887 
7888 		if (dm_new_crtc_state->stream)
7889 			amdgpu_dm_commit_planes(state, dc_state, dev,
7890 						dm, crtc, wait_for_vblank);
7891 	}
7892 
7893 	/* Update audio instances for each connector. */
7894 	amdgpu_dm_commit_audio(dev, state);
7895 
7896 	/*
7897 	 * Send a vblank event for every event not handled in the flip path,
7898 	 * and mark each consumed event for drm_atomic_helper_commit_hw_done.
7899 	 */
7900 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7901 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7902 
7903 		if (new_crtc_state->event)
7904 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7905 
7906 		new_crtc_state->event = NULL;
7907 	}
7908 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7909 
7910 	/* Signal HW programming completion */
7911 	drm_atomic_helper_commit_hw_done(state);
7912 
7913 	if (wait_for_vblank)
7914 		drm_atomic_helper_wait_for_flip_done(dev, state);
7915 
7916 	drm_atomic_helper_cleanup_planes(dev, state);
7917 
7918 	/*
7919 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7920 	 * so we can put the GPU into runtime suspend if we're not driving any
7921 	 * displays anymore
7922 	 */
7923 	for (i = 0; i < crtc_disable_count; i++)
7924 		pm_runtime_put_autosuspend(dev->dev);
7925 	pm_runtime_mark_last_busy(dev->dev);
7926 
7927 	if (dc_state_temp)
7928 		dc_release_state(dc_state_temp);
7929 }
7930 
7931 
7932 static int dm_force_atomic_commit(struct drm_connector *connector)
7933 {
7934 	int ret = 0;
7935 	struct drm_device *ddev = connector->dev;
7936 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7937 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7938 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7939 	struct drm_connector_state *conn_state;
7940 	struct drm_crtc_state *crtc_state;
7941 	struct drm_plane_state *plane_state;
7942 
7943 	if (!state)
7944 		return -ENOMEM;
7945 
7946 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7947 
7948 	/* Construct an atomic state to restore previous display setting */
7949 
7950 	/*
7951 	 * Attach connectors to drm_atomic_state
7952 	 */
7953 	conn_state = drm_atomic_get_connector_state(state, connector);
7954 
7955 	ret = PTR_ERR_OR_ZERO(conn_state);
7956 	if (ret)
7957 		goto out;
7958 
7959 	/* Attach CRTC to drm_atomic_state */
7960 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7961 
7962 	ret = PTR_ERR_OR_ZERO(crtc_state);
7963 	if (ret)
7964 		goto out;
7965 
7966 	/* force a restore */
7967 	crtc_state->mode_changed = true;
7968 
7969 	/* Attach plane to drm_atomic_state */
7970 	plane_state = drm_atomic_get_plane_state(state, plane);
7971 
7972 	ret = PTR_ERR_OR_ZERO(plane_state);
7973 	if (ret)
7974 		goto out;
7975 
7976 	/* Call commit internally with the state we just constructed */
7977 	ret = drm_atomic_commit(state);
7978 
7979 out:
7980 	drm_atomic_state_put(state);
7981 	if (ret)
7982 		DRM_ERROR("Restoring old state failed with %i\n", ret);
7983 
7984 	return ret;
7985 }
7986 
7987 /*
7988  * This function handles all cases where a set-mode call does not come
7989  * upon hotplug: a display unplugged and then plugged back into the same
7990  * port, or running without usermode desktop-manager support.
7991  */
7992 void dm_restore_drm_connector_state(struct drm_device *dev,
7993 				    struct drm_connector *connector)
7994 {
7995 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7996 	struct amdgpu_crtc *disconnected_acrtc;
7997 	struct dm_crtc_state *acrtc_state;
7998 
7999 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8000 		return;
8001 
8002 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8003 	if (!disconnected_acrtc)
8004 		return;
8005 
8006 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8007 	if (!acrtc_state->stream)
8008 		return;
8009 
8010 	/*
8011 	 * If the previous sink is not released and differs from the current
8012 	 * one, we deduce that we cannot rely on a usermode call to turn on
8013 	 * the display, so we do it here.
8014 	 */
8015 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8016 		dm_force_atomic_commit(&aconnector->base);
8017 }
8018 
8019 /*
8020  * Grabs all modesetting locks to serialize against any blocking commits,
8021  * and waits for completion of all non-blocking commits.
8022  */
8023 static int do_aquire_global_lock(struct drm_device *dev,
8024 				 struct drm_atomic_state *state)
8025 {
8026 	struct drm_crtc *crtc;
8027 	struct drm_crtc_commit *commit;
8028 	long ret;
8029 
8030 	/*
8031 	 * Adding all modeset locks to acquire_ctx ensures that when the
8032 	 * framework releases it, the extra locks we take here are released
8033 	 * along with it.
8034 	 */
8035 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8036 	if (ret)
8037 		return ret;
8038 
8039 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8040 		spin_lock(&crtc->commit_lock);
8041 		commit = list_first_entry_or_null(&crtc->commit_list,
8042 				struct drm_crtc_commit, commit_entry);
8043 		if (commit)
8044 			drm_crtc_commit_get(commit);
8045 		spin_unlock(&crtc->commit_lock);
8046 
8047 		if (!commit)
8048 			continue;
8049 
8050 		/*
8051 		 * Make sure all pending HW programming has completed and
8052 		 * all page flips are done.
8053 		 */
8054 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8055 
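		/*
		 * wait_for_completion_interruptible_timeout() returns the
		 * remaining jiffies on completion, 0 on timeout and
		 * -ERESTARTSYS if interrupted, hence the checks below.
		 */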
8056 		if (ret > 0)
8057 			ret = wait_for_completion_interruptible_timeout(
8058 					&commit->flip_done, 10*HZ);
8059 
8060 		if (ret == 0)
8061 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8062 				  "timed out\n", crtc->base.id, crtc->name);
8063 
8064 		drm_crtc_commit_put(commit);
8065 	}
8066 
8067 	return ret < 0 ? ret : 0;
8068 }
8069 
8070 static void get_freesync_config_for_crtc(
8071 	struct dm_crtc_state *new_crtc_state,
8072 	struct dm_connector_state *new_con_state)
8073 {
8074 	struct mod_freesync_config config = {0};
8075 	struct amdgpu_dm_connector *aconnector =
8076 			to_amdgpu_dm_connector(new_con_state->base.connector);
8077 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8078 	int vrefresh = drm_mode_vrefresh(mode);
8079 
8080 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8081 					vrefresh >= aconnector->min_vfreq &&
8082 					vrefresh <= aconnector->max_vfreq;
8083 
8084 	if (new_crtc_state->vrr_supported) {
8085 		new_crtc_state->stream->ignore_msa_timing_param = true;
8086 		config.state = new_crtc_state->base.vrr_enabled ?
8087 				VRR_STATE_ACTIVE_VARIABLE :
8088 				VRR_STATE_INACTIVE;
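		/* mod_freesync expects refresh rates in micro-Hz, e.g. a
		 * 48 Hz minimum becomes 48000000 uHz.
		 */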
8089 		config.min_refresh_in_uhz =
8090 				aconnector->min_vfreq * 1000000;
8091 		config.max_refresh_in_uhz =
8092 				aconnector->max_vfreq * 1000000;
8093 		config.vsif_supported = true;
8094 		config.btr = true;
8095 	}
8096 
8097 	new_crtc_state->freesync_config = config;
8098 }
8099 
8100 static void reset_freesync_config_for_crtc(
8101 	struct dm_crtc_state *new_crtc_state)
8102 {
8103 	new_crtc_state->vrr_supported = false;
8104 
8105 	memset(&new_crtc_state->vrr_infopacket, 0,
8106 	       sizeof(new_crtc_state->vrr_infopacket));
8107 }
8108 
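/*
 * Called twice from atomic check: once with enable == false to remove streams
 * for changed/disabled CRTCs from the DC context, then with enable == true to
 * add streams back for updated/enabled CRTCs. Sets *lock_and_validation_needed
 * whenever the change requires DC global validation.
 */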
8109 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8110 				struct drm_atomic_state *state,
8111 				struct drm_crtc *crtc,
8112 				struct drm_crtc_state *old_crtc_state,
8113 				struct drm_crtc_state *new_crtc_state,
8114 				bool enable,
8115 				bool *lock_and_validation_needed)
8116 {
8117 	struct dm_atomic_state *dm_state = NULL;
8118 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8119 	struct dc_stream_state *new_stream;
8120 	int ret = 0;
8121 
8122 	/*
8123 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
8124 	 * dc_validation_set, and update only changed items.
8125 	 */
8126 	struct amdgpu_crtc *acrtc = NULL;
8127 	struct amdgpu_dm_connector *aconnector = NULL;
8128 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8129 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8130 
8131 	new_stream = NULL;
8132 
8133 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8134 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8135 	acrtc = to_amdgpu_crtc(crtc);
8136 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8137 
8138 	/* TODO This hack should go away */
8139 	if (aconnector && enable) {
8140 		/* Make sure fake sink is created in plug-in scenario */
8141 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8142 							    &aconnector->base);
8143 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8144 							    &aconnector->base);
8145 
8146 		if (IS_ERR(drm_new_conn_state)) {
8147 			ret = PTR_ERR(drm_new_conn_state);
8148 			goto fail;
8149 		}
8150 
8151 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8152 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8153 
8154 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8155 			goto skip_modeset;
8156 
8157 		new_stream = create_validate_stream_for_sink(aconnector,
8158 							     &new_crtc_state->mode,
8159 							     dm_new_conn_state,
8160 							     dm_old_crtc_state->stream);
8161 
8162 		/*
8163 		 * We can have no stream on ACTION_SET if a display
8164 		 * was disconnected during S3; in this case it is not an
8165 		 * error: the OS will be updated after detection and
8166 		 * will do the right thing on the next atomic commit.
8167 		 */
8168 
8169 		if (!new_stream) {
8170 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8171 					__func__, acrtc->base.base.id);
8172 			ret = -ENOMEM;
8173 			goto fail;
8174 		}
8175 
8176 		/*
8177 		 * TODO: Check VSDB bits to decide whether this should
8178 		 * be enabled or not.
8179 		 */
8180 		new_stream->triggered_crtc_reset.enabled =
8181 			dm->force_timing_sync;
8182 
8183 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8184 
8185 		ret = fill_hdr_info_packet(drm_new_conn_state,
8186 					   &new_stream->hdr_static_metadata);
8187 		if (ret)
8188 			goto fail;
8189 
8190 		/*
8191 		 * If we already removed the old stream from the context
8192 		 * (and set the new stream to NULL) then we can't reuse
8193 		 * the old stream even if the stream and scaling are unchanged.
8194 		 * We'll hit the BUG_ON and black screen.
8195 		 *
8196 		 * TODO: Refactor this function to allow this check to work
8197 		 * in all conditions.
8198 		 */
8199 		if (dm_new_crtc_state->stream &&
8200 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8201 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8202 			new_crtc_state->mode_changed = false;
8203 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8204 					 new_crtc_state->mode_changed);
8205 		}
8206 	}
8207 
8208 	/* mode_changed flag may get updated above, need to check again */
8209 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8210 		goto skip_modeset;
8211 
8212 	DRM_DEBUG_DRIVER(
8213 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8214 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8215 		"connectors_changed:%d\n",
8216 		acrtc->crtc_id,
8217 		new_crtc_state->enable,
8218 		new_crtc_state->active,
8219 		new_crtc_state->planes_changed,
8220 		new_crtc_state->mode_changed,
8221 		new_crtc_state->active_changed,
8222 		new_crtc_state->connectors_changed);
8223 
8224 	/* Remove stream for any changed/disabled CRTC */
8225 	if (!enable) {
8226 
8227 		if (!dm_old_crtc_state->stream)
8228 			goto skip_modeset;
8229 
8230 		ret = dm_atomic_get_state(state, &dm_state);
8231 		if (ret)
8232 			goto fail;
8233 
8234 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8235 				crtc->base.id);
8236 
8237 		/* i.e. reset mode */
8238 		if (dc_remove_stream_from_ctx(
8239 				dm->dc,
8240 				dm_state->context,
8241 				dm_old_crtc_state->stream) != DC_OK) {
8242 			ret = -EINVAL;
8243 			goto fail;
8244 		}
8245 
8246 		dc_stream_release(dm_old_crtc_state->stream);
8247 		dm_new_crtc_state->stream = NULL;
8248 
8249 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8250 
8251 		*lock_and_validation_needed = true;
8252 
8253 	} else {/* Add stream for any updated/enabled CRTC */
8254 		/*
8255 		 * Quick fix to prevent a NULL pointer dereference on new_stream
8256 		 * when newly added MST connectors are not found in the existing
8257 		 * crtc_state in daisy-chained mode. TODO: dig out the root cause.
8258 		 */
8259 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8260 			goto skip_modeset;
8261 
8262 		if (modereset_required(new_crtc_state))
8263 			goto skip_modeset;
8264 
8265 		if (modeset_required(new_crtc_state, new_stream,
8266 				     dm_old_crtc_state->stream)) {
8267 
8268 			WARN_ON(dm_new_crtc_state->stream);
8269 
8270 			ret = dm_atomic_get_state(state, &dm_state);
8271 			if (ret)
8272 				goto fail;
8273 
8274 			dm_new_crtc_state->stream = new_stream;
8275 
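			/*
			 * Take an extra reference for the DC context; the
			 * creation reference from
			 * create_validate_stream_for_sink() is dropped at
			 * skip_modeset below.
			 */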
8276 			dc_stream_retain(new_stream);
8277 
8278 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8279 						crtc->base.id);
8280 
8281 			if (dc_add_stream_to_ctx(
8282 					dm->dc,
8283 					dm_state->context,
8284 					dm_new_crtc_state->stream) != DC_OK) {
8285 				ret = -EINVAL;
8286 				goto fail;
8287 			}
8288 
8289 			*lock_and_validation_needed = true;
8290 		}
8291 	}
8292 
8293 skip_modeset:
8294 	/* Release the creation reference; the DC context holds its own. */
8295 	if (new_stream)
8296 		 dc_stream_release(new_stream);
8297 
8298 	/*
8299 	 * We want to do dc stream updates that do not require a
8300 	 * full modeset below.
8301 	 */
8302 	if (!(enable && aconnector && new_crtc_state->active))
8303 		return 0;
8304 	/*
8305 	 * Given the above conditions, the dc stream state cannot be NULL because:
8306 	 * 1. We're in the process of enabling CRTCs (the stream has just been
8307 	 *    added to the dc context, or is already on it),
8308 	 * 2. the CRTC has a valid connector attached, and
8309 	 * 3. it is currently active and enabled.
8310 	 * => The dc stream state currently exists.
8311 	 */
8312 	BUG_ON(dm_new_crtc_state->stream == NULL);
8313 
8314 	/* Scaling or underscan settings */
8315 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8316 				drm_atomic_crtc_needs_modeset(new_crtc_state))
8317 		update_stream_scaling_settings(
8318 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8319 
8320 	/* ABM settings */
8321 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8322 
8323 	/*
8324 	 * Color management settings. We also update color properties
8325 	 * when a modeset is needed, to ensure it gets reprogrammed.
8326 	 */
8327 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8328 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8329 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8330 		if (ret)
8331 			goto fail;
8332 	}
8333 
8334 	/* Update Freesync settings. */
8335 	get_freesync_config_for_crtc(dm_new_crtc_state,
8336 				     dm_new_conn_state);
8337 
8338 	return ret;
8339 
8340 fail:
8341 	if (new_stream)
8342 		dc_stream_release(new_stream);
8343 	return ret;
8344 }
8345 
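/*
 * Decide whether a plane change can be applied in place or requires the DC
 * plane states on the stream to be removed and recreated.
 */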
8346 static bool should_reset_plane(struct drm_atomic_state *state,
8347 			       struct drm_plane *plane,
8348 			       struct drm_plane_state *old_plane_state,
8349 			       struct drm_plane_state *new_plane_state)
8350 {
8351 	struct drm_plane *other;
8352 	struct drm_plane_state *old_other_state, *new_other_state;
8353 	struct drm_crtc_state *new_crtc_state;
8354 	int i;
8355 
8356 	/*
8357 	 * TODO: Remove this hack once the checks below are sufficient
8358 	 * to determine when we need to reset all the planes on
8359 	 * the stream.
8360 	 */
8361 	if (state->allow_modeset)
8362 		return true;
8363 
8364 	/* Exit early if we know that we're adding or removing the plane. */
8365 	if (old_plane_state->crtc != new_plane_state->crtc)
8366 		return true;
8367 
8368 	/* old crtc == new_crtc == NULL, plane not in context. */
8369 	if (!new_plane_state->crtc)
8370 		return false;
8371 
8372 	new_crtc_state =
8373 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8374 
8375 	if (!new_crtc_state)
8376 		return true;
8377 
8378 	/* CRTC Degamma changes currently require us to recreate planes. */
8379 	if (new_crtc_state->color_mgmt_changed)
8380 		return true;
8381 
8382 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8383 		return true;
8384 
8385 	/*
8386 	 * If there are any new primary or overlay planes being added or
8387 	 * removed then the z-order can potentially change. To ensure
8388 	 * correct z-order and pipe acquisition the current DC architecture
8389 	 * requires us to remove and recreate all existing planes.
8390 	 *
8391 	 * TODO: Come up with a more elegant solution for this.
8392 	 */
8393 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8394 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8395 
8396 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8397 			continue;
8398 
8399 		if (old_other_state->crtc != new_plane_state->crtc &&
8400 		    new_other_state->crtc != new_plane_state->crtc)
8401 			continue;
8402 
8403 		if (old_other_state->crtc != new_other_state->crtc)
8404 			return true;
8405 
8406 		/* Src/dst size and scaling updates. */
8407 		if (old_other_state->src_w != new_other_state->src_w ||
8408 		    old_other_state->src_h != new_other_state->src_h ||
8409 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8410 		    old_other_state->crtc_h != new_other_state->crtc_h)
8411 			return true;
8412 
8413 		/* Rotation / mirroring updates. */
8414 		if (old_other_state->rotation != new_other_state->rotation)
8415 			return true;
8416 
8417 		/* Blending updates. */
8418 		if (old_other_state->pixel_blend_mode !=
8419 		    new_other_state->pixel_blend_mode)
8420 			return true;
8421 
8422 		/* Alpha updates. */
8423 		if (old_other_state->alpha != new_other_state->alpha)
8424 			return true;
8425 
8426 		/* Colorspace changes. */
8427 		if (old_other_state->color_range != new_other_state->color_range ||
8428 		    old_other_state->color_encoding != new_other_state->color_encoding)
8429 			return true;
8430 
8431 		/* Framebuffer checks fall at the end. */
8432 		if (!old_other_state->fb || !new_other_state->fb)
8433 			continue;
8434 
8435 		/* Pixel format changes can require bandwidth updates. */
8436 		if (old_other_state->fb->format != new_other_state->fb->format)
8437 			return true;
8438 
8439 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8440 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8441 
8442 		/* Tiling and DCC changes also require bandwidth updates. */
8443 		if (old_dm_plane_state->tiling_flags !=
8444 		    new_dm_plane_state->tiling_flags)
8445 			return true;
8446 	}
8447 
8448 	return false;
8449 }
8450 
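/*
 * Mirror a DRM plane state transition into the new DC context: remove the DC
 * plane state when disabling (enable == false), or create and attach a new
 * one when enabling (enable == true).
 */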
8451 static int dm_update_plane_state(struct dc *dc,
8452 				 struct drm_atomic_state *state,
8453 				 struct drm_plane *plane,
8454 				 struct drm_plane_state *old_plane_state,
8455 				 struct drm_plane_state *new_plane_state,
8456 				 bool enable,
8457 				 bool *lock_and_validation_needed)
8458 {
8459 
8460 	struct dm_atomic_state *dm_state = NULL;
8461 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8462 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8463 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8464 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8465 	struct amdgpu_crtc *new_acrtc;
8466 	bool needs_reset;
8467 	int ret = 0;
8468 
8469 
8470 	new_plane_crtc = new_plane_state->crtc;
8471 	old_plane_crtc = old_plane_state->crtc;
8472 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8473 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8474 
8475 	/* TODO: Implement a better atomic check for the cursor plane */
8476 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8477 		if (!enable || !new_plane_crtc ||
8478 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8479 			return 0;
8480 
8481 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8482 
8483 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8484 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8485 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8486 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8487 			return -EINVAL;
8488 		}
8489 
8490 		return 0;
8491 	}
8492 
8493 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8494 					 new_plane_state);
8495 
8496 	/* Remove any changed/removed planes */
8497 	if (!enable) {
8498 		if (!needs_reset)
8499 			return 0;
8500 
8501 		if (!old_plane_crtc)
8502 			return 0;
8503 
8504 		old_crtc_state = drm_atomic_get_old_crtc_state(
8505 				state, old_plane_crtc);
8506 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8507 
8508 		if (!dm_old_crtc_state->stream)
8509 			return 0;
8510 
8511 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8512 				plane->base.id, old_plane_crtc->base.id);
8513 
8514 		ret = dm_atomic_get_state(state, &dm_state);
8515 		if (ret)
8516 			return ret;
8517 
8518 		if (!dc_remove_plane_from_context(
8519 				dc,
8520 				dm_old_crtc_state->stream,
8521 				dm_old_plane_state->dc_state,
8522 				dm_state->context)) {
8523 
8524 			return -EINVAL;
8525 		}
8526 
8527 
8528 		dc_plane_state_release(dm_old_plane_state->dc_state);
8529 		dm_new_plane_state->dc_state = NULL;
8530 
8531 		*lock_and_validation_needed = true;
8532 
8533 	} else { /* Add new planes */
8534 		struct dc_plane_state *dc_new_plane_state;
8535 
8536 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8537 			return 0;
8538 
8539 		if (!new_plane_crtc)
8540 			return 0;
8541 
8542 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8543 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8544 
8545 		if (!dm_new_crtc_state->stream)
8546 			return 0;
8547 
8548 		if (!needs_reset)
8549 			return 0;
8550 
8551 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8552 		if (ret)
8553 			return ret;
8554 
8555 		WARN_ON(dm_new_plane_state->dc_state);
8556 
8557 		dc_new_plane_state = dc_create_plane_state(dc);
8558 		if (!dc_new_plane_state)
8559 			return -ENOMEM;
8560 
8561 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8562 				plane->base.id, new_plane_crtc->base.id);
8563 
8564 		ret = fill_dc_plane_attributes(
8565 			drm_to_adev(new_plane_crtc->dev),
8566 			dc_new_plane_state,
8567 			new_plane_state,
8568 			new_crtc_state);
8569 		if (ret) {
8570 			dc_plane_state_release(dc_new_plane_state);
8571 			return ret;
8572 		}
8573 
8574 		ret = dm_atomic_get_state(state, &dm_state);
8575 		if (ret) {
8576 			dc_plane_state_release(dc_new_plane_state);
8577 			return ret;
8578 		}
8579 
8580 		/*
8581 		 * Any atomic check errors that occur after this will
8582 		 * not need a release. The plane state will be attached
8583 		 * to the stream, and therefore part of the atomic
8584 		 * state. It'll be released when the atomic state is
8585 		 * cleaned.
8586 		 */
8587 		if (!dc_add_plane_to_context(
8588 				dc,
8589 				dm_new_crtc_state->stream,
8590 				dc_new_plane_state,
8591 				dm_state->context)) {
8592 
8593 			dc_plane_state_release(dc_new_plane_state);
8594 			return -EINVAL;
8595 		}
8596 
8597 		dm_new_plane_state->dc_state = dc_new_plane_state;
8598 
8599 		/* Tell DC to do a full surface update every time there
8600 		 * is a plane change. Inefficient, but works for now.
8601 		 */
8602 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8603 
8604 		*lock_and_validation_needed = true;
8605 	}
8606 
8607 
8608 	return ret;
8609 }
8610 
8611 #if defined(CONFIG_DRM_AMD_DC_DCN)
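/*
 * On an MST link that uses DSC, a modeset on one CRTC can change the
 * compression and bandwidth budget of every stream sharing the link, so all
 * affected CRTCs must be pulled into the atomic state for revalidation.
 */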
8612 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8613 {
8614 	struct drm_connector *connector;
8615 	struct drm_connector_state *conn_state;
8616 	struct amdgpu_dm_connector *aconnector = NULL;
8617 	int i;
8618 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8619 		if (conn_state->crtc != crtc)
8620 			continue;
8621 
8622 		aconnector = to_amdgpu_dm_connector(connector);
8623 		if (!aconnector->port || !aconnector->mst_port)
8624 			aconnector = NULL;
8625 		else
8626 			break;
8627 	}
8628 
8629 	if (!aconnector)
8630 		return 0;
8631 
8632 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8633 }
8634 #endif
8635 
8636 static int validate_overlay(struct drm_atomic_state *state)
8637 {
8638 	int i;
8639 	struct drm_plane *plane;
8640 	struct drm_plane_state *old_plane_state, *new_plane_state;
8641 	struct drm_plane_state *primary_state, *overlay_state = NULL;
8642 
8643 	/* Find the new overlay plane state, if any changes are being made to it */
8644 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8645 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8646 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8647 				return 0;
8648 
8649 			overlay_state = new_plane_state;
8650 			continue;
8651 		}
8652 	}
8653 
8654 	/* check if we're making changes to the overlay plane */
8655 	if (!overlay_state)
8656 		return 0;
8657 
8658 	/* check if overlay plane is enabled */
8659 	if (!overlay_state->crtc)
8660 		return 0;
8661 
8662 	/* find the primary plane for the CRTC that the overlay is enabled on */
8663 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8664 	if (IS_ERR(primary_state))
8665 		return PTR_ERR(primary_state);
8666 
8667 	/* check if primary plane is enabled */
8668 	if (!primary_state->crtc)
8669 		return 0;
8670 
8671 	/* Perform the bounds check to ensure the overlay plane covers the primary */
8672 	if (primary_state->crtc_x < overlay_state->crtc_x ||
8673 	    primary_state->crtc_y < overlay_state->crtc_y ||
8674 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8675 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8676 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
8677 		return -EINVAL;
8678 	}
8679 
8680 	return 0;
8681 }
8682 
8683 /**
8684  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8685  * @dev: The DRM device
8686  * @state: The atomic state to commit
8687  *
8688  * Validate that the given atomic state is programmable by DC into hardware.
8689  * This involves constructing a &struct dc_state reflecting the new hardware
8690  * state we wish to commit, then querying DC to see if it is programmable. It's
8691  * important not to modify the existing DC state. Otherwise, atomic_check
8692  * may unexpectedly commit hardware changes.
8693  *
8694  * When validating the DC state, it's important that the right locks are
8695  * acquired. For the full-update case, which removes/adds/updates streams on
8696  * one CRTC while flipping on another CRTC, acquiring the global lock
8697  * guarantees that any such full-update commit will wait for completion of
8698  * any outstanding flip using DRM's synchronization events.
8699  *
8700  * Note that DM adds the affected connectors for all CRTCs in state, even when that
8701  * might not seem necessary. This is because DC stream creation requires the
8702  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8703  * be possible but non-trivial - a possible TODO item.
8704  *
8705  * Return: 0 on success, negative error code if validation failed.
8706  */
8707 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8708 				  struct drm_atomic_state *state)
8709 {
8710 	struct amdgpu_device *adev = drm_to_adev(dev);
8711 	struct dm_atomic_state *dm_state = NULL;
8712 	struct dc *dc = adev->dm.dc;
8713 	struct drm_connector *connector;
8714 	struct drm_connector_state *old_con_state, *new_con_state;
8715 	struct drm_crtc *crtc;
8716 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8717 	struct drm_plane *plane;
8718 	struct drm_plane_state *old_plane_state, *new_plane_state;
8719 	enum dc_status status;
8720 	int ret, i;
8721 	bool lock_and_validation_needed = false;
8722 
8723 	amdgpu_check_debugfs_connector_property_change(adev, state);
8724 
8725 	ret = drm_atomic_helper_check_modeset(dev, state);
8726 	if (ret)
8727 		goto fail;
8728 
8729 	/* Check connector changes */
8730 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8731 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8732 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8733 
8734 		/* Skip connectors that are disabled or part of modeset already. */
8735 		if (!old_con_state->crtc && !new_con_state->crtc)
8736 			continue;
8737 
8738 		if (!new_con_state->crtc)
8739 			continue;
8740 
8741 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8742 		if (IS_ERR(new_crtc_state)) {
8743 			ret = PTR_ERR(new_crtc_state);
8744 			goto fail;
8745 		}
8746 
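		/*
		 * An ABM (Adaptive Backlight Management) level change alone
		 * must still be treated as a connector change so that the
		 * stream gets reprogrammed.
		 */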
8747 		if (dm_old_con_state->abm_level !=
8748 		    dm_new_con_state->abm_level)
8749 			new_crtc_state->connectors_changed = true;
8750 	}
8751 
8752 #if defined(CONFIG_DRM_AMD_DC_DCN)
8753 	if (dc_resource_is_dsc_encoding_supported(dc)) {
8754 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8755 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8756 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8757 				if (ret)
8758 					goto fail;
8759 			}
8760 		}
8761 	}
8762 #endif
8763 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8764 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8765 		    !new_crtc_state->color_mgmt_changed &&
8766 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8767 			continue;
8768 
8769 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8770 		if (ret)
8771 			goto fail;
8772 
8773 		if (!new_crtc_state->enable)
8774 			continue;
8775 
8776 		ret = drm_atomic_add_affected_connectors(state, crtc);
8777 		if (ret)
8778 			goto fail;
8779 
8780 		ret = drm_atomic_add_affected_planes(state, crtc);
8781 		if (ret)
8782 			goto fail;
8783 	}
8784 
8785 	/*
8786 	 * Add all primary and overlay planes on the CRTC to the state
8787 	 * whenever a plane is enabled to maintain correct z-ordering
8788 	 * and to enable fast surface updates.
8789 	 */
8790 	drm_for_each_crtc(crtc, dev) {
8791 		bool modified = false;
8792 
8793 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8794 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8795 				continue;
8796 
8797 			if (new_plane_state->crtc == crtc ||
8798 			    old_plane_state->crtc == crtc) {
8799 				modified = true;
8800 				break;
8801 			}
8802 		}
8803 
8804 		if (!modified)
8805 			continue;
8806 
8807 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8808 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8809 				continue;
8810 
8811 			new_plane_state =
8812 				drm_atomic_get_plane_state(state, plane);
8813 
8814 			if (IS_ERR(new_plane_state)) {
8815 				ret = PTR_ERR(new_plane_state);
8816 				goto fail;
8817 			}
8818 		}
8819 	}
8820 
8821 	/* Prepass for updating tiling flags on new planes. */
8822 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8823 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8824 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8825 
8826 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8827 				  &new_dm_plane_state->tmz_surface);
8828 		if (ret)
8829 			goto fail;
8830 	}
8831 
8832 	/* Remove existing planes if they are modified */
8833 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8834 		ret = dm_update_plane_state(dc, state, plane,
8835 					    old_plane_state,
8836 					    new_plane_state,
8837 					    false,
8838 					    &lock_and_validation_needed);
8839 		if (ret)
8840 			goto fail;
8841 	}
8842 
8843 	/* Disable all crtcs which require disable */
8844 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8845 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8846 					   old_crtc_state,
8847 					   new_crtc_state,
8848 					   false,
8849 					   &lock_and_validation_needed);
8850 		if (ret)
8851 			goto fail;
8852 	}
8853 
8854 	/* Enable all crtcs which require enable */
8855 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8856 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8857 					   old_crtc_state,
8858 					   new_crtc_state,
8859 					   true,
8860 					   &lock_and_validation_needed);
8861 		if (ret)
8862 			goto fail;
8863 	}
8864 
8865 	ret = validate_overlay(state);
8866 	if (ret)
8867 		goto fail;
8868 
8869 	/* Add new/modified planes */
8870 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8871 		ret = dm_update_plane_state(dc, state, plane,
8872 					    old_plane_state,
8873 					    new_plane_state,
8874 					    true,
8875 					    &lock_and_validation_needed);
8876 		if (ret)
8877 			goto fail;
8878 	}
8879 
8880 	/* Run this here since we want to validate the streams we created */
8881 	ret = drm_atomic_helper_check_planes(dev, state);
8882 	if (ret)
8883 		goto fail;
8884 
8885 	if (state->legacy_cursor_update) {
8886 		/*
8887 		 * This is a fast cursor update coming from the plane update
8888 		 * helper, check if it can be done asynchronously for better
8889 		 * performance.
8890 		 */
8891 		state->async_update =
8892 			!drm_atomic_helper_async_check(dev, state);
8893 
8894 		/*
8895 		 * Skip the remaining global validation if this is an async
8896 		 * update. Cursor updates can be done without affecting
8897 		 * state or bandwidth calcs and this avoids the performance
8898 		 * penalty of locking the private state object and
8899 		 * allocating a new dc_state.
8900 		 */
8901 		if (state->async_update)
8902 			return 0;
8903 	}
8904 
8905 	/* Check scaling and underscan changes */
8906 	/* TODO: Scaling changes validation was removed due to the inability to
8907 	 * commit a new stream into the context w/o causing a full reset. Need to
8908 	 * decide how to handle this.
8909 	 */
8910 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8911 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8912 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8913 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8914 
8915 		/* Skip any modesets/resets */
8916 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8917 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8918 			continue;
8919 
8920 		/* Skip anything that is not a scaling or underscan change */
8921 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8922 			continue;
8923 
8924 		lock_and_validation_needed = true;
8925 	}
8926 
8927 	/*
8928 	 * Streams and planes are reset when there are changes that affect
8929 	 * bandwidth. Anything that affects bandwidth needs to go through
8930 	 * DC global validation to ensure that the configuration can be applied
8931 	 * to hardware.
8932 	 *
8933 	 * We have to currently stall out here in atomic_check for outstanding
8934 	 * commits to finish in this case because our IRQ handlers reference
8935 	 * DRM state directly - we can end up disabling interrupts too early
8936 	 * if we don't.
8937 	 *
8938 	 * TODO: Remove this stall and drop DM state private objects.
8939 	 */
8940 	if (lock_and_validation_needed) {
8941 		ret = dm_atomic_get_state(state, &dm_state);
8942 		if (ret)
8943 			goto fail;
8944 
8945 		ret = do_aquire_global_lock(dev, state);
8946 		if (ret)
8947 			goto fail;
8948 
8949 #if defined(CONFIG_DRM_AMD_DC_DCN)
8950 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
8951 			ret = -EINVAL;
			goto fail;
		}
8952 
8953 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8954 		if (ret)
8955 			goto fail;
8956 #endif
8957 
8958 		/*
8959 		 * Perform validation of MST topology in the state:
8960 		 * We need to perform MST atomic check before calling
8961 		 * dc_validate_global_state(), or there is a chance
8962 		 * to get stuck in an infinite loop and hang eventually.
8963 		 */
8964 		ret = drm_dp_mst_atomic_check(state);
8965 		if (ret)
8966 			goto fail;
8967 		status = dc_validate_global_state(dc, dm_state->context, false);
8968 		if (status != DC_OK) {
8969 			drm_dbg_atomic(dev,
8970 				       "DC global validation failure: %s (%d)",
8971 				       dc_status_to_str(status), status);
8972 			ret = -EINVAL;
8973 			goto fail;
8974 		}
8975 	} else {
8976 		/*
8977 		 * The commit is a fast update. Fast updates shouldn't change
8978 		 * the DC context, affect global validation, and can have their
8979 		 * commit work done in parallel with other commits not touching
8980 		 * the same resource. If we have a new DC context as part of
8981 		 * the DM atomic state from validation we need to free it and
8982 		 * retain the existing one instead.
8983 		 *
8984 		 * Furthermore, since the DM atomic state only contains the DC
8985 		 * context and can safely be annulled, we can free the state
8986 		 * and clear the associated private object now to free
8987 		 * some memory and avoid a possible use-after-free later.
8988 		 */
8989 
8990 		for (i = 0; i < state->num_private_objs; i++) {
8991 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8992 
8993 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8994 				int j = state->num_private_objs-1;
8995 
8996 				dm_atomic_destroy_state(obj,
8997 						state->private_objs[i].state);
8998 
8999 				/* If i is not at the end of the array then the
9000 				 * last element needs to be moved to where i was
9001 				 * before the array can safely be truncated.
9002 				 */
9003 				if (i != j)
9004 					state->private_objs[i] =
9005 						state->private_objs[j];
9006 
9007 				state->private_objs[j].ptr = NULL;
9008 				state->private_objs[j].state = NULL;
9009 				state->private_objs[j].old_state = NULL;
9010 				state->private_objs[j].new_state = NULL;
9011 
9012 				state->num_private_objs = j;
9013 				break;
9014 			}
9015 		}
9016 	}
9017 
9018 	/* Store the overall update type for use later in atomic check. */
9019 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9020 		struct dm_crtc_state *dm_new_crtc_state =
9021 			to_dm_crtc_state(new_crtc_state);
9022 
9023 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9024 							 UPDATE_TYPE_FULL :
9025 							 UPDATE_TYPE_FAST;
9026 	}
9027 
9028 	/* Must be success */
9029 	WARN_ON(ret);
9030 	return ret;
9031 
9032 fail:
9033 	if (ret == -EDEADLK)
9034 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9035 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9036 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9037 	else
9038 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9039 
9040 	return ret;
9041 }
9042 
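/*
 * A sink that can ignore the MSA timing parameters (DP_MSA_TIMING_PAR_IGNORED
 * in the DP_DOWN_STREAM_PORT_COUNT DPCD register) can be driven at a variable
 * refresh rate, which is a prerequisite for FreeSync over DP/eDP.
 */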
9043 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9044 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9045 {
9046 	uint8_t dpcd_data;
9047 	bool capable = false;
9048 
9049 	if (amdgpu_dm_connector->dc_link &&
9050 		dm_helpers_dp_read_dpcd(
9051 				NULL,
9052 				amdgpu_dm_connector->dc_link,
9053 				DP_DOWN_STREAM_PORT_COUNT,
9054 				&dpcd_data,
9055 				sizeof(dpcd_data))) {
9056 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9057 	}
9058 
9059 	return capable;
9060 }
9061 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9062 					struct edid *edid)
9063 {
9064 	int i;
9065 	bool edid_check_required;
9066 	struct detailed_timing *timing;
9067 	struct detailed_non_pixel *data;
9068 	struct detailed_data_monitor_range *range;
9069 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9070 			to_amdgpu_dm_connector(connector);
9071 	struct dm_connector_state *dm_con_state = NULL;
9072 
9073 	struct drm_device *dev = connector->dev;
9074 	struct amdgpu_device *adev = drm_to_adev(dev);
9075 	bool freesync_capable = false;
9076 
9077 	if (!connector->state) {
9078 		DRM_ERROR("%s - Connector has no state", __func__);
9079 		goto update;
9080 	}
9081 
9082 	if (!edid) {
9083 		dm_con_state = to_dm_connector_state(connector->state);
9084 
9085 		amdgpu_dm_connector->min_vfreq = 0;
9086 		amdgpu_dm_connector->max_vfreq = 0;
9087 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9088 
9089 		goto update;
9090 	}
9091 
9092 	dm_con_state = to_dm_connector_state(connector->state);
9093 
9094 	edid_check_required = false;
9095 	if (!amdgpu_dm_connector->dc_sink) {
9096 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9097 		goto update;
9098 	}
9099 	if (!adev->dm.freesync_module)
9100 		goto update;
9101 	/*
9102 	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
9103 	 */
9104 	if (edid) {
9105 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9106 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9107 			edid_check_required = is_dp_capable_without_timing_msa(
9108 						adev->dm.dc,
9109 						amdgpu_dm_connector);
9110 		}
9111 	}
9112 	if (edid_check_required && (edid->version > 1 ||
9113 	   (edid->version == 1 && edid->revision > 1))) {
9114 		for (i = 0; i < 4; i++) {
9115 
9116 			timing	= &edid->detailed_timings[i];
9117 			data	= &timing->data.other_data;
9118 			range	= &data->data.range;
9119 			/*
9120 			 * Check if monitor has continuous frequency mode
9121 			 */
9122 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9123 				continue;
9124 			/*
9125 			 * Check for flag range limits only. If flag == 1 then
9126 			 * no additional timing information is provided.
9127 			 * Default GTF, GTF Secondary curve and CVT are not
9128 			 * supported.
9129 			 */
9130 			if (range->flags != 1)
9131 				continue;
9132 
9133 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9134 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9135 			amdgpu_dm_connector->pixel_clock_mhz =
9136 				range->pixel_clock_mhz * 10;
9137 			break;
9138 		}
9139 
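		/*
		 * Only report FreeSync capability when the monitor offers a
		 * usable VRR window of more than 10 Hz.
		 */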
9140 		if (amdgpu_dm_connector->max_vfreq -
9141 		    amdgpu_dm_connector->min_vfreq > 10) {
9142 
9143 			freesync_capable = true;
9144 		}
9145 	}
9146 
9147 update:
9148 	if (dm_con_state)
9149 		dm_con_state->freesync_capable = freesync_capable;
9150 
9151 	if (connector->vrr_capable_property)
9152 		drm_connector_set_vrr_capable_property(connector,
9153 						       freesync_capable);
9154 }
9155 
9156 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9157 {
9158 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9159 
9160 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9161 		return;
9162 	if (link->type == dc_connection_none)
9163 		return;
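	/*
	 * DP_PSR_SUPPORT (DPCD 0x070) reports the sink's PSR version;
	 * 0 means the sink does not support PSR.
	 */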
9164 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9165 					dpcd_data, sizeof(dpcd_data))) {
9166 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9167 
9168 		if (dpcd_data[0] == 0) {
9169 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9170 			link->psr_settings.psr_feature_enabled = false;
9171 		} else {
9172 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9173 			link->psr_settings.psr_feature_enabled = true;
9174 		}
9175 
9176 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9177 	}
9178 }
9179 
9180 /*
9181  * amdgpu_dm_link_setup_psr() - configure the PSR link
9182  * @stream: stream state
9183  *
9184  * Return: true on success
9185  */
9186 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9187 {
9188 	struct dc_link *link = NULL;
9189 	struct psr_config psr_config = {0};
9190 	struct psr_context psr_context = {0};
9191 	bool ret = false;
9192 
9193 	if (stream == NULL)
9194 		return false;
9195 
9196 	link = stream->link;
9197 
9198 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9199 
9200 	if (psr_config.psr_version > 0) {
9201 		psr_config.psr_exit_link_training_required = 0x1;
9202 		psr_config.psr_frame_capture_indication_req = 0;
9203 		psr_config.psr_rfb_setup_time = 0x37;
9204 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9205 		psr_config.allow_smu_optimizations = 0x0;
9206 
9207 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9208 
9209 	}
9210 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9211 
9212 	return ret;
9213 }
9214 
9215 /*
9216  * amdgpu_dm_psr_enable() - enable psr f/w
9217  * @stream: stream state
9218  *
9219  * Return: true on success
9220  */
9221 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9222 {
9223 	struct dc_link *link = stream->link;
9224 	unsigned int vsync_rate_hz = 0;
9225 	struct dc_static_screen_params params = {0};
9226 	/* Calculate number of static frames before generating interrupt to
9227 	 * enter PSR.
9228 	 */
9229 	/* Init with a fail-safe default of 2 static frames */
9230 	unsigned int num_frames_static = 2;
9231 
9232 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9233 
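	/*
	 * pix_clk_100hz is in units of 100 Hz, so scale by 100 to get Hz;
	 * vsync rate = pixel clock / (h_total * v_total).
	 */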
9234 	vsync_rate_hz = div64_u64(div64_u64((
9235 			stream->timing.pix_clk_100hz * 100),
9236 			stream->timing.v_total),
9237 			stream->timing.h_total);
9238 
9239 	/*
9240 	 * Round up: calculate the number of frames such that at least
9241 	 * 30 ms of time has passed.
9242 	 */
9243 	if (vsync_rate_hz != 0) {
9244 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9245 		num_frames_static = (30000 / frame_time_microsec) + 1;
9246 	}
9247 
9248 	params.triggers.cursor_update = true;
9249 	params.triggers.overlay_update = true;
9250 	params.triggers.surface_update = true;
9251 	params.num_frames = num_frames_static;
9252 
9253 	dc_stream_set_static_screen_params(link->ctx->dc,
9254 					   &stream, 1,
9255 					   &params);
9256 
9257 	return dc_link_set_psr_allow_active(link, true, false);
9258 }
9259 
9260 /*
9261  * amdgpu_dm_psr_disable() - disable psr f/w
9262  * @stream:  stream state
9263  *
9264  * Return: true on success
9265  */
9266 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9267 {
9268 
9269 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9270 
9271 	return dc_link_set_psr_allow_active(stream->link, false, true);
9272 }
9273 
9274 /*
9275  * amdgpu_dm_psr_disable_all() - disable psr f/w
9276  * if psr is enabled on any stream
9277  *
9278  * Return: true if success
9279  */
9280 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9281 {
9282 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9283 	return dc_set_psr_allow_active(dm->dc, false);
9284 }
9285 
9286 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9287 {
9288 	struct amdgpu_device *adev = drm_to_adev(dev);
9289 	struct dc *dc = adev->dm.dc;
9290 	int i;
9291 
9292 	mutex_lock(&adev->dm.dc_lock);
9293 	if (dc->current_state) {
9294 		for (i = 0; i < dc->current_state->stream_count; ++i)
9295 			dc->current_state->streams[i]
9296 				->triggered_crtc_reset.enabled =
9297 				adev->dm.force_timing_sync;
9298 
9299 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9300 		dc_trigger_sync(dc, dc->current_state);
9301 	}
9302 	mutex_unlock(&adev->dm.dc_lock);
9303 }
9304