/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
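/*
 * Reflect the dongle type reported in the link's DPCD caps into the DRM
 * DP subconnector property. Only DisplayPort connectors are updated;
 * with no sink attached the property is set to "Unknown".
 */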
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * Initializes the drm_device display related structures, based on the
 * information provided by DAL. The drm structures are: drm_crtc,
 * drm_connector, drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: amdgpu device to query
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: number of vertical blanks, or 0 if @crtc is out of range or
 * has no stream.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
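/*
 * Read back the current scanout position for a CRTC. The positions are
 * packed into *position (vpos in the low 16 bits, hpos in the high 16
 * bits) and the vblank start/end lines into *vbl, matching the
 * register-style layout the base driver expects.
 */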
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
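/*
 * Look up the amdgpu_crtc driven by a given OTG (output timing generator)
 * instance. An instance of -1 triggers a warning and falls back to the
 * first CRTC; NULL is returned when no CRTC matches.
 */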
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
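/*
 * VRR (variable refresh rate) counts as active in both the variable and
 * the fixed active freesync states. The _irq variant reads the IRQ-safe
 * copy of the config kept in dm_irq_params; the other variant reads it
 * from a DM CRTC state.
 */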
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
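/*
 * Handle the VUPDATE interrupt. In VRR mode, core vblank handling is
 * deferred to this point, after the end of front-porch, so that vblank
 * timestamps are valid; BTR (below-the-range) processing also happens
 * here on pre-DCE12 ASICs.
 */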
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of
		 * front-porch in VRR mode, as vblank timestamping only gives
		 * valid results once scanout is past the front-porch. This
		 * will also deliver any page-flip completion events that were
		 * queued to us if a pageflip happened inside the front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
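/*
 * Audio component callback: copy the ELD for the connector whose audio
 * endpoint matches @port into @buf. Sets *enabled and returns the ELD
 * size when a matching connector is found, 0 otherwise.
 */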
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
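/*
 * Register the DM audio component and initialize one audio pin per audio
 * endpoint exposed by the DC resource pool. No-op when amdgpu audio
 * support is disabled.
 */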
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
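/*
 * Hardware init for the DMUB (display microcontroller) service: copies
 * the firmware and VBIOS into the reserved framebuffer windows, clears
 * the mailbox, tracebuffer and fw-state windows, programs the hardware
 * parameters, then waits for the firmware to auto-load. Returns 0 on
 * ASICs without DMUB support.
 */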
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			crtc_state->mode_changed = true;
		}
	}
}
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
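/*
 * Walk the quirk table above and report whether memory stutter mode must
 * be disabled for this board, matched on PCI vendor/device, subsystem
 * IDs and revision.
 */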
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	/* Disable vblank IRQs aggressively for power-saving */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
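/*
 * Request and validate the DMCU firmware where one is needed (Navi12 and
 * the Picasso/Raven2 variants of Raven); ASICs that don't load a
 * discrete DMCU firmware return early. The ERAM and interrupt-vector
 * sections are registered with the PSP firmware loader.
 */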
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
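/*
 * Software init for the DMUB service: pick and validate the firmware for
 * the ASIC, create the dmub_srv instance, compute the region layout,
 * back it with a VRAM buffer (TODO: move this into GART) and fill in the
 * framebuffer info later consumed by dm_dmub_hw_init().
 */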
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
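/*
 * Late init: program the ABM (adaptive backlight management) IRAM with a
 * linear backlight LUT and ramping parameters, via the DMCU when present
 * or via the DMUB on ASICs where ABM runs there, then kick off MST
 * topology detection on all connectors.
 */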
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
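/*
 * Suspend/resume handling for DP MST topologies. On suspend every MST
 * manager is suspended; on resume, a topology that fails to come back is
 * torn down and a hotplug event is sent so userspace reprobes.
 */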
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from
	 * s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);
1684 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1685 				 struct dc_state *state, bool enable)
1686 {
1687 	enum dc_irq_source irq_source;
1688 	struct amdgpu_crtc *acrtc;
1689 	int rc = -EBUSY;
1690 	int i = 0;
1691 
1692 	for (i = 0; i < state->stream_count; i++) {
1693 		acrtc = get_crtc_by_otg_inst(
1694 				adev, state->stream_status[i].primary_otg_inst);
1695 
1696 		if (acrtc && state->stream_status[i].plane_count != 0) {
1697 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1698 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1699 			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1700 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1701 			if (rc)
1702 				DRM_WARN("Failed to %s pflip interrupts\n",
1703 					 enable ? "enable" : "disable");
1704 
1705 			if (enable) {
1706 				rc = dm_enable_vblank(&acrtc->base);
1707 				if (rc)
1708 					DRM_WARN("Failed to enable vblank interrupts\n");
1709 			} else {
1710 				dm_disable_vblank(&acrtc->base);
1711 			}
1712 
1713 		}
1714 	}
1715 
1716 }
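
/*
 * This helper is symmetric across a GPU reset: dm_suspend() calls it with
 * enable == false on the cached dc_state before committing zero streams,
 * and dm_resume() calls it with enable == true after recommitting that
 * state. An illustrative pairing (a sketch, not additional driver code):
 *
 *	dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
 *	// ... GPU reset runs; dm_resume() recommits the cached state ...
 *	dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
 */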
1717 
1718 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1719 {
1720 	struct dc_state *context = NULL;
1721 	enum dc_status res = DC_ERROR_UNEXPECTED;
1722 	int i;
1723 	struct dc_stream_state *del_streams[MAX_PIPES];
1724 	int del_streams_count = 0;
1725 
1726 	memset(del_streams, 0, sizeof(del_streams));
1727 
1728 	context = dc_create_state(dc);
1729 	if (context == NULL)
1730 		goto context_alloc_fail;
1731 
1732 	dc_resource_state_copy_construct_current(dc, context);
1733 
1734 	/* First, remove all streams from the context */
1735 	for (i = 0; i < context->stream_count; i++) {
1736 		struct dc_stream_state *stream = context->streams[i];
1737 
1738 		del_streams[del_streams_count++] = stream;
1739 	}
1740 
1741 	/* Remove all planes for removed streams and then remove the streams */
1742 	for (i = 0; i < del_streams_count; i++) {
1743 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744 			res = DC_FAIL_DETACH_SURFACES;
1745 			goto fail;
1746 		}
1747 
1748 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749 		if (res != DC_OK)
1750 			goto fail;
1751 	}
1752 
1753 
1754 	res = dc_validate_global_state(dc, context, false);
1755 
1756 	if (res != DC_OK) {
1757 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1758 		goto fail;
1759 	}
1760 
1761 	res = dc_commit_state(dc, context);
1762 
1763 fail:
1764 	dc_release_state(context);
1765 
1766 context_alloc_fail:
1767 	return res;
1768 }
1769 
1770 static int dm_suspend(void *handle)
1771 {
1772 	struct amdgpu_device *adev = handle;
1773 	struct amdgpu_display_manager *dm = &adev->dm;
1774 	int ret = 0;
1775 
1776 	if (amdgpu_in_reset(adev)) {
1777 		mutex_lock(&dm->dc_lock);
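		/*
		 * Note: dm->dc_lock is intentionally left held across the GPU
		 * reset; dm_resume() releases it after recommitting the
		 * cached state.
		 */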
1778 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1779 
1780 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1781 
1782 		amdgpu_dm_commit_zero_streams(dm->dc);
1783 
1784 		amdgpu_dm_irq_suspend(adev);
1785 
1786 		return ret;
1787 	}
1788 
1789 	WARN_ON(adev->dm.cached_state);
1790 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1791 
1792 	s3_handle_mst(adev_to_drm(adev), true);
1793 
1794 	amdgpu_dm_irq_suspend(adev);
1795 
1796 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1797 
1798 	return 0;
1799 }
1800 
1801 static struct amdgpu_dm_connector *
1802 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1803 					     struct drm_crtc *crtc)
1804 {
1805 	uint32_t i;
1806 	struct drm_connector_state *new_con_state;
1807 	struct drm_connector *connector;
1808 	struct drm_crtc *crtc_from_state;
1809 
1810 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1811 		crtc_from_state = new_con_state->crtc;
1812 
1813 		if (crtc_from_state == crtc)
1814 			return to_amdgpu_dm_connector(connector);
1815 	}
1816 
1817 	return NULL;
1818 }
1819 
1820 static void emulated_link_detect(struct dc_link *link)
1821 {
1822 	struct dc_sink_init_data sink_init_data = { 0 };
1823 	struct display_sink_capability sink_caps = { 0 };
1824 	enum dc_edid_status edid_status;
1825 	struct dc_context *dc_ctx = link->ctx;
1826 	struct dc_sink *sink = NULL;
1827 	struct dc_sink *prev_sink = NULL;
1828 
1829 	link->type = dc_connection_none;
1830 	prev_sink = link->local_sink;
1831 
1832 	if (prev_sink)
1833 		dc_sink_release(prev_sink);
1834 
1835 	switch (link->connector_signal) {
1836 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1837 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1838 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1839 		break;
1840 	}
1841 
1842 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1843 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1845 		break;
1846 	}
1847 
1848 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1849 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1851 		break;
1852 	}
1853 
1854 	case SIGNAL_TYPE_LVDS: {
1855 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1857 		break;
1858 	}
1859 
1860 	case SIGNAL_TYPE_EDP: {
1861 		sink_caps.transaction_type =
1862 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1863 		sink_caps.signal = SIGNAL_TYPE_EDP;
1864 		break;
1865 	}
1866 
1867 	case SIGNAL_TYPE_DISPLAY_PORT: {
1868 		sink_caps.transaction_type =
1869 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1870 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1871 		break;
1872 	}
1873 
1874 	default:
1875 		DC_ERROR("Invalid connector type! signal:%d\n",
1876 			link->connector_signal);
1877 		return;
1878 	}
1879 
1880 	sink_init_data.link = link;
1881 	sink_init_data.sink_signal = sink_caps.signal;
1882 
1883 	sink = dc_sink_create(&sink_init_data);
1884 	if (!sink) {
1885 		DC_ERROR("Failed to create sink!\n");
1886 		return;
1887 	}
1888 
1889 	/* dc_sink_create returns a new reference */
1890 	link->local_sink = sink;
1891 
1892 	edid_status = dm_helpers_read_local_edid(
1893 			link->ctx,
1894 			link,
1895 			sink);
1896 
1897 	if (edid_status != EDID_OK)
1898 		DC_ERROR("Failed to read EDID\n");
1899 
1900 }
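
/*
 * Callers use emulated_link_detect() when the connector is forced on but no
 * physical sink is detected, mirroring the pattern in dm_resume() and
 * handle_hpd_irq() below:
 *
 *	if (aconnector->base.force && new_connection_type == dc_connection_none)
 *		emulated_link_detect(aconnector->dc_link);
 *	else
 *		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 */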
1901 
1902 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1903 				     struct amdgpu_display_manager *dm)
1904 {
1905 	struct {
1906 		struct dc_surface_update surface_updates[MAX_SURFACES];
1907 		struct dc_plane_info plane_infos[MAX_SURFACES];
1908 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1909 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1910 		struct dc_stream_update stream_update;
1911 	} *bundle;
1912 	int k, m;
1913 
1914 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1915 
1916 	if (!bundle) {
1917 		dm_error("Failed to allocate update bundle\n");
1918 		goto cleanup;
1919 	}
1920 
1921 	for (k = 0; k < dc_state->stream_count; k++) {
1922 		bundle->stream_update.stream = dc_state->streams[k];
1923 
1924 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1925 			bundle->surface_updates[m].surface =
1926 				dc_state->stream_status->plane_states[m];
1927 			bundle->surface_updates[m].surface->force_full_update =
1928 				true;
1929 		}
1930 		dc_commit_updates_for_stream(
1931 			dm->dc, bundle->surface_updates,
1932 			dc_state->stream_status->plane_count,
1933 			dc_state->streams[k], &bundle->stream_update, dc_state);
1934 	}
1935 
1936 cleanup:
1937 	kfree(bundle);
1938 
1939 	return;
1940 }
1941 
1942 static void dm_set_dpms_off(struct dc_link *link)
1943 {
1944 	struct dc_stream_state *stream_state;
1945 	struct amdgpu_dm_connector *aconnector = link->priv;
1946 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1947 	struct dc_stream_update stream_update;
1948 	bool dpms_off = true;
1949 
1950 	memset(&stream_update, 0, sizeof(stream_update));
1951 	stream_update.dpms_off = &dpms_off;
1952 
1953 	mutex_lock(&adev->dm.dc_lock);
1954 	stream_state = dc_stream_find_from_link(link);
1955 
1956 	if (stream_state == NULL) {
1957 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1958 		mutex_unlock(&adev->dm.dc_lock);
1959 		return;
1960 	}
1961 
1962 	stream_update.stream = stream_state;
1963 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1964 				     stream_state, &stream_update,
1965 				     stream_state->ctx->dc->current_state);
1966 	mutex_unlock(&adev->dm.dc_lock);
1967 }
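
/*
 * dm_set_dpms_off() shows the general dc_stream_update recipe: zero the
 * update struct, point only the fields to be changed at local storage, and
 * commit; fields left NULL are not applied. A hedged sketch of the same
 * recipe for another field (abm_level is used purely as an illustration):
 *
 *	struct dc_stream_update update = { 0 };
 *	unsigned int abm = 3;
 *
 *	update.stream = stream_state;
 *	update.abm_level = &abm;
 *	dc_commit_updates_for_stream(dc, NULL, 0, stream_state, &update,
 *				     dc->current_state);
 */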
1968 
1969 static int dm_resume(void *handle)
1970 {
1971 	struct amdgpu_device *adev = handle;
1972 	struct drm_device *ddev = adev_to_drm(adev);
1973 	struct amdgpu_display_manager *dm = &adev->dm;
1974 	struct amdgpu_dm_connector *aconnector;
1975 	struct drm_connector *connector;
1976 	struct drm_connector_list_iter iter;
1977 	struct drm_crtc *crtc;
1978 	struct drm_crtc_state *new_crtc_state;
1979 	struct dm_crtc_state *dm_new_crtc_state;
1980 	struct drm_plane *plane;
1981 	struct drm_plane_state *new_plane_state;
1982 	struct dm_plane_state *dm_new_plane_state;
1983 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1984 	enum dc_connection_type new_connection_type = dc_connection_none;
1985 	struct dc_state *dc_state;
1986 	int i, r, j;
1987 
1988 	if (amdgpu_in_reset(adev)) {
1989 		dc_state = dm->cached_dc_state;
1990 
1991 		r = dm_dmub_hw_init(adev);
1992 		if (r)
1993 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1994 
1995 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1996 		dc_resume(dm->dc);
1997 
1998 		amdgpu_dm_irq_resume_early(adev);
1999 
2000 		for (i = 0; i < dc_state->stream_count; i++) {
2001 			dc_state->streams[i]->mode_changed = true;
2002 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2003 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2004 					= 0xffffffff;
2005 			}
2006 		}
2007 
2008 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2009 
2010 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2011 
2012 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2013 
2014 		dc_release_state(dm->cached_dc_state);
2015 		dm->cached_dc_state = NULL;
2016 
2017 		amdgpu_dm_irq_resume_late(adev);
2018 
2019 		mutex_unlock(&dm->dc_lock);
2020 
2021 		return 0;
2022 	}
2023 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2024 	dc_release_state(dm_state->context);
2025 	dm_state->context = dc_create_state(dm->dc);
2026 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2027 	dc_resource_state_construct(dm->dc, dm_state->context);
2028 
2029 	/* Before powering on DC we need to re-initialize DMUB. */
2030 	r = dm_dmub_hw_init(adev);
2031 	if (r)
2032 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2033 
2034 	/* power on hardware */
2035 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2036 
2037 	/* program HPD filter */
2038 	dc_resume(dm->dc);
2039 
2040 	/*
2041 	 * early enable HPD Rx IRQ, should be done before set mode as short
2042 	 * pulse interrupts are used for MST
2043 	 */
2044 	amdgpu_dm_irq_resume_early(adev);
2045 
2046 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2047 	s3_handle_mst(ddev, false);
2048 
2049 	/* Do detection */
2050 	drm_connector_list_iter_begin(ddev, &iter);
2051 	drm_for_each_connector_iter(connector, &iter) {
2052 		aconnector = to_amdgpu_dm_connector(connector);
2053 
2054 		if (!aconnector->dc_link)
2055 			continue;
2056 
2057 		/*
2058 		 * This is the case when traversing through already created
2059 		 * MST connectors; they should be skipped.
2060 		 */
2061 		if (aconnector->dc_link->type == dc_connection_mst_branch)
2062 			continue;
2063 
2064 		mutex_lock(&aconnector->hpd_lock);
2065 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2066 			DRM_ERROR("KMS: Failed to detect connector\n");
2067 
2068 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2069 			emulated_link_detect(aconnector->dc_link);
2070 		else
2071 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2072 
2073 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2074 			aconnector->fake_enable = false;
2075 
2076 		if (aconnector->dc_sink)
2077 			dc_sink_release(aconnector->dc_sink);
2078 		aconnector->dc_sink = NULL;
2079 		amdgpu_dm_update_connector_after_detect(aconnector);
2080 		mutex_unlock(&aconnector->hpd_lock);
2081 	}
2082 	drm_connector_list_iter_end(&iter);
2083 
2084 	/* Force mode set in atomic commit */
2085 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2086 		new_crtc_state->active_changed = true;
2087 
2088 	/*
2089 	 * atomic_check is expected to create the dc states. We need to release
2090 	 * them here, since they were duplicated as part of the suspend
2091 	 * procedure.
2092 	 */
2093 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2094 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2095 		if (dm_new_crtc_state->stream) {
2096 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2097 			dc_stream_release(dm_new_crtc_state->stream);
2098 			dm_new_crtc_state->stream = NULL;
2099 		}
2100 	}
2101 
2102 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2103 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2104 		if (dm_new_plane_state->dc_state) {
2105 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2106 			dc_plane_state_release(dm_new_plane_state->dc_state);
2107 			dm_new_plane_state->dc_state = NULL;
2108 		}
2109 	}
2110 
2111 	drm_atomic_helper_resume(ddev, dm->cached_state);
2112 
2113 	dm->cached_state = NULL;
2114 
2115 	amdgpu_dm_irq_resume_late(adev);
2116 
2117 	amdgpu_dm_smu_write_watermarks_table(adev);
2118 
2119 	return 0;
2120 }
2121 
2122 /**
2123  * DOC: DM Lifecycle
2124  *
2125  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2126  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2127  * the base driver's device list to be initialized and torn down accordingly.
2128  *
2129  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2130  */
2131 
2132 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2133 	.name = "dm",
2134 	.early_init = dm_early_init,
2135 	.late_init = dm_late_init,
2136 	.sw_init = dm_sw_init,
2137 	.sw_fini = dm_sw_fini,
2138 	.hw_init = dm_hw_init,
2139 	.hw_fini = dm_hw_fini,
2140 	.suspend = dm_suspend,
2141 	.resume = dm_resume,
2142 	.is_idle = dm_is_idle,
2143 	.wait_for_idle = dm_wait_for_idle,
2144 	.check_soft_reset = dm_check_soft_reset,
2145 	.soft_reset = dm_soft_reset,
2146 	.set_clockgating_state = dm_set_clockgating_state,
2147 	.set_powergating_state = dm_set_powergating_state,
2148 };
2149 
2150 const struct amdgpu_ip_block_version dm_ip_block =
2151 {
2152 	.type = AMD_IP_BLOCK_TYPE_DCE,
2153 	.major = 1,
2154 	.minor = 0,
2155 	.rev = 0,
2156 	.funcs = &amdgpu_dm_funcs,
2157 };
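
/*
 * A sketch of how the base driver consumes dm_ip_block: the SoC init path
 * adds the block to the device's IP list, after which the amd_ip_funcs
 * hooks above run at the documented lifecycle points (illustrative only,
 * not a new registration site):
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */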
2158 
2159 
2160 /**
2161  * DOC: atomic
2162  *
2163  * *WIP*
2164  */
2165 
2166 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2167 	.fb_create = amdgpu_display_user_framebuffer_create,
2168 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2169 	.atomic_check = amdgpu_dm_atomic_check,
2170 	.atomic_commit = amdgpu_dm_atomic_commit,
2171 };
2172 
2173 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2174 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2175 };
2176 
2177 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2178 {
2179 	u32 max_avg, min_cll, max, min, q, r;
2180 	struct amdgpu_dm_backlight_caps *caps;
2181 	struct amdgpu_display_manager *dm;
2182 	struct drm_connector *conn_base;
2183 	struct amdgpu_device *adev;
2184 	struct dc_link *link = NULL;
2185 	static const u8 pre_computed_values[] = {
2186 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2187 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2188 
2189 	if (!aconnector || !aconnector->dc_link)
2190 		return;
2191 
2192 	link = aconnector->dc_link;
2193 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2194 		return;
2195 
2196 	conn_base = &aconnector->base;
2197 	adev = drm_to_adev(conn_base->dev);
2198 	dm = &adev->dm;
2199 	caps = &dm->backlight_caps;
2200 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2201 	caps->aux_support = false;
2202 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2203 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2204 
2205 	if (caps->ext_caps->bits.oled == 1 /*||
2206 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2207 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2208 		caps->aux_support = true;
2209 
2210 	if (amdgpu_backlight == 0)
2211 		caps->aux_support = false;
2212 	else if (amdgpu_backlight == 1)
2213 		caps->aux_support = true;
2214 
2215 	/* From the specification (CTA-861-G), to calculate the maximum
2216 	 * luminance we need to use:
2217 	 *	Luminance = 50*2**(CV/32)
2218 	 * where CV is a one-byte value.
2219 	 * Evaluating this expression directly would need floating-point
2220 	 * precision; to avoid that complexity, we take advantage of the fact
2221 	 * that CV is divided by a constant. From Euclid's division algorithm,
2222 	 * we know that CV can be written as: CV = 32*q + r. Substituting CV
2223 	 * into the Luminance expression gives 50*(2**q)*(2**(r/32)), so we
2224 	 * only need to pre-compute the values of 2**(r/32). For that we used
2225 	 * the following Ruby one-liner:
2226 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2227 	 * The results of the above expression can be verified against
2228 	 * pre_computed_values.
2229 	 */
2230 	q = max_avg >> 5;
2231 	r = max_avg % 32;
2232 	max = (1 << q) * pre_computed_values[r];
2233 
2234 	// min luminance: maxLum * (CV/255)^2 / 100
2235 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2236 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2237 
2238 	caps->aux_max_input_signal = max;
2239 	caps->aux_min_input_signal = min;
2240 }
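
/*
 * Worked example of the luminance math above: for max_avg = 100,
 * q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, which
 * approximates 50 * 2**(100/32) ~= 437 without floating point.
 */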
2241 
2242 void amdgpu_dm_update_connector_after_detect(
2243 		struct amdgpu_dm_connector *aconnector)
2244 {
2245 	struct drm_connector *connector = &aconnector->base;
2246 	struct drm_device *dev = connector->dev;
2247 	struct dc_sink *sink;
2248 
2249 	/* MST handled by drm_mst framework */
2250 	if (aconnector->mst_mgr.mst_state == true)
2251 		return;
2252 
2253 	sink = aconnector->dc_link->local_sink;
2254 	if (sink)
2255 		dc_sink_retain(sink);
2256 
2257 	/*
2258 	 * An EDID-managed connector gets its first update only in the mode_valid
2259 	 * hook; the connector sink is then set to either a fake or a physical
2260 	 * sink, depending on link status. Skip if already done during boot.
2261 	 */
2262 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2263 			&& aconnector->dc_em_sink) {
2264 
2265 		/*
2266 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2267 		 * fake the stream, because on resume connector->sink is set to NULL
2268 		 */
2269 		mutex_lock(&dev->mode_config.mutex);
2270 
2271 		if (sink) {
2272 			if (aconnector->dc_sink) {
2273 				amdgpu_dm_update_freesync_caps(connector, NULL);
2274 				/*
2275 				 * The retain and release below bump up the
2276 				 * refcount for the sink because the link no longer points
2277 				 * to it after disconnect; otherwise, on the next crtc-to-connector
2278 				 * reshuffle by the UMD we would hit an unwanted dc_sink release
2279 				 */
2280 				dc_sink_release(aconnector->dc_sink);
2281 			}
2282 			aconnector->dc_sink = sink;
2283 			dc_sink_retain(aconnector->dc_sink);
2284 			amdgpu_dm_update_freesync_caps(connector,
2285 					aconnector->edid);
2286 		} else {
2287 			amdgpu_dm_update_freesync_caps(connector, NULL);
2288 			if (!aconnector->dc_sink) {
2289 				aconnector->dc_sink = aconnector->dc_em_sink;
2290 				dc_sink_retain(aconnector->dc_sink);
2291 			}
2292 		}
2293 
2294 		mutex_unlock(&dev->mode_config.mutex);
2295 
2296 		if (sink)
2297 			dc_sink_release(sink);
2298 		return;
2299 	}
2300 
2301 	/*
2302 	 * TODO: temporary guard to look for proper fix
2303 	 * if this sink is MST sink, we should not do anything
2304 	 */
2305 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2306 		dc_sink_release(sink);
2307 		return;
2308 	}
2309 
2310 	if (aconnector->dc_sink == sink) {
2311 		/*
2312 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2313 		 * Do nothing!!
2314 		 */
2315 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2316 				aconnector->connector_id);
2317 		if (sink)
2318 			dc_sink_release(sink);
2319 		return;
2320 	}
2321 
2322 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2323 		aconnector->connector_id, aconnector->dc_sink, sink);
2324 
2325 	mutex_lock(&dev->mode_config.mutex);
2326 
2327 	/*
2328 	 * 1. Update status of the drm connector
2329 	 * 2. Send an event and let userspace tell us what to do
2330 	 */
2331 	if (sink) {
2332 		/*
2333 		 * TODO: check if we still need the S3 mode update workaround.
2334 		 * If yes, put it here.
2335 		 */
2336 		if (aconnector->dc_sink) {
2337 			amdgpu_dm_update_freesync_caps(connector, NULL);
2338 			dc_sink_release(aconnector->dc_sink);
2339 		}
2340 
2341 		aconnector->dc_sink = sink;
2342 		dc_sink_retain(aconnector->dc_sink);
2343 		if (sink->dc_edid.length == 0) {
2344 			aconnector->edid = NULL;
2345 			if (aconnector->dc_link->aux_mode) {
2346 				drm_dp_cec_unset_edid(
2347 					&aconnector->dm_dp_aux.aux);
2348 			}
2349 		} else {
2350 			aconnector->edid =
2351 				(struct edid *)sink->dc_edid.raw_edid;
2352 
2353 			if (aconnector->dc_link->aux_mode)
2354 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2355 						    aconnector->edid);
2356 		}
2357 
2358 		drm_connector_update_edid_property(connector, aconnector->edid);
2359 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2360 		update_connector_ext_caps(aconnector);
2361 	} else {
2362 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2363 		amdgpu_dm_update_freesync_caps(connector, NULL);
2364 		drm_connector_update_edid_property(connector, NULL);
2365 		aconnector->num_modes = 0;
2366 		dc_sink_release(aconnector->dc_sink);
2367 		aconnector->dc_sink = NULL;
2368 		aconnector->edid = NULL;
2369 #ifdef CONFIG_DRM_AMD_DC_HDCP
2370 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2371 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2372 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2373 #endif
2374 	}
2375 
2376 	mutex_unlock(&dev->mode_config.mutex);
2377 
2378 	update_subconnector_property(aconnector);
2379 
2380 	if (sink)
2381 		dc_sink_release(sink);
2382 }
2383 
2384 static void handle_hpd_irq(void *param)
2385 {
2386 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2387 	struct drm_connector *connector = &aconnector->base;
2388 	struct drm_device *dev = connector->dev;
2389 	enum dc_connection_type new_connection_type = dc_connection_none;
2390 #ifdef CONFIG_DRM_AMD_DC_HDCP
2391 	struct amdgpu_device *adev = drm_to_adev(dev);
2392 #endif
2393 
2394 	/*
2395 	 * In case of failure or MST there is no need to update the connector
2396 	 * status or notify the OS, since (in the MST case) MST does this in its own context.
2397 	 */
2398 	mutex_lock(&aconnector->hpd_lock);
2399 
2400 #ifdef CONFIG_DRM_AMD_DC_HDCP
2401 	if (adev->dm.hdcp_workqueue)
2402 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2403 #endif
2404 	if (aconnector->fake_enable)
2405 		aconnector->fake_enable = false;
2406 
2407 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2408 		DRM_ERROR("KMS: Failed to detect connector\n");
2409 
2410 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2411 		emulated_link_detect(aconnector->dc_link);
2412 
2413 
2414 		drm_modeset_lock_all(dev);
2415 		dm_restore_drm_connector_state(dev, connector);
2416 		drm_modeset_unlock_all(dev);
2417 
2418 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2419 			drm_kms_helper_hotplug_event(dev);
2420 
2421 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2422 		if (new_connection_type == dc_connection_none &&
2423 		    aconnector->dc_link->type == dc_connection_none)
2424 			dm_set_dpms_off(aconnector->dc_link);
2425 
2426 		amdgpu_dm_update_connector_after_detect(aconnector);
2427 
2428 		drm_modeset_lock_all(dev);
2429 		dm_restore_drm_connector_state(dev, connector);
2430 		drm_modeset_unlock_all(dev);
2431 
2432 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2433 			drm_kms_helper_hotplug_event(dev);
2434 	}
2435 	mutex_unlock(&aconnector->hpd_lock);
2436 
2437 }
2438 
2439 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2440 {
2441 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2442 	uint8_t dret;
2443 	bool new_irq_handled = false;
2444 	int dpcd_addr;
2445 	int dpcd_bytes_to_read;
2446 
2447 	const int max_process_count = 30;
2448 	int process_count = 0;
2449 
2450 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2451 
2452 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2453 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2454 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2455 		dpcd_addr = DP_SINK_COUNT;
2456 	} else {
2457 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2458 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2459 		dpcd_addr = DP_SINK_COUNT_ESI;
2460 	}
2461 
2462 	dret = drm_dp_dpcd_read(
2463 		&aconnector->dm_dp_aux.aux,
2464 		dpcd_addr,
2465 		esi,
2466 		dpcd_bytes_to_read);
2467 
2468 	while (dret == dpcd_bytes_to_read &&
2469 		process_count < max_process_count) {
2470 		uint8_t retry;
2471 		dret = 0;
2472 
2473 		process_count++;
2474 
2475 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2476 		/* handle HPD short pulse irq */
2477 		if (aconnector->mst_mgr.mst_state)
2478 			drm_dp_mst_hpd_irq(
2479 				&aconnector->mst_mgr,
2480 				esi,
2481 				&new_irq_handled);
2482 
2483 		if (new_irq_handled) {
2484 			/* ACK at DPCD to notify downstream */
2485 			const int ack_dpcd_bytes_to_write =
2486 				dpcd_bytes_to_read - 1;
2487 
2488 			for (retry = 0; retry < 3; retry++) {
2489 				uint8_t wret;
2490 
2491 				wret = drm_dp_dpcd_write(
2492 					&aconnector->dm_dp_aux.aux,
2493 					dpcd_addr + 1,
2494 					&esi[1],
2495 					ack_dpcd_bytes_to_write);
2496 				if (wret == ack_dpcd_bytes_to_write)
2497 					break;
2498 			}
2499 
2500 			/* check if there is new irq to be handled */
2501 			dret = drm_dp_dpcd_read(
2502 				&aconnector->dm_dp_aux.aux,
2503 				dpcd_addr,
2504 				esi,
2505 				dpcd_bytes_to_read);
2506 
2507 			new_irq_handled = false;
2508 		} else {
2509 			break;
2510 		}
2511 	}
2512 
2513 	if (process_count == max_process_count)
2514 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2515 }
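
/*
 * Worked example of the DPCD window math above, using the standard DPCD
 * addresses: for DPCD rev < 1.2, DP_SINK_COUNT is 0x200 and
 * DP_LANE0_1_STATUS is 0x202, so dpcd_bytes_to_read = 2 and the read covers
 * 0x200-0x201. For rev >= 1.2, DP_SINK_COUNT_ESI is 0x2002 and
 * DP_PSR_ERROR_STATUS is 0x2006, so dpcd_bytes_to_read = 4 and the read
 * covers 0x2002-0x2005, matching the comments in the function.
 */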
2516 
2517 static void handle_hpd_rx_irq(void *param)
2518 {
2519 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2520 	struct drm_connector *connector = &aconnector->base;
2521 	struct drm_device *dev = connector->dev;
2522 	struct dc_link *dc_link = aconnector->dc_link;
2523 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2524 	enum dc_connection_type new_connection_type = dc_connection_none;
2525 #ifdef CONFIG_DRM_AMD_DC_HDCP
2526 	union hpd_irq_data hpd_irq_data;
2527 	struct amdgpu_device *adev = drm_to_adev(dev);
2528 
2529 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2530 #endif
2531 
2532 	/*
2533 	 * TODO: Temporarily take this mutex to protect the HPD interrupt from
2534 	 * GPIO conflicts; once the i2c helper is implemented, this mutex
2535 	 * should be retired.
2536 	 */
2537 	if (dc_link->type != dc_connection_mst_branch)
2538 		mutex_lock(&aconnector->hpd_lock);
2539 
2540 
2541 #ifdef CONFIG_DRM_AMD_DC_HDCP
2542 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2543 #else
2544 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2545 #endif
2546 			!is_mst_root_connector) {
2547 		/* Downstream Port status changed. */
2548 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2549 			DRM_ERROR("KMS: Failed to detect connector\n");
2550 
2551 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2552 			emulated_link_detect(dc_link);
2553 
2554 			if (aconnector->fake_enable)
2555 				aconnector->fake_enable = false;
2556 
2557 			amdgpu_dm_update_connector_after_detect(aconnector);
2558 
2559 
2560 			drm_modeset_lock_all(dev);
2561 			dm_restore_drm_connector_state(dev, connector);
2562 			drm_modeset_unlock_all(dev);
2563 
2564 			drm_kms_helper_hotplug_event(dev);
2565 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2566 
2567 			if (aconnector->fake_enable)
2568 				aconnector->fake_enable = false;
2569 
2570 			amdgpu_dm_update_connector_after_detect(aconnector);
2571 
2572 
2573 			drm_modeset_lock_all(dev);
2574 			dm_restore_drm_connector_state(dev, connector);
2575 			drm_modeset_unlock_all(dev);
2576 
2577 			drm_kms_helper_hotplug_event(dev);
2578 		}
2579 	}
2580 #ifdef CONFIG_DRM_AMD_DC_HDCP
2581 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2582 		if (adev->dm.hdcp_workqueue)
2583 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2584 	}
2585 #endif
2586 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2587 	    (dc_link->type == dc_connection_mst_branch))
2588 		dm_handle_hpd_rx_irq(aconnector);
2589 
2590 	if (dc_link->type != dc_connection_mst_branch) {
2591 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2592 		mutex_unlock(&aconnector->hpd_lock);
2593 	}
2594 }
2595 
2596 static void register_hpd_handlers(struct amdgpu_device *adev)
2597 {
2598 	struct drm_device *dev = adev_to_drm(adev);
2599 	struct drm_connector *connector;
2600 	struct amdgpu_dm_connector *aconnector;
2601 	const struct dc_link *dc_link;
2602 	struct dc_interrupt_params int_params = {0};
2603 
2604 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2605 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2606 
2607 	list_for_each_entry(connector,
2608 			&dev->mode_config.connector_list, head)	{
2609 
2610 		aconnector = to_amdgpu_dm_connector(connector);
2611 		dc_link = aconnector->dc_link;
2612 
2613 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2614 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2615 			int_params.irq_source = dc_link->irq_source_hpd;
2616 
2617 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2618 					handle_hpd_irq,
2619 					(void *) aconnector);
2620 		}
2621 
2622 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2623 
2624 			/* Also register for DP short pulse (hpd_rx). */
2625 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2626 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2627 
2628 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2629 					handle_hpd_rx_irq,
2630 					(void *) aconnector);
2631 		}
2632 	}
2633 }
2634 
2635 #if defined(CONFIG_DRM_AMD_DC_SI)
2636 /* Register IRQ sources and initialize IRQ callbacks */
2637 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2638 {
2639 	struct dc *dc = adev->dm.dc;
2640 	struct common_irq_params *c_irq_params;
2641 	struct dc_interrupt_params int_params = {0};
2642 	int r;
2643 	int i;
2644 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2645 
2646 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2647 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2648 
2649 	/*
2650 	 * Actions of amdgpu_irq_add_id():
2651 	 * 1. Register a set() function with base driver.
2652 	 *    Base driver will call set() function to enable/disable an
2653 	 *    interrupt in DC hardware.
2654 	 * 2. Register amdgpu_dm_irq_handler().
2655 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2656 	 *    coming from DC hardware.
2657 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2658 	 *    for acknowledging and handling. */
2659 
2660 	/* Use VBLANK interrupt */
2661 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2662 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2663 		if (r) {
2664 			DRM_ERROR("Failed to add crtc irq id!\n");
2665 			return r;
2666 		}
2667 
2668 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2669 		int_params.irq_source =
2670 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2671 
2672 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2673 
2674 		c_irq_params->adev = adev;
2675 		c_irq_params->irq_src = int_params.irq_source;
2676 
2677 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2678 				dm_crtc_high_irq, c_irq_params);
2679 	}
2680 
2681 	/* Use GRPH_PFLIP interrupt */
2682 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2683 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2684 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2685 		if (r) {
2686 			DRM_ERROR("Failed to add page flip irq id!\n");
2687 			return r;
2688 		}
2689 
2690 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2691 		int_params.irq_source =
2692 			dc_interrupt_to_irq_source(dc, i, 0);
2693 
2694 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2695 
2696 		c_irq_params->adev = adev;
2697 		c_irq_params->irq_src = int_params.irq_source;
2698 
2699 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2700 				dm_pflip_high_irq, c_irq_params);
2701 
2702 	}
2703 
2704 	/* HPD */
2705 	r = amdgpu_irq_add_id(adev, client_id,
2706 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2707 	if (r) {
2708 		DRM_ERROR("Failed to add hpd irq id!\n");
2709 		return r;
2710 	}
2711 
2712 	register_hpd_handlers(adev);
2713 
2714 	return 0;
2715 }
2716 #endif
2717 
2718 /* Register IRQ sources and initialize IRQ callbacks */
2719 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2720 {
2721 	struct dc *dc = adev->dm.dc;
2722 	struct common_irq_params *c_irq_params;
2723 	struct dc_interrupt_params int_params = {0};
2724 	int r;
2725 	int i;
2726 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2727 
2728 	if (adev->asic_type >= CHIP_VEGA10)
2729 		client_id = SOC15_IH_CLIENTID_DCE;
2730 
2731 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2732 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2733 
2734 	/*
2735 	 * Actions of amdgpu_irq_add_id():
2736 	 * 1. Register a set() function with base driver.
2737 	 *    Base driver will call set() function to enable/disable an
2738 	 *    interrupt in DC hardware.
2739 	 * 2. Register amdgpu_dm_irq_handler().
2740 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2741 	 *    coming from DC hardware.
2742 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2743 	 *    for acknowledging and handling. */
2744 
2745 	/* Use VBLANK interrupt */
2746 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2747 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2748 		if (r) {
2749 			DRM_ERROR("Failed to add crtc irq id!\n");
2750 			return r;
2751 		}
2752 
2753 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2754 		int_params.irq_source =
2755 			dc_interrupt_to_irq_source(dc, i, 0);
2756 
2757 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2758 
2759 		c_irq_params->adev = adev;
2760 		c_irq_params->irq_src = int_params.irq_source;
2761 
2762 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2763 				dm_crtc_high_irq, c_irq_params);
2764 	}
2765 
2766 	/* Use VUPDATE interrupt */
2767 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2768 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2769 		if (r) {
2770 			DRM_ERROR("Failed to add vupdate irq id!\n");
2771 			return r;
2772 		}
2773 
2774 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2775 		int_params.irq_source =
2776 			dc_interrupt_to_irq_source(dc, i, 0);
2777 
2778 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2779 
2780 		c_irq_params->adev = adev;
2781 		c_irq_params->irq_src = int_params.irq_source;
2782 
2783 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2784 				dm_vupdate_high_irq, c_irq_params);
2785 	}
2786 
2787 	/* Use GRPH_PFLIP interrupt */
2788 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2789 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2790 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2791 		if (r) {
2792 			DRM_ERROR("Failed to add page flip irq id!\n");
2793 			return r;
2794 		}
2795 
2796 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2797 		int_params.irq_source =
2798 			dc_interrupt_to_irq_source(dc, i, 0);
2799 
2800 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2801 
2802 		c_irq_params->adev = adev;
2803 		c_irq_params->irq_src = int_params.irq_source;
2804 
2805 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2806 				dm_pflip_high_irq, c_irq_params);
2807 
2808 	}
2809 
2810 	/* HPD */
2811 	r = amdgpu_irq_add_id(adev, client_id,
2812 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2813 	if (r) {
2814 		DRM_ERROR("Failed to add hpd irq id!\n");
2815 		return r;
2816 	}
2817 
2818 	register_hpd_handlers(adev);
2819 
2820 	return 0;
2821 }
2822 
2823 #if defined(CONFIG_DRM_AMD_DC_DCN)
2824 /* Register IRQ sources and initialize IRQ callbacks */
2825 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2826 {
2827 	struct dc *dc = adev->dm.dc;
2828 	struct common_irq_params *c_irq_params;
2829 	struct dc_interrupt_params int_params = {0};
2830 	int r;
2831 	int i;
2832 
2833 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2834 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2835 
2836 	/*
2837 	 * Actions of amdgpu_irq_add_id():
2838 	 * 1. Register a set() function with base driver.
2839 	 *    Base driver will call set() function to enable/disable an
2840 	 *    interrupt in DC hardware.
2841 	 * 2. Register amdgpu_dm_irq_handler().
2842 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2843 	 *    coming from DC hardware.
2844 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2845 	 *    for acknowledging and handling.
2846 	 */
2847 
2848 	/* Use VSTARTUP interrupt */
2849 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2850 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2851 			i++) {
2852 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2853 
2854 		if (r) {
2855 			DRM_ERROR("Failed to add crtc irq id!\n");
2856 			return r;
2857 		}
2858 
2859 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2860 		int_params.irq_source =
2861 			dc_interrupt_to_irq_source(dc, i, 0);
2862 
2863 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2864 
2865 		c_irq_params->adev = adev;
2866 		c_irq_params->irq_src = int_params.irq_source;
2867 
2868 		amdgpu_dm_irq_register_interrupt(
2869 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2870 	}
2871 
2872 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2873 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2874 	 * to trigger at end of each vblank, regardless of state of the lock,
2875 	 * matching DCE behaviour.
2876 	 */
2877 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2878 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2879 	     i++) {
2880 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2881 
2882 		if (r) {
2883 			DRM_ERROR("Failed to add vupdate irq id!\n");
2884 			return r;
2885 		}
2886 
2887 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2888 		int_params.irq_source =
2889 			dc_interrupt_to_irq_source(dc, i, 0);
2890 
2891 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2892 
2893 		c_irq_params->adev = adev;
2894 		c_irq_params->irq_src = int_params.irq_source;
2895 
2896 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2897 				dm_vupdate_high_irq, c_irq_params);
2898 	}
2899 
2900 	/* Use GRPH_PFLIP interrupt */
2901 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2902 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2903 			i++) {
2904 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2905 		if (r) {
2906 			DRM_ERROR("Failed to add page flip irq id!\n");
2907 			return r;
2908 		}
2909 
2910 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2911 		int_params.irq_source =
2912 			dc_interrupt_to_irq_source(dc, i, 0);
2913 
2914 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2915 
2916 		c_irq_params->adev = adev;
2917 		c_irq_params->irq_src = int_params.irq_source;
2918 
2919 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2920 				dm_pflip_high_irq, c_irq_params);
2921 
2922 	}
2923 
2924 	/* HPD */
2925 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2926 			&adev->hpd_irq);
2927 	if (r) {
2928 		DRM_ERROR("Failed to add hpd irq id!\n");
2929 		return r;
2930 	}
2931 
2932 	register_hpd_handlers(adev);
2933 
2934 	return 0;
2935 }
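
/*
 * All of the register_irq_handlers() variants above follow the same
 * two-step pattern per interrupt source: amdgpu_irq_add_id() wires the
 * source into the base driver, then amdgpu_dm_irq_register_interrupt()
 * attaches the DM handler. A condensed sketch of one registration:
 *
 *	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, src_id,
 *			      &adev->crtc_irq);
 *	if (r)
 *		return r;
 *	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 *	int_params.irq_source = dc_interrupt_to_irq_source(dc, src_id, 0);
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 dm_crtc_high_irq, c_irq_params);
 */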
2936 #endif
2937 
2938 /*
2939  * Acquires the lock for the atomic state object and returns
2940  * the new atomic state.
2941  *
2942  * This should only be called during atomic check.
2943  */
2944 static int dm_atomic_get_state(struct drm_atomic_state *state,
2945 			       struct dm_atomic_state **dm_state)
2946 {
2947 	struct drm_device *dev = state->dev;
2948 	struct amdgpu_device *adev = drm_to_adev(dev);
2949 	struct amdgpu_display_manager *dm = &adev->dm;
2950 	struct drm_private_state *priv_state;
2951 
2952 	if (*dm_state)
2953 		return 0;
2954 
2955 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2956 	if (IS_ERR(priv_state))
2957 		return PTR_ERR(priv_state);
2958 
2959 	*dm_state = to_dm_atomic_state(priv_state);
2960 
2961 	return 0;
2962 }
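
/*
 * Typical use during atomic check (a sketch): start with a NULL pointer so
 * the private object state is acquired at most once per atomic_check pass:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected/modified safely
 */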
2963 
2964 static struct dm_atomic_state *
2965 dm_atomic_get_new_state(struct drm_atomic_state *state)
2966 {
2967 	struct drm_device *dev = state->dev;
2968 	struct amdgpu_device *adev = drm_to_adev(dev);
2969 	struct amdgpu_display_manager *dm = &adev->dm;
2970 	struct drm_private_obj *obj;
2971 	struct drm_private_state *new_obj_state;
2972 	int i;
2973 
2974 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2975 		if (obj->funcs == dm->atomic_obj.funcs)
2976 			return to_dm_atomic_state(new_obj_state);
2977 	}
2978 
2979 	return NULL;
2980 }
2981 
2982 static struct drm_private_state *
2983 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2984 {
2985 	struct dm_atomic_state *old_state, *new_state;
2986 
2987 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2988 	if (!new_state)
2989 		return NULL;
2990 
2991 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2992 
2993 	old_state = to_dm_atomic_state(obj->state);
2994 
2995 	if (old_state && old_state->context)
2996 		new_state->context = dc_copy_state(old_state->context);
2997 
2998 	if (!new_state->context) {
2999 		kfree(new_state);
3000 		return NULL;
3001 	}
3002 
3003 	return &new_state->base;
3004 }
3005 
3006 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3007 				    struct drm_private_state *state)
3008 {
3009 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3010 
3011 	if (dm_state && dm_state->context)
3012 		dc_release_state(dm_state->context);
3013 
3014 	kfree(dm_state);
3015 }
3016 
3017 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3018 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3019 	.atomic_destroy_state = dm_atomic_destroy_state,
3020 };
3021 
3022 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3023 {
3024 	struct dm_atomic_state *state;
3025 	int r;
3026 
3027 	adev->mode_info.mode_config_initialized = true;
3028 
3029 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3030 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3031 
3032 	adev_to_drm(adev)->mode_config.max_width = 16384;
3033 	adev_to_drm(adev)->mode_config.max_height = 16384;
3034 
3035 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3036 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3037 	/* indicates support for immediate flip */
3038 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3039 
3040 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3041 
3042 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3043 	if (!state)
3044 		return -ENOMEM;
3045 
3046 	state->context = dc_create_state(adev->dm.dc);
3047 	if (!state->context) {
3048 		kfree(state);
3049 		return -ENOMEM;
3050 	}
3051 
3052 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3053 
3054 	drm_atomic_private_obj_init(adev_to_drm(adev),
3055 				    &adev->dm.atomic_obj,
3056 				    &state->base,
3057 				    &dm_atomic_state_funcs);
3058 
3059 	r = amdgpu_display_modeset_create_props(adev);
3060 	if (r) {
3061 		dc_release_state(state->context);
3062 		kfree(state);
3063 		return r;
3064 	}
3065 
3066 	r = amdgpu_dm_audio_init(adev);
3067 	if (r) {
3068 		dc_release_state(state->context);
3069 		kfree(state);
3070 		return r;
3071 	}
3072 
3073 	return 0;
3074 }
3075 
3076 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3077 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3078 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3079 
3080 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3081 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3082 
3083 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3084 {
3085 #if defined(CONFIG_ACPI)
3086 	struct amdgpu_dm_backlight_caps caps;
3087 
3088 	memset(&caps, 0, sizeof(caps));
3089 
3090 	if (dm->backlight_caps.caps_valid)
3091 		return;
3092 
3093 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3094 	if (caps.caps_valid) {
3095 		dm->backlight_caps.caps_valid = true;
3096 		if (caps.aux_support)
3097 			return;
3098 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3099 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3100 	} else {
3101 		dm->backlight_caps.min_input_signal =
3102 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3103 		dm->backlight_caps.max_input_signal =
3104 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3105 	}
3106 #else
3107 	if (dm->backlight_caps.aux_support)
3108 		return;
3109 
3110 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3111 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3112 #endif
3113 }
3114 
3115 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3116 				unsigned *min, unsigned *max)
3117 {
3118 	if (!caps)
3119 		return 0;
3120 
3121 	if (caps->aux_support) {
3122 		// Firmware limits are in nits, DC API wants millinits.
3123 		*max = 1000 * caps->aux_max_input_signal;
3124 		*min = 1000 * caps->aux_min_input_signal;
3125 	} else {
3126 		// Firmware limits are 8-bit, PWM control is 16-bit.
3127 		*max = 0x101 * caps->max_input_signal;
3128 		*min = 0x101 * caps->min_input_signal;
3129 	}
3130 	return 1;
3131 }
3132 
3133 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3134 					uint32_t brightness)
3135 {
3136 	unsigned min, max;
3137 
3138 	if (!get_brightness_range(caps, &min, &max))
3139 		return brightness;
3140 
3141 	// Rescale 0..255 to min..max
3142 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3143 				       AMDGPU_MAX_BL_LEVEL);
3144 }
3145 
3146 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3147 				      uint32_t brightness)
3148 {
3149 	unsigned min, max;
3150 
3151 	if (!get_brightness_range(caps, &min, &max))
3152 		return brightness;
3153 
3154 	if (brightness < min)
3155 		return 0;
3156 	// Rescale min..max to 0..255
3157 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3158 				 max - min);
3159 }
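
/*
 * Worked example of the rescaling above, using the PWM defaults
 * (min_input_signal = 12, max_input_signal = 255): min = 0x101 * 12 = 3084
 * and max = 0x101 * 255 = 65535. A user brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433, and
 * convert_brightness_to_user() maps 34433 back to 128.
 */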
3160 
3161 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3162 {
3163 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3164 	struct amdgpu_dm_backlight_caps caps;
3165 	struct dc_link *link = NULL;
3166 	u32 brightness;
3167 	bool rc;
3168 
3169 	amdgpu_dm_update_backlight_caps(dm);
3170 	caps = dm->backlight_caps;
3171 
3172 	link = (struct dc_link *)dm->backlight_link;
3173 
3174 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3175 	// Change brightness based on AUX property
3176 	if (caps.aux_support)
3177 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3178 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3179 	else
3180 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3181 
3182 	return rc ? 0 : 1;
3183 }
3184 
3185 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3186 {
3187 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3188 	struct amdgpu_dm_backlight_caps caps;
3189 
3190 	amdgpu_dm_update_backlight_caps(dm);
3191 	caps = dm->backlight_caps;
3192 
3193 	if (caps.aux_support) {
3194 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3195 		u32 avg, peak;
3196 		bool rc;
3197 
3198 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3199 		if (!rc)
3200 			return bd->props.brightness;
3201 		return convert_brightness_to_user(&caps, avg);
3202 	} else {
3203 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3204 
3205 		if (ret == DC_ERROR_UNEXPECTED)
3206 			return bd->props.brightness;
3207 		return convert_brightness_to_user(&caps, ret);
3208 	}
3209 }
3210 
3211 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3212 	.options = BL_CORE_SUSPENDRESUME,
3213 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3214 	.update_status	= amdgpu_dm_backlight_update_status,
3215 };
3216 
3217 static void
3218 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3219 {
3220 	char bl_name[16];
3221 	struct backlight_properties props = { 0 };
3222 
3223 	amdgpu_dm_update_backlight_caps(dm);
3224 
3225 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3226 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3227 	props.type = BACKLIGHT_RAW;
3228 
3229 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3230 		 adev_to_drm(dm->adev)->primary->index);
3231 
3232 	dm->backlight_dev = backlight_device_register(bl_name,
3233 						      adev_to_drm(dm->adev)->dev,
3234 						      dm,
3235 						      &amdgpu_dm_backlight_ops,
3236 						      &props);
3237 
3238 	if (IS_ERR(dm->backlight_dev))
3239 		DRM_ERROR("DM: Backlight registration failed!\n");
3240 	else
3241 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3242 }
3243 
3244 #endif
3245 
3246 static int initialize_plane(struct amdgpu_display_manager *dm,
3247 			    struct amdgpu_mode_info *mode_info, int plane_id,
3248 			    enum drm_plane_type plane_type,
3249 			    const struct dc_plane_cap *plane_cap)
3250 {
3251 	struct drm_plane *plane;
3252 	unsigned long possible_crtcs;
3253 	int ret = 0;
3254 
3255 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3256 	if (!plane) {
3257 		DRM_ERROR("KMS: Failed to allocate plane\n");
3258 		return -ENOMEM;
3259 	}
3260 	plane->type = plane_type;
3261 
3262 	/*
3263 	 * HACK: IGT tests expect that the primary plane for a CRTC
3264 	 * can only have one possible CRTC. Only expose support for
3265 	 * any CRTC if they're not going to be used as a primary plane
3266 	 * for a CRTC - like overlay or underlay planes.
3267 	 */
3268 	possible_crtcs = 1 << plane_id;
3269 	if (plane_id >= dm->dc->caps.max_streams)
3270 		possible_crtcs = 0xff;
3271 
3272 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3273 
3274 	if (ret) {
3275 		DRM_ERROR("KMS: Failed to initialize plane\n");
3276 		kfree(plane);
3277 		return ret;
3278 	}
3279 
3280 	if (mode_info)
3281 		mode_info->planes[plane_id] = plane;
3282 
3283 	return ret;
3284 }
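
/*
 * Example of the possible_crtcs encoding above: primary plane 0 gets the
 * bitmask 1 << 0 = 0x1 (CRTC 0 only), while a plane whose id is at or past
 * dc->caps.max_streams (e.g. the overlay plane registered later) gets 0xff
 * and may be placed on any CRTC.
 */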
3285 
3286 
3287 static void register_backlight_device(struct amdgpu_display_manager *dm,
3288 				      struct dc_link *link)
3289 {
3290 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3291 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3292 
3293 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3294 	    link->type != dc_connection_none) {
3295 		/*
3296 		 * Even if registration fails, we should continue with
3297 		 * DM initialization, because not having backlight control
3298 		 * is better than a black screen.
3299 		 */
3300 		amdgpu_dm_register_backlight_device(dm);
3301 
3302 		if (dm->backlight_dev)
3303 			dm->backlight_link = link;
3304 	}
3305 #endif
3306 }
3307 
3308 
3309 /*
3310  * In this architecture, the association
3311  * connector -> encoder -> crtc
3312  * is not really required. The crtc and connector will hold the
3313  * display_index as an abstraction to use with the DAL component.
3314  *
3315  * Returns 0 on success
3316  */
3317 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3318 {
3319 	struct amdgpu_display_manager *dm = &adev->dm;
3320 	int32_t i;
3321 	struct amdgpu_dm_connector *aconnector = NULL;
3322 	struct amdgpu_encoder *aencoder = NULL;
3323 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3324 	uint32_t link_cnt;
3325 	int32_t primary_planes;
3326 	enum dc_connection_type new_connection_type = dc_connection_none;
3327 	const struct dc_plane_cap *plane;
3328 
3329 	dm->display_indexes_num = dm->dc->caps.max_streams;
3330 	/* Update the actual number of crtcs used */
3331 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3332 
3333 	link_cnt = dm->dc->caps.max_links;
3334 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3335 		DRM_ERROR("DM: Failed to initialize mode config\n");
3336 		return -EINVAL;
3337 	}
3338 
3339 	/* There is one primary plane per CRTC */
3340 	primary_planes = dm->dc->caps.max_streams;
3341 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3342 
3343 	/*
3344 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3345 	 * Order is reversed to match iteration order in atomic check.
3346 	 */
3347 	for (i = (primary_planes - 1); i >= 0; i--) {
3348 		plane = &dm->dc->caps.planes[i];
3349 
3350 		if (initialize_plane(dm, mode_info, i,
3351 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3352 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3353 			goto fail;
3354 		}
3355 	}
3356 
3357 	/*
3358 	 * Initialize overlay planes, index starting after primary planes.
3359 	 * These planes have a higher DRM index than the primary planes since
3360 	 * they should be considered as having a higher z-order.
3361 	 * Order is reversed to match iteration order in atomic check.
3362 	 *
3363 	 * Only support DCN for now, and only expose one so we don't encourage
3364 	 * userspace to use up all the pipes.
3365 	 */
3366 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3367 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3368 
3369 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3370 			continue;
3371 
3372 		if (!plane->blends_with_above || !plane->blends_with_below)
3373 			continue;
3374 
3375 		if (!plane->pixel_format_support.argb8888)
3376 			continue;
3377 
3378 		if (initialize_plane(dm, NULL, primary_planes + i,
3379 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3380 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3381 			goto fail;
3382 		}
3383 
3384 		/* Only create one overlay plane. */
3385 		break;
3386 	}
3387 
3388 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3389 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3390 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3391 			goto fail;
3392 		}
3393 
3394 	/* Loop over all connectors on the board */
3395 	for (i = 0; i < link_cnt; i++) {
3396 		struct dc_link *link = NULL;
3397 
3398 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3399 			DRM_ERROR(
3400 				"KMS: Cannot support more than %d display indexes\n",
3401 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3402 			continue;
3403 		}
3404 
3405 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3406 		if (!aconnector)
3407 			goto fail;
3408 
3409 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3410 		if (!aencoder)
3411 			goto fail;
3412 
3413 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3414 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3415 			goto fail;
3416 		}
3417 
3418 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3419 			DRM_ERROR("KMS: Failed to initialize connector\n");
3420 			goto fail;
3421 		}
3422 
3423 		link = dc_get_link_at_index(dm->dc, i);
3424 
3425 		if (!dc_link_detect_sink(link, &new_connection_type))
3426 			DRM_ERROR("KMS: Failed to detect connector\n");
3427 
3428 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3429 			emulated_link_detect(link);
3430 			amdgpu_dm_update_connector_after_detect(aconnector);
3431 
3432 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3433 			amdgpu_dm_update_connector_after_detect(aconnector);
3434 			register_backlight_device(dm, link);
3435 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3436 				amdgpu_dm_set_psr_caps(link);
3437 		}
3438 
3440 	}
3441 
3442 	/* Software is initialized. Now we can register interrupt handlers. */
3443 	switch (adev->asic_type) {
3444 #if defined(CONFIG_DRM_AMD_DC_SI)
3445 	case CHIP_TAHITI:
3446 	case CHIP_PITCAIRN:
3447 	case CHIP_VERDE:
3448 	case CHIP_OLAND:
3449 		if (dce60_register_irq_handlers(dm->adev)) {
3450 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3451 			goto fail;
3452 		}
3453 		break;
3454 #endif
3455 	case CHIP_BONAIRE:
3456 	case CHIP_HAWAII:
3457 	case CHIP_KAVERI:
3458 	case CHIP_KABINI:
3459 	case CHIP_MULLINS:
3460 	case CHIP_TONGA:
3461 	case CHIP_FIJI:
3462 	case CHIP_CARRIZO:
3463 	case CHIP_STONEY:
3464 	case CHIP_POLARIS11:
3465 	case CHIP_POLARIS10:
3466 	case CHIP_POLARIS12:
3467 	case CHIP_VEGAM:
3468 	case CHIP_VEGA10:
3469 	case CHIP_VEGA12:
3470 	case CHIP_VEGA20:
3471 		if (dce110_register_irq_handlers(dm->adev)) {
3472 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3473 			goto fail;
3474 		}
3475 		break;
3476 #if defined(CONFIG_DRM_AMD_DC_DCN)
3477 	case CHIP_RAVEN:
3478 	case CHIP_NAVI12:
3479 	case CHIP_NAVI10:
3480 	case CHIP_NAVI14:
3481 	case CHIP_RENOIR:
3482 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3483 	case CHIP_SIENNA_CICHLID:
3484 	case CHIP_NAVY_FLOUNDER:
3485 #endif
3486 		if (dcn10_register_irq_handlers(dm->adev)) {
3487 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3488 			goto fail;
3489 		}
3490 		break;
3491 #endif
3492 	default:
3493 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3494 		goto fail;
3495 	}
3496 
3497 	return 0;
3498 fail:
3499 	kfree(aencoder);
3500 	kfree(aconnector);
3501 
3502 	return -EINVAL;
3503 }
3504 
3505 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3506 {
3507 	drm_mode_config_cleanup(dm->ddev);
3508 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3510 }
3511 
3512 /******************************************************************************
3513  * amdgpu_display_funcs functions
3514  *****************************************************************************/
3515 
3516 /*
3517  * dm_bandwidth_update - program display watermarks
3518  *
3519  * @adev: amdgpu_device pointer
3520  *
3521  * Calculate and program the display watermarks and line buffer allocation.
3522  */
3523 static void dm_bandwidth_update(struct amdgpu_device *adev)
3524 {
3525 	/* TODO: implement later */
3526 }
3527 
3528 static const struct amdgpu_display_funcs dm_display_funcs = {
3529 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3530 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3531 	.backlight_set_level = NULL, /* never called for DC */
3532 	.backlight_get_level = NULL, /* never called for DC */
3533 	.hpd_sense = NULL, /* called unconditionally */
3534 	.hpd_set_polarity = NULL, /* called unconditionally */
3535 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3536 	.page_flip_get_scanoutpos =
3537 		dm_crtc_get_scanoutpos, /* called unconditionally */
3538 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3539 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3540 };
3541 
3542 #if defined(CONFIG_DEBUG_KERNEL_DC)
3543 
3544 static ssize_t s3_debug_store(struct device *device,
3545 			      struct device_attribute *attr,
3546 			      const char *buf,
3547 			      size_t count)
3548 {
3549 	int ret;
3550 	int s3_state;
3551 	struct drm_device *drm_dev = dev_get_drvdata(device);
3552 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3553 
3554 	ret = kstrtoint(buf, 0, &s3_state);
3555 
3556 	if (ret == 0) {
3557 		if (s3_state) {
3558 			dm_resume(adev);
3559 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3560 		} else
3561 			dm_suspend(adev);
3562 	}
3563 
3564 	return ret == 0 ? count : ret;
3565 }
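
/*
 * Debug-only sysfs hook. Illustrative usage (the exact path depends on
 * the device): "echo 1 > /sys/.../s3_debug" fakes a resume and replays
 * a hotplug event; "echo 0" fakes a suspend.
 */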
3566 
3567 DEVICE_ATTR_WO(s3_debug);
3568 
3569 #endif
3570 
3571 static int dm_early_init(void *handle)
3572 {
3573 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3574 
3575 	switch (adev->asic_type) {
3576 #if defined(CONFIG_DRM_AMD_DC_SI)
3577 	case CHIP_TAHITI:
3578 	case CHIP_PITCAIRN:
3579 	case CHIP_VERDE:
3580 		adev->mode_info.num_crtc = 6;
3581 		adev->mode_info.num_hpd = 6;
3582 		adev->mode_info.num_dig = 6;
3583 		break;
3584 	case CHIP_OLAND:
3585 		adev->mode_info.num_crtc = 2;
3586 		adev->mode_info.num_hpd = 2;
3587 		adev->mode_info.num_dig = 2;
3588 		break;
3589 #endif
3590 	case CHIP_BONAIRE:
3591 	case CHIP_HAWAII:
3592 		adev->mode_info.num_crtc = 6;
3593 		adev->mode_info.num_hpd = 6;
3594 		adev->mode_info.num_dig = 6;
3595 		break;
3596 	case CHIP_KAVERI:
3597 		adev->mode_info.num_crtc = 4;
3598 		adev->mode_info.num_hpd = 6;
3599 		adev->mode_info.num_dig = 7;
3600 		break;
3601 	case CHIP_KABINI:
3602 	case CHIP_MULLINS:
3603 		adev->mode_info.num_crtc = 2;
3604 		adev->mode_info.num_hpd = 6;
3605 		adev->mode_info.num_dig = 6;
3606 		break;
3607 	case CHIP_FIJI:
3608 	case CHIP_TONGA:
3609 		adev->mode_info.num_crtc = 6;
3610 		adev->mode_info.num_hpd = 6;
3611 		adev->mode_info.num_dig = 7;
3612 		break;
3613 	case CHIP_CARRIZO:
3614 		adev->mode_info.num_crtc = 3;
3615 		adev->mode_info.num_hpd = 6;
3616 		adev->mode_info.num_dig = 9;
3617 		break;
3618 	case CHIP_STONEY:
3619 		adev->mode_info.num_crtc = 2;
3620 		adev->mode_info.num_hpd = 6;
3621 		adev->mode_info.num_dig = 9;
3622 		break;
3623 	case CHIP_POLARIS11:
3624 	case CHIP_POLARIS12:
3625 		adev->mode_info.num_crtc = 5;
3626 		adev->mode_info.num_hpd = 5;
3627 		adev->mode_info.num_dig = 5;
3628 		break;
3629 	case CHIP_POLARIS10:
3630 	case CHIP_VEGAM:
3631 		adev->mode_info.num_crtc = 6;
3632 		adev->mode_info.num_hpd = 6;
3633 		adev->mode_info.num_dig = 6;
3634 		break;
3635 	case CHIP_VEGA10:
3636 	case CHIP_VEGA12:
3637 	case CHIP_VEGA20:
3638 		adev->mode_info.num_crtc = 6;
3639 		adev->mode_info.num_hpd = 6;
3640 		adev->mode_info.num_dig = 6;
3641 		break;
3642 #if defined(CONFIG_DRM_AMD_DC_DCN)
3643 	case CHIP_RAVEN:
3644 		adev->mode_info.num_crtc = 4;
3645 		adev->mode_info.num_hpd = 4;
3646 		adev->mode_info.num_dig = 4;
3647 		break;
3648 #endif
3649 	case CHIP_NAVI10:
3650 	case CHIP_NAVI12:
3651 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3652 	case CHIP_SIENNA_CICHLID:
3653 	case CHIP_NAVY_FLOUNDER:
3654 #endif
3655 		adev->mode_info.num_crtc = 6;
3656 		adev->mode_info.num_hpd = 6;
3657 		adev->mode_info.num_dig = 6;
3658 		break;
3659 	case CHIP_NAVI14:
3660 		adev->mode_info.num_crtc = 5;
3661 		adev->mode_info.num_hpd = 5;
3662 		adev->mode_info.num_dig = 5;
3663 		break;
3664 	case CHIP_RENOIR:
3665 		adev->mode_info.num_crtc = 4;
3666 		adev->mode_info.num_hpd = 4;
3667 		adev->mode_info.num_dig = 4;
3668 		break;
3669 	default:
3670 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3671 		return -EINVAL;
3672 	}
3673 
3674 	amdgpu_dm_set_irq_funcs(adev);
3675 
3676 	if (adev->mode_info.funcs == NULL)
3677 		adev->mode_info.funcs = &dm_display_funcs;
3678 
3679 	/*
3680 	 * Note: Do NOT change adev->audio_endpt_rreg and
3681 	 * adev->audio_endpt_wreg because they are initialised in
3682 	 * amdgpu_device_init()
3683 	 */
3684 #if defined(CONFIG_DEBUG_KERNEL_DC)
3685 	device_create_file(
3686 		adev_to_drm(adev)->dev,
3687 		&dev_attr_s3_debug);
3688 #endif
3689 
3690 	return 0;
3691 }
3692 
3693 static bool modeset_required(struct drm_crtc_state *crtc_state,
3694 			     struct dc_stream_state *new_stream,
3695 			     struct dc_stream_state *old_stream)
3696 {
3697 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3698 }
3699 
3700 static bool modereset_required(struct drm_crtc_state *crtc_state)
3701 {
3702 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3703 }
3704 
3705 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3706 {
3707 	drm_encoder_cleanup(encoder);
3708 	kfree(encoder);
3709 }
3710 
3711 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3712 	.destroy = amdgpu_dm_encoder_destroy,
3713 };
3714 
3715 
3716 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3717 				struct dc_scaling_info *scaling_info)
3718 {
3719 	int scale_w, scale_h;
3720 
3721 	memset(scaling_info, 0, sizeof(*scaling_info));
3722 
3723 	/* Source is in 16.16 fixed point; we ignore the fractional part for now. */
3724 	scaling_info->src_rect.x = state->src_x >> 16;
3725 	scaling_info->src_rect.y = state->src_y >> 16;
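	/*
	 * Illustrative example: src_x == 0x00028000 encodes 2.5 in 16.16
	 * fixed point; the shift above keeps only the integer part, 2.
	 */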
3726 
3727 	/*
3728 	 * For reasons we don't (yet) fully understand a non-zero
3729 	 * src_y coordinate into an NV12 buffer can cause a
3730 	 * system hang. To avoid hangs (and maybe be overly cautious)
3731 	 * let's reject both non-zero src_x and src_y.
3732 	 *
3733 	 * We currently know of only one use-case to reproduce a
3734 	 * scenario with non-zero src_x and src_y for NV12, which
3735 	 * is to gesture the YouTube Android app into full screen
3736 	 * on ChromeOS.
3737 	 */
3738 	if (state->fb &&
3739 	    state->fb->format->format == DRM_FORMAT_NV12 &&
3740 	    (scaling_info->src_rect.x != 0 ||
3741 	     scaling_info->src_rect.y != 0))
3742 		return -EINVAL;
3743 
3761 	scaling_info->src_rect.width = state->src_w >> 16;
3762 	if (scaling_info->src_rect.width == 0)
3763 		return -EINVAL;
3764 
3765 	scaling_info->src_rect.height = state->src_h >> 16;
3766 	if (scaling_info->src_rect.height == 0)
3767 		return -EINVAL;
3768 
3769 	scaling_info->dst_rect.x = state->crtc_x;
3770 	scaling_info->dst_rect.y = state->crtc_y;
3771 
3772 	if (state->crtc_w == 0)
3773 		return -EINVAL;
3774 
3775 	scaling_info->dst_rect.width = state->crtc_w;
3776 
3777 	if (state->crtc_h == 0)
3778 		return -EINVAL;
3779 
3780 	scaling_info->dst_rect.height = state->crtc_h;
3781 
3782 	/* DRM doesn't specify clipping on destination output. */
3783 	scaling_info->clip_rect = scaling_info->dst_rect;
3784 
3785 	/* TODO: Validate scaling per-format with DC plane caps */
3786 	scale_w = scaling_info->dst_rect.width * 1000 /
3787 		  scaling_info->src_rect.width;
3788 
3789 	if (scale_w < 250 || scale_w > 16000)
3790 		return -EINVAL;
3791 
3792 	scale_h = scaling_info->dst_rect.height * 1000 /
3793 		  scaling_info->src_rect.height;
3794 
3795 	if (scale_h < 250 || scale_h > 16000)
3796 		return -EINVAL;
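	/*
	 * The limits above express scale ratios in thousandths: 250 is a
	 * 0.25x (4:1) downscale and 16000 a 16x upscale. For example, a
	 * 1920-wide source scanned out 960 wide gives
	 * scale_w == 960 * 1000 / 1920 == 500, which is accepted.
	 */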
3797 
3798 	/*
3799 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3800 	 * assume reasonable defaults based on the format.
3801 	 */
3802 
3803 	return 0;
3804 }
3805 
3806 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3807 		       uint64_t *tiling_flags, bool *tmz_surface)
3808 {
3809 	struct amdgpu_bo *rbo;
3810 	int r;
3811 
3812 	if (!amdgpu_fb) {
3813 		*tiling_flags = 0;
3814 		*tmz_surface = false;
3815 		return 0;
3816 	}
3817 
3818 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3819 	r = amdgpu_bo_reserve(rbo, false);
3820 
3821 	if (unlikely(r)) {
3822 		/* Don't show error message when returning -ERESTARTSYS */
3823 		if (r != -ERESTARTSYS)
3824 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3825 		return r;
3826 	}
3827 
3828 	if (tiling_flags)
3829 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3830 
3831 	if (tmz_surface)
3832 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3833 
3834 	amdgpu_bo_unreserve(rbo);
3835 
3836 	return r;
3837 }
3838 
3839 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3840 {
3841 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3842 
3843 	return offset ? (address + offset * 256) : 0;
3844 }
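
/*
 * Illustrative example: with DCC_OFFSET_256B == 16, the DCC metadata
 * starts 16 * 256 == 4096 bytes past the surface base, so
 * get_dcc_address(addr, flags) returns addr + 0x1000.
 */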
3845 
3846 static int
3847 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3848 			  const struct amdgpu_framebuffer *afb,
3849 			  const enum surface_pixel_format format,
3850 			  const enum dc_rotation_angle rotation,
3851 			  const struct plane_size *plane_size,
3852 			  const union dc_tiling_info *tiling_info,
3853 			  const uint64_t info,
3854 			  struct dc_plane_dcc_param *dcc,
3855 			  struct dc_plane_address *address,
3856 			  bool force_disable_dcc)
3857 {
3858 	struct dc *dc = adev->dm.dc;
3859 	struct dc_dcc_surface_param input;
3860 	struct dc_surface_dcc_cap output;
3861 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3862 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3863 	uint64_t dcc_address;
3864 
3865 	memset(&input, 0, sizeof(input));
3866 	memset(&output, 0, sizeof(output));
3867 
3868 	if (force_disable_dcc)
3869 		return 0;
3870 
3871 	if (!offset)
3872 		return 0;
3873 
3874 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3875 		return 0;
3876 
3877 	if (!dc->cap_funcs.get_dcc_compression_cap)
3878 		return -EINVAL;
3879 
3880 	input.format = format;
3881 	input.surface_size.width = plane_size->surface_size.width;
3882 	input.surface_size.height = plane_size->surface_size.height;
3883 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3884 
3885 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3886 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3887 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3888 		input.scan = SCAN_DIRECTION_VERTICAL;
3889 
3890 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3891 		return -EINVAL;
3892 
3893 	if (!output.capable)
3894 		return -EINVAL;
3895 
3896 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3897 		return -EINVAL;
3898 
3899 	dcc->enable = 1;
3900 	dcc->meta_pitch =
3901 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3902 	dcc->independent_64b_blks = i64b;
3903 
3904 	dcc_address = get_dcc_address(afb->address, info);
3905 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3906 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3907 
3908 	return 0;
3909 }
3910 
3911 static int
3912 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3913 			     const struct amdgpu_framebuffer *afb,
3914 			     const enum surface_pixel_format format,
3915 			     const enum dc_rotation_angle rotation,
3916 			     const uint64_t tiling_flags,
3917 			     union dc_tiling_info *tiling_info,
3918 			     struct plane_size *plane_size,
3919 			     struct dc_plane_dcc_param *dcc,
3920 			     struct dc_plane_address *address,
3921 			     bool tmz_surface,
3922 			     bool force_disable_dcc)
3923 {
3924 	const struct drm_framebuffer *fb = &afb->base;
3925 	int ret;
3926 
3927 	memset(tiling_info, 0, sizeof(*tiling_info));
3928 	memset(plane_size, 0, sizeof(*plane_size));
3929 	memset(dcc, 0, sizeof(*dcc));
3930 	memset(address, 0, sizeof(*address));
3931 
3932 	address->tmz_surface = tmz_surface;
3933 
3934 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3935 		plane_size->surface_size.x = 0;
3936 		plane_size->surface_size.y = 0;
3937 		plane_size->surface_size.width = fb->width;
3938 		plane_size->surface_size.height = fb->height;
3939 		plane_size->surface_pitch =
3940 			fb->pitches[0] / fb->format->cpp[0];
3941 
3942 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3943 		address->grph.addr.low_part = lower_32_bits(afb->address);
3944 		address->grph.addr.high_part = upper_32_bits(afb->address);
3945 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3946 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3947 
3948 		plane_size->surface_size.x = 0;
3949 		plane_size->surface_size.y = 0;
3950 		plane_size->surface_size.width = fb->width;
3951 		plane_size->surface_size.height = fb->height;
3952 		plane_size->surface_pitch =
3953 			fb->pitches[0] / fb->format->cpp[0];
3954 
3955 		plane_size->chroma_size.x = 0;
3956 		plane_size->chroma_size.y = 0;
3957 		/* TODO: set these based on surface format */
3958 		plane_size->chroma_size.width = fb->width / 2;
3959 		plane_size->chroma_size.height = fb->height / 2;
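		/*
		 * e.g. a 1920x1080 NV12 buffer has a 2x2-subsampled chroma
		 * plane of 960x540, with its own pitch taken from
		 * fb->pitches[1] below.
		 */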
3960 
3961 		plane_size->chroma_pitch =
3962 			fb->pitches[1] / fb->format->cpp[1];
3963 
3964 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3965 		address->video_progressive.luma_addr.low_part =
3966 			lower_32_bits(afb->address);
3967 		address->video_progressive.luma_addr.high_part =
3968 			upper_32_bits(afb->address);
3969 		address->video_progressive.chroma_addr.low_part =
3970 			lower_32_bits(chroma_addr);
3971 		address->video_progressive.chroma_addr.high_part =
3972 			upper_32_bits(chroma_addr);
3973 	}
3974 
3975 	/* Fill GFX8 params */
3976 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3977 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3978 
3979 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3980 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3981 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3982 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3983 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3984 
3985 		/* XXX fix me for VI */
3986 		tiling_info->gfx8.num_banks = num_banks;
3987 		tiling_info->gfx8.array_mode =
3988 				DC_ARRAY_2D_TILED_THIN1;
3989 		tiling_info->gfx8.tile_split = tile_split;
3990 		tiling_info->gfx8.bank_width = bankw;
3991 		tiling_info->gfx8.bank_height = bankh;
3992 		tiling_info->gfx8.tile_aspect = mtaspect;
3993 		tiling_info->gfx8.tile_mode =
3994 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3995 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3996 			== DC_ARRAY_1D_TILED_THIN1) {
3997 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3998 	}
3999 
4000 	tiling_info->gfx8.pipe_config =
4001 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4002 
4003 	if (adev->asic_type == CHIP_VEGA10 ||
4004 	    adev->asic_type == CHIP_VEGA12 ||
4005 	    adev->asic_type == CHIP_VEGA20 ||
4006 	    adev->asic_type == CHIP_NAVI10 ||
4007 	    adev->asic_type == CHIP_NAVI14 ||
4008 	    adev->asic_type == CHIP_NAVI12 ||
4009 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
4010 		adev->asic_type == CHIP_SIENNA_CICHLID ||
4011 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
4012 #endif
4013 	    adev->asic_type == CHIP_RENOIR ||
4014 	    adev->asic_type == CHIP_RAVEN) {
4015 		/* Fill GFX9 params */
4016 		tiling_info->gfx9.num_pipes =
4017 			adev->gfx.config.gb_addr_config_fields.num_pipes;
4018 		tiling_info->gfx9.num_banks =
4019 			adev->gfx.config.gb_addr_config_fields.num_banks;
4020 		tiling_info->gfx9.pipe_interleave =
4021 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4022 		tiling_info->gfx9.num_shader_engines =
4023 			adev->gfx.config.gb_addr_config_fields.num_se;
4024 		tiling_info->gfx9.max_compressed_frags =
4025 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4026 		tiling_info->gfx9.num_rb_per_se =
4027 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4028 		tiling_info->gfx9.swizzle =
4029 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
4030 		tiling_info->gfx9.shaderEnable = 1;
4031 
4032 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
4033 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4034 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
4035 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4036 #endif
4037 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4038 						plane_size, tiling_info,
4039 						tiling_flags, dcc, address,
4040 						force_disable_dcc);
4041 		if (ret)
4042 			return ret;
4043 	}
4044 
4045 	return 0;
4046 }
4047 
4048 static void
4049 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4050 			       bool *per_pixel_alpha, bool *global_alpha,
4051 			       int *global_alpha_value)
4052 {
4053 	*per_pixel_alpha = false;
4054 	*global_alpha = false;
4055 	*global_alpha_value = 0xff;
4056 
4057 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4058 		return;
4059 
4060 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4061 		static const uint32_t alpha_formats[] = {
4062 			DRM_FORMAT_ARGB8888,
4063 			DRM_FORMAT_RGBA8888,
4064 			DRM_FORMAT_ABGR8888,
4065 		};
4066 		uint32_t format = plane_state->fb->format->format;
4067 		unsigned int i;
4068 
4069 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4070 			if (format == alpha_formats[i]) {
4071 				*per_pixel_alpha = true;
4072 				break;
4073 			}
4074 		}
4075 	}
4076 
4077 	if (plane_state->alpha < 0xffff) {
4078 		*global_alpha = true;
4079 		*global_alpha_value = plane_state->alpha >> 8;
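		/* e.g. a plane alpha of 0x8080 (about 50%) becomes 0x80 here */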
4080 	}
4081 }
4082 
4083 static int
4084 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4085 			    const enum surface_pixel_format format,
4086 			    enum dc_color_space *color_space)
4087 {
4088 	bool full_range;
4089 
4090 	*color_space = COLOR_SPACE_SRGB;
4091 
4092 	/* DRM color properties only affect non-RGB formats. */
4093 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4094 		return 0;
4095 
4096 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4097 
4098 	switch (plane_state->color_encoding) {
4099 	case DRM_COLOR_YCBCR_BT601:
4100 		if (full_range)
4101 			*color_space = COLOR_SPACE_YCBCR601;
4102 		else
4103 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4104 		break;
4105 
4106 	case DRM_COLOR_YCBCR_BT709:
4107 		if (full_range)
4108 			*color_space = COLOR_SPACE_YCBCR709;
4109 		else
4110 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4111 		break;
4112 
4113 	case DRM_COLOR_YCBCR_BT2020:
4114 		if (full_range)
4115 			*color_space = COLOR_SPACE_2020_YCBCR;
4116 		else
4117 			return -EINVAL;
4118 		break;
4119 
4120 	default:
4121 		return -EINVAL;
4122 	}
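
	/*
	 * e.g. an NV12 framebuffer tagged DRM_COLOR_YCBCR_BT709 with
	 * limited range maps to COLOR_SPACE_YCBCR709_LIMITED above.
	 */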
4123 
4124 	return 0;
4125 }
4126 
4127 static int
4128 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4129 			    const struct drm_plane_state *plane_state,
4130 			    const uint64_t tiling_flags,
4131 			    struct dc_plane_info *plane_info,
4132 			    struct dc_plane_address *address,
4133 			    bool tmz_surface,
4134 			    bool force_disable_dcc)
4135 {
4136 	const struct drm_framebuffer *fb = plane_state->fb;
4137 	const struct amdgpu_framebuffer *afb =
4138 		to_amdgpu_framebuffer(plane_state->fb);
4139 	struct drm_format_name_buf format_name;
4140 	int ret;
4141 
4142 	memset(plane_info, 0, sizeof(*plane_info));
4143 
4144 	switch (fb->format->format) {
4145 	case DRM_FORMAT_C8:
4146 		plane_info->format =
4147 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4148 		break;
4149 	case DRM_FORMAT_RGB565:
4150 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4151 		break;
4152 	case DRM_FORMAT_XRGB8888:
4153 	case DRM_FORMAT_ARGB8888:
4154 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4155 		break;
4156 	case DRM_FORMAT_XRGB2101010:
4157 	case DRM_FORMAT_ARGB2101010:
4158 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4159 		break;
4160 	case DRM_FORMAT_XBGR2101010:
4161 	case DRM_FORMAT_ABGR2101010:
4162 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4163 		break;
4164 	case DRM_FORMAT_XBGR8888:
4165 	case DRM_FORMAT_ABGR8888:
4166 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4167 		break;
4168 	case DRM_FORMAT_NV21:
4169 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4170 		break;
4171 	case DRM_FORMAT_NV12:
4172 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4173 		break;
4174 	case DRM_FORMAT_P010:
4175 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4176 		break;
4177 	case DRM_FORMAT_XRGB16161616F:
4178 	case DRM_FORMAT_ARGB16161616F:
4179 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4180 		break;
4181 	case DRM_FORMAT_XBGR16161616F:
4182 	case DRM_FORMAT_ABGR16161616F:
4183 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4184 		break;
4185 	default:
4186 		DRM_ERROR(
4187 			"Unsupported screen format %s\n",
4188 			drm_get_format_name(fb->format->format, &format_name));
4189 		return -EINVAL;
4190 	}
4191 
4192 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4193 	case DRM_MODE_ROTATE_0:
4194 		plane_info->rotation = ROTATION_ANGLE_0;
4195 		break;
4196 	case DRM_MODE_ROTATE_90:
4197 		plane_info->rotation = ROTATION_ANGLE_90;
4198 		break;
4199 	case DRM_MODE_ROTATE_180:
4200 		plane_info->rotation = ROTATION_ANGLE_180;
4201 		break;
4202 	case DRM_MODE_ROTATE_270:
4203 		plane_info->rotation = ROTATION_ANGLE_270;
4204 		break;
4205 	default:
4206 		plane_info->rotation = ROTATION_ANGLE_0;
4207 		break;
4208 	}
4209 
4210 	plane_info->visible = true;
4211 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4212 
4213 	plane_info->layer_index = 0;
4214 
4215 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4216 					  &plane_info->color_space);
4217 	if (ret)
4218 		return ret;
4219 
4220 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4221 					   plane_info->rotation, tiling_flags,
4222 					   &plane_info->tiling_info,
4223 					   &plane_info->plane_size,
4224 					   &plane_info->dcc, address, tmz_surface,
4225 					   force_disable_dcc);
4226 	if (ret)
4227 		return ret;
4228 
4229 	fill_blending_from_plane_state(
4230 		plane_state, &plane_info->per_pixel_alpha,
4231 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4232 
4233 	return 0;
4234 }
4235 
4236 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4237 				    struct dc_plane_state *dc_plane_state,
4238 				    struct drm_plane_state *plane_state,
4239 				    struct drm_crtc_state *crtc_state)
4240 {
4241 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4242 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4243 	struct dc_scaling_info scaling_info;
4244 	struct dc_plane_info plane_info;
4245 	int ret;
4246 	bool force_disable_dcc = false;
4247 
4248 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4249 	if (ret)
4250 		return ret;
4251 
4252 	dc_plane_state->src_rect = scaling_info.src_rect;
4253 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4254 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4255 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4256 
4257 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4258 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4259 					  dm_plane_state->tiling_flags,
4260 					  &plane_info,
4261 					  &dc_plane_state->address,
4262 					  dm_plane_state->tmz_surface,
4263 					  force_disable_dcc);
4264 	if (ret)
4265 		return ret;
4266 
4267 	dc_plane_state->format = plane_info.format;
4268 	dc_plane_state->color_space = plane_info.color_space;
4270 	dc_plane_state->plane_size = plane_info.plane_size;
4271 	dc_plane_state->rotation = plane_info.rotation;
4272 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4273 	dc_plane_state->stereo_format = plane_info.stereo_format;
4274 	dc_plane_state->tiling_info = plane_info.tiling_info;
4275 	dc_plane_state->visible = plane_info.visible;
4276 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4277 	dc_plane_state->global_alpha = plane_info.global_alpha;
4278 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4279 	dc_plane_state->dcc = plane_info.dcc;
4280 	dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
4281 
4282 	/*
4283 	 * Always set input transfer function, since plane state is refreshed
4284 	 * every time.
4285 	 */
4286 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4287 	if (ret)
4288 		return ret;
4289 
4290 	return 0;
4291 }
4292 
4293 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4294 					   const struct dm_connector_state *dm_state,
4295 					   struct dc_stream_state *stream)
4296 {
4297 	enum amdgpu_rmx_type rmx_type;
4298 
4299 	struct rect src = { 0 }; /* viewport in composition space */
4300 	struct rect dst = { 0 }; /* stream addressable area */
4301 
4302 	/* no mode. nothing to be done */
4303 	if (!mode)
4304 		return;
4305 
4306 	/* Full screen scaling by default */
4307 	src.width = mode->hdisplay;
4308 	src.height = mode->vdisplay;
4309 	dst.width = stream->timing.h_addressable;
4310 	dst.height = stream->timing.v_addressable;
4311 
4312 	if (dm_state) {
4313 		rmx_type = dm_state->scaling;
4314 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4315 			if (src.width * dst.height <
4316 					src.height * dst.width) {
4317 				/* height needs less upscaling/more downscaling */
4318 				dst.width = src.width *
4319 						dst.height / src.height;
4320 			} else {
4321 				/* width needs less upscaling/more downscaling */
4322 				dst.height = src.height *
4323 						dst.width / src.width;
4324 			}
4325 		} else if (rmx_type == RMX_CENTER) {
4326 			dst = src;
4327 		}
4328 
4329 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4330 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4331 
4332 		if (dm_state->underscan_enable) {
4333 			dst.x += dm_state->underscan_hborder / 2;
4334 			dst.y += dm_state->underscan_vborder / 2;
4335 			dst.width -= dm_state->underscan_hborder;
4336 			dst.height -= dm_state->underscan_vborder;
4337 		}
4338 	}
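
	/*
	 * Illustrative example: a 1280x1024 (5:4) source with RMX_ASPECT
	 * on a 1920x1080 timing keeps the aspect ratio: dst.width becomes
	 * 1280 * 1080 / 1024 == 1350, centered at
	 * dst.x == (1920 - 1350) / 2 == 285.
	 */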
4339 
4340 	stream->src = src;
4341 	stream->dst = dst;
4342 
4343 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4344 			dst.x, dst.y, dst.width, dst.height);
4345 
4346 }
4347 
4348 static enum dc_color_depth
4349 convert_color_depth_from_display_info(const struct drm_connector *connector,
4350 				      bool is_y420, int requested_bpc)
4351 {
4352 	uint8_t bpc;
4353 
4354 	if (is_y420) {
4355 		bpc = 8;
4356 
4357 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4358 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4359 			bpc = 16;
4360 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4361 			bpc = 12;
4362 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4363 			bpc = 10;
4364 	} else {
4365 		bpc = (uint8_t)connector->display_info.bpc;
4366 		/* Assume 8 bpc by default if no bpc is specified. */
4367 		bpc = bpc ? bpc : 8;
4368 	}
4369 
4370 	if (requested_bpc > 0) {
4371 		/*
4372 		 * Cap display bpc based on the user requested value.
4373 		 *
4374 		 * The value for state->max_bpc may not be correctly updated
4375 		 * depending on when the connector gets added to the state
4376 		 * or if this was called outside of atomic check, so it
4377 		 * can't be used directly.
4378 		 */
4379 		bpc = min_t(u8, bpc, requested_bpc);
4380 
4381 		/* Round down to the nearest even number. */
4382 		bpc = bpc - (bpc & 1);
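		/*
		 * e.g. requested_bpc == 11 on a 12 bpc sink yields
		 * min(12, 11) == 11, rounded down to 10 here.
		 */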
4383 	}
4384 
4385 	switch (bpc) {
4386 	case 0:
4387 		/*
4388 		 * Temporary workaround: DRM doesn't parse color depth for
4389 		 * EDID revisions before 1.4.
4390 		 * TODO: Fix edid parsing
4391 		 */
4392 		return COLOR_DEPTH_888;
4393 	case 6:
4394 		return COLOR_DEPTH_666;
4395 	case 8:
4396 		return COLOR_DEPTH_888;
4397 	case 10:
4398 		return COLOR_DEPTH_101010;
4399 	case 12:
4400 		return COLOR_DEPTH_121212;
4401 	case 14:
4402 		return COLOR_DEPTH_141414;
4403 	case 16:
4404 		return COLOR_DEPTH_161616;
4405 	default:
4406 		return COLOR_DEPTH_UNDEFINED;
4407 	}
4408 }
4409 
4410 static enum dc_aspect_ratio
4411 get_aspect_ratio(const struct drm_display_mode *mode_in)
4412 {
4413 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4414 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4415 }
4416 
4417 static enum dc_color_space
4418 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4419 {
4420 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4421 
4422 	switch (dc_crtc_timing->pixel_encoding)	{
4423 	case PIXEL_ENCODING_YCBCR422:
4424 	case PIXEL_ENCODING_YCBCR444:
4425 	case PIXEL_ENCODING_YCBCR420:
4426 	{
4427 		/*
4428 		 * 27.03 MHz (pix_clk_100hz == 270300) separates HDTV from
4429 		 * SDTV per the HDMI spec: above it we use YCbCr709 (e.g.
4430 		 * 720p at 74.25 MHz), below it YCbCr601.
4431 		 */
4432 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4433 			if (dc_crtc_timing->flags.Y_ONLY)
4434 				color_space =
4435 					COLOR_SPACE_YCBCR709_LIMITED;
4436 			else
4437 				color_space = COLOR_SPACE_YCBCR709;
4438 		} else {
4439 			if (dc_crtc_timing->flags.Y_ONLY)
4440 				color_space =
4441 					COLOR_SPACE_YCBCR601_LIMITED;
4442 			else
4443 				color_space = COLOR_SPACE_YCBCR601;
4444 		}
4445 
4446 	}
4447 	break;
4448 	case PIXEL_ENCODING_RGB:
4449 		color_space = COLOR_SPACE_SRGB;
4450 		break;
4451 
4452 	default:
4453 		WARN_ON(1);
4454 		break;
4455 	}
4456 
4457 	return color_space;
4458 }
4459 
4460 static bool adjust_colour_depth_from_display_info(
4461 	struct dc_crtc_timing *timing_out,
4462 	const struct drm_display_info *info)
4463 {
4464 	enum dc_color_depth depth = timing_out->display_color_depth;
4465 	int normalized_clk;
4466 	do {
4467 		normalized_clk = timing_out->pix_clk_100hz / 10;
4468 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4469 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4470 			normalized_clk /= 2;
4471 		/* Adjust the pixel clock per the HDMI spec for the colour depth. */
4472 		switch (depth) {
4473 		case COLOR_DEPTH_888:
4474 			break;
4475 		case COLOR_DEPTH_101010:
4476 			normalized_clk = (normalized_clk * 30) / 24;
4477 			break;
4478 		case COLOR_DEPTH_121212:
4479 			normalized_clk = (normalized_clk * 36) / 24;
4480 			break;
4481 		case COLOR_DEPTH_161616:
4482 			normalized_clk = (normalized_clk * 48) / 24;
4483 			break;
4484 		default:
4485 			/* The above depths are the only ones valid for HDMI. */
4486 			return false;
4487 		}
4488 		if (normalized_clk <= info->max_tmds_clock) {
4489 			timing_out->display_color_depth = depth;
4490 			return true;
4491 		}
4492 	} while (--depth > COLOR_DEPTH_666);
4493 	return false;
4494 }
4495 
4496 static void fill_stream_properties_from_drm_display_mode(
4497 	struct dc_stream_state *stream,
4498 	const struct drm_display_mode *mode_in,
4499 	const struct drm_connector *connector,
4500 	const struct drm_connector_state *connector_state,
4501 	const struct dc_stream_state *old_stream,
4502 	int requested_bpc)
4503 {
4504 	struct dc_crtc_timing *timing_out = &stream->timing;
4505 	const struct drm_display_info *info = &connector->display_info;
4506 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4507 	struct hdmi_vendor_infoframe hv_frame;
4508 	struct hdmi_avi_infoframe avi_frame;
4509 
4510 	memset(&hv_frame, 0, sizeof(hv_frame));
4511 	memset(&avi_frame, 0, sizeof(avi_frame));
4512 
4513 	timing_out->h_border_left = 0;
4514 	timing_out->h_border_right = 0;
4515 	timing_out->v_border_top = 0;
4516 	timing_out->v_border_bottom = 0;
4517 	/* TODO: un-hardcode */
4518 	if (drm_mode_is_420_only(info, mode_in)
4519 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4520 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4521 	else if (drm_mode_is_420_also(info, mode_in)
4522 			&& aconnector->force_yuv420_output)
4523 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4524 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4525 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4526 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4527 	else
4528 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4529 
4530 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4531 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4532 		connector,
4533 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4534 		requested_bpc);
4535 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4536 	timing_out->hdmi_vic = 0;
4537 
4538 	if (old_stream) {
4539 		timing_out->vic = old_stream->timing.vic;
4540 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4541 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4542 	} else {
4543 		timing_out->vic = drm_match_cea_mode(mode_in);
4544 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4545 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4546 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4547 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4548 	}
4549 
4550 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4551 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4552 		timing_out->vic = avi_frame.video_code;
4553 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4554 		timing_out->hdmi_vic = hv_frame.vic;
4555 	}
4556 
4557 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4558 	timing_out->h_total = mode_in->crtc_htotal;
4559 	timing_out->h_sync_width =
4560 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4561 	timing_out->h_front_porch =
4562 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4563 	timing_out->v_total = mode_in->crtc_vtotal;
4564 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4565 	timing_out->v_front_porch =
4566 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4567 	timing_out->v_sync_width =
4568 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4569 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
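	/* crtc_clock is in kHz; e.g. 148500 kHz becomes 1485000 in 100 Hz units */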
4570 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4571 
4572 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4573 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4574 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4575 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4576 		    drm_mode_is_420_also(info, mode_in) &&
4577 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4578 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4579 			adjust_colour_depth_from_display_info(timing_out, info);
4580 		}
4581 	}
4582 
4583 	stream->output_color_space = get_output_color_space(timing_out);
4584 }
4585 
4586 static void fill_audio_info(struct audio_info *audio_info,
4587 			    const struct drm_connector *drm_connector,
4588 			    const struct dc_sink *dc_sink)
4589 {
4590 	int i = 0;
4591 	int cea_revision = 0;
4592 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4593 
4594 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4595 	audio_info->product_id = edid_caps->product_id;
4596 
4597 	cea_revision = drm_connector->display_info.cea_rev;
4598 
4599 	strscpy(audio_info->display_name,
4600 		edid_caps->display_name,
4601 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4602 
4603 	if (cea_revision >= 3) {
4604 		audio_info->mode_count = edid_caps->audio_mode_count;
4605 
4606 		for (i = 0; i < audio_info->mode_count; ++i) {
4607 			audio_info->modes[i].format_code =
4608 					(enum audio_format_code)
4609 					(edid_caps->audio_modes[i].format_code);
4610 			audio_info->modes[i].channel_count =
4611 					edid_caps->audio_modes[i].channel_count;
4612 			audio_info->modes[i].sample_rates.all =
4613 					edid_caps->audio_modes[i].sample_rate;
4614 			audio_info->modes[i].sample_size =
4615 					edid_caps->audio_modes[i].sample_size;
4616 		}
4617 	}
4618 
4619 	audio_info->flags.all = edid_caps->speaker_flags;
4620 
4621 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4622 	if (drm_connector->latency_present[0]) {
4623 		audio_info->video_latency = drm_connector->video_latency[0];
4624 		audio_info->audio_latency = drm_connector->audio_latency[0];
4625 	}
4626 
4627 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4629 }
4630 
4631 static void
4632 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4633 				      struct drm_display_mode *dst_mode)
4634 {
4635 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4636 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4637 	dst_mode->crtc_clock = src_mode->crtc_clock;
4638 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4639 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4640 	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4641 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4642 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4643 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4644 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4645 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4646 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4647 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4648 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4649 }
4650 
4651 static void
4652 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4653 					const struct drm_display_mode *native_mode,
4654 					bool scale_enabled)
4655 {
4656 	if (scale_enabled) {
4657 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4658 	} else if (native_mode->clock == drm_mode->clock &&
4659 			native_mode->htotal == drm_mode->htotal &&
4660 			native_mode->vtotal == drm_mode->vtotal) {
4661 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4662 	} else {
4663 		/* no scaling and no amdgpu-inserted mode: nothing to patch */
4664 	}
4665 }
4666 
4667 static struct dc_sink *
4668 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4669 {
4670 	struct dc_sink_init_data sink_init_data = { 0 };
4671 	struct dc_sink *sink = NULL;
4672 	sink_init_data.link = aconnector->dc_link;
4673 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4674 
4675 	sink = dc_sink_create(&sink_init_data);
4676 	if (!sink) {
4677 		DRM_ERROR("Failed to create sink!\n");
4678 		return NULL;
4679 	}
4680 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4681 
4682 	return sink;
4683 }
4684 
4685 static void set_multisync_trigger_params(
4686 		struct dc_stream_state *stream)
4687 {
4688 	if (stream->triggered_crtc_reset.enabled) {
4689 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4690 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4691 	}
4692 }
4693 
4694 static void set_master_stream(struct dc_stream_state *stream_set[],
4695 			      int stream_count)
4696 {
4697 	int j, highest_rfr = 0, master_stream = 0;
4698 
4699 	for (j = 0; j < stream_count; j++) {
4700 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4701 			int refresh_rate = 0;
4702 
4703 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4704 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
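			/*
			 * e.g. 1080p at 148.5 MHz: (1485000 * 100) /
			 * (2200 * 1125) == 60 Hz; integer division rounds
			 * fractional refresh rates down.
			 */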
4705 			if (refresh_rate > highest_rfr) {
4706 				highest_rfr = refresh_rate;
4707 				master_stream = j;
4708 			}
4709 		}
4710 	}
4711 	for (j = 0; j < stream_count; j++) {
4712 		if (stream_set[j])
4713 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4714 	}
4715 }
4716 
4717 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4718 {
4719 	int i = 0;
4720 
4721 	if (context->stream_count < 2)
4722 		return;
4723 	for (i = 0; i < context->stream_count; i++) {
4724 		if (!context->streams[i])
4725 			continue;
4726 		/*
4727 		 * TODO: add a function to read AMD VSDB bits and set
4728 		 * crtc_sync_master.multi_sync_enabled flag
4729 		 * For now it's set to false
4730 		 */
4731 		set_multisync_trigger_params(context->streams[i]);
4732 	}
4733 	set_master_stream(context->streams, context->stream_count);
4734 }
4735 
4736 static struct dc_stream_state *
4737 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4738 		       const struct drm_display_mode *drm_mode,
4739 		       const struct dm_connector_state *dm_state,
4740 		       const struct dc_stream_state *old_stream,
4741 		       int requested_bpc)
4742 {
4743 	struct drm_display_mode *preferred_mode = NULL;
4744 	struct drm_connector *drm_connector;
4745 	const struct drm_connector_state *con_state =
4746 		dm_state ? &dm_state->base : NULL;
4747 	struct dc_stream_state *stream = NULL;
4748 	struct drm_display_mode mode = *drm_mode;
4749 	bool native_mode_found = false;
4750 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4751 	int mode_refresh;
4752 	int preferred_refresh = 0;
4753 #if defined(CONFIG_DRM_AMD_DC_DCN)
4754 	struct dsc_dec_dpcd_caps dsc_caps;
4755 #endif
4756 	uint32_t link_bandwidth_kbps;
4757 
4758 	struct dc_sink *sink = NULL;
4759 	if (aconnector == NULL) {
4760 		DRM_ERROR("aconnector is NULL!\n");
4761 		return stream;
4762 	}
4763 
4764 	drm_connector = &aconnector->base;
4765 
4766 	if (!aconnector->dc_sink) {
4767 		sink = create_fake_sink(aconnector);
4768 		if (!sink)
4769 			return stream;
4770 	} else {
4771 		sink = aconnector->dc_sink;
4772 		dc_sink_retain(sink);
4773 	}
4774 
4775 	stream = dc_create_stream_for_sink(sink);
4776 
4777 	if (stream == NULL) {
4778 		DRM_ERROR("Failed to create stream for sink!\n");
4779 		goto finish;
4780 	}
4781 
4782 	stream->dm_stream_context = aconnector;
4783 
4784 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4785 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4786 
4787 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4788 		/* Search for preferred mode */
4789 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4790 			native_mode_found = true;
4791 			break;
4792 		}
4793 	}
4794 	if (!native_mode_found)
4795 		preferred_mode = list_first_entry_or_null(
4796 				&aconnector->base.modes,
4797 				struct drm_display_mode,
4798 				head);
4799 
4800 	mode_refresh = drm_mode_vrefresh(&mode);
4801 
4802 	if (preferred_mode == NULL) {
4803 		/*
4804 		 * This may not be an error, the use case is when we have no
4805 		 * usermode calls to reset and set mode upon hotplug. In this
4806 		 * case, we call set mode ourselves to restore the previous mode
4807 		 * and the modelist may not be filled in in time.
4808 		 */
4809 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4810 	} else {
4811 		decide_crtc_timing_for_drm_display_mode(
4812 				&mode, preferred_mode,
4813 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4814 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4815 	}
4816 
4817 	if (!dm_state)
4818 		drm_mode_set_crtcinfo(&mode, 0);
4819 
4820 	/*
4821 	 * If scaling is enabled and the refresh rate didn't change,
4822 	 * we copy the vic and polarities of the old timings.
4823 	 */
4824 	if (!scale || mode_refresh != preferred_refresh)
4825 		fill_stream_properties_from_drm_display_mode(stream,
4826 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4827 	else
4828 		fill_stream_properties_from_drm_display_mode(stream,
4829 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4830 
4831 	stream->timing.flags.DSC = 0;
4832 
4833 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4834 #if defined(CONFIG_DRM_AMD_DC_DCN)
4835 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4836 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4837 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4838 				      &dsc_caps);
4839 #endif
4840 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4841 							     dc_link_get_link_cap(aconnector->dc_link));
4842 
4843 #if defined(CONFIG_DRM_AMD_DC_DCN)
4844 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4845 			/* Set DSC policy according to dsc_clock_en */
4846 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4847 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4848 
4849 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4850 						  &dsc_caps,
4851 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4852 						  link_bandwidth_kbps,
4853 						  &stream->timing,
4854 						  &stream->timing.dsc_cfg))
4855 				stream->timing.flags.DSC = 1;
4856 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4857 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4858 				stream->timing.flags.DSC = 1;
4859 
4860 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4861 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4862 
4863 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4864 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4865 
4866 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4867 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4868 		}
4869 #endif
4870 	}
4871 
4872 	update_stream_scaling_settings(&mode, dm_state, stream);
4873 
4874 	fill_audio_info(
4875 		&stream->audio_info,
4876 		drm_connector,
4877 		sink);
4878 
4879 	update_stream_signal(stream, sink);
4880 
4881 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4882 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4883 
4884 	if (stream->link->psr_settings.psr_feature_enabled) {
4885 		/*
4886 		 * Decide whether the stream supports VSC SDP colorimetry
4887 		 * before building the VSC info packet.
4888 		 */
4889 		stream->use_vsc_sdp_for_colorimetry = false;
4890 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4891 			stream->use_vsc_sdp_for_colorimetry =
4892 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4893 		} else {
4894 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4895 				stream->use_vsc_sdp_for_colorimetry = true;
4896 		}
4897 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4898 	}
4899 finish:
4900 	dc_sink_release(sink);
4901 
4902 	return stream;
4903 }
4904 
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4906 {
4907 	drm_crtc_cleanup(crtc);
4908 	kfree(crtc);
4909 }
4910 
static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4912 				  struct drm_crtc_state *state)
4913 {
4914 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4915 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4917 	if (cur->stream)
4918 		dc_stream_release(cur->stream);
4919 
4921 	__drm_atomic_helper_crtc_destroy_state(state);
4922 
4924 	kfree(state);
4925 }
4926 
static void dm_crtc_reset_state(struct drm_crtc *crtc)
4928 {
4929 	struct dm_crtc_state *state;
4930 
4931 	if (crtc->state)
4932 		dm_crtc_destroy_state(crtc, crtc->state);
4933 
4934 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4935 	if (WARN_ON(!state))
4936 		return;
4937 
4938 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4939 }
4940 
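/*
 * Duplicate the software CRTC state. dc_stream_state is reference counted,
 * so the duplicate shares the stream with the current state until a modeset
 * replaces it.
 */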
4941 static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
4943 {
4944 	struct dm_crtc_state *state, *cur;
4945 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
4950 
4951 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4952 	if (!state)
4953 		return NULL;
4954 
4955 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4956 
4957 	if (cur->stream) {
4958 		state->stream = cur->stream;
4959 		dc_stream_retain(state->stream);
4960 	}
4961 
4962 	state->active_planes = cur->active_planes;
4963 	state->vrr_infopacket = cur->vrr_infopacket;
4964 	state->abm_level = cur->abm_level;
4965 	state->vrr_supported = cur->vrr_supported;
4966 	state->freesync_config = cur->freesync_config;
4967 	state->crc_src = cur->crc_src;
4968 	state->cm_has_degamma = cur->cm_has_degamma;
4969 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4970 
	/* TODO: Duplicate dc_stream after the stream object is flattened */
4972 
4973 	return &state->base;
4974 }
4975 
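/*
 * Enable or disable the VUPDATE interrupt on the OTG instance backing this
 * CRTC. Returns 0 on success, or -EBUSY if DC rejects the request.
 */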
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4977 {
4978 	enum dc_irq_source irq_source;
4979 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4980 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4981 	int rc;
4982 
4983 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4984 
4985 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4986 
4987 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4988 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4989 	return rc;
4990 }
4991 
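/*
 * Enable or disable the VBLANK interrupt for this CRTC. The VUPDATE
 * interrupt is only kept enabled alongside it while VRR is active, as that
 * is the only case that needs it.
 */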
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4993 {
4994 	enum dc_irq_source irq_source;
4995 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4996 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4997 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4998 	int rc = 0;
4999 
5000 	if (enable) {
5001 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5002 		if (amdgpu_dm_vrr_active(acrtc_state))
5003 			rc = dm_set_vupdate_irq(crtc, true);
5004 	} else {
5005 		/* vblank irq off -> vupdate irq off */
5006 		rc = dm_set_vupdate_irq(crtc, false);
5007 	}
5008 
5009 	if (rc)
5010 		return rc;
5011 
5012 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5013 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5014 }
5015 
static int dm_enable_vblank(struct drm_crtc *crtc)
5017 {
5018 	return dm_set_vblank(crtc, true);
5019 }
5020 
static void dm_disable_vblank(struct drm_crtc *crtc)
5022 {
5023 	dm_set_vblank(crtc, false);
5024 }
5025 
/* Only the options currently available for the driver are implemented */
5027 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5028 	.reset = dm_crtc_reset_state,
5029 	.destroy = amdgpu_dm_crtc_destroy,
5030 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5031 	.set_config = drm_atomic_helper_set_config,
5032 	.page_flip = drm_atomic_helper_page_flip,
5033 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5034 	.atomic_destroy_state = dm_crtc_destroy_state,
5035 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5036 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5037 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5038 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5039 	.enable_vblank = dm_enable_vblank,
5040 	.disable_vblank = dm_disable_vblank,
5041 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5042 };
5043 
5044 static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5046 {
5047 	bool connected;
5048 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5049 
5050 	/*
5051 	 * Notes:
	 * 1. This interface is NOT called in the context of an HPD IRQ.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 * which makes it a bad place for *any* MST-related activity.
5055 	 */
5056 
5057 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5058 	    !aconnector->fake_enable)
5059 		connected = (aconnector->dc_sink != NULL);
5060 	else
5061 		connected = (aconnector->base.force == DRM_FORCE_ON);
5062 
5063 	update_subconnector_property(aconnector);
5064 
5065 	return (connected ? connector_status_connected :
5066 			connector_status_disconnected);
5067 }
5068 
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5070 					    struct drm_connector_state *connector_state,
5071 					    struct drm_property *property,
5072 					    uint64_t val)
5073 {
5074 	struct drm_device *dev = connector->dev;
5075 	struct amdgpu_device *adev = drm_to_adev(dev);
5076 	struct dm_connector_state *dm_old_state =
5077 		to_dm_connector_state(connector->state);
5078 	struct dm_connector_state *dm_new_state =
5079 		to_dm_connector_state(connector_state);
5080 
5081 	int ret = -EINVAL;
5082 
5083 	if (property == dev->mode_config.scaling_mode_property) {
5084 		enum amdgpu_rmx_type rmx_type;
5085 
5086 		switch (val) {
5087 		case DRM_MODE_SCALE_CENTER:
5088 			rmx_type = RMX_CENTER;
5089 			break;
5090 		case DRM_MODE_SCALE_ASPECT:
5091 			rmx_type = RMX_ASPECT;
5092 			break;
5093 		case DRM_MODE_SCALE_FULLSCREEN:
5094 			rmx_type = RMX_FULL;
5095 			break;
5096 		case DRM_MODE_SCALE_NONE:
5097 		default:
5098 			rmx_type = RMX_OFF;
5099 			break;
5100 		}
5101 
5102 		if (dm_old_state->scaling == rmx_type)
5103 			return 0;
5104 
5105 		dm_new_state->scaling = rmx_type;
5106 		ret = 0;
5107 	} else if (property == adev->mode_info.underscan_hborder_property) {
5108 		dm_new_state->underscan_hborder = val;
5109 		ret = 0;
5110 	} else if (property == adev->mode_info.underscan_vborder_property) {
5111 		dm_new_state->underscan_vborder = val;
5112 		ret = 0;
5113 	} else if (property == adev->mode_info.underscan_property) {
5114 		dm_new_state->underscan_enable = val;
5115 		ret = 0;
5116 	} else if (property == adev->mode_info.abm_level_property) {
5117 		dm_new_state->abm_level = val;
5118 		ret = 0;
5119 	}
5120 
5121 	return ret;
5122 }
5123 
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5125 					    const struct drm_connector_state *state,
5126 					    struct drm_property *property,
5127 					    uint64_t *val)
5128 {
5129 	struct drm_device *dev = connector->dev;
5130 	struct amdgpu_device *adev = drm_to_adev(dev);
5131 	struct dm_connector_state *dm_state =
5132 		to_dm_connector_state(state);
5133 	int ret = -EINVAL;
5134 
5135 	if (property == dev->mode_config.scaling_mode_property) {
5136 		switch (dm_state->scaling) {
5137 		case RMX_CENTER:
5138 			*val = DRM_MODE_SCALE_CENTER;
5139 			break;
5140 		case RMX_ASPECT:
5141 			*val = DRM_MODE_SCALE_ASPECT;
5142 			break;
5143 		case RMX_FULL:
5144 			*val = DRM_MODE_SCALE_FULLSCREEN;
5145 			break;
5146 		case RMX_OFF:
5147 		default:
5148 			*val = DRM_MODE_SCALE_NONE;
5149 			break;
5150 		}
5151 		ret = 0;
5152 	} else if (property == adev->mode_info.underscan_hborder_property) {
5153 		*val = dm_state->underscan_hborder;
5154 		ret = 0;
5155 	} else if (property == adev->mode_info.underscan_vborder_property) {
5156 		*val = dm_state->underscan_vborder;
5157 		ret = 0;
5158 	} else if (property == adev->mode_info.underscan_property) {
5159 		*val = dm_state->underscan_enable;
5160 		ret = 0;
5161 	} else if (property == adev->mode_info.abm_level_property) {
5162 		*val = dm_state->abm_level;
5163 		ret = 0;
5164 	}
5165 
5166 	return ret;
5167 }
5168 
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5170 {
5171 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5172 
5173 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5174 }
5175 
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5177 {
5178 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5179 	const struct dc_link *link = aconnector->dc_link;
5180 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5181 	struct amdgpu_display_manager *dm = &adev->dm;
5182 
5183 	/*
	 * Only call this if mst_mgr was initialized, since that's not done
5185 	 * for all connector types.
5186 	 */
5187 	if (aconnector->mst_mgr.dev)
5188 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5189 
5190 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5191 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5192 
5193 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5194 	    link->type != dc_connection_none &&
5195 	    dm->backlight_dev) {
5196 		backlight_device_unregister(dm->backlight_dev);
5197 		dm->backlight_dev = NULL;
5198 	}
5199 #endif
5200 
5201 	if (aconnector->dc_em_sink)
5202 		dc_sink_release(aconnector->dc_em_sink);
5203 	aconnector->dc_em_sink = NULL;
5204 	if (aconnector->dc_sink)
5205 		dc_sink_release(aconnector->dc_sink);
5206 	aconnector->dc_sink = NULL;
5207 
5208 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5209 	drm_connector_unregister(connector);
5210 	drm_connector_cleanup(connector);
5211 	if (aconnector->i2c) {
5212 		i2c_del_adapter(&aconnector->i2c->base);
5213 		kfree(aconnector->i2c);
5214 	}
5215 	kfree(aconnector->dm_dp_aux.aux.name);
5216 
5217 	kfree(connector);
5218 }
5219 
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5221 {
5222 	struct dm_connector_state *state =
5223 		to_dm_connector_state(connector->state);
5224 
5225 	if (connector->state)
5226 		__drm_atomic_helper_connector_destroy_state(connector->state);
5227 
5228 	kfree(state);
5229 
5230 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5231 
5232 	if (state) {
5233 		state->scaling = RMX_OFF;
5234 		state->underscan_enable = false;
5235 		state->underscan_hborder = 0;
5236 		state->underscan_vborder = 0;
5237 		state->base.max_requested_bpc = 8;
5238 		state->vcpi_slots = 0;
5239 		state->pbn = 0;
5240 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5241 			state->abm_level = amdgpu_dm_abm_level;
5242 
5243 		__drm_atomic_helper_connector_reset(connector, &state->base);
5244 	}
5245 }
5246 
5247 struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5249 {
5250 	struct dm_connector_state *state =
5251 		to_dm_connector_state(connector->state);
5252 
5253 	struct dm_connector_state *new_state =
5254 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5255 
5256 	if (!new_state)
5257 		return NULL;
5258 
5259 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5260 
5261 	new_state->freesync_capable = state->freesync_capable;
5262 	new_state->abm_level = state->abm_level;
5263 	new_state->scaling = state->scaling;
5264 	new_state->underscan_enable = state->underscan_enable;
5265 	new_state->underscan_hborder = state->underscan_hborder;
5266 	new_state->underscan_vborder = state->underscan_vborder;
5267 	new_state->vcpi_slots = state->vcpi_slots;
5268 	new_state->pbn = state->pbn;
5269 	return &new_state->base;
5270 }
5271 
5272 static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
5274 {
5275 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5276 		to_amdgpu_dm_connector(connector);
5277 	int r;
5278 
5279 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5280 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5281 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5282 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5283 		if (r)
5284 			return r;
5285 	}
5286 
5287 #if defined(CONFIG_DEBUG_FS)
5288 	connector_debugfs_init(amdgpu_dm_connector);
5289 #endif
5290 
5291 	return 0;
5292 }
5293 
5294 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5295 	.reset = amdgpu_dm_connector_funcs_reset,
5296 	.detect = amdgpu_dm_connector_detect,
5297 	.fill_modes = drm_helper_probe_single_connector_modes,
5298 	.destroy = amdgpu_dm_connector_destroy,
5299 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5300 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5301 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5302 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5303 	.late_register = amdgpu_dm_connector_late_register,
5304 	.early_unregister = amdgpu_dm_connector_unregister
5305 };
5306 
static int get_modes(struct drm_connector *connector)
5308 {
5309 	return amdgpu_dm_connector_get_modes(connector);
5310 }
5311 
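/*
 * Create an emulated (virtual) sink from the connector's EDID override blob
 * so a forced connector can report modes without an attached display.
 */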
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5313 {
5314 	struct dc_sink_init_data init_params = {
5315 			.link = aconnector->dc_link,
5316 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5317 	};
5318 	struct edid *edid;
5319 
5320 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5322 				aconnector->base.name);
5323 
5324 		aconnector->base.force = DRM_FORCE_OFF;
5325 		aconnector->base.override_edid = false;
5326 		return;
5327 	}
5328 
5329 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5330 
5331 	aconnector->edid = edid;
5332 
5333 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5334 		aconnector->dc_link,
5335 		(uint8_t *)edid,
5336 		(edid->extensions + 1) * EDID_LENGTH,
5337 		&init_params);
5338 
5339 	if (aconnector->base.force == DRM_FORCE_ON) {
5340 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5341 		aconnector->dc_link->local_sink :
5342 		aconnector->dc_em_sink;
5343 		dc_sink_retain(aconnector->dc_sink);
5344 	}
5345 }
5346 
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5348 {
5349 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5350 
5351 	/*
	 * In case of a headless boot with force-on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
5354 	 */
5355 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5356 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5357 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5358 	}
5359 
5361 	aconnector->base.override_edid = true;
5362 	create_eml_sink(aconnector);
5363 }
5364 
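/*
 * Create a stream for the requested mode and validate it with DC. On
 * validation failure, retry at progressively lower color depths (down to
 * 6 bpc), and for encoder validation failures retry once more with
 * YCbCr420 output forced.
 */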
5365 static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5367 				const struct drm_display_mode *drm_mode,
5368 				const struct dm_connector_state *dm_state,
5369 				const struct dc_stream_state *old_stream)
5370 {
5371 	struct drm_connector *connector = &aconnector->base;
5372 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5373 	struct dc_stream_state *stream;
5374 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5375 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5376 	enum dc_status dc_result = DC_OK;
5377 
5378 	do {
5379 		stream = create_stream_for_sink(aconnector, drm_mode,
5380 						dm_state, old_stream,
5381 						requested_bpc);
5382 		if (stream == NULL) {
5383 			DRM_ERROR("Failed to create stream for sink!\n");
5384 			break;
5385 		}
5386 
5387 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5388 
5389 		if (dc_result != DC_OK) {
5390 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5391 				      drm_mode->hdisplay,
5392 				      drm_mode->vdisplay,
5393 				      drm_mode->clock,
5394 				      dc_result,
5395 				      dc_status_to_str(dc_result));
5396 
5397 			dc_stream_release(stream);
5398 			stream = NULL;
5399 			requested_bpc -= 2; /* lower bpc to retry validation */
5400 		}
5401 
5402 	} while (stream == NULL && requested_bpc >= 6);
5403 
5404 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5405 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5406 
5407 		aconnector->force_yuv420_output = true;
5408 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
5409 						dm_state, old_stream);
5410 		aconnector->force_yuv420_output = false;
5411 	}
5412 
5413 	return stream;
5414 }
5415 
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5417 				   struct drm_display_mode *mode)
5418 {
5419 	int result = MODE_ERROR;
5420 	struct dc_sink *dc_sink;
5421 	/* TODO: Unhardcode stream count */
5422 	struct dc_stream_state *stream;
5423 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5424 
5425 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5426 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5427 		return result;
5428 
5429 	/*
	 * Only run this the first time mode_valid is called, to initialize
5431 	 * EDID mgmt
5432 	 */
5433 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5434 		!aconnector->dc_em_sink)
5435 		handle_edid_mgmt(aconnector);
5436 
5437 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5438 
5439 	if (dc_sink == NULL) {
5440 		DRM_ERROR("dc_sink is NULL!\n");
5441 		goto fail;
5442 	}
5443 
5444 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5445 	if (stream) {
5446 		dc_stream_release(stream);
5447 		result = MODE_OK;
5448 	}
5449 
5450 fail:
	/* TODO: error handling */
5452 	return result;
5453 }
5454 
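/*
 * Pack the connector's HDR output metadata into a DC info packet: an HDMI
 * Dynamic Range and Mastering infoframe for HDMI, or the equivalent SDP for
 * DP/eDP. The payload is a fixed 26 bytes plus a 4 byte header.
 */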
static int fill_hdr_info_packet(const struct drm_connector_state *state,
5456 				struct dc_info_packet *out)
5457 {
5458 	struct hdmi_drm_infoframe frame;
5459 	unsigned char buf[30]; /* 26 + 4 */
5460 	ssize_t len;
5461 	int ret, i;
5462 
5463 	memset(out, 0, sizeof(*out));
5464 
5465 	if (!state->hdr_output_metadata)
5466 		return 0;
5467 
5468 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5469 	if (ret)
5470 		return ret;
5471 
5472 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5473 	if (len < 0)
5474 		return (int)len;
5475 
5476 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5477 	if (len != 30)
5478 		return -EINVAL;
5479 
5480 	/* Prepare the infopacket for DC. */
5481 	switch (state->connector->connector_type) {
5482 	case DRM_MODE_CONNECTOR_HDMIA:
5483 		out->hb0 = 0x87; /* type */
5484 		out->hb1 = 0x01; /* version */
5485 		out->hb2 = 0x1A; /* length */
5486 		out->sb[0] = buf[3]; /* checksum */
5487 		i = 1;
5488 		break;
5489 
5490 	case DRM_MODE_CONNECTOR_DisplayPort:
5491 	case DRM_MODE_CONNECTOR_eDP:
5492 		out->hb0 = 0x00; /* sdp id, zero */
5493 		out->hb1 = 0x87; /* type */
5494 		out->hb2 = 0x1D; /* payload len - 1 */
5495 		out->hb3 = (0x13 << 2); /* sdp version */
5496 		out->sb[0] = 0x01; /* version */
5497 		out->sb[1] = 0x1A; /* length */
5498 		i = 2;
5499 		break;
5500 
5501 	default:
5502 		return -EINVAL;
5503 	}
5504 
5505 	memcpy(&out->sb[i], &buf[4], 26);
5506 	out->valid = true;
5507 
5508 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5509 		       sizeof(out->sb), false);
5510 
5511 	return 0;
5512 }
5513 
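/*
 * Check whether the HDR metadata blob actually changed between the old and
 * new connector state, comparing blob contents when both are present.
 */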
5514 static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
5516 			  const struct drm_connector_state *new_state)
5517 {
5518 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5519 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5520 
5521 	if (old_blob != new_blob) {
5522 		if (old_blob && new_blob &&
5523 		    old_blob->length == new_blob->length)
5524 			return memcmp(old_blob->data, new_blob->data,
5525 				      old_blob->length);
5526 
5527 		return true;
5528 	}
5529 
5530 	return false;
5531 }
5532 
5533 static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5535 				 struct drm_atomic_state *state)
5536 {
5537 	struct drm_connector_state *new_con_state =
5538 		drm_atomic_get_new_connector_state(state, conn);
5539 	struct drm_connector_state *old_con_state =
5540 		drm_atomic_get_old_connector_state(state, conn);
5541 	struct drm_crtc *crtc = new_con_state->crtc;
5542 	struct drm_crtc_state *new_crtc_state;
5543 	int ret;
5544 
5545 	if (!crtc)
5546 		return 0;
5547 
5548 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5549 		struct dc_info_packet hdr_infopacket;
5550 
5551 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5552 		if (ret)
5553 			return ret;
5554 
5555 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5556 		if (IS_ERR(new_crtc_state))
5557 			return PTR_ERR(new_crtc_state);
5558 
5559 		/*
5560 		 * DC considers the stream backends changed if the
5561 		 * static metadata changes. Forcing the modeset also
5562 		 * gives a simple way for userspace to switch from
5563 		 * 8bpc to 10bpc when setting the metadata to enter
5564 		 * or exit HDR.
5565 		 *
5566 		 * Changing the static metadata after it's been
5567 		 * set is permissible, however. So only force a
5568 		 * modeset if we're entering or exiting HDR.
5569 		 */
5570 		new_crtc_state->mode_changed =
5571 			!old_con_state->hdr_output_metadata ||
5572 			!new_con_state->hdr_output_metadata;
5573 	}
5574 
5575 	return 0;
5576 }
5577 
5578 static const struct drm_connector_helper_funcs
5579 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged in fbcon mode, its higher
	 * resolution modes are filtered out by drm_mode_validate_size() and go
	 * missing after the user starts lightdm. So we need to renew the mode
	 * list in the get_modes callback, not just return the mode count.
	 */
5586 	.get_modes = get_modes,
5587 	.mode_valid = amdgpu_dm_connector_mode_valid,
5588 	.atomic_check = amdgpu_dm_connector_atomic_check,
5589 };
5590 
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5592 {
5593 }
5594 
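/*
 * Count the non-cursor planes that will be enabled on the CRTC. Planes with
 * no new state in the atomic commit previously passed validation and are
 * counted as still enabled.
 */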
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5596 {
5597 	struct drm_atomic_state *state = new_crtc_state->state;
5598 	struct drm_plane *plane;
5599 	int num_active = 0;
5600 
5601 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5602 		struct drm_plane_state *new_plane_state;
5603 
5604 		/* Cursor planes are "fake". */
5605 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5606 			continue;
5607 
5608 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5609 
5610 		if (!new_plane_state) {
5611 			/*
			 * The plane is enabled on the CRTC and hasn't changed
5613 			 * state. This means that it previously passed
5614 			 * validation and is therefore enabled.
5615 			 */
5616 			num_active += 1;
5617 			continue;
5618 		}
5619 
5620 		/* We need a framebuffer to be considered enabled. */
5621 		num_active += (new_plane_state->fb != NULL);
5622 	}
5623 
5624 	return num_active;
5625 }
5626 
static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5628 					 struct drm_crtc_state *new_crtc_state)
5629 {
5630 	struct dm_crtc_state *dm_new_crtc_state =
5631 		to_dm_crtc_state(new_crtc_state);
5632 
5633 	dm_new_crtc_state->active_planes = 0;
5634 
5635 	if (!dm_new_crtc_state->stream)
5636 		return;
5637 
5638 	dm_new_crtc_state->active_planes =
5639 		count_crtc_active_planes(new_crtc_state);
5640 }
5641 
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5643 				       struct drm_crtc_state *state)
5644 {
5645 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5646 	struct dc *dc = adev->dm.dc;
5647 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5648 	int ret = -EINVAL;
5649 
5650 	dm_update_crtc_active_planes(crtc, state);
5651 
5652 	if (unlikely(!dm_crtc_state->stream &&
5653 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5654 		WARN_ON(1);
5655 		return ret;
5656 	}
5657 
5658 	/*
5659 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5660 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5661 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5662 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5663 	 */
5664 	if (state->enable &&
5665 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5666 		return -EINVAL;
5667 
5668 	/* In some use cases, like reset, no stream is attached */
5669 	if (!dm_crtc_state->stream)
5670 		return 0;
5671 
5672 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5673 		return 0;
5674 
5675 	return ret;
5676 }
5677 
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5679 				      const struct drm_display_mode *mode,
5680 				      struct drm_display_mode *adjusted_mode)
5681 {
5682 	return true;
5683 }
5684 
5685 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5686 	.disable = dm_crtc_helper_disable,
5687 	.atomic_check = dm_crtc_helper_atomic_check,
5688 	.mode_fixup = dm_crtc_helper_mode_fixup,
5689 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5690 };
5691 
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5693 {
5694 
5695 }
5696 
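/* Translate a DC color depth into bits per component, or 0 if unknown. */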
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
5717 
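/*
 * For MST connectors, compute the PBN required by the adjusted mode and
 * color depth, then atomically reserve VCPI slots for it in the topology
 * manager's state.
 */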
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5719 					  struct drm_crtc_state *crtc_state,
5720 					  struct drm_connector_state *conn_state)
5721 {
5722 	struct drm_atomic_state *state = crtc_state->state;
5723 	struct drm_connector *connector = conn_state->connector;
5724 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5725 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5726 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5727 	struct drm_dp_mst_topology_mgr *mst_mgr;
5728 	struct drm_dp_mst_port *mst_port;
5729 	enum dc_color_depth color_depth;
5730 	int clock, bpp = 0;
5731 	bool is_y420 = false;
5732 
5733 	if (!aconnector->port || !aconnector->dc_sink)
5734 		return 0;
5735 
5736 	mst_port = aconnector->port;
5737 	mst_mgr = &aconnector->mst_port->mst_mgr;
5738 
5739 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5740 		return 0;
5741 
5742 	if (!state->duplicated) {
5743 		int max_bpc = conn_state->max_requested_bpc;
5744 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5745 				aconnector->force_yuv420_output;
5746 		color_depth = convert_color_depth_from_display_info(connector,
5747 								    is_y420,
5748 								    max_bpc);
5749 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5750 		clock = adjusted_mode->clock;
5751 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5752 	}
5753 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5754 									   mst_mgr,
5755 									   mst_port,
5756 									   dm_new_connector_state->pbn,
5757 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5758 	if (dm_new_connector_state->vcpi_slots < 0) {
5759 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5760 		return dm_new_connector_state->vcpi_slots;
5761 	}
5762 	return 0;
5763 }
5764 
5765 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5766 	.disable = dm_encoder_helper_disable,
5767 	.atomic_check = dm_encoder_helper_atomic_check
5768 };
5769 
5770 #if defined(CONFIG_DRM_AMD_DC_DCN)
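/*
 * Recompute PBN and VCPI slots for MST streams once DSC enablement is
 * known, since the compressed bits-per-pixel changes the bandwidth each
 * stream consumes.
 */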
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5772 					    struct dc_state *dc_state)
5773 {
5774 	struct dc_stream_state *stream = NULL;
5775 	struct drm_connector *connector;
5776 	struct drm_connector_state *new_con_state, *old_con_state;
5777 	struct amdgpu_dm_connector *aconnector;
5778 	struct dm_connector_state *dm_conn_state;
5779 	int i, j, clock, bpp;
5780 	int vcpi, pbn_div, pbn = 0;
5781 
5782 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5783 
5784 		aconnector = to_amdgpu_dm_connector(connector);
5785 
5786 		if (!aconnector->port)
5787 			continue;
5788 
5789 		if (!new_con_state || !new_con_state->crtc)
5790 			continue;
5791 
5792 		dm_conn_state = to_dm_connector_state(new_con_state);
5793 
5794 		for (j = 0; j < dc_state->stream_count; j++) {
5795 			stream = dc_state->streams[j];
5796 			if (!stream)
5797 				continue;
5798 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5800 				break;
5801 
5802 			stream = NULL;
5803 		}
5804 
5805 		if (!stream)
5806 			continue;
5807 
5808 		if (stream->timing.flags.DSC != 1) {
5809 			drm_dp_mst_atomic_enable_dsc(state,
5810 						     aconnector->port,
5811 						     dm_conn_state->pbn,
5812 						     0,
5813 						     false);
5814 			continue;
5815 		}
5816 
5817 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5818 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5819 		clock = stream->timing.pix_clk_100hz / 10;
5820 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5821 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5822 						    aconnector->port,
5823 						    pbn, pbn_div,
5824 						    true);
5825 		if (vcpi < 0)
5826 			return vcpi;
5827 
5828 		dm_conn_state->pbn = pbn;
5829 		dm_conn_state->vcpi_slots = vcpi;
5830 	}
5831 	return 0;
5832 }
5833 #endif
5834 
static void dm_drm_plane_reset(struct drm_plane *plane)
5836 {
5837 	struct dm_plane_state *amdgpu_state = NULL;
5838 
5839 	if (plane->state)
5840 		plane->funcs->atomic_destroy_state(plane, plane->state);
5841 
5842 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5843 	WARN_ON(amdgpu_state == NULL);
5844 
5845 	if (amdgpu_state)
5846 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5847 }
5848 
5849 static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
5851 {
5852 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5853 
5854 	old_dm_plane_state = to_dm_plane_state(plane->state);
5855 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5856 	if (!dm_plane_state)
5857 		return NULL;
5858 
5859 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5860 
5861 	if (old_dm_plane_state->dc_state) {
5862 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5863 		dc_plane_state_retain(dm_plane_state->dc_state);
5864 	}
5865 
5866 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5867 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5868 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5869 
5870 	return &dm_plane_state->base;
5871 }
5872 
static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5874 				struct drm_plane_state *state)
5875 {
5876 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5877 
5878 	if (dm_plane_state->dc_state)
5879 		dc_plane_state_release(dm_plane_state->dc_state);
5880 
5881 	drm_atomic_helper_plane_destroy_state(plane, state);
5882 }
5883 
5884 static const struct drm_plane_funcs dm_plane_funcs = {
5885 	.update_plane	= drm_atomic_helper_update_plane,
5886 	.disable_plane	= drm_atomic_helper_disable_plane,
5887 	.destroy	= drm_primary_helper_destroy,
5888 	.reset = dm_drm_plane_reset,
5889 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5890 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5891 };
5892 
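/*
 * Pin the framebuffer BO into a scanout-capable domain, map it into GART
 * and record the resulting address so DC can program the surface.
 */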
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5894 				      struct drm_plane_state *new_state)
5895 {
5896 	struct amdgpu_framebuffer *afb;
5897 	struct drm_gem_object *obj;
5898 	struct amdgpu_device *adev;
5899 	struct amdgpu_bo *rbo;
5900 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5901 	struct list_head list;
5902 	struct ttm_validate_buffer tv;
5903 	struct ww_acquire_ctx ticket;
5904 	uint32_t domain;
5905 	int r;
5906 
5907 	if (!new_state->fb) {
5908 		DRM_DEBUG_DRIVER("No FB bound\n");
5909 		return 0;
5910 	}
5911 
5912 	afb = to_amdgpu_framebuffer(new_state->fb);
5913 	obj = new_state->fb->obj[0];
5914 	rbo = gem_to_amdgpu_bo(obj);
5915 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5916 	INIT_LIST_HEAD(&list);
5917 
5918 	tv.bo = &rbo->tbo;
5919 	tv.num_shared = 1;
5920 	list_add(&tv.head, &list);
5921 
5922 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5923 	if (r) {
5924 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5925 		return r;
5926 	}
5927 
5928 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5929 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5930 	else
5931 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5932 
5933 	r = amdgpu_bo_pin(rbo, domain);
5934 	if (unlikely(r != 0)) {
5935 		if (r != -ERESTARTSYS)
5936 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5937 		ttm_eu_backoff_reservation(&ticket, &list);
5938 		return r;
5939 	}
5940 
5941 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5942 	if (unlikely(r != 0)) {
5943 		amdgpu_bo_unpin(rbo);
5944 		ttm_eu_backoff_reservation(&ticket, &list);
5945 		DRM_ERROR("%p bind failed\n", rbo);
5946 		return r;
5947 	}
5948 
5949 	ttm_eu_backoff_reservation(&ticket, &list);
5950 
5951 	afb->address = amdgpu_bo_gpu_offset(rbo);
5952 
5953 	amdgpu_bo_ref(rbo);
5954 
5955 	/**
5956 	 * We don't do surface updates on planes that have been newly created,
5957 	 * but we also don't have the afb->address during atomic check.
5958 	 *
5959 	 * Fill in buffer attributes depending on the address here, but only on
5960 	 * newly created planes since they're not being used by DC yet and this
5961 	 * won't modify global state.
5962 	 */
5963 	dm_plane_state_old = to_dm_plane_state(plane->state);
5964 	dm_plane_state_new = to_dm_plane_state(new_state);
5965 
5966 	if (dm_plane_state_new->dc_state &&
5967 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5968 		struct dc_plane_state *plane_state =
5969 			dm_plane_state_new->dc_state;
5970 		bool force_disable_dcc = !plane_state->dcc.enable;
5971 
5972 		fill_plane_buffer_attributes(
5973 			adev, afb, plane_state->format, plane_state->rotation,
5974 			dm_plane_state_new->tiling_flags,
5975 			&plane_state->tiling_info, &plane_state->plane_size,
5976 			&plane_state->dcc, &plane_state->address,
5977 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5978 	}
5979 
5980 	return 0;
5981 }
5982 
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5984 				       struct drm_plane_state *old_state)
5985 {
5986 	struct amdgpu_bo *rbo;
5987 	int r;
5988 
5989 	if (!old_state->fb)
5990 		return;
5991 
5992 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5993 	r = amdgpu_bo_reserve(rbo, false);
5994 	if (unlikely(r)) {
5995 		DRM_ERROR("failed to reserve rbo before unpin\n");
5996 		return;
5997 	}
5998 
5999 	amdgpu_bo_unpin(rbo);
6000 	amdgpu_bo_unreserve(rbo);
6001 	amdgpu_bo_unref(&rbo);
6002 }
6003 
static int dm_plane_helper_check_state(struct drm_plane_state *state,
6005 				       struct drm_crtc_state *new_crtc_state)
6006 {
6007 	int max_downscale = 0;
6008 	int max_upscale = INT_MAX;
6009 
6010 	/* TODO: These should be checked against DC plane caps */
6011 	return drm_atomic_helper_check_plane_state(
6012 		state, new_crtc_state, max_downscale, max_upscale, true, true);
6013 }
6014 
static int dm_plane_atomic_check(struct drm_plane *plane,
6016 				 struct drm_plane_state *state)
6017 {
6018 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6019 	struct dc *dc = adev->dm.dc;
6020 	struct dm_plane_state *dm_plane_state;
6021 	struct dc_scaling_info scaling_info;
6022 	struct drm_crtc_state *new_crtc_state;
6023 	int ret;
6024 
6025 	dm_plane_state = to_dm_plane_state(state);
6026 
6027 	if (!dm_plane_state->dc_state)
6028 		return 0;
6029 
6030 	new_crtc_state =
6031 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6032 	if (!new_crtc_state)
6033 		return -EINVAL;
6034 
6035 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6036 	if (ret)
6037 		return ret;
6038 
6039 	ret = fill_dc_scaling_info(state, &scaling_info);
6040 	if (ret)
6041 		return ret;
6042 
6043 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6044 		return 0;
6045 
6046 	return -EINVAL;
6047 }
6048 
static int dm_plane_atomic_async_check(struct drm_plane *plane,
6050 				       struct drm_plane_state *new_plane_state)
6051 {
6052 	/* Only support async updates on cursor planes. */
6053 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6054 		return -EINVAL;
6055 
6056 	return 0;
6057 }
6058 
static void dm_plane_atomic_async_update(struct drm_plane *plane,
6060 					 struct drm_plane_state *new_state)
6061 {
6062 	struct drm_plane_state *old_state =
6063 		drm_atomic_get_old_plane_state(new_state->state, plane);
6064 
6065 	swap(plane->state->fb, new_state->fb);
6066 
6067 	plane->state->src_x = new_state->src_x;
6068 	plane->state->src_y = new_state->src_y;
6069 	plane->state->src_w = new_state->src_w;
6070 	plane->state->src_h = new_state->src_h;
6071 	plane->state->crtc_x = new_state->crtc_x;
6072 	plane->state->crtc_y = new_state->crtc_y;
6073 	plane->state->crtc_w = new_state->crtc_w;
6074 	plane->state->crtc_h = new_state->crtc_h;
6075 
6076 	handle_cursor_update(plane, old_state);
6077 }
6078 
6079 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6080 	.prepare_fb = dm_plane_helper_prepare_fb,
6081 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6082 	.atomic_check = dm_plane_atomic_check,
6083 	.atomic_async_check = dm_plane_atomic_async_check,
6084 	.atomic_async_update = dm_plane_atomic_async_update
6085 };
6086 
6087 /*
6088  * TODO: these are currently initialized to rgb formats only.
6089  * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check
6092  */
6093 static const uint32_t rgb_formats[] = {
6094 	DRM_FORMAT_XRGB8888,
6095 	DRM_FORMAT_ARGB8888,
6096 	DRM_FORMAT_RGBA8888,
6097 	DRM_FORMAT_XRGB2101010,
6098 	DRM_FORMAT_XBGR2101010,
6099 	DRM_FORMAT_ARGB2101010,
6100 	DRM_FORMAT_ABGR2101010,
6101 	DRM_FORMAT_XBGR8888,
6102 	DRM_FORMAT_ABGR8888,
6103 	DRM_FORMAT_RGB565,
6104 };
6105 
6106 static const uint32_t overlay_formats[] = {
6107 	DRM_FORMAT_XRGB8888,
6108 	DRM_FORMAT_ARGB8888,
6109 	DRM_FORMAT_RGBA8888,
6110 	DRM_FORMAT_XBGR8888,
6111 	DRM_FORMAT_ABGR8888,
6112 	DRM_FORMAT_RGB565
6113 };
6114 
6115 static const u32 cursor_formats[] = {
6116 	DRM_FORMAT_ARGB8888
6117 };
6118 
static int get_plane_formats(const struct drm_plane *plane,
6120 			     const struct dc_plane_cap *plane_cap,
6121 			     uint32_t *formats, int max_formats)
6122 {
6123 	int i, num_formats = 0;
6124 
6125 	/*
6126 	 * TODO: Query support for each group of formats directly from
6127 	 * DC plane caps. This will require adding more formats to the
6128 	 * caps list.
6129 	 */
6130 
6131 	switch (plane->type) {
6132 	case DRM_PLANE_TYPE_PRIMARY:
6133 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6134 			if (num_formats >= max_formats)
6135 				break;
6136 
6137 			formats[num_formats++] = rgb_formats[i];
6138 		}
6139 
6140 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6141 			formats[num_formats++] = DRM_FORMAT_NV12;
6142 		if (plane_cap && plane_cap->pixel_format_support.p010)
6143 			formats[num_formats++] = DRM_FORMAT_P010;
6144 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6145 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6146 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6147 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6148 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6149 		}
6150 		break;
6151 
6152 	case DRM_PLANE_TYPE_OVERLAY:
6153 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6154 			if (num_formats >= max_formats)
6155 				break;
6156 
6157 			formats[num_formats++] = overlay_formats[i];
6158 		}
6159 		break;
6160 
6161 	case DRM_PLANE_TYPE_CURSOR:
6162 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6163 			if (num_formats >= max_formats)
6164 				break;
6165 
6166 			formats[num_formats++] = cursor_formats[i];
6167 		}
6168 		break;
6169 	}
6170 
6171 	return num_formats;
6172 }
6173 
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6175 				struct drm_plane *plane,
6176 				unsigned long possible_crtcs,
6177 				const struct dc_plane_cap *plane_cap)
6178 {
6179 	uint32_t formats[32];
6180 	int num_formats;
6181 	int res = -EPERM;
6182 	unsigned int supported_rotations;
6183 
6184 	num_formats = get_plane_formats(plane, plane_cap, formats,
6185 					ARRAY_SIZE(formats));
6186 
6187 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6188 				       &dm_plane_funcs, formats, num_formats,
6189 				       NULL, plane->type, NULL);
6190 	if (res)
6191 		return res;
6192 
6193 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6194 	    plane_cap && plane_cap->per_pixel_alpha) {
6195 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6196 					  BIT(DRM_MODE_BLEND_PREMULTI);
6197 
6198 		drm_plane_create_alpha_property(plane);
6199 		drm_plane_create_blend_mode_property(plane, blend_caps);
6200 	}
6201 
6202 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6203 	    plane_cap &&
6204 	    (plane_cap->pixel_format_support.nv12 ||
6205 	     plane_cap->pixel_format_support.p010)) {
6206 		/* This only affects YUV formats. */
6207 		drm_plane_create_color_properties(
6208 			plane,
6209 			BIT(DRM_COLOR_YCBCR_BT601) |
6210 			BIT(DRM_COLOR_YCBCR_BT709) |
6211 			BIT(DRM_COLOR_YCBCR_BT2020),
6212 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6213 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6214 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6215 	}
6216 
6217 	supported_rotations =
6218 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6219 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6220 
6221 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6222 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6223 						   supported_rotations);
6224 
6225 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6226 
6227 	/* Create (reset) the plane state */
6228 	if (plane->funcs->reset)
6229 		plane->funcs->reset(plane);
6230 
6231 	return 0;
6232 }
6233 
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6235 			       struct drm_plane *plane,
6236 			       uint32_t crtc_index)
6237 {
6238 	struct amdgpu_crtc *acrtc = NULL;
6239 	struct drm_plane *cursor_plane;
6240 
6241 	int res = -ENOMEM;
6242 
6243 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6244 	if (!cursor_plane)
6245 		goto fail;
6246 
6247 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6249 
6250 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6251 	if (!acrtc)
6252 		goto fail;
6253 
6254 	res = drm_crtc_init_with_planes(
6255 			dm->ddev,
6256 			&acrtc->base,
6257 			plane,
6258 			cursor_plane,
6259 			&amdgpu_dm_crtc_funcs, NULL);
6260 
6261 	if (res)
6262 		goto fail;
6263 
6264 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6265 
6266 	/* Create (reset) the plane state */
6267 	if (acrtc->base.funcs->reset)
6268 		acrtc->base.funcs->reset(&acrtc->base);
6269 
6270 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6271 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6272 
6273 	acrtc->crtc_id = crtc_index;
6274 	acrtc->base.enabled = false;
6275 	acrtc->otg_inst = -1;
6276 
6277 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6278 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6279 				   true, MAX_COLOR_LUT_ENTRIES);
6280 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6281 
6282 	return 0;
6283 
6284 fail:
6285 	kfree(acrtc);
6286 	kfree(cursor_plane);
6287 	return res;
6288 }
6289 
static int to_drm_connector_type(enum signal_type st)
6292 {
6293 	switch (st) {
6294 	case SIGNAL_TYPE_HDMI_TYPE_A:
6295 		return DRM_MODE_CONNECTOR_HDMIA;
6296 	case SIGNAL_TYPE_EDP:
6297 		return DRM_MODE_CONNECTOR_eDP;
6298 	case SIGNAL_TYPE_LVDS:
6299 		return DRM_MODE_CONNECTOR_LVDS;
6300 	case SIGNAL_TYPE_RGB:
6301 		return DRM_MODE_CONNECTOR_VGA;
6302 	case SIGNAL_TYPE_DISPLAY_PORT:
6303 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6304 		return DRM_MODE_CONNECTOR_DisplayPort;
6305 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6306 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6307 		return DRM_MODE_CONNECTOR_DVID;
6308 	case SIGNAL_TYPE_VIRTUAL:
6309 		return DRM_MODE_CONNECTOR_VIRTUAL;
6310 
6311 	default:
6312 		return DRM_MODE_CONNECTOR_Unknown;
6313 	}
6314 }
6315 
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6317 {
6318 	struct drm_encoder *encoder;
6319 
6320 	/* There is only one encoder per connector */
6321 	drm_connector_for_each_possible_encoder(connector, encoder)
6322 		return encoder;
6323 
6324 	return NULL;
6325 }
6326 
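/*
 * Cache the connector's preferred mode as the encoder's native mode. Only
 * the first probed mode is examined, which relies on the probed list having
 * been sorted so that a preferred mode comes first.
 */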
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6328 {
6329 	struct drm_encoder *encoder;
6330 	struct amdgpu_encoder *amdgpu_encoder;
6331 
6332 	encoder = amdgpu_dm_connector_to_encoder(connector);
6333 
6334 	if (encoder == NULL)
6335 		return;
6336 
6337 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6338 
6339 	amdgpu_encoder->native_mode.clock = 0;
6340 
6341 	if (!list_empty(&connector->probed_modes)) {
6342 		struct drm_display_mode *preferred_mode = NULL;
6343 
6344 		list_for_each_entry(preferred_mode,
6345 				    &connector->probed_modes,
6346 				    head) {
6347 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6348 				amdgpu_encoder->native_mode = *preferred_mode;
6349 
6350 			break;
6351 		}
6352 
6353 	}
6354 }
6355 
6356 static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6358 			     char *name,
6359 			     int hdisplay, int vdisplay)
6360 {
6361 	struct drm_device *dev = encoder->dev;
6362 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6363 	struct drm_display_mode *mode = NULL;
6364 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6365 
6366 	mode = drm_mode_duplicate(dev, native_mode);
6367 
6368 	if (mode == NULL)
6369 		return NULL;
6370 
6371 	mode->hdisplay = hdisplay;
6372 	mode->vdisplay = vdisplay;
6373 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6374 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6375 
6376 	return mode;
6377 
6378 }
6379 
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6381 						 struct drm_connector *connector)
6382 {
6383 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6384 	struct drm_display_mode *mode = NULL;
6385 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6386 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6387 				to_amdgpu_dm_connector(connector);
6388 	int i;
6389 	int n;
6390 	struct mode_size {
6391 		char name[DRM_DISPLAY_MODE_LEN];
6392 		int w;
6393 		int h;
6394 	} common_modes[] = {
6395 		{  "640x480",  640,  480},
6396 		{  "800x600",  800,  600},
6397 		{ "1024x768", 1024,  768},
6398 		{ "1280x720", 1280,  720},
6399 		{ "1280x800", 1280,  800},
6400 		{"1280x1024", 1280, 1024},
6401 		{ "1440x900", 1440,  900},
6402 		{"1680x1050", 1680, 1050},
6403 		{"1600x1200", 1600, 1200},
6404 		{"1920x1080", 1920, 1080},
6405 		{"1920x1200", 1920, 1200}
6406 	};
6407 
6408 	n = ARRAY_SIZE(common_modes);
6409 
6410 	for (i = 0; i < n; i++) {
6411 		struct drm_display_mode *curmode = NULL;
6412 		bool mode_existed = false;
6413 
6414 		if (common_modes[i].w > native_mode->hdisplay ||
6415 		    common_modes[i].h > native_mode->vdisplay ||
6416 		   (common_modes[i].w == native_mode->hdisplay &&
6417 		    common_modes[i].h == native_mode->vdisplay))
6418 			continue;
6419 
6420 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6421 			if (common_modes[i].w == curmode->hdisplay &&
6422 			    common_modes[i].h == curmode->vdisplay) {
6423 				mode_existed = true;
6424 				break;
6425 			}
6426 		}
6427 
6428 		if (mode_existed)
6429 			continue;
6430 
6431 		mode = amdgpu_dm_create_common_mode(encoder,
6432 				common_modes[i].name, common_modes[i].w,
6433 				common_modes[i].h);
6434 		if (!mode)
6435 			continue;
6436 
6437 		drm_mode_probed_add(connector, mode);
6438 		amdgpu_dm_connector->num_modes++;
6439 	}
6440 }
6441 
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6443 					      struct edid *edid)
6444 {
6445 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6446 			to_amdgpu_dm_connector(connector);
6447 
6448 	if (edid) {
6449 		/* empty probed_modes */
6450 		INIT_LIST_HEAD(&connector->probed_modes);
6451 		amdgpu_dm_connector->num_modes =
6452 				drm_add_edid_modes(connector, edid);
6453 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes later in the probed mode
		 * list could be of a higher, preferred resolution: for
		 * example, a 3840x2160 preferred timing in the base EDID
		 * and a 4096x2160 preferred resolution in a DID extension
		 * block later.
		 */
6462 		drm_mode_sort(&connector->probed_modes);
6463 		amdgpu_dm_get_native_mode(connector);
6464 	} else {
6465 		amdgpu_dm_connector->num_modes = 0;
6466 	}
6467 }
6468 
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6470 {
6471 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6472 			to_amdgpu_dm_connector(connector);
6473 	struct drm_encoder *encoder;
6474 	struct edid *edid = amdgpu_dm_connector->edid;
6475 
6476 	encoder = amdgpu_dm_connector_to_encoder(connector);
6477 
6478 	if (!edid || !drm_edid_is_valid(edid)) {
6479 		amdgpu_dm_connector->num_modes =
6480 				drm_add_modes_noedid(connector, 640, 480);
6481 	} else {
6482 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6483 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6484 	}
6485 	amdgpu_dm_fbc_init(connector);
6486 
6487 	return amdgpu_dm_connector->num_modes;
6488 }
6489 
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6491 				     struct amdgpu_dm_connector *aconnector,
6492 				     int connector_type,
6493 				     struct dc_link *link,
6494 				     int link_index)
6495 {
6496 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6497 
6498 	/*
6499 	 * Some of the properties below require access to state, like bpc.
6500 	 * Allocate some default initial connector state with our reset helper.
6501 	 */
6502 	if (aconnector->base.funcs->reset)
6503 		aconnector->base.funcs->reset(&aconnector->base);
6504 
6505 	aconnector->connector_id = link_index;
6506 	aconnector->dc_link = link;
6507 	aconnector->base.interlace_allowed = false;
6508 	aconnector->base.doublescan_allowed = false;
6509 	aconnector->base.stereo_allowed = false;
6510 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6511 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6512 	aconnector->audio_inst = -1;
6513 	mutex_init(&aconnector->hpd_lock);
6514 
6515 	/*
6516 	 * Configure HPD (hot-plug detect) support. The default value of
6517 	 * connector->polled is 0, which means hot plug is not supported.
6518 	 */
6519 	switch (connector_type) {
6520 	case DRM_MODE_CONNECTOR_HDMIA:
6521 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6522 		aconnector->base.ycbcr_420_allowed =
6523 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6524 		break;
6525 	case DRM_MODE_CONNECTOR_DisplayPort:
6526 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6527 		aconnector->base.ycbcr_420_allowed =
6528 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6529 		break;
6530 	case DRM_MODE_CONNECTOR_DVID:
6531 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6532 		break;
6533 	default:
6534 		break;
6535 	}
6536 
6537 	drm_object_attach_property(&aconnector->base.base,
6538 				dm->ddev->mode_config.scaling_mode_property,
6539 				DRM_MODE_SCALE_NONE);
6540 
6541 	drm_object_attach_property(&aconnector->base.base,
6542 				adev->mode_info.underscan_property,
6543 				UNDERSCAN_OFF);
6544 	drm_object_attach_property(&aconnector->base.base,
6545 				adev->mode_info.underscan_hborder_property,
6546 				0);
6547 	drm_object_attach_property(&aconnector->base.base,
6548 				adev->mode_info.underscan_vborder_property,
6549 				0);
6550 
6551 	if (!aconnector->mst_port)
6552 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6553 
6554 	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6555 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6556 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6557 
6558 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6559 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6560 		drm_object_attach_property(&aconnector->base.base,
6561 				adev->mode_info.abm_level_property, 0);
6562 	}
6563 
6564 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6565 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6566 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6567 		drm_object_attach_property(
6568 			&aconnector->base.base,
6569 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6570 
6571 		if (!aconnector->mst_port)
6572 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6573 
6574 #ifdef CONFIG_DRM_AMD_DC_HDCP
6575 		if (adev->dm.hdcp_workqueue)
6576 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6577 #endif
6578 	}
6579 }
6580 
6581 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6582 			      struct i2c_msg *msgs, int num)
6583 {
6584 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6585 	struct ddc_service *ddc_service = i2c->ddc_service;
6586 	struct i2c_command cmd;
6587 	int i;
6588 	int result = -EIO;
6589 
6590 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6591 
6592 	if (!cmd.payloads)
6593 		return result;
6594 
6595 	cmd.number_of_payloads = num;
6596 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6597 	cmd.speed = 100;
6598 
6599 	for (i = 0; i < num; i++) {
6600 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6601 		cmd.payloads[i].address = msgs[i].addr;
6602 		cmd.payloads[i].length = msgs[i].len;
6603 		cmd.payloads[i].data = msgs[i].buf;
6604 	}
6605 
6606 	if (dc_submit_i2c(
6607 			ddc_service->ctx->dc,
6608 			ddc_service->ddc_pin->hw_info.ddc_channel,
6609 			&cmd))
6610 		result = num;
6611 
6612 	kfree(cmd.payloads);
6613 	return result;
6614 }
6615 
6616 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6617 {
6618 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6619 }
6620 
6621 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6622 	.master_xfer = amdgpu_dm_i2c_xfer,
6623 	.functionality = amdgpu_dm_i2c_func,
6624 };
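
/*
 * Illustrative sketch (not part of the driver): once registered, the
 * adapter above behaves like any Linux i2c bus, so a client can issue a
 * standard DDC-style transaction with i2c_transfer(). The 0x50 slave
 * address and the write-then-read message pair below follow the usual
 * DDC/EDID convention and are shown purely as an example.
 */
static int example_ddc_read(struct i2c_adapter *adap, u8 offset,
			    u8 *buf, u16 len)
{
	struct i2c_msg msgs[] = {
		/* Write the starting offset... */
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		/* ...then read len bytes back. */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};

	/* Each message becomes one i2c_payload in amdgpu_dm_i2c_xfer(). */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == ARRAY_SIZE(msgs) ?
		0 : -EIO;
}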
6625 
6626 static struct amdgpu_i2c_adapter *
6627 create_i2c(struct ddc_service *ddc_service,
6628 	   int link_index,
6629 	   int *res)
6630 {
6631 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6632 	struct amdgpu_i2c_adapter *i2c;
6633 
6634 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6635 	if (!i2c)
6636 		return NULL;
6637 	i2c->base.owner = THIS_MODULE;
6638 	i2c->base.class = I2C_CLASS_DDC;
6639 	i2c->base.dev.parent = &adev->pdev->dev;
6640 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6641 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6642 	i2c_set_adapdata(&i2c->base, i2c);
6643 	i2c->ddc_service = ddc_service;
6644 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6645 
6646 	return i2c;
6647 }
6648 
6649 
6650 /*
6651  * Note: this function assumes that dc_link_detect() was called for the
6652  * dc_link which will be represented by this aconnector.
6653  */
6654 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6655 				    struct amdgpu_dm_connector *aconnector,
6656 				    uint32_t link_index,
6657 				    struct amdgpu_encoder *aencoder)
6658 {
6659 	int res = 0;
6660 	int connector_type;
6661 	struct dc *dc = dm->dc;
6662 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6663 	struct amdgpu_i2c_adapter *i2c;
6664 
6665 	link->priv = aconnector;
6666 
6667 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6668 
6669 	i2c = create_i2c(link->ddc, link->link_index, &res);
6670 	if (!i2c) {
6671 		DRM_ERROR("Failed to create i2c adapter data\n");
6672 		return -ENOMEM;
6673 	}
6674 
6675 	aconnector->i2c = i2c;
6676 	res = i2c_add_adapter(&i2c->base);
6677 
6678 	if (res) {
6679 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6680 		goto out_free;
6681 	}
6682 
6683 	connector_type = to_drm_connector_type(link->connector_signal);
6684 
6685 	res = drm_connector_init_with_ddc(
6686 			dm->ddev,
6687 			&aconnector->base,
6688 			&amdgpu_dm_connector_funcs,
6689 			connector_type,
6690 			&i2c->base);
6691 
6692 	if (res) {
6693 		DRM_ERROR("connector_init failed\n");
6694 		aconnector->connector_id = -1;
6695 		goto out_free;
6696 	}
6697 
6698 	drm_connector_helper_add(
6699 			&aconnector->base,
6700 			&amdgpu_dm_connector_helper_funcs);
6701 
6702 	amdgpu_dm_connector_init_helper(
6703 		dm,
6704 		aconnector,
6705 		connector_type,
6706 		link,
6707 		link_index);
6708 
6709 	drm_connector_attach_encoder(
6710 		&aconnector->base, &aencoder->base);
6711 
6712 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6713 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6714 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6715 
6716 out_free:
6717 	if (res) {
6718 		kfree(i2c);
6719 		aconnector->i2c = NULL;
6720 	}
6721 	return res;
6722 }
6723 
6724 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6725 {
6726 	switch (adev->mode_info.num_crtc) {
6727 	case 1:
6728 		return 0x1;
6729 	case 2:
6730 		return 0x3;
6731 	case 3:
6732 		return 0x7;
6733 	case 4:
6734 		return 0xf;
6735 	case 5:
6736 		return 0x1f;
6737 	case 6:
6738 	default:
6739 		return 0x3f;
6740 	}
6741 }
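
/*
 * Aside: for the 1-6 CRTC range handled above, the switch is equivalent
 * to the saturating mask (1 << min(num_crtc, 6)) - 1; the explicit table
 * is kept for readability.
 */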
6742 
6743 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6744 				  struct amdgpu_encoder *aencoder,
6745 				  uint32_t link_index)
6746 {
6747 	struct amdgpu_device *adev = drm_to_adev(dev);
6748 
6749 	int res = drm_encoder_init(dev,
6750 				   &aencoder->base,
6751 				   &amdgpu_dm_encoder_funcs,
6752 				   DRM_MODE_ENCODER_TMDS,
6753 				   NULL);
6754 
6755 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6756 
6757 	if (!res)
6758 		aencoder->encoder_id = link_index;
6759 	else
6760 		aencoder->encoder_id = -1;
6761 
6762 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6763 
6764 	return res;
6765 }
6766 
6767 static void manage_dm_interrupts(struct amdgpu_device *adev,
6768 				 struct amdgpu_crtc *acrtc,
6769 				 bool enable)
6770 {
6771 	/*
6772 	 * We have no guarantee that the frontend index maps to the same
6773 	 * backend index - some even map to more than one.
6774 	 *
6775 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6776 	 */
6777 	int irq_type =
6778 		amdgpu_display_crtc_idx_to_irq_type(
6779 			adev,
6780 			acrtc->crtc_id);
6781 
6782 	if (enable) {
6783 		drm_crtc_vblank_on(&acrtc->base);
6784 		amdgpu_irq_get(
6785 			adev,
6786 			&adev->pageflip_irq,
6787 			irq_type);
6788 	} else {
6789 
6790 		amdgpu_irq_put(
6791 			adev,
6792 			&adev->pageflip_irq,
6793 			irq_type);
6794 		drm_crtc_vblank_off(&acrtc->base);
6795 	}
6796 }
6797 
6798 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6799 				      struct amdgpu_crtc *acrtc)
6800 {
6801 	int irq_type =
6802 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6803 
6804 	/**
6805 	 * This reads the current state for the IRQ and force-reapplies
6806 	 * the setting to hardware.
6807 	 */
6808 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6809 }
6810 
6811 static bool
6812 is_scaling_state_different(const struct dm_connector_state *dm_state,
6813 			   const struct dm_connector_state *old_dm_state)
6814 {
6815 	if (dm_state->scaling != old_dm_state->scaling)
6816 		return true;
6817 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6818 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6819 			return true;
6820 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6821 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6822 			return true;
6823 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6824 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6825 		return true;
6826 	return false;
6827 }
6828 
6829 #ifdef CONFIG_DRM_AMD_DC_HDCP
6830 static bool is_content_protection_different(struct drm_connector_state *state,
6831 					    const struct drm_connector_state *old_state,
6832 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6833 {
6834 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6835 
6836 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6837 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6838 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6839 		return true;
6840 	}
6841 
6842 	/* CP is being re-enabled, ignore this */
6843 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6844 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6845 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6846 		return false;
6847 	}
6848 
6849 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6850 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6851 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6852 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6853 
6854 	/* Check that something is connected and enabled; otherwise we would
6855 	 * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6856 	 */
6857 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6858 	    aconnector->dc_sink != NULL)
6859 		return true;
6860 
6861 	if (old_state->content_protection == state->content_protection)
6862 		return false;
6863 
6864 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6865 		return true;
6866 
6867 	return false;
6868 }
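
/*
 * Summary of the transitions handled above, derived from the checks:
 *   - content type changed while protection is not UNDESIRED
 *                                          -> force DESIRED, update HDCP
 *   - ENABLED -> DESIRED                   -> treat as still ENABLED, no-op
 *   - UNDESIRED -> ENABLED (S3 resume)     -> downgrade to DESIRED first
 *   - DESIRED with DPMS on and a live sink -> enable HDCP
 *   - any transition to UNDESIRED          -> disable HDCP
 */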
6869 
6870 #endif
6871 static void remove_stream(struct amdgpu_device *adev,
6872 			  struct amdgpu_crtc *acrtc,
6873 			  struct dc_stream_state *stream)
6874 {
6875 	/* this is the update mode case */
6876 
6877 	acrtc->otg_inst = -1;
6878 	acrtc->enabled = false;
6879 }
6880 
6881 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6882 			       struct dc_cursor_position *position)
6883 {
6884 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6885 	int x, y;
6886 	int xorigin = 0, yorigin = 0;
6887 
6888 	if (!crtc || !plane->state->fb)
6889 		return 0;
6890 
6891 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6892 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6893 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6894 			  __func__,
6895 			  plane->state->crtc_w,
6896 			  plane->state->crtc_h);
6897 		return -EINVAL;
6898 	}
6899 
6900 	x = plane->state->crtc_x;
6901 	y = plane->state->crtc_y;
6902 
6903 	if (x <= -amdgpu_crtc->max_cursor_width ||
6904 	    y <= -amdgpu_crtc->max_cursor_height)
6905 		return 0;
6906 
6907 	if (x < 0) {
6908 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6909 		x = 0;
6910 	}
6911 	if (y < 0) {
6912 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6913 		y = 0;
6914 	}
6915 	position->enable = true;
6916 	position->translate_by_source = true;
6917 	position->x = x;
6918 	position->y = y;
6919 	position->x_hotspot = xorigin;
6920 	position->y_hotspot = yorigin;
6921 
6922 	return 0;
6923 }
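
/*
 * Worked example for the clamping above: with a 64x64 max cursor and
 * plane->state->crtc_x == -16, we get xorigin = 16 and x = 0, i.e. the
 * hardware cursor is parked at the left edge with its hotspot shifted
 * 16 pixels into the image, visually identical to a cursor hanging 16
 * pixels off-screen.
 */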
6924 
6925 static void handle_cursor_update(struct drm_plane *plane,
6926 				 struct drm_plane_state *old_plane_state)
6927 {
6928 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6929 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6930 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6931 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6932 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6933 	uint64_t address = afb ? afb->address : 0;
6934 	struct dc_cursor_position position = {0};
6935 	struct dc_cursor_attributes attributes;
6936 	int ret;
6937 
6938 	if (!plane->state->fb && !old_plane_state->fb)
6939 		return;
6940 
6941 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6942 			 __func__,
6943 			 amdgpu_crtc->crtc_id,
6944 			 plane->state->crtc_w,
6945 			 plane->state->crtc_h);
6946 
6947 	ret = get_cursor_position(plane, crtc, &position);
6948 	if (ret)
6949 		return;
6950 
6951 	if (!position.enable) {
6952 		/* turn off cursor */
6953 		if (crtc_state && crtc_state->stream) {
6954 			mutex_lock(&adev->dm.dc_lock);
6955 			dc_stream_set_cursor_position(crtc_state->stream,
6956 						      &position);
6957 			mutex_unlock(&adev->dm.dc_lock);
6958 		}
6959 		return;
6960 	}
6961 
6962 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6963 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6964 
6965 	memset(&attributes, 0, sizeof(attributes));
6966 	attributes.address.high_part = upper_32_bits(address);
6967 	attributes.address.low_part  = lower_32_bits(address);
6968 	attributes.width             = plane->state->crtc_w;
6969 	attributes.height            = plane->state->crtc_h;
6970 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6971 	attributes.rotation_angle    = 0;
6972 	attributes.attribute_flags.value = 0;
6973 
6974 	attributes.pitch = attributes.width;
6975 
6976 	if (crtc_state->stream) {
6977 		mutex_lock(&adev->dm.dc_lock);
6978 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6979 							 &attributes))
6980 			DRM_ERROR("DC failed to set cursor attributes\n");
6981 
6982 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6983 						   &position))
6984 			DRM_ERROR("DC failed to set cursor position\n");
6985 		mutex_unlock(&adev->dm.dc_lock);
6986 	}
6987 }
6988 
6989 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6990 {
6991 
6992 	assert_spin_locked(&acrtc->base.dev->event_lock);
6993 	WARN_ON(acrtc->event);
6994 
6995 	acrtc->event = acrtc->base.state->event;
6996 
6997 	/* Set the flip status */
6998 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6999 
7000 	/* Mark this event as consumed */
7001 	acrtc->base.state->event = NULL;
7002 
7003 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7004 						 acrtc->crtc_id);
7005 }
7006 
7007 static void update_freesync_state_on_stream(
7008 	struct amdgpu_display_manager *dm,
7009 	struct dm_crtc_state *new_crtc_state,
7010 	struct dc_stream_state *new_stream,
7011 	struct dc_plane_state *surface,
7012 	u32 flip_timestamp_in_us)
7013 {
7014 	struct mod_vrr_params vrr_params;
7015 	struct dc_info_packet vrr_infopacket = {0};
7016 	struct amdgpu_device *adev = dm->adev;
7017 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7018 	unsigned long flags;
7019 
7020 	if (!new_stream)
7021 		return;
7022 
7023 	/*
7024 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7025 	 * For now it's sufficient to just guard against these conditions.
7026 	 */
7027 
7028 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7029 		return;
7030 
7031 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7032 	vrr_params = acrtc->dm_irq_params.vrr_params;
7033 
7034 	if (surface) {
7035 		mod_freesync_handle_preflip(
7036 			dm->freesync_module,
7037 			surface,
7038 			new_stream,
7039 			flip_timestamp_in_us,
7040 			&vrr_params);
7041 
7042 		if (adev->family < AMDGPU_FAMILY_AI &&
7043 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7044 			mod_freesync_handle_v_update(dm->freesync_module,
7045 						     new_stream, &vrr_params);
7046 
7047 			/* Need to call this before the frame ends. */
7048 			dc_stream_adjust_vmin_vmax(dm->dc,
7049 						   new_crtc_state->stream,
7050 						   &vrr_params.adjust);
7051 		}
7052 	}
7053 
7054 	mod_freesync_build_vrr_infopacket(
7055 		dm->freesync_module,
7056 		new_stream,
7057 		&vrr_params,
7058 		PACKET_TYPE_VRR,
7059 		TRANSFER_FUNC_UNKNOWN,
7060 		&vrr_infopacket);
7061 
7062 	new_crtc_state->freesync_timing_changed |=
7063 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7064 			&vrr_params.adjust,
7065 			sizeof(vrr_params.adjust)) != 0);
7066 
7067 	new_crtc_state->freesync_vrr_info_changed |=
7068 		(memcmp(&new_crtc_state->vrr_infopacket,
7069 			&vrr_infopacket,
7070 			sizeof(vrr_infopacket)) != 0);
7071 
7072 	acrtc->dm_irq_params.vrr_params = vrr_params;
7073 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7074 
7075 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7076 	new_stream->vrr_infopacket = vrr_infopacket;
7077 
7078 	if (new_crtc_state->freesync_vrr_info_changed)
7079 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7080 			      new_crtc_state->base.crtc->base.id,
7081 			      (int)new_crtc_state->base.vrr_enabled,
7082 			      (int)vrr_params.state);
7083 
7084 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7085 }
7086 
7087 static void update_stream_irq_parameters(
7088 	struct amdgpu_display_manager *dm,
7089 	struct dm_crtc_state *new_crtc_state)
7090 {
7091 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7092 	struct mod_vrr_params vrr_params;
7093 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7094 	struct amdgpu_device *adev = dm->adev;
7095 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7096 	unsigned long flags;
7097 
7098 	if (!new_stream)
7099 		return;
7100 
7101 	/*
7102 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7103 	 * For now it's sufficient to just guard against these conditions.
7104 	 */
7105 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7106 		return;
7107 
7108 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7109 	vrr_params = acrtc->dm_irq_params.vrr_params;
7110 
7111 	if (new_crtc_state->vrr_supported &&
7112 	    config.min_refresh_in_uhz &&
7113 	    config.max_refresh_in_uhz) {
7114 		config.state = new_crtc_state->base.vrr_enabled ?
7115 			VRR_STATE_ACTIVE_VARIABLE :
7116 			VRR_STATE_INACTIVE;
7117 	} else {
7118 		config.state = VRR_STATE_UNSUPPORTED;
7119 	}
7120 
7121 	mod_freesync_build_vrr_params(dm->freesync_module,
7122 				      new_stream,
7123 				      &config, &vrr_params);
7124 
7125 	new_crtc_state->freesync_timing_changed |=
7126 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7127 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7128 
7129 	new_crtc_state->freesync_config = config;
7130 	/* Copy state for access from DM IRQ handler */
7131 	acrtc->dm_irq_params.freesync_config = config;
7132 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7133 	acrtc->dm_irq_params.vrr_params = vrr_params;
7134 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7135 }
7136 
7137 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7138 					    struct dm_crtc_state *new_state)
7139 {
7140 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7141 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7142 
7143 	if (!old_vrr_active && new_vrr_active) {
7144 		/* Transition VRR inactive -> active:
7145 		 * While VRR is active, we must not disable the vblank irq, as a
7146 		 * re-enable after a disable would compute bogus vblank/pflip
7147 		 * timestamps if the disable happened inside the display front porch.
7148 		 *
7149 		 * We also need the vupdate irq for the actual core vblank handling
7150 		 * at the end of vblank.
7151 		 */
7152 		dm_set_vupdate_irq(new_state->base.crtc, true);
7153 		drm_crtc_vblank_get(new_state->base.crtc);
7154 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7155 				 __func__, new_state->base.crtc->base.id);
7156 	} else if (old_vrr_active && !new_vrr_active) {
7157 		/* Transition VRR active -> inactive:
7158 		 * Allow vblank irq disable again for fixed refresh rate.
7159 		 */
7160 		dm_set_vupdate_irq(new_state->base.crtc, false);
7161 		drm_crtc_vblank_put(new_state->base.crtc);
7162 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7163 				 __func__, new_state->base.crtc->base.id);
7164 	}
7165 }
7166 
7167 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7168 {
7169 	struct drm_plane *plane;
7170 	struct drm_plane_state *old_plane_state, *new_plane_state;
7171 	int i;
7172 
7173 	/*
7174 	 * TODO: Make this per-stream so we don't issue redundant updates for
7175 	 * commits with multiple streams.
7176 	 */
7177 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7178 				       new_plane_state, i)
7179 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7180 			handle_cursor_update(plane, old_plane_state);
7181 }
7182 
7183 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7184 				    struct dc_state *dc_state,
7185 				    struct drm_device *dev,
7186 				    struct amdgpu_display_manager *dm,
7187 				    struct drm_crtc *pcrtc,
7188 				    bool wait_for_vblank)
7189 {
7190 	uint32_t i;
7191 	uint64_t timestamp_ns;
7192 	struct drm_plane *plane;
7193 	struct drm_plane_state *old_plane_state, *new_plane_state;
7194 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7195 	struct drm_crtc_state *new_pcrtc_state =
7196 			drm_atomic_get_new_crtc_state(state, pcrtc);
7197 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7198 	struct dm_crtc_state *dm_old_crtc_state =
7199 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7200 	int planes_count = 0, vpos, hpos;
7201 	long r;
7202 	unsigned long flags;
7203 	struct amdgpu_bo *abo;
7204 	uint32_t target_vblank, last_flip_vblank;
7205 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7206 	bool pflip_present = false;
7207 	struct {
7208 		struct dc_surface_update surface_updates[MAX_SURFACES];
7209 		struct dc_plane_info plane_infos[MAX_SURFACES];
7210 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7211 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7212 		struct dc_stream_update stream_update;
7213 	} *bundle;
7214 
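	/*
	 * The bundle is heap-allocated rather than kept on the stack;
	 * with MAX_SURFACES entries per array it would likely be too
	 * large for the kernel stack (an assumption about the original
	 * motivation, but consistent with the kzalloc() below).
	 */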
7215 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7216 
7217 	if (!bundle) {
7218 		dm_error("Failed to allocate update bundle\n");
7219 		goto cleanup;
7220 	}
7221 
7222 	/*
7223 	 * Disable the cursor first if we're disabling all the planes.
7224 	 * It'll remain on the screen after the planes are re-enabled
7225 	 * if we don't.
7226 	 */
7227 	if (acrtc_state->active_planes == 0)
7228 		amdgpu_dm_commit_cursors(state);
7229 
7230 	/* update planes when needed */
7231 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7232 		struct drm_crtc *crtc = new_plane_state->crtc;
7233 		struct drm_crtc_state *new_crtc_state;
7234 		struct drm_framebuffer *fb = new_plane_state->fb;
7235 		bool plane_needs_flip;
7236 		struct dc_plane_state *dc_plane;
7237 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7238 
7239 		/* Cursor plane is handled after stream updates */
7240 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7241 			continue;
7242 
7243 		if (!fb || !crtc || pcrtc != crtc)
7244 			continue;
7245 
7246 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7247 		if (!new_crtc_state->active)
7248 			continue;
7249 
7250 		dc_plane = dm_new_plane_state->dc_state;
7251 		if (!dc_plane)
7252 			continue;
7253 
7254 		bundle->surface_updates[planes_count].surface = dc_plane;
7255 		if (new_pcrtc_state->color_mgmt_changed) {
7256 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7257 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7258 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7259 		}
7260 
7261 		fill_dc_scaling_info(new_plane_state,
7262 				     &bundle->scaling_infos[planes_count]);
7263 
7264 		bundle->surface_updates[planes_count].scaling_info =
7265 			&bundle->scaling_infos[planes_count];
7266 
7267 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7268 
7269 		pflip_present = pflip_present || plane_needs_flip;
7270 
7271 		if (!plane_needs_flip) {
7272 			planes_count += 1;
7273 			continue;
7274 		}
7275 
7276 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7277 
7278 		/*
7279 		 * Wait for all fences on this FB. Do limited wait to avoid
7280 		 * deadlock during GPU reset when this fence will not signal
7281 		 * but we hold reservation lock for the BO.
7282 		 */
7283 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7284 							false,
7285 							msecs_to_jiffies(5000));
7286 		if (unlikely(r <= 0))
7287 			DRM_ERROR("Waiting for fences timed out!");
7288 
7289 		fill_dc_plane_info_and_addr(
7290 			dm->adev, new_plane_state,
7291 			dm_new_plane_state->tiling_flags,
7292 			&bundle->plane_infos[planes_count],
7293 			&bundle->flip_addrs[planes_count].address,
7294 			dm_new_plane_state->tmz_surface, false);
7295 
7296 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7297 				 new_plane_state->plane->index,
7298 				 bundle->plane_infos[planes_count].dcc.enable);
7299 
7300 		bundle->surface_updates[planes_count].plane_info =
7301 			&bundle->plane_infos[planes_count];
7302 
7303 		/*
7304 		 * Only allow immediate flips for fast updates that don't
7305 		 * change FB pitch, DCC state, rotation or mirroring.
7306 		 */
7307 		bundle->flip_addrs[planes_count].flip_immediate =
7308 			crtc->state->async_flip &&
7309 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7310 
7311 		timestamp_ns = ktime_get_ns();
7312 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7313 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7314 		bundle->surface_updates[planes_count].surface = dc_plane;
7315 
7316 		if (!bundle->surface_updates[planes_count].surface) {
7317 			DRM_ERROR("No surface for CRTC: id=%d\n",
7318 					acrtc_attach->crtc_id);
7319 			continue;
7320 		}
7321 
7322 		if (plane == pcrtc->primary)
7323 			update_freesync_state_on_stream(
7324 				dm,
7325 				acrtc_state,
7326 				acrtc_state->stream,
7327 				dc_plane,
7328 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7329 
7330 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7331 				 __func__,
7332 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7333 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7334 
7335 		planes_count += 1;
7336 
7337 	}
7338 
7339 	if (pflip_present) {
7340 		if (!vrr_active) {
7341 			/* Use old throttling in non-vrr fixed refresh rate mode
7342 			 * to keep flip scheduling based on target vblank counts
7343 			 * working in a backwards compatible way, e.g., for
7344 			 * clients using the GLX_OML_sync_control extension or
7345 			 * DRI3/Present extension with defined target_msc.
7346 			 */
7347 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7348 		}
7349 		else {
7350 			/* For variable refresh rate mode only:
7351 			 * Get vblank of last completed flip to avoid > 1 vrr
7352 			 * flips per video frame by use of throttling, but allow
7353 			 * flip programming anywhere in the possibly large
7354 			 * variable vrr vblank interval for fine-grained flip
7355 			 * timing control and more opportunity to avoid stutter
7356 			 * on late submission of flips.
7357 			 */
7358 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7359 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7360 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7361 		}
7362 
7363 		target_vblank = last_flip_vblank + wait_for_vblank;
7364 
7365 		/*
7366 		 * Wait until we're out of the vertical blank period before the one
7367 		 * targeted by the flip
7368 		 */
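		/*
		 * Note: the signed (int) difference below keeps the
		 * comparison correct across 32-bit vblank counter
		 * wraparound: a target still in the future yields a
		 * positive difference even if the counter has wrapped.
		 */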
7369 		while ((acrtc_attach->enabled &&
7370 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7371 							    0, &vpos, &hpos, NULL,
7372 							    NULL, &pcrtc->hwmode)
7373 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7374 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7375 			(int)(target_vblank -
7376 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7377 			usleep_range(1000, 1100);
7378 		}
7379 
7380 		/**
7381 		 * Prepare the flip event for the pageflip interrupt to handle.
7382 		 *
7383 		 * This only works in the case where we've already turned on the
7384 		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7385 		 * from 0 -> n planes we have to skip a hardware generated event
7386 		 * and rely on sending it from software.
7387 		 */
7388 		if (acrtc_attach->base.state->event &&
7389 		    acrtc_state->active_planes > 0) {
7390 			drm_crtc_vblank_get(pcrtc);
7391 
7392 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7393 
7394 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7395 			prepare_flip_isr(acrtc_attach);
7396 
7397 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7398 		}
7399 
7400 		if (acrtc_state->stream) {
7401 			if (acrtc_state->freesync_vrr_info_changed)
7402 				bundle->stream_update.vrr_infopacket =
7403 					&acrtc_state->stream->vrr_infopacket;
7404 		}
7405 	}
7406 
7407 	/* Update the planes if changed or disable if we don't have any. */
7408 	if ((planes_count || acrtc_state->active_planes == 0) &&
7409 		acrtc_state->stream) {
7410 		bundle->stream_update.stream = acrtc_state->stream;
7411 		if (new_pcrtc_state->mode_changed) {
7412 			bundle->stream_update.src = acrtc_state->stream->src;
7413 			bundle->stream_update.dst = acrtc_state->stream->dst;
7414 		}
7415 
7416 		if (new_pcrtc_state->color_mgmt_changed) {
7417 			/*
7418 			 * TODO: This isn't fully correct since we've actually
7419 			 * already modified the stream in place.
7420 			 */
7421 			bundle->stream_update.gamut_remap =
7422 				&acrtc_state->stream->gamut_remap_matrix;
7423 			bundle->stream_update.output_csc_transform =
7424 				&acrtc_state->stream->csc_color_matrix;
7425 			bundle->stream_update.out_transfer_func =
7426 				acrtc_state->stream->out_transfer_func;
7427 		}
7428 
7429 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7430 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7431 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7432 
7433 		/*
7434 		 * If FreeSync state on the stream has changed then we need to
7435 		 * re-adjust the min/max bounds now that DC doesn't handle this
7436 		 * as part of commit.
7437 		 */
7438 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7439 		    amdgpu_dm_vrr_active(acrtc_state)) {
7440 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7441 			dc_stream_adjust_vmin_vmax(
7442 				dm->dc, acrtc_state->stream,
7443 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7444 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7445 		}
7446 		mutex_lock(&dm->dc_lock);
7447 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7448 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7449 			amdgpu_dm_psr_disable(acrtc_state->stream);
7450 
7451 		dc_commit_updates_for_stream(dm->dc,
7452 						     bundle->surface_updates,
7453 						     planes_count,
7454 						     acrtc_state->stream,
7455 						     &bundle->stream_update,
7456 						     dc_state);
7457 
7458 		/**
7459 		 * Enable or disable the interrupts on the backend.
7460 		 *
7461 		 * Most pipes are put into power gating when unused.
7462 		 *
7463 		 * When a pipe is power gated, its interrupt enablement
7464 		 * state is lost by the time power gating is disabled again.
7465 		 *
7466 		 * So we need to update the IRQ control state in hardware
7467 		 * whenever the pipe turns on (since it could be previously
7468 		 * power gated) or off (since some pipes can't be power gated
7469 		 * on some ASICs).
7470 		 */
7471 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7472 			dm_update_pflip_irq_state(drm_to_adev(dev),
7473 						  acrtc_attach);
7474 
7475 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7476 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7477 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7478 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7479 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7480 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7481 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7482 			amdgpu_dm_psr_enable(acrtc_state->stream);
7483 		}
7484 
7485 		mutex_unlock(&dm->dc_lock);
7486 	}
7487 
7488 	/*
7489 	 * Update cursor state *after* programming all the planes.
7490 	 * This avoids redundant programming in the case where we're going
7491 	 * to be disabling a single plane - those pipes are being disabled.
7492 	 */
7493 	if (acrtc_state->active_planes)
7494 		amdgpu_dm_commit_cursors(state);
7495 
7496 cleanup:
7497 	kfree(bundle);
7498 }
7499 
7500 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7501 				   struct drm_atomic_state *state)
7502 {
7503 	struct amdgpu_device *adev = drm_to_adev(dev);
7504 	struct amdgpu_dm_connector *aconnector;
7505 	struct drm_connector *connector;
7506 	struct drm_connector_state *old_con_state, *new_con_state;
7507 	struct drm_crtc_state *new_crtc_state;
7508 	struct dm_crtc_state *new_dm_crtc_state;
7509 	const struct dc_stream_status *status;
7510 	int i, inst;
7511 
7512 	/* Notify audio device removals. */
7513 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7514 		if (old_con_state->crtc != new_con_state->crtc) {
7515 			/* CRTC changes require notification. */
7516 			goto notify;
7517 		}
7518 
7519 		if (!new_con_state->crtc)
7520 			continue;
7521 
7522 		new_crtc_state = drm_atomic_get_new_crtc_state(
7523 			state, new_con_state->crtc);
7524 
7525 		if (!new_crtc_state)
7526 			continue;
7527 
7528 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7529 			continue;
7530 
7531 	notify:
7532 		aconnector = to_amdgpu_dm_connector(connector);
7533 
7534 		mutex_lock(&adev->dm.audio_lock);
7535 		inst = aconnector->audio_inst;
7536 		aconnector->audio_inst = -1;
7537 		mutex_unlock(&adev->dm.audio_lock);
7538 
7539 		amdgpu_dm_audio_eld_notify(adev, inst);
7540 	}
7541 
7542 	/* Notify audio device additions. */
7543 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7544 		if (!new_con_state->crtc)
7545 			continue;
7546 
7547 		new_crtc_state = drm_atomic_get_new_crtc_state(
7548 			state, new_con_state->crtc);
7549 
7550 		if (!new_crtc_state)
7551 			continue;
7552 
7553 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7554 			continue;
7555 
7556 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7557 		if (!new_dm_crtc_state->stream)
7558 			continue;
7559 
7560 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7561 		if (!status)
7562 			continue;
7563 
7564 		aconnector = to_amdgpu_dm_connector(connector);
7565 
7566 		mutex_lock(&adev->dm.audio_lock);
7567 		inst = status->audio_inst;
7568 		aconnector->audio_inst = inst;
7569 		mutex_unlock(&adev->dm.audio_lock);
7570 
7571 		amdgpu_dm_audio_eld_notify(adev, inst);
7572 	}
7573 }
7574 
7575 /*
7576  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7577  * @crtc_state: the DRM CRTC state
7578  * @stream_state: the DC stream state.
7579  *
7580  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7581  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7582  */
7583 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7584 						struct dc_stream_state *stream_state)
7585 {
7586 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7587 }
7588 
7589 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7590 				   struct drm_atomic_state *state,
7591 				   bool nonblock)
7592 {
7593 	/*
7594 	 * Add a check here for SoCs that support a hardware cursor plane,
7595 	 * to unset legacy_cursor_update.
7596 	 */
7597 
7598 	return drm_atomic_helper_commit(dev, state, nonblock);
7599 
7600 	/*TODO Handle EINTR, reenable IRQ*/
7601 }
7602 
7603 /**
7604  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
7605  * @state: The atomic state to commit
7606  *
7607  * This will tell DC to commit the constructed DC state from atomic_check,
7608  * programming the hardware. Any failure here implies a hardware failure, since
7609  * atomic check should have filtered anything non-kosher.
7610  */
7611 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7612 {
7613 	struct drm_device *dev = state->dev;
7614 	struct amdgpu_device *adev = drm_to_adev(dev);
7615 	struct amdgpu_display_manager *dm = &adev->dm;
7616 	struct dm_atomic_state *dm_state;
7617 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7618 	uint32_t i, j;
7619 	struct drm_crtc *crtc;
7620 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7621 	unsigned long flags;
7622 	bool wait_for_vblank = true;
7623 	struct drm_connector *connector;
7624 	struct drm_connector_state *old_con_state, *new_con_state;
7625 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7626 	int crtc_disable_count = 0;
7627 	bool mode_set_reset_required = false;
7628 
7629 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7630 
7631 	dm_state = dm_atomic_get_new_state(state);
7632 	if (dm_state && dm_state->context) {
7633 		dc_state = dm_state->context;
7634 	} else {
7635 		/* No state changes, retain current state. */
7636 		dc_state_temp = dc_create_state(dm->dc);
7637 		ASSERT(dc_state_temp);
7638 		dc_state = dc_state_temp;
7639 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7640 	}
7641 
7642 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7643 				       new_crtc_state, i) {
7644 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7645 
7646 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7647 
7648 		if (old_crtc_state->active &&
7649 		    (!new_crtc_state->active ||
7650 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7651 			manage_dm_interrupts(adev, acrtc, false);
7652 			dc_stream_release(dm_old_crtc_state->stream);
7653 		}
7654 	}
7655 
7656 	drm_atomic_helper_calc_timestamping_constants(state);
7657 
7658 	/* update changed items */
7659 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7660 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7661 
7662 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7663 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7664 
7665 		DRM_DEBUG_DRIVER(
7666 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7667 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7668 			"connectors_changed:%d\n",
7669 			acrtc->crtc_id,
7670 			new_crtc_state->enable,
7671 			new_crtc_state->active,
7672 			new_crtc_state->planes_changed,
7673 			new_crtc_state->mode_changed,
7674 			new_crtc_state->active_changed,
7675 			new_crtc_state->connectors_changed);
7676 
7677 		/* Copy all transient state flags into dc state */
7678 		if (dm_new_crtc_state->stream) {
7679 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7680 							    dm_new_crtc_state->stream);
7681 		}
7682 
7683 		/* handles headless hotplug case, updating new_state and
7684 		 * aconnector as needed
7685 		 */
7686 
7687 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7688 
7689 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7690 
7691 			if (!dm_new_crtc_state->stream) {
7692 				/*
7693 				 * This could happen because of issues with
7694 				 * delivery of userspace notifications: userspace
7695 				 * tries to set a mode on a display that is in
7696 				 * fact disconnected, so dc_sink is NULL on the
7697 				 * aconnector. We expect a mode reset to come
7698 				 * soon.
7699 				 *
7700 				 * This can also happen when an unplug occurs
7701 				 * while the resume sequence is still ending.
7702 				 *
7703 				 * In either case, we want to pretend we still
7704 				 * have a sink to keep the pipe running, so that
7705 				 * the hw state is consistent with the sw state.
7706 				 */
7707 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7708 						__func__, acrtc->base.base.id);
7709 				continue;
7710 			}
7711 
7712 			if (dm_old_crtc_state->stream)
7713 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7714 
7715 			pm_runtime_get_noresume(dev->dev);
7716 
7717 			acrtc->enabled = true;
7718 			acrtc->hw_mode = new_crtc_state->mode;
7719 			crtc->hwmode = new_crtc_state->mode;
7720 			mode_set_reset_required = true;
7721 		} else if (modereset_required(new_crtc_state)) {
7722 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7723 			/* i.e. reset mode */
7724 			if (dm_old_crtc_state->stream)
7725 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7726 			mode_set_reset_required = true;
7727 		}
7728 	} /* for_each_crtc_in_state() */
7729 
7730 	if (dc_state) {
7731 		/* if there is a mode set or reset, disable eDP PSR */
7732 		if (mode_set_reset_required)
7733 			amdgpu_dm_psr_disable_all(dm);
7734 
7735 		dm_enable_per_frame_crtc_master_sync(dc_state);
7736 		mutex_lock(&dm->dc_lock);
7737 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7738 		mutex_unlock(&dm->dc_lock);
7739 	}
7740 
7741 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7742 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7743 
7744 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7745 
7746 		if (dm_new_crtc_state->stream != NULL) {
7747 			const struct dc_stream_status *status =
7748 					dc_stream_get_status(dm_new_crtc_state->stream);
7749 
7750 			if (!status)
7751 				status = dc_stream_get_status_from_state(dc_state,
7752 									 dm_new_crtc_state->stream);
7753 			if (!status)
7754 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7755 			else
7756 				acrtc->otg_inst = status->primary_otg_inst;
7757 		}
7758 	}
7759 #ifdef CONFIG_DRM_AMD_DC_HDCP
7760 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7761 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7762 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7763 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7764 
7765 		new_crtc_state = NULL;
7766 
7767 		if (acrtc)
7768 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7769 
7770 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7771 
7772 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7773 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7774 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7775 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7776 			continue;
7777 		}
7778 
7779 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7780 			hdcp_update_display(
7781 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7782 				new_con_state->hdcp_content_type,
7783 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7784 													 : false);
7785 	}
7786 #endif
7787 
7788 	/* Handle connector state changes */
7789 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7790 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7791 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7792 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7793 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7794 		struct dc_stream_update stream_update;
7795 		struct dc_info_packet hdr_packet;
7796 		struct dc_stream_status *status = NULL;
7797 		bool abm_changed, hdr_changed, scaling_changed;
7798 
7799 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7800 		memset(&stream_update, 0, sizeof(stream_update));
7801 
7802 		if (acrtc) {
7803 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7804 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7805 		}
7806 
7807 		/* Skip any modesets/resets */
7808 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7809 			continue;
7810 
7811 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7812 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7813 
7814 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7815 							     dm_old_con_state);
7816 
7817 		abm_changed = dm_new_crtc_state->abm_level !=
7818 			      dm_old_crtc_state->abm_level;
7819 
7820 		hdr_changed =
7821 			is_hdr_metadata_different(old_con_state, new_con_state);
7822 
7823 		if (!scaling_changed && !abm_changed && !hdr_changed)
7824 			continue;
7825 
7826 		stream_update.stream = dm_new_crtc_state->stream;
7827 		if (scaling_changed) {
7828 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7829 					dm_new_con_state, dm_new_crtc_state->stream);
7830 
7831 			stream_update.src = dm_new_crtc_state->stream->src;
7832 			stream_update.dst = dm_new_crtc_state->stream->dst;
7833 		}
7834 
7835 		if (abm_changed) {
7836 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7837 
7838 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7839 		}
7840 
7841 		if (hdr_changed) {
7842 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7843 			stream_update.hdr_static_metadata = &hdr_packet;
7844 		}
7845 
7846 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7847 		WARN_ON(!status);
7848 		WARN_ON(!status->plane_count);
7849 
7850 		/*
7851 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7852 		 * Here we create an empty update on each plane.
7853 		 * To fix this, DC should permit updating only stream properties.
7854 		 */
7855 		for (j = 0; j < status->plane_count; j++)
7856 			dummy_updates[j].surface = status->plane_states[0];
7857 
7858 
7859 		mutex_lock(&dm->dc_lock);
7860 		dc_commit_updates_for_stream(dm->dc,
7861 						     dummy_updates,
7862 						     status->plane_count,
7863 						     dm_new_crtc_state->stream,
7864 						     &stream_update,
7865 						     dc_state);
7866 		mutex_unlock(&dm->dc_lock);
7867 	}
7868 
7869 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7870 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7871 				      new_crtc_state, i) {
7872 		if (old_crtc_state->active && !new_crtc_state->active)
7873 			crtc_disable_count++;
7874 
7875 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7876 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7877 
7878 		/* Update the freesync config on the crtc state and the params used by the irq handler */
7879 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7880 
7881 		/* Handle vrr on->off / off->on transitions */
7882 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7883 						dm_new_crtc_state);
7884 	}
7885 
7886 	/**
7887 	 * Enable interrupts for CRTCs that are newly enabled or went through
7888 	 * a modeset. It was intentionally deferred until after the front end
7889 	 * state was modified to wait until the OTG was on and so the IRQ
7890 	 * handlers didn't access stale or invalid state.
7891 	 */
7892 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7893 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7894 
7895 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7896 
7897 		if (new_crtc_state->active &&
7898 		    (!old_crtc_state->active ||
7899 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7900 			dc_stream_retain(dm_new_crtc_state->stream);
7901 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7902 			manage_dm_interrupts(adev, acrtc, true);
7903 
7904 #ifdef CONFIG_DEBUG_FS
7905 			/**
7906 			 * Frontend may have changed so reapply the CRC capture
7907 			 * settings for the stream.
7908 			 */
7909 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7910 
7911 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7912 				amdgpu_dm_crtc_configure_crc_source(
7913 					crtc, dm_new_crtc_state,
7914 					dm_new_crtc_state->crc_src);
7915 			}
7916 #endif
7917 		}
7918 	}
7919 
7920 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7921 		if (new_crtc_state->async_flip)
7922 			wait_for_vblank = false;
7923 
7924 	/* update planes when needed, per crtc */
7925 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7926 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7927 
7928 		if (dm_new_crtc_state->stream)
7929 			amdgpu_dm_commit_planes(state, dc_state, dev,
7930 						dm, crtc, wait_for_vblank);
7931 	}
7932 
7933 	/* Update audio instances for each connector. */
7934 	amdgpu_dm_commit_audio(dev, state);
7935 
7936 	/*
7937 	 * send a vblank event for all events not handled in a flip and
7938 	 * mark each consumed event for drm_atomic_helper_commit_hw_done
7939 	 */
7940 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7941 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7942 
7943 		if (new_crtc_state->event)
7944 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7945 
7946 		new_crtc_state->event = NULL;
7947 	}
7948 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7949 
7950 	/* Signal HW programming completion */
7951 	drm_atomic_helper_commit_hw_done(state);
7952 
7953 	if (wait_for_vblank)
7954 		drm_atomic_helper_wait_for_flip_done(dev, state);
7955 
7956 	drm_atomic_helper_cleanup_planes(dev, state);
7957 
7958 	/*
7959 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7960 	 * so we can put the GPU into runtime suspend if we're not driving any
7961 	 * displays anymore
7962 	 */
7963 	for (i = 0; i < crtc_disable_count; i++)
7964 		pm_runtime_put_autosuspend(dev->dev);
7965 	pm_runtime_mark_last_busy(dev->dev);
7966 
7967 	if (dc_state_temp)
7968 		dc_release_state(dc_state_temp);
7969 }
7970 
7971 
7972 static int dm_force_atomic_commit(struct drm_connector *connector)
7973 {
7974 	int ret = 0;
7975 	struct drm_device *ddev = connector->dev;
7976 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7977 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7978 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7979 	struct drm_connector_state *conn_state;
7980 	struct drm_crtc_state *crtc_state;
7981 	struct drm_plane_state *plane_state;
7982 
7983 	if (!state)
7984 		return -ENOMEM;
7985 
7986 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7987 
7988 	/* Construct an atomic state to restore previous display setting */
7989 
7990 	/*
7991 	 * Attach connectors to drm_atomic_state
7992 	 */
7993 	conn_state = drm_atomic_get_connector_state(state, connector);
7994 
7995 	ret = PTR_ERR_OR_ZERO(conn_state);
7996 	if (ret)
7997 		goto out;
7998 
7999 	/* Attach crtc to drm_atomic_state*/
8000 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8001 
8002 	ret = PTR_ERR_OR_ZERO(crtc_state);
8003 	if (ret)
8004 		goto out;
8005 
8006 	/* force a restore */
8007 	crtc_state->mode_changed = true;
8008 
8009 	/* Attach plane to drm_atomic_state */
8010 	plane_state = drm_atomic_get_plane_state(state, plane);
8011 
8012 	ret = PTR_ERR_OR_ZERO(plane_state);
8013 	if (ret)
8014 		goto out;
8015 
8016 	/* Call commit internally with the state we just constructed */
8017 	ret = drm_atomic_commit(state);
8018 
8019 out:
8020 	drm_atomic_state_put(state);
8021 	if (ret)
8022 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8023 
8024 	return ret;
8025 }
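
/*
 * The function above follows the usual DRM pattern for a driver-internal
 * commit: allocate a drm_atomic_state, pull the affected connector, CRTC
 * and plane states into it, flag crtc_state->mode_changed to force a full
 * modeset, and hand the state to drm_atomic_commit().
 */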
8026 
8027 /*
8028  * This function handles all cases in which a set-mode call does not come
8029  * upon hotplug. This includes when a display is unplugged and then plugged
8030  * back into the same port, and when running without usermode desktop manager support.
8031  */
8032 void dm_restore_drm_connector_state(struct drm_device *dev,
8033 				    struct drm_connector *connector)
8034 {
8035 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8036 	struct amdgpu_crtc *disconnected_acrtc;
8037 	struct dm_crtc_state *acrtc_state;
8038 
8039 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8040 		return;
8041 
8042 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8043 	if (!disconnected_acrtc)
8044 		return;
8045 
8046 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8047 	if (!acrtc_state->stream)
8048 		return;
8049 
8050 	/*
8051 	 * If the previous sink is not released and is different from the current
8052 	 * one, we deduce that we cannot rely on a usermode call to turn on the
8053 	 * display, so we do it here.
8054 	 */
8055 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8056 		dm_force_atomic_commit(&aconnector->base);
8057 }
8058 
8059 /*
8060  * Grabs all modesetting locks to serialize against any blocking commits and
8061  * waits for completion of all non-blocking commits.
8062  */
8063 static int do_aquire_global_lock(struct drm_device *dev,
8064 				 struct drm_atomic_state *state)
8065 {
8066 	struct drm_crtc *crtc;
8067 	struct drm_crtc_commit *commit;
8068 	long ret;
8069 
8070 	/*
8071 	 * Adding all modeset locks to acquire_ctx will
8072 	 * ensure that when the framework releases it, the
8073 	 * extra locks we are taking here will get released too.
8074 	 */
8075 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8076 	if (ret)
8077 		return ret;
8078 
8079 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
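		/*
		 * Take a reference on the first pending commit under
		 * commit_lock so it cannot be freed while we wait on its
		 * hw_done and flip_done completions below.
		 */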
8080 		spin_lock(&crtc->commit_lock);
8081 		commit = list_first_entry_or_null(&crtc->commit_list,
8082 				struct drm_crtc_commit, commit_entry);
8083 		if (commit)
8084 			drm_crtc_commit_get(commit);
8085 		spin_unlock(&crtc->commit_lock);
8086 
8087 		if (!commit)
8088 			continue;
8089 
8090 		/*
8091 		 * Make sure all pending HW programming has completed and
8092 		 * all page flips are done
8093 		 */
8094 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8095 
8096 		if (ret > 0)
8097 			ret = wait_for_completion_interruptible_timeout(
8098 					&commit->flip_done, 10*HZ);
8099 
8100 		if (ret == 0)
8101 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8102 				  "timed out\n", crtc->base.id, crtc->name);
8103 
8104 		drm_crtc_commit_put(commit);
8105 	}
8106 
8107 	return ret < 0 ? ret : 0;
8108 }
8109 
8110 static void get_freesync_config_for_crtc(
8111 	struct dm_crtc_state *new_crtc_state,
8112 	struct dm_connector_state *new_con_state)
8113 {
8114 	struct mod_freesync_config config = {0};
8115 	struct amdgpu_dm_connector *aconnector =
8116 			to_amdgpu_dm_connector(new_con_state->base.connector);
8117 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8118 	int vrefresh = drm_mode_vrefresh(mode);
8119 
8120 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8121 					vrefresh >= aconnector->min_vfreq &&
8122 					vrefresh <= aconnector->max_vfreq;
8123 
8124 	if (new_crtc_state->vrr_supported) {
8125 		new_crtc_state->stream->ignore_msa_timing_param = true;
8126 		config.state = new_crtc_state->base.vrr_enabled ?
8127 				VRR_STATE_ACTIVE_VARIABLE :
8128 				VRR_STATE_INACTIVE;
8129 		config.min_refresh_in_uhz =
8130 				aconnector->min_vfreq * 1000000;
8131 		config.max_refresh_in_uhz =
8132 				aconnector->max_vfreq * 1000000;
8133 		config.vsif_supported = true;
8134 		config.btr = true;
8135 	}
8136 
8137 	new_crtc_state->freesync_config = config;
8138 }
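
/*
 * Example: for a panel reporting a 48-144 Hz FreeSync range, the code above
 * stores min_refresh_in_uhz = 48 * 1000000 = 48000000 and
 * max_refresh_in_uhz = 144 * 1000000 = 144000000, i.e. the Hz values from the
 * connector are simply scaled to microhertz for the freesync module.
 */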
8139 
8140 static void reset_freesync_config_for_crtc(
8141 	struct dm_crtc_state *new_crtc_state)
8142 {
8143 	new_crtc_state->vrr_supported = false;
8144 
8145 	memset(&new_crtc_state->vrr_infopacket, 0,
8146 	       sizeof(new_crtc_state->vrr_infopacket));
8147 }
8148 
8149 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8150 				struct drm_atomic_state *state,
8151 				struct drm_crtc *crtc,
8152 				struct drm_crtc_state *old_crtc_state,
8153 				struct drm_crtc_state *new_crtc_state,
8154 				bool enable,
8155 				bool *lock_and_validation_needed)
8156 {
8157 	struct dm_atomic_state *dm_state = NULL;
8158 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8159 	struct dc_stream_state *new_stream;
8160 	int ret = 0;
8161 
8162 	/*
8163 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8164 	 * update changed items
8165 	 */
8166 	struct amdgpu_crtc *acrtc = NULL;
8167 	struct amdgpu_dm_connector *aconnector = NULL;
8168 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8169 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8170 
8171 	new_stream = NULL;
8172 
8173 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8174 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8175 	acrtc = to_amdgpu_crtc(crtc);
8176 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8177 
8178 	/* TODO This hack should go away */
8179 	if (aconnector && enable) {
8180 		/* Make sure fake sink is created in plug-in scenario */
8181 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8182 							    &aconnector->base);
8183 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8184 							    &aconnector->base);
8185 
8186 		if (IS_ERR(drm_new_conn_state)) {
8187 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8188 			goto fail;
8189 		}
8190 
8191 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8192 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8193 
8194 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8195 			goto skip_modeset;
8196 
8197 		new_stream = create_validate_stream_for_sink(aconnector,
8198 							     &new_crtc_state->mode,
8199 							     dm_new_conn_state,
8200 							     dm_old_crtc_state->stream);
8201 
8202 		/*
8203 		 * We can have no stream on ACTION_SET if a display
8204 		 * was disconnected during S3. In this case it is not an
8205 		 * error: the OS will be updated after detection, and
8206 		 * will do the right thing on the next atomic commit.
8207 		 */
8208 
8209 		if (!new_stream) {
8210 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8211 					__func__, acrtc->base.base.id);
8212 			ret = -ENOMEM;
8213 			goto fail;
8214 		}
8215 
8216 		/*
8217 		 * TODO: Check VSDB bits to decide whether this should
8218 		 * be enabled or not.
8219 		 */
8220 		new_stream->triggered_crtc_reset.enabled =
8221 			dm->force_timing_sync;
8222 
8223 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8224 
8225 		ret = fill_hdr_info_packet(drm_new_conn_state,
8226 					   &new_stream->hdr_static_metadata);
8227 		if (ret)
8228 			goto fail;
8229 
8230 		/*
8231 		 * If we already removed the old stream from the context
8232 		 * (and set the new stream to NULL) then we can't reuse
8233 		 * the old stream even if the stream and scaling are unchanged.
8234 		 * We'll hit the BUG_ON and get a black screen.
8235 		 *
8236 		 * TODO: Refactor this function to allow this check to work
8237 		 * in all conditions.
8238 		 */
8239 		if (dm_new_crtc_state->stream &&
8240 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8241 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8242 			new_crtc_state->mode_changed = false;
8243 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8244 					 new_crtc_state->mode_changed);
8245 		}
8246 	}
8247 
8248 	/* mode_changed flag may get updated above, need to check again */
8249 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8250 		goto skip_modeset;
8251 
8252 	DRM_DEBUG_DRIVER(
8253 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8254 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8255 		"connectors_changed:%d\n",
8256 		acrtc->crtc_id,
8257 		new_crtc_state->enable,
8258 		new_crtc_state->active,
8259 		new_crtc_state->planes_changed,
8260 		new_crtc_state->mode_changed,
8261 		new_crtc_state->active_changed,
8262 		new_crtc_state->connectors_changed);
8263 
8264 	/* Remove stream for any changed/disabled CRTC */
8265 	if (!enable) {
8266 
8267 		if (!dm_old_crtc_state->stream)
8268 			goto skip_modeset;
8269 
8270 		ret = dm_atomic_get_state(state, &dm_state);
8271 		if (ret)
8272 			goto fail;
8273 
8274 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8275 				crtc->base.id);
8276 
8277 		/* i.e. reset mode */
8278 		if (dc_remove_stream_from_ctx(
8279 				dm->dc,
8280 				dm_state->context,
8281 				dm_old_crtc_state->stream) != DC_OK) {
8282 			ret = -EINVAL;
8283 			goto fail;
8284 		}
8285 
8286 		dc_stream_release(dm_old_crtc_state->stream);
8287 		dm_new_crtc_state->stream = NULL;
8288 
8289 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8290 
8291 		*lock_and_validation_needed = true;
8292 
8293 	} else { /* Add stream for any updated/enabled CRTC */
8294 		/*
8295 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
8296 		 * newly added MST connectors are not found in the existing crtc_state
8297 		 * in chained (daisy-chain) mode. TODO: dig out the root cause of this.
8298 		 */
8299 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8300 			goto skip_modeset;
8301 
8302 		if (modereset_required(new_crtc_state))
8303 			goto skip_modeset;
8304 
8305 		if (modeset_required(new_crtc_state, new_stream,
8306 				     dm_old_crtc_state->stream)) {
8307 
8308 			WARN_ON(dm_new_crtc_state->stream);
8309 
8310 			ret = dm_atomic_get_state(state, &dm_state);
8311 			if (ret)
8312 				goto fail;
8313 
8314 			dm_new_crtc_state->stream = new_stream;
8315 
8316 			dc_stream_retain(new_stream);
8317 
8318 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8319 						crtc->base.id);
8320 
8321 			if (dc_add_stream_to_ctx(
8322 					dm->dc,
8323 					dm_state->context,
8324 					dm_new_crtc_state->stream) != DC_OK) {
8325 				ret = -EINVAL;
8326 				goto fail;
8327 			}
8328 
8329 			*lock_and_validation_needed = true;
8330 		}
8331 	}
8332 
8333 skip_modeset:
8334 	/* Release extra reference */
8335 	if (new_stream)
8336 		dc_stream_release(new_stream);
8337 
8338 	/*
8339 	 * Below, we want to do dc stream updates that do not require a
8340 	 * full modeset.
8341 	 */
8342 	if (!(enable && aconnector && new_crtc_state->active))
8343 		return 0;
8344 	/*
8345 	 * Given above conditions, the dc state cannot be NULL because:
8346 	 * 1. We're in the process of enabling CRTCs (just been added
8347 	 *    to the dc context, or already in the context)
8348 	 * 2. Has a valid connector attached, and
8349 	 * 3. Is currently active and enabled.
8350 	 * => The dc stream state currently exists.
8351 	 */
8352 	BUG_ON(dm_new_crtc_state->stream == NULL);
8353 
8354 	/* Scaling or underscan settings */
8355 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8356 				drm_atomic_crtc_needs_modeset(new_crtc_state))
8357 		update_stream_scaling_settings(
8358 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8359 
8360 	/* ABM settings */
8361 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8362 
8363 	/*
8364 	 * Color management settings. We also update color properties
8365 	 * when a modeset is needed, to ensure it gets reprogrammed.
8366 	 */
8367 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8368 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8369 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8370 		if (ret)
8371 			goto fail;
8372 	}
8373 
8374 	/* Update Freesync settings. */
8375 	get_freesync_config_for_crtc(dm_new_crtc_state,
8376 				     dm_new_conn_state);
8377 
8378 	return ret;
8379 
8380 fail:
8381 	if (new_stream)
8382 		dc_stream_release(new_stream);
8383 	return ret;
8384 }
8385 
8386 static bool should_reset_plane(struct drm_atomic_state *state,
8387 			       struct drm_plane *plane,
8388 			       struct drm_plane_state *old_plane_state,
8389 			       struct drm_plane_state *new_plane_state)
8390 {
8391 	struct drm_plane *other;
8392 	struct drm_plane_state *old_other_state, *new_other_state;
8393 	struct drm_crtc_state *new_crtc_state;
8394 	int i;
8395 
8396 	/*
8397 	 * TODO: Remove this hack once the checks below are sufficient
8398 	 * to determine when we need to reset all the planes on
8399 	 * the stream.
8400 	 */
8401 	if (state->allow_modeset)
8402 		return true;
8403 
8404 	/* Exit early if we know that we're adding or removing the plane. */
8405 	if (old_plane_state->crtc != new_plane_state->crtc)
8406 		return true;
8407 
8408 	/* old crtc == new_crtc == NULL, plane not in context. */
8409 	if (!new_plane_state->crtc)
8410 		return false;
8411 
8412 	new_crtc_state =
8413 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8414 
8415 	if (!new_crtc_state)
8416 		return true;
8417 
8418 	/* CRTC Degamma changes currently require us to recreate planes. */
8419 	if (new_crtc_state->color_mgmt_changed)
8420 		return true;
8421 
8422 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8423 		return true;
8424 
8425 	/*
8426 	 * If there are any new primary or overlay planes being added or
8427 	 * removed then the z-order can potentially change. To ensure
8428 	 * correct z-order and pipe acquisition the current DC architecture
8429 	 * requires us to remove and recreate all existing planes.
8430 	 *
8431 	 * TODO: Come up with a more elegant solution for this.
8432 	 */
8433 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8434 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8435 
8436 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8437 			continue;
8438 
8439 		if (old_other_state->crtc != new_plane_state->crtc &&
8440 		    new_other_state->crtc != new_plane_state->crtc)
8441 			continue;
8442 
8443 		if (old_other_state->crtc != new_other_state->crtc)
8444 			return true;
8445 
8446 		/* Src/dst size and scaling updates. */
8447 		if (old_other_state->src_w != new_other_state->src_w ||
8448 		    old_other_state->src_h != new_other_state->src_h ||
8449 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8450 		    old_other_state->crtc_h != new_other_state->crtc_h)
8451 			return true;
8452 
8453 		/* Rotation / mirroring updates. */
8454 		if (old_other_state->rotation != new_other_state->rotation)
8455 			return true;
8456 
8457 		/* Blending updates. */
8458 		if (old_other_state->pixel_blend_mode !=
8459 		    new_other_state->pixel_blend_mode)
8460 			return true;
8461 
8462 		/* Alpha updates. */
8463 		if (old_other_state->alpha != new_other_state->alpha)
8464 			return true;
8465 
8466 		/* Colorspace changes. */
8467 		if (old_other_state->color_range != new_other_state->color_range ||
8468 		    old_other_state->color_encoding != new_other_state->color_encoding)
8469 			return true;
8470 
8471 		/* Framebuffer checks fall at the end. */
8472 		if (!old_other_state->fb || !new_other_state->fb)
8473 			continue;
8474 
8475 		/* Pixel format changes can require bandwidth updates. */
8476 		if (old_other_state->fb->format != new_other_state->fb->format)
8477 			return true;
8478 
8479 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8480 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8481 
8482 		/* Tiling and DCC changes also require bandwidth updates. */
8483 		if (old_dm_plane_state->tiling_flags !=
8484 		    new_dm_plane_state->tiling_flags)
8485 			return true;
8486 	}
8487 
8488 	return false;
8489 }
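
/*
 * Example: enabling a second overlay plane on a CRTC makes should_reset_plane()
 * return true for the primary plane already on that CRTC even though the
 * primary itself is unchanged, so DC removes and recreates every plane on the
 * stream to keep z-order and pipe allocation consistent.
 */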
8490 
8491 static int dm_update_plane_state(struct dc *dc,
8492 				 struct drm_atomic_state *state,
8493 				 struct drm_plane *plane,
8494 				 struct drm_plane_state *old_plane_state,
8495 				 struct drm_plane_state *new_plane_state,
8496 				 bool enable,
8497 				 bool *lock_and_validation_needed)
8498 {
8499 
8500 	struct dm_atomic_state *dm_state = NULL;
8501 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8502 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8503 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8504 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8505 	struct amdgpu_crtc *new_acrtc;
8506 	bool needs_reset;
8507 	int ret = 0;
8508 
8509 
8510 	new_plane_crtc = new_plane_state->crtc;
8511 	old_plane_crtc = old_plane_state->crtc;
8512 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8513 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8514 
8515 	/* TODO: Implement a better atomic check for the cursor plane */
8516 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8517 		if (!enable || !new_plane_crtc ||
8518 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8519 			return 0;
8520 
8521 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8522 
8523 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8524 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8525 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8526 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8527 			return -EINVAL;
8528 		}
8529 
8530 		return 0;
8531 	}
8532 
8533 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8534 					 new_plane_state);
8535 
8536 	/* Remove any changed/removed planes */
8537 	if (!enable) {
8538 		if (!needs_reset)
8539 			return 0;
8540 
8541 		if (!old_plane_crtc)
8542 			return 0;
8543 
8544 		old_crtc_state = drm_atomic_get_old_crtc_state(
8545 				state, old_plane_crtc);
8546 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8547 
8548 		if (!dm_old_crtc_state->stream)
8549 			return 0;
8550 
8551 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8552 				plane->base.id, old_plane_crtc->base.id);
8553 
8554 		ret = dm_atomic_get_state(state, &dm_state);
8555 		if (ret)
8556 			return ret;
8557 
8558 		if (!dc_remove_plane_from_context(
8559 				dc,
8560 				dm_old_crtc_state->stream,
8561 				dm_old_plane_state->dc_state,
8562 				dm_state->context)) {
8563 
8564 			return -EINVAL;
8565 		}
8566 
8567 		if (dm_old_plane_state->dc_state)
8568 			dc_plane_state_release(dm_old_plane_state->dc_state);
8569 
8570 		dm_new_plane_state->dc_state = NULL;
8571 
8572 		*lock_and_validation_needed = true;
8573 
8574 	} else { /* Add new planes */
8575 		struct dc_plane_state *dc_new_plane_state;
8576 
8577 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8578 			return 0;
8579 
8580 		if (!new_plane_crtc)
8581 			return 0;
8582 
8583 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8584 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8585 
8586 		if (!dm_new_crtc_state->stream)
8587 			return 0;
8588 
8589 		if (!needs_reset)
8590 			return 0;
8591 
8592 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8593 		if (ret)
8594 			return ret;
8595 
8596 		WARN_ON(dm_new_plane_state->dc_state);
8597 
8598 		dc_new_plane_state = dc_create_plane_state(dc);
8599 		if (!dc_new_plane_state)
8600 			return -ENOMEM;
8601 
8602 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8603 				plane->base.id, new_plane_crtc->base.id);
8604 
8605 		ret = fill_dc_plane_attributes(
8606 			drm_to_adev(new_plane_crtc->dev),
8607 			dc_new_plane_state,
8608 			new_plane_state,
8609 			new_crtc_state);
8610 		if (ret) {
8611 			dc_plane_state_release(dc_new_plane_state);
8612 			return ret;
8613 		}
8614 
8615 		ret = dm_atomic_get_state(state, &dm_state);
8616 		if (ret) {
8617 			dc_plane_state_release(dc_new_plane_state);
8618 			return ret;
8619 		}
8620 
8621 		/*
8622 		 * Any atomic check errors that occur after this will
8623 		 * not need a release. The plane state will be attached
8624 		 * to the stream, and therefore part of the atomic
8625 		 * state. It'll be released when the atomic state is
8626 		 * cleaned.
8627 		 */
8628 		if (!dc_add_plane_to_context(
8629 				dc,
8630 				dm_new_crtc_state->stream,
8631 				dc_new_plane_state,
8632 				dm_state->context)) {
8633 
8634 			dc_plane_state_release(dc_new_plane_state);
8635 			return -EINVAL;
8636 		}
8637 
8638 		dm_new_plane_state->dc_state = dc_new_plane_state;
8639 
8640 		/* Tell DC to do a full surface update every time there
8641 		 * is a plane change. Inefficient, but works for now.
8642 		 */
8643 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8644 
8645 		*lock_and_validation_needed = true;
8646 	}
8647 
8648 
8649 	return ret;
8650 }
8651 
8652 #if defined(CONFIG_DRM_AMD_DC_DCN)
8653 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8654 {
8655 	struct drm_connector *connector;
8656 	struct drm_connector_state *conn_state, *old_conn_state;
8657 	struct amdgpu_dm_connector *aconnector = NULL;
8658 	int i;
8659 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
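		/*
		 * A connector being disabled in this state has no CRTC in its
		 * new state; fall back to the old state to find the CRTC it
		 * was previously driving.
		 */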
8660 		if (!conn_state->crtc)
8661 			conn_state = old_conn_state;
8662 
8663 		if (conn_state->crtc != crtc)
8664 			continue;
8665 
8666 		aconnector = to_amdgpu_dm_connector(connector);
8667 		if (!aconnector->port || !aconnector->mst_port)
8668 			aconnector = NULL;
8669 		else
8670 			break;
8671 	}
8672 
8673 	if (!aconnector)
8674 		return 0;
8675 
8676 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8677 }
8678 #endif
8679 
8680 static int validate_overlay(struct drm_atomic_state *state)
8681 {
8682 	int i;
8683 	struct drm_plane *plane;
8684 	struct drm_plane_state *old_plane_state, *new_plane_state;
8685 	struct drm_plane_state *primary_state, *overlay_state = NULL;
8686 
8687 	/* Check if primary plane is contained inside overlay */
8688 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8689 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8690 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8691 				return 0;
8692 
8693 			overlay_state = new_plane_state;
8694 			continue;
8695 		}
8696 	}
8697 
8698 	/* check if we're making changes to the overlay plane */
8699 	if (!overlay_state)
8700 		return 0;
8701 
8702 	/* check if overlay plane is enabled */
8703 	if (!overlay_state->crtc)
8704 		return 0;
8705 
8706 	/* find the primary plane for the CRTC that the overlay is enabled on */
8707 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8708 	if (IS_ERR(primary_state))
8709 		return PTR_ERR(primary_state);
8710 
8711 	/* check if primary plane is enabled */
8712 	if (!primary_state->crtc)
8713 		return 0;
8714 
8715 	/* Perform the bounds check to ensure the overlay plane covers the primary */
8716 	if (primary_state->crtc_x < overlay_state->crtc_x ||
8717 	    primary_state->crtc_y < overlay_state->crtc_y ||
8718 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8719 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8720 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
8721 		return -EINVAL;
8722 	}
8723 
8724 	return 0;
8725 }
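
/*
 * Example: with the overlay at crtc_x/y = (0, 0) and crtc_w/h = (1280, 720),
 * a primary plane at (0, 0, 1920, 1080) fails the bounds check above because
 * 0 + 1920 > 0 + 1280; a primary at (0, 0, 1280, 720) is fully covered and
 * passes.
 */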
8726 
8727 /**
8728  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8729  * @dev: The DRM device
8730  * @state: The atomic state to commit
8731  *
8732  * Validate that the given atomic state is programmable by DC into hardware.
8733  * This involves constructing a &struct dc_state reflecting the new hardware
8734  * state we wish to commit, then querying DC to see if it is programmable. It's
8735  * important not to modify the existing DC state. Otherwise, atomic_check
8736  * may unexpectedly commit hardware changes.
8737  *
8738  * When validating the DC state, it's important that the right locks are
8739  * acquired. For the full update case, which removes/adds/updates streams on
8740  * one CRTC while flipping on another, acquiring the global lock guarantees
8741  * that any such full update commit will wait for completion of any outstanding
8742  * flip using DRM's synchronization events.
8743  *
8744  * Note that DM adds the affected connectors for all CRTCs in state, even when
8745  * that might not seem necessary. This is because DC stream creation requires the
8746  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8747  * be possible but non-trivial - a possible TODO item.
8748  *
8749  * Return: 0 on success, or a negative error code if validation failed.
8750  */
8751 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8752 				  struct drm_atomic_state *state)
8753 {
8754 	struct amdgpu_device *adev = drm_to_adev(dev);
8755 	struct dm_atomic_state *dm_state = NULL;
8756 	struct dc *dc = adev->dm.dc;
8757 	struct drm_connector *connector;
8758 	struct drm_connector_state *old_con_state, *new_con_state;
8759 	struct drm_crtc *crtc;
8760 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8761 	struct drm_plane *plane;
8762 	struct drm_plane_state *old_plane_state, *new_plane_state;
8763 	enum dc_status status;
8764 	int ret, i;
8765 	bool lock_and_validation_needed = false;
8766 
8767 	amdgpu_check_debugfs_connector_property_change(adev, state);
8768 
8769 	ret = drm_atomic_helper_check_modeset(dev, state);
8770 	if (ret)
8771 		goto fail;
8772 
8773 	/* Check connector changes */
8774 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8775 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8776 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8777 
8778 		/* Skip connectors that are disabled or part of modeset already. */
8779 		if (!old_con_state->crtc && !new_con_state->crtc)
8780 			continue;
8781 
8782 		if (!new_con_state->crtc)
8783 			continue;
8784 
8785 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8786 		if (IS_ERR(new_crtc_state)) {
8787 			ret = PTR_ERR(new_crtc_state);
8788 			goto fail;
8789 		}
8790 
8791 		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
8792 		    dm_old_con_state->scaling != dm_new_con_state->scaling)
8793 			new_crtc_state->connectors_changed = true;
8794 	}
8795 
8796 #if defined(CONFIG_DRM_AMD_DC_DCN)
8797 	if (dc_resource_is_dsc_encoding_supported(dc)) {
8798 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8799 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8800 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8801 				if (ret)
8802 					goto fail;
8803 			}
8804 		}
8805 	}
8806 #endif
8807 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8808 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8809 		    !new_crtc_state->color_mgmt_changed &&
8810 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8811 			continue;
8812 
8813 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8814 		if (ret)
8815 			goto fail;
8816 
8817 		if (!new_crtc_state->enable)
8818 			continue;
8819 
8820 		ret = drm_atomic_add_affected_connectors(state, crtc);
8821 		if (ret)
8822 			return ret;
8823 
8824 		ret = drm_atomic_add_affected_planes(state, crtc);
8825 		if (ret)
8826 			goto fail;
8827 	}
8828 
8829 	/*
8830 	 * Add all primary and overlay planes on the CRTC to the state
8831 	 * whenever a plane is enabled to maintain correct z-ordering
8832 	 * and to enable fast surface updates.
8833 	 */
8834 	drm_for_each_crtc(crtc, dev) {
8835 		bool modified = false;
8836 
8837 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8838 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8839 				continue;
8840 
8841 			if (new_plane_state->crtc == crtc ||
8842 			    old_plane_state->crtc == crtc) {
8843 				modified = true;
8844 				break;
8845 			}
8846 		}
8847 
8848 		if (!modified)
8849 			continue;
8850 
8851 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8852 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8853 				continue;
8854 
8855 			new_plane_state =
8856 				drm_atomic_get_plane_state(state, plane);
8857 
8858 			if (IS_ERR(new_plane_state)) {
8859 				ret = PTR_ERR(new_plane_state);
8860 				goto fail;
8861 			}
8862 		}
8863 	}
8864 
8865 	/* Prepass for updating tiling flags on new planes. */
8866 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8867 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8868 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8869 
8870 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8871 				  &new_dm_plane_state->tmz_surface);
8872 		if (ret)
8873 			goto fail;
8874 	}
8875 
8876 	/* Remove existing planes if they are modified */
8877 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8878 		ret = dm_update_plane_state(dc, state, plane,
8879 					    old_plane_state,
8880 					    new_plane_state,
8881 					    false,
8882 					    &lock_and_validation_needed);
8883 		if (ret)
8884 			goto fail;
8885 	}
8886 
8887 	/* Disable all crtcs which require disable */
8888 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8889 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8890 					   old_crtc_state,
8891 					   new_crtc_state,
8892 					   false,
8893 					   &lock_and_validation_needed);
8894 		if (ret)
8895 			goto fail;
8896 	}
8897 
8898 	/* Enable all crtcs which require enable */
8899 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8900 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8901 					   old_crtc_state,
8902 					   new_crtc_state,
8903 					   true,
8904 					   &lock_and_validation_needed);
8905 		if (ret)
8906 			goto fail;
8907 	}
8908 
8909 	ret = validate_overlay(state);
8910 	if (ret)
8911 		goto fail;
8912 
8913 	/* Add new/modified planes */
8914 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8915 		ret = dm_update_plane_state(dc, state, plane,
8916 					    old_plane_state,
8917 					    new_plane_state,
8918 					    true,
8919 					    &lock_and_validation_needed);
8920 		if (ret)
8921 			goto fail;
8922 	}
8923 
8924 	/* Run this here since we want to validate the streams we created */
8925 	ret = drm_atomic_helper_check_planes(dev, state);
8926 	if (ret)
8927 		goto fail;
8928 
8929 	if (state->legacy_cursor_update) {
8930 		/*
8931 		 * This is a fast cursor update coming from the plane update
8932 		 * helper, check if it can be done asynchronously for better
8933 		 * performance.
8934 		 */
8935 		state->async_update =
8936 			!drm_atomic_helper_async_check(dev, state);
8937 
8938 		/*
8939 		 * Skip the remaining global validation if this is an async
8940 		 * update. Cursor updates can be done without affecting
8941 		 * state or bandwidth calcs and this avoids the performance
8942 		 * penalty of locking the private state object and
8943 		 * allocating a new dc_state.
8944 		 */
8945 		if (state->async_update)
8946 			return 0;
8947 	}
8948 
8949 	/* Check scaling and underscan changes */
8950 	/* TODO: Removed scaling changes validation due to inability to commit
8951 	 * a new stream into the context w/o causing a full reset. Need to
8952 	 * decide how to handle.
8953 	 */
8954 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8955 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8956 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8957 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8958 
8959 		/* Skip any modesets/resets */
8960 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8961 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8962 			continue;
8963 
8964 		/* Skip anything that is not a scaling or underscan change */
8965 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8966 			continue;
8967 
8968 		lock_and_validation_needed = true;
8969 	}
8970 
8971 	/*
8972 	 * Streams and planes are reset when there are changes that affect
8973 	 * bandwidth. Anything that affects bandwidth needs to go through
8974 	 * DC global validation to ensure that the configuration can be applied
8975 	 * to hardware.
8976 	 *
8977 	 * We have to currently stall out here in atomic_check for outstanding
8978 	 * commits to finish in this case because our IRQ handlers reference
8979 	 * DRM state directly - we can end up disabling interrupts too early
8980 	 * if we don't.
8981 	 *
8982 	 * TODO: Remove this stall and drop DM state private objects.
8983 	 */
8984 	if (lock_and_validation_needed) {
8985 		ret = dm_atomic_get_state(state, &dm_state);
8986 		if (ret)
8987 			goto fail;
8988 
8989 		ret = do_aquire_global_lock(dev, state);
8990 		if (ret)
8991 			goto fail;
8992 
8993 #if defined(CONFIG_DRM_AMD_DC_DCN)
8994 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8995 			goto fail;
8996 
8997 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8998 		if (ret)
8999 			goto fail;
9000 #endif
9001 
9002 		/*
9003 		 * Perform validation of MST topology in the state:
9004 		 * We need to perform MST atomic check before calling
9005 		 * dc_validate_global_state(), or there is a chance
9006 		 * to get stuck in an infinite loop and hang eventually.
9007 		 */
9008 		ret = drm_dp_mst_atomic_check(state);
9009 		if (ret)
9010 			goto fail;
9011 		status = dc_validate_global_state(dc, dm_state->context, false);
9012 		if (status != DC_OK) {
9013 			drm_dbg_atomic(dev,
9014 				       "DC global validation failure: %s (%d)",
9015 				       dc_status_to_str(status), status);
9016 			ret = -EINVAL;
9017 			goto fail;
9018 		}
9019 	} else {
9020 		/*
9021 		 * The commit is a fast update. Fast updates shouldn't change
9022 		 * the DC context, affect global validation, and can have their
9023 		 * commit work done in parallel with other commits not touching
9024 		 * the same resource. If we have a new DC context as part of
9025 		 * the DM atomic state from validation we need to free it and
9026 		 * retain the existing one instead.
9027 		 *
9028 		 * Furthermore, since the DM atomic state only contains the DC
9029 		 * context and can safely be annulled, we can free the state
9030 		 * and clear the associated private object now to free
9031 		 * some memory and avoid a possible use-after-free later.
9032 		 */
9033 
9034 		for (i = 0; i < state->num_private_objs; i++) {
9035 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9036 
9037 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9038 				int j = state->num_private_objs-1;
9039 
9040 				dm_atomic_destroy_state(obj,
9041 						state->private_objs[i].state);
9042 
9043 				/* If i is not at the end of the array then the
9044 				 * last element needs to be moved to where i was
9045 				 * before the array can safely be truncated.
9046 				 */
9047 				if (i != j)
9048 					state->private_objs[i] =
9049 						state->private_objs[j];
9050 
9051 				state->private_objs[j].ptr = NULL;
9052 				state->private_objs[j].state = NULL;
9053 				state->private_objs[j].old_state = NULL;
9054 				state->private_objs[j].new_state = NULL;
9055 
9056 				state->num_private_objs = j;
9057 				break;
9058 			}
9059 		}
9060 	}
9061 
9062 	/* Store the overall update type for use later in atomic check. */
9063 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9064 		struct dm_crtc_state *dm_new_crtc_state =
9065 			to_dm_crtc_state(new_crtc_state);
9066 
9067 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9068 							 UPDATE_TYPE_FULL :
9069 							 UPDATE_TYPE_FAST;
9070 	}
9071 
9072 	/* Must be success */
9073 	WARN_ON(ret);
9074 	return ret;
9075 
9076 fail:
9077 	if (ret == -EDEADLK)
9078 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9079 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9080 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9081 	else
9082 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9083 
9084 	return ret;
9085 }
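
/*
 * The private-object cleanup in the fast-update path above is a swap-remove:
 * the array is unordered, so element i is deleted by moving the last element
 * into its slot and truncating the count. The same technique in isolation, as
 * a minimal sketch (hypothetical helper, not driver code):
 *
 *	static void swap_remove(int *arr, int *count, int i)
 *	{
 *		arr[i] = arr[*count - 1];	// fill the hole with the last element
 *		(*count)--;			// truncate the array
 *	}
 */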
9086 
9087 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9088 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9089 {
9090 	uint8_t dpcd_data;
9091 	bool capable = false;
9092 
9093 	if (amdgpu_dm_connector->dc_link &&
9094 		dm_helpers_dp_read_dpcd(
9095 				NULL,
9096 				amdgpu_dm_connector->dc_link,
9097 				DP_DOWN_STREAM_PORT_COUNT,
9098 				&dpcd_data,
9099 				sizeof(dpcd_data))) {
9100 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9101 	}
9102 
9103 	return capable;
9104 }
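
/*
 * Example: DP_DOWN_STREAM_PORT_COUNT is DPCD address 0x007 and
 * DP_MSA_TIMING_PAR_IGNORED is bit 6, so a read-back of e.g. 0x41 (one
 * downstream port, bit 6 set) marks the sink as capable of ignoring the MSA
 * timing parameters.
 */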
9105 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9106 					struct edid *edid)
9107 {
9108 	int i;
9109 	bool edid_check_required;
9110 	struct detailed_timing *timing;
9111 	struct detailed_non_pixel *data;
9112 	struct detailed_data_monitor_range *range;
9113 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9114 			to_amdgpu_dm_connector(connector);
9115 	struct dm_connector_state *dm_con_state = NULL;
9116 
9117 	struct drm_device *dev = connector->dev;
9118 	struct amdgpu_device *adev = drm_to_adev(dev);
9119 	bool freesync_capable = false;
9120 
9121 	if (!connector->state) {
9122 		DRM_ERROR("%s - Connector has no state", __func__);
9123 		goto update;
9124 	}
9125 
9126 	if (!edid) {
9127 		dm_con_state = to_dm_connector_state(connector->state);
9128 
9129 		amdgpu_dm_connector->min_vfreq = 0;
9130 		amdgpu_dm_connector->max_vfreq = 0;
9131 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9132 
9133 		goto update;
9134 	}
9135 
9136 	dm_con_state = to_dm_connector_state(connector->state);
9137 
9138 	edid_check_required = false;
9139 	if (!amdgpu_dm_connector->dc_sink) {
9140 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9141 		goto update;
9142 	}
9143 	if (!adev->dm.freesync_module)
9144 		goto update;
9145 	/*
9146 	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
9147 	 */
9148 	if (edid) {
9149 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9150 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9151 			edid_check_required = is_dp_capable_without_timing_msa(
9152 						adev->dm.dc,
9153 						amdgpu_dm_connector);
9154 		}
9155 	}
9156 	if (edid_check_required && (edid->version > 1 ||
9157 	   (edid->version == 1 && edid->revision > 1))) {
9158 		for (i = 0; i < 4; i++) {
9159 
9160 			timing	= &edid->detailed_timings[i];
9161 			data	= &timing->data.other_data;
9162 			range	= &data->data.range;
9163 			/*
9164 			 * Check if monitor has continuous frequency mode
9165 			 */
9166 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9167 				continue;
9168 			/*
9169 			 * Check for flag range limits only. If flag == 1 then
9170 			 * no additional timing information is provided.
9171 			 * Default GTF, GTF Secondary curve and CVT are not
9172 			 * supported.
9173 			 */
9174 			if (range->flags != 1)
9175 				continue;
9176 
9177 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9178 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9179 			amdgpu_dm_connector->pixel_clock_mhz =
9180 				range->pixel_clock_mhz * 10;
9181 			break;
9182 		}
9183 
9184 		if (amdgpu_dm_connector->max_vfreq -
9185 		    amdgpu_dm_connector->min_vfreq > 10) {
9186 
9187 			freesync_capable = true;
9188 		}
9189 	}
9190 
9191 update:
9192 	if (dm_con_state)
9193 		dm_con_state->freesync_capable = freesync_capable;
9194 
9195 	if (connector->vrr_capable_property)
9196 		drm_connector_set_vrr_capable_property(connector,
9197 						       freesync_capable);
9198 }
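
/*
 * Example: an EDID monitor range descriptor with flags == 1, min_vfreq = 40
 * and max_vfreq = 144 gives a 104 Hz span, which is greater than 10, so the
 * connector is marked freesync_capable; the descriptor's pixel clock byte
 * (in 10 MHz units) is scaled by 10 into pixel_clock_mhz.
 */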
9199 
9200 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9201 {
9202 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9203 
9204 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9205 		return;
9206 	if (link->type == dc_connection_none)
9207 		return;
9208 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9209 					dpcd_data, sizeof(dpcd_data))) {
9210 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9211 
9212 		if (dpcd_data[0] == 0) {
9213 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9214 			link->psr_settings.psr_feature_enabled = false;
9215 		} else {
9216 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9217 			link->psr_settings.psr_feature_enabled = true;
9218 		}
9219 
9220 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9221 	}
9222 }
9223 
9224 /*
9225  * amdgpu_dm_link_setup_psr() - configure psr link
9226  * @stream: stream state
9227  *
9228  * Return: true if success
9229  */
9230 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9231 {
9232 	struct dc_link *link = NULL;
9233 	struct psr_config psr_config = {0};
9234 	struct psr_context psr_context = {0};
9235 	bool ret = false;
9236 
9237 	if (stream == NULL)
9238 		return false;
9239 
9240 	link = stream->link;
9241 
9242 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9243 
9244 	if (psr_config.psr_version > 0) {
9245 		psr_config.psr_exit_link_training_required = 0x1;
9246 		psr_config.psr_frame_capture_indication_req = 0;
9247 		psr_config.psr_rfb_setup_time = 0x37;
9248 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9249 		psr_config.allow_smu_optimizations = 0x0;
9250 
9251 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9252 
9253 	}
9254 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9255 
9256 	return ret;
9257 }
9258 
9259 /*
9260  * amdgpu_dm_psr_enable() - enable psr f/w
9261  * @stream: stream state
9262  *
9263  * Return: true if success
9264  */
9265 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9266 {
9267 	struct dc_link *link = stream->link;
9268 	unsigned int vsync_rate_hz = 0;
9269 	struct dc_static_screen_params params = {0};
9270 	/* Calculate the number of static frames before generating an interrupt
9271 	 * to enter PSR.
9272 	 */
9273 	/* Initialize with a fail-safe of 2 static frames */
9274 	unsigned int num_frames_static = 2;
9275 
9276 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9277 
9278 	vsync_rate_hz = div64_u64(div64_u64((
9279 			stream->timing.pix_clk_100hz * 100),
9280 			stream->timing.v_total),
9281 			stream->timing.h_total);
9282 
9283 	/* Round up.
9284 	 * Calculate the number of frames such that at least 30 ms of time has
9285 	 * passed.
9286 	 */
9287 	if (vsync_rate_hz != 0) {
9288 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9289 		num_frames_static = (30000 / frame_time_microsec) + 1;
9290 	}
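	/*
	 * Example: for a 1920x1080@60 timing with pix_clk_100hz = 1485000,
	 * h_total = 2200 and v_total = 1125,
	 * vsync_rate_hz = 148500000 / 1125 / 2200 = 60, so
	 * frame_time_microsec = 16666 and
	 * num_frames_static = 30000 / 16666 + 1 = 2.
	 */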
9291 
9292 	params.triggers.cursor_update = true;
9293 	params.triggers.overlay_update = true;
9294 	params.triggers.surface_update = true;
9295 	params.num_frames = num_frames_static;
9296 
9297 	dc_stream_set_static_screen_params(link->ctx->dc,
9298 					   &stream, 1,
9299 					   &params);
9300 
9301 	return dc_link_set_psr_allow_active(link, true, false);
9302 }
9303 
9304 /*
9305  * amdgpu_dm_psr_disable() - disable psr f/w
9306  * @stream:  stream state
9307  *
9308  * Return: true if success
9309  */
9310 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9311 {
9312 
9313 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9314 
9315 	return dc_link_set_psr_allow_active(stream->link, false, true);
9316 }
9317 
9318 /*
9319  * amdgpu_dm_psr_disable_all() - disable psr f/w
9320  * if psr is enabled on any stream
9321  *
9322  * Return: true if success
9323  */
9324 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9325 {
9326 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9327 	return dc_set_psr_allow_active(dm->dc, false);
9328 }
9329 
9330 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9331 {
9332 	struct amdgpu_device *adev = drm_to_adev(dev);
9333 	struct dc *dc = adev->dm.dc;
9334 	int i;
9335 
9336 	mutex_lock(&adev->dm.dc_lock);
9337 	if (dc->current_state) {
9338 		for (i = 0; i < dc->current_state->stream_count; ++i)
9339 			dc->current_state->streams[i]
9340 				->triggered_crtc_reset.enabled =
9341 				adev->dm.force_timing_sync;
9342 
9343 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9344 		dc_trigger_sync(dc, dc->current_state);
9345 	}
9346 	mutex_unlock(&adev->dm.dc_lock);
9347 }
9348