1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 #include <linux/dmi.h>
74 
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84 
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92 
93 #include "soc15_common.h"
94 #endif
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143 
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146 	switch (link->dpcd_caps.dongle_type) {
147 	case DISPLAY_DONGLE_NONE:
148 		return DRM_MODE_SUBCONNECTOR_Native;
149 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 		return DRM_MODE_SUBCONNECTOR_VGA;
151 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 		return DRM_MODE_SUBCONNECTOR_DVID;
154 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_HDMIA;
157 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 	default:
159 		return DRM_MODE_SUBCONNECTOR_Unknown;
160 	}
161 }
162 
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165 	struct dc_link *link = aconnector->dc_link;
166 	struct drm_connector *connector = &aconnector->base;
167 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 
169 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 		return;
171 
172 	if (aconnector->dc_sink)
173 		subconnector = get_subconnector_type(link);
174 
175 	drm_object_property_set_value(&connector->base,
176 			connector->dev->mode_config.dp_subconnector_property,
177 			subconnector);
178 }
179 
180 /*
181  * initializes drm_device display related structures, based on the information
182  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
183  * drm_encoder, drm_mode_config
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 				struct drm_plane *plane,
193 				unsigned long possible_crtcs,
194 				const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 			       struct drm_plane *plane,
197 			       uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
200 				    uint32_t link_index,
201 				    struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 				  struct amdgpu_encoder *aencoder,
204 				  uint32_t link_index);
205 
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 				  struct drm_atomic_state *state);
212 
213 static void handle_cursor_update(struct drm_plane *plane,
214 				 struct drm_plane_state *old_plane_state);
215 
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218 
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220 
221 static bool
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223 				 struct drm_crtc_state *new_crtc_state);
224 /*
225  * dm_vblank_get_counter
226  *
227  * @brief
228  * Get counter for number of vertical blanks
229  *
230  * @param
231  * struct amdgpu_device *adev - [in] desired amdgpu device
232  * int crtc - [in] which CRTC to get the counter from
233  *
234  * @return
235  * Counter for vertical blanks
236  */
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 {
239 	if (crtc >= adev->mode_info.num_crtc)
240 		return 0;
241 	else {
242 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243 
244 		if (acrtc->dm_irq_params.stream == NULL) {
245 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246 				  crtc);
247 			return 0;
248 		}
249 
250 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251 	}
252 }
253 
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255 				  u32 *vbl, u32 *position)
256 {
257 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
258 
259 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260 		return -EINVAL;
261 	else {
262 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263 
264 		if (acrtc->dm_irq_params.stream ==  NULL) {
265 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266 				  crtc);
267 			return 0;
268 		}
269 
270 		/*
271 		 * TODO rework base driver to use values directly.
272 		 * for now parse it back into reg-format
273 		 */
274 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275 					 &v_blank_start,
276 					 &v_blank_end,
277 					 &h_position,
278 					 &v_position);
279 
280 		*position = v_position | (h_position << 16);
281 		*vbl = v_blank_start | (v_blank_end << 16);
282 	}
283 
284 	return 0;
285 }
286 
287 static bool dm_is_idle(void *handle)
288 {
289 	/* XXX todo */
290 	return true;
291 }
292 
293 static int dm_wait_for_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return 0;
297 }
298 
299 static bool dm_check_soft_reset(void *handle)
300 {
301 	return false;
302 }
303 
304 static int dm_soft_reset(void *handle)
305 {
306 	/* XXX todo */
307 	return 0;
308 }
309 
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
312 		     int otg_inst)
313 {
314 	struct drm_device *dev = adev_to_drm(adev);
315 	struct drm_crtc *crtc;
316 	struct amdgpu_crtc *amdgpu_crtc;
317 
318 	if (WARN_ON(otg_inst == -1))
319 		return adev->mode_info.crtcs[0];
320 
321 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322 		amdgpu_crtc = to_amdgpu_crtc(crtc);
323 
324 		if (amdgpu_crtc->otg_inst == otg_inst)
325 			return amdgpu_crtc;
326 	}
327 
328 	return NULL;
329 }
330 
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
332 {
333 	return acrtc->dm_irq_params.freesync_config.state ==
334 		       VRR_STATE_ACTIVE_VARIABLE ||
335 	       acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_FIXED;
337 }
338 
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
340 {
341 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346 					      struct dm_crtc_state *new_state)
347 {
348 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
349 		return true;
350 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
351 		return true;
352 	else
353 		return false;
354 }
355 
356 /**
357  * update_planes_and_stream_adapter() - Send planes to be updated in DC
358  *
359  * DC has a generic way to update planes and stream via
360  * dc_update_planes_and_stream function; however, DM might need some
361  * adjustments and preparation before calling it. This function is a wrapper
362  * for the dc_update_planes_and_stream that does any required configuration
363  * before passing control to DC.
364  */
365 static inline bool update_planes_and_stream_adapter(struct dc *dc,
366 						    int update_type,
367 						    int planes_count,
368 						    struct dc_stream_state *stream,
369 						    struct dc_stream_update *stream_update,
370 						    struct dc_surface_update *array_of_surface_update)
371 {
372 	/*
373 	 * Previous frame finished and HW is ready for optimization.
374 	 */
375 	if (update_type == UPDATE_TYPE_FAST)
376 		dc_post_update_surfaces_to_stream(dc);
377 
378 	return dc_update_planes_and_stream(dc,
379 					   array_of_surface_update,
380 					   planes_count,
381 					   stream,
382 					   stream_update);
383 }
384 
385 /**
386  * dm_pflip_high_irq() - Handle pageflip interrupt
387  * @interrupt_params: interrupt parameters containing the device and IRQ source
388  *
389  * Handles the pageflip interrupt by notifying all interested parties
390  * that the pageflip has been completed.
391  */
392 static void dm_pflip_high_irq(void *interrupt_params)
393 {
394 	struct amdgpu_crtc *amdgpu_crtc;
395 	struct common_irq_params *irq_params = interrupt_params;
396 	struct amdgpu_device *adev = irq_params->adev;
397 	unsigned long flags;
398 	struct drm_pending_vblank_event *e;
399 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
400 	bool vrr_active;
401 
402 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
403 
404 	/* IRQ could occur when in initial stage */
405 	/* TODO work and BO cleanup */
406 	if (amdgpu_crtc == NULL) {
407 		DC_LOG_PFLIP("CRTC is null, returning.\n");
408 		return;
409 	}
410 
411 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
412 
413 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
414 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
415 						 amdgpu_crtc->pflip_status,
416 						 AMDGPU_FLIP_SUBMITTED,
417 						 amdgpu_crtc->crtc_id,
418 						 amdgpu_crtc);
419 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
420 		return;
421 	}
422 
423 	/* page flip completed. */
424 	e = amdgpu_crtc->event;
425 	amdgpu_crtc->event = NULL;
426 
427 	WARN_ON(!e);
428 
429 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
430 
431 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
432 	if (!vrr_active ||
433 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
434 				      &v_blank_end, &hpos, &vpos) ||
435 	    (vpos < v_blank_start)) {
436 		/* Update to correct count and vblank timestamp if racing with
437 		 * vblank irq. This also updates to the correct vblank timestamp
438 		 * even in VRR mode, as scanout is past the front-porch atm.
439 		 */
440 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
441 
442 		/* Wake up userspace by sending the pageflip event with proper
443 		 * count and timestamp of vblank of flip completion.
444 		 */
445 		if (e) {
446 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
447 
448 			/* Event sent, so done with vblank for this flip */
449 			drm_crtc_vblank_put(&amdgpu_crtc->base);
450 		}
451 	} else if (e) {
452 		/* VRR active and inside front-porch: vblank count and
453 		 * timestamp for pageflip event will only be up to date after
454 		 * drm_crtc_handle_vblank() has been executed from late vblank
455 		 * irq handler after start of back-porch (vline 0). We queue the
456 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
457 		 * updated timestamp and count, once it runs after us.
458 		 *
459 		 * We need to open-code this instead of using the helper
460 		 * drm_crtc_arm_vblank_event(), as that helper would
461 		 * call drm_crtc_accurate_vblank_count(), which we must
462 		 * not call in VRR mode while we are in front-porch!
463 		 */
464 
465 		/* sequence will be replaced by real count during send-out. */
466 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
467 		e->pipe = amdgpu_crtc->crtc_id;
468 
469 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
470 		e = NULL;
471 	}
472 
473 	/* Keep track of vblank of this flip for flip throttling. We use the
474 	 * cooked hw counter, as that one incremented at start of this vblank
475 	 * of pageflip completion, so last_flip_vblank is the forbidden count
476 	 * for queueing new pageflips if vsync + VRR is enabled.
477 	 */
478 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
479 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
480 
481 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
482 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
483 
484 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
485 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
486 		     vrr_active, (int) !e);
487 }
488 
489 static void dm_vupdate_high_irq(void *interrupt_params)
490 {
491 	struct common_irq_params *irq_params = interrupt_params;
492 	struct amdgpu_device *adev = irq_params->adev;
493 	struct amdgpu_crtc *acrtc;
494 	struct drm_device *drm_dev;
495 	struct drm_vblank_crtc *vblank;
496 	ktime_t frame_duration_ns, previous_timestamp;
497 	unsigned long flags;
498 	int vrr_active;
499 
500 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
501 
502 	if (acrtc) {
503 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
504 		drm_dev = acrtc->base.dev;
505 		vblank = &drm_dev->vblank[acrtc->base.index];
506 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
507 		frame_duration_ns = vblank->time - previous_timestamp;
508 
509 		if (frame_duration_ns > 0) {
510 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
511 						frame_duration_ns,
512 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
513 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
514 		}
515 
516 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
517 			      acrtc->crtc_id,
518 			      vrr_active);
519 
520 		/* Core vblank handling is done here after end of front-porch in
521 		 * vrr mode, as vblank timestamping will give valid results
522 		 * while now done after front-porch. This will also deliver
523 		 * page-flip completion events that have been queued to us
524 		 * if a pageflip happened inside front-porch.
525 		 */
526 		if (vrr_active) {
527 			drm_crtc_handle_vblank(&acrtc->base);
528 
529 			/* BTR processing for pre-DCE12 ASICs */
530 			if (acrtc->dm_irq_params.stream &&
531 			    adev->family < AMDGPU_FAMILY_AI) {
532 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
533 				mod_freesync_handle_v_update(
534 				    adev->dm.freesync_module,
535 				    acrtc->dm_irq_params.stream,
536 				    &acrtc->dm_irq_params.vrr_params);
537 
538 				dc_stream_adjust_vmin_vmax(
539 				    adev->dm.dc,
540 				    acrtc->dm_irq_params.stream,
541 				    &acrtc->dm_irq_params.vrr_params.adjust);
542 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
543 			}
544 		}
545 	}
546 }
547 
548 /**
549  * dm_crtc_high_irq() - Handles CRTC interrupt
550  * @interrupt_params: used for determining the CRTC instance
551  *
552  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
553  * event handler.
554  */
555 static void dm_crtc_high_irq(void *interrupt_params)
556 {
557 	struct common_irq_params *irq_params = interrupt_params;
558 	struct amdgpu_device *adev = irq_params->adev;
559 	struct amdgpu_crtc *acrtc;
560 	unsigned long flags;
561 	int vrr_active;
562 
563 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
564 	if (!acrtc)
565 		return;
566 
567 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
568 
569 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
570 		      vrr_active, acrtc->dm_irq_params.active_planes);
571 
572 	/**
573 	 * Core vblank handling at start of front-porch is only possible
574 	 * in non-vrr mode, as only there vblank timestamping will give
575 	 * valid results while done in front-porch. Otherwise defer it
576 	 * to dm_vupdate_high_irq after end of front-porch.
577 	 */
578 	if (!vrr_active)
579 		drm_crtc_handle_vblank(&acrtc->base);
580 
581 	/**
582 	 * Following stuff must happen at start of vblank, for crc
583 	 * computation and below-the-range btr support in vrr mode.
584 	 */
585 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
586 
587 	/* BTR updates need to happen before VUPDATE on Vega and above. */
588 	if (adev->family < AMDGPU_FAMILY_AI)
589 		return;
590 
591 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
592 
593 	if (acrtc->dm_irq_params.stream &&
594 	    acrtc->dm_irq_params.vrr_params.supported &&
595 	    acrtc->dm_irq_params.freesync_config.state ==
596 		    VRR_STATE_ACTIVE_VARIABLE) {
597 		mod_freesync_handle_v_update(adev->dm.freesync_module,
598 					     acrtc->dm_irq_params.stream,
599 					     &acrtc->dm_irq_params.vrr_params);
600 
601 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
602 					   &acrtc->dm_irq_params.vrr_params.adjust);
603 	}
604 
605 	/*
606 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
607 	 * In that case, pageflip completion interrupts won't fire and pageflip
608 	 * completion events won't get delivered. Prevent this by sending
609 	 * pending pageflip events from here if a flip is still pending.
610 	 *
611 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
612 	 * avoid race conditions between flip programming and completion,
613 	 * which could cause too early flip completion events.
614 	 */
615 	if (adev->family >= AMDGPU_FAMILY_RV &&
616 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
617 	    acrtc->dm_irq_params.active_planes == 0) {
618 		if (acrtc->event) {
619 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
620 			acrtc->event = NULL;
621 			drm_crtc_vblank_put(&acrtc->base);
622 		}
623 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
624 	}
625 
626 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
627 }
628 
629 #if defined(CONFIG_DRM_AMD_DC_DCN)
630 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
631 /**
632  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
633  * DCN generation ASICs
634  * @interrupt_params: interrupt parameters
635  *
636  * Used to set crc window/read out crc value at vertical line 0 position
637  */
638 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
639 {
640 	struct common_irq_params *irq_params = interrupt_params;
641 	struct amdgpu_device *adev = irq_params->adev;
642 	struct amdgpu_crtc *acrtc;
643 
644 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
645 
646 	if (!acrtc)
647 		return;
648 
649 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
650 }
651 #endif
652 
653 /**
654  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
655  * @adev: amdgpu_device pointer
656  * @notify: dmub notification structure
657  *
658  * Dmub AUX or SET_CONFIG command completion processing callback.
659  * Copies the dmub notification to DM, to be read by the AUX command
660  * issuing thread, and signals the event to wake up that thread.
661  */
662 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
663 {
664 	if (adev->dm.dmub_notify)
665 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
666 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
667 		complete(&adev->dm.dmub_aux_transfer_done);
668 }
669 
670 /**
671  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
672  * @adev: amdgpu_device pointer
673  * @notify: dmub notification structure
674  *
675  * Dmub Hpd interrupt processing callback. Gets the display index through the
676  * link index and calls the helper to do the processing.
677  */
678 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
679 {
680 	struct amdgpu_dm_connector *aconnector;
681 	struct drm_connector *connector;
682 	struct drm_connector_list_iter iter;
683 	struct dc_link *link;
684 	uint8_t link_index = 0;
685 	struct drm_device *dev;
686 
687 	if (adev == NULL)
688 		return;
689 
690 	if (notify == NULL) {
691 		DRM_ERROR("DMUB HPD callback notification was NULL");
692 		return;
693 	}
694 
695 	if (notify->link_index > adev->dm.dc->link_count) {
696 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
697 		return;
698 	}
699 
700 	link_index = notify->link_index;
701 	link = adev->dm.dc->links[link_index];
702 	dev = adev->dm.ddev;
703 
704 	drm_connector_list_iter_begin(dev, &iter);
705 	drm_for_each_connector_iter(connector, &iter) {
706 		aconnector = to_amdgpu_dm_connector(connector);
707 		if (link && aconnector->dc_link == link) {
708 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
709 			handle_hpd_irq_helper(aconnector);
710 			break;
711 		}
712 	}
713 	drm_connector_list_iter_end(&iter);
714 
715 }
716 
717 /**
718  * register_dmub_notify_callback - Sets callback for DMUB notify
719  * @adev: amdgpu_device pointer
720  * @type: Type of dmub notification
721  * @callback: Dmub interrupt callback function
722  * @dmub_int_thread_offload: offload indicator
723  *
724  * API to register a dmub callback handler for a dmub notification
725  * Also sets an indicator for whether callback processing is to be offloaded
726  * to the dmub interrupt handling thread.
727  * Return: true if successfully registered, false if there is existing registration
728  */
729 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
730 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
731 {
732 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
733 		adev->dm.dmub_callback[type] = callback;
734 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
735 	} else
736 		return false;
737 
738 	return true;
739 }
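/*
 * Example usage (as done later in amdgpu_dm_init()):
 *
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *                                 dmub_aux_setconfig_callback, false);
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                 dmub_hpd_callback, true);
 */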
740 
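/*
 * Worker for DMUB notifications flagged for thread offload: runs from the
 * delayed_hpd_wq queued by dm_dmub_outbox1_low_irq(), dispatches the
 * notification to the registered dmub_callback and frees the work item.
 */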
741 static void dm_handle_hpd_work(struct work_struct *work)
742 {
743 	struct dmub_hpd_work *dmub_hpd_wrk;
744 
745 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
746 
747 	if (!dmub_hpd_wrk->dmub_notify) {
748 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
749 		return;
750 	}
751 
752 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
753 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
754 		dmub_hpd_wrk->dmub_notify);
755 	}
756 	kfree(dmub_hpd_wrk);
757 
758 }
759 
760 #define DMUB_TRACE_MAX_READ 64
761 /**
762  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
763  * @interrupt_params: used for determining the Outbox instance
764  *
765  * Handles the Outbox interrupt by dispatching DMUB notifications and
766  * reading DMUB trace buffer entries.
767  */
768 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
769 {
770 	struct dmub_notification notify;
771 	struct common_irq_params *irq_params = interrupt_params;
772 	struct amdgpu_device *adev = irq_params->adev;
773 	struct amdgpu_display_manager *dm = &adev->dm;
774 	struct dmcub_trace_buf_entry entry = { 0 };
775 	uint32_t count = 0;
776 	struct dmub_hpd_work *dmub_hpd_wrk;
777 
778 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
779 		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
780 		if (!dmub_hpd_wrk) {
781 			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
782 			return;
783 		}
784 		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
785 
786 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
787 			do {
788 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
789 				if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
790 					DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
791 					ARRAY_SIZE(dm->dmub_thread_offload));
792 					continue;
793 				}
794 				if (dm->dmub_thread_offload[notify.type] == true) {
795 					dmub_hpd_wrk->dmub_notify = &notify;
796 					dmub_hpd_wrk->adev = adev;
797 					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
798 				} else {
799 					dm->dmub_callback[notify.type](adev, &notify);
800 				}
801 
802 			} while (notify.pending_notification);
803 
804 		} else {
805 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
806 		}
807 	}
808 
809 
810 	do {
811 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
812 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
813 							entry.param0, entry.param1);
814 
815 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
816 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
817 		} else
818 			break;
819 
820 		count++;
821 
822 	} while (count <= DMUB_TRACE_MAX_READ);
823 
824 	ASSERT(count <= DMUB_TRACE_MAX_READ);
825 }
826 #endif
827 
828 static int dm_set_clockgating_state(void *handle,
829 		  enum amd_clockgating_state state)
830 {
831 	return 0;
832 }
833 
834 static int dm_set_powergating_state(void *handle,
835 		  enum amd_powergating_state state)
836 {
837 	return 0;
838 }
839 
840 /* Prototypes of private functions */
841 static int dm_early_init(void* handle);
842 
843 /* Allocate memory for FBC compressed data  */
844 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
845 {
846 	struct drm_device *dev = connector->dev;
847 	struct amdgpu_device *adev = drm_to_adev(dev);
848 	struct dm_compressor_info *compressor = &adev->dm.compressor;
849 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
850 	struct drm_display_mode *mode;
851 	unsigned long max_size = 0;
852 
853 	if (adev->dm.dc->fbc_compressor == NULL)
854 		return;
855 
856 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
857 		return;
858 
859 	if (compressor->bo_ptr)
860 		return;
861 
862 
863 	list_for_each_entry(mode, &connector->modes, head) {
864 		if (max_size < mode->htotal * mode->vtotal)
865 			max_size = mode->htotal * mode->vtotal;
866 	}
867 
868 	if (max_size) {
869 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
870 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
871 			    &compressor->gpu_addr, &compressor->cpu_addr);
872 
873 		if (r)
874 			DRM_ERROR("DM: Failed to initialize FBC\n");
875 		else {
876 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
877 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
878 		}
879 
880 	}
881 
882 }
883 
884 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
885 					  int pipe, bool *enabled,
886 					  unsigned char *buf, int max_bytes)
887 {
888 	struct drm_device *dev = dev_get_drvdata(kdev);
889 	struct amdgpu_device *adev = drm_to_adev(dev);
890 	struct drm_connector *connector;
891 	struct drm_connector_list_iter conn_iter;
892 	struct amdgpu_dm_connector *aconnector;
893 	int ret = 0;
894 
895 	*enabled = false;
896 
897 	mutex_lock(&adev->dm.audio_lock);
898 
899 	drm_connector_list_iter_begin(dev, &conn_iter);
900 	drm_for_each_connector_iter(connector, &conn_iter) {
901 		aconnector = to_amdgpu_dm_connector(connector);
902 		if (aconnector->audio_inst != port)
903 			continue;
904 
905 		*enabled = true;
906 		ret = drm_eld_size(connector->eld);
907 		memcpy(buf, connector->eld, min(max_bytes, ret));
908 
909 		break;
910 	}
911 	drm_connector_list_iter_end(&conn_iter);
912 
913 	mutex_unlock(&adev->dm.audio_lock);
914 
915 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
916 
917 	return ret;
918 }
919 
920 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
921 	.get_eld = amdgpu_dm_audio_component_get_eld,
922 };
923 
924 static int amdgpu_dm_audio_component_bind(struct device *kdev,
925 				       struct device *hda_kdev, void *data)
926 {
927 	struct drm_device *dev = dev_get_drvdata(kdev);
928 	struct amdgpu_device *adev = drm_to_adev(dev);
929 	struct drm_audio_component *acomp = data;
930 
931 	acomp->ops = &amdgpu_dm_audio_component_ops;
932 	acomp->dev = kdev;
933 	adev->dm.audio_component = acomp;
934 
935 	return 0;
936 }
937 
938 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
939 					  struct device *hda_kdev, void *data)
940 {
941 	struct drm_device *dev = dev_get_drvdata(kdev);
942 	struct amdgpu_device *adev = drm_to_adev(dev);
943 	struct drm_audio_component *acomp = data;
944 
945 	acomp->ops = NULL;
946 	acomp->dev = NULL;
947 	adev->dm.audio_component = NULL;
948 }
949 
950 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
951 	.bind	= amdgpu_dm_audio_component_bind,
952 	.unbind	= amdgpu_dm_audio_component_unbind,
953 };
954 
955 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
956 {
957 	int i, ret;
958 
959 	if (!amdgpu_audio)
960 		return 0;
961 
962 	adev->mode_info.audio.enabled = true;
963 
964 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
965 
966 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
967 		adev->mode_info.audio.pin[i].channels = -1;
968 		adev->mode_info.audio.pin[i].rate = -1;
969 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
970 		adev->mode_info.audio.pin[i].status_bits = 0;
971 		adev->mode_info.audio.pin[i].category_code = 0;
972 		adev->mode_info.audio.pin[i].connected = false;
973 		adev->mode_info.audio.pin[i].id =
974 			adev->dm.dc->res_pool->audios[i]->inst;
975 		adev->mode_info.audio.pin[i].offset = 0;
976 	}
977 
978 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
979 	if (ret < 0)
980 		return ret;
981 
982 	adev->dm.audio_registered = true;
983 
984 	return 0;
985 }
986 
987 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
988 {
989 	if (!amdgpu_audio)
990 		return;
991 
992 	if (!adev->mode_info.audio.enabled)
993 		return;
994 
995 	if (adev->dm.audio_registered) {
996 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
997 		adev->dm.audio_registered = false;
998 	}
999 
1000 	/* TODO: Disable audio? */
1001 
1002 	adev->mode_info.audio.enabled = false;
1003 }
1004 
1005 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1006 {
1007 	struct drm_audio_component *acomp = adev->dm.audio_component;
1008 
1009 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1010 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1011 
1012 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1013 						 pin, -1);
1014 	}
1015 }
1016 
1017 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1018 {
1019 	const struct dmcub_firmware_header_v1_0 *hdr;
1020 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1021 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1022 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1023 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1024 	struct abm *abm = adev->dm.dc->res_pool->abm;
1025 	struct dmub_srv_hw_params hw_params;
1026 	enum dmub_status status;
1027 	const unsigned char *fw_inst_const, *fw_bss_data;
1028 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1029 	bool has_hw_support;
1030 
1031 	if (!dmub_srv)
1032 		/* DMUB isn't supported on the ASIC. */
1033 		return 0;
1034 
1035 	if (!fb_info) {
1036 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1037 		return -EINVAL;
1038 	}
1039 
1040 	if (!dmub_fw) {
1041 		/* Firmware required for DMUB support. */
1042 		DRM_ERROR("No firmware provided for DMUB.\n");
1043 		return -EINVAL;
1044 	}
1045 
1046 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1047 	if (status != DMUB_STATUS_OK) {
1048 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1049 		return -EINVAL;
1050 	}
1051 
1052 	if (!has_hw_support) {
1053 		DRM_INFO("DMUB unsupported on ASIC\n");
1054 		return 0;
1055 	}
1056 
1057 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1058 	status = dmub_srv_hw_reset(dmub_srv);
1059 	if (status != DMUB_STATUS_OK)
1060 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1061 
1062 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1063 
1064 	fw_inst_const = dmub_fw->data +
1065 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1066 			PSP_HEADER_BYTES;
1067 
1068 	fw_bss_data = dmub_fw->data +
1069 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1070 		      le32_to_cpu(hdr->inst_const_bytes);
1071 
1072 	/* Copy firmware and bios info into FB memory. */
1073 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1074 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1075 
1076 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1077 
1078 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1079 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1080 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1081 	 * will be done by dm_dmub_hw_init
1082 	 */
1083 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1084 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1085 				fw_inst_const_size);
1086 	}
1087 
1088 	if (fw_bss_data_size)
1089 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1090 		       fw_bss_data, fw_bss_data_size);
1091 
1092 	/* Copy firmware bios info into FB memory. */
1093 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1094 	       adev->bios_size);
1095 
1096 	/* Reset regions that need to be reset. */
1097 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1098 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1099 
1100 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1101 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1102 
1103 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1104 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1105 
1106 	/* Initialize hardware. */
1107 	memset(&hw_params, 0, sizeof(hw_params));
1108 	hw_params.fb_base = adev->gmc.fb_start;
1109 	hw_params.fb_offset = adev->gmc.aper_base;
1110 
1111 	/* backdoor load firmware and trigger dmub running */
1112 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1113 		hw_params.load_inst_const = true;
1114 
1115 	if (dmcu)
1116 		hw_params.psp_version = dmcu->psp_version;
1117 
1118 	for (i = 0; i < fb_info->num_fb; ++i)
1119 		hw_params.fb[i] = &fb_info->fb[i];
1120 
1121 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1122 	if (status != DMUB_STATUS_OK) {
1123 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1124 		return -EINVAL;
1125 	}
1126 
1127 	/* Wait for firmware load to finish. */
1128 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1129 	if (status != DMUB_STATUS_OK)
1130 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1131 
1132 	/* Init DMCU and ABM if available. */
1133 	if (dmcu && abm) {
1134 		dmcu->funcs->dmcu_init(dmcu);
1135 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1136 	}
1137 
1138 	if (!adev->dm.dc->ctx->dmub_srv)
1139 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1140 	if (!adev->dm.dc->ctx->dmub_srv) {
1141 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1142 		return -ENOMEM;
1143 	}
1144 
1145 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1146 		 adev->dm.dmcub_fw_version);
1147 
1148 	return 0;
1149 }
1150 
1151 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1152 {
1153 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1154 	enum dmub_status status;
1155 	bool init;
1156 
1157 	if (!dmub_srv) {
1158 		/* DMUB isn't supported on the ASIC. */
1159 		return;
1160 	}
1161 
1162 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1163 	if (status != DMUB_STATUS_OK)
1164 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1165 
1166 	if (status == DMUB_STATUS_OK && init) {
1167 		/* Wait for firmware load to finish. */
1168 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1169 		if (status != DMUB_STATUS_OK)
1170 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1171 	} else {
1172 		/* Perform the full hardware initialization. */
1173 		dm_dmub_hw_init(adev);
1174 	}
1175 }
1176 
1177 #if defined(CONFIG_DRM_AMD_DC_DCN)
1178 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1179 {
1180 	uint64_t pt_base;
1181 	uint32_t logical_addr_low;
1182 	uint32_t logical_addr_high;
1183 	uint32_t agp_base, agp_bot, agp_top;
1184 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1185 
1186 	memset(pa_config, 0, sizeof(*pa_config));
1187 
1188 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1189 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1190 
1191 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1192 		/*
1193 		 * Raven2 has a HW issue that it is unable to use the vram which
1194 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1195 		 * workaround that increase system aperture high address (add 1)
1196 		 * to get rid of the VM fault and hardware hang.
1197 		 */
1198 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1199 	else
1200 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1201 
1202 	agp_base = 0;
1203 	agp_bot = adev->gmc.agp_start >> 24;
1204 	agp_top = adev->gmc.agp_end >> 24;
1205 
1206 
1207 	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
1208 						   AMDGPU_GPU_PAGE_SHIFT);
1209 	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
1210 						  AMDGPU_GPU_PAGE_SHIFT);
1211 	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
1212 						 AMDGPU_GPU_PAGE_SHIFT);
1213 	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
1214 						AMDGPU_GPU_PAGE_SHIFT);
1215 	page_table_base.high_part = upper_32_bits(pt_base);
1216 	page_table_base.low_part = lower_32_bits(pt_base);
1217 
1218 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1219 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1220 
1221 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1222 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1223 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1224 
1225 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1226 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1227 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1228 
1229 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1230 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1231 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1232 
1233 	pa_config->is_hvm_enabled = 0;
1234 
1235 }
1236 #endif
1237 #if defined(CONFIG_DRM_AMD_DC_DCN)
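/*
 * Deferred vblank enable/disable work: maintains the count of CRTCs with
 * vblank interrupts enabled, allows DC idle optimizations (MALL) only when
 * that count drops to zero, and toggles PSR based on the stream's settings.
 */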
1238 static void vblank_control_worker(struct work_struct *work)
1239 {
1240 	struct vblank_control_work *vblank_work =
1241 		container_of(work, struct vblank_control_work, work);
1242 	struct amdgpu_display_manager *dm = vblank_work->dm;
1243 
1244 	mutex_lock(&dm->dc_lock);
1245 
1246 	if (vblank_work->enable)
1247 		dm->active_vblank_irq_count++;
1248 	else if(dm->active_vblank_irq_count)
1249 		dm->active_vblank_irq_count--;
1250 
1251 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1252 
1253 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1254 
1255 	/* Control PSR based on vblank requirements from OS */
1256 	if (vblank_work->stream && vblank_work->stream->link) {
1257 		if (vblank_work->enable) {
1258 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1259 				amdgpu_dm_psr_disable(vblank_work->stream);
1260 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1261 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1262 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1263 			amdgpu_dm_psr_enable(vblank_work->stream);
1264 		}
1265 	}
1266 
1267 	mutex_unlock(&dm->dc_lock);
1268 
1269 	dc_stream_release(vblank_work->stream);
1270 
1271 	kfree(vblank_work);
1272 }
1273 
1274 #endif
1275 
1276 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1277 {
1278 	struct hpd_rx_irq_offload_work *offload_work;
1279 	struct amdgpu_dm_connector *aconnector;
1280 	struct dc_link *dc_link;
1281 	struct amdgpu_device *adev;
1282 	enum dc_connection_type new_connection_type = dc_connection_none;
1283 	unsigned long flags;
1284 
1285 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1286 	aconnector = offload_work->offload_wq->aconnector;
1287 
1288 	if (!aconnector) {
1289 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1290 		goto skip;
1291 	}
1292 
1293 	adev = drm_to_adev(aconnector->base.dev);
1294 	dc_link = aconnector->dc_link;
1295 
1296 	mutex_lock(&aconnector->hpd_lock);
1297 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1298 		DRM_ERROR("KMS: Failed to detect connector\n");
1299 	mutex_unlock(&aconnector->hpd_lock);
1300 
1301 	if (new_connection_type == dc_connection_none)
1302 		goto skip;
1303 
1304 	if (amdgpu_in_reset(adev))
1305 		goto skip;
1306 
1307 	mutex_lock(&adev->dm.dc_lock);
1308 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1309 		dc_link_dp_handle_automated_test(dc_link);
1310 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1311 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1312 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1313 		dc_link_dp_handle_link_loss(dc_link);
1314 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1315 		offload_work->offload_wq->is_handling_link_loss = false;
1316 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1317 	}
1318 	mutex_unlock(&adev->dm.dc_lock);
1319 
1320 skip:
1321 	kfree(offload_work);
1322 
1323 }
1324 
1325 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1326 {
1327 	int max_caps = dc->caps.max_links;
1328 	int i = 0;
1329 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1330 
1331 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1332 
1333 	if (!hpd_rx_offload_wq)
1334 		return NULL;
1335 
1336 
1337 	for (i = 0; i < max_caps; i++) {
1338 		hpd_rx_offload_wq[i].wq =
1339 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1340 
1341 		if (hpd_rx_offload_wq[i].wq == NULL) {
1342 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1343 			goto out_err;
1344 		}
1345 
1346 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1347 	}
1348 
1349 	return hpd_rx_offload_wq;
1350 
1351 out_err:
1352 	for (i = 0; i < max_caps; i++) {
1353 		if (hpd_rx_offload_wq[i].wq)
1354 			destroy_workqueue(hpd_rx_offload_wq[i].wq);
1355 	}
1356 	kfree(hpd_rx_offload_wq);
1357 	return NULL;
1358 }
1359 
1360 struct amdgpu_stutter_quirk {
1361 	u16 chip_vendor;
1362 	u16 chip_device;
1363 	u16 subsys_vendor;
1364 	u16 subsys_device;
1365 	u8 revision;
1366 };
1367 
1368 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1369 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1370 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1371 	{ 0, 0, 0, 0, 0 },
1372 };
1373 
1374 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1375 {
1376 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1377 
1378 	while (p && p->chip_device != 0) {
1379 		if (pdev->vendor == p->chip_vendor &&
1380 		    pdev->device == p->chip_device &&
1381 		    pdev->subsystem_vendor == p->subsys_vendor &&
1382 		    pdev->subsystem_device == p->subsys_device &&
1383 		    pdev->revision == p->revision) {
1384 			return true;
1385 		}
1386 		++p;
1387 	}
1388 	return false;
1389 }
1390 
1391 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1392 	{
1393 		.matches = {
1394 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1395 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1396 		},
1397 	},
1398 	{
1399 		.matches = {
1400 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1401 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1402 		},
1403 	},
1404 	{
1405 		.matches = {
1406 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1407 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1408 		},
1409 	},
1410 	{
1411 		.matches = {
1412 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1413 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1414 		},
1415 	},
1416 	{
1417 		.matches = {
1418 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1419 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1420 		},
1421 	},
1422 	{
1423 		.matches = {
1424 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1425 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1426 		},
1427 	},
1428 	{
1429 		.matches = {
1430 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1431 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1432 		},
1433 	},
1434 	{
1435 		.matches = {
1436 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1437 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1438 		},
1439 	},
1440 	{
1441 		.matches = {
1442 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1443 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1444 		},
1445 	},
1446 	{}
1447 	/* TODO: refactor this from a fixed table to a dynamic option */
1448 };
1449 
1450 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1451 {
1452 	const struct dmi_system_id *dmi_id;
1453 
1454 	dm->aux_hpd_discon_quirk = false;
1455 
1456 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1457 	if (dmi_id) {
1458 		dm->aux_hpd_discon_quirk = true;
1459 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1460 	}
1461 }
1462 
1463 static int amdgpu_dm_init(struct amdgpu_device *adev)
1464 {
1465 	struct dc_init_data init_data;
1466 #ifdef CONFIG_DRM_AMD_DC_HDCP
1467 	struct dc_callback_init init_params;
1468 #endif
1469 	int r;
1470 
1471 	adev->dm.ddev = adev_to_drm(adev);
1472 	adev->dm.adev = adev;
1473 
1474 	/* Zero all the fields */
1475 	memset(&init_data, 0, sizeof(init_data));
1476 #ifdef CONFIG_DRM_AMD_DC_HDCP
1477 	memset(&init_params, 0, sizeof(init_params));
1478 #endif
1479 
1480 	mutex_init(&adev->dm.dc_lock);
1481 	mutex_init(&adev->dm.audio_lock);
1482 #if defined(CONFIG_DRM_AMD_DC_DCN)
1483 	spin_lock_init(&adev->dm.vblank_lock);
1484 #endif
1485 
1486 	if(amdgpu_dm_irq_init(adev)) {
1487 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1488 		goto error;
1489 	}
1490 
1491 	init_data.asic_id.chip_family = adev->family;
1492 
1493 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1494 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1495 	init_data.asic_id.chip_id = adev->pdev->device;
1496 
1497 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1498 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1499 	init_data.asic_id.atombios_base_address =
1500 		adev->mode_info.atom_context->bios;
1501 
1502 	init_data.driver = adev;
1503 
1504 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1505 
1506 	if (!adev->dm.cgs_device) {
1507 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1508 		goto error;
1509 	}
1510 
1511 	init_data.cgs_device = adev->dm.cgs_device;
1512 
1513 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1514 
1515 	switch (adev->asic_type) {
1516 	case CHIP_CARRIZO:
1517 	case CHIP_STONEY:
1518 	case CHIP_RAVEN:
1519 	case CHIP_RENOIR:
1520 		init_data.flags.gpu_vm_support = true;
1521 		switch (adev->dm.dmcub_fw_version) {
1522 		case 0: /* development */
1523 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1524 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1525 			init_data.flags.disable_dmcu = false;
1526 			break;
1527 		default:
1528 			init_data.flags.disable_dmcu = true;
1529 		}
1530 		break;
1531 	case CHIP_VANGOGH:
1532 	case CHIP_YELLOW_CARP:
1533 		init_data.flags.gpu_vm_support = true;
1534 		break;
1535 	default:
1536 		break;
1537 	}
1538 
1539 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1540 		init_data.flags.fbc_support = true;
1541 
1542 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1543 		init_data.flags.multi_mon_pp_mclk_switch = true;
1544 
1545 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1546 		init_data.flags.disable_fractional_pwm = true;
1547 
1548 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1549 		init_data.flags.edp_no_power_sequencing = true;
1550 
1551 	init_data.flags.power_down_display_on_boot = true;
1552 
1553 	INIT_LIST_HEAD(&adev->dm.da_list);
1554 
1555 	retrieve_dmi_info(&adev->dm);
1556 
1557 	/* Display Core create. */
1558 	adev->dm.dc = dc_create(&init_data);
1559 
1560 	if (adev->dm.dc) {
1561 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1562 	} else {
1563 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1564 		goto error;
1565 	}
1566 
1567 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1568 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1569 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1570 	}
1571 
1572 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1573 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1574 	if (dm_should_disable_stutter(adev->pdev))
1575 		adev->dm.dc->debug.disable_stutter = true;
1576 
1577 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1578 		adev->dm.dc->debug.disable_stutter = true;
1579 
1580 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1581 		adev->dm.dc->debug.disable_dsc = true;
1582 
1583 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1584 		adev->dm.dc->debug.disable_clock_gate = true;
1585 
1586 	r = dm_dmub_hw_init(adev);
1587 	if (r) {
1588 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1589 		goto error;
1590 	}
1591 
1592 	dc_hardware_init(adev->dm.dc);
1593 
1594 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1595 	if (!adev->dm.hpd_rx_offload_wq) {
1596 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1597 		goto error;
1598 	}
1599 
1600 #if defined(CONFIG_DRM_AMD_DC_DCN)
1601 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1602 		struct dc_phy_addr_space_config pa_config;
1603 
1604 		mmhub_read_system_context(adev, &pa_config);
1605 
1606 		// Call the DC init_memory func
1607 		dc_setup_system_context(adev->dm.dc, &pa_config);
1608 	}
1609 #endif
1610 
1611 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1612 	if (!adev->dm.freesync_module) {
1613 		DRM_ERROR(
1614 		"amdgpu: failed to initialize freesync_module.\n");
1615 	} else
1616 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1617 				adev->dm.freesync_module);
1618 
1619 	amdgpu_dm_init_color_mod();
1620 
1621 #if defined(CONFIG_DRM_AMD_DC_DCN)
1622 	if (adev->dm.dc->caps.max_links > 0) {
1623 		adev->dm.vblank_control_workqueue =
1624 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1625 		if (!adev->dm.vblank_control_workqueue)
1626 			DRM_ERROR("amdgpu: failed to create vblank_control_workqueue.\n");
1627 	}
1628 #endif
1629 
1630 #ifdef CONFIG_DRM_AMD_DC_HDCP
1631 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1632 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1633 
1634 		if (!adev->dm.hdcp_workqueue)
1635 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1636 		else
1637 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1638 
1639 		dc_init_callbacks(adev->dm.dc, &init_params);
1640 	}
1641 #endif
1642 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1643 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1644 #endif
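	/*
	 * When the DMUB firmware can raise notifications (e.g. for DPIA links),
	 * set up the AUX transfer completion, the notification buffer, the
	 * delayed HPD workqueue and the outbox callbacks that service them.
	 */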
1645 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1646 		init_completion(&adev->dm.dmub_aux_transfer_done);
1647 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1648 		if (!adev->dm.dmub_notify) {
1649 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1650 			goto error;
1651 		}
1652 
1653 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1654 		if (!adev->dm.delayed_hpd_wq) {
1655 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1656 			goto error;
1657 		}
1658 
1659 		amdgpu_dm_outbox_init(adev);
1660 #if defined(CONFIG_DRM_AMD_DC_DCN)
1661 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1662 			dmub_aux_setconfig_callback, false)) {
1663 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1664 			goto error;
1665 		}
1666 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1667 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1668 			goto error;
1669 		}
1670 #endif
1671 	}
1672 
1673 	if (amdgpu_dm_initialize_drm_device(adev)) {
1674 		DRM_ERROR(
1675 		"amdgpu: failed to initialize sw for display support.\n");
1676 		goto error;
1677 	}
1678 
1679 	/* create fake encoders for MST */
1680 	dm_dp_create_fake_mst_encoders(adev);
1681 
1682 	/* TODO: Add_display_info? */
1683 
1684 	/* TODO use dynamic cursor width */
1685 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1686 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1687 
1688 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1689 		DRM_ERROR(
1690 		"amdgpu: failed to initialize sw for display support.\n");
1691 		goto error;
1692 	}
1693 
1694 
1695 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1696 
1697 	return 0;
1698 error:
1699 	amdgpu_dm_fini(adev);
1700 
1701 	return -EINVAL;
1702 }
1703 
1704 static int amdgpu_dm_early_fini(void *handle)
1705 {
1706 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1707 
1708 	amdgpu_dm_audio_fini(adev);
1709 
1710 	return 0;
1711 }
1712 
1713 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1714 {
1715 	int i;
1716 
1717 #if defined(CONFIG_DRM_AMD_DC_DCN)
1718 	if (adev->dm.vblank_control_workqueue) {
1719 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1720 		adev->dm.vblank_control_workqueue = NULL;
1721 	}
1722 #endif
1723 
1724 	amdgpu_dm_destroy_drm_device(&adev->dm);
1725 
1726 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1727 	if (adev->dm.crc_rd_wrk) {
1728 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1729 		kfree(adev->dm.crc_rd_wrk);
1730 		adev->dm.crc_rd_wrk = NULL;
1731 	}
1732 #endif
1733 #ifdef CONFIG_DRM_AMD_DC_HDCP
1734 	if (adev->dm.hdcp_workqueue) {
1735 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1736 		adev->dm.hdcp_workqueue = NULL;
1737 	}
1738 
1739 	if (adev->dm.dc)
1740 		dc_deinit_callbacks(adev->dm.dc);
1741 #endif
1742 
1743 	if (adev->dm.dc)
1744 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1745 
1746 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1747 		kfree(adev->dm.dmub_notify);
1748 		adev->dm.dmub_notify = NULL;
1749 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1750 		adev->dm.delayed_hpd_wq = NULL;
1751 	}
1752 
1753 	if (adev->dm.dmub_bo)
1754 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1755 				      &adev->dm.dmub_bo_gpu_addr,
1756 				      &adev->dm.dmub_bo_cpu_addr);
1757 
1758 	/* DC Destroy TODO: Replace destroy DAL */
1759 	if (adev->dm.dc)
1760 		dc_destroy(&adev->dm.dc);
1761 	/*
1762 	 * TODO: pageflip, vblank interrupt
1763 	 *
1764 	 * amdgpu_dm_irq_fini(adev);
1765 	 */
1766 
1767 	if (adev->dm.cgs_device) {
1768 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1769 		adev->dm.cgs_device = NULL;
1770 	}
1771 	if (adev->dm.freesync_module) {
1772 		mod_freesync_destroy(adev->dm.freesync_module);
1773 		adev->dm.freesync_module = NULL;
1774 	}
1775 
1776 	if (adev->dm.hpd_rx_offload_wq) {
1777 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1778 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1779 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1780 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1781 			}
1782 		}
1783 
1784 		kfree(adev->dm.hpd_rx_offload_wq);
1785 		adev->dm.hpd_rx_offload_wq = NULL;
1786 	}
1787 
1788 	mutex_destroy(&adev->dm.audio_lock);
1789 	mutex_destroy(&adev->dm.dc_lock);
1790 
1791 	return;
1792 }
1793 
1794 static int load_dmcu_fw(struct amdgpu_device *adev)
1795 {
1796 	const char *fw_name_dmcu = NULL;
1797 	int r;
1798 	const struct dmcu_firmware_header_v1_0 *hdr;
1799 
1800 	switch(adev->asic_type) {
1801 #if defined(CONFIG_DRM_AMD_DC_SI)
1802 	case CHIP_TAHITI:
1803 	case CHIP_PITCAIRN:
1804 	case CHIP_VERDE:
1805 	case CHIP_OLAND:
1806 #endif
1807 	case CHIP_BONAIRE:
1808 	case CHIP_HAWAII:
1809 	case CHIP_KAVERI:
1810 	case CHIP_KABINI:
1811 	case CHIP_MULLINS:
1812 	case CHIP_TONGA:
1813 	case CHIP_FIJI:
1814 	case CHIP_CARRIZO:
1815 	case CHIP_STONEY:
1816 	case CHIP_POLARIS11:
1817 	case CHIP_POLARIS10:
1818 	case CHIP_POLARIS12:
1819 	case CHIP_VEGAM:
1820 	case CHIP_VEGA10:
1821 	case CHIP_VEGA12:
1822 	case CHIP_VEGA20:
1823 	case CHIP_NAVI10:
1824 	case CHIP_NAVI14:
1825 	case CHIP_RENOIR:
1826 	case CHIP_SIENNA_CICHLID:
1827 	case CHIP_NAVY_FLOUNDER:
1828 	case CHIP_DIMGREY_CAVEFISH:
1829 	case CHIP_BEIGE_GOBY:
1830 	case CHIP_VANGOGH:
1831 	case CHIP_YELLOW_CARP:
1832 		return 0;
1833 	case CHIP_NAVI12:
1834 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1835 		break;
1836 	case CHIP_RAVEN:
1837 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1838 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1839 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1840 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1841 		else
1842 			return 0;
1843 		break;
1844 	default:
1845 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1846 		return -EINVAL;
1847 	}
1848 
1849 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1850 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1851 		return 0;
1852 	}
1853 
1854 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1855 	if (r == -ENOENT) {
1856 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1857 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1858 		adev->dm.fw_dmcu = NULL;
1859 		return 0;
1860 	}
1861 	if (r) {
1862 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1863 			fw_name_dmcu);
1864 		return r;
1865 	}
1866 
1867 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1868 	if (r) {
1869 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1870 			fw_name_dmcu);
1871 		release_firmware(adev->dm.fw_dmcu);
1872 		adev->dm.fw_dmcu = NULL;
1873 		return r;
1874 	}
1875 
1876 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1877 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1878 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1879 	adev->firmware.fw_size +=
1880 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1881 
1882 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1883 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1884 	adev->firmware.fw_size +=
1885 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1886 
1887 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1888 
1889 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1890 
1891 	return 0;
1892 }
1893 
1894 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1895 {
1896 	struct amdgpu_device *adev = ctx;
1897 
1898 	return dm_read_reg(adev->dm.dc->ctx, address);
1899 }
1900 
1901 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1902 				     uint32_t value)
1903 {
1904 	struct amdgpu_device *adev = ctx;
1905 
1906 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1907 }
1908 
1909 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1910 {
1911 	struct dmub_srv_create_params create_params;
1912 	struct dmub_srv_region_params region_params;
1913 	struct dmub_srv_region_info region_info;
1914 	struct dmub_srv_memory_params memory_params;
1915 	struct dmub_srv_fb_info *fb_info;
1916 	struct dmub_srv *dmub_srv;
1917 	const struct dmcub_firmware_header_v1_0 *hdr;
1918 	const char *fw_name_dmub;
1919 	enum dmub_asic dmub_asic;
1920 	enum dmub_status status;
1921 	int r;
1922 
1923 	switch (adev->asic_type) {
1924 	case CHIP_RENOIR:
1925 		dmub_asic = DMUB_ASIC_DCN21;
1926 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1927 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1928 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1929 		break;
1930 	case CHIP_SIENNA_CICHLID:
1931 		dmub_asic = DMUB_ASIC_DCN30;
1932 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1933 		break;
1934 	case CHIP_NAVY_FLOUNDER:
1935 		dmub_asic = DMUB_ASIC_DCN30;
1936 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1937 		break;
1938 	case CHIP_VANGOGH:
1939 		dmub_asic = DMUB_ASIC_DCN301;
1940 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1941 		break;
1942 	case CHIP_DIMGREY_CAVEFISH:
1943 		dmub_asic = DMUB_ASIC_DCN302;
1944 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1945 		break;
1946 	case CHIP_BEIGE_GOBY:
1947 		dmub_asic = DMUB_ASIC_DCN303;
1948 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1949 		break;
1950 	case CHIP_YELLOW_CARP:
1951 		dmub_asic = DMUB_ASIC_DCN31;
1952 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1953 		break;
1954 
1955 	default:
1956 		/* ASIC doesn't support DMUB. */
1957 		return 0;
1958 	}
1959 
1960 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1961 	if (r) {
1962 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1963 		return 0;
1964 	}
1965 
1966 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1967 	if (r) {
1968 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1969 		return 0;
1970 	}
1971 
1972 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1973 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1974 
1975 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1976 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1977 			AMDGPU_UCODE_ID_DMCUB;
1978 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1979 			adev->dm.dmub_fw;
1980 		adev->firmware.fw_size +=
1981 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1982 
1983 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1984 			 adev->dm.dmcub_fw_version);
1985 	}
1986 
1987 
1988 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1989 	dmub_srv = adev->dm.dmub_srv;
1990 
1991 	if (!dmub_srv) {
1992 		DRM_ERROR("Failed to allocate DMUB service!\n");
1993 		return -ENOMEM;
1994 	}
1995 
1996 	memset(&create_params, 0, sizeof(create_params));
1997 	create_params.user_ctx = adev;
1998 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1999 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2000 	create_params.asic = dmub_asic;
2001 
2002 	/* Create the DMUB service. */
2003 	status = dmub_srv_create(dmub_srv, &create_params);
2004 	if (status != DMUB_STATUS_OK) {
2005 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2006 		return -EINVAL;
2007 	}
2008 
2009 	/* Calculate the size of all the regions for the DMUB service. */
2010 	memset(&region_params, 0, sizeof(region_params));
2011 
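	/*
	 * The firmware image wraps the instruction constants with PSP header and
	 * footer bytes; exclude them from the region size here and skip the
	 * header when fw_inst_const is computed below.
	 */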
2012 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2013 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2014 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2015 	region_params.vbios_size = adev->bios_size;
2016 	region_params.fw_bss_data = region_params.bss_data_size ?
2017 		adev->dm.dmub_fw->data +
2018 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2019 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2020 	region_params.fw_inst_const =
2021 		adev->dm.dmub_fw->data +
2022 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2023 		PSP_HEADER_BYTES;
2024 	region_params.is_mailbox_in_inbox = false;
2025 
2026 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2027 					   &region_info);
2028 
2029 	if (status != DMUB_STATUS_OK) {
2030 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2031 		return -EINVAL;
2032 	}
2033 
2034 	/*
2035 	 * Allocate a framebuffer based on the total size of all the regions.
2036 	 * TODO: Move this into GART.
2037 	 */
2038 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2039 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2040 				    &adev->dm.dmub_bo_gpu_addr,
2041 				    &adev->dm.dmub_bo_cpu_addr);
2042 	if (r)
2043 		return r;
2044 
2045 	/* Rebase the regions on the framebuffer address. */
2046 	memset(&memory_params, 0, sizeof(memory_params));
2047 	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
2048 	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
2049 	memory_params.region_info = &region_info;
2050 
2051 	adev->dm.dmub_fb_info =
2052 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2053 	fb_info = adev->dm.dmub_fb_info;
2054 
2055 	if (!fb_info) {
2056 		DRM_ERROR(
2057 			"Failed to allocate framebuffer info for DMUB service!\n");
2058 		return -ENOMEM;
2059 	}
2060 
2061 	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
2062 	if (status != DMUB_STATUS_OK) {
2063 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2064 		return -EINVAL;
2065 	}
2066 
2067 	return 0;
2068 }
2069 
2070 static int dm_sw_init(void *handle)
2071 {
2072 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2073 	int r;
2074 
2075 	r = dm_dmub_sw_init(adev);
2076 	if (r)
2077 		return r;
2078 
2079 	return load_dmcu_fw(adev);
2080 }
2081 
2082 static int dm_sw_fini(void *handle)
2083 {
2084 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2085 
2086 	kfree(adev->dm.dmub_fb_info);
2087 	adev->dm.dmub_fb_info = NULL;
2088 
2089 	if (adev->dm.dmub_srv) {
2090 		dmub_srv_destroy(adev->dm.dmub_srv);
2091 		adev->dm.dmub_srv = NULL;
2092 	}
2093 
2094 	release_firmware(adev->dm.dmub_fw);
2095 	adev->dm.dmub_fw = NULL;
2096 
2097 	release_firmware(adev->dm.fw_dmcu);
2098 	adev->dm.fw_dmcu = NULL;
2099 
2100 	return 0;
2101 }
2102 
2103 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2104 {
2105 	struct amdgpu_dm_connector *aconnector;
2106 	struct drm_connector *connector;
2107 	struct drm_connector_list_iter iter;
2108 	int ret = 0;
2109 
2110 	drm_connector_list_iter_begin(dev, &iter);
2111 	drm_for_each_connector_iter(connector, &iter) {
2112 		aconnector = to_amdgpu_dm_connector(connector);
2113 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2114 		    aconnector->mst_mgr.aux) {
2115 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2116 					 aconnector,
2117 					 aconnector->base.base.id);
2118 
2119 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2120 			if (ret < 0) {
2121 				DRM_ERROR("DM_MST: Failed to start MST\n");
2122 				aconnector->dc_link->type =
2123 					dc_connection_single;
2124 				break;
2125 			}
2126 		}
2127 	}
2128 	drm_connector_list_iter_end(&iter);
2129 
2130 	return ret;
2131 }
2132 
2133 static int dm_late_init(void *handle)
2134 {
2135 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2136 
2137 	struct dmcu_iram_parameters params;
2138 	unsigned int linear_lut[16];
2139 	int i;
2140 	struct dmcu *dmcu = NULL;
2141 
2142 	dmcu = adev->dm.dc->res_pool->dmcu;
2143 
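	/* Build a 16-entry linear (identity) backlight LUT spanning 0..0xFFFF. */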
2144 	for (i = 0; i < 16; i++)
2145 		linear_lut[i] = 0xFFFF * i / 15;
2146 
2147 	params.set = 0;
2148 	params.backlight_ramping_override = false;
2149 	params.backlight_ramping_start = 0xCCCC;
2150 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2151 	params.backlight_lut_array_size = 16;
2152 	params.backlight_lut_array = linear_lut;
2153 
2154 	/* Min backlight level after ABM reduction; don't allow below 1%.
2155 	 * 0xFFFF x 0.01 = 0x28F
2156 	 */
2157 	params.min_abm_backlight = 0x28F;
2158 	/* In the case where ABM is implemented on dmcub,
2159 	 * the dmcu object will be null.
2160 	 * ABM 2.4 and up are implemented on dmcub.
2161 	 */
2162 	if (dmcu) {
2163 		if (!dmcu_load_iram(dmcu, params))
2164 			return -EINVAL;
2165 	} else if (adev->dm.dc->ctx->dmub_srv) {
2166 		struct dc_link *edp_links[MAX_NUM_EDP];
2167 		int edp_num;
2168 
2169 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2170 		for (i = 0; i < edp_num; i++) {
2171 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2172 				return -EINVAL;
2173 		}
2174 	}
2175 
2176 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2177 }
2178 
2179 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2180 {
2181 	struct amdgpu_dm_connector *aconnector;
2182 	struct drm_connector *connector;
2183 	struct drm_connector_list_iter iter;
2184 	struct drm_dp_mst_topology_mgr *mgr;
2185 	int ret;
2186 	bool need_hotplug = false;
2187 
2188 	drm_connector_list_iter_begin(dev, &iter);
2189 	drm_for_each_connector_iter(connector, &iter) {
2190 		aconnector = to_amdgpu_dm_connector(connector);
2191 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2192 		    aconnector->mst_port)
2193 			continue;
2194 
2195 		mgr = &aconnector->mst_mgr;
2196 
2197 		if (suspend) {
2198 			drm_dp_mst_topology_mgr_suspend(mgr);
2199 		} else {
2200 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2201 			if (ret < 0) {
2202 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2203 				need_hotplug = true;
2204 			}
2205 		}
2206 	}
2207 	drm_connector_list_iter_end(&iter);
2208 
2209 	if (need_hotplug)
2210 		drm_kms_helper_hotplug_event(dev);
2211 }
2212 
2213 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2214 {
2215 	struct smu_context *smu = &adev->smu;
2216 	int ret = 0;
2217 
2218 	if (!is_support_sw_smu(adev))
2219 		return 0;
2220 
2221 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2222 	 * on the Windows driver dc implementation.
2223 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2224 	 * should be passed to smu during boot up and resume from S3.
2225 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2226 	 * dcn20_resource_construct
2227 	 * then call pplib functions below to pass the settings to smu:
2228 	 * smu_set_watermarks_for_clock_ranges
2229 	 * smu_set_watermarks_table
2230 	 * navi10_set_watermarks_table
2231 	 * smu_write_watermarks_table
2232 	 *
2233 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
2234 	 * dc has implemented a different flow for the Windows driver:
2235 	 * dc_hardware_init / dc_set_power_state
2236 	 * dcn10_init_hw
2237 	 * notify_wm_ranges
2238 	 * set_wm_ranges
2239 	 * -- Linux
2240 	 * smu_set_watermarks_for_clock_ranges
2241 	 * renoir_set_watermarks_table
2242 	 * smu_write_watermarks_table
2243 	 *
2244 	 * For Linux,
2245 	 * dc_hardware_init -> amdgpu_dm_init
2246 	 * dc_set_power_state --> dm_resume
2247 	 *
2248 	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2249 	 *
2250 	 */
2251 	switch(adev->asic_type) {
2252 	case CHIP_NAVI10:
2253 	case CHIP_NAVI14:
2254 	case CHIP_NAVI12:
2255 		break;
2256 	default:
2257 		return 0;
2258 	}
2259 
2260 	ret = smu_write_watermarks_table(smu);
2261 	if (ret) {
2262 		DRM_ERROR("Failed to update WMTABLE!\n");
2263 		return ret;
2264 	}
2265 
2266 	return 0;
2267 }
2268 
2269 /**
2270  * dm_hw_init() - Initialize DC device
2271  * @handle: The base driver device containing the amdgpu_dm device.
2272  *
2273  * Initialize the &struct amdgpu_display_manager device. This involves calling
2274  * the initializers of each DM component, then populating the struct with them.
2275  *
2276  * Although the function implies hardware initialization, both hardware and
2277  * software are initialized here. Splitting them out to their relevant init
2278  * hooks is a future TODO item.
2279  *
2280  * Some notable things that are initialized here:
2281  *
2282  * - Display Core, both software and hardware
2283  * - DC modules that we need (freesync and color management)
2284  * - DRM software states
2285  * - Interrupt sources and handlers
2286  * - Vblank support
2287  * - Debug FS entries, if enabled
2288  */
2289 static int dm_hw_init(void *handle)
2290 {
2291 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2292 	/* Create DAL display manager */
2293 	amdgpu_dm_init(adev);
2294 	amdgpu_dm_hpd_init(adev);
2295 
2296 	return 0;
2297 }
2298 
2299 /**
2300  * dm_hw_fini() - Teardown DC device
2301  * @handle: The base driver device containing the amdgpu_dm device.
2302  *
2303  * Teardown components within &struct amdgpu_display_manager that require
2304  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2305  * were loaded. Also flush IRQ workqueues and disable them.
2306  */
2307 static int dm_hw_fini(void *handle)
2308 {
2309 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2310 
2311 	amdgpu_dm_hpd_fini(adev);
2312 
2313 	amdgpu_dm_irq_fini(adev);
2314 	amdgpu_dm_fini(adev);
2315 	return 0;
2316 }
2317 
2318 
2319 static int dm_enable_vblank(struct drm_crtc *crtc);
2320 static void dm_disable_vblank(struct drm_crtc *crtc);
2321 
2322 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2323 				 struct dc_state *state, bool enable)
2324 {
2325 	enum dc_irq_source irq_source;
2326 	struct amdgpu_crtc *acrtc;
2327 	int rc = -EBUSY;
2328 	int i = 0;
2329 
2330 	for (i = 0; i < state->stream_count; i++) {
2331 		acrtc = get_crtc_by_otg_inst(
2332 				adev, state->stream_status[i].primary_otg_inst);
2333 
2334 		if (acrtc && state->stream_status[i].plane_count != 0) {
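			/*
			 * Pflip IRQ sources are laid out per OTG instance, so offset
			 * the base source by this CRTC's otg_inst.
			 */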
2335 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2336 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2337 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2338 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2339 			if (rc)
2340 				DRM_WARN("Failed to %s pflip interrupts\n",
2341 					 enable ? "enable" : "disable");
2342 
2343 			if (enable) {
2344 				rc = dm_enable_vblank(&acrtc->base);
2345 				if (rc)
2346 					DRM_WARN("Failed to enable vblank interrupts\n");
2347 			} else {
2348 				dm_disable_vblank(&acrtc->base);
2349 			}
2350 
2351 		}
2352 	}
2353 
2354 }
2355 
2356 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2357 {
2358 	struct dc_state *context = NULL;
2359 	enum dc_status res = DC_ERROR_UNEXPECTED;
2360 	int i;
2361 	struct dc_stream_state *del_streams[MAX_PIPES];
2362 	int del_streams_count = 0;
2363 
2364 	memset(del_streams, 0, sizeof(del_streams));
2365 
2366 	context = dc_create_state(dc);
2367 	if (context == NULL)
2368 		goto context_alloc_fail;
2369 
2370 	dc_resource_state_copy_construct_current(dc, context);
2371 
2372 	/* First remove from context all streams */
2373 	for (i = 0; i < context->stream_count; i++) {
2374 		struct dc_stream_state *stream = context->streams[i];
2375 
2376 		del_streams[del_streams_count++] = stream;
2377 	}
2378 
2379 	/* Remove all planes for removed streams and then remove the streams */
2380 	for (i = 0; i < del_streams_count; i++) {
2381 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2382 			res = DC_FAIL_DETACH_SURFACES;
2383 			goto fail;
2384 		}
2385 
2386 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2387 		if (res != DC_OK)
2388 			goto fail;
2389 	}
2390 
2391 
2392 	res = dc_validate_global_state(dc, context, false);
2393 
2394 	if (res != DC_OK) {
2395 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
2396 		goto fail;
2397 	}
2398 
2399 	res = dc_commit_state(dc, context);
2400 
2401 fail:
2402 	dc_release_state(context);
2403 
2404 context_alloc_fail:
2405 	return res;
2406 }
2407 
2408 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2409 {
2410 	int i;
2411 
2412 	if (dm->hpd_rx_offload_wq) {
2413 		for (i = 0; i < dm->dc->caps.max_links; i++)
2414 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2415 	}
2416 }
2417 
2418 static int dm_suspend(void *handle)
2419 {
2420 	struct amdgpu_device *adev = handle;
2421 	struct amdgpu_display_manager *dm = &adev->dm;
2422 	int ret = 0;
2423 
2424 	if (amdgpu_in_reset(adev)) {
2425 		mutex_lock(&dm->dc_lock);
2426 
2427 #if defined(CONFIG_DRM_AMD_DC_DCN)
2428 		dc_allow_idle_optimizations(adev->dm.dc, false);
2429 #endif
2430 
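		/* Cache the current DC state so dm_resume() can re-commit it after the reset. */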
2431 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2432 
2433 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2434 
2435 		amdgpu_dm_commit_zero_streams(dm->dc);
2436 
2437 		amdgpu_dm_irq_suspend(adev);
2438 
2439 		hpd_rx_irq_work_suspend(dm);
2440 
2441 		return ret;
2442 	}
2443 
2444 	WARN_ON(adev->dm.cached_state);
2445 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2446 
2447 	s3_handle_mst(adev_to_drm(adev), true);
2448 
2449 	amdgpu_dm_irq_suspend(adev);
2450 
2451 	hpd_rx_irq_work_suspend(dm);
2452 
2453 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2454 
2455 	return 0;
2456 }
2457 
2458 static struct amdgpu_dm_connector *
2459 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2460 					     struct drm_crtc *crtc)
2461 {
2462 	uint32_t i;
2463 	struct drm_connector_state *new_con_state;
2464 	struct drm_connector *connector;
2465 	struct drm_crtc *crtc_from_state;
2466 
2467 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2468 		crtc_from_state = new_con_state->crtc;
2469 
2470 		if (crtc_from_state == crtc)
2471 			return to_amdgpu_dm_connector(connector);
2472 	}
2473 
2474 	return NULL;
2475 }
2476 
2477 static void emulated_link_detect(struct dc_link *link)
2478 {
2479 	struct dc_sink_init_data sink_init_data = { 0 };
2480 	struct display_sink_capability sink_caps = { 0 };
2481 	enum dc_edid_status edid_status;
2482 	struct dc_context *dc_ctx = link->ctx;
2483 	struct dc_sink *sink = NULL;
2484 	struct dc_sink *prev_sink = NULL;
2485 
2486 	link->type = dc_connection_none;
2487 	prev_sink = link->local_sink;
2488 
2489 	if (prev_sink)
2490 		dc_sink_release(prev_sink);
2491 
2492 	switch (link->connector_signal) {
2493 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2494 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2495 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2496 		break;
2497 	}
2498 
2499 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2500 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2501 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2502 		break;
2503 	}
2504 
2505 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2506 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2507 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2508 		break;
2509 	}
2510 
2511 	case SIGNAL_TYPE_LVDS: {
2512 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2513 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2514 		break;
2515 	}
2516 
2517 	case SIGNAL_TYPE_EDP: {
2518 		sink_caps.transaction_type =
2519 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2520 		sink_caps.signal = SIGNAL_TYPE_EDP;
2521 		break;
2522 	}
2523 
2524 	case SIGNAL_TYPE_DISPLAY_PORT: {
2525 		sink_caps.transaction_type =
2526 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2527 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2528 		break;
2529 	}
2530 
2531 	default:
2532 		DC_ERROR("Invalid connector type! signal:%d\n",
2533 			link->connector_signal);
2534 		return;
2535 	}
2536 
2537 	sink_init_data.link = link;
2538 	sink_init_data.sink_signal = sink_caps.signal;
2539 
2540 	sink = dc_sink_create(&sink_init_data);
2541 	if (!sink) {
2542 		DC_ERROR("Failed to create sink!\n");
2543 		return;
2544 	}
2545 
2546 	/* dc_sink_create returns a new reference */
2547 	link->local_sink = sink;
2548 
2549 	edid_status = dm_helpers_read_local_edid(
2550 			link->ctx,
2551 			link,
2552 			sink);
2553 
2554 	if (edid_status != EDID_OK)
2555 		DC_ERROR("Failed to read EDID");
2556 
2557 }
2558 
2559 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2560 				     struct amdgpu_display_manager *dm)
2561 {
2562 	struct {
2563 		struct dc_surface_update surface_updates[MAX_SURFACES];
2564 		struct dc_plane_info plane_infos[MAX_SURFACES];
2565 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2566 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2567 		struct dc_stream_update stream_update;
2568 	} * bundle;
2569 	int k, m;
2570 
2571 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2572 
2573 	if (!bundle) {
2574 		dm_error("Failed to allocate update bundle\n");
2575 		goto cleanup;
2576 	}
2577 
2578 	for (k = 0; k < dc_state->stream_count; k++) {
2579 		bundle->stream_update.stream = dc_state->streams[k];
2580 
2581 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2582 			bundle->surface_updates[m].surface =
2583 				dc_state->stream_status->plane_states[m];
2584 			bundle->surface_updates[m].surface->force_full_update =
2585 				true;
2586 		}
2587 
2588 		update_planes_and_stream_adapter(dm->dc,
2589 					 UPDATE_TYPE_FULL,
2590 					 dc_state->stream_status->plane_count,
2591 					 dc_state->streams[k],
2592 					 &bundle->stream_update,
2593 					 bundle->surface_updates);
2594 	}
2595 
2596 cleanup:
2597 	kfree(bundle);
2598 
2599 	return;
2600 }
2601 
2602 static void dm_set_dpms_off(struct dc_link *link)
2603 {
2604 	struct dc_stream_state *stream_state;
2605 	struct amdgpu_dm_connector *aconnector = link->priv;
2606 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2607 	struct dc_stream_update stream_update;
2608 	bool dpms_off = true;
2609 
2610 	memset(&stream_update, 0, sizeof(stream_update));
2611 	stream_update.dpms_off = &dpms_off;
2612 
2613 	mutex_lock(&adev->dm.dc_lock);
2614 	stream_state = dc_stream_find_from_link(link);
2615 
2616 	if (stream_state == NULL) {
2617 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2618 		mutex_unlock(&adev->dm.dc_lock);
2619 		return;
2620 	}
2621 
2622 	stream_update.stream = stream_state;
2623 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2624 				     stream_state, &stream_update,
2625 				     stream_state->ctx->dc->current_state);
2626 	mutex_unlock(&adev->dm.dc_lock);
2627 }
2628 
2629 static int dm_resume(void *handle)
2630 {
2631 	struct amdgpu_device *adev = handle;
2632 	struct drm_device *ddev = adev_to_drm(adev);
2633 	struct amdgpu_display_manager *dm = &adev->dm;
2634 	struct amdgpu_dm_connector *aconnector;
2635 	struct drm_connector *connector;
2636 	struct drm_connector_list_iter iter;
2637 	struct drm_crtc *crtc;
2638 	struct drm_crtc_state *new_crtc_state;
2639 	struct dm_crtc_state *dm_new_crtc_state;
2640 	struct drm_plane *plane;
2641 	struct drm_plane_state *new_plane_state;
2642 	struct dm_plane_state *dm_new_plane_state;
2643 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2644 	enum dc_connection_type new_connection_type = dc_connection_none;
2645 	struct dc_state *dc_state;
2646 	int i, r, j;
2647 
2648 	if (amdgpu_in_reset(adev)) {
2649 		dc_state = dm->cached_dc_state;
2650 
2651 		if (dc_enable_dmub_notifications(adev->dm.dc))
2652 			amdgpu_dm_outbox_init(adev);
2653 
2654 		r = dm_dmub_hw_init(adev);
2655 		if (r)
2656 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2657 
2658 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2659 		dc_resume(dm->dc);
2660 
2661 		amdgpu_dm_irq_resume_early(adev);
2662 
2663 		for (i = 0; i < dc_state->stream_count; i++) {
2664 			dc_state->streams[i]->mode_changed = true;
2665 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2666 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2667 					= 0xffffffff;
2668 			}
2669 		}
2670 #if defined(CONFIG_DRM_AMD_DC_DCN)
2671 		/*
2672 		 * Resource allocation happens for link encoders for newer ASIC in
2673 		 * dc_validate_global_state, so we need to revalidate it.
2674 		 *
2675 		 * This shouldn't fail (it passed once before), so warn if it does.
2676 		 */
2677 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2678 #endif
2679 
2680 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2681 
2682 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2683 
2684 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2685 
2686 		dc_release_state(dm->cached_dc_state);
2687 		dm->cached_dc_state = NULL;
2688 
2689 		amdgpu_dm_irq_resume_late(adev);
2690 
2691 		mutex_unlock(&dm->dc_lock);
2692 
2693 		return 0;
2694 	}
2695 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2696 	dc_release_state(dm_state->context);
2697 	dm_state->context = dc_create_state(dm->dc);
2698 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2699 	dc_resource_state_construct(dm->dc, dm_state->context);
2700 
2701 	/* Re-enable outbox interrupts for DPIA. */
2702 	if (dc_enable_dmub_notifications(adev->dm.dc))
2703 		amdgpu_dm_outbox_init(adev);
2704 
2705 	/* Before powering on DC we need to re-initialize DMUB. */
2706 	dm_dmub_hw_resume(adev);
2707 
2708 	/* power on hardware */
2709 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2710 
2711 	/* program HPD filter */
2712 	dc_resume(dm->dc);
2713 
2714 	/*
2715 	 * Early enable HPD Rx IRQ; this should be done before set mode, as short
2716 	 * pulse interrupts are used for MST.
2717 	 */
2718 	amdgpu_dm_irq_resume_early(adev);
2719 
2720 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2721 	s3_handle_mst(ddev, false);
2722 
2723 	/* Do detection */
2724 	drm_connector_list_iter_begin(ddev, &iter);
2725 	drm_for_each_connector_iter(connector, &iter) {
2726 		aconnector = to_amdgpu_dm_connector(connector);
2727 
2728 		if (!aconnector->dc_link)
2729 			continue;
2730 
2731 		/*
2732 		 * this is the case when traversing through already created
2733 		 * MST connectors, should be skipped
2734 		 */
2735 		if (aconnector->dc_link->type == dc_connection_mst_branch)
2736 			continue;
2737 
2738 		mutex_lock(&aconnector->hpd_lock);
2739 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2740 			DRM_ERROR("KMS: Failed to detect connector\n");
2741 
2742 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2743 			emulated_link_detect(aconnector->dc_link);
2744 		else
2745 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2746 
2747 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2748 			aconnector->fake_enable = false;
2749 
2750 		if (aconnector->dc_sink)
2751 			dc_sink_release(aconnector->dc_sink);
2752 		aconnector->dc_sink = NULL;
2753 		amdgpu_dm_update_connector_after_detect(aconnector);
2754 		mutex_unlock(&aconnector->hpd_lock);
2755 	}
2756 	drm_connector_list_iter_end(&iter);
2757 
2758 	/* Force mode set in atomic commit */
2759 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2760 		new_crtc_state->active_changed = true;
2761 
2762 	/*
2763 	 * atomic_check is expected to create the dc states. We need to release
2764 	 * them here, since they were duplicated as part of the suspend
2765 	 * procedure.
2766 	 */
2767 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2768 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2769 		if (dm_new_crtc_state->stream) {
2770 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2771 			dc_stream_release(dm_new_crtc_state->stream);
2772 			dm_new_crtc_state->stream = NULL;
2773 		}
2774 	}
2775 
2776 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2777 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2778 		if (dm_new_plane_state->dc_state) {
2779 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2780 			dc_plane_state_release(dm_new_plane_state->dc_state);
2781 			dm_new_plane_state->dc_state = NULL;
2782 		}
2783 	}
2784 
2785 	drm_atomic_helper_resume(ddev, dm->cached_state);
2786 
2787 	dm->cached_state = NULL;
2788 
2789 	amdgpu_dm_irq_resume_late(adev);
2790 
2791 	amdgpu_dm_smu_write_watermarks_table(adev);
2792 
2793 	return 0;
2794 }
2795 
2796 /**
2797  * DOC: DM Lifecycle
2798  *
2799  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2800  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2801  * the base driver's device list to be initialized and torn down accordingly.
2802  *
2803  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2804  */
2805 
2806 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2807 	.name = "dm",
2808 	.early_init = dm_early_init,
2809 	.late_init = dm_late_init,
2810 	.sw_init = dm_sw_init,
2811 	.sw_fini = dm_sw_fini,
2812 	.early_fini = amdgpu_dm_early_fini,
2813 	.hw_init = dm_hw_init,
2814 	.hw_fini = dm_hw_fini,
2815 	.suspend = dm_suspend,
2816 	.resume = dm_resume,
2817 	.is_idle = dm_is_idle,
2818 	.wait_for_idle = dm_wait_for_idle,
2819 	.check_soft_reset = dm_check_soft_reset,
2820 	.soft_reset = dm_soft_reset,
2821 	.set_clockgating_state = dm_set_clockgating_state,
2822 	.set_powergating_state = dm_set_powergating_state,
2823 };
2824 
2825 const struct amdgpu_ip_block_version dm_ip_block =
2826 {
2827 	.type = AMD_IP_BLOCK_TYPE_DCE,
2828 	.major = 1,
2829 	.minor = 0,
2830 	.rev = 0,
2831 	.funcs = &amdgpu_dm_funcs,
2832 };
2833 
2834 
2835 /**
2836  * DOC: atomic
2837  *
2838  * *WIP*
2839  */
2840 
2841 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2842 	.fb_create = amdgpu_display_user_framebuffer_create,
2843 	.get_format_info = amd_get_format_info,
2844 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2845 	.atomic_check = amdgpu_dm_atomic_check,
2846 	.atomic_commit = drm_atomic_helper_commit,
2847 };
2848 
2849 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2850 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2851 };
2852 
2853 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2854 {
2855 	u32 max_avg, min_cll, max, min, q, r;
2856 	struct amdgpu_dm_backlight_caps *caps;
2857 	struct amdgpu_display_manager *dm;
2858 	struct drm_connector *conn_base;
2859 	struct amdgpu_device *adev;
2860 	struct dc_link *link = NULL;
2861 	static const u8 pre_computed_values[] = {
2862 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2863 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2864 	int i;
2865 
2866 	if (!aconnector || !aconnector->dc_link)
2867 		return;
2868 
2869 	link = aconnector->dc_link;
2870 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2871 		return;
2872 
2873 	conn_base = &aconnector->base;
2874 	adev = drm_to_adev(conn_base->dev);
2875 	dm = &adev->dm;
2876 	for (i = 0; i < dm->num_of_edps; i++) {
2877 		if (link == dm->backlight_link[i])
2878 			break;
2879 	}
2880 	if (i >= dm->num_of_edps)
2881 		return;
2882 	caps = &dm->backlight_caps[i];
2883 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2884 	caps->aux_support = false;
2885 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2886 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2887 
2888 	if (caps->ext_caps->bits.oled == 1 /*||
2889 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2890 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2891 		caps->aux_support = true;
2892 
2893 	if (amdgpu_backlight == 0)
2894 		caps->aux_support = false;
2895 	else if (amdgpu_backlight == 1)
2896 		caps->aux_support = true;
2897 
2898 	/* From the specification (CTA-861-G), for calculating the maximum
2899 	 * luminance we need to use:
2900 	 *	Luminance = 50*2**(CV/32)
2901 	 * where CV is a one-byte value.
2902 	 * Calculating this expression would normally require floating-point
2903 	 * precision; to avoid that complexity, we take advantage of the fact that
2904 	 * CV is divided by a constant. From Euclid's division algorithm, CV
2905 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2906 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2907 	 * need to pre-compute the value of r/32. For pre-computing the values,
2908 	 * we just used the following Ruby line:
2909 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2910 	 * The results of the above expressions can be verified at
2911 	 * pre_computed_values.
2912 	 */
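	/*
	 * Worked example: max_avg = 100 gives q = 3 and r = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440.
	 */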
2913 	q = max_avg >> 5;
2914 	r = max_avg % 32;
2915 	max = (1 << q) * pre_computed_values[r];
2916 
2917 	// min luminance: maxLum * (CV/255)^2 / 100
2918 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2919 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2920 
2921 	caps->aux_max_input_signal = max;
2922 	caps->aux_min_input_signal = min;
2923 }
2924 
2925 void amdgpu_dm_update_connector_after_detect(
2926 		struct amdgpu_dm_connector *aconnector)
2927 {
2928 	struct drm_connector *connector = &aconnector->base;
2929 	struct drm_device *dev = connector->dev;
2930 	struct dc_sink *sink;
2931 
2932 	/* MST handled by drm_mst framework */
2933 	if (aconnector->mst_mgr.mst_state)
2934 		return;
2935 
2936 	sink = aconnector->dc_link->local_sink;
2937 	if (sink)
2938 		dc_sink_retain(sink);
2939 
2940 	/*
2941 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2942 	 * the connector sink is set to either a fake or a physical sink, depending on link status.
2943 	 * Skip if already done during boot.
2944 	 */
2945 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2946 			&& aconnector->dc_em_sink) {
2947 
2948 		/*
2949 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
2950 		 * stream, because connector->sink is set to NULL on resume.
2951 		 */
2952 		mutex_lock(&dev->mode_config.mutex);
2953 
2954 		if (sink) {
2955 			if (aconnector->dc_sink) {
2956 				amdgpu_dm_update_freesync_caps(connector, NULL);
2957 				/*
2958 				 * The retain and release below are used to
2959 				 * bump up the refcount for the sink because the link no longer
2960 				 * points to it after disconnect; otherwise, on the next
2961 				 * crtc-to-connector reshuffle by UMD we would get an unwanted dc_sink release.
2962 				 */
2963 				dc_sink_release(aconnector->dc_sink);
2964 			}
2965 			aconnector->dc_sink = sink;
2966 			dc_sink_retain(aconnector->dc_sink);
2967 			amdgpu_dm_update_freesync_caps(connector,
2968 					aconnector->edid);
2969 		} else {
2970 			amdgpu_dm_update_freesync_caps(connector, NULL);
2971 			if (!aconnector->dc_sink) {
2972 				aconnector->dc_sink = aconnector->dc_em_sink;
2973 				dc_sink_retain(aconnector->dc_sink);
2974 			}
2975 		}
2976 
2977 		mutex_unlock(&dev->mode_config.mutex);
2978 
2979 		if (sink)
2980 			dc_sink_release(sink);
2981 		return;
2982 	}
2983 
2984 	/*
2985 	 * TODO: temporary guard to look for proper fix
2986 	 * if this sink is an MST sink, we should not do anything
2987 	 */
2988 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2989 		dc_sink_release(sink);
2990 		return;
2991 	}
2992 
2993 	if (aconnector->dc_sink == sink) {
2994 		/*
2995 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2996 		 * Do nothing!!
2997 		 */
2998 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2999 				aconnector->connector_id);
3000 		if (sink)
3001 			dc_sink_release(sink);
3002 		return;
3003 	}
3004 
3005 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3006 		aconnector->connector_id, aconnector->dc_sink, sink);
3007 
3008 	mutex_lock(&dev->mode_config.mutex);
3009 
3010 	/*
3011 	 * 1. Update status of the drm connector
3012 	 * 2. Send an event and let userspace tell us what to do
3013 	 */
3014 	if (sink) {
3015 		/*
3016 		 * TODO: check if we still need the S3 mode update workaround.
3017 		 * If yes, put it here.
3018 		 */
3019 		if (aconnector->dc_sink) {
3020 			amdgpu_dm_update_freesync_caps(connector, NULL);
3021 			dc_sink_release(aconnector->dc_sink);
3022 		}
3023 
3024 		aconnector->dc_sink = sink;
3025 		dc_sink_retain(aconnector->dc_sink);
3026 		if (sink->dc_edid.length == 0) {
3027 			aconnector->edid = NULL;
3028 			if (aconnector->dc_link->aux_mode) {
3029 				drm_dp_cec_unset_edid(
3030 					&aconnector->dm_dp_aux.aux);
3031 			}
3032 		} else {
3033 			aconnector->edid =
3034 				(struct edid *)sink->dc_edid.raw_edid;
3035 
3036 			if (aconnector->dc_link->aux_mode)
3037 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3038 						    aconnector->edid);
3039 		}
3040 
3041 		drm_connector_update_edid_property(connector, aconnector->edid);
3042 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3043 		update_connector_ext_caps(aconnector);
3044 	} else {
3045 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3046 		amdgpu_dm_update_freesync_caps(connector, NULL);
3047 		drm_connector_update_edid_property(connector, NULL);
3048 		aconnector->num_modes = 0;
3049 		dc_sink_release(aconnector->dc_sink);
3050 		aconnector->dc_sink = NULL;
3051 		aconnector->edid = NULL;
3052 #ifdef CONFIG_DRM_AMD_DC_HDCP
3053 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3054 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3055 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3056 #endif
3057 	}
3058 
3059 	mutex_unlock(&dev->mode_config.mutex);
3060 
3061 	update_subconnector_property(aconnector);
3062 
3063 	if (sink)
3064 		dc_sink_release(sink);
3065 }
3066 
3067 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3068 {
3069 	struct drm_connector *connector = &aconnector->base;
3070 	struct drm_device *dev = connector->dev;
3071 	enum dc_connection_type new_connection_type = dc_connection_none;
3072 	struct amdgpu_device *adev = drm_to_adev(dev);
3073 #ifdef CONFIG_DRM_AMD_DC_HDCP
3074 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3075 #endif
3076 
3077 	if (adev->dm.disable_hpd_irq)
3078 		return;
3079 
3080 	/*
3081 	 * In case of failure or MST, there is no need to update the connector status
3082 	 * or notify the OS, since (in the MST case) MST does this in its own context.
3083 	 */
3084 	mutex_lock(&aconnector->hpd_lock);
3085 
3086 #ifdef CONFIG_DRM_AMD_DC_HDCP
3087 	if (adev->dm.hdcp_workqueue) {
3088 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3089 		dm_con_state->update_hdcp = true;
3090 	}
3091 #endif
3092 	if (aconnector->fake_enable)
3093 		aconnector->fake_enable = false;
3094 
3095 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3096 		DRM_ERROR("KMS: Failed to detect connector\n");
3097 
3098 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3099 		emulated_link_detect(aconnector->dc_link);
3100 
3101 
3102 		drm_modeset_lock_all(dev);
3103 		dm_restore_drm_connector_state(dev, connector);
3104 		drm_modeset_unlock_all(dev);
3105 
3106 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3107 			drm_kms_helper_hotplug_event(dev);
3108 
3109 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3110 		if (new_connection_type == dc_connection_none &&
3111 		    aconnector->dc_link->type == dc_connection_none)
3112 			dm_set_dpms_off(aconnector->dc_link);
3113 
3114 		amdgpu_dm_update_connector_after_detect(aconnector);
3115 
3116 		drm_modeset_lock_all(dev);
3117 		dm_restore_drm_connector_state(dev, connector);
3118 		drm_modeset_unlock_all(dev);
3119 
3120 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3121 			drm_kms_helper_hotplug_event(dev);
3122 	}
3123 	mutex_unlock(&aconnector->hpd_lock);
3124 
3125 }
3126 
3127 static void handle_hpd_irq(void *param)
3128 {
3129 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3130 
3131 	handle_hpd_irq_helper(aconnector);
3132 
3133 }
3134 
3135 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3136 {
3137 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3138 	uint8_t dret;
3139 	bool new_irq_handled = false;
3140 	int dpcd_addr;
3141 	int dpcd_bytes_to_read;
3142 
3143 	const int max_process_count = 30;
3144 	int process_count = 0;
3145 
3146 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3147 
3148 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3149 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3150 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3151 		dpcd_addr = DP_SINK_COUNT;
3152 	} else {
3153 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3154 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3155 		dpcd_addr = DP_SINK_COUNT_ESI;
3156 	}
3157 
3158 	dret = drm_dp_dpcd_read(
3159 		&aconnector->dm_dp_aux.aux,
3160 		dpcd_addr,
3161 		esi,
3162 		dpcd_bytes_to_read);
3163 
3164 	while (dret == dpcd_bytes_to_read &&
3165 		process_count < max_process_count) {
3166 		uint8_t retry;
3167 		dret = 0;
3168 
3169 		process_count++;
3170 
3171 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3172 		/* handle HPD short pulse irq */
3173 		if (aconnector->mst_mgr.mst_state)
3174 			drm_dp_mst_hpd_irq(
3175 				&aconnector->mst_mgr,
3176 				esi,
3177 				&new_irq_handled);
3178 
3179 		if (new_irq_handled) {
3180 			/* ACK at DPCD to notify downstream */
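			/*
			 * Write back the ESI vectors (everything after the sink count
			 * byte), hence one byte fewer than was read.
			 */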
3181 			const int ack_dpcd_bytes_to_write =
3182 				dpcd_bytes_to_read - 1;
3183 
3184 			for (retry = 0; retry < 3; retry++) {
3185 				uint8_t wret;
3186 
3187 				wret = drm_dp_dpcd_write(
3188 					&aconnector->dm_dp_aux.aux,
3189 					dpcd_addr + 1,
3190 					&esi[1],
3191 					ack_dpcd_bytes_to_write);
3192 				if (wret == ack_dpcd_bytes_to_write)
3193 					break;
3194 			}
3195 
3196 			/* check if there is new irq to be handled */
3197 			dret = drm_dp_dpcd_read(
3198 				&aconnector->dm_dp_aux.aux,
3199 				dpcd_addr,
3200 				esi,
3201 				dpcd_bytes_to_read);
3202 
3203 			new_irq_handled = false;
3204 		} else {
3205 			break;
3206 		}
3207 	}
3208 
3209 	if (process_count == max_process_count)
3210 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3211 }
3212 
3213 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3214 							union hpd_irq_data hpd_irq_data)
3215 {
3216 	struct hpd_rx_irq_offload_work *offload_work =
3217 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3218 
3219 	if (!offload_work) {
3220 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3221 		return;
3222 	}
3223 
3224 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3225 	offload_work->data = hpd_irq_data;
3226 	offload_work->offload_wq = offload_wq;
3227 
3228 	queue_work(offload_wq->wq, &offload_work->work);
3229 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3230 }
3231 
3232 static void handle_hpd_rx_irq(void *param)
3233 {
3234 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3235 	struct drm_connector *connector = &aconnector->base;
3236 	struct drm_device *dev = connector->dev;
3237 	struct dc_link *dc_link = aconnector->dc_link;
3238 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3239 	bool result = false;
3240 	enum dc_connection_type new_connection_type = dc_connection_none;
3241 	struct amdgpu_device *adev = drm_to_adev(dev);
3242 	union hpd_irq_data hpd_irq_data;
3243 	bool link_loss = false;
3244 	bool has_left_work = false;
3245 	int idx = aconnector->base.index;
3246 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3247 
3248 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3249 
3250 	if (adev->dm.disable_hpd_irq)
3251 		return;
3252 
3253 	/*
3254 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
3255 	 * conflicts; once an i2c helper is implemented, this mutex should be
3256 	 * retired.
3257 	 */
3258 	mutex_lock(&aconnector->hpd_lock);
3259 
3260 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3261 						&link_loss, true, &has_left_work);
3262 
3263 	if (!has_left_work)
3264 		goto out;
3265 
3266 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3267 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3268 		goto out;
3269 	}
3270 
3271 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3272 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3273 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3274 			dm_handle_mst_sideband_msg(aconnector);
3275 			goto out;
3276 		}
3277 
3278 		if (link_loss) {
3279 			bool skip = false;
3280 
3281 			spin_lock(&offload_wq->offload_lock);
3282 			skip = offload_wq->is_handling_link_loss;
3283 
3284 			if (!skip)
3285 				offload_wq->is_handling_link_loss = true;
3286 
3287 			spin_unlock(&offload_wq->offload_lock);
3288 
3289 			if (!skip)
3290 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3291 
3292 			goto out;
3293 		}
3294 	}
3295 
3296 out:
3297 	if (result && !is_mst_root_connector) {
3298 		/* Downstream Port status changed. */
3299 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3300 			DRM_ERROR("KMS: Failed to detect connector\n");
3301 
3302 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3303 			emulated_link_detect(dc_link);
3304 
3305 			if (aconnector->fake_enable)
3306 				aconnector->fake_enable = false;
3307 
3308 			amdgpu_dm_update_connector_after_detect(aconnector);
3309 
3310 
3311 			drm_modeset_lock_all(dev);
3312 			dm_restore_drm_connector_state(dev, connector);
3313 			drm_modeset_unlock_all(dev);
3314 
3315 			drm_kms_helper_hotplug_event(dev);
3316 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3317 
3318 			if (aconnector->fake_enable)
3319 				aconnector->fake_enable = false;
3320 
3321 			amdgpu_dm_update_connector_after_detect(aconnector);
3322 
3323 
3324 			drm_modeset_lock_all(dev);
3325 			dm_restore_drm_connector_state(dev, connector);
3326 			drm_modeset_unlock_all(dev);
3327 
3328 			drm_kms_helper_hotplug_event(dev);
3329 		}
3330 	}
3331 #ifdef CONFIG_DRM_AMD_DC_HDCP
3332 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3333 		if (adev->dm.hdcp_workqueue)
3334 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3335 	}
3336 #endif
3337 
3338 	if (dc_link->type != dc_connection_mst_branch)
3339 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3340 
3341 	mutex_unlock(&aconnector->hpd_lock);
3342 }
3343 
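/*
 * Walk all connectors and register low-context handlers for the HPD and
 * HPD RX (short pulse) interrupt sources exposed by each dc_link. The
 * connector is also remembered in the matching hpd_rx offload queue.
 */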
3344 static void register_hpd_handlers(struct amdgpu_device *adev)
3345 {
3346 	struct drm_device *dev = adev_to_drm(adev);
3347 	struct drm_connector *connector;
3348 	struct amdgpu_dm_connector *aconnector;
3349 	const struct dc_link *dc_link;
3350 	struct dc_interrupt_params int_params = {0};
3351 
3352 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3353 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3354 
3355 	list_for_each_entry(connector,
3356 			&dev->mode_config.connector_list, head)	{
3357 
3358 		aconnector = to_amdgpu_dm_connector(connector);
3359 		dc_link = aconnector->dc_link;
3360 
3361 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3362 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3363 			int_params.irq_source = dc_link->irq_source_hpd;
3364 
3365 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3366 					handle_hpd_irq,
3367 					(void *) aconnector);
3368 		}
3369 
3370 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3371 
3372 			/* Also register for DP short pulse (hpd_rx). */
3373 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3374 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3375 
3376 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3377 					handle_hpd_rx_irq,
3378 					(void *) aconnector);
3379 
3380 			if (adev->dm.hpd_rx_offload_wq)
3381 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3382 					aconnector;
3383 		}
3384 	}
3385 }
3386 
3387 #if defined(CONFIG_DRM_AMD_DC_SI)
3388 /* Register IRQ sources and initialize IRQ callbacks */
3389 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3390 {
3391 	struct dc *dc = adev->dm.dc;
3392 	struct common_irq_params *c_irq_params;
3393 	struct dc_interrupt_params int_params = {0};
3394 	int r;
3395 	int i;
3396 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3397 
3398 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3399 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3400 
3401 	/*
3402 	 * Actions of amdgpu_irq_add_id():
3403 	 * 1. Register a set() function with base driver.
3404 	 *    Base driver will call set() function to enable/disable an
3405 	 *    interrupt in DC hardware.
3406 	 * 2. Register amdgpu_dm_irq_handler().
3407 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3408 	 *    coming from DC hardware.
3409 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3410 	 *    for acknowledging and handling. */
3411 
3412 	/* Use VBLANK interrupt */
3413 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3414 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3415 		if (r) {
3416 			DRM_ERROR("Failed to add crtc irq id!\n");
3417 			return r;
3418 		}
3419 
3420 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3421 		int_params.irq_source =
3422 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3423 
3424 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3425 
3426 		c_irq_params->adev = adev;
3427 		c_irq_params->irq_src = int_params.irq_source;
3428 
3429 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3430 				dm_crtc_high_irq, c_irq_params);
3431 	}
3432 
3433 	/* Use GRPH_PFLIP interrupt */
3434 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3435 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3436 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3437 		if (r) {
3438 			DRM_ERROR("Failed to add page flip irq id!\n");
3439 			return r;
3440 		}
3441 
3442 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3443 		int_params.irq_source =
3444 			dc_interrupt_to_irq_source(dc, i, 0);
3445 
3446 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3447 
3448 		c_irq_params->adev = adev;
3449 		c_irq_params->irq_src = int_params.irq_source;
3450 
3451 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3452 				dm_pflip_high_irq, c_irq_params);
3453 
3454 	}
3455 
3456 	/* HPD */
3457 	r = amdgpu_irq_add_id(adev, client_id,
3458 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3459 	if (r) {
3460 		DRM_ERROR("Failed to add hpd irq id!\n");
3461 		return r;
3462 	}
3463 
3464 	register_hpd_handlers(adev);
3465 
3466 	return 0;
3467 }
3468 #endif
3469 
3470 /* Register IRQ sources and initialize IRQ callbacks */
3471 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3472 {
3473 	struct dc *dc = adev->dm.dc;
3474 	struct common_irq_params *c_irq_params;
3475 	struct dc_interrupt_params int_params = {0};
3476 	int r;
3477 	int i;
3478 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3479 
3480 	if (adev->asic_type >= CHIP_VEGA10)
3481 		client_id = SOC15_IH_CLIENTID_DCE;
3482 
3483 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3484 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3485 
3486 	/*
3487 	 * Actions of amdgpu_irq_add_id():
3488 	 * 1. Register a set() function with base driver.
3489 	 *    Base driver will call set() function to enable/disable an
3490 	 *    interrupt in DC hardware.
3491 	 * 2. Register amdgpu_dm_irq_handler().
3492 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3493 	 *    coming from DC hardware.
3494 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3495 	 *    for acknowledging and handling. */
3496 
3497 	/* Use VBLANK interrupt */
3498 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3499 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3500 		if (r) {
3501 			DRM_ERROR("Failed to add crtc irq id!\n");
3502 			return r;
3503 		}
3504 
3505 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3506 		int_params.irq_source =
3507 			dc_interrupt_to_irq_source(dc, i, 0);
3508 
3509 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3510 
3511 		c_irq_params->adev = adev;
3512 		c_irq_params->irq_src = int_params.irq_source;
3513 
3514 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3515 				dm_crtc_high_irq, c_irq_params);
3516 	}
3517 
3518 	/* Use VUPDATE interrupt */
3519 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3520 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3521 		if (r) {
3522 			DRM_ERROR("Failed to add vupdate irq id!\n");
3523 			return r;
3524 		}
3525 
3526 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3527 		int_params.irq_source =
3528 			dc_interrupt_to_irq_source(dc, i, 0);
3529 
3530 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3531 
3532 		c_irq_params->adev = adev;
3533 		c_irq_params->irq_src = int_params.irq_source;
3534 
3535 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3536 				dm_vupdate_high_irq, c_irq_params);
3537 	}
3538 
3539 	/* Use GRPH_PFLIP interrupt */
3540 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3541 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3542 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3543 		if (r) {
3544 			DRM_ERROR("Failed to add page flip irq id!\n");
3545 			return r;
3546 		}
3547 
3548 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3549 		int_params.irq_source =
3550 			dc_interrupt_to_irq_source(dc, i, 0);
3551 
3552 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3553 
3554 		c_irq_params->adev = adev;
3555 		c_irq_params->irq_src = int_params.irq_source;
3556 
3557 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3558 				dm_pflip_high_irq, c_irq_params);
3559 
3560 	}
3561 
3562 	/* HPD */
3563 	r = amdgpu_irq_add_id(adev, client_id,
3564 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3565 	if (r) {
3566 		DRM_ERROR("Failed to add hpd irq id!\n");
3567 		return r;
3568 	}
3569 
3570 	register_hpd_handlers(adev);
3571 
3572 	return 0;
3573 }
3574 
3575 #if defined(CONFIG_DRM_AMD_DC_DCN)
3576 /* Register IRQ sources and initialize IRQ callbacks */
3577 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3578 {
3579 	struct dc *dc = adev->dm.dc;
3580 	struct common_irq_params *c_irq_params;
3581 	struct dc_interrupt_params int_params = {0};
3582 	int r;
3583 	int i;
3584 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3585 	static const unsigned int vrtl_int_srcid[] = {
3586 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3587 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3588 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3589 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3590 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3591 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3592 	};
3593 #endif
3594 
3595 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3596 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3597 
3598 	/*
3599 	 * Actions of amdgpu_irq_add_id():
3600 	 * 1. Register a set() function with base driver.
3601 	 *    Base driver will call set() function to enable/disable an
3602 	 *    interrupt in DC hardware.
3603 	 * 2. Register amdgpu_dm_irq_handler().
3604 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3605 	 *    coming from DC hardware.
3606 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3607 	 *    for acknowledging and handling.
3608 	 */
3609 
3610 	/* Use VSTARTUP interrupt */
3611 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3612 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3613 			i++) {
3614 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3615 
3616 		if (r) {
3617 			DRM_ERROR("Failed to add crtc irq id!\n");
3618 			return r;
3619 		}
3620 
3621 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3622 		int_params.irq_source =
3623 			dc_interrupt_to_irq_source(dc, i, 0);
3624 
3625 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3626 
3627 		c_irq_params->adev = adev;
3628 		c_irq_params->irq_src = int_params.irq_source;
3629 
3630 		amdgpu_dm_irq_register_interrupt(
3631 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3632 	}
3633 
3634 	/* Use otg vertical line interrupt */
3635 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3636 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3637 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3638 				vrtl_int_srcid[i], &adev->vline0_irq);
3639 
3640 		if (r) {
3641 			DRM_ERROR("Failed to add vline0 irq id!\n");
3642 			return r;
3643 		}
3644 
3645 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3646 		int_params.irq_source =
3647 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3648 
3649 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3650 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3651 			break;
3652 		}
3653 
3654 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3655 					- DC_IRQ_SOURCE_DC1_VLINE0];
3656 
3657 		c_irq_params->adev = adev;
3658 		c_irq_params->irq_src = int_params.irq_source;
3659 
3660 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3661 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3662 	}
3663 #endif
3664 
3665 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3666 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3667 	 * to trigger at end of each vblank, regardless of state of the lock,
3668 	 * matching DCE behaviour.
3669 	 */
3670 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3671 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3672 	     i++) {
3673 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3674 
3675 		if (r) {
3676 			DRM_ERROR("Failed to add vupdate irq id!\n");
3677 			return r;
3678 		}
3679 
3680 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3681 		int_params.irq_source =
3682 			dc_interrupt_to_irq_source(dc, i, 0);
3683 
3684 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3685 
3686 		c_irq_params->adev = adev;
3687 		c_irq_params->irq_src = int_params.irq_source;
3688 
3689 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3690 				dm_vupdate_high_irq, c_irq_params);
3691 	}
3692 
3693 	/* Use GRPH_PFLIP interrupt */
3694 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3695 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3696 			i++) {
3697 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3698 		if (r) {
3699 			DRM_ERROR("Failed to add page flip irq id!\n");
3700 			return r;
3701 		}
3702 
3703 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3704 		int_params.irq_source =
3705 			dc_interrupt_to_irq_source(dc, i, 0);
3706 
3707 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3708 
3709 		c_irq_params->adev = adev;
3710 		c_irq_params->irq_src = int_params.irq_source;
3711 
3712 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3713 				dm_pflip_high_irq, c_irq_params);
3714 
3715 	}
3716 
3717 	/* HPD */
3718 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3719 			&adev->hpd_irq);
3720 	if (r) {
3721 		DRM_ERROR("Failed to add hpd irq id!\n");
3722 		return r;
3723 	}
3724 
3725 	register_hpd_handlers(adev);
3726 
3727 	return 0;
3728 }
3729 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3730 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3731 {
3732 	struct dc *dc = adev->dm.dc;
3733 	struct common_irq_params *c_irq_params;
3734 	struct dc_interrupt_params int_params = {0};
3735 	int r, i;
3736 
3737 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3738 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3739 
3740 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3741 			&adev->dmub_outbox_irq);
3742 	if (r) {
3743 		DRM_ERROR("Failed to add outbox irq id!\n");
3744 		return r;
3745 	}
3746 
3747 	if (dc->ctx->dmub_srv) {
3748 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3749 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3750 		int_params.irq_source =
3751 		dc_interrupt_to_irq_source(dc, i, 0);
3752 
3753 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3754 
3755 		c_irq_params->adev = adev;
3756 		c_irq_params->irq_src = int_params.irq_source;
3757 
3758 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3759 				dm_dmub_outbox1_low_irq, c_irq_params);
3760 	}
3761 
3762 	return 0;
3763 }
3764 #endif
3765 
3766 /*
3767  * Acquires the lock for the atomic state object and returns
3768  * the new atomic state.
3769  *
3770  * This should only be called during atomic check.
3771  */
3772 static int dm_atomic_get_state(struct drm_atomic_state *state,
3773 			       struct dm_atomic_state **dm_state)
3774 {
3775 	struct drm_device *dev = state->dev;
3776 	struct amdgpu_device *adev = drm_to_adev(dev);
3777 	struct amdgpu_display_manager *dm = &adev->dm;
3778 	struct drm_private_state *priv_state;
3779 
3780 	if (*dm_state)
3781 		return 0;
3782 
3783 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3784 	if (IS_ERR(priv_state))
3785 		return PTR_ERR(priv_state);
3786 
3787 	*dm_state = to_dm_atomic_state(priv_state);
3788 
3789 	return 0;
3790 }
3791 
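/*
 * Return the DM private object's new state from an in-flight atomic state,
 * or NULL if this commit does not carry one.
 */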
3792 static struct dm_atomic_state *
3793 dm_atomic_get_new_state(struct drm_atomic_state *state)
3794 {
3795 	struct drm_device *dev = state->dev;
3796 	struct amdgpu_device *adev = drm_to_adev(dev);
3797 	struct amdgpu_display_manager *dm = &adev->dm;
3798 	struct drm_private_obj *obj;
3799 	struct drm_private_state *new_obj_state;
3800 	int i;
3801 
3802 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3803 		if (obj->funcs == dm->atomic_obj.funcs)
3804 			return to_dm_atomic_state(new_obj_state);
3805 	}
3806 
3807 	return NULL;
3808 }
3809 
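/*
 * Duplicate the DM private state for a new atomic transaction. The current
 * dc_state context is copied so that validation works on a private copy;
 * returns NULL if either the allocation or the context copy fails.
 */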
3810 static struct drm_private_state *
3811 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3812 {
3813 	struct dm_atomic_state *old_state, *new_state;
3814 
3815 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3816 	if (!new_state)
3817 		return NULL;
3818 
3819 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3820 
3821 	old_state = to_dm_atomic_state(obj->state);
3822 
3823 	if (old_state && old_state->context)
3824 		new_state->context = dc_copy_state(old_state->context);
3825 
3826 	if (!new_state->context) {
3827 		kfree(new_state);
3828 		return NULL;
3829 	}
3830 
3831 	return &new_state->base;
3832 }
3833 
3834 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3835 				    struct drm_private_state *state)
3836 {
3837 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3838 
3839 	if (dm_state && dm_state->context)
3840 		dc_release_state(dm_state->context);
3841 
3842 	kfree(dm_state);
3843 }
3844 
3845 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3846 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3847 	.atomic_destroy_state = dm_atomic_destroy_state,
3848 };
3849 
3850 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3851 {
3852 	struct dm_atomic_state *state;
3853 	int r;
3854 
3855 	adev->mode_info.mode_config_initialized = true;
3856 
3857 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3858 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3859 
3860 	adev_to_drm(adev)->mode_config.max_width = 16384;
3861 	adev_to_drm(adev)->mode_config.max_height = 16384;
3862 
3863 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3864 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3865 	/* indicates support for immediate flip */
3866 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3867 
3868 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3869 
3870 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3871 	if (!state)
3872 		return -ENOMEM;
3873 
3874 	state->context = dc_create_state(adev->dm.dc);
3875 	if (!state->context) {
3876 		kfree(state);
3877 		return -ENOMEM;
3878 	}
3879 
3880 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3881 
3882 	drm_atomic_private_obj_init(adev_to_drm(adev),
3883 				    &adev->dm.atomic_obj,
3884 				    &state->base,
3885 				    &dm_atomic_state_funcs);
3886 
3887 	r = amdgpu_display_modeset_create_props(adev);
3888 	if (r) {
3889 		dc_release_state(state->context);
3890 		kfree(state);
3891 		return r;
3892 	}
3893 
3894 	r = amdgpu_dm_audio_init(adev);
3895 	if (r) {
3896 		dc_release_state(state->context);
3897 		kfree(state);
3898 		return r;
3899 	}
3900 
3901 	return 0;
3902 }
3903 
3904 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3905 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3906 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3907 
3908 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3909 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3910 
3911 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3912 					    int bl_idx)
3913 {
3914 #if defined(CONFIG_ACPI)
3915 	struct amdgpu_dm_backlight_caps caps;
3916 
3917 	memset(&caps, 0, sizeof(caps));
3918 
3919 	if (dm->backlight_caps[bl_idx].caps_valid)
3920 		return;
3921 
3922 	amdgpu_acpi_get_backlight_caps(&caps);
3923 	if (caps.caps_valid) {
3924 		dm->backlight_caps[bl_idx].caps_valid = true;
3925 		if (caps.aux_support)
3926 			return;
3927 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3928 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3929 	} else {
3930 		dm->backlight_caps[bl_idx].min_input_signal =
3931 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3932 		dm->backlight_caps[bl_idx].max_input_signal =
3933 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3934 	}
3935 #else
3936 	if (dm->backlight_caps[bl_idx].aux_support)
3937 		return;
3938 
3939 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3940 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3941 #endif
3942 }
3943 
3944 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3945 				unsigned *min, unsigned *max)
3946 {
3947 	if (!caps)
3948 		return 0;
3949 
3950 	if (caps->aux_support) {
3951 		// Firmware limits are in nits, DC API wants millinits.
3952 		*max = 1000 * caps->aux_max_input_signal;
3953 		*min = 1000 * caps->aux_min_input_signal;
3954 	} else {
3955 		// Firmware limits are 8-bit, PWM control is 16-bit.
3956 		*max = 0x101 * caps->max_input_signal;
3957 		*min = 0x101 * caps->min_input_signal;
3958 	}
3959 	return 1;
3960 }
3961 
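/*
 * Linearly rescale a user brightness value in 0..AMDGPU_MAX_BL_LEVEL onto
 * the [min, max] range reported by get_brightness_range(). As a rough,
 * illustrative example (assuming the default 12/255 input-signal caps and
 * AMDGPU_MAX_BL_LEVEL == 255, matching the 0..255 comments below): a user
 * value of 128 on the PWM path maps to about
 * 0x101 * 12 + (0x101 * 255 - 0x101 * 12) * 128 / 255 ~= 34432.
 */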
3962 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3963 					uint32_t brightness)
3964 {
3965 	unsigned min, max;
3966 
3967 	if (!get_brightness_range(caps, &min, &max))
3968 		return brightness;
3969 
3970 	// Rescale 0..255 to min..max
3971 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3972 				       AMDGPU_MAX_BL_LEVEL);
3973 }
3974 
3975 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3976 				      uint32_t brightness)
3977 {
3978 	unsigned min, max;
3979 
3980 	if (!get_brightness_range(caps, &min, &max))
3981 		return brightness;
3982 
3983 	if (brightness < min)
3984 		return 0;
3985 	// Rescale min..max to 0..255
3986 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3987 				 max - min);
3988 }
3989 
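/*
 * Apply a user brightness value to one eDP link: cache the requested level,
 * convert it to firmware units and program it either over DPCD AUX (in
 * millinits) or through the PWM backlight path. actual_brightness is only
 * updated when the hardware accepted the new value.
 */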
3990 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3991 					 int bl_idx,
3992 					 u32 user_brightness)
3993 {
3994 	struct amdgpu_dm_backlight_caps caps;
3995 	struct dc_link *link;
3996 	u32 brightness;
3997 	bool rc;
3998 
3999 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4000 	caps = dm->backlight_caps[bl_idx];
4001 
4002 	dm->brightness[bl_idx] = user_brightness;
4003 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4004 	link = (struct dc_link *)dm->backlight_link[bl_idx];
4005 
4006 	/* Change brightness based on AUX property */
4007 	if (caps.aux_support) {
4008 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4009 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4010 		if (!rc)
4011 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4012 	} else {
4013 		rc = dc_link_set_backlight_level(link, brightness, 0);
4014 		if (!rc)
4015 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4016 	}
4017 
4018 	if (rc)
4019 		dm->actual_brightness[bl_idx] = user_brightness;
4020 }
4021 
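/*
 * Backlight class callback: figure out which eDP panel this backlight
 * device belongs to (falling back to index 0) and program its level.
 */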
4022 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4023 {
4024 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4025 	int i;
4026 
4027 	for (i = 0; i < dm->num_of_edps; i++) {
4028 		if (bd == dm->backlight_dev[i])
4029 			break;
4030 	}
4031 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4032 		i = 0;
4033 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4034 
4035 	return 0;
4036 }
4037 
4038 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4039 					 int bl_idx)
4040 {
4041 	struct amdgpu_dm_backlight_caps caps;
4042 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4043 
4044 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4045 	caps = dm->backlight_caps[bl_idx];
4046 
4047 	if (caps.aux_support) {
4048 		u32 avg, peak;
4049 		bool rc;
4050 
4051 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4052 		if (!rc)
4053 			return dm->brightness[bl_idx];
4054 		return convert_brightness_to_user(&caps, avg);
4055 	} else {
4056 		int ret = dc_link_get_backlight_level(link);
4057 
4058 		if (ret == DC_ERROR_UNEXPECTED)
4059 			return dm->brightness[bl_idx];
4060 		return convert_brightness_to_user(&caps, ret);
4061 	}
4062 }
4063 
4064 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4065 {
4066 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4067 	int i;
4068 
4069 	for (i = 0; i < dm->num_of_edps; i++) {
4070 		if (bd == dm->backlight_dev[i])
4071 			break;
4072 	}
4073 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4074 		i = 0;
4075 	return amdgpu_dm_backlight_get_level(dm, i);
4076 }
4077 
4078 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4079 	.options = BL_CORE_SUSPENDRESUME,
4080 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4081 	.update_status	= amdgpu_dm_backlight_update_status,
4082 };
4083 
4084 static void
4085 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4086 {
4087 	char bl_name[16];
4088 	struct backlight_properties props = { 0 };
4089 
4090 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4091 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4092 
4093 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4094 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4095 	props.type = BACKLIGHT_RAW;
4096 
4097 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4098 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4099 
4100 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4101 								       adev_to_drm(dm->adev)->dev,
4102 								       dm,
4103 								       &amdgpu_dm_backlight_ops,
4104 								       &props);
4105 
4106 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4107 		DRM_ERROR("DM: Backlight registration failed!\n");
4108 	else
4109 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4110 }
4111 #endif
4112 
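/*
 * Allocate and initialize a single DRM plane. The possible_crtcs mask ties
 * each primary plane to its matching CRTC, while overlay/underlay planes
 * (plane_id >= max_streams) are allowed on any CRTC.
 */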
4113 static int initialize_plane(struct amdgpu_display_manager *dm,
4114 			    struct amdgpu_mode_info *mode_info, int plane_id,
4115 			    enum drm_plane_type plane_type,
4116 			    const struct dc_plane_cap *plane_cap)
4117 {
4118 	struct drm_plane *plane;
4119 	unsigned long possible_crtcs;
4120 	int ret = 0;
4121 
4122 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4123 	if (!plane) {
4124 		DRM_ERROR("KMS: Failed to allocate plane\n");
4125 		return -ENOMEM;
4126 	}
4127 	plane->type = plane_type;
4128 
4129 	/*
4130 	 * HACK: IGT tests expect that the primary plane for a CRTC
4131 	 * can only have one possible CRTC. Only expose support for
4132 	 * any CRTC on planes that are not going to be used as a
4133 	 * primary plane for a CRTC - i.e. overlay or underlay planes.
4134 	 */
4135 	possible_crtcs = 1 << plane_id;
4136 	if (plane_id >= dm->dc->caps.max_streams)
4137 		possible_crtcs = 0xff;
4138 
4139 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4140 
4141 	if (ret) {
4142 		DRM_ERROR("KMS: Failed to initialize plane\n");
4143 		kfree(plane);
4144 		return ret;
4145 	}
4146 
4147 	if (mode_info)
4148 		mode_info->planes[plane_id] = plane;
4149 
4150 	return ret;
4151 }
4152 
4153 
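/*
 * Register a backlight device for eDP/LVDS links that have a panel
 * connected, and remember the link so the brightness callbacks can reach it.
 */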
4154 static void register_backlight_device(struct amdgpu_display_manager *dm,
4155 				      struct dc_link *link)
4156 {
4157 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4158 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4159 
4160 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4161 	    link->type != dc_connection_none) {
4162 		/*
4163 		 * Even if registration failed, we should continue with
4164 		 * DM initialization because not having a backlight control
4165 		 * is better than a black screen.
4166 		 */
4167 		if (!dm->backlight_dev[dm->num_of_edps])
4168 			amdgpu_dm_register_backlight_device(dm);
4169 
4170 		if (dm->backlight_dev[dm->num_of_edps]) {
4171 			dm->backlight_link[dm->num_of_edps] = link;
4172 			dm->num_of_edps++;
4173 		}
4174 	}
4175 #endif
4176 }
4177 
4178 
4179 /*
4180  * In this architecture, the association
4181  * connector -> encoder -> crtc
4182  * is not really required. The crtc and connector will hold the
4183  * display_index as an abstraction to use with the DAL component.
4184  *
4185  * Returns 0 on success
4186  */
4187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4188 {
4189 	struct amdgpu_display_manager *dm = &adev->dm;
4190 	int32_t i;
4191 	struct amdgpu_dm_connector *aconnector = NULL;
4192 	struct amdgpu_encoder *aencoder = NULL;
4193 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4194 	uint32_t link_cnt;
4195 	int32_t primary_planes;
4196 	enum dc_connection_type new_connection_type = dc_connection_none;
4197 	const struct dc_plane_cap *plane;
4198 
4199 	dm->display_indexes_num = dm->dc->caps.max_streams;
4200 	/* Update the actual used number of crtc */
4201 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4202 
4203 	link_cnt = dm->dc->caps.max_links;
4204 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4205 		DRM_ERROR("DM: Failed to initialize mode config\n");
4206 		return -EINVAL;
4207 	}
4208 
4209 	/* There is one primary plane per CRTC */
4210 	primary_planes = dm->dc->caps.max_streams;
4211 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4212 
4213 	/*
4214 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4215 	 * Order is reversed to match iteration order in atomic check.
4216 	 */
4217 	for (i = (primary_planes - 1); i >= 0; i--) {
4218 		plane = &dm->dc->caps.planes[i];
4219 
4220 		if (initialize_plane(dm, mode_info, i,
4221 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4222 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4223 			goto fail;
4224 		}
4225 	}
4226 
4227 	/*
4228 	 * Initialize overlay planes, index starting after primary planes.
4229 	 * These planes have a higher DRM index than the primary planes since
4230 	 * they should be considered as having a higher z-order.
4231 	 * Order is reversed to match iteration order in atomic check.
4232 	 *
4233 	 * Only support DCN for now, and only expose one so we don't encourage
4234 	 * userspace to use up all the pipes.
4235 	 */
4236 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4237 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4238 
4239 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4240 			continue;
4241 
4242 		if (!plane->blends_with_above || !plane->blends_with_below)
4243 			continue;
4244 
4245 		if (!plane->pixel_format_support.argb8888)
4246 			continue;
4247 
4248 		if (initialize_plane(dm, NULL, primary_planes + i,
4249 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4250 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4251 			goto fail;
4252 		}
4253 
4254 		/* Only create one overlay plane. */
4255 		break;
4256 	}
4257 
4258 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4259 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4260 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4261 			goto fail;
4262 		}
4263 
4264 #if defined(CONFIG_DRM_AMD_DC_DCN)
4265 	/* Use Outbox interrupt */
4266 	switch (adev->asic_type) {
4267 	case CHIP_SIENNA_CICHLID:
4268 	case CHIP_NAVY_FLOUNDER:
4269 	case CHIP_YELLOW_CARP:
4270 	case CHIP_RENOIR:
4271 		if (register_outbox_irq_handlers(dm->adev)) {
4272 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4273 			goto fail;
4274 		}
4275 		break;
4276 	default:
4277 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
4278 	}
4279 #endif
4280 
4281 	/* loops over all connectors on the board */
4282 	for (i = 0; i < link_cnt; i++) {
4283 		struct dc_link *link = NULL;
4284 
4285 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4286 			DRM_ERROR(
4287 				"KMS: Cannot support more than %d display indexes\n",
4288 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4289 			continue;
4290 		}
4291 
4292 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4293 		if (!aconnector)
4294 			goto fail;
4295 
4296 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4297 		if (!aencoder)
4298 			goto fail;
4299 
4300 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4301 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4302 			goto fail;
4303 		}
4304 
4305 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4306 			DRM_ERROR("KMS: Failed to initialize connector\n");
4307 			goto fail;
4308 		}
4309 
4310 		link = dc_get_link_at_index(dm->dc, i);
4311 
4312 		if (!dc_link_detect_sink(link, &new_connection_type))
4313 			DRM_ERROR("KMS: Failed to detect connector\n");
4314 
4315 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4316 			emulated_link_detect(link);
4317 			amdgpu_dm_update_connector_after_detect(aconnector);
4318 
4319 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4320 			amdgpu_dm_update_connector_after_detect(aconnector);
4321 			register_backlight_device(dm, link);
4322 
4323 			if (dm->num_of_edps)
4324 				update_connector_ext_caps(aconnector);
4325 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4326 				amdgpu_dm_set_psr_caps(link);
4327 
4328 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4329 			 * PSR is also supported.
4330 			 */
4331 			if (link->psr_settings.psr_feature_enabled)
4332 				adev_to_drm(adev)->vblank_disable_immediate = false;
4333 		}
4334 
4335 
4336 	}
4337 
4338 	/* Software is initialized. Now we can register interrupt handlers. */
4339 	switch (adev->asic_type) {
4340 #if defined(CONFIG_DRM_AMD_DC_SI)
4341 	case CHIP_TAHITI:
4342 	case CHIP_PITCAIRN:
4343 	case CHIP_VERDE:
4344 	case CHIP_OLAND:
4345 		if (dce60_register_irq_handlers(dm->adev)) {
4346 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4347 			goto fail;
4348 		}
4349 		break;
4350 #endif
4351 	case CHIP_BONAIRE:
4352 	case CHIP_HAWAII:
4353 	case CHIP_KAVERI:
4354 	case CHIP_KABINI:
4355 	case CHIP_MULLINS:
4356 	case CHIP_TONGA:
4357 	case CHIP_FIJI:
4358 	case CHIP_CARRIZO:
4359 	case CHIP_STONEY:
4360 	case CHIP_POLARIS11:
4361 	case CHIP_POLARIS10:
4362 	case CHIP_POLARIS12:
4363 	case CHIP_VEGAM:
4364 	case CHIP_VEGA10:
4365 	case CHIP_VEGA12:
4366 	case CHIP_VEGA20:
4367 		if (dce110_register_irq_handlers(dm->adev)) {
4368 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4369 			goto fail;
4370 		}
4371 		break;
4372 #if defined(CONFIG_DRM_AMD_DC_DCN)
4373 	case CHIP_RAVEN:
4374 	case CHIP_NAVI12:
4375 	case CHIP_NAVI10:
4376 	case CHIP_NAVI14:
4377 	case CHIP_RENOIR:
4378 	case CHIP_SIENNA_CICHLID:
4379 	case CHIP_NAVY_FLOUNDER:
4380 	case CHIP_DIMGREY_CAVEFISH:
4381 	case CHIP_BEIGE_GOBY:
4382 	case CHIP_VANGOGH:
4383 	case CHIP_YELLOW_CARP:
4384 		if (dcn10_register_irq_handlers(dm->adev)) {
4385 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4386 			goto fail;
4387 		}
4388 		break;
4389 #endif
4390 	default:
4391 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4392 		goto fail;
4393 	}
4394 
4395 	return 0;
4396 fail:
4397 	kfree(aencoder);
4398 	kfree(aconnector);
4399 
4400 	return -EINVAL;
4401 }
4402 
4403 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4404 {
4405 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4406 	return;
4407 }
4408 
4409 /******************************************************************************
4410  * amdgpu_display_funcs functions
4411  *****************************************************************************/
4412 
4413 /*
4414  * dm_bandwidth_update - program display watermarks
4415  *
4416  * @adev: amdgpu_device pointer
4417  *
4418  * Calculate and program the display watermarks and line buffer allocation.
4419  */
4420 static void dm_bandwidth_update(struct amdgpu_device *adev)
4421 {
4422 	/* TODO: implement later */
4423 }
4424 
4425 static const struct amdgpu_display_funcs dm_display_funcs = {
4426 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4427 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4428 	.backlight_set_level = NULL, /* never called for DC */
4429 	.backlight_get_level = NULL, /* never called for DC */
4430 	.hpd_sense = NULL,/* called unconditionally */
4431 	.hpd_set_polarity = NULL, /* called unconditionally */
4432 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4433 	.page_flip_get_scanoutpos =
4434 		dm_crtc_get_scanoutpos,/* called unconditionally */
4435 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4436 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4437 };
4438 
4439 #if defined(CONFIG_DEBUG_KERNEL_DC)
4440 
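/*
 * sysfs hook that fakes an S3 cycle for debugging: writing a non-zero value
 * resumes DM and fires a hotplug event, writing zero suspends it.
 */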
4441 static ssize_t s3_debug_store(struct device *device,
4442 			      struct device_attribute *attr,
4443 			      const char *buf,
4444 			      size_t count)
4445 {
4446 	int ret;
4447 	int s3_state;
4448 	struct drm_device *drm_dev = dev_get_drvdata(device);
4449 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4450 
4451 	ret = kstrtoint(buf, 0, &s3_state);
4452 
4453 	if (ret == 0) {
4454 		if (s3_state) {
4455 			dm_resume(adev);
4456 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4457 		} else
4458 			dm_suspend(adev);
4459 	}
4460 
4461 	return ret == 0 ? count : 0;
4462 }
4463 
4464 DEVICE_ATTR_WO(s3_debug);
4465 
4466 #endif
4467 
4468 static int dm_early_init(void *handle)
4469 {
4470 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4471 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4472 	struct atom_context *ctx = mode_info->atom_context;
4473 	int index = GetIndexIntoMasterTable(DATA, Object_Header);
4474 	u16 data_offset;
4475 
4476 	/* if there is no object header, skip DM */
4477 	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
4478 		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
4479 		dev_info(adev->dev, "No object header, skipping DM\n");
4480 		return -ENOENT;
4481 	}
4482 
4483 	switch (adev->asic_type) {
4484 #if defined(CONFIG_DRM_AMD_DC_SI)
4485 	case CHIP_TAHITI:
4486 	case CHIP_PITCAIRN:
4487 	case CHIP_VERDE:
4488 		adev->mode_info.num_crtc = 6;
4489 		adev->mode_info.num_hpd = 6;
4490 		adev->mode_info.num_dig = 6;
4491 		break;
4492 	case CHIP_OLAND:
4493 		adev->mode_info.num_crtc = 2;
4494 		adev->mode_info.num_hpd = 2;
4495 		adev->mode_info.num_dig = 2;
4496 		break;
4497 #endif
4498 	case CHIP_BONAIRE:
4499 	case CHIP_HAWAII:
4500 		adev->mode_info.num_crtc = 6;
4501 		adev->mode_info.num_hpd = 6;
4502 		adev->mode_info.num_dig = 6;
4503 		break;
4504 	case CHIP_KAVERI:
4505 		adev->mode_info.num_crtc = 4;
4506 		adev->mode_info.num_hpd = 6;
4507 		adev->mode_info.num_dig = 7;
4508 		break;
4509 	case CHIP_KABINI:
4510 	case CHIP_MULLINS:
4511 		adev->mode_info.num_crtc = 2;
4512 		adev->mode_info.num_hpd = 6;
4513 		adev->mode_info.num_dig = 6;
4514 		break;
4515 	case CHIP_FIJI:
4516 	case CHIP_TONGA:
4517 		adev->mode_info.num_crtc = 6;
4518 		adev->mode_info.num_hpd = 6;
4519 		adev->mode_info.num_dig = 7;
4520 		break;
4521 	case CHIP_CARRIZO:
4522 		adev->mode_info.num_crtc = 3;
4523 		adev->mode_info.num_hpd = 6;
4524 		adev->mode_info.num_dig = 9;
4525 		break;
4526 	case CHIP_STONEY:
4527 		adev->mode_info.num_crtc = 2;
4528 		adev->mode_info.num_hpd = 6;
4529 		adev->mode_info.num_dig = 9;
4530 		break;
4531 	case CHIP_POLARIS11:
4532 	case CHIP_POLARIS12:
4533 		adev->mode_info.num_crtc = 5;
4534 		adev->mode_info.num_hpd = 5;
4535 		adev->mode_info.num_dig = 5;
4536 		break;
4537 	case CHIP_POLARIS10:
4538 	case CHIP_VEGAM:
4539 		adev->mode_info.num_crtc = 6;
4540 		adev->mode_info.num_hpd = 6;
4541 		adev->mode_info.num_dig = 6;
4542 		break;
4543 	case CHIP_VEGA10:
4544 	case CHIP_VEGA12:
4545 	case CHIP_VEGA20:
4546 		adev->mode_info.num_crtc = 6;
4547 		adev->mode_info.num_hpd = 6;
4548 		adev->mode_info.num_dig = 6;
4549 		break;
4550 #if defined(CONFIG_DRM_AMD_DC_DCN)
4551 	case CHIP_RAVEN:
4552 	case CHIP_RENOIR:
4553 	case CHIP_VANGOGH:
4554 		adev->mode_info.num_crtc = 4;
4555 		adev->mode_info.num_hpd = 4;
4556 		adev->mode_info.num_dig = 4;
4557 		break;
4558 	case CHIP_NAVI10:
4559 	case CHIP_NAVI12:
4560 	case CHIP_SIENNA_CICHLID:
4561 	case CHIP_NAVY_FLOUNDER:
4562 		adev->mode_info.num_crtc = 6;
4563 		adev->mode_info.num_hpd = 6;
4564 		adev->mode_info.num_dig = 6;
4565 		break;
4566 	case CHIP_YELLOW_CARP:
4567 		adev->mode_info.num_crtc = 4;
4568 		adev->mode_info.num_hpd = 4;
4569 		adev->mode_info.num_dig = 4;
4570 		break;
4571 	case CHIP_NAVI14:
4572 	case CHIP_DIMGREY_CAVEFISH:
4573 		adev->mode_info.num_crtc = 5;
4574 		adev->mode_info.num_hpd = 5;
4575 		adev->mode_info.num_dig = 5;
4576 		break;
4577 	case CHIP_BEIGE_GOBY:
4578 		adev->mode_info.num_crtc = 2;
4579 		adev->mode_info.num_hpd = 2;
4580 		adev->mode_info.num_dig = 2;
4581 		break;
4582 #endif
4583 	default:
4584 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4585 		return -EINVAL;
4586 	}
4587 
4588 	amdgpu_dm_set_irq_funcs(adev);
4589 
4590 	if (adev->mode_info.funcs == NULL)
4591 		adev->mode_info.funcs = &dm_display_funcs;
4592 
4593 	/*
4594 	 * Note: Do NOT change adev->audio_endpt_rreg and
4595 	 * adev->audio_endpt_wreg because they are initialised in
4596 	 * amdgpu_device_init()
4597 	 */
4598 #if defined(CONFIG_DEBUG_KERNEL_DC)
4599 	device_create_file(
4600 		adev_to_drm(adev)->dev,
4601 		&dev_attr_s3_debug);
4602 #endif
4603 
4604 	return 0;
4605 }
4606 
4607 static bool modeset_required(struct drm_crtc_state *crtc_state,
4608 			     struct dc_stream_state *new_stream,
4609 			     struct dc_stream_state *old_stream)
4610 {
4611 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4612 }
4613 
4614 static bool modereset_required(struct drm_crtc_state *crtc_state)
4615 {
4616 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4617 }
4618 
4619 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4620 {
4621 	drm_encoder_cleanup(encoder);
4622 	kfree(encoder);
4623 }
4624 
4625 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4626 	.destroy = amdgpu_dm_encoder_destroy,
4627 };
4628 
4629 
4630 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4631 					 struct drm_framebuffer *fb,
4632 					 int *min_downscale, int *max_upscale)
4633 {
4634 	struct amdgpu_device *adev = drm_to_adev(dev);
4635 	struct dc *dc = adev->dm.dc;
4636 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4637 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4638 
4639 	switch (fb->format->format) {
4640 	case DRM_FORMAT_P010:
4641 	case DRM_FORMAT_NV12:
4642 	case DRM_FORMAT_NV21:
4643 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4644 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4645 		break;
4646 
4647 	case DRM_FORMAT_XRGB16161616F:
4648 	case DRM_FORMAT_ARGB16161616F:
4649 	case DRM_FORMAT_XBGR16161616F:
4650 	case DRM_FORMAT_ABGR16161616F:
4651 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4652 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4653 		break;
4654 
4655 	default:
4656 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4657 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4658 		break;
4659 	}
4660 
4661 	/*
4662 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4663 	 * scaling factor of 1.0 == 1000 units.
4664 	 */
4665 	if (*max_upscale == 1)
4666 		*max_upscale = 1000;
4667 
4668 	if (*min_downscale == 1)
4669 		*min_downscale = 1000;
4670 }
4671 
4672 
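/*
 * Translate the DRM plane state (16.16 fixed-point source rect, integer CRTC
 * destination rect) into a dc_scaling_info, rejecting scaling ratios outside
 * the per-format caps. Ratios are expressed in units of 1/1000, so e.g. a
 * 1920-wide source scanned out at 960 pixels gives scale_w = 500.
 */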
4673 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4674 				struct dc_scaling_info *scaling_info)
4675 {
4676 	int scale_w, scale_h, min_downscale, max_upscale;
4677 
4678 	memset(scaling_info, 0, sizeof(*scaling_info));
4679 
4680 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4681 	scaling_info->src_rect.x = state->src_x >> 16;
4682 	scaling_info->src_rect.y = state->src_y >> 16;
4683 
4684 	/*
4685 	 * For reasons we don't (yet) fully understand a non-zero
4686 	 * src_y coordinate into an NV12 buffer can cause a
4687 	 * system hang. To avoid hangs (and maybe be overly cautious)
4688 	 * let's reject both non-zero src_x and src_y.
4689 	 *
4690 	 * We currently know of only one use-case to reproduce a
4691 	 * scenario with non-zero src_x and src_y for NV12, which
4692 	 * is to gesture the YouTube Android app into full screen
4693 	 * on ChromeOS.
4694 	 */
4695 	if (state->fb &&
4696 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4697 	    (scaling_info->src_rect.x != 0 ||
4698 	     scaling_info->src_rect.y != 0))
4699 		return -EINVAL;
4700 
4701 	scaling_info->src_rect.width = state->src_w >> 16;
4702 	if (scaling_info->src_rect.width == 0)
4703 		return -EINVAL;
4704 
4705 	scaling_info->src_rect.height = state->src_h >> 16;
4706 	if (scaling_info->src_rect.height == 0)
4707 		return -EINVAL;
4708 
4709 	scaling_info->dst_rect.x = state->crtc_x;
4710 	scaling_info->dst_rect.y = state->crtc_y;
4711 
4712 	if (state->crtc_w == 0)
4713 		return -EINVAL;
4714 
4715 	scaling_info->dst_rect.width = state->crtc_w;
4716 
4717 	if (state->crtc_h == 0)
4718 		return -EINVAL;
4719 
4720 	scaling_info->dst_rect.height = state->crtc_h;
4721 
4722 	/* DRM doesn't specify clipping on destination output. */
4723 	scaling_info->clip_rect = scaling_info->dst_rect;
4724 
4725 	/* Validate scaling per-format with DC plane caps */
4726 	if (state->plane && state->plane->dev && state->fb) {
4727 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4728 					     &min_downscale, &max_upscale);
4729 	} else {
4730 		min_downscale = 250;
4731 		max_upscale = 16000;
4732 	}
4733 
4734 	scale_w = scaling_info->dst_rect.width * 1000 /
4735 		  scaling_info->src_rect.width;
4736 
4737 	if (scale_w < min_downscale || scale_w > max_upscale)
4738 		return -EINVAL;
4739 
4740 	scale_h = scaling_info->dst_rect.height * 1000 /
4741 		  scaling_info->src_rect.height;
4742 
4743 	if (scale_h < min_downscale || scale_h > max_upscale)
4744 		return -EINVAL;
4745 
4746 	/*
4747 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4748 	 * assume reasonable defaults based on the format.
4749 	 */
4750 
4751 	return 0;
4752 }
4753 
4754 static void
4755 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4756 				 uint64_t tiling_flags)
4757 {
4758 	/* Fill GFX8 params */
4759 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4760 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4761 
4762 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4763 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4764 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4765 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4766 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4767 
4768 		/* XXX fix me for VI */
4769 		tiling_info->gfx8.num_banks = num_banks;
4770 		tiling_info->gfx8.array_mode =
4771 				DC_ARRAY_2D_TILED_THIN1;
4772 		tiling_info->gfx8.tile_split = tile_split;
4773 		tiling_info->gfx8.bank_width = bankw;
4774 		tiling_info->gfx8.bank_height = bankh;
4775 		tiling_info->gfx8.tile_aspect = mtaspect;
4776 		tiling_info->gfx8.tile_mode =
4777 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4778 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4779 			== DC_ARRAY_1D_TILED_THIN1) {
4780 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4781 	}
4782 
4783 	tiling_info->gfx8.pipe_config =
4784 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4785 }
4786 
4787 static void
4788 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4789 				  union dc_tiling_info *tiling_info)
4790 {
4791 	tiling_info->gfx9.num_pipes =
4792 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4793 	tiling_info->gfx9.num_banks =
4794 		adev->gfx.config.gb_addr_config_fields.num_banks;
4795 	tiling_info->gfx9.pipe_interleave =
4796 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4797 	tiling_info->gfx9.num_shader_engines =
4798 		adev->gfx.config.gb_addr_config_fields.num_se;
4799 	tiling_info->gfx9.max_compressed_frags =
4800 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4801 	tiling_info->gfx9.num_rb_per_se =
4802 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4803 	tiling_info->gfx9.shaderEnable = 1;
4804 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4805 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4806 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4807 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4808 	    adev->asic_type == CHIP_YELLOW_CARP ||
4809 	    adev->asic_type == CHIP_VANGOGH)
4810 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4811 }
4812 
4813 static int
4814 validate_dcc(struct amdgpu_device *adev,
4815 	     const enum surface_pixel_format format,
4816 	     const enum dc_rotation_angle rotation,
4817 	     const union dc_tiling_info *tiling_info,
4818 	     const struct dc_plane_dcc_param *dcc,
4819 	     const struct dc_plane_address *address,
4820 	     const struct plane_size *plane_size)
4821 {
4822 	struct dc *dc = adev->dm.dc;
4823 	struct dc_dcc_surface_param input;
4824 	struct dc_surface_dcc_cap output;
4825 
4826 	memset(&input, 0, sizeof(input));
4827 	memset(&output, 0, sizeof(output));
4828 
4829 	if (!dcc->enable)
4830 		return 0;
4831 
4832 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4833 	    !dc->cap_funcs.get_dcc_compression_cap)
4834 		return -EINVAL;
4835 
4836 	input.format = format;
4837 	input.surface_size.width = plane_size->surface_size.width;
4838 	input.surface_size.height = plane_size->surface_size.height;
4839 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4840 
4841 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4842 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4843 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4844 		input.scan = SCAN_DIRECTION_VERTICAL;
4845 
4846 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4847 		return -EINVAL;
4848 
4849 	if (!output.capable)
4850 		return -EINVAL;
4851 
4852 	if (dcc->independent_64b_blks == 0 &&
4853 	    output.grph.rgb.independent_64b_blks != 0)
4854 		return -EINVAL;
4855 
4856 	return 0;
4857 }
4858 
4859 static bool
4860 modifier_has_dcc(uint64_t modifier)
4861 {
4862 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4863 }
4864 
4865 static unsigned
4866 modifier_gfx9_swizzle_mode(uint64_t modifier)
4867 {
4868 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4869 		return 0;
4870 
4871 	return AMD_FMT_MOD_GET(TILE, modifier);
4872 }
4873 
4874 static const struct drm_format_info *
4875 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4876 {
4877 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4878 }
4879 
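/*
 * Start from the device-wide GFX9 tiling defaults, then override the
 * pipe/shader-engine/packers (or bank) counts with the values encoded in an
 * AMD format modifier; non-AMD modifiers keep the device defaults.
 */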
4880 static void
4881 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4882 				    union dc_tiling_info *tiling_info,
4883 				    uint64_t modifier)
4884 {
4885 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4886 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4887 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4888 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4889 
4890 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4891 
4892 	if (!IS_AMD_FMT_MOD(modifier))
4893 		return;
4894 
4895 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4896 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4897 
4898 	if (adev->family >= AMDGPU_FAMILY_NV) {
4899 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4900 	} else {
4901 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4902 
4903 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4904 	}
4905 }
4906 
4907 enum dm_micro_swizzle {
4908 	MICRO_SWIZZLE_Z = 0,
4909 	MICRO_SWIZZLE_S = 1,
4910 	MICRO_SWIZZLE_D = 2,
4911 	MICRO_SWIZZLE_R = 3
4912 };
4913 
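/*
 * Check whether a format + modifier pair can be scanned out: LINEAR and
 * INVALID always pass, otherwise the modifier must be in the plane's
 * modifier list and satisfy the per-family micro-tile and DCC restrictions
 * below.
 */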
4914 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4915 					  uint32_t format,
4916 					  uint64_t modifier)
4917 {
4918 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4919 	const struct drm_format_info *info = drm_format_info(format);
4920 	int i;
4921 
4922 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4923 
4924 	if (!info)
4925 		return false;
4926 
4927 	/*
4928 	 * We always have to allow these modifiers:
4929 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4930 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4931 	 */
4932 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4933 	    modifier == DRM_FORMAT_MOD_INVALID) {
4934 		return true;
4935 	}
4936 
4937 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4938 	for (i = 0; i < plane->modifier_count; i++) {
4939 		if (modifier == plane->modifiers[i])
4940 			break;
4941 	}
4942 	if (i == plane->modifier_count)
4943 		return false;
4944 
4945 	/*
4946 	 * For D swizzle the canonical modifier depends on the bpp, so check
4947 	 * it here.
4948 	 */
4949 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4950 	    adev->family >= AMDGPU_FAMILY_NV) {
4951 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4952 			return false;
4953 	}
4954 
4955 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4956 	    info->cpp[0] < 8)
4957 		return false;
4958 
4959 	if (modifier_has_dcc(modifier)) {
4960 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4961 		if (info->cpp[0] != 4)
4962 			return false;
4963 		/* We support multi-planar formats, but not when combined with
4964 		 * additional DCC metadata planes. */
4965 		if (info->num_planes > 1)
4966 			return false;
4967 	}
4968 
4969 	return true;
4970 }
4971 
4972 static void
4973 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4974 {
4975 	if (!*mods)
4976 		return;
4977 
4978 	if (*cap - *size < 1) {
4979 		uint64_t new_cap = *cap * 2;
4980 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4981 
4982 		if (!new_mods) {
4983 			kfree(*mods);
4984 			*mods = NULL;
4985 			return;
4986 		}
4987 
4988 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4989 		kfree(*mods);
4990 		*mods = new_mods;
4991 		*cap = new_cap;
4992 	}
4993 
4994 	(*mods)[*size] = mod;
4995 	*size += 1;
4996 }
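/*
 * add_modifier() doubles the array capacity on demand and sets *mods to
 * NULL on allocation failure, so callers only need one NULL check after
 * appending everything.  A minimal usage sketch, mirroring
 * get_plane_modifiers() below:
 *
 *	uint64_t *mods, size = 0, capacity = 128;
 *
 *	mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
 *	add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
 *	add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
 *	if (!mods)
 *		return -ENOMEM;
 */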
4997 
4998 static void
4999 add_gfx9_modifiers(const struct amdgpu_device *adev,
5000 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5001 {
5002 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5003 	int pipe_xor_bits = min(8, pipes +
5004 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5005 	int bank_xor_bits = min(8 - pipe_xor_bits,
5006 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5007 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5008 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5009 
5010 
5011 	if (adev->family == AMDGPU_FAMILY_RV) {
5012 		/* Raven2 and later */
5013 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5014 
5015 		/*
5016 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5017 		 * doesn't support _D on DCN
5018 		 */
5019 
5020 		if (has_constant_encode) {
5021 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5022 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5023 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5024 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5025 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5026 				    AMD_FMT_MOD_SET(DCC, 1) |
5027 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5028 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5029 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5030 		}
5031 
5032 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5033 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5034 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5035 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5036 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5037 			    AMD_FMT_MOD_SET(DCC, 1) |
5038 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5039 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5040 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5041 
5042 		if (has_constant_encode) {
5043 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5044 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5045 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5046 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5047 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5048 				    AMD_FMT_MOD_SET(DCC, 1) |
5049 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5050 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5051 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5052 
5053 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5054 				    AMD_FMT_MOD_SET(RB, rb) |
5055 				    AMD_FMT_MOD_SET(PIPE, pipes));
5056 		}
5057 
5058 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5060 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5061 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5062 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5063 			    AMD_FMT_MOD_SET(DCC, 1) |
5064 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5065 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5066 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5067 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5068 			    AMD_FMT_MOD_SET(RB, rb) |
5069 			    AMD_FMT_MOD_SET(PIPE, pipes));
5070 	}
5071 
5072 	/*
5073 	 * Only supported for 64bpp on Raven, will be filtered on format in
5074 	 * dm_plane_format_mod_supported.
5075 	 */
5076 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5077 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5078 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5079 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5080 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5081 
5082 	if (adev->family == AMDGPU_FAMILY_RV) {
5083 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5084 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5085 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5086 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5087 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5088 	}
5089 
5090 	/*
5091 	 * Only supported for 64bpp on Raven, will be filtered on format in
5092 	 * dm_plane_format_mod_supported.
5093 	 */
5094 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5095 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5096 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5097 
5098 	if (adev->family == AMDGPU_FAMILY_RV) {
5099 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5100 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5101 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5102 	}
5103 }
5104 
5105 static void
5106 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5107 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5108 {
5109 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5110 
5111 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5112 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5113 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5114 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5115 		    AMD_FMT_MOD_SET(DCC, 1) |
5116 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5117 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5118 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5119 
5120 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5121 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5122 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5123 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5124 		    AMD_FMT_MOD_SET(DCC, 1) |
5125 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5126 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5128 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5129 
5130 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5131 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5132 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5133 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5134 
5135 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5137 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5138 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5139 
5140 
5141 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5142 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5143 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5144 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5145 
5146 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5147 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5148 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5149 }
5150 
5151 static void
5152 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5153 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5154 {
5155 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5156 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5157 
5158 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5159 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5160 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5161 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5162 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5163 		    AMD_FMT_MOD_SET(DCC, 1) |
5164 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5165 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5166 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5167 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5168 
5169 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5170 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5171 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5172 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5173 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5174 		    AMD_FMT_MOD_SET(DCC, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5178 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5179 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5180 
5181 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5183 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5184 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5185 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5186 
5187 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5188 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5189 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5190 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5191 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5192 
5193 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5194 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5195 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5196 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5197 
5198 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5199 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5200 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5201 }
5202 
5203 static int
5204 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5205 {
5206 	uint64_t size = 0, capacity = 128;
5207 	*mods = NULL;
5208 
5209 	/* We have not hooked up any pre-GFX9 modifiers. */
5210 	if (adev->family < AMDGPU_FAMILY_AI)
5211 		return 0;
5212 
5213 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5214 
5215 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5216 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5217 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5218 		return *mods ? 0 : -ENOMEM;
5219 	}
5220 
5221 	switch (adev->family) {
5222 	case AMDGPU_FAMILY_AI:
5223 	case AMDGPU_FAMILY_RV:
5224 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5225 		break;
5226 	case AMDGPU_FAMILY_NV:
5227 	case AMDGPU_FAMILY_VGH:
5228 	case AMDGPU_FAMILY_YC:
5229 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
5230 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5231 		else
5232 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5233 		break;
5234 	}
5235 
5236 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5237 
5238 	/* INVALID marks the end of the list. */
5239 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5240 
5241 	if (!*mods)
5242 		return -ENOMEM;
5243 
5244 	return 0;
5245 }
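/*
 * The array built here is terminated by DRM_FORMAT_MOD_INVALID, the
 * sentinel DRM core expects for the format-modifier list passed at plane
 * init time, and is roughly ordered from most to least preferred.
 * Cursor planes only ever advertise LINEAR.
 */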
5246 
5247 static int
5248 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5249 					  const struct amdgpu_framebuffer *afb,
5250 					  const enum surface_pixel_format format,
5251 					  const enum dc_rotation_angle rotation,
5252 					  const struct plane_size *plane_size,
5253 					  union dc_tiling_info *tiling_info,
5254 					  struct dc_plane_dcc_param *dcc,
5255 					  struct dc_plane_address *address,
5256 					  const bool force_disable_dcc)
5257 {
5258 	const uint64_t modifier = afb->base.modifier;
5259 	int ret = 0;
5260 
5261 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5262 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5263 
5264 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5265 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5266 
5267 		dcc->enable = 1;
5268 		dcc->meta_pitch = afb->base.pitches[1];
5269 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5270 
5271 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5272 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5273 	}
5274 
5275 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5276 	if (ret)
5277 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5278 
5279 	return ret;
5280 }
5281 
5282 static int
5283 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5284 			     const struct amdgpu_framebuffer *afb,
5285 			     const enum surface_pixel_format format,
5286 			     const enum dc_rotation_angle rotation,
5287 			     const uint64_t tiling_flags,
5288 			     union dc_tiling_info *tiling_info,
5289 			     struct plane_size *plane_size,
5290 			     struct dc_plane_dcc_param *dcc,
5291 			     struct dc_plane_address *address,
5292 			     bool tmz_surface,
5293 			     bool force_disable_dcc)
5294 {
5295 	const struct drm_framebuffer *fb = &afb->base;
5296 	int ret;
5297 
5298 	memset(tiling_info, 0, sizeof(*tiling_info));
5299 	memset(plane_size, 0, sizeof(*plane_size));
5300 	memset(dcc, 0, sizeof(*dcc));
5301 	memset(address, 0, sizeof(*address));
5302 
5303 	address->tmz_surface = tmz_surface;
5304 
5305 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5306 		uint64_t addr = afb->address + fb->offsets[0];
5307 
5308 		plane_size->surface_size.x = 0;
5309 		plane_size->surface_size.y = 0;
5310 		plane_size->surface_size.width = fb->width;
5311 		plane_size->surface_size.height = fb->height;
5312 		plane_size->surface_pitch =
5313 			fb->pitches[0] / fb->format->cpp[0];
5314 
5315 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5316 		address->grph.addr.low_part = lower_32_bits(addr);
5317 		address->grph.addr.high_part = upper_32_bits(addr);
5318 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5319 		uint64_t luma_addr = afb->address + fb->offsets[0];
5320 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5321 
5322 		plane_size->surface_size.x = 0;
5323 		plane_size->surface_size.y = 0;
5324 		plane_size->surface_size.width = fb->width;
5325 		plane_size->surface_size.height = fb->height;
5326 		plane_size->surface_pitch =
5327 			fb->pitches[0] / fb->format->cpp[0];
5328 
5329 		plane_size->chroma_size.x = 0;
5330 		plane_size->chroma_size.y = 0;
5331 		/* TODO: set these based on surface format */
5332 		plane_size->chroma_size.width = fb->width / 2;
5333 		plane_size->chroma_size.height = fb->height / 2;
5334 
5335 		plane_size->chroma_pitch =
5336 			fb->pitches[1] / fb->format->cpp[1];
5337 
5338 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5339 		address->video_progressive.luma_addr.low_part =
5340 			lower_32_bits(luma_addr);
5341 		address->video_progressive.luma_addr.high_part =
5342 			upper_32_bits(luma_addr);
5343 		address->video_progressive.chroma_addr.low_part =
5344 			lower_32_bits(chroma_addr);
5345 		address->video_progressive.chroma_addr.high_part =
5346 			upper_32_bits(chroma_addr);
5347 	}
5348 
5349 	if (adev->family >= AMDGPU_FAMILY_AI) {
5350 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5351 								rotation, plane_size,
5352 								tiling_info, dcc,
5353 								address,
5354 								force_disable_dcc);
5355 		if (ret)
5356 			return ret;
5357 	} else {
5358 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5359 	}
5360 
5361 	return 0;
5362 }
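/*
 * Example with illustrative numbers: for a 1920x1080 NV12 framebuffer the
 * luma plane uses offsets[0]/pitches[0] with cpp 1, while the interleaved
 * CbCr plane sits at offsets[1]/pitches[1] with cpp 2, so the chroma
 * surface above comes out as 960x540 with chroma_pitch = pitches[1] / 2
 * in pixels.
 */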
5363 
5364 static void
5365 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5366 			       bool *per_pixel_alpha, bool *global_alpha,
5367 			       int *global_alpha_value)
5368 {
5369 	*per_pixel_alpha = false;
5370 	*global_alpha = false;
5371 	*global_alpha_value = 0xff;
5372 
5373 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5374 		return;
5375 
5376 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5377 		static const uint32_t alpha_formats[] = {
5378 			DRM_FORMAT_ARGB8888,
5379 			DRM_FORMAT_RGBA8888,
5380 			DRM_FORMAT_ABGR8888,
5381 		};
5382 		uint32_t format = plane_state->fb->format->format;
5383 		unsigned int i;
5384 
5385 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5386 			if (format == alpha_formats[i]) {
5387 				*per_pixel_alpha = true;
5388 				break;
5389 			}
5390 		}
5391 	}
5392 
5393 	if (plane_state->alpha < 0xffff) {
5394 		*global_alpha = true;
5395 		*global_alpha_value = plane_state->alpha >> 8;
5396 	}
5397 }
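/*
 * DRM plane alpha is 16 bits wide (0x0000..0xffff) while DC takes an
 * 8-bit global alpha, hence the >> 8 above: e.g. a requested alpha of
 * 0x8000 ends up as a global_alpha_value of 0x80 (roughly 50%).
 */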
5398 
5399 static int
5400 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5401 			    const enum surface_pixel_format format,
5402 			    enum dc_color_space *color_space)
5403 {
5404 	bool full_range;
5405 
5406 	*color_space = COLOR_SPACE_SRGB;
5407 
5408 	/* DRM color properties only affect non-RGB formats. */
5409 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5410 		return 0;
5411 
5412 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5413 
5414 	switch (plane_state->color_encoding) {
5415 	case DRM_COLOR_YCBCR_BT601:
5416 		if (full_range)
5417 			*color_space = COLOR_SPACE_YCBCR601;
5418 		else
5419 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5420 		break;
5421 
5422 	case DRM_COLOR_YCBCR_BT709:
5423 		if (full_range)
5424 			*color_space = COLOR_SPACE_YCBCR709;
5425 		else
5426 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5427 		break;
5428 
5429 	case DRM_COLOR_YCBCR_BT2020:
5430 		if (full_range)
5431 			*color_space = COLOR_SPACE_2020_YCBCR;
5432 		else
5433 			return -EINVAL;
5434 		break;
5435 
5436 	default:
5437 		return -EINVAL;
5438 	}
5439 
5440 	return 0;
5441 }
5442 
5443 static int
5444 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5445 			    const struct drm_plane_state *plane_state,
5446 			    const uint64_t tiling_flags,
5447 			    struct dc_plane_info *plane_info,
5448 			    struct dc_plane_address *address,
5449 			    bool tmz_surface,
5450 			    bool force_disable_dcc)
5451 {
5452 	const struct drm_framebuffer *fb = plane_state->fb;
5453 	const struct amdgpu_framebuffer *afb =
5454 		to_amdgpu_framebuffer(plane_state->fb);
5455 	int ret;
5456 
5457 	memset(plane_info, 0, sizeof(*plane_info));
5458 
5459 	switch (fb->format->format) {
5460 	case DRM_FORMAT_C8:
5461 		plane_info->format =
5462 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5463 		break;
5464 	case DRM_FORMAT_RGB565:
5465 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5466 		break;
5467 	case DRM_FORMAT_XRGB8888:
5468 	case DRM_FORMAT_ARGB8888:
5469 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5470 		break;
5471 	case DRM_FORMAT_XRGB2101010:
5472 	case DRM_FORMAT_ARGB2101010:
5473 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5474 		break;
5475 	case DRM_FORMAT_XBGR2101010:
5476 	case DRM_FORMAT_ABGR2101010:
5477 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5478 		break;
5479 	case DRM_FORMAT_XBGR8888:
5480 	case DRM_FORMAT_ABGR8888:
5481 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5482 		break;
5483 	case DRM_FORMAT_NV21:
5484 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5485 		break;
5486 	case DRM_FORMAT_NV12:
5487 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5488 		break;
5489 	case DRM_FORMAT_P010:
5490 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5491 		break;
5492 	case DRM_FORMAT_XRGB16161616F:
5493 	case DRM_FORMAT_ARGB16161616F:
5494 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5495 		break;
5496 	case DRM_FORMAT_XBGR16161616F:
5497 	case DRM_FORMAT_ABGR16161616F:
5498 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5499 		break;
5500 	case DRM_FORMAT_XRGB16161616:
5501 	case DRM_FORMAT_ARGB16161616:
5502 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5503 		break;
5504 	case DRM_FORMAT_XBGR16161616:
5505 	case DRM_FORMAT_ABGR16161616:
5506 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5507 		break;
5508 	default:
5509 		DRM_ERROR(
5510 			"Unsupported screen format %p4cc\n",
5511 			&fb->format->format);
5512 		return -EINVAL;
5513 	}
5514 
5515 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5516 	case DRM_MODE_ROTATE_0:
5517 		plane_info->rotation = ROTATION_ANGLE_0;
5518 		break;
5519 	case DRM_MODE_ROTATE_90:
5520 		plane_info->rotation = ROTATION_ANGLE_90;
5521 		break;
5522 	case DRM_MODE_ROTATE_180:
5523 		plane_info->rotation = ROTATION_ANGLE_180;
5524 		break;
5525 	case DRM_MODE_ROTATE_270:
5526 		plane_info->rotation = ROTATION_ANGLE_270;
5527 		break;
5528 	default:
5529 		plane_info->rotation = ROTATION_ANGLE_0;
5530 		break;
5531 	}
5532 
5533 	plane_info->visible = true;
5534 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5535 
5536 	plane_info->layer_index = plane_state->normalized_zpos;
5537 
5538 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5539 					  &plane_info->color_space);
5540 	if (ret)
5541 		return ret;
5542 
5543 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5544 					   plane_info->rotation, tiling_flags,
5545 					   &plane_info->tiling_info,
5546 					   &plane_info->plane_size,
5547 					   &plane_info->dcc, address, tmz_surface,
5548 					   force_disable_dcc);
5549 	if (ret)
5550 		return ret;
5551 
5552 	fill_blending_from_plane_state(
5553 		plane_state, &plane_info->per_pixel_alpha,
5554 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5555 
5556 	return 0;
5557 }
5558 
5559 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5560 				    struct dc_plane_state *dc_plane_state,
5561 				    struct drm_plane_state *plane_state,
5562 				    struct drm_crtc_state *crtc_state)
5563 {
5564 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5565 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5566 	struct dc_scaling_info scaling_info;
5567 	struct dc_plane_info plane_info;
5568 	int ret;
5569 	bool force_disable_dcc = false;
5570 
5571 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5572 	if (ret)
5573 		return ret;
5574 
5575 	dc_plane_state->src_rect = scaling_info.src_rect;
5576 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5577 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5578 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5579 
5580 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5581 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5582 					  afb->tiling_flags,
5583 					  &plane_info,
5584 					  &dc_plane_state->address,
5585 					  afb->tmz_surface,
5586 					  force_disable_dcc);
5587 	if (ret)
5588 		return ret;
5589 
5590 	dc_plane_state->format = plane_info.format;
5591 	dc_plane_state->color_space = plane_info.color_space;
5592 	dc_plane_state->format = plane_info.format;
5593 	dc_plane_state->plane_size = plane_info.plane_size;
5594 	dc_plane_state->rotation = plane_info.rotation;
5595 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5596 	dc_plane_state->stereo_format = plane_info.stereo_format;
5597 	dc_plane_state->tiling_info = plane_info.tiling_info;
5598 	dc_plane_state->visible = plane_info.visible;
5599 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5600 	dc_plane_state->global_alpha = plane_info.global_alpha;
5601 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5602 	dc_plane_state->dcc = plane_info.dcc;
5603 	dc_plane_state->layer_index = plane_info.layer_index;
5604 	dc_plane_state->flip_int_enabled = true;
5605 
5606 	/*
5607 	 * Always set input transfer function, since plane state is refreshed
5608 	 * every time.
5609 	 */
5610 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5611 	if (ret)
5612 		return ret;
5613 
5614 	return 0;
5615 }
5616 
5617 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5618 					   const struct dm_connector_state *dm_state,
5619 					   struct dc_stream_state *stream)
5620 {
5621 	enum amdgpu_rmx_type rmx_type;
5622 
5623 	struct rect src = { 0 }; /* viewport in composition space */
5624 	struct rect dst = { 0 }; /* stream addressable area */
5625 
5626 	/* no mode. nothing to be done */
5627 	if (!mode)
5628 		return;
5629 
5630 	/* Full screen scaling by default */
5631 	src.width = mode->hdisplay;
5632 	src.height = mode->vdisplay;
5633 	dst.width = stream->timing.h_addressable;
5634 	dst.height = stream->timing.v_addressable;
5635 
5636 	if (dm_state) {
5637 		rmx_type = dm_state->scaling;
5638 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5639 			if (src.width * dst.height <
5640 					src.height * dst.width) {
5641 				/* height needs less upscaling/more downscaling */
5642 				dst.width = src.width *
5643 						dst.height / src.height;
5644 			} else {
5645 				/* width needs less upscaling/more downscaling */
5646 				dst.height = src.height *
5647 						dst.width / src.width;
5648 			}
5649 		} else if (rmx_type == RMX_CENTER) {
5650 			dst = src;
5651 		}
5652 
5653 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5654 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5655 
5656 		if (dm_state->underscan_enable) {
5657 			dst.x += dm_state->underscan_hborder / 2;
5658 			dst.y += dm_state->underscan_vborder / 2;
5659 			dst.width -= dm_state->underscan_hborder;
5660 			dst.height -= dm_state->underscan_vborder;
5661 		}
5662 	}
5663 
5664 	stream->src = src;
5665 	stream->dst = dst;
5666 
5667 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5668 		      dst.x, dst.y, dst.width, dst.height);
5669 
5670 }
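/*
 * Worked example with illustrative numbers: scaling a 1280x720 mode onto
 * a 1920x1200 addressable timing with RMX_ASPECT keeps the full width
 * (1280 * 1200 >= 720 * 1920), so dst becomes 1920x1080 centred at
 * y = (1200 - 1080) / 2 = 60, i.e. an aspect-preserving letterbox.
 */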
5671 
5672 static enum dc_color_depth
5673 convert_color_depth_from_display_info(const struct drm_connector *connector,
5674 				      bool is_y420, int requested_bpc)
5675 {
5676 	uint8_t bpc;
5677 
5678 	if (is_y420) {
5679 		bpc = 8;
5680 
5681 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5682 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5683 			bpc = 16;
5684 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5685 			bpc = 12;
5686 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5687 			bpc = 10;
5688 	} else {
5689 		bpc = (uint8_t)connector->display_info.bpc;
5690 		/* Assume 8 bpc by default if no bpc is specified. */
5691 		bpc = bpc ? bpc : 8;
5692 	}
5693 
5694 	if (requested_bpc > 0) {
5695 		/*
5696 		 * Cap display bpc based on the user requested value.
5697 		 *
5698 	 * The value for state->max_bpc may not be correctly updated
5699 		 * depending on when the connector gets added to the state
5700 		 * or if this was called outside of atomic check, so it
5701 		 * can't be used directly.
5702 		 */
5703 		bpc = min_t(u8, bpc, requested_bpc);
5704 
5705 		/* Round down to the nearest even number. */
5706 		bpc = bpc - (bpc & 1);
5707 	}
5708 
5709 	switch (bpc) {
5710 	case 0:
5711 		/*
5712 		 * Temporary workaround: DRM doesn't parse color depth for
5713 		 * EDID revisions before 1.4
5714 		 * TODO: Fix edid parsing
5715 		 */
5716 		return COLOR_DEPTH_888;
5717 	case 6:
5718 		return COLOR_DEPTH_666;
5719 	case 8:
5720 		return COLOR_DEPTH_888;
5721 	case 10:
5722 		return COLOR_DEPTH_101010;
5723 	case 12:
5724 		return COLOR_DEPTH_121212;
5725 	case 14:
5726 		return COLOR_DEPTH_141414;
5727 	case 16:
5728 		return COLOR_DEPTH_161616;
5729 	default:
5730 		return COLOR_DEPTH_UNDEFINED;
5731 	}
5732 }
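/*
 * Example: a sink advertising 12 bpc combined with a userspace max bpc of
 * 10 yields min(12, 10) = 10 and therefore COLOR_DEPTH_101010; odd
 * requested values are rounded down to the next even bpc first.
 */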
5733 
5734 static enum dc_aspect_ratio
5735 get_aspect_ratio(const struct drm_display_mode *mode_in)
5736 {
5737 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5738 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5739 }
5740 
5741 static enum dc_color_space
5742 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5743 {
5744 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5745 
5746 	switch (dc_crtc_timing->pixel_encoding)	{
5747 	case PIXEL_ENCODING_YCBCR422:
5748 	case PIXEL_ENCODING_YCBCR444:
5749 	case PIXEL_ENCODING_YCBCR420:
5750 	{
5751 		/*
5752 		 * 27030 kHz is the separation point between HDTV and SDTV
5753 		 * according to the HDMI spec; we use YCbCr709 above it and
5754 		 * YCbCr601 below it.
5755 		 */
5756 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5757 			if (dc_crtc_timing->flags.Y_ONLY)
5758 				color_space =
5759 					COLOR_SPACE_YCBCR709_LIMITED;
5760 			else
5761 				color_space = COLOR_SPACE_YCBCR709;
5762 		} else {
5763 			if (dc_crtc_timing->flags.Y_ONLY)
5764 				color_space =
5765 					COLOR_SPACE_YCBCR601_LIMITED;
5766 			else
5767 				color_space = COLOR_SPACE_YCBCR601;
5768 		}
5769 
5770 	}
5771 	break;
5772 	case PIXEL_ENCODING_RGB:
5773 		color_space = COLOR_SPACE_SRGB;
5774 		break;
5775 
5776 	default:
5777 		WARN_ON(1);
5778 		break;
5779 	}
5780 
5781 	return color_space;
5782 }
5783 
5784 static bool adjust_colour_depth_from_display_info(
5785 	struct dc_crtc_timing *timing_out,
5786 	const struct drm_display_info *info)
5787 {
5788 	enum dc_color_depth depth = timing_out->display_color_depth;
5789 	int normalized_clk;
5790 	do {
5791 		normalized_clk = timing_out->pix_clk_100hz / 10;
5792 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5793 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5794 			normalized_clk /= 2;
5795 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5796 		switch (depth) {
5797 		case COLOR_DEPTH_888:
5798 			break;
5799 		case COLOR_DEPTH_101010:
5800 			normalized_clk = (normalized_clk * 30) / 24;
5801 			break;
5802 		case COLOR_DEPTH_121212:
5803 			normalized_clk = (normalized_clk * 36) / 24;
5804 			break;
5805 		case COLOR_DEPTH_161616:
5806 			normalized_clk = (normalized_clk * 48) / 24;
5807 			break;
5808 		default:
5809 			/* The above depths are the only ones valid for HDMI. */
5810 			return false;
5811 		}
5812 		if (normalized_clk <= info->max_tmds_clock) {
5813 			timing_out->display_color_depth = depth;
5814 			return true;
5815 		}
5816 	} while (--depth > COLOR_DEPTH_666);
5817 	return false;
5818 }
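/*
 * Rough example with illustrative numbers: 4k60 RGB has a 594000 kHz
 * pixel clock in the units used here.  At 10 bpc the normalized clock is
 * 594000 * 30 / 24 = 742500 kHz, which exceeds a 600000 kHz
 * max_tmds_clock, so the loop falls back to 8 bpc (594000 kHz) and
 * succeeds.
 */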
5819 
5820 static void fill_stream_properties_from_drm_display_mode(
5821 	struct dc_stream_state *stream,
5822 	const struct drm_display_mode *mode_in,
5823 	const struct drm_connector *connector,
5824 	const struct drm_connector_state *connector_state,
5825 	const struct dc_stream_state *old_stream,
5826 	int requested_bpc)
5827 {
5828 	struct dc_crtc_timing *timing_out = &stream->timing;
5829 	const struct drm_display_info *info = &connector->display_info;
5830 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5831 	struct hdmi_vendor_infoframe hv_frame;
5832 	struct hdmi_avi_infoframe avi_frame;
5833 
5834 	memset(&hv_frame, 0, sizeof(hv_frame));
5835 	memset(&avi_frame, 0, sizeof(avi_frame));
5836 
5837 	timing_out->h_border_left = 0;
5838 	timing_out->h_border_right = 0;
5839 	timing_out->v_border_top = 0;
5840 	timing_out->v_border_bottom = 0;
5841 	/* TODO: un-hardcode */
5842 	if (drm_mode_is_420_only(info, mode_in)
5843 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5844 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5845 	else if (drm_mode_is_420_also(info, mode_in)
5846 			&& aconnector->force_yuv420_output)
5847 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5848 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5849 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5850 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5851 	else
5852 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5853 
5854 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5855 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5856 		connector,
5857 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5858 		requested_bpc);
5859 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5860 	timing_out->hdmi_vic = 0;
5861 
5862 	if (old_stream) {
5863 		timing_out->vic = old_stream->timing.vic;
5864 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5865 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5866 	} else {
5867 		timing_out->vic = drm_match_cea_mode(mode_in);
5868 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5869 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5870 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5871 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5872 	}
5873 
5874 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5875 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5876 		timing_out->vic = avi_frame.video_code;
5877 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5878 		timing_out->hdmi_vic = hv_frame.vic;
5879 	}
5880 
5881 	if (is_freesync_video_mode(mode_in, aconnector)) {
5882 		timing_out->h_addressable = mode_in->hdisplay;
5883 		timing_out->h_total = mode_in->htotal;
5884 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5885 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5886 		timing_out->v_total = mode_in->vtotal;
5887 		timing_out->v_addressable = mode_in->vdisplay;
5888 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5889 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5890 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5891 	} else {
5892 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5893 		timing_out->h_total = mode_in->crtc_htotal;
5894 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5895 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5896 		timing_out->v_total = mode_in->crtc_vtotal;
5897 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5898 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5899 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5900 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5901 	}
5902 
5903 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5904 
5905 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5906 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5907 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5908 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5909 		    drm_mode_is_420_also(info, mode_in) &&
5910 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5911 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5912 			adjust_colour_depth_from_display_info(timing_out, info);
5913 		}
5914 	}
5915 
5916 	stream->output_color_space = get_output_color_space(timing_out);
5917 }
5918 
5919 static void fill_audio_info(struct audio_info *audio_info,
5920 			    const struct drm_connector *drm_connector,
5921 			    const struct dc_sink *dc_sink)
5922 {
5923 	int i = 0;
5924 	int cea_revision = 0;
5925 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5926 
5927 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5928 	audio_info->product_id = edid_caps->product_id;
5929 
5930 	cea_revision = drm_connector->display_info.cea_rev;
5931 
5932 	strscpy(audio_info->display_name,
5933 		edid_caps->display_name,
5934 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5935 
5936 	if (cea_revision >= 3) {
5937 		audio_info->mode_count = edid_caps->audio_mode_count;
5938 
5939 		for (i = 0; i < audio_info->mode_count; ++i) {
5940 			audio_info->modes[i].format_code =
5941 					(enum audio_format_code)
5942 					(edid_caps->audio_modes[i].format_code);
5943 			audio_info->modes[i].channel_count =
5944 					edid_caps->audio_modes[i].channel_count;
5945 			audio_info->modes[i].sample_rates.all =
5946 					edid_caps->audio_modes[i].sample_rate;
5947 			audio_info->modes[i].sample_size =
5948 					edid_caps->audio_modes[i].sample_size;
5949 		}
5950 	}
5951 
5952 	audio_info->flags.all = edid_caps->speaker_flags;
5953 
5954 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5955 	if (drm_connector->latency_present[0]) {
5956 		audio_info->video_latency = drm_connector->video_latency[0];
5957 		audio_info->audio_latency = drm_connector->audio_latency[0];
5958 	}
5959 
5960 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5961 
5962 }
5963 
5964 static void
5965 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5966 				      struct drm_display_mode *dst_mode)
5967 {
5968 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5969 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5970 	dst_mode->crtc_clock = src_mode->crtc_clock;
5971 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5972 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5973 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5974 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5975 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5976 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5977 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5978 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5979 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5980 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5981 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5982 }
5983 
5984 static void
5985 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5986 					const struct drm_display_mode *native_mode,
5987 					bool scale_enabled)
5988 {
5989 	if (scale_enabled) {
5990 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5991 	} else if (native_mode->clock == drm_mode->clock &&
5992 			native_mode->htotal == drm_mode->htotal &&
5993 			native_mode->vtotal == drm_mode->vtotal) {
5994 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5995 	} else {
5996 		/* no scaling and no amdgpu-inserted mode, no need to patch */
5997 	}
5998 }
5999 
6000 static struct dc_sink *
6001 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6002 {
6003 	struct dc_sink_init_data sink_init_data = { 0 };
6004 	struct dc_sink *sink = NULL;
6005 	sink_init_data.link = aconnector->dc_link;
6006 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6007 
6008 	sink = dc_sink_create(&sink_init_data);
6009 	if (!sink) {
6010 		DRM_ERROR("Failed to create sink!\n");
6011 		return NULL;
6012 	}
6013 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6014 
6015 	return sink;
6016 }
6017 
6018 static void set_multisync_trigger_params(
6019 		struct dc_stream_state *stream)
6020 {
6021 	struct dc_stream_state *master = NULL;
6022 
6023 	if (stream->triggered_crtc_reset.enabled) {
6024 		master = stream->triggered_crtc_reset.event_source;
6025 		stream->triggered_crtc_reset.event =
6026 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6027 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6028 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6029 	}
6030 }
6031 
6032 static void set_master_stream(struct dc_stream_state *stream_set[],
6033 			      int stream_count)
6034 {
6035 	int j, highest_rfr = 0, master_stream = 0;
6036 
6037 	for (j = 0;  j < stream_count; j++) {
6038 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6039 			int refresh_rate = 0;
6040 
6041 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6042 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6043 			if (refresh_rate > highest_rfr) {
6044 				highest_rfr = refresh_rate;
6045 				master_stream = j;
6046 			}
6047 		}
6048 	}
6049 	for (j = 0;  j < stream_count; j++) {
6050 		if (stream_set[j])
6051 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6052 	}
6053 }
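/*
 * The refresh rate above is pix_clk_100hz * 100 / (h_total * v_total):
 * e.g. a 1080p60 timing (148.5 MHz, 2200 x 1125) gives
 * 148500000 / 2475000 = 60 Hz.  The stream with the highest result
 * becomes the reset event source for every other multisync stream.
 */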
6054 
6055 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6056 {
6057 	int i = 0;
6058 	struct dc_stream_state *stream;
6059 
6060 	if (context->stream_count < 2)
6061 		return;
6062 	for (i = 0; i < context->stream_count ; i++) {
6063 		if (!context->streams[i])
6064 			continue;
6065 		/*
6066 		 * TODO: add a function to read AMD VSDB bits and set
6067 		 * crtc_sync_master.multi_sync_enabled flag
6068 		 * For now it's set to false
6069 		 */
6070 	}
6071 
6072 	set_master_stream(context->streams, context->stream_count);
6073 
6074 	for (i = 0; i < context->stream_count ; i++) {
6075 		stream = context->streams[i];
6076 
6077 		if (!stream)
6078 			continue;
6079 
6080 		set_multisync_trigger_params(stream);
6081 	}
6082 }
6083 
6084 #if defined(CONFIG_DRM_AMD_DC_DCN)
6085 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6086 							struct dc_sink *sink, struct dc_stream_state *stream,
6087 							struct dsc_dec_dpcd_caps *dsc_caps)
6088 {
6089 	stream->timing.flags.DSC = 0;
6090 	dsc_caps->is_dsc_supported = false;
6091 
6092 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6093 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6094 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6095 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6096 				      dsc_caps);
6097 	}
6098 }
6099 
6100 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6101 										struct dc_sink *sink, struct dc_stream_state *stream,
6102 										struct dsc_dec_dpcd_caps *dsc_caps)
6103 {
6104 	struct drm_connector *drm_connector = &aconnector->base;
6105 	uint32_t link_bandwidth_kbps;
6106 
6107 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6108 							dc_link_get_link_cap(aconnector->dc_link));
6109 	/* Set DSC policy according to dsc_clock_en */
6110 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6111 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6112 
6113 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6114 
6115 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6116 						dsc_caps,
6117 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6118 						0,
6119 						link_bandwidth_kbps,
6120 						&stream->timing,
6121 						&stream->timing.dsc_cfg)) {
6122 			stream->timing.flags.DSC = 1;
6123 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6124 		}
6125 	}
6126 
6127 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6128 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6129 		stream->timing.flags.DSC = 1;
6130 
6131 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6132 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6133 
6134 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6135 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6136 
6137 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6138 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6139 }
6140 #endif
6141 
6142 /**
6143  * DOC: FreeSync Video
6144  *
6145  * When a userspace application wants to play a video, the content follows a
6146  * standard format definition that usually specifies the FPS for that format.
6147  * The list below illustrates some common video formats and their
6148  * expected FPS:
6149  *
6150  * - TV/NTSC (23.976 FPS)
6151  * - Cinema (24 FPS)
6152  * - TV/PAL (25 FPS)
6153  * - TV/NTSC (29.97 FPS)
6154  * - TV/NTSC (30 FPS)
6155  * - Cinema HFR (48 FPS)
6156  * - TV/PAL (50 FPS)
6157  * - Commonly used (60 FPS)
6158  * - Multiples of 24 (48,72,96 FPS)
6159  *
6160  * The list of standard video formats is not huge and can be added to the
6161  * connector mode list beforehand. With that, userspace can leverage
6162  * FreeSync to extend the front porch in order to attain the target refresh
6163  * rate. Such a switch will happen seamlessly, without screen blanking or
6164  * reprogramming of the output in any other way. If the userspace requests a
6165  * modesetting change compatible with FreeSync modes that only differ in the
6166  * refresh rate, DC will skip the full update and avoid blink during the
6167  * transition. For example, the video player can change the modesetting from
6168  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6169  * causing any display blink. This same concept can be applied to a mode
6170  * setting change.
6171  */
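/*
 * Rough numeric example of the front porch stretch described above: a
 * 1080p60 base timing with v_total = 1125 can present 48 FPS content by
 * growing the vertical front porch until v_total is about
 * 1125 * 60 / 48 = 1406 lines, while the pixel clock and all other
 * timing parameters stay untouched.
 */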
6172 static struct drm_display_mode *
6173 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6174 			  bool use_probed_modes)
6175 {
6176 	struct drm_display_mode *m, *m_pref = NULL;
6177 	u16 current_refresh, highest_refresh;
6178 	struct list_head *list_head = use_probed_modes ?
6179 						    &aconnector->base.probed_modes :
6180 						    &aconnector->base.modes;
6181 
6182 	if (aconnector->freesync_vid_base.clock != 0)
6183 		return &aconnector->freesync_vid_base;
6184 
6185 	/* Find the preferred mode */
6186 	list_for_each_entry (m, list_head, head) {
6187 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6188 			m_pref = m;
6189 			break;
6190 		}
6191 	}
6192 
6193 	if (!m_pref) {
6194 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6195 		m_pref = list_first_entry_or_null(
6196 			&aconnector->base.modes, struct drm_display_mode, head);
6197 		if (!m_pref) {
6198 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6199 			return NULL;
6200 		}
6201 	}
6202 
6203 	highest_refresh = drm_mode_vrefresh(m_pref);
6204 
6205 	/*
6206 	 * Find the mode with highest refresh rate with same resolution.
6207 	 * For some monitors, preferred mode is not the mode with highest
6208 	 * supported refresh rate.
6209 	 */
6210 	list_for_each_entry (m, list_head, head) {
6211 		current_refresh  = drm_mode_vrefresh(m);
6212 
6213 		if (m->hdisplay == m_pref->hdisplay &&
6214 		    m->vdisplay == m_pref->vdisplay &&
6215 		    highest_refresh < current_refresh) {
6216 			highest_refresh = current_refresh;
6217 			m_pref = m;
6218 		}
6219 	}
6220 
6221 	aconnector->freesync_vid_base = *m_pref;
6222 	return m_pref;
6223 }
6224 
6225 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6226 				   struct amdgpu_dm_connector *aconnector)
6227 {
6228 	struct drm_display_mode *high_mode;
6229 	int timing_diff;
6230 
6231 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6232 	if (!high_mode || !mode)
6233 		return false;
6234 
6235 	timing_diff = high_mode->vtotal - mode->vtotal;
6236 
6237 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6238 	    high_mode->hdisplay != mode->hdisplay ||
6239 	    high_mode->vdisplay != mode->vdisplay ||
6240 	    high_mode->hsync_start != mode->hsync_start ||
6241 	    high_mode->hsync_end != mode->hsync_end ||
6242 	    high_mode->htotal != mode->htotal ||
6243 	    high_mode->hskew != mode->hskew ||
6244 	    high_mode->vscan != mode->vscan ||
6245 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6246 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6247 		return false;
6248 	else
6249 		return true;
6250 }
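/*
 * In other words, a mode only qualifies as a FreeSync video mode when it
 * matches the stored base mode in every timing field except v_total and
 * the vsync position, i.e. the two may differ only in the length of the
 * vertical front porch.
 */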
6251 
6252 static struct dc_stream_state *
6253 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6254 		       const struct drm_display_mode *drm_mode,
6255 		       const struct dm_connector_state *dm_state,
6256 		       const struct dc_stream_state *old_stream,
6257 		       int requested_bpc)
6258 {
6259 	struct drm_display_mode *preferred_mode = NULL;
6260 	struct drm_connector *drm_connector;
6261 	const struct drm_connector_state *con_state =
6262 		dm_state ? &dm_state->base : NULL;
6263 	struct dc_stream_state *stream = NULL;
6264 	struct drm_display_mode mode = *drm_mode;
6265 	struct drm_display_mode saved_mode;
6266 	struct drm_display_mode *freesync_mode = NULL;
6267 	bool native_mode_found = false;
6268 	bool recalculate_timing = false;
6269 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6270 	int mode_refresh;
6271 	int preferred_refresh = 0;
6272 #if defined(CONFIG_DRM_AMD_DC_DCN)
6273 	struct dsc_dec_dpcd_caps dsc_caps;
6274 #endif
6275 	struct dc_sink *sink = NULL;
6276 
6277 	memset(&saved_mode, 0, sizeof(saved_mode));
6278 
6279 	if (aconnector == NULL) {
6280 		DRM_ERROR("aconnector is NULL!\n");
6281 		return stream;
6282 	}
6283 
6284 	drm_connector = &aconnector->base;
6285 
6286 	if (!aconnector->dc_sink) {
6287 		sink = create_fake_sink(aconnector);
6288 		if (!sink)
6289 			return stream;
6290 	} else {
6291 		sink = aconnector->dc_sink;
6292 		dc_sink_retain(sink);
6293 	}
6294 
6295 	stream = dc_create_stream_for_sink(sink);
6296 
6297 	if (stream == NULL) {
6298 		DRM_ERROR("Failed to create stream for sink!\n");
6299 		goto finish;
6300 	}
6301 
6302 	stream->dm_stream_context = aconnector;
6303 
6304 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6305 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6306 
6307 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6308 		/* Search for preferred mode */
6309 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6310 			native_mode_found = true;
6311 			break;
6312 		}
6313 	}
6314 	if (!native_mode_found)
6315 		preferred_mode = list_first_entry_or_null(
6316 				&aconnector->base.modes,
6317 				struct drm_display_mode,
6318 				head);
6319 
6320 	mode_refresh = drm_mode_vrefresh(&mode);
6321 
6322 	if (preferred_mode == NULL) {
6323 		/*
6324 		 * This may not be an error, the use case is when we have no
6325 		 * usermode calls to reset and set mode upon hotplug. In this
6326 		 * case, we call set mode ourselves to restore the previous mode
6327 		 * and the mode list may not be filled in in time.
6328 		 */
6329 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6330 	} else {
6331 		recalculate_timing = amdgpu_freesync_vid_mode &&
6332 				 is_freesync_video_mode(&mode, aconnector);
6333 		if (recalculate_timing) {
6334 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6335 			saved_mode = mode;
6336 			mode = *freesync_mode;
6337 		} else {
6338 			decide_crtc_timing_for_drm_display_mode(
6339 				&mode, preferred_mode, scale);
6340 
6341 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6342 		}
6343 	}
6344 
6345 	if (recalculate_timing)
6346 		drm_mode_set_crtcinfo(&saved_mode, 0);
6347 	else if (!dm_state)
6348 		drm_mode_set_crtcinfo(&mode, 0);
6349 
6350 	/*
6351 	 * If scaling is enabled and refresh rate didn't change
6352 	 * we copy the vic and polarities of the old timings
6353 	 */
6354 	if (!scale || mode_refresh != preferred_refresh)
6355 		fill_stream_properties_from_drm_display_mode(
6356 			stream, &mode, &aconnector->base, con_state, NULL,
6357 			requested_bpc);
6358 	else
6359 		fill_stream_properties_from_drm_display_mode(
6360 			stream, &mode, &aconnector->base, con_state, old_stream,
6361 			requested_bpc);
6362 
6363 #if defined(CONFIG_DRM_AMD_DC_DCN)
6364 	/* SST DSC determination policy */
6365 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6366 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6367 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6368 #endif
6369 
6370 	update_stream_scaling_settings(&mode, dm_state, stream);
6371 
6372 	fill_audio_info(
6373 		&stream->audio_info,
6374 		drm_connector,
6375 		sink);
6376 
6377 	update_stream_signal(stream, sink);
6378 
6379 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6380 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6381 
6382 	if (stream->link->psr_settings.psr_feature_enabled) {
6383 		//
6384 		// Decide whether the stream supports VSC SDP colorimetry
6385 		// before building the VSC info packet.
6386 		//
6387 		stream->use_vsc_sdp_for_colorimetry = false;
6388 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6389 			stream->use_vsc_sdp_for_colorimetry =
6390 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6391 		} else {
6392 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6393 				stream->use_vsc_sdp_for_colorimetry = true;
6394 		}
6395 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6396 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6397 
6398 	}
6399 finish:
6400 	dc_sink_release(sink);
6401 
6402 	return stream;
6403 }
6404 
6405 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6406 {
6407 	drm_crtc_cleanup(crtc);
6408 	kfree(crtc);
6409 }
6410 
6411 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6412 				  struct drm_crtc_state *state)
6413 {
6414 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6415 
6416 	/* TODO Destroy dc_stream objects once the stream object is flattened */
6417 	if (cur->stream)
6418 		dc_stream_release(cur->stream);
6419 
6420 
6421 	__drm_atomic_helper_crtc_destroy_state(state);
6422 
6423 
6424 	kfree(state);
6425 }
6426 
6427 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6428 {
6429 	struct dm_crtc_state *state;
6430 
6431 	if (crtc->state)
6432 		dm_crtc_destroy_state(crtc, crtc->state);
6433 
6434 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6435 	if (WARN_ON(!state))
6436 		return;
6437 
6438 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6439 }
6440 
6441 static struct drm_crtc_state *
6442 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6443 {
6444 	struct dm_crtc_state *state, *cur;
6445 
6446 	cur = to_dm_crtc_state(crtc->state);
6447 
6448 	if (WARN_ON(!crtc->state))
6449 		return NULL;
6450 
6451 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6452 	if (!state)
6453 		return NULL;
6454 
6455 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6456 
6457 	if (cur->stream) {
6458 		state->stream = cur->stream;
6459 		dc_stream_retain(state->stream);
6460 	}
6461 
6462 	state->active_planes = cur->active_planes;
6463 	state->vrr_infopacket = cur->vrr_infopacket;
6464 	state->abm_level = cur->abm_level;
6465 	state->vrr_supported = cur->vrr_supported;
6466 	state->freesync_config = cur->freesync_config;
6467 	state->cm_has_degamma = cur->cm_has_degamma;
6468 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6469 	/* TODO Duplicate dc_stream after the stream object is flattened */
6470 
6471 	return &state->base;
6472 }
6473 
6474 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6475 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6476 {
6477 	crtc_debugfs_init(crtc);
6478 
6479 	return 0;
6480 }
6481 #endif
6482 
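/*
 * Enable or disable the VUPDATE interrupt for this CRTC. The DC irq source
 * is derived from the CRTC's OTG instance (IRQ_TYPE_VUPDATE + otg_inst).
 * VUPDATE is only required while variable refresh rate is active; see
 * dm_set_vblank() below.
 */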
6483 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6484 {
6485 	enum dc_irq_source irq_source;
6486 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6487 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6488 	int rc;
6489 
6490 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6491 
6492 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6493 
6494 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6495 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6496 	return rc;
6497 }
6498 
6499 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6500 {
6501 	enum dc_irq_source irq_source;
6502 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6503 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6504 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6505 #if defined(CONFIG_DRM_AMD_DC_DCN)
6506 	struct amdgpu_display_manager *dm = &adev->dm;
6507 	struct vblank_control_work *work;
6508 #endif
6509 	int rc = 0;
6510 
6511 	if (enable) {
6512 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6513 		if (amdgpu_dm_vrr_active(acrtc_state))
6514 			rc = dm_set_vupdate_irq(crtc, true);
6515 	} else {
6516 		/* vblank irq off -> vupdate irq off */
6517 		rc = dm_set_vupdate_irq(crtc, false);
6518 	}
6519 
6520 	if (rc)
6521 		return rc;
6522 
6523 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6524 
6525 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6526 		return -EBUSY;
6527 
6528 	if (amdgpu_in_reset(adev))
6529 		return 0;
6530 
6531 #if defined(CONFIG_DRM_AMD_DC_DCN)
6532 	if (dm->vblank_control_workqueue) {
6533 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6534 		if (!work)
6535 			return -ENOMEM;
6536 
6537 		INIT_WORK(&work->work, vblank_control_worker);
6538 		work->dm = dm;
6539 		work->acrtc = acrtc;
6540 		work->enable = enable;
6541 
6542 		if (acrtc_state->stream) {
6543 			dc_stream_retain(acrtc_state->stream);
6544 			work->stream = acrtc_state->stream;
6545 		}
6546 
6547 		queue_work(dm->vblank_control_workqueue, &work->work);
6548 	}
6549 #endif
6550 
6551 	return 0;
6552 }
6553 
6554 static int dm_enable_vblank(struct drm_crtc *crtc)
6555 {
6556 	return dm_set_vblank(crtc, true);
6557 }
6558 
6559 static void dm_disable_vblank(struct drm_crtc *crtc)
6560 {
6561 	dm_set_vblank(crtc, false);
6562 }
6563 
6564 /* Implement only the options currently available for the driver */
6565 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6566 	.reset = dm_crtc_reset_state,
6567 	.destroy = amdgpu_dm_crtc_destroy,
6568 	.set_config = drm_atomic_helper_set_config,
6569 	.page_flip = drm_atomic_helper_page_flip,
6570 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6571 	.atomic_destroy_state = dm_crtc_destroy_state,
6572 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6573 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6574 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6575 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6576 	.enable_vblank = dm_enable_vblank,
6577 	.disable_vblank = dm_disable_vblank,
6578 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6579 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6580 	.late_register = amdgpu_dm_crtc_late_register,
6581 #endif
6582 };
6583 
6584 static enum drm_connector_status
6585 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6586 {
6587 	bool connected;
6588 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6589 
6590 	/*
6591 	 * Notes:
6592 	 * 1. This interface is NOT called in the context of an HPD irq.
6593 	 * 2. This interface *is* called in the context of a user-mode ioctl,
6594 	 * which makes it a bad place for *any* MST-related activity.
6595 	 */
6596 
6597 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6598 	    !aconnector->fake_enable)
6599 		connected = (aconnector->dc_sink != NULL);
6600 	else
6601 		connected = (aconnector->base.force == DRM_FORCE_ON);
6602 
6603 	update_subconnector_property(aconnector);
6604 
6605 	return (connected ? connector_status_connected :
6606 			connector_status_disconnected);
6607 }
6608 
6609 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6610 					    struct drm_connector_state *connector_state,
6611 					    struct drm_property *property,
6612 					    uint64_t val)
6613 {
6614 	struct drm_device *dev = connector->dev;
6615 	struct amdgpu_device *adev = drm_to_adev(dev);
6616 	struct dm_connector_state *dm_old_state =
6617 		to_dm_connector_state(connector->state);
6618 	struct dm_connector_state *dm_new_state =
6619 		to_dm_connector_state(connector_state);
6620 
6621 	int ret = -EINVAL;
6622 
6623 	if (property == dev->mode_config.scaling_mode_property) {
6624 		enum amdgpu_rmx_type rmx_type;
6625 
6626 		switch (val) {
6627 		case DRM_MODE_SCALE_CENTER:
6628 			rmx_type = RMX_CENTER;
6629 			break;
6630 		case DRM_MODE_SCALE_ASPECT:
6631 			rmx_type = RMX_ASPECT;
6632 			break;
6633 		case DRM_MODE_SCALE_FULLSCREEN:
6634 			rmx_type = RMX_FULL;
6635 			break;
6636 		case DRM_MODE_SCALE_NONE:
6637 		default:
6638 			rmx_type = RMX_OFF;
6639 			break;
6640 		}
6641 
6642 		if (dm_old_state->scaling == rmx_type)
6643 			return 0;
6644 
6645 		dm_new_state->scaling = rmx_type;
6646 		ret = 0;
6647 	} else if (property == adev->mode_info.underscan_hborder_property) {
6648 		dm_new_state->underscan_hborder = val;
6649 		ret = 0;
6650 	} else if (property == adev->mode_info.underscan_vborder_property) {
6651 		dm_new_state->underscan_vborder = val;
6652 		ret = 0;
6653 	} else if (property == adev->mode_info.underscan_property) {
6654 		dm_new_state->underscan_enable = val;
6655 		ret = 0;
6656 	} else if (property == adev->mode_info.abm_level_property) {
6657 		dm_new_state->abm_level = val;
6658 		ret = 0;
6659 	}
6660 
6661 	return ret;
6662 }
6663 
6664 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6665 					    const struct drm_connector_state *state,
6666 					    struct drm_property *property,
6667 					    uint64_t *val)
6668 {
6669 	struct drm_device *dev = connector->dev;
6670 	struct amdgpu_device *adev = drm_to_adev(dev);
6671 	struct dm_connector_state *dm_state =
6672 		to_dm_connector_state(state);
6673 	int ret = -EINVAL;
6674 
6675 	if (property == dev->mode_config.scaling_mode_property) {
6676 		switch (dm_state->scaling) {
6677 		case RMX_CENTER:
6678 			*val = DRM_MODE_SCALE_CENTER;
6679 			break;
6680 		case RMX_ASPECT:
6681 			*val = DRM_MODE_SCALE_ASPECT;
6682 			break;
6683 		case RMX_FULL:
6684 			*val = DRM_MODE_SCALE_FULLSCREEN;
6685 			break;
6686 		case RMX_OFF:
6687 		default:
6688 			*val = DRM_MODE_SCALE_NONE;
6689 			break;
6690 		}
6691 		ret = 0;
6692 	} else if (property == adev->mode_info.underscan_hborder_property) {
6693 		*val = dm_state->underscan_hborder;
6694 		ret = 0;
6695 	} else if (property == adev->mode_info.underscan_vborder_property) {
6696 		*val = dm_state->underscan_vborder;
6697 		ret = 0;
6698 	} else if (property == adev->mode_info.underscan_property) {
6699 		*val = dm_state->underscan_enable;
6700 		ret = 0;
6701 	} else if (property == adev->mode_info.abm_level_property) {
6702 		*val = dm_state->abm_level;
6703 		ret = 0;
6704 	}
6705 
6706 	return ret;
6707 }
6708 
6709 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6710 {
6711 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6712 
6713 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6714 }
6715 
6716 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6717 {
6718 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6719 	const struct dc_link *link = aconnector->dc_link;
6720 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6721 	struct amdgpu_display_manager *dm = &adev->dm;
6722 	int i;
6723 
6724 	/*
6725 	 * Call only if mst_mgr was initialized before, since it's not done
6726 	 * for all connector types.
6727 	 */
6728 	if (aconnector->mst_mgr.dev)
6729 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6730 
6731 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6732 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6733 	for (i = 0; i < dm->num_of_edps; i++) {
6734 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6735 			backlight_device_unregister(dm->backlight_dev[i]);
6736 			dm->backlight_dev[i] = NULL;
6737 		}
6738 	}
6739 #endif
6740 
6741 	if (aconnector->dc_em_sink)
6742 		dc_sink_release(aconnector->dc_em_sink);
6743 	aconnector->dc_em_sink = NULL;
6744 	if (aconnector->dc_sink)
6745 		dc_sink_release(aconnector->dc_sink);
6746 	aconnector->dc_sink = NULL;
6747 
6748 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6749 	drm_connector_unregister(connector);
6750 	drm_connector_cleanup(connector);
6751 	if (aconnector->i2c) {
6752 		i2c_del_adapter(&aconnector->i2c->base);
6753 		kfree(aconnector->i2c);
6754 	}
6755 	kfree(aconnector->dm_dp_aux.aux.name);
6756 
6757 	kfree(connector);
6758 }
6759 
6760 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6761 {
6762 	struct dm_connector_state *state =
6763 		to_dm_connector_state(connector->state);
6764 
6765 	if (connector->state)
6766 		__drm_atomic_helper_connector_destroy_state(connector->state);
6767 
6768 	kfree(state);
6769 
6770 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6771 
6772 	if (state) {
6773 		state->scaling = RMX_OFF;
6774 		state->underscan_enable = false;
6775 		state->underscan_hborder = 0;
6776 		state->underscan_vborder = 0;
6777 		state->base.max_requested_bpc = 8;
6778 		state->vcpi_slots = 0;
6779 		state->pbn = 0;
6780 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6781 			state->abm_level = amdgpu_dm_abm_level;
6782 
6783 		__drm_atomic_helper_connector_reset(connector, &state->base);
6784 	}
6785 }
6786 
6787 struct drm_connector_state *
6788 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6789 {
6790 	struct dm_connector_state *state =
6791 		to_dm_connector_state(connector->state);
6792 
6793 	struct dm_connector_state *new_state =
6794 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6795 
6796 	if (!new_state)
6797 		return NULL;
6798 
6799 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6800 
6801 	new_state->freesync_capable = state->freesync_capable;
6802 	new_state->abm_level = state->abm_level;
6803 	new_state->scaling = state->scaling;
6804 	new_state->underscan_enable = state->underscan_enable;
6805 	new_state->underscan_hborder = state->underscan_hborder;
6806 	new_state->underscan_vborder = state->underscan_vborder;
6807 	new_state->vcpi_slots = state->vcpi_slots;
6808 	new_state->pbn = state->pbn;
6809 	return &new_state->base;
6810 }
6811 
6812 static int
6813 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6814 {
6815 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6816 		to_amdgpu_dm_connector(connector);
6817 	int r;
6818 
6819 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6820 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6821 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6822 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6823 		if (r)
6824 			return r;
6825 	}
6826 
6827 #if defined(CONFIG_DEBUG_FS)
6828 	connector_debugfs_init(amdgpu_dm_connector);
6829 #endif
6830 
6831 	return 0;
6832 }
6833 
6834 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6835 	.reset = amdgpu_dm_connector_funcs_reset,
6836 	.detect = amdgpu_dm_connector_detect,
6837 	.fill_modes = drm_helper_probe_single_connector_modes,
6838 	.destroy = amdgpu_dm_connector_destroy,
6839 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6840 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6841 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6842 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6843 	.late_register = amdgpu_dm_connector_late_register,
6844 	.early_unregister = amdgpu_dm_connector_unregister
6845 };
6846 
6847 static int get_modes(struct drm_connector *connector)
6848 {
6849 	return amdgpu_dm_connector_get_modes(connector);
6850 }
6851 
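/*
 * Create an emulated (virtual-signal) sink from the connector's EDID override
 * blob, so a forced-on connector can be driven without a detected sink.
 */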
6852 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6853 {
6854 	struct dc_sink_init_data init_params = {
6855 			.link = aconnector->dc_link,
6856 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6857 	};
6858 	struct edid *edid;
6859 
6860 	if (!aconnector->base.edid_blob_ptr) {
6861 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6862 				aconnector->base.name);
6863 
6864 		aconnector->base.force = DRM_FORCE_OFF;
6865 		aconnector->base.override_edid = false;
6866 		return;
6867 	}
6868 
6869 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6870 
6871 	aconnector->edid = edid;
6872 
6873 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6874 		aconnector->dc_link,
6875 		(uint8_t *)edid,
6876 		(edid->extensions + 1) * EDID_LENGTH,
6877 		&init_params);
6878 
6879 	if (aconnector->base.force == DRM_FORCE_ON) {
6880 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6881 		aconnector->dc_link->local_sink :
6882 		aconnector->dc_em_sink;
6883 		dc_sink_retain(aconnector->dc_sink);
6884 	}
6885 }
6886 
6887 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6888 {
6889 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6890 
6891 	/*
6892 	 * In case of a headless boot with force on for a DP-managed connector,
6893 	 * these settings have to be != 0 to get an initial modeset.
6894 	 */
6895 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6896 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6897 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6898 	}
6899 
6900 
6901 	aconnector->base.override_edid = true;
6902 	create_eml_sink(aconnector);
6903 }
6904 
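/*
 * Create a stream for the sink and validate it with DC. On validation
 * failure, retry with progressively lower bpc (down to 6) and, if encoder
 * validation still fails, retry once more with YCbCr420 output forced.
 */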
6905 static struct dc_stream_state *
6906 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6907 				const struct drm_display_mode *drm_mode,
6908 				const struct dm_connector_state *dm_state,
6909 				const struct dc_stream_state *old_stream)
6910 {
6911 	struct drm_connector *connector = &aconnector->base;
6912 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6913 	struct dc_stream_state *stream;
6914 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6915 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6916 	enum dc_status dc_result = DC_OK;
6917 
6918 	do {
6919 		stream = create_stream_for_sink(aconnector, drm_mode,
6920 						dm_state, old_stream,
6921 						requested_bpc);
6922 		if (stream == NULL) {
6923 			DRM_ERROR("Failed to create stream for sink!\n");
6924 			break;
6925 		}
6926 
6927 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6928 
6929 		if (dc_result != DC_OK) {
6930 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6931 				      drm_mode->hdisplay,
6932 				      drm_mode->vdisplay,
6933 				      drm_mode->clock,
6934 				      dc_result,
6935 				      dc_status_to_str(dc_result));
6936 
6937 			dc_stream_release(stream);
6938 			stream = NULL;
6939 			requested_bpc -= 2; /* lower bpc to retry validation */
6940 		}
6941 
6942 	} while (stream == NULL && requested_bpc >= 6);
6943 
6944 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6945 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6946 
6947 		aconnector->force_yuv420_output = true;
6948 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6949 						dm_state, old_stream);
6950 		aconnector->force_yuv420_output = false;
6951 	}
6952 
6953 	return stream;
6954 }
6955 
6956 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6957 				   struct drm_display_mode *mode)
6958 {
6959 	int result = MODE_ERROR;
6960 	struct dc_sink *dc_sink;
6961 	/* TODO: Unhardcode stream count */
6962 	struct dc_stream_state *stream;
6963 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6964 
6965 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6966 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6967 		return result;
6968 
6969 	/*
6970 	 * Only run this the first time mode_valid is called, to initialize
6971 	 * EDID management.
6972 	 */
6973 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6974 		!aconnector->dc_em_sink)
6975 		handle_edid_mgmt(aconnector);
6976 
6977 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6978 
6979 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6980 				aconnector->base.force != DRM_FORCE_ON) {
6981 		DRM_ERROR("dc_sink is NULL!\n");
6982 		goto fail;
6983 	}
6984 
6985 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6986 	if (stream) {
6987 		dc_stream_release(stream);
6988 		result = MODE_OK;
6989 	}
6990 
6991 fail:
6992 	/* TODO: error handling */
6993 	return result;
6994 }
6995 
6996 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6997 				struct dc_info_packet *out)
6998 {
6999 	struct hdmi_drm_infoframe frame;
7000 	unsigned char buf[30]; /* 26 + 4 */
7001 	ssize_t len;
7002 	int ret, i;
7003 
7004 	memset(out, 0, sizeof(*out));
7005 
7006 	if (!state->hdr_output_metadata)
7007 		return 0;
7008 
7009 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7010 	if (ret)
7011 		return ret;
7012 
7013 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7014 	if (len < 0)
7015 		return (int)len;
7016 
7017 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7018 	if (len != 30)
7019 		return -EINVAL;
7020 
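	/*
	 * Layout of the packed infoframe used below: buf[0..2] hold the HDMI
	 * infoframe header (type, version, length), buf[3] the checksum and
	 * buf[4..29] the 26 bytes of static HDR metadata payload.
	 */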
7021 	/* Prepare the infopacket for DC. */
7022 	switch (state->connector->connector_type) {
7023 	case DRM_MODE_CONNECTOR_HDMIA:
7024 		out->hb0 = 0x87; /* type */
7025 		out->hb1 = 0x01; /* version */
7026 		out->hb2 = 0x1A; /* length */
7027 		out->sb[0] = buf[3]; /* checksum */
7028 		i = 1;
7029 		break;
7030 
7031 	case DRM_MODE_CONNECTOR_DisplayPort:
7032 	case DRM_MODE_CONNECTOR_eDP:
7033 		out->hb0 = 0x00; /* sdp id, zero */
7034 		out->hb1 = 0x87; /* type */
7035 		out->hb2 = 0x1D; /* payload len - 1 */
7036 		out->hb3 = (0x13 << 2); /* sdp version */
7037 		out->sb[0] = 0x01; /* version */
7038 		out->sb[1] = 0x1A; /* length */
7039 		i = 2;
7040 		break;
7041 
7042 	default:
7043 		return -EINVAL;
7044 	}
7045 
7046 	memcpy(&out->sb[i], &buf[4], 26);
7047 	out->valid = true;
7048 
7049 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7050 		       sizeof(out->sb), false);
7051 
7052 	return 0;
7053 }
7054 
7055 static int
7056 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7057 				 struct drm_atomic_state *state)
7058 {
7059 	struct drm_connector_state *new_con_state =
7060 		drm_atomic_get_new_connector_state(state, conn);
7061 	struct drm_connector_state *old_con_state =
7062 		drm_atomic_get_old_connector_state(state, conn);
7063 	struct drm_crtc *crtc = new_con_state->crtc;
7064 	struct drm_crtc_state *new_crtc_state;
7065 	int ret;
7066 
7067 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7068 
7069 	if (!crtc)
7070 		return 0;
7071 
7072 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7073 		struct dc_info_packet hdr_infopacket;
7074 
7075 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7076 		if (ret)
7077 			return ret;
7078 
7079 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7080 		if (IS_ERR(new_crtc_state))
7081 			return PTR_ERR(new_crtc_state);
7082 
7083 		/*
7084 		 * DC considers the stream backends changed if the
7085 		 * static metadata changes. Forcing the modeset also
7086 		 * gives a simple way for userspace to switch from
7087 		 * 8bpc to 10bpc when setting the metadata to enter
7088 		 * or exit HDR.
7089 		 *
7090 		 * Changing the static metadata after it's been
7091 		 * set is permissible, however. So only force a
7092 		 * modeset if we're entering or exiting HDR.
7093 		 */
7094 		new_crtc_state->mode_changed =
7095 			!old_con_state->hdr_output_metadata ||
7096 			!new_con_state->hdr_output_metadata;
7097 	}
7098 
7099 	return 0;
7100 }
7101 
7102 static const struct drm_connector_helper_funcs
7103 amdgpu_dm_connector_helper_funcs = {
7104 	/*
7105 	 * If a second, bigger display is hotplugged in FB console mode, its higher-
7106 	 * resolution modes will be filtered out by drm_mode_validate_size() and will
7107 	 * be missing after the user starts lightdm. So we need to renew the mode
7108 	 * list in the get_modes callback, not just return the mode count.
7109 	 */
7110 	.get_modes = get_modes,
7111 	.mode_valid = amdgpu_dm_connector_mode_valid,
7112 	.atomic_check = amdgpu_dm_connector_atomic_check,
7113 };
7114 
7115 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7116 {
7117 }
7118 
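/*
 * Count the non-cursor planes that will be active (i.e. have a framebuffer
 * attached) on this CRTC once the atomic state is committed.
 */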
7119 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7120 {
7121 	struct drm_atomic_state *state = new_crtc_state->state;
7122 	struct drm_plane *plane;
7123 	int num_active = 0;
7124 
7125 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7126 		struct drm_plane_state *new_plane_state;
7127 
7128 		/* Cursor planes are "fake". */
7129 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7130 			continue;
7131 
7132 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7133 
7134 		if (!new_plane_state) {
7135 			/*
7136 			 * The plane is enabled on the CRTC and hasn't changed
7137 			 * state. This means that it previously passed
7138 			 * validation and is therefore enabled.
7139 			 */
7140 			num_active += 1;
7141 			continue;
7142 		}
7143 
7144 		/* We need a framebuffer to be considered enabled. */
7145 		num_active += (new_plane_state->fb != NULL);
7146 	}
7147 
7148 	return num_active;
7149 }
7150 
7151 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7152 					 struct drm_crtc_state *new_crtc_state)
7153 {
7154 	struct dm_crtc_state *dm_new_crtc_state =
7155 		to_dm_crtc_state(new_crtc_state);
7156 
7157 	dm_new_crtc_state->active_planes = 0;
7158 
7159 	if (!dm_new_crtc_state->stream)
7160 		return;
7161 
7162 	dm_new_crtc_state->active_planes =
7163 		count_crtc_active_planes(new_crtc_state);
7164 }
7165 
7166 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7167 				       struct drm_atomic_state *state)
7168 {
7169 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7170 									  crtc);
7171 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7172 	struct dc *dc = adev->dm.dc;
7173 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7174 	int ret = -EINVAL;
7175 
7176 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7177 
7178 	dm_update_crtc_active_planes(crtc, crtc_state);
7179 
7180 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7181 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7182 		return ret;
7183 	}
7184 
7185 	/*
7186 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7187 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7188 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7189 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7190 	 */
7191 	if (crtc_state->enable &&
7192 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7193 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7194 		return -EINVAL;
7195 	}
7196 
7197 	/* In some use cases, like reset, no stream is attached */
7198 	if (!dm_crtc_state->stream)
7199 		return 0;
7200 
7201 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7202 		return 0;
7203 
7204 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7205 	return ret;
7206 }
7207 
7208 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7209 				      const struct drm_display_mode *mode,
7210 				      struct drm_display_mode *adjusted_mode)
7211 {
7212 	return true;
7213 }
7214 
7215 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7216 	.disable = dm_crtc_helper_disable,
7217 	.atomic_check = dm_crtc_helper_atomic_check,
7218 	.mode_fixup = dm_crtc_helper_mode_fixup,
7219 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7220 };
7221 
7222 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7223 {
7224 
7225 }
7226 
7227 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7228 {
7229 	switch (display_color_depth) {
7230 	case COLOR_DEPTH_666:
7231 		return 6;
7232 	case COLOR_DEPTH_888:
7233 		return 8;
7234 	case COLOR_DEPTH_101010:
7235 		return 10;
7236 	case COLOR_DEPTH_121212:
7237 		return 12;
7238 	case COLOR_DEPTH_141414:
7239 		return 14;
7240 	case COLOR_DEPTH_161616:
7241 		return 16;
7242 	default:
7243 		break;
7244 	}
7245 	return 0;
7246 }
7247 
7248 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7249 					  struct drm_crtc_state *crtc_state,
7250 					  struct drm_connector_state *conn_state)
7251 {
7252 	struct drm_atomic_state *state = crtc_state->state;
7253 	struct drm_connector *connector = conn_state->connector;
7254 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7255 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7256 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7257 	struct drm_dp_mst_topology_mgr *mst_mgr;
7258 	struct drm_dp_mst_port *mst_port;
7259 	enum dc_color_depth color_depth;
7260 	int clock, bpp = 0;
7261 	bool is_y420 = false;
7262 
7263 	if (!aconnector->port || !aconnector->dc_sink)
7264 		return 0;
7265 
7266 	mst_port = aconnector->port;
7267 	mst_mgr = &aconnector->mst_port->mst_mgr;
7268 
7269 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7270 		return 0;
7271 
7272 	if (!state->duplicated) {
7273 		int max_bpc = conn_state->max_requested_bpc;
7274 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7275 				aconnector->force_yuv420_output;
7276 		color_depth = convert_color_depth_from_display_info(connector,
7277 								    is_y420,
7278 								    max_bpc);
7279 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7280 		clock = adjusted_mode->clock;
7281 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7282 	}
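	/*
	 * The PBN value is derived from the pixel clock (in kHz) and the bits
	 * per pixel (bpc * 3 as computed above); drm_dp_atomic_find_vcpi_slots()
	 * then converts it into the number of MST time slots for this port.
	 */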
7283 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7284 									   mst_mgr,
7285 									   mst_port,
7286 									   dm_new_connector_state->pbn,
7287 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7288 	if (dm_new_connector_state->vcpi_slots < 0) {
7289 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7290 		return dm_new_connector_state->vcpi_slots;
7291 	}
7292 	return 0;
7293 }
7294 
7295 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7296 	.disable = dm_encoder_helper_disable,
7297 	.atomic_check = dm_encoder_helper_atomic_check
7298 };
7299 
7300 #if defined(CONFIG_DRM_AMD_DC_DCN)
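/*
 * Re-apply MST VCPI slot allocations for streams that use DSC, based on the
 * PBN values computed by compute_mst_dsc_configs_for_state(); for MST streams
 * without DSC, DSC is explicitly disabled on the port.
 */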
7301 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7302 					    struct dc_state *dc_state,
7303 					    struct dsc_mst_fairness_vars *vars)
7304 {
7305 	struct dc_stream_state *stream = NULL;
7306 	struct drm_connector *connector;
7307 	struct drm_connector_state *new_con_state;
7308 	struct amdgpu_dm_connector *aconnector;
7309 	struct dm_connector_state *dm_conn_state;
7310 	int i, j, clock;
7311 	int vcpi, pbn_div, pbn = 0;
7312 
7313 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7314 
7315 		aconnector = to_amdgpu_dm_connector(connector);
7316 
7317 		if (!aconnector->port)
7318 			continue;
7319 
7320 		if (!new_con_state || !new_con_state->crtc)
7321 			continue;
7322 
7323 		dm_conn_state = to_dm_connector_state(new_con_state);
7324 
7325 		for (j = 0; j < dc_state->stream_count; j++) {
7326 			stream = dc_state->streams[j];
7327 			if (!stream)
7328 				continue;
7329 
7330 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7331 				break;
7332 
7333 			stream = NULL;
7334 		}
7335 
7336 		if (!stream)
7337 			continue;
7338 
7339 		if (stream->timing.flags.DSC != 1) {
7340 			drm_dp_mst_atomic_enable_dsc(state,
7341 						     aconnector->port,
7342 						     dm_conn_state->pbn,
7343 						     0,
7344 						     false);
7345 			continue;
7346 		}
7347 
7348 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7349 		clock = stream->timing.pix_clk_100hz / 10;
7350 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7351 		for (j = 0; j < dc_state->stream_count; j++) {
7352 			if (vars[j].aconnector == aconnector) {
7353 				pbn = vars[j].pbn;
7354 				break;
7355 			}
7356 		}
7357 
7358 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7359 						    aconnector->port,
7360 						    pbn, pbn_div,
7361 						    true);
7362 		if (vcpi < 0)
7363 			return vcpi;
7364 
7365 		dm_conn_state->pbn = pbn;
7366 		dm_conn_state->vcpi_slots = vcpi;
7367 	}
7368 	return 0;
7369 }
7370 #endif
7371 
7372 static void dm_drm_plane_reset(struct drm_plane *plane)
7373 {
7374 	struct dm_plane_state *amdgpu_state = NULL;
7375 
7376 	if (plane->state)
7377 		plane->funcs->atomic_destroy_state(plane, plane->state);
7378 
7379 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7380 	WARN_ON(amdgpu_state == NULL);
7381 
7382 	if (amdgpu_state)
7383 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7384 }
7385 
7386 static struct drm_plane_state *
7387 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7388 {
7389 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7390 
7391 	old_dm_plane_state = to_dm_plane_state(plane->state);
7392 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7393 	if (!dm_plane_state)
7394 		return NULL;
7395 
7396 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7397 
7398 	if (old_dm_plane_state->dc_state) {
7399 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7400 		dc_plane_state_retain(dm_plane_state->dc_state);
7401 	}
7402 
7403 	return &dm_plane_state->base;
7404 }
7405 
7406 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7407 				struct drm_plane_state *state)
7408 {
7409 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7410 
7411 	if (dm_plane_state->dc_state)
7412 		dc_plane_state_release(dm_plane_state->dc_state);
7413 
7414 	drm_atomic_helper_plane_destroy_state(plane, state);
7415 }
7416 
7417 static const struct drm_plane_funcs dm_plane_funcs = {
7418 	.update_plane	= drm_atomic_helper_update_plane,
7419 	.disable_plane	= drm_atomic_helper_disable_plane,
7420 	.destroy	= drm_primary_helper_destroy,
7421 	.reset = dm_drm_plane_reset,
7422 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7423 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7424 	.format_mod_supported = dm_plane_format_mod_supported,
7425 };
7426 
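/*
 * prepare_fb: reserve and pin the framebuffer BO (any supported domain for
 * non-cursor planes, VRAM for the cursor), bind it to GART, record the GPU
 * address in the amdgpu_framebuffer and, for newly created planes only, fill
 * the DC plane buffer attributes from it.
 */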
7427 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7428 				      struct drm_plane_state *new_state)
7429 {
7430 	struct amdgpu_framebuffer *afb;
7431 	struct drm_gem_object *obj;
7432 	struct amdgpu_device *adev;
7433 	struct amdgpu_bo *rbo;
7434 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7435 	struct list_head list;
7436 	struct ttm_validate_buffer tv;
7437 	struct ww_acquire_ctx ticket;
7438 	uint32_t domain;
7439 	int r;
7440 
7441 	if (!new_state->fb) {
7442 		DRM_DEBUG_KMS("No FB bound\n");
7443 		return 0;
7444 	}
7445 
7446 	afb = to_amdgpu_framebuffer(new_state->fb);
7447 	obj = new_state->fb->obj[0];
7448 	rbo = gem_to_amdgpu_bo(obj);
7449 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7450 	INIT_LIST_HEAD(&list);
7451 
7452 	tv.bo = &rbo->tbo;
7453 	tv.num_shared = 1;
7454 	list_add(&tv.head, &list);
7455 
7456 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7457 	if (r) {
7458 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7459 		return r;
7460 	}
7461 
7462 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7463 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7464 	else
7465 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7466 
7467 	r = amdgpu_bo_pin(rbo, domain);
7468 	if (unlikely(r != 0)) {
7469 		if (r != -ERESTARTSYS)
7470 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7471 		ttm_eu_backoff_reservation(&ticket, &list);
7472 		return r;
7473 	}
7474 
7475 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7476 	if (unlikely(r != 0)) {
7477 		amdgpu_bo_unpin(rbo);
7478 		ttm_eu_backoff_reservation(&ticket, &list);
7479 		DRM_ERROR("%p bind failed\n", rbo);
7480 		return r;
7481 	}
7482 
7483 	ttm_eu_backoff_reservation(&ticket, &list);
7484 
7485 	afb->address = amdgpu_bo_gpu_offset(rbo);
7486 
7487 	amdgpu_bo_ref(rbo);
7488 
7489 	/**
7490 	 * We don't do surface updates on planes that have been newly created,
7491 	 * but we also don't have the afb->address during atomic check.
7492 	 *
7493 	 * Fill in buffer attributes depending on the address here, but only on
7494 	 * newly created planes since they're not being used by DC yet and this
7495 	 * won't modify global state.
7496 	 */
7497 	dm_plane_state_old = to_dm_plane_state(plane->state);
7498 	dm_plane_state_new = to_dm_plane_state(new_state);
7499 
7500 	if (dm_plane_state_new->dc_state &&
7501 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7502 		struct dc_plane_state *plane_state =
7503 			dm_plane_state_new->dc_state;
7504 		bool force_disable_dcc = !plane_state->dcc.enable;
7505 
7506 		fill_plane_buffer_attributes(
7507 			adev, afb, plane_state->format, plane_state->rotation,
7508 			afb->tiling_flags,
7509 			&plane_state->tiling_info, &plane_state->plane_size,
7510 			&plane_state->dcc, &plane_state->address,
7511 			afb->tmz_surface, force_disable_dcc);
7512 	}
7513 
7514 	return 0;
7515 }
7516 
7517 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7518 				       struct drm_plane_state *old_state)
7519 {
7520 	struct amdgpu_bo *rbo;
7521 	int r;
7522 
7523 	if (!old_state->fb)
7524 		return;
7525 
7526 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7527 	r = amdgpu_bo_reserve(rbo, false);
7528 	if (unlikely(r)) {
7529 		DRM_ERROR("failed to reserve rbo before unpin\n");
7530 		return;
7531 	}
7532 
7533 	amdgpu_bo_unpin(rbo);
7534 	amdgpu_bo_unreserve(rbo);
7535 	amdgpu_bo_unref(&rbo);
7536 }
7537 
7538 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7539 				       struct drm_crtc_state *new_crtc_state)
7540 {
7541 	struct drm_framebuffer *fb = state->fb;
7542 	int min_downscale, max_upscale;
7543 	int min_scale = 0;
7544 	int max_scale = INT_MAX;
7545 
7546 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7547 	if (fb && state->crtc) {
7548 		/* Validate viewport to cover the case when only the position changes */
7549 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7550 			int viewport_width = state->crtc_w;
7551 			int viewport_height = state->crtc_h;
7552 
7553 			if (state->crtc_x < 0)
7554 				viewport_width += state->crtc_x;
7555 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7556 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7557 
7558 			if (state->crtc_y < 0)
7559 				viewport_height += state->crtc_y;
7560 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7561 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7562 
7563 			if (viewport_width < 0 || viewport_height < 0) {
7564 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7565 				return -EINVAL;
7566 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7567 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7568 				return -EINVAL;
7569 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7570 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7571 				return -EINVAL;
7572 			}
7573 
7574 		}
7575 
7576 		/* Get min/max allowed scaling factors from plane caps. */
7577 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7578 					     &min_downscale, &max_upscale);
7579 		/*
7580 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7581 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7582 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7583 		 */
7584 		min_scale = (1000 << 16) / max_upscale;
7585 		max_scale = (1000 << 16) / min_downscale;
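		/*
		 * Illustrative example (values assumed, not taken from plane
		 * caps): with max_upscale = 16000, i.e. 16x in DC's
		 * 1.0 == 1000 convention, min_scale = (1000 << 16) / 16000
		 * = 4096, which is 1/16 in drm's 16.16 fixed-point format.
		 */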
7586 	}
7587 
7588 	return drm_atomic_helper_check_plane_state(
7589 		state, new_crtc_state, min_scale, max_scale, true, true);
7590 }
7591 
7592 static int dm_plane_atomic_check(struct drm_plane *plane,
7593 				 struct drm_atomic_state *state)
7594 {
7595 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7596 										 plane);
7597 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7598 	struct dc *dc = adev->dm.dc;
7599 	struct dm_plane_state *dm_plane_state;
7600 	struct dc_scaling_info scaling_info;
7601 	struct drm_crtc_state *new_crtc_state;
7602 	int ret;
7603 
7604 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7605 
7606 	dm_plane_state = to_dm_plane_state(new_plane_state);
7607 
7608 	if (!dm_plane_state->dc_state)
7609 		return 0;
7610 
7611 	new_crtc_state =
7612 		drm_atomic_get_new_crtc_state(state,
7613 					      new_plane_state->crtc);
7614 	if (!new_crtc_state)
7615 		return -EINVAL;
7616 
7617 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7618 	if (ret)
7619 		return ret;
7620 
7621 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7622 	if (ret)
7623 		return ret;
7624 
7625 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7626 		return 0;
7627 
7628 	return -EINVAL;
7629 }
7630 
7631 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7632 				       struct drm_atomic_state *state)
7633 {
7634 	/* Only support async updates on cursor planes. */
7635 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7636 		return -EINVAL;
7637 
7638 	return 0;
7639 }
7640 
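/*
 * Async update path, used only for the cursor plane: copy the new src/crtc
 * coordinates into the committed plane state and program the cursor directly,
 * bypassing the full atomic commit machinery.
 */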
7641 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7642 					 struct drm_atomic_state *state)
7643 {
7644 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7645 									   plane);
7646 	struct drm_plane_state *old_state =
7647 		drm_atomic_get_old_plane_state(state, plane);
7648 
7649 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7650 
7651 	swap(plane->state->fb, new_state->fb);
7652 
7653 	plane->state->src_x = new_state->src_x;
7654 	plane->state->src_y = new_state->src_y;
7655 	plane->state->src_w = new_state->src_w;
7656 	plane->state->src_h = new_state->src_h;
7657 	plane->state->crtc_x = new_state->crtc_x;
7658 	plane->state->crtc_y = new_state->crtc_y;
7659 	plane->state->crtc_w = new_state->crtc_w;
7660 	plane->state->crtc_h = new_state->crtc_h;
7661 
7662 	handle_cursor_update(plane, old_state);
7663 }
7664 
7665 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7666 	.prepare_fb = dm_plane_helper_prepare_fb,
7667 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7668 	.atomic_check = dm_plane_atomic_check,
7669 	.atomic_async_check = dm_plane_atomic_async_check,
7670 	.atomic_async_update = dm_plane_atomic_async_update
7671 };
7672 
7673 /*
7674  * TODO: these are currently initialized to RGB formats only.
7675  * For future use cases we should either initialize them dynamically based on
7676  * plane capabilities, or initialize this array to all formats, so the internal
7677  * drm check will succeed, and let DC implement the proper check.
7678  */
7679 static const uint32_t rgb_formats[] = {
7680 	DRM_FORMAT_XRGB8888,
7681 	DRM_FORMAT_ARGB8888,
7682 	DRM_FORMAT_RGBA8888,
7683 	DRM_FORMAT_XRGB2101010,
7684 	DRM_FORMAT_XBGR2101010,
7685 	DRM_FORMAT_ARGB2101010,
7686 	DRM_FORMAT_ABGR2101010,
7687 	DRM_FORMAT_XRGB16161616,
7688 	DRM_FORMAT_XBGR16161616,
7689 	DRM_FORMAT_ARGB16161616,
7690 	DRM_FORMAT_ABGR16161616,
7691 	DRM_FORMAT_XBGR8888,
7692 	DRM_FORMAT_ABGR8888,
7693 	DRM_FORMAT_RGB565,
7694 };
7695 
7696 static const uint32_t overlay_formats[] = {
7697 	DRM_FORMAT_XRGB8888,
7698 	DRM_FORMAT_ARGB8888,
7699 	DRM_FORMAT_RGBA8888,
7700 	DRM_FORMAT_XBGR8888,
7701 	DRM_FORMAT_ABGR8888,
7702 	DRM_FORMAT_RGB565
7703 };
7704 
7705 static const u32 cursor_formats[] = {
7706 	DRM_FORMAT_ARGB8888
7707 };
7708 
7709 static int get_plane_formats(const struct drm_plane *plane,
7710 			     const struct dc_plane_cap *plane_cap,
7711 			     uint32_t *formats, int max_formats)
7712 {
7713 	int i, num_formats = 0;
7714 
7715 	/*
7716 	 * TODO: Query support for each group of formats directly from
7717 	 * DC plane caps. This will require adding more formats to the
7718 	 * caps list.
7719 	 */
7720 
7721 	switch (plane->type) {
7722 	case DRM_PLANE_TYPE_PRIMARY:
7723 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7724 			if (num_formats >= max_formats)
7725 				break;
7726 
7727 			formats[num_formats++] = rgb_formats[i];
7728 		}
7729 
7730 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7731 			formats[num_formats++] = DRM_FORMAT_NV12;
7732 		if (plane_cap && plane_cap->pixel_format_support.p010)
7733 			formats[num_formats++] = DRM_FORMAT_P010;
7734 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7735 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7736 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7737 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7738 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7739 		}
7740 		break;
7741 
7742 	case DRM_PLANE_TYPE_OVERLAY:
7743 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7744 			if (num_formats >= max_formats)
7745 				break;
7746 
7747 			formats[num_formats++] = overlay_formats[i];
7748 		}
7749 		break;
7750 
7751 	case DRM_PLANE_TYPE_CURSOR:
7752 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7753 			if (num_formats >= max_formats)
7754 				break;
7755 
7756 			formats[num_formats++] = cursor_formats[i];
7757 		}
7758 		break;
7759 	}
7760 
7761 	return num_formats;
7762 }
7763 
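/*
 * Initialize a DRM plane: query supported formats and modifiers from the DC
 * plane caps, register the plane with the core and attach the DM plane
 * helpers plus alpha/blend, color-encoding and rotation properties where the
 * caps allow them.
 */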
7764 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7765 				struct drm_plane *plane,
7766 				unsigned long possible_crtcs,
7767 				const struct dc_plane_cap *plane_cap)
7768 {
7769 	uint32_t formats[32];
7770 	int num_formats;
7771 	int res = -EPERM;
7772 	unsigned int supported_rotations;
7773 	uint64_t *modifiers = NULL;
7774 
7775 	num_formats = get_plane_formats(plane, plane_cap, formats,
7776 					ARRAY_SIZE(formats));
7777 
7778 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7779 	if (res)
7780 		return res;
7781 
7782 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7783 				       &dm_plane_funcs, formats, num_formats,
7784 				       modifiers, plane->type, NULL);
7785 	kfree(modifiers);
7786 	if (res)
7787 		return res;
7788 
7789 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7790 	    plane_cap && plane_cap->per_pixel_alpha) {
7791 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7792 					  BIT(DRM_MODE_BLEND_PREMULTI);
7793 
7794 		drm_plane_create_alpha_property(plane);
7795 		drm_plane_create_blend_mode_property(plane, blend_caps);
7796 	}
7797 
7798 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7799 	    plane_cap &&
7800 	    (plane_cap->pixel_format_support.nv12 ||
7801 	     plane_cap->pixel_format_support.p010)) {
7802 		/* This only affects YUV formats. */
7803 		drm_plane_create_color_properties(
7804 			plane,
7805 			BIT(DRM_COLOR_YCBCR_BT601) |
7806 			BIT(DRM_COLOR_YCBCR_BT709) |
7807 			BIT(DRM_COLOR_YCBCR_BT2020),
7808 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7809 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7810 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7811 	}
7812 
7813 	supported_rotations =
7814 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7815 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7816 
7817 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7818 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7819 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7820 						   supported_rotations);
7821 
7822 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7823 
7824 	/* Create (reset) the plane state */
7825 	if (plane->funcs->reset)
7826 		plane->funcs->reset(plane);
7827 
7828 	return 0;
7829 }
7830 
7831 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7832 			       struct drm_plane *plane,
7833 			       uint32_t crtc_index)
7834 {
7835 	struct amdgpu_crtc *acrtc = NULL;
7836 	struct drm_plane *cursor_plane;
7837 
7838 	int res = -ENOMEM;
7839 
7840 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7841 	if (!cursor_plane)
7842 		goto fail;
7843 
7844 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7845 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7846 
7847 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7848 	if (!acrtc)
7849 		goto fail;
7850 
7851 	res = drm_crtc_init_with_planes(
7852 			dm->ddev,
7853 			&acrtc->base,
7854 			plane,
7855 			cursor_plane,
7856 			&amdgpu_dm_crtc_funcs, NULL);
7857 
7858 	if (res)
7859 		goto fail;
7860 
7861 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7862 
7863 	/* Create (reset) the CRTC state */
7864 	if (acrtc->base.funcs->reset)
7865 		acrtc->base.funcs->reset(&acrtc->base);
7866 
7867 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7868 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7869 
7870 	acrtc->crtc_id = crtc_index;
7871 	acrtc->base.enabled = false;
7872 	acrtc->otg_inst = -1;
7873 
7874 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7875 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7876 				   true, MAX_COLOR_LUT_ENTRIES);
7877 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7878 
7879 	return 0;
7880 
7881 fail:
7882 	kfree(acrtc);
7883 	kfree(cursor_plane);
7884 	return res;
7885 }
7886 
7887 
7888 static int to_drm_connector_type(enum signal_type st)
7889 {
7890 	switch (st) {
7891 	case SIGNAL_TYPE_HDMI_TYPE_A:
7892 		return DRM_MODE_CONNECTOR_HDMIA;
7893 	case SIGNAL_TYPE_EDP:
7894 		return DRM_MODE_CONNECTOR_eDP;
7895 	case SIGNAL_TYPE_LVDS:
7896 		return DRM_MODE_CONNECTOR_LVDS;
7897 	case SIGNAL_TYPE_RGB:
7898 		return DRM_MODE_CONNECTOR_VGA;
7899 	case SIGNAL_TYPE_DISPLAY_PORT:
7900 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7901 		return DRM_MODE_CONNECTOR_DisplayPort;
7902 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7903 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7904 		return DRM_MODE_CONNECTOR_DVID;
7905 	case SIGNAL_TYPE_VIRTUAL:
7906 		return DRM_MODE_CONNECTOR_VIRTUAL;
7907 
7908 	default:
7909 		return DRM_MODE_CONNECTOR_Unknown;
7910 	}
7911 }
7912 
7913 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7914 {
7915 	struct drm_encoder *encoder;
7916 
7917 	/* There is only one encoder per connector */
7918 	drm_connector_for_each_possible_encoder(connector, encoder)
7919 		return encoder;
7920 
7921 	return NULL;
7922 }
7923 
7924 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7925 {
7926 	struct drm_encoder *encoder;
7927 	struct amdgpu_encoder *amdgpu_encoder;
7928 
7929 	encoder = amdgpu_dm_connector_to_encoder(connector);
7930 
7931 	if (encoder == NULL)
7932 		return;
7933 
7934 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7935 
7936 	amdgpu_encoder->native_mode.clock = 0;
7937 
7938 	if (!list_empty(&connector->probed_modes)) {
7939 		struct drm_display_mode *preferred_mode = NULL;
7940 
7941 		list_for_each_entry(preferred_mode,
7942 				    &connector->probed_modes,
7943 				    head) {
7944 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7945 				amdgpu_encoder->native_mode = *preferred_mode;
7946 
7947 			break;
7948 		}
7949 
7950 	}
7951 }
7952 
7953 static struct drm_display_mode *
7954 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7955 			     char *name,
7956 			     int hdisplay, int vdisplay)
7957 {
7958 	struct drm_device *dev = encoder->dev;
7959 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7960 	struct drm_display_mode *mode = NULL;
7961 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7962 
7963 	mode = drm_mode_duplicate(dev, native_mode);
7964 
7965 	if (mode == NULL)
7966 		return NULL;
7967 
7968 	mode->hdisplay = hdisplay;
7969 	mode->vdisplay = vdisplay;
7970 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7971 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7972 
7973 	return mode;
7974 
7975 }
7976 
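/*
 * Add a set of common modes (640x480 up to 1920x1200), bounded by the
 * encoder's native mode, skipping sizes already present in the probed list.
 */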
7977 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7978 						 struct drm_connector *connector)
7979 {
7980 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7981 	struct drm_display_mode *mode = NULL;
7982 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7983 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7984 				to_amdgpu_dm_connector(connector);
7985 	int i;
7986 	int n;
7987 	struct mode_size {
7988 		char name[DRM_DISPLAY_MODE_LEN];
7989 		int w;
7990 		int h;
7991 	} common_modes[] = {
7992 		{  "640x480",  640,  480},
7993 		{  "800x600",  800,  600},
7994 		{ "1024x768", 1024,  768},
7995 		{ "1280x720", 1280,  720},
7996 		{ "1280x800", 1280,  800},
7997 		{"1280x1024", 1280, 1024},
7998 		{ "1440x900", 1440,  900},
7999 		{"1680x1050", 1680, 1050},
8000 		{"1600x1200", 1600, 1200},
8001 		{"1920x1080", 1920, 1080},
8002 		{"1920x1200", 1920, 1200}
8003 	};
8004 
8005 	n = ARRAY_SIZE(common_modes);
8006 
8007 	for (i = 0; i < n; i++) {
8008 		struct drm_display_mode *curmode = NULL;
8009 		bool mode_existed = false;
8010 
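		/* Skip common modes larger than, or identical to, the panel's native mode */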
8011 		if (common_modes[i].w > native_mode->hdisplay ||
8012 		    common_modes[i].h > native_mode->vdisplay ||
8013 		   (common_modes[i].w == native_mode->hdisplay &&
8014 		    common_modes[i].h == native_mode->vdisplay))
8015 			continue;
8016 
8017 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8018 			if (common_modes[i].w == curmode->hdisplay &&
8019 			    common_modes[i].h == curmode->vdisplay) {
8020 				mode_existed = true;
8021 				break;
8022 			}
8023 		}
8024 
8025 		if (mode_existed)
8026 			continue;
8027 
8028 		mode = amdgpu_dm_create_common_mode(encoder,
8029 				common_modes[i].name, common_modes[i].w,
8030 				common_modes[i].h);
8031 		if (!mode)
8032 			continue;
8033 
8034 		drm_mode_probed_add(connector, mode);
8035 		amdgpu_dm_connector->num_modes++;
8036 	}
8037 }
8038 
8039 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8040 {
8041 	struct drm_encoder *encoder;
8042 	struct amdgpu_encoder *amdgpu_encoder;
8043 	const struct drm_display_mode *native_mode;
8044 
8045 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8046 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8047 		return;
8048 
8049 	encoder = amdgpu_dm_connector_to_encoder(connector);
8050 	if (!encoder)
8051 		return;
8052 
8053 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8054 
8055 	native_mode = &amdgpu_encoder->native_mode;
8056 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8057 		return;
8058 
8059 	drm_connector_set_panel_orientation_with_quirk(connector,
8060 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8061 						       native_mode->hdisplay,
8062 						       native_mode->vdisplay);
8063 }
8064 
8065 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8066 					      struct edid *edid)
8067 {
8068 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8069 			to_amdgpu_dm_connector(connector);
8070 
8071 	if (edid) {
8072 		/* empty probed_modes */
8073 		INIT_LIST_HEAD(&connector->probed_modes);
8074 		amdgpu_dm_connector->num_modes =
8075 				drm_add_edid_modes(connector, edid);
8076 
8077 		/* Sort the probed modes before calling
8078 		 * amdgpu_dm_get_native_mode(), since an EDID can contain
8079 		 * more than one preferred mode. A mode later in the
8080 		 * probed-mode list may be a higher, preferred resolution:
8081 		 * for example, 3840x2160 in the base EDID preferred
8082 		 * timing and 4096x2160 in a DID extension block that
8083 		 * follows it.
8084 		 */
8085 		drm_mode_sort(&connector->probed_modes);
8086 		amdgpu_dm_get_native_mode(connector);
8087 
8088 		/* Freesync capabilities are reset by calling
8089 		 * drm_add_edid_modes() and need to be
8090 		 * restored here.
8091 		 */
8092 		amdgpu_dm_update_freesync_caps(connector, edid);
8093 
8094 		amdgpu_set_panel_orientation(connector);
8095 	} else {
8096 		amdgpu_dm_connector->num_modes = 0;
8097 	}
8098 }
8099 
8100 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8101 			      struct drm_display_mode *mode)
8102 {
8103 	struct drm_display_mode *m;
8104 
8105 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8106 		if (drm_mode_equal(m, mode))
8107 			return true;
8108 	}
8109 
8110 	return false;
8111 }
8112 
8113 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8114 {
8115 	const struct drm_display_mode *m;
8116 	struct drm_display_mode *new_mode;
8117 	uint i;
8118 	uint32_t new_modes_count = 0;
8119 
8120 	/* Standard FPS values
8121 	 *
8122 	 * 23.976   - TV/NTSC
8123 	 * 24 	    - Cinema
8124 	 * 25 	    - TV/PAL
8125 	 * 29.97    - TV/NTSC
8126 	 * 30 	    - TV/NTSC
8127 	 * 48 	    - Cinema HFR
8128 	 * 50 	    - TV/PAL
8129 	 * 60 	    - Commonly used
8130 	 * 48,72,96 - Multiples of 24
8131 	 */
8132 	static const uint32_t common_rates[] = {
8133 		23976, 24000, 25000, 29970, 30000,
8134 		48000, 50000, 60000, 72000, 96000
8135 	};
8136 
8137 	/*
8138 	 * Find mode with highest refresh rate with the same resolution
8139 	 * as the preferred mode. Some monitors report a preferred mode
8140 	 * with lower resolution than the highest refresh rate supported.
8141 	 */
8142 
8143 	m = get_highest_refresh_rate_mode(aconnector, true);
8144 	if (!m)
8145 		return 0;
8146 
8147 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8148 		uint64_t target_vtotal, target_vtotal_diff;
8149 		uint64_t num, den;
8150 
8151 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8152 			continue;
8153 
8154 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8155 		    common_rates[i] > aconnector->max_vfreq * 1000)
8156 			continue;
8157 
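		/* Compute the vtotal that yields this refresh rate at the mode's pixel clock and htotal */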
8158 		num = (unsigned long long)m->clock * 1000 * 1000;
8159 		den = common_rates[i] * (unsigned long long)m->htotal;
8160 		target_vtotal = div_u64(num, den);
8161 		target_vtotal_diff = target_vtotal - m->vtotal;
8162 
8163 		/* Check for illegal modes */
8164 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8165 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8166 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8167 			continue;
8168 
8169 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8170 		if (!new_mode)
8171 			goto out;
8172 
8173 		new_mode->vtotal += (u16)target_vtotal_diff;
8174 		new_mode->vsync_start += (u16)target_vtotal_diff;
8175 		new_mode->vsync_end += (u16)target_vtotal_diff;
8176 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8177 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8178 
8179 		if (!is_duplicate_mode(aconnector, new_mode)) {
8180 			drm_mode_probed_add(&aconnector->base, new_mode);
8181 			new_modes_count += 1;
8182 		} else
8183 			drm_mode_destroy(aconnector->base.dev, new_mode);
8184 	}
8185  out:
8186 	return new_modes_count;
8187 }
8188 
8189 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8190 						   struct edid *edid)
8191 {
8192 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8193 		to_amdgpu_dm_connector(connector);
8194 
8195 	if (!(amdgpu_freesync_vid_mode && edid))
8196 		return;
8197 
8198 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8199 		amdgpu_dm_connector->num_modes +=
8200 			add_fs_modes(amdgpu_dm_connector);
8201 }
8202 
8203 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8204 {
8205 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8206 			to_amdgpu_dm_connector(connector);
8207 	struct drm_encoder *encoder;
8208 	struct edid *edid = amdgpu_dm_connector->edid;
8209 
8210 	encoder = amdgpu_dm_connector_to_encoder(connector);
8211 
8212 	if (!drm_edid_is_valid(edid)) {
8213 		amdgpu_dm_connector->num_modes =
8214 				drm_add_modes_noedid(connector, 640, 480);
8215 	} else {
8216 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8217 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8218 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8219 	}
8220 	amdgpu_dm_fbc_init(connector);
8221 
8222 	return amdgpu_dm_connector->num_modes;
8223 }
8224 
8225 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8226 				     struct amdgpu_dm_connector *aconnector,
8227 				     int connector_type,
8228 				     struct dc_link *link,
8229 				     int link_index)
8230 {
8231 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8232 
8233 	/*
8234 	 * Some of the properties below require access to state, like bpc.
8235 	 * Allocate some default initial connector state with our reset helper.
8236 	 */
8237 	if (aconnector->base.funcs->reset)
8238 		aconnector->base.funcs->reset(&aconnector->base);
8239 
8240 	aconnector->connector_id = link_index;
8241 	aconnector->dc_link = link;
8242 	aconnector->base.interlace_allowed = false;
8243 	aconnector->base.doublescan_allowed = false;
8244 	aconnector->base.stereo_allowed = false;
8245 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8246 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8247 	aconnector->audio_inst = -1;
8248 	mutex_init(&aconnector->hpd_lock);
8249 
8250 	/*
8251 	 * Configure HPD hot plug support: connector->polled defaults to 0,
8252 	 * which means HPD hot plug is not supported.
8253 	 */
8254 	switch (connector_type) {
8255 	case DRM_MODE_CONNECTOR_HDMIA:
8256 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8257 		aconnector->base.ycbcr_420_allowed =
8258 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8259 		break;
8260 	case DRM_MODE_CONNECTOR_DisplayPort:
8261 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8262 		aconnector->base.ycbcr_420_allowed =
8263 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8264 		break;
8265 	case DRM_MODE_CONNECTOR_DVID:
8266 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8267 		break;
8268 	default:
8269 		break;
8270 	}
8271 
8272 	drm_object_attach_property(&aconnector->base.base,
8273 				dm->ddev->mode_config.scaling_mode_property,
8274 				DRM_MODE_SCALE_NONE);
8275 
8276 	drm_object_attach_property(&aconnector->base.base,
8277 				adev->mode_info.underscan_property,
8278 				UNDERSCAN_OFF);
8279 	drm_object_attach_property(&aconnector->base.base,
8280 				adev->mode_info.underscan_hborder_property,
8281 				0);
8282 	drm_object_attach_property(&aconnector->base.base,
8283 				adev->mode_info.underscan_vborder_property,
8284 				0);
8285 
8286 	if (!aconnector->mst_port)
8287 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8288 
8289 	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8290 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8291 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8292 
8293 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8294 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8295 		drm_object_attach_property(&aconnector->base.base,
8296 				adev->mode_info.abm_level_property, 0);
8297 	}
8298 
8299 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8300 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8301 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8302 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8303 
8304 		if (!aconnector->mst_port)
8305 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8306 
8307 #ifdef CONFIG_DRM_AMD_DC_HDCP
8308 		if (adev->dm.hdcp_workqueue)
8309 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8310 #endif
8311 	}
8312 }
8313 
8314 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8315 			      struct i2c_msg *msgs, int num)
8316 {
8317 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8318 	struct ddc_service *ddc_service = i2c->ddc_service;
8319 	struct i2c_command cmd;
8320 	int i;
8321 	int result = -EIO;
8322 
8323 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8324 
8325 	if (!cmd.payloads)
8326 		return result;
8327 
8328 	cmd.number_of_payloads = num;
8329 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8330 	cmd.speed = 100;
8331 
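	/* Translate each i2c_msg into a DC i2c_payload so the whole transfer is submitted as one command */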
8332 	for (i = 0; i < num; i++) {
8333 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8334 		cmd.payloads[i].address = msgs[i].addr;
8335 		cmd.payloads[i].length = msgs[i].len;
8336 		cmd.payloads[i].data = msgs[i].buf;
8337 	}
8338 
8339 	if (dc_submit_i2c(
8340 			ddc_service->ctx->dc,
8341 			ddc_service->ddc_pin->hw_info.ddc_channel,
8342 			&cmd))
8343 		result = num;
8344 
8345 	kfree(cmd.payloads);
8346 	return result;
8347 }
8348 
8349 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8350 {
8351 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8352 }
8353 
8354 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8355 	.master_xfer = amdgpu_dm_i2c_xfer,
8356 	.functionality = amdgpu_dm_i2c_func,
8357 };
8358 
8359 static struct amdgpu_i2c_adapter *
8360 create_i2c(struct ddc_service *ddc_service,
8361 	   int link_index,
8362 	   int *res)
8363 {
8364 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8365 	struct amdgpu_i2c_adapter *i2c;
8366 
8367 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8368 	if (!i2c)
8369 		return NULL;
8370 	i2c->base.owner = THIS_MODULE;
8371 	i2c->base.class = I2C_CLASS_DDC;
8372 	i2c->base.dev.parent = &adev->pdev->dev;
8373 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8374 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8375 	i2c_set_adapdata(&i2c->base, i2c);
8376 	i2c->ddc_service = ddc_service;
8377 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8378 
8379 	return i2c;
8380 }
8381 
8382 
8383 /*
8384  * Note: this function assumes that dc_link_detect() was called for the
8385  * dc_link which will be represented by this aconnector.
8386  */
8387 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8388 				    struct amdgpu_dm_connector *aconnector,
8389 				    uint32_t link_index,
8390 				    struct amdgpu_encoder *aencoder)
8391 {
8392 	int res = 0;
8393 	int connector_type;
8394 	struct dc *dc = dm->dc;
8395 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8396 	struct amdgpu_i2c_adapter *i2c;
8397 
8398 	link->priv = aconnector;
8399 
8400 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8401 
8402 	i2c = create_i2c(link->ddc, link->link_index, &res);
8403 	if (!i2c) {
8404 		DRM_ERROR("Failed to create i2c adapter data\n");
8405 		return -ENOMEM;
8406 	}
8407 
8408 	aconnector->i2c = i2c;
8409 	res = i2c_add_adapter(&i2c->base);
8410 
8411 	if (res) {
8412 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8413 		goto out_free;
8414 	}
8415 
8416 	connector_type = to_drm_connector_type(link->connector_signal);
8417 
8418 	res = drm_connector_init_with_ddc(
8419 			dm->ddev,
8420 			&aconnector->base,
8421 			&amdgpu_dm_connector_funcs,
8422 			connector_type,
8423 			&i2c->base);
8424 
8425 	if (res) {
8426 		DRM_ERROR("connector_init failed\n");
8427 		aconnector->connector_id = -1;
8428 		goto out_free;
8429 	}
8430 
8431 	drm_connector_helper_add(
8432 			&aconnector->base,
8433 			&amdgpu_dm_connector_helper_funcs);
8434 
8435 	amdgpu_dm_connector_init_helper(
8436 		dm,
8437 		aconnector,
8438 		connector_type,
8439 		link,
8440 		link_index);
8441 
8442 	drm_connector_attach_encoder(
8443 		&aconnector->base, &aencoder->base);
8444 
8445 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8446 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8447 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8448 
8449 out_free:
8450 	if (res) {
8451 		kfree(i2c);
8452 		aconnector->i2c = NULL;
8453 	}
8454 	return res;
8455 }
8456 
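/* Build the possible_crtcs bitmask for an encoder: one bit per CRTC exposed by the ASIC */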
8457 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8458 {
8459 	switch (adev->mode_info.num_crtc) {
8460 	case 1:
8461 		return 0x1;
8462 	case 2:
8463 		return 0x3;
8464 	case 3:
8465 		return 0x7;
8466 	case 4:
8467 		return 0xf;
8468 	case 5:
8469 		return 0x1f;
8470 	case 6:
8471 	default:
8472 		return 0x3f;
8473 	}
8474 }
8475 
8476 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8477 				  struct amdgpu_encoder *aencoder,
8478 				  uint32_t link_index)
8479 {
8480 	struct amdgpu_device *adev = drm_to_adev(dev);
8481 
8482 	int res = drm_encoder_init(dev,
8483 				   &aencoder->base,
8484 				   &amdgpu_dm_encoder_funcs,
8485 				   DRM_MODE_ENCODER_TMDS,
8486 				   NULL);
8487 
8488 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8489 
8490 	if (!res)
8491 		aencoder->encoder_id = link_index;
8492 	else
8493 		aencoder->encoder_id = -1;
8494 
8495 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8496 
8497 	return res;
8498 }
8499 
8500 static void manage_dm_interrupts(struct amdgpu_device *adev,
8501 				 struct amdgpu_crtc *acrtc,
8502 				 bool enable)
8503 {
8504 	/*
8505 	 * We have no guarantee that the frontend index maps to the same
8506 	 * backend index - some even map to more than one.
8507 	 *
8508 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8509 	 */
8510 	int irq_type =
8511 		amdgpu_display_crtc_idx_to_irq_type(
8512 			adev,
8513 			acrtc->crtc_id);
8514 
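	/* Keep the vblank, pageflip and (when built in) secure-display vline0 interrupts in step with the CRTC state */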
8515 	if (enable) {
8516 		drm_crtc_vblank_on(&acrtc->base);
8517 		amdgpu_irq_get(
8518 			adev,
8519 			&adev->pageflip_irq,
8520 			irq_type);
8521 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8522 		amdgpu_irq_get(
8523 			adev,
8524 			&adev->vline0_irq,
8525 			irq_type);
8526 #endif
8527 	} else {
8528 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8529 		amdgpu_irq_put(
8530 			adev,
8531 			&adev->vline0_irq,
8532 			irq_type);
8533 #endif
8534 		amdgpu_irq_put(
8535 			adev,
8536 			&adev->pageflip_irq,
8537 			irq_type);
8538 		drm_crtc_vblank_off(&acrtc->base);
8539 	}
8540 }
8541 
8542 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8543 				      struct amdgpu_crtc *acrtc)
8544 {
8545 	int irq_type =
8546 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8547 
8548 	/*
8549 	 * This reads the current state for the IRQ and forcibly reapplies
8550 	 * the setting to hardware.
8551 	 */
8552 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8553 }
8554 
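/* Return true when the scaling mode or the underscan settings changed between the old and new connector state */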
8555 static bool
8556 is_scaling_state_different(const struct dm_connector_state *dm_state,
8557 			   const struct dm_connector_state *old_dm_state)
8558 {
8559 	if (dm_state->scaling != old_dm_state->scaling)
8560 		return true;
8561 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8562 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8563 			return true;
8564 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8565 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8566 			return true;
8567 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8568 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8569 		return true;
8570 	return false;
8571 }
8572 
8573 #ifdef CONFIG_DRM_AMD_DC_HDCP
8574 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
8575 					    struct drm_crtc_state *old_crtc_state,
8576 					    struct drm_connector_state *new_conn_state,
8577 					    struct drm_connector_state *old_conn_state,
8578 					    const struct drm_connector *connector,
8579 					    struct hdcp_workqueue *hdcp_w)
8580 {
8581 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8582 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8583 
8584 	pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
8585 		connector->index, connector->status, connector->dpms);
8586 	pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
8587 		old_conn_state->content_protection, new_conn_state->content_protection);
8588 
8589 	if (old_crtc_state)
8590 		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8591 		old_crtc_state->enable,
8592 		old_crtc_state->active,
8593 		old_crtc_state->mode_changed,
8594 		old_crtc_state->active_changed,
8595 		old_crtc_state->connectors_changed);
8596 
8597 	if (new_crtc_state)
8598 		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8599 		new_crtc_state->enable,
8600 		new_crtc_state->active,
8601 		new_crtc_state->mode_changed,
8602 		new_crtc_state->active_changed,
8603 		new_crtc_state->connectors_changed);
8604 
8605 	/* hdcp content type change */
8606 	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
8607 	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8608 		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8609 		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
8610 		return true;
8611 	}
8612 
8613 	/* CP is being re-enabled, ignore this */
8614 	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8615 	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8616 		if (new_crtc_state && new_crtc_state->mode_changed) {
8617 			new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8618 			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
8619 			return true;
8620 		}
8621 		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8622 		pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
8623 		return false;
8624 	}
8625 
8626 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
8627 	 *
8628 	 * Handles:	UNDESIRED -> ENABLED
8629 	 */
8630 	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8631 	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8632 		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8633 
8634 	/* Stream removed and re-enabled
8635 	 *
8636 	 * Can sometimes overlap with the HPD case,
8637 	 * thus set update_hdcp to false to avoid
8638 	 * setting HDCP multiple times.
8639 	 *
8640 	 * Handles:	DESIRED -> DESIRED (Special case)
8641 	 */
8642 	if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
8643 		new_conn_state->crtc && new_conn_state->crtc->enabled &&
8644 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8645 		dm_con_state->update_hdcp = false;
8646 		pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
8647 			__func__);
8648 		return true;
8649 	}
8650 
8651 	/* Hot-plug, headless s3, dpms
8652 	 *
8653 	 * Only start HDCP if the display is connected/enabled.
8654 	 * update_hdcp flag will be set to false until the next
8655 	 * HPD comes in.
8656 	 *
8657 	 * Handles:	DESIRED -> DESIRED (Special case)
8658 	 */
8659 	if (dm_con_state->update_hdcp &&
8660 	new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8661 	connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8662 		dm_con_state->update_hdcp = false;
8663 		pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
8664 			__func__);
8665 		return true;
8666 	}
8667 
8668 	if (old_conn_state->content_protection == new_conn_state->content_protection) {
8669 		if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8670 			if (new_crtc_state && new_crtc_state->mode_changed) {
8671 				pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
8672 					__func__);
8673 				return true;
8674 			}
8675 			pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
8676 				__func__);
8677 			return false;
8678 		}
8679 
8680 		pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
8681 		return false;
8682 	}
8683 
8684 	if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8685 		pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
8686 			__func__);
8687 		return true;
8688 	}
8689 
8690 	pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
8691 	return false;
8692 }
8693 #endif
8694 
8695 static void remove_stream(struct amdgpu_device *adev,
8696 			  struct amdgpu_crtc *acrtc,
8697 			  struct dc_stream_state *stream)
8698 {
8699 	/* this is the update mode case */
8700 
8701 	acrtc->otg_inst = -1;
8702 	acrtc->enabled = false;
8703 }
8704 
8705 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8706 			       struct dc_cursor_position *position)
8707 {
8708 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8709 	int x, y;
8710 	int xorigin = 0, yorigin = 0;
8711 
8712 	if (!crtc || !plane->state->fb)
8713 		return 0;
8714 
8715 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8716 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8717 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8718 			  __func__,
8719 			  plane->state->crtc_w,
8720 			  plane->state->crtc_h);
8721 		return -EINVAL;
8722 	}
8723 
8724 	x = plane->state->crtc_x;
8725 	y = plane->state->crtc_y;
8726 
8727 	if (x <= -amdgpu_crtc->max_cursor_width ||
8728 	    y <= -amdgpu_crtc->max_cursor_height)
8729 		return 0;
8730 
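	/* For a cursor partially off the top/left edge, clamp the position to 0 and shift the hotspot instead */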
8731 	if (x < 0) {
8732 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8733 		x = 0;
8734 	}
8735 	if (y < 0) {
8736 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8737 		y = 0;
8738 	}
8739 	position->enable = true;
8740 	position->translate_by_source = true;
8741 	position->x = x;
8742 	position->y = y;
8743 	position->x_hotspot = xorigin;
8744 	position->y_hotspot = yorigin;
8745 
8746 	return 0;
8747 }
8748 
8749 static void handle_cursor_update(struct drm_plane *plane,
8750 				 struct drm_plane_state *old_plane_state)
8751 {
8752 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8753 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8754 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8755 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8756 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8757 	uint64_t address = afb ? afb->address : 0;
8758 	struct dc_cursor_position position = {0};
8759 	struct dc_cursor_attributes attributes;
8760 	int ret;
8761 
8762 	if (!plane->state->fb && !old_plane_state->fb)
8763 		return;
8764 
8765 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8766 		      __func__,
8767 		      amdgpu_crtc->crtc_id,
8768 		      plane->state->crtc_w,
8769 		      plane->state->crtc_h);
8770 
8771 	ret = get_cursor_position(plane, crtc, &position);
8772 	if (ret)
8773 		return;
8774 
8775 	if (!position.enable) {
8776 		/* turn off cursor */
8777 		if (crtc_state && crtc_state->stream) {
8778 			mutex_lock(&adev->dm.dc_lock);
8779 			dc_stream_set_cursor_position(crtc_state->stream,
8780 						      &position);
8781 			mutex_unlock(&adev->dm.dc_lock);
8782 		}
8783 		return;
8784 	}
8785 
8786 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8787 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8788 
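	/* Program the new cursor surface (address, size, format) before updating its position below */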
8789 	memset(&attributes, 0, sizeof(attributes));
8790 	attributes.address.high_part = upper_32_bits(address);
8791 	attributes.address.low_part  = lower_32_bits(address);
8792 	attributes.width             = plane->state->crtc_w;
8793 	attributes.height            = plane->state->crtc_h;
8794 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8795 	attributes.rotation_angle    = 0;
8796 	attributes.attribute_flags.value = 0;
8797 
8798 	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
8799 	 * legacy gamma setup.
8800 	 */
8801 	if (crtc_state->cm_is_degamma_srgb &&
8802 	    adev->dm.dc->caps.color.dpp.gamma_corr)
8803 		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
8804 
8805 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8806 
8807 	if (crtc_state->stream) {
8808 		mutex_lock(&adev->dm.dc_lock);
8809 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8810 							 &attributes))
8811 			DRM_ERROR("DC failed to set cursor attributes\n");
8812 
8813 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8814 						   &position))
8815 			DRM_ERROR("DC failed to set cursor position\n");
8816 		mutex_unlock(&adev->dm.dc_lock);
8817 	}
8818 }
8819 
8820 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8821 {
8822 
8823 	assert_spin_locked(&acrtc->base.dev->event_lock);
8824 	WARN_ON(acrtc->event);
8825 
8826 	acrtc->event = acrtc->base.state->event;
8827 
8828 	/* Set the flip status */
8829 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8830 
8831 	/* Mark this event as consumed */
8832 	acrtc->base.state->event = NULL;
8833 
8834 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8835 		     acrtc->crtc_id);
8836 }
8837 
8838 static void update_freesync_state_on_stream(
8839 	struct amdgpu_display_manager *dm,
8840 	struct dm_crtc_state *new_crtc_state,
8841 	struct dc_stream_state *new_stream,
8842 	struct dc_plane_state *surface,
8843 	u32 flip_timestamp_in_us)
8844 {
8845 	struct mod_vrr_params vrr_params;
8846 	struct dc_info_packet vrr_infopacket = {0};
8847 	struct amdgpu_device *adev = dm->adev;
8848 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8849 	unsigned long flags;
8850 	bool pack_sdp_v1_3 = false;
8851 
8852 	if (!new_stream)
8853 		return;
8854 
8855 	/*
8856 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8857 	 * For now it's sufficient to just guard against these conditions.
8858 	 */
8859 
8860 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8861 		return;
8862 
8863 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8864 	vrr_params = acrtc->dm_irq_params.vrr_params;
8865 
8866 	if (surface) {
8867 		mod_freesync_handle_preflip(
8868 			dm->freesync_module,
8869 			surface,
8870 			new_stream,
8871 			flip_timestamp_in_us,
8872 			&vrr_params);
8873 
8874 		if (adev->family < AMDGPU_FAMILY_AI &&
8875 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8876 			mod_freesync_handle_v_update(dm->freesync_module,
8877 						     new_stream, &vrr_params);
8878 
8879 			/* Need to call this before the frame ends. */
8880 			dc_stream_adjust_vmin_vmax(dm->dc,
8881 						   new_crtc_state->stream,
8882 						   &vrr_params.adjust);
8883 		}
8884 	}
8885 
8886 	mod_freesync_build_vrr_infopacket(
8887 		dm->freesync_module,
8888 		new_stream,
8889 		&vrr_params,
8890 		PACKET_TYPE_VRR,
8891 		TRANSFER_FUNC_UNKNOWN,
8892 		&vrr_infopacket,
8893 		pack_sdp_v1_3);
8894 
8895 	new_crtc_state->freesync_timing_changed |=
8896 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8897 			&vrr_params.adjust,
8898 			sizeof(vrr_params.adjust)) != 0);
8899 
8900 	new_crtc_state->freesync_vrr_info_changed |=
8901 		(memcmp(&new_crtc_state->vrr_infopacket,
8902 			&vrr_infopacket,
8903 			sizeof(vrr_infopacket)) != 0);
8904 
8905 	acrtc->dm_irq_params.vrr_params = vrr_params;
8906 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8907 
8908 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8909 	new_stream->vrr_infopacket = vrr_infopacket;
8910 
8911 	if (new_crtc_state->freesync_vrr_info_changed)
8912 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8913 			      new_crtc_state->base.crtc->base.id,
8914 			      (int)new_crtc_state->base.vrr_enabled,
8915 			      (int)vrr_params.state);
8916 
8917 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8918 }
8919 
8920 static void update_stream_irq_parameters(
8921 	struct amdgpu_display_manager *dm,
8922 	struct dm_crtc_state *new_crtc_state)
8923 {
8924 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8925 	struct mod_vrr_params vrr_params;
8926 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8927 	struct amdgpu_device *adev = dm->adev;
8928 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8929 	unsigned long flags;
8930 
8931 	if (!new_stream)
8932 		return;
8933 
8934 	/*
8935 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8936 	 * For now it's sufficient to just guard against these conditions.
8937 	 */
8938 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8939 		return;
8940 
8941 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8942 	vrr_params = acrtc->dm_irq_params.vrr_params;
8943 
8944 	if (new_crtc_state->vrr_supported &&
8945 	    config.min_refresh_in_uhz &&
8946 	    config.max_refresh_in_uhz) {
8947 		/*
8948 		 * if freesync compatible mode was set, config.state will be set
8949 		 * in atomic check
8950 		 */
8951 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8952 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8953 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8954 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8955 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8956 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8957 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8958 		} else {
8959 			config.state = new_crtc_state->base.vrr_enabled ?
8960 						     VRR_STATE_ACTIVE_VARIABLE :
8961 						     VRR_STATE_INACTIVE;
8962 		}
8963 	} else {
8964 		config.state = VRR_STATE_UNSUPPORTED;
8965 	}
8966 
8967 	mod_freesync_build_vrr_params(dm->freesync_module,
8968 				      new_stream,
8969 				      &config, &vrr_params);
8970 
8971 	new_crtc_state->freesync_timing_changed |=
8972 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8973 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8974 
8975 	new_crtc_state->freesync_config = config;
8976 	/* Copy state for access from DM IRQ handler */
8977 	acrtc->dm_irq_params.freesync_config = config;
8978 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8979 	acrtc->dm_irq_params.vrr_params = vrr_params;
8980 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8981 }
8982 
8983 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8984 					    struct dm_crtc_state *new_state)
8985 {
8986 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8987 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8988 
8989 	if (!old_vrr_active && new_vrr_active) {
8990 		/* Transition VRR inactive -> active:
8991 		 * While VRR is active, we must not disable vblank irq, as a
8992 		 * reenable after disable would compute bogus vblank/pflip
8993 		 * timestamps if it likely happened inside display front-porch.
8994 		 *
8995 		 * We also need vupdate irq for the actual core vblank handling
8996 		 * at end of vblank.
8997 		 */
8998 		WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
8999 		WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
9000 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9001 				 __func__, new_state->base.crtc->base.id);
9002 	} else if (old_vrr_active && !new_vrr_active) {
9003 		/* Transition VRR active -> inactive:
9004 		 * Allow vblank irq disable again for fixed refresh rate.
9005 		 */
9006 		WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
9007 		drm_crtc_vblank_put(new_state->base.crtc);
9008 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9009 				 __func__, new_state->base.crtc->base.id);
9010 	}
9011 }
9012 
9013 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9014 {
9015 	struct drm_plane *plane;
9016 	struct drm_plane_state *old_plane_state;
9017 	int i;
9018 
9019 	/*
9020 	 * TODO: Make this per-stream so we don't issue redundant updates for
9021 	 * commits with multiple streams.
9022 	 */
9023 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9024 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9025 			handle_cursor_update(plane, old_plane_state);
9026 }
9027 
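/* Return the TTM memory type (e.g. VRAM or GTT) backing the framebuffer's buffer object */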
9028 static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
9029 {
9030 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
9031 
9032 	return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
9033 }
9034 
9035 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9036 				    struct dc_state *dc_state,
9037 				    struct drm_device *dev,
9038 				    struct amdgpu_display_manager *dm,
9039 				    struct drm_crtc *pcrtc,
9040 				    bool wait_for_vblank)
9041 {
9042 	uint32_t i;
9043 	uint64_t timestamp_ns;
9044 	struct drm_plane *plane;
9045 	struct drm_plane_state *old_plane_state, *new_plane_state;
9046 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9047 	struct drm_crtc_state *new_pcrtc_state =
9048 			drm_atomic_get_new_crtc_state(state, pcrtc);
9049 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9050 	struct dm_crtc_state *dm_old_crtc_state =
9051 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9052 	int planes_count = 0, vpos, hpos;
9053 	long r;
9054 	unsigned long flags;
9055 	struct amdgpu_bo *abo;
9056 	uint32_t target_vblank, last_flip_vblank;
9057 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9058 	bool pflip_present = false;
9059 	struct {
9060 		struct dc_surface_update surface_updates[MAX_SURFACES];
9061 		struct dc_plane_info plane_infos[MAX_SURFACES];
9062 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9063 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9064 		struct dc_stream_update stream_update;
9065 	} *bundle;
9066 
9067 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9068 
9069 	if (!bundle) {
9070 		dm_error("Failed to allocate update bundle\n");
9071 		goto cleanup;
9072 	}
9073 
9074 	/*
9075 	 * Disable the cursor first if we're disabling all the planes.
9076 	 * It'll remain on the screen after the planes are re-enabled
9077 	 * if we don't.
9078 	 */
9079 	if (acrtc_state->active_planes == 0)
9080 		amdgpu_dm_commit_cursors(state);
9081 
9082 	/* update planes when needed */
9083 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9084 		struct drm_crtc *crtc = new_plane_state->crtc;
9085 		struct drm_crtc_state *new_crtc_state;
9086 		struct drm_framebuffer *fb = new_plane_state->fb;
9087 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9088 		bool plane_needs_flip;
9089 		struct dc_plane_state *dc_plane;
9090 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9091 
9092 		/* Cursor plane is handled after stream updates */
9093 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9094 			continue;
9095 
9096 		if (!fb || !crtc || pcrtc != crtc)
9097 			continue;
9098 
9099 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9100 		if (!new_crtc_state->active)
9101 			continue;
9102 
9103 		dc_plane = dm_new_plane_state->dc_state;
9104 		if (!dc_plane)
9105 			continue;
9106 
9107 		bundle->surface_updates[planes_count].surface = dc_plane;
9108 		if (new_pcrtc_state->color_mgmt_changed) {
9109 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9110 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9111 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9112 		}
9113 
9114 		fill_dc_scaling_info(new_plane_state,
9115 				     &bundle->scaling_infos[planes_count]);
9116 
9117 		bundle->surface_updates[planes_count].scaling_info =
9118 			&bundle->scaling_infos[planes_count];
9119 
9120 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9121 
9122 		pflip_present = pflip_present || plane_needs_flip;
9123 
9124 		if (!plane_needs_flip) {
9125 			planes_count += 1;
9126 			continue;
9127 		}
9128 
9129 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9130 
9131 		/*
9132 		 * Wait for all fences on this FB. Do limited wait to avoid
9133 		 * deadlock during GPU reset when this fence will not signal
9134 		 * but we hold reservation lock for the BO.
9135 		 */
9136 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9137 					  msecs_to_jiffies(5000));
9138 		if (unlikely(r <= 0))
9139 			DRM_ERROR("Waiting for fences timed out!");
9140 
9141 		fill_dc_plane_info_and_addr(
9142 			dm->adev, new_plane_state,
9143 			afb->tiling_flags,
9144 			&bundle->plane_infos[planes_count],
9145 			&bundle->flip_addrs[planes_count].address,
9146 			afb->tmz_surface, false);
9147 
9148 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9149 				 new_plane_state->plane->index,
9150 				 bundle->plane_infos[planes_count].dcc.enable);
9151 
9152 		bundle->surface_updates[planes_count].plane_info =
9153 			&bundle->plane_infos[planes_count];
9154 
9155 		/*
9156 		 * Only allow immediate flips for fast updates that don't
9157 		 * change memory domain, FB pitch, DCC state, rotation or
9158 		 * mirroring.
9159 		 */
9160 		bundle->flip_addrs[planes_count].flip_immediate =
9161 			crtc->state->async_flip &&
9162 			acrtc_state->update_type == UPDATE_TYPE_FAST &&
9163 			get_mem_type(old_plane_state->fb) == get_mem_type(fb);
9164 
9165 		timestamp_ns = ktime_get_ns();
9166 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9167 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9168 		bundle->surface_updates[planes_count].surface = dc_plane;
9169 
9170 		if (!bundle->surface_updates[planes_count].surface) {
9171 			DRM_ERROR("No surface for CRTC: id=%d\n",
9172 					acrtc_attach->crtc_id);
9173 			continue;
9174 		}
9175 
9176 		if (plane == pcrtc->primary)
9177 			update_freesync_state_on_stream(
9178 				dm,
9179 				acrtc_state,
9180 				acrtc_state->stream,
9181 				dc_plane,
9182 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9183 
9184 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9185 				 __func__,
9186 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9187 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9188 
9189 		planes_count += 1;
9190 
9191 	}
9192 
9193 	if (pflip_present) {
9194 		if (!vrr_active) {
9195 			/* Use old throttling in non-vrr fixed refresh rate mode
9196 			 * to keep flip scheduling based on target vblank counts
9197 			 * working in a backwards compatible way, e.g., for
9198 			 * clients using the GLX_OML_sync_control extension or
9199 			 * DRI3/Present extension with defined target_msc.
9200 			 */
9201 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9202 		} else {
9204 			/* For variable refresh rate mode only:
9205 			 * Get vblank of last completed flip to avoid > 1 vrr
9206 			 * flips per video frame by use of throttling, but allow
9207 			 * flip programming anywhere in the possibly large
9208 			 * variable vrr vblank interval for fine-grained flip
9209 			 * timing control and more opportunity to avoid stutter
9210 			 * on late submission of flips.
9211 			 */
9212 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9213 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9214 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9215 		}
9216 
9217 		target_vblank = last_flip_vblank + wait_for_vblank;
9218 
9219 		/*
9220 		 * Wait until we're out of the vertical blank period before the one
9221 		 * targeted by the flip
9222 		 */
9223 		while ((acrtc_attach->enabled &&
9224 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9225 							    0, &vpos, &hpos, NULL,
9226 							    NULL, &pcrtc->hwmode)
9227 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9228 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9229 			(int)(target_vblank -
9230 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9231 			usleep_range(1000, 1100);
9232 		}
9233 
9234 		/*
9235 		 * Prepare the flip event for the pageflip interrupt to handle.
9236 		 *
9237 		 * This only works in the case where we've already turned on the
9238 		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
9239 		 * from 0 -> n planes we have to skip a hardware generated event
9240 		 * and rely on sending it from software.
9241 		 */
9242 		if (acrtc_attach->base.state->event &&
9243 		    acrtc_state->active_planes > 0) {
9244 			drm_crtc_vblank_get(pcrtc);
9245 
9246 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9247 
9248 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9249 			prepare_flip_isr(acrtc_attach);
9250 
9251 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9252 		}
9253 
9254 		if (acrtc_state->stream) {
9255 			if (acrtc_state->freesync_vrr_info_changed)
9256 				bundle->stream_update.vrr_infopacket =
9257 					&acrtc_state->stream->vrr_infopacket;
9258 		}
9259 	}
9260 
9261 	/* Update the planes if changed or disable if we don't have any. */
9262 	if ((planes_count || acrtc_state->active_planes == 0) &&
9263 		acrtc_state->stream) {
9264 #if defined(CONFIG_DRM_AMD_DC_DCN)
9265 		/*
9266 		 * If PSR or idle optimizations are enabled then flush out
9267 		 * any pending work before hardware programming.
9268 		 */
9269 		if (dm->vblank_control_workqueue)
9270 			flush_workqueue(dm->vblank_control_workqueue);
9271 #endif
9272 
9273 		bundle->stream_update.stream = acrtc_state->stream;
9274 		if (new_pcrtc_state->mode_changed) {
9275 			bundle->stream_update.src = acrtc_state->stream->src;
9276 			bundle->stream_update.dst = acrtc_state->stream->dst;
9277 		}
9278 
9279 		if (new_pcrtc_state->color_mgmt_changed) {
9280 			/*
9281 			 * TODO: This isn't fully correct since we've actually
9282 			 * already modified the stream in place.
9283 			 */
9284 			bundle->stream_update.gamut_remap =
9285 				&acrtc_state->stream->gamut_remap_matrix;
9286 			bundle->stream_update.output_csc_transform =
9287 				&acrtc_state->stream->csc_color_matrix;
9288 			bundle->stream_update.out_transfer_func =
9289 				acrtc_state->stream->out_transfer_func;
9290 		}
9291 
9292 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9293 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9294 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9295 
9296 		mutex_lock(&dm->dc_lock);
9297 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9298 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9299 			amdgpu_dm_psr_disable(acrtc_state->stream);
9300 		mutex_unlock(&dm->dc_lock);
9301 
9302 		/*
9303 		 * If FreeSync state on the stream has changed then we need to
9304 		 * re-adjust the min/max bounds now that DC doesn't handle this
9305 		 * as part of commit.
9306 		 */
9307 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9308 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9309 			dc_stream_adjust_vmin_vmax(
9310 				dm->dc, acrtc_state->stream,
9311 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9312 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9313 		}
9314 		mutex_lock(&dm->dc_lock);
9315 
9316 		update_planes_and_stream_adapter(dm->dc,
9317 					 acrtc_state->update_type,
9318 					 planes_count,
9319 					 acrtc_state->stream,
9320 					 &bundle->stream_update,
9321 					 bundle->surface_updates);
9322 
9323 		/*
9324 		 * Enable or disable the interrupts on the backend.
9325 		 *
9326 		 * Most pipes are put into power gating when unused.
9327 		 *
9328 		 * When power gating is enabled on a pipe we lose the
9329 		 * interrupt enablement state when power gating is disabled.
9330 		 *
9331 		 * So we need to update the IRQ control state in hardware
9332 		 * whenever the pipe turns on (since it could be previously
9333 		 * power gated) or off (since some pipes can't be power gated
9334 		 * on some ASICs).
9335 		 */
9336 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9337 			dm_update_pflip_irq_state(drm_to_adev(dev),
9338 						  acrtc_attach);
9339 
9340 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9341 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9342 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9343 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9344 
9345 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9346 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9347 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9348 			struct amdgpu_dm_connector *aconn =
9349 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9350 
9351 			if (aconn->psr_skip_count > 0)
9352 				aconn->psr_skip_count--;
9353 
9354 			/* Allow PSR when skip count is 0. */
9355 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9356 		} else {
9357 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9358 		}
9359 
9360 		mutex_unlock(&dm->dc_lock);
9361 	}
9362 
9363 	/*
9364 	 * Update cursor state *after* programming all the planes.
9365 	 * This avoids redundant programming in the case where we're going
9366 	 * to be disabling a single plane - those pipes are being disabled.
9367 	 */
9368 	if (acrtc_state->active_planes)
9369 		amdgpu_dm_commit_cursors(state);
9370 
9371 cleanup:
9372 	kfree(bundle);
9373 }
9374 
9375 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9376 				   struct drm_atomic_state *state)
9377 {
9378 	struct amdgpu_device *adev = drm_to_adev(dev);
9379 	struct amdgpu_dm_connector *aconnector;
9380 	struct drm_connector *connector;
9381 	struct drm_connector_state *old_con_state, *new_con_state;
9382 	struct drm_crtc_state *new_crtc_state;
9383 	struct dm_crtc_state *new_dm_crtc_state;
9384 	const struct dc_stream_status *status;
9385 	int i, inst;
9386 
9387 	/* Notify device removals. */
9388 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9389 		if (old_con_state->crtc != new_con_state->crtc) {
9390 			/* CRTC changes require notification. */
9391 			goto notify;
9392 		}
9393 
9394 		if (!new_con_state->crtc)
9395 			continue;
9396 
9397 		new_crtc_state = drm_atomic_get_new_crtc_state(
9398 			state, new_con_state->crtc);
9399 
9400 		if (!new_crtc_state)
9401 			continue;
9402 
9403 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9404 			continue;
9405 
9406 	notify:
9407 		aconnector = to_amdgpu_dm_connector(connector);
9408 
9409 		mutex_lock(&adev->dm.audio_lock);
9410 		inst = aconnector->audio_inst;
9411 		aconnector->audio_inst = -1;
9412 		mutex_unlock(&adev->dm.audio_lock);
9413 
9414 		amdgpu_dm_audio_eld_notify(adev, inst);
9415 	}
9416 
9417 	/* Notify audio device additions. */
9418 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9419 		if (!new_con_state->crtc)
9420 			continue;
9421 
9422 		new_crtc_state = drm_atomic_get_new_crtc_state(
9423 			state, new_con_state->crtc);
9424 
9425 		if (!new_crtc_state)
9426 			continue;
9427 
9428 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9429 			continue;
9430 
9431 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9432 		if (!new_dm_crtc_state->stream)
9433 			continue;
9434 
9435 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9436 		if (!status)
9437 			continue;
9438 
9439 		aconnector = to_amdgpu_dm_connector(connector);
9440 
9441 		mutex_lock(&adev->dm.audio_lock);
9442 		inst = status->audio_inst;
9443 		aconnector->audio_inst = inst;
9444 		mutex_unlock(&adev->dm.audio_lock);
9445 
9446 		amdgpu_dm_audio_eld_notify(adev, inst);
9447 	}
9448 }
9449 
9450 /*
9451  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9452  * @crtc_state: the DRM CRTC state
9453  * @stream_state: the DC stream state.
9454  *
9455  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9456  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9457  */
9458 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9459 						struct dc_stream_state *stream_state)
9460 {
9461 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9462 }
9463 
9464 /**
9465  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9466  * @state: The atomic state to commit
9467  *
9468  * This will tell DC to commit the constructed DC state from atomic_check,
9469  * programming the hardware. Any failure here implies a hardware failure, since
9470  * atomic check should have filtered anything non-kosher.
9471  */
9472 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9473 {
9474 	struct drm_device *dev = state->dev;
9475 	struct amdgpu_device *adev = drm_to_adev(dev);
9476 	struct amdgpu_display_manager *dm = &adev->dm;
9477 	struct dm_atomic_state *dm_state;
9478 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9479 	uint32_t i, j;
9480 	struct drm_crtc *crtc;
9481 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9482 	unsigned long flags;
9483 	bool wait_for_vblank = true;
9484 	struct drm_connector *connector;
9485 	struct drm_connector_state *old_con_state, *new_con_state;
9486 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9487 	int crtc_disable_count = 0;
9488 	bool mode_set_reset_required = false;
9489 
9490 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9491 
9492 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9493 
9494 	dm_state = dm_atomic_get_new_state(state);
9495 	if (dm_state && dm_state->context) {
9496 		dc_state = dm_state->context;
9497 	} else {
9498 		/* No state changes, retain current state. */
9499 		dc_state_temp = dc_create_state(dm->dc);
9500 		ASSERT(dc_state_temp);
9501 		dc_state = dc_state_temp;
9502 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9503 	}
9504 
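	/* Disable interrupts and release the old stream for CRTCs that are being turned off or fully modeset */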
9505 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9506 				       new_crtc_state, i) {
9507 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9508 
9509 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9510 
9511 		if (old_crtc_state->active &&
9512 		    (!new_crtc_state->active ||
9513 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9514 			manage_dm_interrupts(adev, acrtc, false);
9515 			dc_stream_release(dm_old_crtc_state->stream);
9516 		}
9517 	}
9518 
9519 	drm_atomic_helper_calc_timestamping_constants(state);
9520 
9521 	/* update changed items */
9522 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9523 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9524 
9525 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9526 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9527 
9528 		DRM_DEBUG_ATOMIC(
9529 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9530 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9531 			"connectors_changed:%d\n",
9532 			acrtc->crtc_id,
9533 			new_crtc_state->enable,
9534 			new_crtc_state->active,
9535 			new_crtc_state->planes_changed,
9536 			new_crtc_state->mode_changed,
9537 			new_crtc_state->active_changed,
9538 			new_crtc_state->connectors_changed);
9539 
9540 		/* Disable cursor if disabling crtc */
9541 		if (old_crtc_state->active && !new_crtc_state->active) {
9542 			struct dc_cursor_position position;
9543 
9544 			memset(&position, 0, sizeof(position));
9545 			mutex_lock(&dm->dc_lock);
9546 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9547 			mutex_unlock(&dm->dc_lock);
9548 		}
9549 
9550 		/* Copy all transient state flags into dc state */
9551 		if (dm_new_crtc_state->stream) {
9552 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9553 							    dm_new_crtc_state->stream);
9554 		}
9555 
9556 		/* handles headless hotplug case, updating new_state and
9557 		 * aconnector as needed
9558 		 */
9559 
9560 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9561 
9562 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9563 
9564 			if (!dm_new_crtc_state->stream) {
9565 				/*
9566 				 * This can happen because of issues with
9567 				 * delivery of userspace notifications.
9568 				 * In that case userspace tries to set a mode on
9569 				 * a display which is in fact disconnected, so
9570 				 * dc_sink is NULL on the aconnector.
9571 				 * We expect a mode reset to come soon.
9572 				 *
9573 				 * This can also happen when an unplug occurs
9574 				 * during the resume sequence.
9575 				 *
9576 				 * In this case, we want to pretend we still
9577 				 * have a sink to keep the pipe running so that
9578 				 * hw state is consistent with the sw state.
9579 				 */
9580 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9581 						__func__, acrtc->base.base.id);
9582 				continue;
9583 			}
9584 
9585 			if (dm_old_crtc_state->stream)
9586 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9587 
9588 			pm_runtime_get_noresume(dev->dev);
9589 
9590 			acrtc->enabled = true;
9591 			acrtc->hw_mode = new_crtc_state->mode;
9592 			crtc->hwmode = new_crtc_state->mode;
9593 			mode_set_reset_required = true;
9594 		} else if (modereset_required(new_crtc_state)) {
9595 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9596 			/* i.e. reset mode */
9597 			if (dm_old_crtc_state->stream)
9598 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9599 
9600 			mode_set_reset_required = true;
9601 		}
9602 	} /* for_each_crtc_in_state() */
9603 
9604 	if (dc_state) {
9605 		/* if there is a mode set or reset, disable eDP PSR */
9606 		if (mode_set_reset_required) {
9607 #if defined(CONFIG_DRM_AMD_DC_DCN)
9608 			if (dm->vblank_control_workqueue)
9609 				flush_workqueue(dm->vblank_control_workqueue);
9610 #endif
9611 			amdgpu_dm_psr_disable_all(dm);
9612 		}
9613 
9614 		dm_enable_per_frame_crtc_master_sync(dc_state);
9615 		mutex_lock(&dm->dc_lock);
9616 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9617 #if defined(CONFIG_DRM_AMD_DC_DCN)
9618 		/* Allow idle optimization when vblank count is 0 for display off */
9619 		if (dm->active_vblank_irq_count == 0)
9620 			dc_allow_idle_optimizations(dm->dc, true);
9621 #endif
9622 		mutex_unlock(&dm->dc_lock);
9623 	}
9624 
9625 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9626 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9627 
9628 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9629 
9630 		if (dm_new_crtc_state->stream != NULL) {
9631 			const struct dc_stream_status *status =
9632 					dc_stream_get_status(dm_new_crtc_state->stream);
9633 
9634 			if (!status)
9635 				status = dc_stream_get_status_from_state(dc_state,
9636 									 dm_new_crtc_state->stream);
9637 			if (!status)
9638 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9639 			else
9640 				acrtc->otg_inst = status->primary_otg_inst;
9641 		}
9642 	}
9643 #ifdef CONFIG_DRM_AMD_DC_HDCP
9644 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9645 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9646 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9647 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9648 
9649 		if (!adev->dm.hdcp_workqueue)
9650 			continue;
9651 
9652 		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
9653 
9654 		if (!connector)
9655 			continue;
9656 
9657 		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
9658 			connector->index, connector->status, connector->dpms);
9659 		pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
9660 			old_con_state->content_protection, new_con_state->content_protection);
9661 
9662 		if (aconnector->dc_sink) {
9663 			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
9664 				aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
9665 				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
9666 				aconnector->dc_sink->edid_caps.display_name);
9667 			}
9668 		}
9669 
9670 		new_crtc_state = NULL;
9671 		old_crtc_state = NULL;
9672 
9673 		if (acrtc) {
9674 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9675 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9676 		}
9677 
9678 		if (old_crtc_state)
9679 			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9680 			old_crtc_state->enable,
9681 			old_crtc_state->active,
9682 			old_crtc_state->mode_changed,
9683 			old_crtc_state->active_changed,
9684 			old_crtc_state->connectors_changed);
9685 
9686 		if (new_crtc_state)
9687 			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9688 			new_crtc_state->enable,
9689 			new_crtc_state->active,
9690 			new_crtc_state->mode_changed,
9691 			new_crtc_state->active_changed,
9692 			new_crtc_state->connectors_changed);
9693 	}
9694 
9695 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9696 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9697 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9698 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9699 
9700 		if (!adev->dm.hdcp_workqueue)
9701 			continue;
9702 
9703 		new_crtc_state = NULL;
9704 		old_crtc_state = NULL;
9705 
9706 		if (acrtc) {
9707 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9708 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9709 		}
9710 
9711 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9712 
9713 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9714 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9715 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9716 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9717 			dm_new_con_state->update_hdcp = true;
9718 			continue;
9719 		}
9720 
9721 		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
9722 											old_con_state, connector, adev->dm.hdcp_workqueue)) {
9723 			/* When a display is unplugged from an MST hub, the connector
9724 			 * will be destroyed within dm_dp_mst_connector_destroy. The
9725 			 * connector's HDCP properties (type, undesired, desired,
9726 			 * enabled) would be lost, so save them into hdcp_work within
9727 			 * amdgpu_dm_atomic_commit_tail. If the same display is
9728 			 * plugged back with the same display index, its HDCP properties
9729 			 * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
9730 			 */
9731 
9732 			bool enable_encryption = false;
9733 
9734 			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
9735 				enable_encryption = true;
9736 
9737 			if (aconnector->dc_link && aconnector->dc_sink &&
9738 				aconnector->dc_link->type == dc_connection_mst_branch) {
9739 				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
9740 				struct hdcp_workqueue *hdcp_w =
9741 					&hdcp_work[aconnector->dc_link->link_index];
9742 
9743 				hdcp_w->hdcp_content_type[connector->index] =
9744 					new_con_state->hdcp_content_type;
9745 				hdcp_w->content_protection[connector->index] =
9746 					new_con_state->content_protection;
9747 			}
9748 
9749 			if (new_crtc_state && new_crtc_state->mode_changed &&
9750 				new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
9751 				enable_encryption = true;
9752 
9753 			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
9754 
9755 			hdcp_update_display(
9756 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9757 				new_con_state->hdcp_content_type, enable_encryption);
9758 		}
9759 	}
9760 #endif
9761 
9762 	/* Handle connector state changes */
9763 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9764 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9765 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9766 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9767 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9768 		struct dc_stream_update stream_update;
9769 		struct dc_info_packet hdr_packet;
9770 		struct dc_stream_status *status = NULL;
9771 		bool abm_changed, hdr_changed, scaling_changed;
9772 
9773 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9774 		memset(&stream_update, 0, sizeof(stream_update));
9775 
9776 		if (acrtc) {
9777 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9778 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9779 		}
9780 
9781 		/* Skip any modesets/resets */
9782 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9783 			continue;
9784 
9785 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9786 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9787 
9788 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9789 							     dm_old_con_state);
9790 
9791 		abm_changed = dm_new_crtc_state->abm_level !=
9792 			      dm_old_crtc_state->abm_level;
9793 
9794 		hdr_changed =
9795 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9796 
9797 		if (!scaling_changed && !abm_changed && !hdr_changed)
9798 			continue;
9799 
9800 		stream_update.stream = dm_new_crtc_state->stream;
9801 		if (scaling_changed) {
9802 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9803 					dm_new_con_state, dm_new_crtc_state->stream);
9804 
9805 			stream_update.src = dm_new_crtc_state->stream->src;
9806 			stream_update.dst = dm_new_crtc_state->stream->dst;
9807 		}
9808 
9809 		if (abm_changed) {
9810 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9811 
9812 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9813 		}
9814 
9815 		if (hdr_changed) {
9816 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9817 			stream_update.hdr_static_metadata = &hdr_packet;
9818 		}
9819 
9820 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9821 
9822 		if (WARN_ON(!status))
9823 			continue;
9824 
9825 		WARN_ON(!status->plane_count);
9826 
9827 		/*
9828 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9829 		 * Here we create an empty update on each plane.
9830 		 * To fix this, DC should permit updating only stream properties.
9831 		 */
9832 		for (j = 0; j < status->plane_count; j++)
9833 			dummy_updates[j].surface = status->plane_states[0];
9834 
9835 
9836 		mutex_lock(&dm->dc_lock);
9837 		dc_update_planes_and_stream(dm->dc,
9838 					    dummy_updates,
9839 					    status->plane_count,
9840 					    dm_new_crtc_state->stream,
9841 					    &stream_update);
9842 		mutex_unlock(&dm->dc_lock);
9843 	}
9844 
9845 	/**
9846 	 * Enable interrupts for CRTCs that are newly enabled or went through
9847 	 * a modeset. It was intentionally deferred until after the front end
9848 	 * state was modified to wait until the OTG was on and so the IRQ
9849 	 * handlers didn't access stale or invalid state.
9850 	 */
9851 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9852 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9853 #ifdef CONFIG_DEBUG_FS
9854 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9855 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9856 		struct crc_rd_work *crc_rd_wrk;
9857 #endif
9858 #endif
9859 		/* Count number of newly disabled CRTCs for dropping PM refs later. */
9860 		if (old_crtc_state->active && !new_crtc_state->active)
9861 			crtc_disable_count++;
9862 
9863 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9864 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9865 
9866 		/* For freesync config update on crtc state and params for irq */
9867 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9868 
9869 #ifdef CONFIG_DEBUG_FS
9870 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9871 		crc_rd_wrk = dm->crc_rd_wrk;
9872 #endif
9873 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9874 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9875 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9876 #endif
9877 
9878 		if (new_crtc_state->active &&
9879 		    (!old_crtc_state->active ||
9880 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9881 			dc_stream_retain(dm_new_crtc_state->stream);
9882 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9883 			manage_dm_interrupts(adev, acrtc, true);
9884 		}
9885 		/* Handle vrr on->off / off->on transitions */
9886 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
9887 
9888 #ifdef CONFIG_DEBUG_FS
9889 		if (new_crtc_state->active &&
9890 		    (!old_crtc_state->active ||
9891 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9892 			/**
9893 			 * Frontend may have changed so reapply the CRC capture
9894 			 * settings for the stream.
9895 			 */
9896 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9897 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9898 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9899 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9900 					acrtc->dm_irq_params.crc_window.update_win = true;
9901 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9902 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9903 					crc_rd_wrk->crtc = crtc;
9904 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9905 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9906 				}
9907 #endif
9908 				if (amdgpu_dm_crtc_configure_crc_source(
9909 					crtc, dm_new_crtc_state, cur_crc_src))
9910 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9911 			}
9912 		}
9913 #endif
9914 	}
9915 
9916 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9917 		if (new_crtc_state->async_flip)
9918 			wait_for_vblank = false;
9919 
9920 	/* update planes when needed per crtc*/
9921 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9922 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9923 
9924 		if (dm_new_crtc_state->stream)
9925 			amdgpu_dm_commit_planes(state, dc_state, dev,
9926 						dm, crtc, wait_for_vblank);
9927 	}
9928 
9929 	/* Update audio instances for each connector. */
9930 	amdgpu_dm_commit_audio(dev, state);
9931 
9932 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9933 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9934 	/* restore the backlight level */
9935 	for (i = 0; i < dm->num_of_edps; i++) {
9936 		if (dm->backlight_dev[i] &&
9937 		    (dm->actual_brightness[i] != dm->brightness[i]))
9938 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9939 	}
9940 #endif
9941 	/*
9942 	 * send vblank event on all events not handled in flip and
9943 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9944 	 */
9945 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9946 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9947 
9948 		if (new_crtc_state->event)
9949 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9950 
9951 		new_crtc_state->event = NULL;
9952 	}
9953 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9954 
9955 	/* Signal HW programming completion */
9956 	drm_atomic_helper_commit_hw_done(state);
9957 
9958 	if (wait_for_vblank)
9959 		drm_atomic_helper_wait_for_flip_done(dev, state);
9960 
9961 	drm_atomic_helper_cleanup_planes(dev, state);
9962 
9963 	/* return the stolen vga memory back to VRAM */
9964 	if (!adev->mman.keep_stolen_vga_memory)
9965 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9966 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9967 
9968 	/*
9969 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9970 	 * so we can put the GPU into runtime suspend if we're not driving any
9971 	 * displays anymore
9972 	 */
9973 	for (i = 0; i < crtc_disable_count; i++)
9974 		pm_runtime_put_autosuspend(dev->dev);
9975 	pm_runtime_mark_last_busy(dev->dev);
9976 
9977 	if (dc_state_temp)
9978 		dc_release_state(dc_state_temp);
9979 }
9980 
9981 
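/*
 * Build and commit a minimal atomic state (connector, CRTC and primary
 * plane) with mode_changed forced, so that the CRTC currently driving
 * @connector goes through a full modeset and the previous display setting
 * is restored without relying on userspace.
 */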
9982 static int dm_force_atomic_commit(struct drm_connector *connector)
9983 {
9984 	int ret = 0;
9985 	struct drm_device *ddev = connector->dev;
9986 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9987 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9988 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9989 	struct drm_connector_state *conn_state;
9990 	struct drm_crtc_state *crtc_state;
9991 	struct drm_plane_state *plane_state;
9992 
9993 	if (!state)
9994 		return -ENOMEM;
9995 
9996 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9997 
9998 	/* Construct an atomic state to restore previous display setting */
9999 
10000 	/*
10001 	 * Attach connectors to drm_atomic_state
10002 	 */
10003 	conn_state = drm_atomic_get_connector_state(state, connector);
10004 
10005 	ret = PTR_ERR_OR_ZERO(conn_state);
10006 	if (ret)
10007 		goto out;
10008 
10009 	/* Attach crtc to drm_atomic_state*/
10010 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10011 
10012 	ret = PTR_ERR_OR_ZERO(crtc_state);
10013 	if (ret)
10014 		goto out;
10015 
10016 	/* force a restore */
10017 	crtc_state->mode_changed = true;
10018 
10019 	/* Attach plane to drm_atomic_state */
10020 	plane_state = drm_atomic_get_plane_state(state, plane);
10021 
10022 	ret = PTR_ERR_OR_ZERO(plane_state);
10023 	if (ret)
10024 		goto out;
10025 
10026 	/* Call commit internally with the state we just constructed */
10027 	ret = drm_atomic_commit(state);
10028 
10029 out:
10030 	drm_atomic_state_put(state);
10031 	if (ret)
10032 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10033 
10034 	return ret;
10035 }
10036 
10037 /*
10038  * This function handles all cases where a set mode does not come upon hotplug.
10039  * This includes when a display is unplugged and then plugged back into the
10040  * same port, and when running without usermode desktop manager support.
10041  */
10042 void dm_restore_drm_connector_state(struct drm_device *dev,
10043 				    struct drm_connector *connector)
10044 {
10045 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10046 	struct amdgpu_crtc *disconnected_acrtc;
10047 	struct dm_crtc_state *acrtc_state;
10048 
10049 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10050 		return;
10051 
10052 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10053 	if (!disconnected_acrtc)
10054 		return;
10055 
10056 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10057 	if (!acrtc_state->stream)
10058 		return;
10059 
10060 	/*
10061 	 * If the previous sink is not released and different from the current,
10062 	 * we deduce we are in a state where we cannot rely on a usermode call
10063 	 * to turn on the display, so we do it here.
10064 	 */
10065 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10066 		dm_force_atomic_commit(&aconnector->base);
10067 }
10068 
10069 /*
10070  * Grabs all modesetting locks to serialize against any blocking commits,
10071  * and waits for completion of all non-blocking commits.
10072  */
10073 static int do_aquire_global_lock(struct drm_device *dev,
10074 				 struct drm_atomic_state *state)
10075 {
10076 	struct drm_crtc *crtc;
10077 	struct drm_crtc_commit *commit;
10078 	long ret;
10079 
10080 	/*
10081 	 * Adding all modeset locks to acquire_ctx will
10082 	 * ensure that when the framework releases it, the
10083 	 * extra locks we are taking here will get released too.
10084 	 */
10085 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10086 	if (ret)
10087 		return ret;
10088 
10089 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10090 		spin_lock(&crtc->commit_lock);
10091 		commit = list_first_entry_or_null(&crtc->commit_list,
10092 				struct drm_crtc_commit, commit_entry);
10093 		if (commit)
10094 			drm_crtc_commit_get(commit);
10095 		spin_unlock(&crtc->commit_lock);
10096 
10097 		if (!commit)
10098 			continue;
10099 
10100 		/*
10101 		 * Make sure all pending HW programming completed and
10102 		 * page flips done
10103 		 */
10104 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10105 
10106 		if (ret > 0)
10107 			ret = wait_for_completion_interruptible_timeout(
10108 					&commit->flip_done, 10*HZ);
10109 
10110 		if (ret == 0)
10111 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10112 				  "timed out\n", crtc->base.id, crtc->name);
10113 
10114 		drm_crtc_commit_put(commit);
10115 	}
10116 
10117 	return ret < 0 ? ret : 0;
10118 }
10119 
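/*
 * Derive the FreeSync (VRR) configuration for a CRTC from the connector's
 * reported refresh range. VRR is only considered supported when the
 * connector is freesync capable and the mode's refresh rate lies within
 * [min_vfreq, max_vfreq]; the resulting state is fixed, variable or
 * inactive depending on the freesync video mode and the DRM vrr_enabled
 * property.
 */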
10120 static void get_freesync_config_for_crtc(
10121 	struct dm_crtc_state *new_crtc_state,
10122 	struct dm_connector_state *new_con_state)
10123 {
10124 	struct mod_freesync_config config = {0};
10125 	struct amdgpu_dm_connector *aconnector =
10126 			to_amdgpu_dm_connector(new_con_state->base.connector);
10127 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10128 	int vrefresh = drm_mode_vrefresh(mode);
10129 	bool fs_vid_mode = false;
10130 
10131 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10132 					vrefresh >= aconnector->min_vfreq &&
10133 					vrefresh <= aconnector->max_vfreq;
10134 
10135 	if (new_crtc_state->vrr_supported) {
10136 		new_crtc_state->stream->ignore_msa_timing_param = true;
10137 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10138 
10139 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10140 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10141 		config.vsif_supported = true;
10142 		config.btr = true;
10143 
10144 		if (fs_vid_mode) {
10145 			config.state = VRR_STATE_ACTIVE_FIXED;
10146 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10147 			goto out;
10148 		} else if (new_crtc_state->base.vrr_enabled) {
10149 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10150 		} else {
10151 			config.state = VRR_STATE_INACTIVE;
10152 		}
10153 	}
10154 out:
10155 	new_crtc_state->freesync_config = config;
10156 }
10157 
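/* Mark VRR as unsupported and clear any stale VRR infopacket on the CRTC. */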
10158 static void reset_freesync_config_for_crtc(
10159 	struct dm_crtc_state *new_crtc_state)
10160 {
10161 	new_crtc_state->vrr_supported = false;
10162 
10163 	memset(&new_crtc_state->vrr_infopacket, 0,
10164 	       sizeof(new_crtc_state->vrr_infopacket));
10165 }
10166 
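/*
 * Return true when the new mode differs from the old one only in its
 * vertical blanking: pixel clock, horizontal timing, hskew, vscan and the
 * vsync pulse width all match, while vtotal and the vsync position change.
 * Such a front-porch-only change can be applied without a full modeset for
 * freesync video modes.
 */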
10167 static bool
10168 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10169 				 struct drm_crtc_state *new_crtc_state)
10170 {
10171 	struct drm_display_mode old_mode, new_mode;
10172 
10173 	if (!old_crtc_state || !new_crtc_state)
10174 		return false;
10175 
10176 	old_mode = old_crtc_state->mode;
10177 	new_mode = new_crtc_state->mode;
10178 
10179 	if (old_mode.clock       == new_mode.clock &&
10180 	    old_mode.hdisplay    == new_mode.hdisplay &&
10181 	    old_mode.vdisplay    == new_mode.vdisplay &&
10182 	    old_mode.htotal      == new_mode.htotal &&
10183 	    old_mode.vtotal      != new_mode.vtotal &&
10184 	    old_mode.hsync_start == new_mode.hsync_start &&
10185 	    old_mode.vsync_start != new_mode.vsync_start &&
10186 	    old_mode.hsync_end   == new_mode.hsync_end &&
10187 	    old_mode.vsync_end   != new_mode.vsync_end &&
10188 	    old_mode.hskew       == new_mode.hskew &&
10189 	    old_mode.vscan       == new_mode.vscan &&
10190 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10191 	    (new_mode.vsync_end - new_mode.vsync_start))
10192 		return true;
10193 
10194 	return false;
10195 }
10196 
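/*
 * Put the CRTC into fixed-refresh VRR and compute the fixed refresh rate in
 * micro-Hz from the mode timing:
 * refresh_uhz = clock_khz * 1000 * 1000000 / (htotal * vtotal).
 */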
10197 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10198 	uint64_t num, den, res;
10199 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10200 
10201 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10202 
10203 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10204 	den = (unsigned long long)new_crtc_state->mode.htotal *
10205 	      (unsigned long long)new_crtc_state->mode.vtotal;
10206 
10207 	res = div_u64(num, den);
10208 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10209 }
10210 
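/*
 * Validate and apply the CRTC-level changes for one CRTC in the atomic
 * state: create or remove the dc_stream backing the CRTC, decide whether a
 * full modeset is really required, and update stream properties that do not
 * need one (scaling, ABM, color management, FreeSync). Sets
 * *lock_and_validation_needed whenever the global DC context is modified.
 */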
10211 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10212 				struct drm_atomic_state *state,
10213 				struct drm_crtc *crtc,
10214 				struct drm_crtc_state *old_crtc_state,
10215 				struct drm_crtc_state *new_crtc_state,
10216 				bool enable,
10217 				bool *lock_and_validation_needed)
10218 {
10219 	struct dm_atomic_state *dm_state = NULL;
10220 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10221 	struct dc_stream_state *new_stream;
10222 	int ret = 0;
10223 
10224 	/*
10225 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10226 	 * update changed items
10227 	 */
10228 	struct amdgpu_crtc *acrtc = NULL;
10229 	struct amdgpu_dm_connector *aconnector = NULL;
10230 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10231 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10232 
10233 	new_stream = NULL;
10234 
10235 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10236 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10237 	acrtc = to_amdgpu_crtc(crtc);
10238 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10239 
10240 	/* TODO This hack should go away */
10241 	if (aconnector && enable) {
10242 		/* Make sure fake sink is created in plug-in scenario */
10243 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10244 							    &aconnector->base);
10245 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10246 							    &aconnector->base);
10247 
10248 		if (IS_ERR(drm_new_conn_state)) {
10249 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10250 			goto fail;
10251 		}
10252 
10253 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10254 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10255 
10256 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10257 			goto skip_modeset;
10258 
10259 		new_stream = create_validate_stream_for_sink(aconnector,
10260 							     &new_crtc_state->mode,
10261 							     dm_new_conn_state,
10262 							     dm_old_crtc_state->stream);
10263 
10264 		/*
10265 		 * We can have no stream on ACTION_SET if a display
10266 		 * was disconnected during S3. In this case it is not an
10267 		 * error; the OS will be updated after detection and
10268 		 * will do the right thing on the next atomic commit.
10269 		 */
10270 
10271 		if (!new_stream) {
10272 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10273 					__func__, acrtc->base.base.id);
10274 			ret = -ENOMEM;
10275 			goto fail;
10276 		}
10277 
10278 		/*
10279 		 * TODO: Check VSDB bits to decide whether this should
10280 		 * be enabled or not.
10281 		 */
10282 		new_stream->triggered_crtc_reset.enabled =
10283 			dm->force_timing_sync;
10284 
10285 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10286 
10287 		ret = fill_hdr_info_packet(drm_new_conn_state,
10288 					   &new_stream->hdr_static_metadata);
10289 		if (ret)
10290 			goto fail;
10291 
10292 		/*
10293 		 * If we already removed the old stream from the context
10294 		 * (and set the new stream to NULL) then we can't reuse
10295 		 * the old stream even if the stream and scaling are unchanged.
10296 		 * We'll hit the BUG_ON and black screen.
10297 		 *
10298 		 * TODO: Refactor this function to allow this check to work
10299 		 * in all conditions.
10300 		 */
10301 		if (amdgpu_freesync_vid_mode &&
10302 		    dm_new_crtc_state->stream &&
10303 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10304 			goto skip_modeset;
10305 
10306 		if (dm_new_crtc_state->stream &&
10307 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10308 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10309 			new_crtc_state->mode_changed = false;
10310 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10311 					 new_crtc_state->mode_changed);
10312 		}
10313 	}
10314 
10315 	/* mode_changed flag may get updated above, need to check again */
10316 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10317 		goto skip_modeset;
10318 
10319 	DRM_DEBUG_ATOMIC(
10320 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10321 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10322 		"connectors_changed:%d\n",
10323 		acrtc->crtc_id,
10324 		new_crtc_state->enable,
10325 		new_crtc_state->active,
10326 		new_crtc_state->planes_changed,
10327 		new_crtc_state->mode_changed,
10328 		new_crtc_state->active_changed,
10329 		new_crtc_state->connectors_changed);
10330 
10331 	/* Remove stream for any changed/disabled CRTC */
10332 	if (!enable) {
10333 
10334 		if (!dm_old_crtc_state->stream)
10335 			goto skip_modeset;
10336 
10337 		/* Unset freesync video if it was active before */
10338 		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
10339 			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
10340 			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
10341 		}
10342 
10343 		/* Now check if we should set freesync video mode */
10344 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10345 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10346 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
10347 		    is_timing_unchanged_for_freesync(new_crtc_state,
10348 						     old_crtc_state)) {
10349 			new_crtc_state->mode_changed = false;
10350 			DRM_DEBUG_DRIVER(
10351 				"Mode change not required for front porch change, "
10352 				"setting mode_changed to %d",
10353 				new_crtc_state->mode_changed);
10354 
10355 			set_freesync_fixed_config(dm_new_crtc_state);
10356 
10357 			goto skip_modeset;
10358 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10359 			   is_freesync_video_mode(&new_crtc_state->mode,
10360 						  aconnector)) {
10361 			struct drm_display_mode *high_mode;
10362 
10363 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10364 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10365 				set_freesync_fixed_config(dm_new_crtc_state);
10366 			}
10367 		}
10368 
10369 		ret = dm_atomic_get_state(state, &dm_state);
10370 		if (ret)
10371 			goto fail;
10372 
10373 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10374 				crtc->base.id);
10375 
10376 		/* i.e. reset mode */
10377 		if (dc_remove_stream_from_ctx(
10378 				dm->dc,
10379 				dm_state->context,
10380 				dm_old_crtc_state->stream) != DC_OK) {
10381 			ret = -EINVAL;
10382 			goto fail;
10383 		}
10384 
10385 		dc_stream_release(dm_old_crtc_state->stream);
10386 		dm_new_crtc_state->stream = NULL;
10387 
10388 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10389 
10390 		*lock_and_validation_needed = true;
10391 
10392 	} else {/* Add stream for any updated/enabled CRTC */
10393 		/*
10394 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10395 		 * added MST connectors are not found in the existing crtc_state in chained mode.
10396 		 * TODO: dig out the root cause of this.
10397 		 */
10398 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10399 			goto skip_modeset;
10400 
10401 		if (modereset_required(new_crtc_state))
10402 			goto skip_modeset;
10403 
10404 		if (modeset_required(new_crtc_state, new_stream,
10405 				     dm_old_crtc_state->stream)) {
10406 
10407 			WARN_ON(dm_new_crtc_state->stream);
10408 
10409 			ret = dm_atomic_get_state(state, &dm_state);
10410 			if (ret)
10411 				goto fail;
10412 
10413 			dm_new_crtc_state->stream = new_stream;
10414 
10415 			dc_stream_retain(new_stream);
10416 
10417 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10418 					 crtc->base.id);
10419 
10420 			if (dc_add_stream_to_ctx(
10421 					dm->dc,
10422 					dm_state->context,
10423 					dm_new_crtc_state->stream) != DC_OK) {
10424 				ret = -EINVAL;
10425 				goto fail;
10426 			}
10427 
10428 			*lock_and_validation_needed = true;
10429 		}
10430 	}
10431 
10432 skip_modeset:
10433 	/* Release extra reference */
10434 	if (new_stream)
10435 		 dc_stream_release(new_stream);
10436 
10437 	/*
10438 	 * We want to do dc stream updates that do not require a
10439 	 * full modeset below.
10440 	 */
10441 	if (!(enable && aconnector && new_crtc_state->active))
10442 		return 0;
10443 	/*
10444 	 * Given above conditions, the dc state cannot be NULL because:
10445 	 * 1. We're in the process of enabling CRTCs (just been added
10446 	 *    to the dc context, or already is on the context)
10447 	 * 2. Has a valid connector attached, and
10448 	 * 3. Is currently active and enabled.
10449 	 * => The dc stream state currently exists.
10450 	 */
10451 	BUG_ON(dm_new_crtc_state->stream == NULL);
10452 
10453 	/* Scaling or underscan settings */
10454 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10455 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10456 		update_stream_scaling_settings(
10457 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10458 
10459 	/* ABM settings */
10460 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10461 
10462 	/*
10463 	 * Color management settings. We also update color properties
10464 	 * when a modeset is needed, to ensure it gets reprogrammed.
10465 	 */
10466 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10467 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10468 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10469 		if (ret)
10470 			goto fail;
10471 	}
10472 
10473 	/* Update Freesync settings. */
10474 	get_freesync_config_for_crtc(dm_new_crtc_state,
10475 				     dm_new_conn_state);
10476 
10477 	return ret;
10478 
10479 fail:
10480 	if (new_stream)
10481 		dc_stream_release(new_stream);
10482 	return ret;
10483 }
10484 
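/*
 * Decide whether a plane update requires removing and re-creating all
 * planes on the stream. Any change that can affect pipe allocation or
 * blending order (a CRTC change, modeset, color management, scaling,
 * rotation, blending, alpha, colorspace, pixel format, tiling or DCC
 * update on any non-cursor plane sharing the CRTC) forces a reset.
 */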
10485 static bool should_reset_plane(struct drm_atomic_state *state,
10486 			       struct drm_plane *plane,
10487 			       struct drm_plane_state *old_plane_state,
10488 			       struct drm_plane_state *new_plane_state)
10489 {
10490 	struct drm_plane *other;
10491 	struct drm_plane_state *old_other_state, *new_other_state;
10492 	struct drm_crtc_state *new_crtc_state;
10493 	int i;
10494 
10495 	/*
10496 	 * TODO: Remove this hack once the checks below are sufficient
10497 	 * to determine when we need to reset all the planes on
10498 	 * the stream.
10499 	 */
10500 	if (state->allow_modeset)
10501 		return true;
10502 
10503 	/* Exit early if we know that we're adding or removing the plane. */
10504 	if (old_plane_state->crtc != new_plane_state->crtc)
10505 		return true;
10506 
10507 	/* old crtc == new_crtc == NULL, plane not in context. */
10508 	if (!new_plane_state->crtc)
10509 		return false;
10510 
10511 	new_crtc_state =
10512 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10513 
10514 	if (!new_crtc_state)
10515 		return true;
10516 
10517 	/* CRTC Degamma changes currently require us to recreate planes. */
10518 	if (new_crtc_state->color_mgmt_changed)
10519 		return true;
10520 
10521 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10522 		return true;
10523 
10524 	/*
10525 	 * If there are any new primary or overlay planes being added or
10526 	 * removed then the z-order can potentially change. To ensure
10527 	 * correct z-order and pipe acquisition the current DC architecture
10528 	 * requires us to remove and recreate all existing planes.
10529 	 *
10530 	 * TODO: Come up with a more elegant solution for this.
10531 	 */
10532 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10533 		struct amdgpu_framebuffer *old_afb, *new_afb;
10534 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10535 			continue;
10536 
10537 		if (old_other_state->crtc != new_plane_state->crtc &&
10538 		    new_other_state->crtc != new_plane_state->crtc)
10539 			continue;
10540 
10541 		if (old_other_state->crtc != new_other_state->crtc)
10542 			return true;
10543 
10544 		/* Src/dst size and scaling updates. */
10545 		if (old_other_state->src_w != new_other_state->src_w ||
10546 		    old_other_state->src_h != new_other_state->src_h ||
10547 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10548 		    old_other_state->crtc_h != new_other_state->crtc_h)
10549 			return true;
10550 
10551 		/* Rotation / mirroring updates. */
10552 		if (old_other_state->rotation != new_other_state->rotation)
10553 			return true;
10554 
10555 		/* Blending updates. */
10556 		if (old_other_state->pixel_blend_mode !=
10557 		    new_other_state->pixel_blend_mode)
10558 			return true;
10559 
10560 		/* Alpha updates. */
10561 		if (old_other_state->alpha != new_other_state->alpha)
10562 			return true;
10563 
10564 		/* Colorspace changes. */
10565 		if (old_other_state->color_range != new_other_state->color_range ||
10566 		    old_other_state->color_encoding != new_other_state->color_encoding)
10567 			return true;
10568 
10569 		/* Framebuffer checks fall at the end. */
10570 		if (!old_other_state->fb || !new_other_state->fb)
10571 			continue;
10572 
10573 		/* Pixel format changes can require bandwidth updates. */
10574 		if (old_other_state->fb->format != new_other_state->fb->format)
10575 			return true;
10576 
10577 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10578 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10579 
10580 		/* Tiling and DCC changes also require bandwidth updates. */
10581 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10582 		    old_afb->base.modifier != new_afb->base.modifier)
10583 			return true;
10584 	}
10585 
10586 	return false;
10587 }
10588 
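/*
 * Validate a framebuffer attached to the cursor plane: it must fit within
 * the hardware cursor limits, be uncropped, have a pitch of 64, 128 or 256
 * pixels that matches its width, and be linear (untiled) when no format
 * modifier is supplied.
 */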
10589 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10590 			      struct drm_plane_state *new_plane_state,
10591 			      struct drm_framebuffer *fb)
10592 {
10593 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10594 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10595 	unsigned int pitch;
10596 	bool linear;
10597 
10598 	if (fb->width > new_acrtc->max_cursor_width ||
10599 	    fb->height > new_acrtc->max_cursor_height) {
10600 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10601 				 new_plane_state->fb->width,
10602 				 new_plane_state->fb->height);
10603 		return -EINVAL;
10604 	}
10605 	if (new_plane_state->src_w != fb->width << 16 ||
10606 	    new_plane_state->src_h != fb->height << 16) {
10607 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10608 		return -EINVAL;
10609 	}
10610 
10611 	/* Pitch in pixels */
10612 	pitch = fb->pitches[0] / fb->format->cpp[0];
10613 
10614 	if (fb->width != pitch) {
10615 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10616 				 fb->width, pitch);
10617 		return -EINVAL;
10618 	}
10619 
10620 	switch (pitch) {
10621 	case 64:
10622 	case 128:
10623 	case 256:
10624 		/* FB pitch is supported by cursor plane */
10625 		break;
10626 	default:
10627 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10628 		return -EINVAL;
10629 	}
10630 
10631 	/* Core DRM takes care of checking FB modifiers, so we only need to
10632 	 * check tiling flags when the FB doesn't have a modifier. */
10633 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10634 		if (adev->family < AMDGPU_FAMILY_AI) {
10635 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10636 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10637 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10638 		} else {
10639 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10640 		}
10641 		if (!linear) {
10642 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10643 			return -EINVAL;
10644 		}
10645 	}
10646 
10647 	return 0;
10648 }
10649 
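/*
 * Add or remove the DC plane state corresponding to a DRM plane update.
 * Cursor planes are only sanity-checked here, since they are not backed by
 * a dedicated DC plane. Sets *lock_and_validation_needed whenever the DC
 * context is modified.
 */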
10650 static int dm_update_plane_state(struct dc *dc,
10651 				 struct drm_atomic_state *state,
10652 				 struct drm_plane *plane,
10653 				 struct drm_plane_state *old_plane_state,
10654 				 struct drm_plane_state *new_plane_state,
10655 				 bool enable,
10656 				 bool *lock_and_validation_needed)
10657 {
10658 
10659 	struct dm_atomic_state *dm_state = NULL;
10660 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10661 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10662 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10663 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10664 	struct amdgpu_crtc *new_acrtc;
10665 	bool needs_reset;
10666 	int ret = 0;
10667 
10668 
10669 	new_plane_crtc = new_plane_state->crtc;
10670 	old_plane_crtc = old_plane_state->crtc;
10671 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10672 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10673 
10674 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10675 		if (!enable || !new_plane_crtc ||
10676 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10677 			return 0;
10678 
10679 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10680 
10681 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10682 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10683 			return -EINVAL;
10684 		}
10685 
10686 		if (new_plane_state->fb) {
10687 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10688 						 new_plane_state->fb);
10689 			if (ret)
10690 				return ret;
10691 		}
10692 
10693 		return 0;
10694 	}
10695 
10696 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10697 					 new_plane_state);
10698 
10699 	/* Remove any changed/removed planes */
10700 	if (!enable) {
10701 		if (!needs_reset)
10702 			return 0;
10703 
10704 		if (!old_plane_crtc)
10705 			return 0;
10706 
10707 		old_crtc_state = drm_atomic_get_old_crtc_state(
10708 				state, old_plane_crtc);
10709 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10710 
10711 		if (!dm_old_crtc_state->stream)
10712 			return 0;
10713 
10714 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10715 				plane->base.id, old_plane_crtc->base.id);
10716 
10717 		ret = dm_atomic_get_state(state, &dm_state);
10718 		if (ret)
10719 			return ret;
10720 
10721 		if (!dc_remove_plane_from_context(
10722 				dc,
10723 				dm_old_crtc_state->stream,
10724 				dm_old_plane_state->dc_state,
10725 				dm_state->context)) {
10726 
10727 			return -EINVAL;
10728 		}
10729 
10730 		if (dm_old_plane_state->dc_state)
10731 			dc_plane_state_release(dm_old_plane_state->dc_state);
10732 
10733 		dm_new_plane_state->dc_state = NULL;
10734 
10735 		*lock_and_validation_needed = true;
10736 
10737 	} else { /* Add new planes */
10738 		struct dc_plane_state *dc_new_plane_state;
10739 
10740 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10741 			return 0;
10742 
10743 		if (!new_plane_crtc)
10744 			return 0;
10745 
10746 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10747 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10748 
10749 		if (!dm_new_crtc_state->stream)
10750 			return 0;
10751 
10752 		if (!needs_reset)
10753 			return 0;
10754 
10755 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10756 		if (ret)
10757 			return ret;
10758 
10759 		WARN_ON(dm_new_plane_state->dc_state);
10760 
10761 		dc_new_plane_state = dc_create_plane_state(dc);
10762 		if (!dc_new_plane_state)
10763 			return -ENOMEM;
10764 
10765 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10766 				 plane->base.id, new_plane_crtc->base.id);
10767 
10768 		ret = fill_dc_plane_attributes(
10769 			drm_to_adev(new_plane_crtc->dev),
10770 			dc_new_plane_state,
10771 			new_plane_state,
10772 			new_crtc_state);
10773 		if (ret) {
10774 			dc_plane_state_release(dc_new_plane_state);
10775 			return ret;
10776 		}
10777 
10778 		ret = dm_atomic_get_state(state, &dm_state);
10779 		if (ret) {
10780 			dc_plane_state_release(dc_new_plane_state);
10781 			return ret;
10782 		}
10783 
10784 		/*
10785 		 * Any atomic check errors that occur after this will
10786 		 * not need a release. The plane state will be attached
10787 		 * to the stream, and therefore part of the atomic
10788 		 * state. It'll be released when the atomic state is
10789 		 * cleaned.
10790 		 */
10791 		if (!dc_add_plane_to_context(
10792 				dc,
10793 				dm_new_crtc_state->stream,
10794 				dc_new_plane_state,
10795 				dm_state->context)) {
10796 
10797 			dc_plane_state_release(dc_new_plane_state);
10798 			return -EINVAL;
10799 		}
10800 
10801 		dm_new_plane_state->dc_state = dc_new_plane_state;
10802 
10803 		/* Tell DC to do a full surface update every time there
10804 		 * is a plane change. Inefficient, but works for now.
10805 		 */
10806 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10807 
10808 		*lock_and_validation_needed = true;
10809 	}
10810 
10811 
10812 	return ret;
10813 }
10814 
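/*
 * Verify that the cursor plane is scaled exactly like the primary plane on
 * this CRTC. The scale factors are compared in 1/1000 units: crtc_{w,h} is
 * in pixels while src_{w,h} is 16.16 fixed point, hence the >> 16 below.
 */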
10815 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10816 				struct drm_crtc *crtc,
10817 				struct drm_crtc_state *new_crtc_state)
10818 {
10819 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10820 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10821 
10822 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10823 	 * cursor per pipe but it's going to inherit the scaling and
10824 	 * positioning from the underlying pipe. Check the cursor plane's
10825 	 * blending properties match the primary plane's. */
10826 
10827 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10828 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10829 	if (!new_cursor_state || !new_primary_state ||
10830 	    !new_cursor_state->fb || !new_primary_state->fb) {
10831 		return 0;
10832 	}
10833 
10834 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10835 			 (new_cursor_state->src_w >> 16);
10836 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10837 			 (new_cursor_state->src_h >> 16);
10838 
10839 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10840 			 (new_primary_state->src_w >> 16);
10841 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10842 			 (new_primary_state->src_h >> 16);
10843 
10844 	if (cursor_scale_w != primary_scale_w ||
10845 	    cursor_scale_h != primary_scale_h) {
10846 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10847 		return -EINVAL;
10848 	}
10849 
10850 	return 0;
10851 }
10852 
10853 #if defined(CONFIG_DRM_AMD_DC_DCN)
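/*
 * If @crtc is driven by an MST connector, pull the other CRTCs sharing the
 * same MST topology into the atomic state so their DSC configuration can be
 * recomputed alongside this one.
 */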
10854 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10855 {
10856 	struct drm_connector *connector;
10857 	struct drm_connector_state *conn_state, *old_conn_state;
10858 	struct amdgpu_dm_connector *aconnector = NULL;
10859 	int i;
10860 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10861 		if (!conn_state->crtc)
10862 			conn_state = old_conn_state;
10863 
10864 		if (conn_state->crtc != crtc)
10865 			continue;
10866 
10867 		aconnector = to_amdgpu_dm_connector(connector);
10868 		if (!aconnector->port || !aconnector->mst_port)
10869 			aconnector = NULL;
10870 		else
10871 			break;
10872 	}
10873 
10874 	if (!aconnector)
10875 		return 0;
10876 
10877 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10878 }
10879 #endif
10880 
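/*
 * With an overlay plane enabled, the hardware cursor is drawn on the
 * topmost pipe, so the overlay must fully cover the primary plane for the
 * cursor to behave correctly. Reject overlay configurations that leave part
 * of the primary plane uncovered.
 */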
10881 static int validate_overlay(struct drm_atomic_state *state)
10882 {
10883 	int i;
10884 	struct drm_plane *plane;
10885 	struct drm_plane_state *new_plane_state;
10886 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10887 
10888 	/* Check if primary plane is contained inside overlay */
10889 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10890 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10891 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10892 				return 0;
10893 
10894 			overlay_state = new_plane_state;
10895 			continue;
10896 		}
10897 	}
10898 
10899 	/* check if we're making changes to the overlay plane */
10900 	if (!overlay_state)
10901 		return 0;
10902 
10903 	/* check if overlay plane is enabled */
10904 	if (!overlay_state->crtc)
10905 		return 0;
10906 
10907 	/* find the primary plane for the CRTC that the overlay is enabled on */
10908 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10909 	if (IS_ERR(primary_state))
10910 		return PTR_ERR(primary_state);
10911 
10912 	/* check if primary plane is enabled */
10913 	if (!primary_state->crtc)
10914 		return 0;
10915 
10916 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10917 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10918 	    primary_state->crtc_y < overlay_state->crtc_y ||
10919 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10920 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10921 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10922 		return -EINVAL;
10923 	}
10924 
10925 	return 0;
10926 }
10927 
10928 /**
10929  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10930  * @dev: The DRM device
10931  * @state: The atomic state to commit
10932  *
10933  * Validate that the given atomic state is programmable by DC into hardware.
10934  * This involves constructing a &struct dc_state reflecting the new hardware
10935  * state we wish to commit, then querying DC to see if it is programmable. It's
10936  * important not to modify the existing DC state. Otherwise, atomic_check
10937  * may unexpectedly commit hardware changes.
10938  *
10939  * When validating the DC state, it's important that the right locks are
10940  * acquired. For full updates case which removes/adds/updates streams on one
10941  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10942  * that any such full update commit will wait for completion of any outstanding
10943  * flip using DRMs synchronization events.
10944  *
10945  * Note that DM adds the affected connectors for all CRTCs in state, when that
10946  * might not seem necessary. This is because DC stream creation requires the
10947  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10948  * be possible but non-trivial - a possible TODO item.
10949  *
10950  * Return: -Error code if validation failed.
10951  */
10952 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10953 				  struct drm_atomic_state *state)
10954 {
10955 	struct amdgpu_device *adev = drm_to_adev(dev);
10956 	struct dm_atomic_state *dm_state = NULL;
10957 	struct dc *dc = adev->dm.dc;
10958 	struct drm_connector *connector;
10959 	struct drm_connector_state *old_con_state, *new_con_state;
10960 	struct drm_crtc *crtc;
10961 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10962 	struct drm_plane *plane;
10963 	struct drm_plane_state *old_plane_state, *new_plane_state;
10964 	enum dc_status status;
10965 	int ret, i;
10966 	bool lock_and_validation_needed = false;
10967 	struct dm_crtc_state *dm_old_crtc_state;
10968 #if defined(CONFIG_DRM_AMD_DC_DCN)
10969 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10970 #endif
10971 
10972 	trace_amdgpu_dm_atomic_check_begin(state);
10973 
10974 	ret = drm_atomic_helper_check_modeset(dev, state);
10975 	if (ret)
10976 		goto fail;
10977 
10978 	/* Check connector changes */
10979 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10980 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10981 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10982 
10983 		/* Skip connectors that are disabled or part of modeset already. */
10984 		if (!old_con_state->crtc && !new_con_state->crtc)
10985 			continue;
10986 
10987 		if (!new_con_state->crtc)
10988 			continue;
10989 
10990 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10991 		if (IS_ERR(new_crtc_state)) {
10992 			ret = PTR_ERR(new_crtc_state);
10993 			goto fail;
10994 		}
10995 
10996 		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
10997 		    dm_old_con_state->scaling != dm_new_con_state->scaling)
10998 			new_crtc_state->connectors_changed = true;
10999 	}
11000 
11001 #if defined(CONFIG_DRM_AMD_DC_DCN)
11002 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11003 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11004 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11005 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11006 				if (ret)
11007 					goto fail;
11008 			}
11009 		}
11010 	}
11011 #endif
11012 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11013 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11014 
11015 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11016 		    !new_crtc_state->color_mgmt_changed &&
11017 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11018 			dm_old_crtc_state->dsc_force_changed == false)
11019 			continue;
11020 
11021 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11022 		if (ret)
11023 			goto fail;
11024 
11025 		if (!new_crtc_state->enable)
11026 			continue;
11027 
11028 		ret = drm_atomic_add_affected_connectors(state, crtc);
11029 		if (ret)
11030 			goto fail;
11031 
11032 		ret = drm_atomic_add_affected_planes(state, crtc);
11033 		if (ret)
11034 			goto fail;
11035 
11036 		if (dm_old_crtc_state->dsc_force_changed)
11037 			new_crtc_state->mode_changed = true;
11038 	}
11039 
11040 	/*
11041 	 * Add all primary and overlay planes on the CRTC to the state
11042 	 * whenever a plane is enabled to maintain correct z-ordering
11043 	 * and to enable fast surface updates.
11044 	 */
11045 	drm_for_each_crtc(crtc, dev) {
11046 		bool modified = false;
11047 
11048 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11049 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11050 				continue;
11051 
11052 			if (new_plane_state->crtc == crtc ||
11053 			    old_plane_state->crtc == crtc) {
11054 				modified = true;
11055 				break;
11056 			}
11057 		}
11058 
11059 		if (!modified)
11060 			continue;
11061 
11062 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11063 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11064 				continue;
11065 
11066 			new_plane_state =
11067 				drm_atomic_get_plane_state(state, plane);
11068 
11069 			if (IS_ERR(new_plane_state)) {
11070 				ret = PTR_ERR(new_plane_state);
11071 				goto fail;
11072 			}
11073 		}
11074 	}
11075 
11076 	/*
11077 	 * DC consults the zpos (layer_index in DC terminology) to determine the
11078 	 * hw plane on which to enable the hw cursor (see
11079 	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
11080 	 * atomic state, so call drm helper to normalize zpos.
11081 	 */
11082 	ret = drm_atomic_normalize_zpos(dev, state);
11083 	if (ret) {
11084 		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
11085 		goto fail;
11086 	}
11087 
11088 	/* Remove existing planes if they are modified */
11089 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11090 		ret = dm_update_plane_state(dc, state, plane,
11091 					    old_plane_state,
11092 					    new_plane_state,
11093 					    false,
11094 					    &lock_and_validation_needed);
11095 		if (ret)
11096 			goto fail;
11097 	}
11098 
11099 	/* Disable all CRTCs which require disabling */
11100 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11101 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11102 					   old_crtc_state,
11103 					   new_crtc_state,
11104 					   false,
11105 					   &lock_and_validation_needed);
11106 		if (ret)
11107 			goto fail;
11108 	}
11109 
11110 	/* Enable all CRTCs which require enabling */
11111 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11112 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11113 					   old_crtc_state,
11114 					   new_crtc_state,
11115 					   true,
11116 					   &lock_and_validation_needed);
11117 		if (ret)
11118 			goto fail;
11119 	}
11120 
11121 	ret = validate_overlay(state);
11122 	if (ret)
11123 		goto fail;
11124 
11125 	/* Add new/modified planes */
11126 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11127 		ret = dm_update_plane_state(dc, state, plane,
11128 					    old_plane_state,
11129 					    new_plane_state,
11130 					    true,
11131 					    &lock_and_validation_needed);
11132 		if (ret)
11133 			goto fail;
11134 	}
11135 
11136 	/* Run this here since we want to validate the streams we created */
11137 	ret = drm_atomic_helper_check_planes(dev, state);
11138 	if (ret)
11139 		goto fail;
11140 
11141 	/* Check cursor planes scaling */
11142 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11143 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11144 		if (ret)
11145 			goto fail;
11146 	}
11147 
11148 	if (state->legacy_cursor_update) {
11149 		/*
11150 		 * This is a fast cursor update coming from the plane update
11151 		 * helper, check if it can be done asynchronously for better
11152 		 * performance.
11153 		 */
11154 		state->async_update =
11155 			!drm_atomic_helper_async_check(dev, state);
11156 
11157 		/*
11158 		 * Skip the remaining global validation if this is an async
11159 		 * update. Cursor updates can be done without affecting
11160 		 * state or bandwidth calcs and this avoids the performance
11161 		 * penalty of locking the private state object and
11162 		 * allocating a new dc_state.
11163 		 */
11164 		if (state->async_update)
11165 			return 0;
11166 	}
11167 
11168 	/* Check scaling and underscan changes */
11169 	/* TODO: Scaling changes validation was removed due to the inability to
11170 	 * commit a new stream into the context without causing a full reset.
11171 	 * Need to decide how to handle this.
11172 	 */
11173 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11174 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11175 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11176 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11177 
11178 		/* Skip any modesets/resets */
11179 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11180 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11181 			continue;
11182 
11183 		/* Skip anything that is not a scaling or underscan change */
11184 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11185 			continue;
11186 
11187 		lock_and_validation_needed = true;
11188 	}
11189 
11190 	/**
11191 	 * Streams and planes are reset when there are changes that affect
11192 	 * bandwidth. Anything that affects bandwidth needs to go through
11193 	 * DC global validation to ensure that the configuration can be applied
11194 	 * to hardware.
11195 	 *
11196 	 * We have to currently stall out here in atomic_check for outstanding
11197 	 * commits to finish in this case because our IRQ handlers reference
11198 	 * DRM state directly - we can end up disabling interrupts too early
11199 	 * if we don't.
11200 	 *
11201 	 * TODO: Remove this stall and drop DM state private objects.
11202 	 */
11203 	if (lock_and_validation_needed) {
11204 		ret = dm_atomic_get_state(state, &dm_state);
11205 		if (ret)
11206 			goto fail;
11207 
11208 		ret = do_aquire_global_lock(dev, state);
11209 		if (ret)
11210 			goto fail;
11211 
11212 #if defined(CONFIG_DRM_AMD_DC_DCN)
11213 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11214 			ret = -EINVAL;
			goto fail;
		}
11215 
11216 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11217 		if (ret)
11218 			goto fail;
11219 #endif
11220 
11221 		/*
11222 		 * Perform validation of MST topology in the state:
11223 		 * We need to perform MST atomic check before calling
11224 		 * dc_validate_global_state(), or there is a chance
11225 		 * to get stuck in an infinite loop and hang eventually.
11226 		 */
11227 		ret = drm_dp_mst_atomic_check(state);
11228 		if (ret)
11229 			goto fail;
11230 		status = dc_validate_global_state(dc, dm_state->context, false);
11231 		if (status != DC_OK) {
11232 			drm_dbg_atomic(dev,
11233 				       "DC global validation failure: %s (%d)",
11234 				       dc_status_to_str(status), status);
11235 			ret = -EINVAL;
11236 			goto fail;
11237 		}
11238 	} else {
11239 		/*
11240 		 * The commit is a fast update. Fast updates shouldn't change
11241 		 * the DC context, affect global validation, and can have their
11242 		 * commit work done in parallel with other commits not touching
11243 		 * the same resource. If we have a new DC context as part of
11244 		 * the DM atomic state from validation we need to free it and
11245 		 * retain the existing one instead.
11246 		 *
11247 		 * Furthermore, since the DM atomic state only contains the DC
11248 		 * context and can safely be annulled, we can free the state
11249 		 * and clear the associated private object now to free
11250 		 * some memory and avoid a possible use-after-free later.
11251 		 */
11252 
11253 		for (i = 0; i < state->num_private_objs; i++) {
11254 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11255 
11256 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11257 				int j = state->num_private_objs-1;
11258 
11259 				dm_atomic_destroy_state(obj,
11260 						state->private_objs[i].state);
11261 
11262 				/* If i is not at the end of the array then the
11263 				 * last element needs to be moved to where i was
11264 				 * before the array can safely be truncated.
11265 				 */
11266 				if (i != j)
11267 					state->private_objs[i] =
11268 						state->private_objs[j];
11269 
11270 				state->private_objs[j].ptr = NULL;
11271 				state->private_objs[j].state = NULL;
11272 				state->private_objs[j].old_state = NULL;
11273 				state->private_objs[j].new_state = NULL;
11274 
11275 				state->num_private_objs = j;
11276 				break;
11277 			}
11278 		}
11279 	}
11280 
11281 	/* Store the overall update type for use later in atomic check. */
11282 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11283 		struct dm_crtc_state *dm_new_crtc_state =
11284 			to_dm_crtc_state(new_crtc_state);
11285 
11286 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11287 							 UPDATE_TYPE_FULL :
11288 							 UPDATE_TYPE_FAST;
11289 	}
11290 
11291 	/* Must be success */
11292 	WARN_ON(ret);
11293 
11294 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11295 
11296 	return ret;
11297 
11298 fail:
11299 	if (ret == -EDEADLK)
11300 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11301 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11302 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11303 	else
11304 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11305 
11306 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11307 
11308 	return ret;
11309 }
11310 
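/*
 * Read DP_DOWN_STREAM_PORT_COUNT over DPCD and report whether the sink sets
 * DP_MSA_TIMING_PAR_IGNORED, i.e. whether it can be driven without the MSA
 * timing parameters. Used below when deciding FreeSync capability for
 * DP/eDP sinks.
 */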
11311 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11312 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11313 {
11314 	uint8_t dpcd_data;
11315 	bool capable = false;
11316 
11317 	if (amdgpu_dm_connector->dc_link &&
11318 		dm_helpers_dp_read_dpcd(
11319 				NULL,
11320 				amdgpu_dm_connector->dc_link,
11321 				DP_DOWN_STREAM_PORT_COUNT,
11322 				&dpcd_data,
11323 				sizeof(dpcd_data))) {
11324 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11325 	}
11326 
11327 	return capable;
11328 }
11329 
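/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA EDID
 * extension block to the DMUB firmware for parsing. The reply is either an
 * ACK for the chunk or a decoded AMD VSDB, whose FreeSync range is copied
 * into @vsdb.
 */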
11330 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11331 		unsigned int offset,
11332 		unsigned int total_length,
11333 		uint8_t *data,
11334 		unsigned int length,
11335 		struct amdgpu_hdmi_vsdb_info *vsdb)
11336 {
11337 	bool res;
11338 	union dmub_rb_cmd cmd;
11339 	struct dmub_cmd_send_edid_cea *input;
11340 	struct dmub_cmd_edid_cea_output *output;
11341 
11342 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11343 		return false;
11344 
11345 	memset(&cmd, 0, sizeof(cmd));
11346 
11347 	input = &cmd.edid_cea.data.input;
11348 
11349 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11350 	cmd.edid_cea.header.sub_type = 0;
11351 	cmd.edid_cea.header.payload_bytes =
11352 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11353 	input->offset = offset;
11354 	input->length = length;
11355 	input->total_length = total_length;
11356 	memcpy(input->payload, data, length);
11357 
11358 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11359 	if (!res) {
11360 		DRM_ERROR("EDID CEA parser failed\n");
11361 		return false;
11362 	}
11363 
11364 	output = &cmd.edid_cea.data.output;
11365 
11366 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11367 		if (!output->ack.success) {
11368 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11369 					output->ack.offset);
11370 		}
11371 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11372 		if (!output->amd_vsdb.vsdb_found)
11373 			return false;
11374 
11375 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11376 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11377 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11378 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11379 	} else {
11380 		DRM_WARN("Unknown EDID CEA parser results\n");
11381 		return false;
11382 	}
11383 
11384 	return true;
11385 }
11386 
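/*
 * Stream a CEA extension block to the DMCU firmware 8 bytes at a time and,
 * once the final chunk has been sent, read back the AMD VSDB (version and
 * min/max refresh rate) if one was found.
 */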
11387 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11388 		uint8_t *edid_ext, int len,
11389 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11390 {
11391 	int i;
11392 
11393 	/* send extension block to DMCU for parsing */
11394 	for (i = 0; i < len; i += 8) {
11395 		bool res;
11396 		int offset;
11397 
11398 		/* send 8 bytes at a time */
11399 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11400 			return false;
11401 
11402 		if (i+8 == len) {
11403 			/* EDID block send completed, expect result */
11404 			int version, min_rate, max_rate;
11405 
11406 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11407 			if (res) {
11408 				/* amd vsdb found */
11409 				vsdb_info->freesync_supported = 1;
11410 				vsdb_info->amd_vsdb_version = version;
11411 				vsdb_info->min_refresh_rate_hz = min_rate;
11412 				vsdb_info->max_refresh_rate_hz = max_rate;
11413 				return true;
11414 			}
11415 			/* not amd vsdb */
11416 			return false;
11417 		}
11418 
11419 		/* check for ack */
11420 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11421 		if (!res)
11422 			return false;
11423 	}
11424 
11425 	return false;
11426 }
11427 
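/*
 * DMUB variant of the CEA parser: forward the extension block in 8-byte
 * chunks via dm_edid_parser_send_cea() and report whether an AMD VSDB with
 * FreeSync support was found.
 */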
11428 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11429 		uint8_t *edid_ext, int len,
11430 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11431 {
11432 	int i;
11433 
11434 	/* send extension block to DMUB for parsing */
11435 	for (i = 0; i < len; i += 8) {
11436 		/* send 8 bytes at a time */
11437 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11438 			return false;
11439 	}
11440 
11441 	return vsdb_info->freesync_supported;
11442 }
11443 
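/* Route CEA parsing to DMUB when it is available, else fall back to DMCU. */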
11444 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11445 		uint8_t *edid_ext, int len,
11446 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11447 {
11448 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11449 
11450 	if (adev->dm.dmub_srv)
11451 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11452 	else
11453 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11454 }
11455 
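/*
 * Locate the CEA extension block in the EDID and have the firmware parse it
 * for an AMD VSDB. Returns the extension index on success, -ENODEV if there
 * is no EDID, no CEA extension or no valid VSDB.
 */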
11456 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11457 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11458 {
11459 	uint8_t *edid_ext = NULL;
11460 	int i;
11461 	bool valid_vsdb_found = false;
11462 
11463 	/*----- drm_find_cea_extension() -----*/
11464 	/* No EDID or EDID extensions */
11465 	if (edid == NULL || edid->extensions == 0)
11466 		return -ENODEV;
11467 
11468 	/* Find CEA extension */
11469 	for (i = 0; i < edid->extensions; i++) {
11470 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11471 		if (edid_ext[0] == CEA_EXT)
11472 			break;
11473 	}
11474 
11475 	if (i == edid->extensions)
11476 		return -ENODEV;
11477 
11478 	/*----- cea_db_offsets() -----*/
11479 	if (edid_ext[0] != CEA_EXT)
11480 		return -ENODEV;
11481 
11482 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11483 
11484 	return valid_vsdb_found ? i : -ENODEV;
11485 }
11486 
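/**
 * amdgpu_dm_update_freesync_caps - update a connector's FreeSync capability
 * @connector: DRM connector to update
 * @edid: EDID to parse, or NULL to clear the cached refresh rate range
 *
 * For DP/eDP sinks that ignore MSA timing parameters, the refresh rate range
 * is taken from the EDID monitor range descriptor; for HDMI sinks it is taken
 * from the AMD VSDB. A range wider than 10 Hz marks the connector as FreeSync
 * capable and is reflected in its vrr_capable property.
 */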
11487 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11488 					struct edid *edid)
11489 {
11490 	int i = 0;
11491 	struct detailed_timing *timing;
11492 	struct detailed_non_pixel *data;
11493 	struct detailed_data_monitor_range *range;
11494 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11495 			to_amdgpu_dm_connector(connector);
11496 	struct dm_connector_state *dm_con_state = NULL;
11497 
11498 	struct drm_device *dev = connector->dev;
11499 	struct amdgpu_device *adev = drm_to_adev(dev);
11500 	bool freesync_capable = false;
11501 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11502 
11503 	if (!connector->state) {
11504 		DRM_ERROR("%s - Connector has no state", __func__);
11505 		goto update;
11506 	}
11507 
11508 	if (!edid) {
11509 		dm_con_state = to_dm_connector_state(connector->state);
11510 
11511 		amdgpu_dm_connector->min_vfreq = 0;
11512 		amdgpu_dm_connector->max_vfreq = 0;
11513 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11514 
11515 		goto update;
11516 	}
11517 
11518 	dm_con_state = to_dm_connector_state(connector->state);
11519 
11520 	if (!amdgpu_dm_connector->dc_sink) {
11521 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
11522 		goto update;
11523 	}
11524 	if (!adev->dm.freesync_module)
11525 		goto update;
11526 
11527 
11528 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11529 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
11530 		bool edid_check_required = false;
11531 
11532 		if (edid) {
11533 			edid_check_required = is_dp_capable_without_timing_msa(
11534 						adev->dm.dc,
11535 						amdgpu_dm_connector);
11536 		}
11537 
11538 		if (edid_check_required == true && (edid->version > 1 ||
11539 		   (edid->version == 1 && edid->revision > 1))) {
11540 			for (i = 0; i < 4; i++) {
11541 
11542 				timing	= &edid->detailed_timings[i];
11543 				data	= &timing->data.other_data;
11544 				range	= &data->data.range;
11545 				/*
11546 				 * Check if monitor has continuous frequency mode
11547 				 */
11548 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11549 					continue;
11550 				/*
11551 				 * Check for flag range limits only. If flag == 1 then
11552 				 * no additional timing information provided.
11553 				 * Default GTF, GTF Secondary curve and CVT are not
11554 				 * supported
11555 				 */
11556 				if (range->flags != 1)
11557 					continue;
11558 
11559 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11560 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11561 				amdgpu_dm_connector->pixel_clock_mhz =
11562 					range->pixel_clock_mhz * 10;
11563 
11564 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11565 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11566 
11567 				break;
11568 			}
11569 
11570 			if (amdgpu_dm_connector->max_vfreq -
11571 			    amdgpu_dm_connector->min_vfreq > 10) {
11572 
11573 				freesync_capable = true;
11574 			}
11575 		}
11576 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11577 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11578 		if (i >= 0 && vsdb_info.freesync_supported) {
11579 			timing  = &edid->detailed_timings[i];
11580 			data    = &timing->data.other_data;
11581 
11582 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11583 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11584 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11585 				freesync_capable = true;
11586 
11587 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11588 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11589 		}
11590 	}
11591 
11592 update:
11593 	if (dm_con_state)
11594 		dm_con_state->freesync_capable = freesync_capable;
11595 
11596 	if (connector->vrr_capable_property)
11597 		drm_connector_set_vrr_capable_property(connector,
11598 						       freesync_capable);
11599 }
11600 
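/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state, then re-enable per-frame CRTC master sync and trigger the sync,
 * all under the DC lock.
 */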
11601 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11602 {
11603 	struct amdgpu_device *adev = drm_to_adev(dev);
11604 	struct dc *dc = adev->dm.dc;
11605 	int i;
11606 
11607 	mutex_lock(&adev->dm.dc_lock);
11608 	if (dc->current_state) {
11609 		for (i = 0; i < dc->current_state->stream_count; ++i)
11610 			dc->current_state->streams[i]
11611 				->triggered_crtc_reset.enabled =
11612 				adev->dm.force_timing_sync;
11613 
11614 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11615 		dc_trigger_sync(dc, dc->current_state);
11616 	}
11617 	mutex_unlock(&adev->dm.dc_lock);
11618 }
11619 
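/*
 * Register write helper used by DC: writes through CGS and records the
 * access in the amdgpu_dc_wreg trace event. Writes to address 0 are
 * rejected when DM_CHECK_ADDR_0 is defined.
 */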
11620 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11621 		       uint32_t value, const char *func_name)
11622 {
11623 #ifdef DM_CHECK_ADDR_0
11624 	if (address == 0) {
11625 		DC_ERR("invalid register write. address = 0");
11626 		return;
11627 	}
11628 #endif
11629 	cgs_write_register(ctx->cgs_device, address, value);
11630 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11631 }
11632 
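/*
 * Register read helper used by DC: reads through CGS and records the access
 * in the amdgpu_dc_rreg trace event. Reads are not allowed (assert and
 * return 0) while a DMUB register-offload gather is in progress, unless
 * burst writes are expected.
 */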
11633 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11634 			  const char *func_name)
11635 {
11636 	uint32_t value;
11637 #ifdef DM_CHECK_ADDR_0
11638 	if (address == 0) {
11639 		DC_ERR("invalid register read; address = 0\n");
11640 		return 0;
11641 	}
11642 #endif
11643 
11644 	if (ctx->dmub_srv &&
11645 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11646 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11647 		ASSERT(false);
11648 		return 0;
11649 	}
11650 
11651 	value = cgs_read_register(ctx->cgs_device, address);
11652 
11653 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11654 
11655 	return value;
11656 }
11657 
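/*
 * Start a DMUB AUX transfer and wait up to 10 seconds for the notification
 * from the DMUB outbox IRQ. On timeout, *operation_result is set to
 * AUX_RET_ERROR_TIMEOUT and -1 is returned. On success the reply command
 * and, for reads, the returned data are copied into @payload, and the reply
 * length is returned.
 */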
11658 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11659 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
11660 {
11661 	struct amdgpu_device *adev = ctx->driver_context;
11662 	int ret = 0;
11663 
11664 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11665 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11666 	if (ret == 0) {
11667 		*operation_result = AUX_RET_ERROR_TIMEOUT;
11668 		return -1;
11669 	}
11670 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11671 
11672 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11673 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11674 
11675 		/* For read case, copy data to payload */
11676 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11677 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11678 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11679 			adev->dm.dmub_notify->aux_reply.length);
11680 	}
11681 
11682 	return adev->dm.dmub_notify->aux_reply.length;
11683 }
11684