• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #ifndef __AMDGPU_DM_H__
27 #define __AMDGPU_DM_H__
28 
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_connector.h>
31 #include <drm/drm_crtc.h>
32 #include <drm/drm_dp_mst_helper.h>
33 #include <drm/drm_plane.h>
34 
35 /*
36  * This file contains the definition for amdgpu_display_manager
37  * and its API for amdgpu driver's use.
38  * This component provides all the display related functionality
39  * and this is the only component that calls DAL API.
 * The API contained here is intended for amdgpu driver use.
41  * The API that is called directly from KMS framework is located
42  * in amdgpu_dm_kms.h file
43  */
44 
45 #define AMDGPU_DM_MAX_DISPLAY_INDEX 31
46 
47 #define AMDGPU_DM_MAX_CRTC 6
48 
49 #define AMDGPU_DM_MAX_NUM_EDP 2
50 
51 #define AMDGPU_DMUB_NOTIFICATION_MAX 5
52 /*
53 #include "include/amdgpu_dal_power_if.h"
54 #include "amdgpu_dm_irq.h"
55 */
56 
57 #include "irq_types.h"
58 #include "signal_types.h"
59 #include "amdgpu_dm_crc.h"
60 struct aux_payload;
61 enum aux_return_code_type;
62 
63 /* Forward declarations */
64 struct amdgpu_device;
65 struct amdgpu_crtc;
66 struct drm_device;
67 struct dc;
68 struct amdgpu_bo;
69 struct dmub_srv;
70 struct dc_plane_state;
71 struct dmub_notification;
72 
/**
 * struct common_irq_params - Context passed to a registered DC IRQ handler
 * @adev: amdgpu base driver device that owns the interrupt
 * @irq_src: DC interrupt source this parameter block is registered for
 * @previous_timestamp: timestamp of the previously serviced interrupt,
 *                      updated atomically (64-bit)
 */
struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};
78 
/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

/**
 * typedef dmub_notify_interrupt_callback_t - Callback invoked to handle a
 * DMUB notification; receives the device and the notification payload.
 */
typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
92 
/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: DMUB notification handed to the callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};
105 
/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank, false if disabling
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};
121 
/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};
159 
/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations (linked into &amdgpu_display_manager.da_list)
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};
173 
/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};
197 
/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};
215 
/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device, one per eDP link
 * @backlight_link: Link on which to control backlight, one per eDP link
 * @backlight_caps: Capabilities of the backlight device, one per eDP link
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate if callback is offload.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/**
	 * @vblank_lock:
	 *
	 * Guards access to deferred vblank work state.
	 */
	spinlock_t vblank_lock;
#endif

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is a n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	/**
	 * @dmub_outbox_params:
	 *
	 * DMUB outbox IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @num_of_edps:
	 *
	 * Number of eDP links with a registered backlight device; valid
	 * entries in the backlight arrays above.
	 */
	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;
#endif

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;
#endif

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @crc_rd_wrk:
	 *
	 * Work to be executed in a separate thread to communicate with PSP.
	 */
	struct crc_rd_work *crc_rd_wrk;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	/**
	 * @disable_hpd_irq:
	 *
	 * When true, HPD interrupt handling is skipped
	 * (presumably toggled via debugfs — TODO confirm against dm code).
	 */
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	/* Signals completion of a DMUB AUX transfer. */
	struct completion dmub_aux_transfer_done;
	/* Workqueue used for the delayed HPD work items. */
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * quirk for hpd discon while aux is on-going.
	 * occurred on certain intel platform
	 */
	bool aux_hpd_discon_quirk;
};
551 
/**
 * enum dsc_clock_force_state - Tri-state DSC enablement override
 * @DSC_CLK_FORCE_DEFAULT: no override; driver decides whether to use DSC
 * @DSC_CLK_FORCE_ENABLE: force DSC enabled
 * @DSC_CLK_FORCE_DISABLE: force DSC disabled
 */
enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};
557 
/**
 * struct dsc_preferred_settings - Preferred/forced DSC configuration
 * @dsc_force_enable: force state for the DSC clock, see
 *                    &enum dsc_clock_force_state
 * @dsc_num_slices_v: forced number of vertical slices (0 = no override —
 *                    TODO confirm sentinel against dm code)
 * @dsc_num_slices_h: forced number of horizontal slices (0 = no override —
 *                    TODO confirm sentinel against dm code)
 * @dsc_bits_per_pixel: forced DSC target bits per pixel
 * @dsc_force_disable_passthrough: when true, disable DSC passthrough
 */
struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};
565 
566 struct amdgpu_dm_connector {
567 
568 	struct drm_connector base;
569 	uint32_t connector_id;
570 
571 	/* we need to mind the EDID between detect
572 	   and get modes due to analog/digital/tvencoder */
573 	struct edid *edid;
574 
575 	/* shared with amdgpu */
576 	struct amdgpu_hpd hpd;
577 
578 	/* number of modes generated from EDID at 'dc_sink' */
579 	int num_modes;
580 
581 	/* The 'old' sink - before an HPD.
582 	 * The 'current' sink is in dc_link->sink. */
583 	struct dc_sink *dc_sink;
584 	struct dc_link *dc_link;
585 	struct dc_sink *dc_em_sink;
586 
587 	/* DM only */
588 	struct drm_dp_mst_topology_mgr mst_mgr;
589 	struct amdgpu_dm_dp_aux dm_dp_aux;
590 	struct drm_dp_mst_port *port;
591 	struct amdgpu_dm_connector *mst_port;
592 	struct drm_dp_aux *dsc_aux;
593 
594 	/* TODO see if we can merge with ddc_bus or make a dm_connector */
595 	struct amdgpu_i2c_adapter *i2c;
596 
597 	/* Monitor range limits */
598 	int min_vfreq ;
599 	int max_vfreq ;
600 	int pixel_clock_mhz;
601 
602 	/* Audio instance - protected by audio_lock. */
603 	int audio_inst;
604 
605 	struct mutex hpd_lock;
606 
607 	bool fake_enable;
608 #ifdef CONFIG_DEBUG_FS
609 	uint32_t debugfs_dpcd_address;
610 	uint32_t debugfs_dpcd_size;
611 #endif
612 	bool force_yuv420_output;
613 	struct dsc_preferred_settings dsc_settings;
614 	/* Cached display modes */
615 	struct drm_display_mode freesync_vid_base;
616 
617 	int psr_skip_count;
618 };
619 
620 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
621 
/* IP block registration entry for the display manager. */
extern const struct amdgpu_ip_block_version dm_ip_block;

/**
 * struct dm_plane_state - DM-private subclass of &struct drm_plane_state
 * @base: the base (DRM core) plane state
 * @dc_state: the DC plane state backing this DRM plane state
 */
struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};
628 
/**
 * struct dm_crtc_state - DM-private subclass of &struct drm_crtc_state
 * @base: the base (DRM core) CRTC state
 * @stream: DC stream driven by this CRTC
 * @cm_has_degamma: color-management degamma state flag
 * @cm_is_degamma_srgb: color-management degamma-is-sRGB flag
 * @update_type: pending update classification
 * @active_planes: number of planes currently active on this CRTC
 * @crc_skip_count: frames to skip before CRC capture is considered stable
 * @freesync_timing_changed: freesync timing changed on this commit
 * @freesync_vrr_info_changed: VRR infopacket changed on this commit
 * @dsc_force_changed: DSC force setting changed (see dsc_settings)
 * @vrr_supported: variable refresh rate is supported
 * @freesync_config: freesync module configuration for this CRTC
 * @vrr_infopacket: VRR infopacket to be sent to the sink
 * @abm_level: adaptive backlight modulation level
 */
struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_timing_changed;
	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
653 
/**
 * struct dm_atomic_state - DM-private global atomic state
 * @base: the base (DRM core) private state, tracked via
 *        &amdgpu_display_manager.atomic_obj
 * @context: the DC state associated with this atomic state
 */
struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
661 
/**
 * struct dm_connector_state - DM-private subclass of &struct drm_connector_state
 * @base: the base (DRM core) connector state
 * @scaling: scaling (RMX) mode selected for this connector
 * @underscan_vborder: vertical underscan border size
 * @underscan_hborder: horizontal underscan border size
 * @underscan_enable: true if underscan is enabled
 * @freesync_capable: true if the sink is freesync capable
 * @update_hdcp: HDCP state needs updating (CONFIG_DRM_AMD_DC_HDCP only)
 * @abm_level: adaptive backlight modulation level
 * @vcpi_slots: number of VCPI slots (DP MST payload allocation)
 * @pbn: payload bandwidth number (DP MST)
 */
struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};
677 
/**
 * struct amdgpu_hdmi_vsdb_info - FreeSync info parsed from the AMD HDMI VSDB
 * @amd_vsdb_version: VSDB version, should be used to determine which VSIF to send
 * @freesync_supported: FreeSync supported by the sink
 * @min_refresh_rate_hz: FreeSync minimum refresh rate in Hz
 * @max_refresh_rate_hz: FreeSync maximum refresh rate in Hz
 */
struct amdgpu_hdmi_vsdb_info {
	unsigned int amd_vsdb_version;		/* VSDB version, should be used to determine which VSIF to send */
	bool freesync_supported;		/* FreeSync Supported */
	unsigned int min_refresh_rate_hz;	/* FreeSync Minimum Refresh Rate in Hz */
	unsigned int max_refresh_rate_hz;	/* FreeSync Maximum Refresh Rate in Hz */
};


#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
688 
/* Connector atomic state hooks (drm_connector_funcs implementations). */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

/* Initialize an amdgpu_dm_connector for the given DC link. */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

/* Re-evaluate freesync capability from the (possibly NULL) EDID. */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

/* Color management. */
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

/* Perform a synchronous AUX transfer through the DMUB; result via *operation_result. */
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
					struct aux_payload *payload, enum aux_return_code_type *operation_result);
#endif /* __AMDGPU_DM_H__ */
739