1 /*
2 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #ifndef __AMDGPU_DM_H__
27 #define __AMDGPU_DM_H__
28
29 #include <drm/display/drm_dp_mst_helper.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_connector.h>
32 #include <drm/drm_crtc.h>
33 #include <drm/drm_plane.h>
34 #include "link_service_types.h"
35
36 /*
37 * This file contains the definition for amdgpu_display_manager
38 * and its API for amdgpu driver's use.
39 * This component provides all the display related functionality
40 * and this is the only component that calls DAL API.
 * The API contained here is intended for amdgpu driver use.
42 * The API that is called directly from KMS framework is located
43 * in amdgpu_dm_kms.h file
44 */
45
46 #define AMDGPU_DM_MAX_DISPLAY_INDEX 31
47
48 #define AMDGPU_DM_MAX_CRTC 6
49
50 #define AMDGPU_DM_MAX_NUM_EDP 2
51
52 #define AMDGPU_DMUB_NOTIFICATION_MAX 5
53
54 /*
55 #include "include/amdgpu_dal_power_if.h"
56 #include "amdgpu_dm_irq.h"
57 */
58
59 #include "irq_types.h"
60 #include "signal_types.h"
61 #include "amdgpu_dm_crc.h"
62 struct aux_payload;
63 struct set_config_cmd_payload;
64 enum aux_return_code_type;
65 enum set_config_status;
66
67 /* Forward declarations */
68 struct amdgpu_device;
69 struct amdgpu_crtc;
70 struct drm_device;
71 struct dc;
72 struct amdgpu_bo;
73 struct dmub_srv;
74 struct dc_plane_state;
75 struct dmub_notification;
76
/**
 * struct common_irq_params - Per-source context passed to DM IRQ handlers
 * @adev: amdgpu device this IRQ source belongs to
 * @irq_src: the DC IRQ source this parameter set is registered for
 * @previous_timestamp: timestamp of the previously serviced interrupt
 */
struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};
82
/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object backing the compressor buffer
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};
94
/* Callback type used to handle a notification received from the DMUB. */
typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
96
/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};
109
/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank, false if disabling it
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};
125
/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};
163
/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};
177
/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
	 * ready event when we're already handling mst message ready event
	 */
	bool is_handling_mst_msg_rdy_event;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};
206
/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};
224
/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 *		     driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 *			    transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate if the corresponding @dmub_callback is offloaded
	 * to a thread rather than run directly in IRQ context.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @vblank_lock:
	 *
	 * Guards access to deferred vblank work state.
	 */
	spinlock_t vblank_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is a n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @crc_rd_wrk:
	 *
	 * Work to be executed in a separate thread to communicate with PSP.
	 */
	struct crc_rd_work *crc_rd_wrk;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * Quirk for an HPD disconnect while an AUX transaction is on-going.
	 * Occurs on certain Intel platforms.
	 */
	bool aux_hpd_discon_quirk;

	/**
	 * @dpia_aux_lock:
	 *
	 * Guards access to DPIA AUX
	 */
	struct mutex dpia_aux_lock;
};
568
/**
 * enum dsc_clock_force_state - DSC force override for a connector
 * @DSC_CLK_FORCE_DEFAULT: no override, let the driver decide
 * @DSC_CLK_FORCE_ENABLE: force DSC enabled
 * @DSC_CLK_FORCE_DISABLE: force DSC disabled
 */
enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};
574
/**
 * struct dsc_preferred_settings - Preferred DSC overrides for a connector
 * @dsc_force_enable: whether to force-enable or force-disable DSC
 * @dsc_num_slices_v: requested number of vertical DSC slices
 * @dsc_num_slices_h: requested number of horizontal DSC slices
 * @dsc_bits_per_pixel: requested DSC bits per pixel
 * @dsc_force_disable_passthrough: when true, disable DSC passthrough
 */
struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};
582
/**
 * enum mst_progress_status - Flags recording progress of MST setup/teardown
 * @MST_STATUS_DEFAULT: no MST step recorded yet
 * @MST_PROBE: MST topology probing
 * @MST_REMOTE_EDID: remote EDID retrieval
 * @MST_ALLOCATE_NEW_PAYLOAD: allocation of a new payload
 * @MST_CLEAR_ALLOCATED_PAYLOAD: clearing of an allocated payload
 *
 * Combined as a bitmask in &amdgpu_dm_connector.mst_status via
 * amdgpu_dm_set_mst_status().
 */
enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};
590
591 struct amdgpu_dm_connector {
592
593 struct drm_connector base;
594 uint32_t connector_id;
595
596 /* we need to mind the EDID between detect
597 and get modes due to analog/digital/tvencoder */
598 struct edid *edid;
599
600 /* shared with amdgpu */
601 struct amdgpu_hpd hpd;
602
603 /* number of modes generated from EDID at 'dc_sink' */
604 int num_modes;
605
606 /* The 'old' sink - before an HPD.
607 * The 'current' sink is in dc_link->sink. */
608 struct dc_sink *dc_sink;
609 struct dc_link *dc_link;
610
611 /**
612 * @dc_em_sink: Reference to the emulated (virtual) sink.
613 */
614 struct dc_sink *dc_em_sink;
615
616 /* DM only */
617 struct drm_dp_mst_topology_mgr mst_mgr;
618 struct amdgpu_dm_dp_aux dm_dp_aux;
619 struct drm_dp_mst_port *port;
620 struct amdgpu_dm_connector *mst_port;
621 struct drm_dp_aux *dsc_aux;
622 struct mutex handle_mst_msg_ready;
623
624 /* TODO see if we can merge with ddc_bus or make a dm_connector */
625 struct amdgpu_i2c_adapter *i2c;
626
627 /* Monitor range limits */
628 /**
629 * @min_vfreq: Minimal frequency supported by the display in Hz. This
630 * value is set to zero when there is no FreeSync support.
631 */
632 int min_vfreq;
633
634 /**
635 * @max_vfreq: Maximum frequency supported by the display in Hz. This
636 * value is set to zero when there is no FreeSync support.
637 */
638 int max_vfreq ;
639 int pixel_clock_mhz;
640
641 /* Audio instance - protected by audio_lock. */
642 int audio_inst;
643
644 struct mutex hpd_lock;
645
646 bool fake_enable;
647 #ifdef CONFIG_DEBUG_FS
648 uint32_t debugfs_dpcd_address;
649 uint32_t debugfs_dpcd_size;
650 #endif
651 bool force_yuv420_output;
652 struct dsc_preferred_settings dsc_settings;
653 union dp_downstream_port_present mst_downstream_port_present;
654 /* Cached display modes */
655 struct drm_display_mode freesync_vid_base;
656
657 int psr_skip_count;
658
659 /* Record progress status of mst*/
660 uint8_t mst_status;
661
662 /* Automated testing */
663 bool timing_changed;
664 struct dc_crtc_timing *timing_requested;
665 };
666
/**
 * amdgpu_dm_set_mst_status - Set or clear MST progress status flags
 * @status: pointer to the connector's MST status bitmask
 * @flags: flag bits (see enum mst_progress_status) to modify
 * @set: true to set @flags in @status, false to clear them
 */
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}
675
676 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
677
678 extern const struct amdgpu_ip_block_version dm_ip_block;
679
/**
 * struct dm_plane_state - DM-private extension of &struct drm_plane_state
 * @base: DRM core plane state
 * @dc_state: DC plane state backing this DRM plane
 */
struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};
684
/* DM-private extension of &struct drm_crtc_state. */
struct dm_crtc_state {
	struct drm_crtc_state base;
	/* DC stream backing this CRTC */
	struct dc_stream_state *stream;

	/* color management state */
	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	/* number of planes currently active on this CRTC */
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	/* VRR/FreeSync support and configuration for this CRTC */
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	/* adaptive backlight management level */
	int abm_level;
};
708
709 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
710
/**
 * struct dm_atomic_state - DM-private global atomic state
 * @base: DRM private object state
 * @context: DC state associated with this atomic state
 */
struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};
716
717 #define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
718
/* DM-private extension of &struct drm_connector_state. */
struct dm_connector_state {
	struct drm_connector_state base;

	/* scaling (RMX) mode for this connector */
	enum amdgpu_rmx_type scaling;
	/* underscan border sizes, applied when underscan_enable is true */
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	/* adaptive backlight management level */
	uint8_t abm_level;
	/* DP MST bandwidth: VCPI slot count and payload bandwidth number */
	int vcpi_slots;
	uint64_t pbn;
};
734
/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz.
	 */
	unsigned int max_refresh_rate_hz;
};
764
765
766 #define to_dm_connector_state(x)\
767 container_of((x), struct dm_connector_state, base)
768
769 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
770 struct drm_connector_state *
771 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
772 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
773 struct drm_connector_state *state,
774 struct drm_property *property,
775 uint64_t val);
776
777 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
778 const struct drm_connector_state *state,
779 struct drm_property *property,
780 uint64_t *val);
781
782 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
783
784 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
785 struct amdgpu_dm_connector *aconnector,
786 int connector_type,
787 struct dc_link *link,
788 int link_index);
789
790 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
791 struct drm_display_mode *mode);
792
793 void dm_restore_drm_connector_state(struct drm_device *dev,
794 struct drm_connector *connector);
795
796 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
797 struct edid *edid);
798
799 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
800
801 #define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
803 #define MAX_COLOR_LEGACY_LUT_ENTRIES 256
804
805 void amdgpu_dm_init_color_mod(void);
806 int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
807 int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
808 int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
809 struct dc_plane_state *dc_plane_state);
810
811 void amdgpu_dm_update_connector_after_detect(
812 struct amdgpu_dm_connector *aconnector);
813
814 extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
815
816 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
817 struct aux_payload *payload, enum aux_return_code_type *operation_result);
818
819 int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
820 struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
821
822 bool check_seamless_boot_capability(struct amdgpu_device *adev);
823
824 struct dc_stream_state *
825 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
826 const struct drm_display_mode *drm_mode,
827 const struct dm_connector_state *dm_state,
828 const struct dc_stream_state *old_stream);
829
830 int dm_atomic_get_state(struct drm_atomic_state *state,
831 struct dm_atomic_state **dm_state);
832
833 struct amdgpu_dm_connector *
834 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
835 struct drm_crtc *crtc);
836
837 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
838 #endif /* __AMDGPU_DM_H__ */
839