/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
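/*
 * Illustrative layout of a signed DMUB firmware image as consumed by
 * dm_dmub_hw_init() below. This is a sketch inferred from the offset
 * arithmetic in this file, not an authoritative PSP format description:
 *
 *   ucode_array_offset_bytes
 *   v
 *   +------------------+---------------------------+------------------+-----+
 *   | PSP header       | inst_const (program code) | PSP footer       | bss |
 *   | PSP_HEADER_BYTES |                           | PSP_FOOTER_BYTES |     |
 *   +------------------+---------------------------+------------------+-----+
 *
 * hdr->inst_const_bytes spans header + code + footer, which is why the
 * usable code size is inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES.
 */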

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
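
/*
 * A rough sketch of that liaison role, based on the handlers declared
 * below (the exact call chain varies by code path):
 *
 *   userspace atomic ioctl
 *     -> DRM atomic helpers
 *        -> amdgpu_dm_atomic_check()   (validate the request with DC)
 *        -> amdgpu_dm_atomic_commit()  (program hardware through DC)
 *           -> amdgpu_dm_atomic_commit_tail() -> dc_commit_state()
 */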

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the number of vertical blanks counted on the CRTC, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO: rework base driver to use values directly.
		 * For now parse it back into reg-format.
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
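
/*
 * Worked example of the reg-format packing above (illustrative values):
 * with v_position = 100 (0x0064) and h_position = 1920 (0x0780),
 * *position = 100 | (1920 << 16) = 0x07800064, i.e. vertical position in
 * the low 16 bits and horizontal position in the high 16 bits. *vbl packs
 * v_blank_start/v_blank_end the same way.
 */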

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* The IRQ can fire during the initial stage, before the CRTC exists. */
	/* TODO: work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as it is incremented at the start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only when done after the front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only then vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

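	/*
	 * The copies and memsets below populate the DMUB framebuffer windows
	 * referenced in this function. A summary of the code that follows,
	 * not an exhaustive list of all DMUB windows:
	 *
	 *   window 0 (INST_CONST) - firmware code, unless PSP already loaded it
	 *   window 2 (BSS_DATA)   - firmware data section
	 *   window 3 (VBIOS)      - a copy of the adapter's video BIOS
	 *   window 4 (MAILBOX)    - cleared; driver/DMCUB command mailbox
	 *   window 5 (TRACEBUFF)  - cleared; firmware trace buffer
	 *   window 6 (FW_STATE)   - cleared; firmware state for debugging
	 */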
	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * dmub firmware to cw0; otherwise, the firmware backdoor load
	 * is done here in dm_dmub_hw_init.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* Backdoor load firmware and trigger dmub running. */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			crtc_state->mode_changed = true;
		}
	}
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
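
/*
 * To quirk another system, append an entry with its PCI IDs before the
 * zero terminator above. For example (hypothetical IDs, shown only to
 * illustrate the field order: chip vendor/device, subsystem vendor/device,
 * revision):
 *
 *	{ 0x1002, 0x15d8, 0x103c, 0x8615, 0xc1 },
 */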

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO: use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	/* Disable vblank IRQs aggressively for power-saving */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
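	/*
	 * In other words (a reading of the math above, not a separate spec):
	 * the DMCU image's ucode_size_bytes covers two regions, the ERAM
	 * program and the interrupt vectors (intv). The ERAM portion is
	 * ucode_size_bytes - intv_size_bytes, and each region is page-aligned
	 * before being added to the total PSP firmware size.
	 */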

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_memory_params memory_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;
	region_params.is_mailbox_in_inbox = false;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&memory_params, 0, sizeof(memory_params));
	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
	memory_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		kfree(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
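	/*
	 * This yields an evenly spaced 16-entry identity ramp:
	 * linear_lut[0] = 0x0000, linear_lut[1] = 0x1111 (0xFFFF * 1 / 15),
	 * stepping by 0x1111 up to linear_lut[15] = 0xFFFF.
	 */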
1516
1517 params.set = 0;
1518 params.backlight_ramping_start = 0xCCCC;
1519 params.backlight_ramping_reduction = 0xCCCCCCCC;
1520 params.backlight_lut_array_size = 16;
1521 params.backlight_lut_array = linear_lut;
1522
1523 /* Min backlight level after ABM reduction, Don't allow below 1%
1524 * 0xFFFF x 0.01 = 0x28F
1525 */
1526 params.min_abm_backlight = 0x28F;
1527
1528 /* In the case where abm is implemented on dmcub,
1529 * dmcu object will be null.
1530 * ABM 2.4 and up are implemented on dmcub.
1531 */
1532 if (dmcu)
1533 ret = dmcu_load_iram(dmcu, params);
1534 else if (adev->dm.dc->ctx->dmub_srv)
1535 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1536
1537 if (!ret)
1538 return -EINVAL;
1539
1540 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1541 }
1542
s3_handle_mst(struct drm_device * dev,bool suspend)1543 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1544 {
1545 struct amdgpu_dm_connector *aconnector;
1546 struct drm_connector *connector;
1547 struct drm_connector_list_iter iter;
1548 struct drm_dp_mst_topology_mgr *mgr;
1549 int ret;
1550 bool need_hotplug = false;
1551
1552 drm_connector_list_iter_begin(dev, &iter);
1553 drm_for_each_connector_iter(connector, &iter) {
1554 aconnector = to_amdgpu_dm_connector(connector);
1555 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1556 aconnector->mst_port)
1557 continue;
1558
1559 mgr = &aconnector->mst_mgr;
1560
1561 if (suspend) {
1562 drm_dp_mst_topology_mgr_suspend(mgr);
1563 } else {
1564 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1565 if (ret < 0) {
1566 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1567 need_hotplug = true;
1568 }
1569 }
1570 }
1571 drm_connector_list_iter_end(&iter);
1572
1573 if (need_hotplug)
1574 drm_kms_helper_hotplug_event(dev);
1575 }
1576
amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device * adev)1577 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1578 {
1579 struct smu_context *smu = &adev->smu;
1580 int ret = 0;
1581
1582 if (!is_support_sw_smu(adev))
1583 return 0;
1584
1585 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1586 * on window driver dc implementation.
1587 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1588 * should be passed to smu during boot up and resume from s3.
1589 * boot up: dc calculate dcn watermark clock settings within dc_create,
1590 * dcn20_resource_construct
1591 * then call pplib functions below to pass the settings to smu:
1592 * smu_set_watermarks_for_clock_ranges
1593 * smu_set_watermarks_table
1594 * navi10_set_watermarks_table
1595 * smu_write_watermarks_table
1596 *
1597 * For Renoir, clock settings of dcn watermark are also fixed values.
1598 * dc has implemented different flow for window driver:
1599 * dc_hardware_init / dc_set_power_state
1600 * dcn10_init_hw
1601 * notify_wm_ranges
1602 * set_wm_ranges
1603 * -- Linux
1604 * smu_set_watermarks_for_clock_ranges
1605 * renoir_set_watermarks_table
1606 * smu_write_watermarks_table
1607 *
1608 * For Linux,
1609 * dc_hardware_init -> amdgpu_dm_init
1610 * dc_set_power_state --> dm_resume
1611 *
1612 * therefore, this function apply to navi10/12/14 but not Renoir
1613 * *
1614 */
1615 switch(adev->asic_type) {
1616 case CHIP_NAVI10:
1617 case CHIP_NAVI14:
1618 case CHIP_NAVI12:
1619 break;
1620 default:
1621 return 0;
1622 }
1623
1624 ret = smu_write_watermarks_table(smu);
1625 if (ret) {
1626 DRM_ERROR("Failed to update WMTABLE!\n");
1627 return ret;
1628 }
1629
1630 return 0;
1631 }
1632
1633 /**
1634 * dm_hw_init() - Initialize DC device
1635 * @handle: The base driver device containing the amdgpu_dm device.
1636 *
1637 * Initialize the &struct amdgpu_display_manager device. This involves calling
1638 * the initializers of each DM component, then populating the struct with them.
1639 *
1640 * Although the function implies hardware initialization, both hardware and
1641 * software are initialized here. Splitting them out to their relevant init
1642 * hooks is a future TODO item.
1643 *
1644 * Some notable things that are initialized here:
1645 *
1646 * - Display Core, both software and hardware
1647 * - DC modules that we need (freesync and color management)
1648 * - DRM software states
1649 * - Interrupt sources and handlers
1650 * - Vblank support
1651 * - Debug FS entries, if enabled
1652 */
dm_hw_init(void * handle)1653 static int dm_hw_init(void *handle)
1654 {
1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656 /* Create DAL display manager */
1657 amdgpu_dm_init(adev);
1658 amdgpu_dm_hpd_init(adev);
1659
1660 return 0;
1661 }
1662
1663 /**
1664 * dm_hw_fini() - Teardown DC device
1665 * @handle: The base driver device containing the amdgpu_dm device.
1666 *
1667 * Teardown components within &struct amdgpu_display_manager that require
1668 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1669 * were loaded. Also flush IRQ workqueues and disable them.
1670 */
dm_hw_fini(void * handle)1671 static int dm_hw_fini(void *handle)
1672 {
1673 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1674
1675 amdgpu_dm_hpd_fini(adev);
1676
1677 amdgpu_dm_irq_fini(adev);
1678 amdgpu_dm_fini(adev);
1679 return 0;
1680 }
1681
1682
1683 static int dm_enable_vblank(struct drm_crtc *crtc);
1684 static void dm_disable_vblank(struct drm_crtc *crtc);
1685
dm_gpureset_toggle_interrupts(struct amdgpu_device * adev,struct dc_state * state,bool enable)1686 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1687 struct dc_state *state, bool enable)
1688 {
1689 enum dc_irq_source irq_source;
1690 struct amdgpu_crtc *acrtc;
1691 int rc = -EBUSY;
1692 int i = 0;
1693
1694 for (i = 0; i < state->stream_count; i++) {
1695 acrtc = get_crtc_by_otg_inst(
1696 adev, state->stream_status[i].primary_otg_inst);
1697
1698 if (acrtc && state->stream_status[i].plane_count != 0) {
1699 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1700 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1701 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1702 acrtc->crtc_id, enable ? "en" : "dis", rc);
1703 if (rc)
1704 DRM_WARN("Failed to %s pflip interrupts\n",
1705 enable ? "enable" : "disable");
1706
1707 if (enable) {
1708 rc = dm_enable_vblank(&acrtc->base);
1709 if (rc)
1710 DRM_WARN("Failed to enable vblank interrupts\n");
1711 } else {
1712 dm_disable_vblank(&acrtc->base);
1713 }
1714
1715 }
1716 }
1717
1718 }

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);
}

static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->dc_link)
			continue;

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->dc_link->type == dc_connection_mst_branch)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
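
/*
 * As an illustration (the call site lives in the SoC setup code, not in this
 * file), the base driver picks up the DM IP block roughly like:
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which it invokes the amd_ip_funcs hooks below at the matching points
 * of the device lifecycle (init, suspend/resume, teardown).
 */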

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_avg, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that complexity, we exploit the fact that CV is
	 * divided by a constant. By Euclid's division algorithm, CV can be
	 * written as CV = 32*q + r. Substituting into the luminance expression
	 * gives 50*(2**q)*(2**(r/32)), so only the values of 50*2**(r/32) for
	 * r = 0..31 need to be pre-computed. They were generated with the
	 * following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and are stored in pre_computed_values above.
	 */
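	/*
	 * Worked example (illustrative, derived from the formula above):
	 * max_fall CV = 70 gives q = 70 / 32 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
	 * matching round(50 * 2**(70/32)) = 228.
	 */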
	q = max_avg >> 5;
	r = max_avg % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * The EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that, the connector sink is set to either
	 * the fake or the physical sink, depending on link status.
	 * Skip if this was already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because on resume connector->sink is set
		 * to NULL.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink's refcount, because the link no longer
				 * points to it after disconnect; otherwise,
				 * the next crtc-to-connector reshuffle by
				 * userspace would trigger an unwanted dc_sink
				 * release.
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		drm_connector_update_edid_property(connector, aconnector->edid);
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = drm_to_adev(dev);
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none)
			dm_set_dpms_off(aconnector->dc_link);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;

		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify the downstream device */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = drm_to_adev(dev);

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: temporarily take this mutex so the HPD interrupt cannot race
	 * on the GPIO; once an i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
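
/*
 * Typical usage (a sketch; the real call sites live in this file's atomic
 * check path): callers hold a dm_state pointer that starts out NULL and is
 * filled in at most once per atomic check:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *
 * Grabbing the private object state also takes the private object's modeset
 * lock under the atomic state's acquire context.
 */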

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}
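
/*
 * Worked example (assuming the PWM path with the default firmware limits of
 * 12 and 255 defined above): get_brightness_range() yields
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 0 maps to 3084, 255 maps to 65535, and intermediate values
 * scale linearly between them. convert_brightness_to_user() below performs
 * the inverse mapping.
 */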

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
	else
		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	if (caps.aux_support) {
		struct dc_link *link = (struct dc_link *)dm->backlight_link;
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return bd->props.brightness;
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(dm->backlight_link);

		if (ret == DC_ERROR_UNEXPECTED)
			return bd->props.brightness;
		return convert_brightness_to_user(&caps, ret);
	}
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
						      adev_to_drm(dm->adev)->dev,
						      dm,
						      &amdgpu_dm_backlight_ops,
						      &props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;
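
	/*
	 * For example: primary plane 1 ends up with possible_crtcs = 0x2
	 * (CRTC 1 only), while a plane_id at or beyond max_streams (an
	 * overlay plane) gets 0xff and may be placed on any CRTC.
	 */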

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}


static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}


/*
 * In this architecture, the association
 *   connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
}
3513
3514 /******************************************************************************
3515 * amdgpu_display_funcs functions
3516 *****************************************************************************/
3517
3518 /*
3519 * dm_bandwidth_update - program display watermarks
3520 *
3521 * @adev: amdgpu_device pointer
3522 *
3523 * Calculate and program the display watermarks and line buffer allocation.
3524 */
3525 static void dm_bandwidth_update(struct amdgpu_device *adev)
3526 {
3527 /* TODO: implement later */
3528 }
3529
3530 static const struct amdgpu_display_funcs dm_display_funcs = {
3531 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3532 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3533 .backlight_set_level = NULL, /* never called for DC */
3534 .backlight_get_level = NULL, /* never called for DC */
3535 .hpd_sense = NULL,/* called unconditionally */
3536 .hpd_set_polarity = NULL, /* called unconditionally */
3537 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3538 .page_flip_get_scanoutpos =
3539 dm_crtc_get_scanoutpos,/* called unconditionally */
3540 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3541 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3542 };
3543
3544 #if defined(CONFIG_DEBUG_KERNEL_DC)
3545
3546 static ssize_t s3_debug_store(struct device *device,
3547 struct device_attribute *attr,
3548 const char *buf,
3549 size_t count)
3550 {
3551 int ret;
3552 int s3_state;
3553 struct drm_device *drm_dev = dev_get_drvdata(device);
3554 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3555
3556 ret = kstrtoint(buf, 0, &s3_state);
3557
3558 if (ret == 0) {
3559 if (s3_state) {
3560 dm_resume(adev);
3561 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3562 } else
3563 dm_suspend(adev);
3564 }
3565
3566 return ret == 0 ? count : 0;
3567 }
3568
3569 DEVICE_ATTR_WO(s3_debug);
3570
3571 #endif
3572
3573 static int dm_early_init(void *handle)
3574 {
3575 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3576
3577 switch (adev->asic_type) {
3578 #if defined(CONFIG_DRM_AMD_DC_SI)
3579 case CHIP_TAHITI:
3580 case CHIP_PITCAIRN:
3581 case CHIP_VERDE:
3582 adev->mode_info.num_crtc = 6;
3583 adev->mode_info.num_hpd = 6;
3584 adev->mode_info.num_dig = 6;
3585 break;
3586 case CHIP_OLAND:
3587 adev->mode_info.num_crtc = 2;
3588 adev->mode_info.num_hpd = 2;
3589 adev->mode_info.num_dig = 2;
3590 break;
3591 #endif
3592 case CHIP_BONAIRE:
3593 case CHIP_HAWAII:
3594 adev->mode_info.num_crtc = 6;
3595 adev->mode_info.num_hpd = 6;
3596 adev->mode_info.num_dig = 6;
3597 break;
3598 case CHIP_KAVERI:
3599 adev->mode_info.num_crtc = 4;
3600 adev->mode_info.num_hpd = 6;
3601 adev->mode_info.num_dig = 7;
3602 break;
3603 case CHIP_KABINI:
3604 case CHIP_MULLINS:
3605 adev->mode_info.num_crtc = 2;
3606 adev->mode_info.num_hpd = 6;
3607 adev->mode_info.num_dig = 6;
3608 break;
3609 case CHIP_FIJI:
3610 case CHIP_TONGA:
3611 adev->mode_info.num_crtc = 6;
3612 adev->mode_info.num_hpd = 6;
3613 adev->mode_info.num_dig = 7;
3614 break;
3615 case CHIP_CARRIZO:
3616 adev->mode_info.num_crtc = 3;
3617 adev->mode_info.num_hpd = 6;
3618 adev->mode_info.num_dig = 9;
3619 break;
3620 case CHIP_STONEY:
3621 adev->mode_info.num_crtc = 2;
3622 adev->mode_info.num_hpd = 6;
3623 adev->mode_info.num_dig = 9;
3624 break;
3625 case CHIP_POLARIS11:
3626 case CHIP_POLARIS12:
3627 adev->mode_info.num_crtc = 5;
3628 adev->mode_info.num_hpd = 5;
3629 adev->mode_info.num_dig = 5;
3630 break;
3631 case CHIP_POLARIS10:
3632 case CHIP_VEGAM:
3633 adev->mode_info.num_crtc = 6;
3634 adev->mode_info.num_hpd = 6;
3635 adev->mode_info.num_dig = 6;
3636 break;
3637 case CHIP_VEGA10:
3638 case CHIP_VEGA12:
3639 case CHIP_VEGA20:
3640 adev->mode_info.num_crtc = 6;
3641 adev->mode_info.num_hpd = 6;
3642 adev->mode_info.num_dig = 6;
3643 break;
3644 #if defined(CONFIG_DRM_AMD_DC_DCN)
3645 case CHIP_RAVEN:
3646 adev->mode_info.num_crtc = 4;
3647 adev->mode_info.num_hpd = 4;
3648 adev->mode_info.num_dig = 4;
3649 break;
3650 #endif
3651 case CHIP_NAVI10:
3652 case CHIP_NAVI12:
3653 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3654 case CHIP_SIENNA_CICHLID:
3655 case CHIP_NAVY_FLOUNDER:
3656 #endif
3657 adev->mode_info.num_crtc = 6;
3658 adev->mode_info.num_hpd = 6;
3659 adev->mode_info.num_dig = 6;
3660 break;
3661 case CHIP_NAVI14:
3662 adev->mode_info.num_crtc = 5;
3663 adev->mode_info.num_hpd = 5;
3664 adev->mode_info.num_dig = 5;
3665 break;
3666 case CHIP_RENOIR:
3667 adev->mode_info.num_crtc = 4;
3668 adev->mode_info.num_hpd = 4;
3669 adev->mode_info.num_dig = 4;
3670 break;
3671 default:
3672 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3673 return -EINVAL;
3674 }
3675
3676 amdgpu_dm_set_irq_funcs(adev);
3677
3678 if (adev->mode_info.funcs == NULL)
3679 adev->mode_info.funcs = &dm_display_funcs;
3680
3681 /*
3682 * Note: Do NOT change adev->audio_endpt_rreg and
3683 * adev->audio_endpt_wreg because they are initialised in
3684 * amdgpu_device_init()
3685 */
3686 #if defined(CONFIG_DEBUG_KERNEL_DC)
3687 device_create_file(
3688 adev_to_drm(adev)->dev,
3689 &dev_attr_s3_debug);
3690 #endif
3691
3692 return 0;
3693 }
3694
3695 static bool modeset_required(struct drm_crtc_state *crtc_state,
3696 struct dc_stream_state *new_stream,
3697 struct dc_stream_state *old_stream)
3698 {
3699 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3700 }
3701
3702 static bool modereset_required(struct drm_crtc_state *crtc_state)
3703 {
3704 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3705 }
3706
3707 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3708 {
3709 drm_encoder_cleanup(encoder);
3710 kfree(encoder);
3711 }
3712
3713 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3714 .destroy = amdgpu_dm_encoder_destroy,
3715 };
3716
3717
3718 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3719 struct dc_scaling_info *scaling_info)
3720 {
3721 int scale_w, scale_h;
3722
3723 memset(scaling_info, 0, sizeof(*scaling_info));
3724
3725 /* Source is fixed 16.16 but we ignore mantissa for now... */
3726 scaling_info->src_rect.x = state->src_x >> 16;
3727 scaling_info->src_rect.y = state->src_y >> 16;
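/*
 * For instance, a src_x of 0x00140000 (20.0 in 16.16 fixed point)
 * truncates to src_rect.x = 20; fractional bits such as 0x00148000
 * (20.5) are simply dropped by the shift above.
 */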
3728
3729 /*
3730 * For reasons we don't (yet) fully understand a non-zero
3731 * src_y coordinate into an NV12 buffer can cause a
3732 * system hang. To avoid hangs (and maybe be overly cautious)
3733 * let's reject both non-zero src_x and src_y.
3734 *
3735 * We currently know of only one use-case to reproduce a
3736 * scenario with non-zero src_x and src_y for NV12, which
3737 * is to gesture the YouTube Android app into full screen
3738 * on ChromeOS.
3739 */
3740 if (state->fb &&
3741 state->fb->format->format == DRM_FORMAT_NV12 &&
3742 (scaling_info->src_rect.x != 0 ||
3743 scaling_info->src_rect.y != 0))
3744 return -EINVAL;
3762
3763 scaling_info->src_rect.width = state->src_w >> 16;
3764 if (scaling_info->src_rect.width == 0)
3765 return -EINVAL;
3766
3767 scaling_info->src_rect.height = state->src_h >> 16;
3768 if (scaling_info->src_rect.height == 0)
3769 return -EINVAL;
3770
3771 scaling_info->dst_rect.x = state->crtc_x;
3772 scaling_info->dst_rect.y = state->crtc_y;
3773
3774 if (state->crtc_w == 0)
3775 return -EINVAL;
3776
3777 scaling_info->dst_rect.width = state->crtc_w;
3778
3779 if (state->crtc_h == 0)
3780 return -EINVAL;
3781
3782 scaling_info->dst_rect.height = state->crtc_h;
3783
3784 /* DRM doesn't specify clipping on destination output. */
3785 scaling_info->clip_rect = scaling_info->dst_rect;
3786
3787 /* TODO: Validate scaling per-format with DC plane caps */
3788 scale_w = scaling_info->dst_rect.width * 1000 /
3789 scaling_info->src_rect.width;
3790
3791 if (scale_w < 250 || scale_w > 16000)
3792 return -EINVAL;
3793
3794 scale_h = scaling_info->dst_rect.height * 1000 /
3795 scaling_info->src_rect.height;
3796
3797 if (scale_h < 250 || scale_h > 16000)
3798 return -EINVAL;
3799
3800 /*
3801 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3802 * assume reasonable defaults based on the format.
3803 */
3804
3805 return 0;
3806 }
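/*
 * The scale-factor checks in fill_dc_scaling_info() work in units of
 * 1/1000. For example, scaling a 960-wide source onto a 1920-wide
 * destination gives scale_w = 1920 * 1000 / 960 = 2000 (a 2.0x
 * upscale), well inside the accepted 250..16000 window (0.25x
 * downscale up to 16x upscale).
 */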
3807
3808 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3809 uint64_t *tiling_flags, bool *tmz_surface)
3810 {
3811 struct amdgpu_bo *rbo;
3812 int r;
3813
3814 if (!amdgpu_fb) {
3815 *tiling_flags = 0;
3816 *tmz_surface = false;
3817 return 0;
3818 }
3819
3820 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3821 r = amdgpu_bo_reserve(rbo, false);
3822
3823 if (unlikely(r)) {
3824 /* Don't show error message when returning -ERESTARTSYS */
3825 if (r != -ERESTARTSYS)
3826 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3827 return r;
3828 }
3829
3830 if (tiling_flags)
3831 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3832
3833 if (tmz_surface)
3834 *tmz_surface = amdgpu_bo_encrypted(rbo);
3835
3836 amdgpu_bo_unreserve(rbo);
3837
3838 return r;
3839 }
3840
3841 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3842 {
3843 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3844
3845 return offset ? (address + offset * 256) : 0;
3846 }
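/*
 * DCC_OFFSET_256B stores the metadata offset in 256-byte units, so an
 * offset field of 0x100, for example, places the DCC metadata at
 * address + 0x100 * 256 = address + 0x10000. A zero offset means the
 * buffer carries no DCC metadata and 0 is returned as a sentinel.
 */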
3847
3848 static int
3849 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3850 const struct amdgpu_framebuffer *afb,
3851 const enum surface_pixel_format format,
3852 const enum dc_rotation_angle rotation,
3853 const struct plane_size *plane_size,
3854 const union dc_tiling_info *tiling_info,
3855 const uint64_t info,
3856 struct dc_plane_dcc_param *dcc,
3857 struct dc_plane_address *address,
3858 bool force_disable_dcc)
3859 {
3860 struct dc *dc = adev->dm.dc;
3861 struct dc_dcc_surface_param input;
3862 struct dc_surface_dcc_cap output;
3863 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3864 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3865 uint64_t dcc_address;
3866
3867 memset(&input, 0, sizeof(input));
3868 memset(&output, 0, sizeof(output));
3869
3870 if (force_disable_dcc)
3871 return 0;
3872
3873 if (!offset)
3874 return 0;
3875
3876 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3877 return 0;
3878
3879 if (!dc->cap_funcs.get_dcc_compression_cap)
3880 return -EINVAL;
3881
3882 input.format = format;
3883 input.surface_size.width = plane_size->surface_size.width;
3884 input.surface_size.height = plane_size->surface_size.height;
3885 input.swizzle_mode = tiling_info->gfx9.swizzle;
3886
3887 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3888 input.scan = SCAN_DIRECTION_HORIZONTAL;
3889 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3890 input.scan = SCAN_DIRECTION_VERTICAL;
3891
3892 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3893 return -EINVAL;
3894
3895 if (!output.capable)
3896 return -EINVAL;
3897
3898 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3899 return -EINVAL;
3900
3901 dcc->enable = 1;
3902 dcc->meta_pitch =
3903 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3904 dcc->independent_64b_blks = i64b;
3905
3906 dcc_address = get_dcc_address(afb->address, info);
3907 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3908 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3909
3910 return 0;
3911 }
3912
3913 static int
3914 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3915 const struct amdgpu_framebuffer *afb,
3916 const enum surface_pixel_format format,
3917 const enum dc_rotation_angle rotation,
3918 const uint64_t tiling_flags,
3919 union dc_tiling_info *tiling_info,
3920 struct plane_size *plane_size,
3921 struct dc_plane_dcc_param *dcc,
3922 struct dc_plane_address *address,
3923 bool tmz_surface,
3924 bool force_disable_dcc)
3925 {
3926 const struct drm_framebuffer *fb = &afb->base;
3927 int ret;
3928
3929 memset(tiling_info, 0, sizeof(*tiling_info));
3930 memset(plane_size, 0, sizeof(*plane_size));
3931 memset(dcc, 0, sizeof(*dcc));
3932 memset(address, 0, sizeof(*address));
3933
3934 address->tmz_surface = tmz_surface;
3935
3936 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3937 plane_size->surface_size.x = 0;
3938 plane_size->surface_size.y = 0;
3939 plane_size->surface_size.width = fb->width;
3940 plane_size->surface_size.height = fb->height;
3941 plane_size->surface_pitch =
3942 fb->pitches[0] / fb->format->cpp[0];
3943
3944 address->type = PLN_ADDR_TYPE_GRAPHICS;
3945 address->grph.addr.low_part = lower_32_bits(afb->address);
3946 address->grph.addr.high_part = upper_32_bits(afb->address);
3947 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3948 uint64_t chroma_addr = afb->address + fb->offsets[1];
3949
3950 plane_size->surface_size.x = 0;
3951 plane_size->surface_size.y = 0;
3952 plane_size->surface_size.width = fb->width;
3953 plane_size->surface_size.height = fb->height;
3954 plane_size->surface_pitch =
3955 fb->pitches[0] / fb->format->cpp[0];
3956
3957 plane_size->chroma_size.x = 0;
3958 plane_size->chroma_size.y = 0;
3959 /* TODO: set these based on surface format */
3960 plane_size->chroma_size.width = fb->width / 2;
3961 plane_size->chroma_size.height = fb->height / 2;
3962
3963 plane_size->chroma_pitch =
3964 fb->pitches[1] / fb->format->cpp[1];
3965
3966 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3967 address->video_progressive.luma_addr.low_part =
3968 lower_32_bits(afb->address);
3969 address->video_progressive.luma_addr.high_part =
3970 upper_32_bits(afb->address);
3971 address->video_progressive.chroma_addr.low_part =
3972 lower_32_bits(chroma_addr);
3973 address->video_progressive.chroma_addr.high_part =
3974 upper_32_bits(chroma_addr);
3975 }
3976
3977 /* Fill GFX8 params */
3978 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3979 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3980
3981 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3982 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3983 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3984 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3985 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3986
3987 /* XXX fix me for VI */
3988 tiling_info->gfx8.num_banks = num_banks;
3989 tiling_info->gfx8.array_mode =
3990 DC_ARRAY_2D_TILED_THIN1;
3991 tiling_info->gfx8.tile_split = tile_split;
3992 tiling_info->gfx8.bank_width = bankw;
3993 tiling_info->gfx8.bank_height = bankh;
3994 tiling_info->gfx8.tile_aspect = mtaspect;
3995 tiling_info->gfx8.tile_mode =
3996 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3997 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3998 == DC_ARRAY_1D_TILED_THIN1) {
3999 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4000 }
4001
4002 tiling_info->gfx8.pipe_config =
4003 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4004
4005 if (adev->asic_type == CHIP_VEGA10 ||
4006 adev->asic_type == CHIP_VEGA12 ||
4007 adev->asic_type == CHIP_VEGA20 ||
4008 adev->asic_type == CHIP_NAVI10 ||
4009 adev->asic_type == CHIP_NAVI14 ||
4010 adev->asic_type == CHIP_NAVI12 ||
4011 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
4012 adev->asic_type == CHIP_SIENNA_CICHLID ||
4013 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4014 #endif
4015 adev->asic_type == CHIP_RENOIR ||
4016 adev->asic_type == CHIP_RAVEN) {
4017 /* Fill GFX9 params */
4018 tiling_info->gfx9.num_pipes =
4019 adev->gfx.config.gb_addr_config_fields.num_pipes;
4020 tiling_info->gfx9.num_banks =
4021 adev->gfx.config.gb_addr_config_fields.num_banks;
4022 tiling_info->gfx9.pipe_interleave =
4023 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4024 tiling_info->gfx9.num_shader_engines =
4025 adev->gfx.config.gb_addr_config_fields.num_se;
4026 tiling_info->gfx9.max_compressed_frags =
4027 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4028 tiling_info->gfx9.num_rb_per_se =
4029 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4030 tiling_info->gfx9.swizzle =
4031 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
4032 tiling_info->gfx9.shaderEnable = 1;
4033
4034 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
4035 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4036 adev->asic_type == CHIP_NAVY_FLOUNDER)
4037 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4038 #endif
4039 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4040 plane_size, tiling_info,
4041 tiling_flags, dcc, address,
4042 force_disable_dcc);
4043 if (ret)
4044 return ret;
4045 }
4046
4047 return 0;
4048 }
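/*
 * For the semi-planar video path above, a 1920x1080 NV12 framebuffer
 * would get a 960x540 chroma plane from the hardcoded /2 subsampling;
 * with fb->pitches[1] = 1920 bytes and cpp[1] = 2 for the interleaved
 * CbCr plane, chroma_pitch works out to 960 samples.
 */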
4049
4050 static void
4051 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4052 bool *per_pixel_alpha, bool *global_alpha,
4053 int *global_alpha_value)
4054 {
4055 *per_pixel_alpha = false;
4056 *global_alpha = false;
4057 *global_alpha_value = 0xff;
4058
4059 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4060 return;
4061
4062 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4063 static const uint32_t alpha_formats[] = {
4064 DRM_FORMAT_ARGB8888,
4065 DRM_FORMAT_RGBA8888,
4066 DRM_FORMAT_ABGR8888,
4067 };
4068 uint32_t format = plane_state->fb->format->format;
4069 unsigned int i;
4070
4071 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4072 if (format == alpha_formats[i]) {
4073 *per_pixel_alpha = true;
4074 break;
4075 }
4076 }
4077 }
4078
4079 if (plane_state->alpha < 0xffff) {
4080 *global_alpha = true;
4081 *global_alpha_value = plane_state->alpha >> 8;
4082 }
4083 }
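/*
 * plane_state->alpha is a 16-bit DRM property (0xffff == opaque) while
 * DC takes an 8-bit global alpha, so the >> 8 above maps, for example,
 * a half-transparent 0x8080 to a global_alpha_value of 0x80.
 */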
4084
4085 static int
4086 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4087 const enum surface_pixel_format format,
4088 enum dc_color_space *color_space)
4089 {
4090 bool full_range;
4091
4092 *color_space = COLOR_SPACE_SRGB;
4093
4094 /* DRM color properties only affect non-RGB formats. */
4095 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4096 return 0;
4097
4098 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4099
4100 switch (plane_state->color_encoding) {
4101 case DRM_COLOR_YCBCR_BT601:
4102 if (full_range)
4103 *color_space = COLOR_SPACE_YCBCR601;
4104 else
4105 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4106 break;
4107
4108 case DRM_COLOR_YCBCR_BT709:
4109 if (full_range)
4110 *color_space = COLOR_SPACE_YCBCR709;
4111 else
4112 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4113 break;
4114
4115 case DRM_COLOR_YCBCR_BT2020:
4116 if (full_range)
4117 *color_space = COLOR_SPACE_2020_YCBCR;
4118 else
4119 return -EINVAL;
4120 break;
4121
4122 default:
4123 return -EINVAL;
4124 }
4125
4126 return 0;
4127 }
4128
4129 static int
4130 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4131 const struct drm_plane_state *plane_state,
4132 const uint64_t tiling_flags,
4133 struct dc_plane_info *plane_info,
4134 struct dc_plane_address *address,
4135 bool tmz_surface,
4136 bool force_disable_dcc)
4137 {
4138 const struct drm_framebuffer *fb = plane_state->fb;
4139 const struct amdgpu_framebuffer *afb =
4140 to_amdgpu_framebuffer(plane_state->fb);
4141 struct drm_format_name_buf format_name;
4142 int ret;
4143
4144 memset(plane_info, 0, sizeof(*plane_info));
4145
4146 switch (fb->format->format) {
4147 case DRM_FORMAT_C8:
4148 plane_info->format =
4149 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4150 break;
4151 case DRM_FORMAT_RGB565:
4152 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4153 break;
4154 case DRM_FORMAT_XRGB8888:
4155 case DRM_FORMAT_ARGB8888:
4156 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4157 break;
4158 case DRM_FORMAT_XRGB2101010:
4159 case DRM_FORMAT_ARGB2101010:
4160 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4161 break;
4162 case DRM_FORMAT_XBGR2101010:
4163 case DRM_FORMAT_ABGR2101010:
4164 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4165 break;
4166 case DRM_FORMAT_XBGR8888:
4167 case DRM_FORMAT_ABGR8888:
4168 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4169 break;
4170 case DRM_FORMAT_NV21:
4171 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4172 break;
4173 case DRM_FORMAT_NV12:
4174 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4175 break;
4176 case DRM_FORMAT_P010:
4177 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4178 break;
4179 case DRM_FORMAT_XRGB16161616F:
4180 case DRM_FORMAT_ARGB16161616F:
4181 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4182 break;
4183 case DRM_FORMAT_XBGR16161616F:
4184 case DRM_FORMAT_ABGR16161616F:
4185 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4186 break;
4187 default:
4188 DRM_ERROR(
4189 "Unsupported screen format %s\n",
4190 drm_get_format_name(fb->format->format, &format_name));
4191 return -EINVAL;
4192 }
4193
4194 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4195 case DRM_MODE_ROTATE_0:
4196 plane_info->rotation = ROTATION_ANGLE_0;
4197 break;
4198 case DRM_MODE_ROTATE_90:
4199 plane_info->rotation = ROTATION_ANGLE_90;
4200 break;
4201 case DRM_MODE_ROTATE_180:
4202 plane_info->rotation = ROTATION_ANGLE_180;
4203 break;
4204 case DRM_MODE_ROTATE_270:
4205 plane_info->rotation = ROTATION_ANGLE_270;
4206 break;
4207 default:
4208 plane_info->rotation = ROTATION_ANGLE_0;
4209 break;
4210 }
4211
4212 plane_info->visible = true;
4213 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4214
4215 plane_info->layer_index = 0;
4216
4217 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4218 &plane_info->color_space);
4219 if (ret)
4220 return ret;
4221
4222 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4223 plane_info->rotation, tiling_flags,
4224 &plane_info->tiling_info,
4225 &plane_info->plane_size,
4226 &plane_info->dcc, address, tmz_surface,
4227 force_disable_dcc);
4228 if (ret)
4229 return ret;
4230
4231 fill_blending_from_plane_state(
4232 plane_state, &plane_info->per_pixel_alpha,
4233 &plane_info->global_alpha, &plane_info->global_alpha_value);
4234
4235 return 0;
4236 }
4237
4238 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4239 struct dc_plane_state *dc_plane_state,
4240 struct drm_plane_state *plane_state,
4241 struct drm_crtc_state *crtc_state)
4242 {
4243 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4244 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4245 struct dc_scaling_info scaling_info;
4246 struct dc_plane_info plane_info;
4247 int ret;
4248 bool force_disable_dcc = false;
4249
4250 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4251 if (ret)
4252 return ret;
4253
4254 dc_plane_state->src_rect = scaling_info.src_rect;
4255 dc_plane_state->dst_rect = scaling_info.dst_rect;
4256 dc_plane_state->clip_rect = scaling_info.clip_rect;
4257 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4258
4259 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4260 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4261 dm_plane_state->tiling_flags,
4262 &plane_info,
4263 &dc_plane_state->address,
4264 dm_plane_state->tmz_surface,
4265 force_disable_dcc);
4266 if (ret)
4267 return ret;
4268
4269 dc_plane_state->format = plane_info.format;
4270 dc_plane_state->color_space = plane_info.color_space;
4272 dc_plane_state->plane_size = plane_info.plane_size;
4273 dc_plane_state->rotation = plane_info.rotation;
4274 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4275 dc_plane_state->stereo_format = plane_info.stereo_format;
4276 dc_plane_state->tiling_info = plane_info.tiling_info;
4277 dc_plane_state->visible = plane_info.visible;
4278 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4279 dc_plane_state->global_alpha = plane_info.global_alpha;
4280 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4281 dc_plane_state->dcc = plane_info.dcc;
4282 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4283
4284 /*
4285 * Always set input transfer function, since plane state is refreshed
4286 * every time.
4287 */
4288 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4289 if (ret)
4290 return ret;
4291
4292 return 0;
4293 }
4294
4295 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4296 const struct dm_connector_state *dm_state,
4297 struct dc_stream_state *stream)
4298 {
4299 enum amdgpu_rmx_type rmx_type;
4300
4301 struct rect src = { 0 }; /* viewport in composition space*/
4302 struct rect dst = { 0 }; /* stream addressable area */
4303
4304 /* no mode. nothing to be done */
4305 if (!mode)
4306 return;
4307
4308 /* Full screen scaling by default */
4309 src.width = mode->hdisplay;
4310 src.height = mode->vdisplay;
4311 dst.width = stream->timing.h_addressable;
4312 dst.height = stream->timing.v_addressable;
4313
4314 if (dm_state) {
4315 rmx_type = dm_state->scaling;
4316 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4317 if (src.width * dst.height <
4318 src.height * dst.width) {
4319 /* height needs less upscaling/more downscaling */
4320 dst.width = src.width *
4321 dst.height / src.height;
4322 } else {
4323 /* width needs less upscaling/more downscaling */
4324 dst.height = src.height *
4325 dst.width / src.width;
4326 }
4327 } else if (rmx_type == RMX_CENTER) {
4328 dst = src;
4329 }
4330
4331 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4332 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4333
4334 if (dm_state->underscan_enable) {
4335 dst.x += dm_state->underscan_hborder / 2;
4336 dst.y += dm_state->underscan_vborder / 2;
4337 dst.width -= dm_state->underscan_hborder;
4338 dst.height -= dm_state->underscan_vborder;
4339 }
4340 }
4341
4342 stream->src = src;
4343 stream->dst = dst;
4344
4345 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4346 dst.x, dst.y, dst.width, dst.height);
4347
4348 }
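/*
 * RMX_ASPECT example for the math above: fitting a 1280x720 source
 * into a 1920x1200 stream compares 1280 * 1200 (1536000) against
 * 720 * 1920 (1382400); the source is relatively wider, so dst.height
 * becomes 720 * 1920 / 1280 = 1080 and the image is letterboxed with
 * dst.y = (1200 - 1080) / 2 = 60.
 */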
4349
4350 static enum dc_color_depth
4351 convert_color_depth_from_display_info(const struct drm_connector *connector,
4352 bool is_y420, int requested_bpc)
4353 {
4354 uint8_t bpc;
4355
4356 if (is_y420) {
4357 bpc = 8;
4358
4359 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4360 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4361 bpc = 16;
4362 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4363 bpc = 12;
4364 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4365 bpc = 10;
4366 } else {
4367 bpc = (uint8_t)connector->display_info.bpc;
4368 /* Assume 8 bpc by default if no bpc is specified. */
4369 bpc = bpc ? bpc : 8;
4370 }
4371
4372 if (requested_bpc > 0) {
4373 /*
4374 * Cap display bpc based on the user requested value.
4375 *
4376 * The value for state->max_bpc may not be correctly updated
4377 * depending on when the connector gets added to the state
4378 * or if this was called outside of atomic check, so it
4379 * can't be used directly.
4380 */
4381 bpc = min_t(u8, bpc, requested_bpc);
4382
4383 /* Round down to the nearest even number. */
4384 bpc = bpc - (bpc & 1);
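/* e.g. min(bpc = 12, requested_bpc = 11) = 11, then 11 - (11 & 1) = 10 */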
4385 }
4386
4387 switch (bpc) {
4388 case 0:
4389 /*
4390 * Temporary Work around, DRM doesn't parse color depth for
4391 * EDID revision before 1.4
4392 * TODO: Fix edid parsing
4393 */
4394 return COLOR_DEPTH_888;
4395 case 6:
4396 return COLOR_DEPTH_666;
4397 case 8:
4398 return COLOR_DEPTH_888;
4399 case 10:
4400 return COLOR_DEPTH_101010;
4401 case 12:
4402 return COLOR_DEPTH_121212;
4403 case 14:
4404 return COLOR_DEPTH_141414;
4405 case 16:
4406 return COLOR_DEPTH_161616;
4407 default:
4408 return COLOR_DEPTH_UNDEFINED;
4409 }
4410 }
4411
4412 static enum dc_aspect_ratio
4413 get_aspect_ratio(const struct drm_display_mode *mode_in)
4414 {
4415 /* 1-1 mapping, since both enums follow the HDMI spec. */
4416 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4417 }
4418
4419 static enum dc_color_space
4420 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4421 {
4422 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4423
4424 switch (dc_crtc_timing->pixel_encoding) {
4425 case PIXEL_ENCODING_YCBCR422:
4426 case PIXEL_ENCODING_YCBCR444:
4427 case PIXEL_ENCODING_YCBCR420:
4428 {
4429 /*
4430 * 27030 kHz is the separation point between HDTV and SDTV
4431 * per the HDMI spec, so we use YCbCr709 above that point and
4432 * YCbCr601 below it.
4433 */
4434 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4435 if (dc_crtc_timing->flags.Y_ONLY)
4436 color_space =
4437 COLOR_SPACE_YCBCR709_LIMITED;
4438 else
4439 color_space = COLOR_SPACE_YCBCR709;
4440 } else {
4441 if (dc_crtc_timing->flags.Y_ONLY)
4442 color_space =
4443 COLOR_SPACE_YCBCR601_LIMITED;
4444 else
4445 color_space = COLOR_SPACE_YCBCR601;
4446 }
4447
4448 }
4449 break;
4450 case PIXEL_ENCODING_RGB:
4451 color_space = COLOR_SPACE_SRGB;
4452 break;
4453
4454 default:
4455 WARN_ON(1);
4456 break;
4457 }
4458
4459 return color_space;
4460 }
4461
4462 static bool adjust_colour_depth_from_display_info(
4463 struct dc_crtc_timing *timing_out,
4464 const struct drm_display_info *info)
4465 {
4466 enum dc_color_depth depth = timing_out->display_color_depth;
4467 int normalized_clk;
4468 do {
4469 normalized_clk = timing_out->pix_clk_100hz / 10;
4470 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4471 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4472 normalized_clk /= 2;
4473 /* Adjusting pix clock following on HDMI spec based on colour depth */
4474 switch (depth) {
4475 case COLOR_DEPTH_888:
4476 break;
4477 case COLOR_DEPTH_101010:
4478 normalized_clk = (normalized_clk * 30) / 24;
4479 break;
4480 case COLOR_DEPTH_121212:
4481 normalized_clk = (normalized_clk * 36) / 24;
4482 break;
4483 case COLOR_DEPTH_161616:
4484 normalized_clk = (normalized_clk * 48) / 24;
4485 break;
4486 default:
4487 /* The above depths are the only ones valid for HDMI. */
4488 return false;
4489 }
4490 if (normalized_clk <= info->max_tmds_clock) {
4491 timing_out->display_color_depth = depth;
4492 return true;
4493 }
4494 } while (--depth > COLOR_DEPTH_666);
4495 return false;
4496 }
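/*
 * Worked example, assuming a sink whose EDID reports max_tmds_clock =
 * 600000 kHz: 4k60 RGB has pix_clk_100hz = 5940000, i.e. a normalized
 * clock of 594000 kHz. 12 bpc needs 594000 * 36 / 24 = 891000 and
 * 10 bpc needs 594000 * 30 / 24 = 742500, both over the limit, so the
 * loop settles on COLOR_DEPTH_888 at 594000 <= 600000.
 */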
4497
4498 static void fill_stream_properties_from_drm_display_mode(
4499 struct dc_stream_state *stream,
4500 const struct drm_display_mode *mode_in,
4501 const struct drm_connector *connector,
4502 const struct drm_connector_state *connector_state,
4503 const struct dc_stream_state *old_stream,
4504 int requested_bpc)
4505 {
4506 struct dc_crtc_timing *timing_out = &stream->timing;
4507 const struct drm_display_info *info = &connector->display_info;
4508 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4509 struct hdmi_vendor_infoframe hv_frame;
4510 struct hdmi_avi_infoframe avi_frame;
4511
4512 memset(&hv_frame, 0, sizeof(hv_frame));
4513 memset(&avi_frame, 0, sizeof(avi_frame));
4514
4515 timing_out->h_border_left = 0;
4516 timing_out->h_border_right = 0;
4517 timing_out->v_border_top = 0;
4518 timing_out->v_border_bottom = 0;
4519 /* TODO: un-hardcode */
4520 if (drm_mode_is_420_only(info, mode_in)
4521 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4522 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4523 else if (drm_mode_is_420_also(info, mode_in)
4524 && aconnector->force_yuv420_output)
4525 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4526 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4527 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4528 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4529 else
4530 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4531
4532 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4533 timing_out->display_color_depth = convert_color_depth_from_display_info(
4534 connector,
4535 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4536 requested_bpc);
4537 timing_out->scan_type = SCANNING_TYPE_NODATA;
4538 timing_out->hdmi_vic = 0;
4539
4540 if (old_stream) {
4541 timing_out->vic = old_stream->timing.vic;
4542 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4543 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4544 } else {
4545 timing_out->vic = drm_match_cea_mode(mode_in);
4546 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4547 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4548 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4549 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4550 }
4551
4552 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4553 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4554 timing_out->vic = avi_frame.video_code;
4555 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4556 timing_out->hdmi_vic = hv_frame.vic;
4557 }
4558
4559 timing_out->h_addressable = mode_in->crtc_hdisplay;
4560 timing_out->h_total = mode_in->crtc_htotal;
4561 timing_out->h_sync_width =
4562 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4563 timing_out->h_front_porch =
4564 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4565 timing_out->v_total = mode_in->crtc_vtotal;
4566 timing_out->v_addressable = mode_in->crtc_vdisplay;
4567 timing_out->v_front_porch =
4568 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4569 timing_out->v_sync_width =
4570 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4571 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4572 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4573
4574 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4575 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4576 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4577 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4578 drm_mode_is_420_also(info, mode_in) &&
4579 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4580 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4581 adjust_colour_depth_from_display_info(timing_out, info);
4582 }
4583 }
4584
4585 stream->output_color_space = get_output_color_space(timing_out);
4586 }
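/*
 * Timing derivation example for the arithmetic above, using standard
 * CEA-861 1080p60 numbers purely for illustration: crtc_hdisplay =
 * 1920, crtc_hsync_start = 2008, crtc_hsync_end = 2052 and crtc_htotal
 * = 2200 give h_front_porch = 88 and h_sync_width = 44, while
 * crtc_clock = 148500 kHz maps to pix_clk_100hz = 1485000.
 */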
4587
4588 static void fill_audio_info(struct audio_info *audio_info,
4589 const struct drm_connector *drm_connector,
4590 const struct dc_sink *dc_sink)
4591 {
4592 int i = 0;
4593 int cea_revision = 0;
4594 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4595
4596 audio_info->manufacture_id = edid_caps->manufacturer_id;
4597 audio_info->product_id = edid_caps->product_id;
4598
4599 cea_revision = drm_connector->display_info.cea_rev;
4600
4601 strscpy(audio_info->display_name,
4602 edid_caps->display_name,
4603 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4604
4605 if (cea_revision >= 3) {
4606 audio_info->mode_count = edid_caps->audio_mode_count;
4607
4608 for (i = 0; i < audio_info->mode_count; ++i) {
4609 audio_info->modes[i].format_code =
4610 (enum audio_format_code)
4611 (edid_caps->audio_modes[i].format_code);
4612 audio_info->modes[i].channel_count =
4613 edid_caps->audio_modes[i].channel_count;
4614 audio_info->modes[i].sample_rates.all =
4615 edid_caps->audio_modes[i].sample_rate;
4616 audio_info->modes[i].sample_size =
4617 edid_caps->audio_modes[i].sample_size;
4618 }
4619 }
4620
4621 audio_info->flags.all = edid_caps->speaker_flags;
4622
4623 /* TODO: We only check for the progressive mode, check for interlace mode too */
4624 if (drm_connector->latency_present[0]) {
4625 audio_info->video_latency = drm_connector->video_latency[0];
4626 audio_info->audio_latency = drm_connector->audio_latency[0];
4627 }
4628
4629 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4630
4631 }
4632
4633 static void
4634 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4635 struct drm_display_mode *dst_mode)
4636 {
4637 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4638 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4639 dst_mode->crtc_clock = src_mode->crtc_clock;
4640 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4641 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4642 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4643 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4644 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4645 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4646 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4647 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4648 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4649 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4650 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4651 }
4652
4653 static void
4654 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4655 const struct drm_display_mode *native_mode,
4656 bool scale_enabled)
4657 {
4658 if (scale_enabled) {
4659 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4660 } else if (native_mode->clock == drm_mode->clock &&
4661 native_mode->htotal == drm_mode->htotal &&
4662 native_mode->vtotal == drm_mode->vtotal) {
4663 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4664 } else {
4665 /* no scaling nor amdgpu inserted, no need to patch */
4666 }
4667 }
4668
4669 static struct dc_sink *
4670 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4671 {
4672 struct dc_sink_init_data sink_init_data = { 0 };
4673 struct dc_sink *sink = NULL;
4674 sink_init_data.link = aconnector->dc_link;
4675 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4676
4677 sink = dc_sink_create(&sink_init_data);
4678 if (!sink) {
4679 DRM_ERROR("Failed to create sink!\n");
4680 return NULL;
4681 }
4682 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4683
4684 return sink;
4685 }
4686
4687 static void set_multisync_trigger_params(
4688 struct dc_stream_state *stream)
4689 {
4690 if (stream->triggered_crtc_reset.enabled) {
4691 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4692 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4693 }
4694 }
4695
4696 static void set_master_stream(struct dc_stream_state *stream_set[],
4697 int stream_count)
4698 {
4699 int j, highest_rfr = 0, master_stream = 0;
4700
4701 for (j = 0; j < stream_count; j++) {
4702 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4703 int refresh_rate = 0;
4704
4705 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4706 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4707 if (refresh_rate > highest_rfr) {
4708 highest_rfr = refresh_rate;
4709 master_stream = j;
4710 }
4711 }
4712 }
4713 for (j = 0; j < stream_count; j++) {
4714 if (stream_set[j])
4715 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4716 }
4717 }
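/*
 * The refresh-rate expression above converts pix_clk_100hz back to Hz
 * before dividing by the total pixel count, e.g. 1485000 * 100 /
 * (2200 * 1125) = 148500000 / 2475000 = 60, so a 1080p60 stream would
 * win the master-stream election over slower companions.
 */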
4718
4719 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4720 {
4721 int i = 0;
4722
4723 if (context->stream_count < 2)
4724 return;
4725 for (i = 0; i < context->stream_count ; i++) {
4726 if (!context->streams[i])
4727 continue;
4728 /*
4729 * TODO: add a function to read AMD VSDB bits and set
4730 * crtc_sync_master.multi_sync_enabled flag
4731 * For now it's set to false
4732 */
4733 set_multisync_trigger_params(context->streams[i]);
4734 }
4735 set_master_stream(context->streams, context->stream_count);
4736 }
4737
4738 static struct dc_stream_state *
4739 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4740 const struct drm_display_mode *drm_mode,
4741 const struct dm_connector_state *dm_state,
4742 const struct dc_stream_state *old_stream,
4743 int requested_bpc)
4744 {
4745 struct drm_display_mode *preferred_mode = NULL;
4746 struct drm_connector *drm_connector;
4747 const struct drm_connector_state *con_state =
4748 dm_state ? &dm_state->base : NULL;
4749 struct dc_stream_state *stream = NULL;
4750 struct drm_display_mode mode = *drm_mode;
4751 bool native_mode_found = false;
4752 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4753 int mode_refresh;
4754 int preferred_refresh = 0;
4755 #if defined(CONFIG_DRM_AMD_DC_DCN)
4756 struct dsc_dec_dpcd_caps dsc_caps;
4757 #endif
4758 uint32_t link_bandwidth_kbps;
4759
4760 struct dc_sink *sink = NULL;
4761 if (aconnector == NULL) {
4762 DRM_ERROR("aconnector is NULL!\n");
4763 return stream;
4764 }
4765
4766 drm_connector = &aconnector->base;
4767
4768 if (!aconnector->dc_sink) {
4769 sink = create_fake_sink(aconnector);
4770 if (!sink)
4771 return stream;
4772 } else {
4773 sink = aconnector->dc_sink;
4774 dc_sink_retain(sink);
4775 }
4776
4777 stream = dc_create_stream_for_sink(sink);
4778
4779 if (stream == NULL) {
4780 DRM_ERROR("Failed to create stream for sink!\n");
4781 goto finish;
4782 }
4783
4784 stream->dm_stream_context = aconnector;
4785
4786 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4787 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4788
4789 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4790 /* Search for preferred mode */
4791 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4792 native_mode_found = true;
4793 break;
4794 }
4795 }
4796 if (!native_mode_found)
4797 preferred_mode = list_first_entry_or_null(
4798 &aconnector->base.modes,
4799 struct drm_display_mode,
4800 head);
4801
4802 mode_refresh = drm_mode_vrefresh(&mode);
4803
4804 if (preferred_mode == NULL) {
4805 /*
4806 * This may not be an error: the use case is when we have no
4807 * usermode calls to reset and set mode upon hotplug. In this
4808 * case, we call set mode ourselves to restore the previous mode
4809 * and the mode list may not have been filled in yet.
4810 */
4811 DRM_DEBUG_DRIVER("No preferred mode found\n");
4812 } else {
4813 decide_crtc_timing_for_drm_display_mode(
4814 &mode, preferred_mode,
4815 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4816 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4817 }
4818
4819 if (!dm_state)
4820 drm_mode_set_crtcinfo(&mode, 0);
4821
4822 /*
4823 * If scaling is enabled and refresh rate didn't change
4824 * we copy the vic and polarities of the old timings
4825 */
4826 if (!scale || mode_refresh != preferred_refresh)
4827 fill_stream_properties_from_drm_display_mode(stream,
4828 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4829 else
4830 fill_stream_properties_from_drm_display_mode(stream,
4831 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4832
4833 stream->timing.flags.DSC = 0;
4834
4835 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4836 #if defined(CONFIG_DRM_AMD_DC_DCN)
4837 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4838 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4839 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4840 &dsc_caps);
4841 #endif
4842 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4843 dc_link_get_link_cap(aconnector->dc_link));
4844
4845 #if defined(CONFIG_DRM_AMD_DC_DCN)
4846 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4847 /* Set DSC policy according to dsc_clock_en */
4848 dc_dsc_policy_set_enable_dsc_when_not_needed(
4849 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4850
4851 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4852 &dsc_caps,
4853 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4854 link_bandwidth_kbps,
4855 &stream->timing,
4856 &stream->timing.dsc_cfg))
4857 stream->timing.flags.DSC = 1;
4858 /* Overwrite the stream flag if DSC is enabled through debugfs */
4859 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4860 stream->timing.flags.DSC = 1;
4861
4862 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4863 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4864
4865 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4866 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4867
4868 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4869 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4870 }
4871 #endif
4872 }
4873
4874 update_stream_scaling_settings(&mode, dm_state, stream);
4875
4876 fill_audio_info(
4877 &stream->audio_info,
4878 drm_connector,
4879 sink);
4880
4881 update_stream_signal(stream, sink);
4882
4883 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4884 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4885
4886 if (stream->link->psr_settings.psr_feature_enabled) {
4887 //
4888 // should decide stream support vsc sdp colorimetry capability
4889 // before building vsc info packet
4890 //
4891 stream->use_vsc_sdp_for_colorimetry = false;
4892 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4893 stream->use_vsc_sdp_for_colorimetry =
4894 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4895 } else {
4896 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4897 stream->use_vsc_sdp_for_colorimetry = true;
4898 }
4899 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4900 }
4901 finish:
4902 dc_sink_release(sink);
4903
4904 return stream;
4905 }
4906
4907 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4908 {
4909 drm_crtc_cleanup(crtc);
4910 kfree(crtc);
4911 }
4912
4913 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4914 struct drm_crtc_state *state)
4915 {
4916 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4917
4918 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4919 if (cur->stream)
4920 dc_stream_release(cur->stream);
4921
4923 __drm_atomic_helper_crtc_destroy_state(state);
4924
4926 kfree(state);
4927 }
4928
4929 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4930 {
4931 struct dm_crtc_state *state;
4932
4933 if (crtc->state)
4934 dm_crtc_destroy_state(crtc, crtc->state);
4935
4936 state = kzalloc(sizeof(*state), GFP_KERNEL);
4937 if (WARN_ON(!state))
4938 return;
4939
4940 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4941 }
4942
4943 static struct drm_crtc_state *
4944 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4945 {
4946 struct dm_crtc_state *state, *cur;
4947
4948 if (WARN_ON(!crtc->state))
4949 return NULL;
4950
4951 cur = to_dm_crtc_state(crtc->state);
4952
4953 state = kzalloc(sizeof(*state), GFP_KERNEL);
4954 if (!state)
4955 return NULL;
4956
4957 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4958
4959 if (cur->stream) {
4960 state->stream = cur->stream;
4961 dc_stream_retain(state->stream);
4962 }
4963
4964 state->active_planes = cur->active_planes;
4965 state->vrr_infopacket = cur->vrr_infopacket;
4966 state->abm_level = cur->abm_level;
4967 state->vrr_supported = cur->vrr_supported;
4968 state->freesync_config = cur->freesync_config;
4969 state->crc_src = cur->crc_src;
4970 state->cm_has_degamma = cur->cm_has_degamma;
4971 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4972
4973 /* TODO: Duplicate dc_stream once the stream object is flattened */
4974
4975 return &state->base;
4976 }
4977
4978 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4979 {
4980 enum dc_irq_source irq_source;
4981 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4982 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4983 int rc;
4984
4985 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4986
4987 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4988
4989 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4990 acrtc->crtc_id, enable ? "en" : "dis", rc);
4991 return rc;
4992 }
4993
4994 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4995 {
4996 enum dc_irq_source irq_source;
4997 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4998 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4999 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5000 int rc = 0;
5001
5002 if (enable) {
5003 /* vblank irq on -> Only need vupdate irq in vrr mode */
5004 if (amdgpu_dm_vrr_active(acrtc_state))
5005 rc = dm_set_vupdate_irq(crtc, true);
5006 } else {
5007 /* vblank irq off -> vupdate irq off */
5008 rc = dm_set_vupdate_irq(crtc, false);
5009 }
5010
5011 if (rc)
5012 return rc;
5013
5014 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5015 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5016 }
5017
5018 static int dm_enable_vblank(struct drm_crtc *crtc)
5019 {
5020 return dm_set_vblank(crtc, true);
5021 }
5022
5023 static void dm_disable_vblank(struct drm_crtc *crtc)
5024 {
5025 dm_set_vblank(crtc, false);
5026 }
5027
5028 /* Only the options currently available to the driver are implemented */
5029 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5030 .reset = dm_crtc_reset_state,
5031 .destroy = amdgpu_dm_crtc_destroy,
5032 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5033 .set_config = drm_atomic_helper_set_config,
5034 .page_flip = drm_atomic_helper_page_flip,
5035 .atomic_duplicate_state = dm_crtc_duplicate_state,
5036 .atomic_destroy_state = dm_crtc_destroy_state,
5037 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5038 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5039 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5040 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5041 .enable_vblank = dm_enable_vblank,
5042 .disable_vblank = dm_disable_vblank,
5043 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5044 };
5045
5046 static enum drm_connector_status
5047 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5048 {
5049 bool connected;
5050 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5051
5052 /*
5053 * Notes:
5054 * 1. This interface is NOT called in context of HPD irq.
5055 * 2. This interface *is called* in context of user-mode ioctl. Which
5056 * makes it a bad place for *any* MST-related activity.
5057 */
5058
5059 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5060 !aconnector->fake_enable)
5061 connected = (aconnector->dc_sink != NULL);
5062 else
5063 connected = (aconnector->base.force == DRM_FORCE_ON);
5064
5065 update_subconnector_property(aconnector);
5066
5067 return (connected ? connector_status_connected :
5068 connector_status_disconnected);
5069 }
5070
5071 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5072 struct drm_connector_state *connector_state,
5073 struct drm_property *property,
5074 uint64_t val)
5075 {
5076 struct drm_device *dev = connector->dev;
5077 struct amdgpu_device *adev = drm_to_adev(dev);
5078 struct dm_connector_state *dm_old_state =
5079 to_dm_connector_state(connector->state);
5080 struct dm_connector_state *dm_new_state =
5081 to_dm_connector_state(connector_state);
5082
5083 int ret = -EINVAL;
5084
5085 if (property == dev->mode_config.scaling_mode_property) {
5086 enum amdgpu_rmx_type rmx_type;
5087
5088 switch (val) {
5089 case DRM_MODE_SCALE_CENTER:
5090 rmx_type = RMX_CENTER;
5091 break;
5092 case DRM_MODE_SCALE_ASPECT:
5093 rmx_type = RMX_ASPECT;
5094 break;
5095 case DRM_MODE_SCALE_FULLSCREEN:
5096 rmx_type = RMX_FULL;
5097 break;
5098 case DRM_MODE_SCALE_NONE:
5099 default:
5100 rmx_type = RMX_OFF;
5101 break;
5102 }
5103
5104 if (dm_old_state->scaling == rmx_type)
5105 return 0;
5106
5107 dm_new_state->scaling = rmx_type;
5108 ret = 0;
5109 } else if (property == adev->mode_info.underscan_hborder_property) {
5110 dm_new_state->underscan_hborder = val;
5111 ret = 0;
5112 } else if (property == adev->mode_info.underscan_vborder_property) {
5113 dm_new_state->underscan_vborder = val;
5114 ret = 0;
5115 } else if (property == adev->mode_info.underscan_property) {
5116 dm_new_state->underscan_enable = val;
5117 ret = 0;
5118 } else if (property == adev->mode_info.abm_level_property) {
5119 dm_new_state->abm_level = val;
5120 ret = 0;
5121 }
5122
5123 return ret;
5124 }
5125
5126 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5127 const struct drm_connector_state *state,
5128 struct drm_property *property,
5129 uint64_t *val)
5130 {
5131 struct drm_device *dev = connector->dev;
5132 struct amdgpu_device *adev = drm_to_adev(dev);
5133 struct dm_connector_state *dm_state =
5134 to_dm_connector_state(state);
5135 int ret = -EINVAL;
5136
5137 if (property == dev->mode_config.scaling_mode_property) {
5138 switch (dm_state->scaling) {
5139 case RMX_CENTER:
5140 *val = DRM_MODE_SCALE_CENTER;
5141 break;
5142 case RMX_ASPECT:
5143 *val = DRM_MODE_SCALE_ASPECT;
5144 break;
5145 case RMX_FULL:
5146 *val = DRM_MODE_SCALE_FULLSCREEN;
5147 break;
5148 case RMX_OFF:
5149 default:
5150 *val = DRM_MODE_SCALE_NONE;
5151 break;
5152 }
5153 ret = 0;
5154 } else if (property == adev->mode_info.underscan_hborder_property) {
5155 *val = dm_state->underscan_hborder;
5156 ret = 0;
5157 } else if (property == adev->mode_info.underscan_vborder_property) {
5158 *val = dm_state->underscan_vborder;
5159 ret = 0;
5160 } else if (property == adev->mode_info.underscan_property) {
5161 *val = dm_state->underscan_enable;
5162 ret = 0;
5163 } else if (property == adev->mode_info.abm_level_property) {
5164 *val = dm_state->abm_level;
5165 ret = 0;
5166 }
5167
5168 return ret;
5169 }
5170
5171 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5172 {
5173 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5174
5175 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5176 }
5177
5178 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5179 {
5180 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5181 const struct dc_link *link = aconnector->dc_link;
5182 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5183 struct amdgpu_display_manager *dm = &adev->dm;
5184
5185 /*
5186 * Call only if mst_mgr was initialized before, since it's not done
5187 * for all connector types.
5188 */
5189 if (aconnector->mst_mgr.dev)
5190 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5191
5192 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5193 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5194
5195 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5196 link->type != dc_connection_none &&
5197 dm->backlight_dev) {
5198 backlight_device_unregister(dm->backlight_dev);
5199 dm->backlight_dev = NULL;
5200 }
5201 #endif
5202
5203 if (aconnector->dc_em_sink)
5204 dc_sink_release(aconnector->dc_em_sink);
5205 aconnector->dc_em_sink = NULL;
5206 if (aconnector->dc_sink)
5207 dc_sink_release(aconnector->dc_sink);
5208 aconnector->dc_sink = NULL;
5209
5210 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5211 drm_connector_unregister(connector);
5212 drm_connector_cleanup(connector);
5213 if (aconnector->i2c) {
5214 i2c_del_adapter(&aconnector->i2c->base);
5215 kfree(aconnector->i2c);
5216 }
5217 kfree(aconnector->dm_dp_aux.aux.name);
5218
5219 kfree(connector);
5220 }
5221
5222 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5223 {
5224 struct dm_connector_state *state =
5225 to_dm_connector_state(connector->state);
5226
5227 if (connector->state)
5228 __drm_atomic_helper_connector_destroy_state(connector->state);
5229
5230 kfree(state);
5231
5232 state = kzalloc(sizeof(*state), GFP_KERNEL);
5233
5234 if (state) {
5235 state->scaling = RMX_OFF;
5236 state->underscan_enable = false;
5237 state->underscan_hborder = 0;
5238 state->underscan_vborder = 0;
5239 state->base.max_requested_bpc = 8;
5240 state->vcpi_slots = 0;
5241 state->pbn = 0;
5242 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5243 state->abm_level = amdgpu_dm_abm_level;
5244
5245 __drm_atomic_helper_connector_reset(connector, &state->base);
5246 }
5247 }
5248
5249 struct drm_connector_state *
5250 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5251 {
5252 struct dm_connector_state *state =
5253 to_dm_connector_state(connector->state);
5254
5255 struct dm_connector_state *new_state =
5256 kmemdup(state, sizeof(*state), GFP_KERNEL);
5257
5258 if (!new_state)
5259 return NULL;
5260
5261 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5262
5263 new_state->freesync_capable = state->freesync_capable;
5264 new_state->abm_level = state->abm_level;
5265 new_state->scaling = state->scaling;
5266 new_state->underscan_enable = state->underscan_enable;
5267 new_state->underscan_hborder = state->underscan_hborder;
5268 new_state->underscan_vborder = state->underscan_vborder;
5269 new_state->vcpi_slots = state->vcpi_slots;
5270 new_state->pbn = state->pbn;
5271 return &new_state->base;
5272 }
5273
5274 static int
5275 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5276 {
5277 struct amdgpu_dm_connector *amdgpu_dm_connector =
5278 to_amdgpu_dm_connector(connector);
5279 int r;
5280
5281 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5282 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5283 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5284 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5285 if (r)
5286 return r;
5287 }
5288
5289 #if defined(CONFIG_DEBUG_FS)
5290 connector_debugfs_init(amdgpu_dm_connector);
5291 #endif
5292
5293 return 0;
5294 }
5295
5296 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5297 .reset = amdgpu_dm_connector_funcs_reset,
5298 .detect = amdgpu_dm_connector_detect,
5299 .fill_modes = drm_helper_probe_single_connector_modes,
5300 .destroy = amdgpu_dm_connector_destroy,
5301 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5302 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5303 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5304 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5305 .late_register = amdgpu_dm_connector_late_register,
5306 .early_unregister = amdgpu_dm_connector_unregister
5307 };
5308
5309 static int get_modes(struct drm_connector *connector)
5310 {
5311 return amdgpu_dm_connector_get_modes(connector);
5312 }
5313
5314 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5315 {
5316 struct dc_sink_init_data init_params = {
5317 .link = aconnector->dc_link,
5318 .sink_signal = SIGNAL_TYPE_VIRTUAL
5319 };
5320 struct edid *edid;
5321
5322 if (!aconnector->base.edid_blob_ptr) {
5323 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5324 aconnector->base.name);
5325
5326 aconnector->base.force = DRM_FORCE_OFF;
5327 aconnector->base.override_edid = false;
5328 return;
5329 }
5330
5331 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5332
5333 aconnector->edid = edid;
5334
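	/*
	 * EDID size is (extension count + 1) blocks of EDID_LENGTH (128)
	 * bytes, e.g. a single extension block gives 256 bytes in total.
	 */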
5335 aconnector->dc_em_sink = dc_link_add_remote_sink(
5336 aconnector->dc_link,
5337 (uint8_t *)edid,
5338 (edid->extensions + 1) * EDID_LENGTH,
5339 &init_params);
5340
5341 if (aconnector->base.force == DRM_FORCE_ON) {
5342 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5343 aconnector->dc_link->local_sink :
5344 aconnector->dc_em_sink;
5345 dc_sink_retain(aconnector->dc_sink);
5346 }
5347 }
5348
5349 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5350 {
5351 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5352
5353 /*
5354 * In case of a headless boot with force-on for a DP managed connector,
5355 * these settings have to be != 0 to get an initial modeset.
5356 */
5357 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5358 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5359 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5360 }
5361
5362
5363 aconnector->base.override_edid = true;
5364 create_eml_sink(aconnector);
5365 }
5366
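/*
 * A sketch of the retry strategy below, read off this function rather
 * than any DC documentation: starting from the connector state's
 * max_requested_bpc (default 8), stream creation is retried with the
 * bpc lowered in steps of 2 down to a floor of 6 (e.g. 10 -> 8 -> 6)
 * until dc_validate_stream() accepts the timing; if that still fails
 * with DC_FAIL_ENC_VALIDATE, one more pass forces YCbCr420 output.
 */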
5367 static struct dc_stream_state *
5368 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5369 const struct drm_display_mode *drm_mode,
5370 const struct dm_connector_state *dm_state,
5371 const struct dc_stream_state *old_stream)
5372 {
5373 struct drm_connector *connector = &aconnector->base;
5374 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5375 struct dc_stream_state *stream;
5376 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5377 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5378 enum dc_status dc_result = DC_OK;
5379
5380 do {
5381 stream = create_stream_for_sink(aconnector, drm_mode,
5382 dm_state, old_stream,
5383 requested_bpc);
5384 if (stream == NULL) {
5385 DRM_ERROR("Failed to create stream for sink!\n");
5386 break;
5387 }
5388
5389 dc_result = dc_validate_stream(adev->dm.dc, stream);
5390
5391 if (dc_result != DC_OK) {
5392 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5393 drm_mode->hdisplay,
5394 drm_mode->vdisplay,
5395 drm_mode->clock,
5396 dc_result,
5397 dc_status_to_str(dc_result));
5398
5399 dc_stream_release(stream);
5400 stream = NULL;
5401 requested_bpc -= 2; /* lower bpc to retry validation */
5402 }
5403
5404 } while (stream == NULL && requested_bpc >= 6);
5405
5406 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5407 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5408
5409 aconnector->force_yuv420_output = true;
5410 stream = create_validate_stream_for_sink(aconnector, drm_mode,
5411 dm_state, old_stream);
5412 aconnector->force_yuv420_output = false;
5413 }
5414
5415 return stream;
5416 }
5417
5418 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5419 struct drm_display_mode *mode)
5420 {
5421 int result = MODE_ERROR;
5422 struct dc_sink *dc_sink;
5423 /* TODO: Unhardcode stream count */
5424 struct dc_stream_state *stream;
5425 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5426
5427 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5428 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5429 return result;
5430
5431 /*
5432 * Only run this the first time mode_valid is called, to initialize
5433 * EDID mgmt.
5434 */
5435 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5436 !aconnector->dc_em_sink)
5437 handle_edid_mgmt(aconnector);
5438
5439 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5440
5441 if (dc_sink == NULL) {
5442 DRM_ERROR("dc_sink is NULL!\n");
5443 goto fail;
5444 }
5445
5446 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5447 if (stream) {
5448 dc_stream_release(stream);
5449 result = MODE_OK;
5450 }
5451
5452 fail:
5453 /* TODO: error handling */
5454 return result;
5455 }
5456
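/*
 * Illustrative byte layout, read off the code below rather than the
 * CTA-861/DP specs: for HDMI the infoframe header is
 * {0x87, 0x01, 0x1A} with the checksum in sb[0]; for DP/eDP the same
 * 26-byte static metadata payload rides in an SDP whose hb2 holds
 * payload length - 1 = 0x1D.
 */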
5457 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5458 struct dc_info_packet *out)
5459 {
5460 struct hdmi_drm_infoframe frame;
5461 unsigned char buf[30]; /* 26 + 4 */
5462 ssize_t len;
5463 int ret, i;
5464
5465 memset(out, 0, sizeof(*out));
5466
5467 if (!state->hdr_output_metadata)
5468 return 0;
5469
5470 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5471 if (ret)
5472 return ret;
5473
5474 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5475 if (len < 0)
5476 return (int)len;
5477
5478 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5479 if (len != 30)
5480 return -EINVAL;
5481
5482 /* Prepare the infopacket for DC. */
5483 switch (state->connector->connector_type) {
5484 case DRM_MODE_CONNECTOR_HDMIA:
5485 out->hb0 = 0x87; /* type */
5486 out->hb1 = 0x01; /* version */
5487 out->hb2 = 0x1A; /* length */
5488 out->sb[0] = buf[3]; /* checksum */
5489 i = 1;
5490 break;
5491
5492 case DRM_MODE_CONNECTOR_DisplayPort:
5493 case DRM_MODE_CONNECTOR_eDP:
5494 out->hb0 = 0x00; /* sdp id, zero */
5495 out->hb1 = 0x87; /* type */
5496 out->hb2 = 0x1D; /* payload len - 1 */
5497 out->hb3 = (0x13 << 2); /* sdp version */
5498 out->sb[0] = 0x01; /* version */
5499 out->sb[1] = 0x1A; /* length */
5500 i = 2;
5501 break;
5502
5503 default:
5504 return -EINVAL;
5505 }
5506
5507 memcpy(&out->sb[i], &buf[4], 26);
5508 out->valid = true;
5509
5510 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5511 sizeof(out->sb), false);
5512
5513 return 0;
5514 }
5515
5516 static bool
5517 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5518 const struct drm_connector_state *new_state)
5519 {
5520 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5521 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5522
5523 if (old_blob != new_blob) {
5524 if (old_blob && new_blob &&
5525 old_blob->length == new_blob->length)
5526 return memcmp(old_blob->data, new_blob->data,
5527 old_blob->length);
5528
5529 return true;
5530 }
5531
5532 return false;
5533 }
5534
5535 static int
5536 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5537 struct drm_atomic_state *state)
5538 {
5539 struct drm_connector_state *new_con_state =
5540 drm_atomic_get_new_connector_state(state, conn);
5541 struct drm_connector_state *old_con_state =
5542 drm_atomic_get_old_connector_state(state, conn);
5543 struct drm_crtc *crtc = new_con_state->crtc;
5544 struct drm_crtc_state *new_crtc_state;
5545 int ret;
5546
5547 if (!crtc)
5548 return 0;
5549
5550 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5551 struct dc_info_packet hdr_infopacket;
5552
5553 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5554 if (ret)
5555 return ret;
5556
5557 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5558 if (IS_ERR(new_crtc_state))
5559 return PTR_ERR(new_crtc_state);
5560
5561 /*
5562 * DC considers the stream backends changed if the
5563 * static metadata changes. Forcing the modeset also
5564 * gives a simple way for userspace to switch from
5565 * 8bpc to 10bpc when setting the metadata to enter
5566 * or exit HDR.
5567 *
5568 * Changing the static metadata after it's been
5569 * set is permissible, however. So only force a
5570 * modeset if we're entering or exiting HDR.
5571 */
5572 new_crtc_state->mode_changed =
5573 !old_con_state->hdr_output_metadata ||
5574 !new_con_state->hdr_output_metadata;
5575 }
5576
5577 return 0;
5578 }
5579
5580 static const struct drm_connector_helper_funcs
5581 amdgpu_dm_connector_helper_funcs = {
5582 /*
5583 * When hotplugging a second, bigger display in FB console mode, higher
5584 * resolution modes are filtered out by drm_mode_validate_size() and end
5585 * up missing after the user starts lightdm. So we need to renew the
5586 * modes list in the get_modes callback, not just return the modes count.
5587 */
5588 .get_modes = get_modes,
5589 .mode_valid = amdgpu_dm_connector_mode_valid,
5590 .atomic_check = amdgpu_dm_connector_atomic_check,
5591 };
5592
5593 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5594 {
5595 }
5596
5597 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5598 {
5599 struct drm_atomic_state *state = new_crtc_state->state;
5600 struct drm_plane *plane;
5601 int num_active = 0;
5602
5603 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5604 struct drm_plane_state *new_plane_state;
5605
5606 /* Cursor planes are "fake". */
5607 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5608 continue;
5609
5610 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5611
5612 if (!new_plane_state) {
5613 /*
5614 * The plane is enabled on the CRTC and hasn't changed
5615 * state. This means that it previously passed
5616 * validation and is therefore enabled.
5617 */
5618 num_active += 1;
5619 continue;
5620 }
5621
5622 /* We need a framebuffer to be considered enabled. */
5623 num_active += (new_plane_state->fb != NULL);
5624 }
5625
5626 return num_active;
5627 }
5628
5629 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5630 struct drm_crtc_state *new_crtc_state)
5631 {
5632 struct dm_crtc_state *dm_new_crtc_state =
5633 to_dm_crtc_state(new_crtc_state);
5634
5635 dm_new_crtc_state->active_planes = 0;
5636
5637 if (!dm_new_crtc_state->stream)
5638 return;
5639
5640 dm_new_crtc_state->active_planes =
5641 count_crtc_active_planes(new_crtc_state);
5642 }
5643
5644 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5645 struct drm_crtc_state *state)
5646 {
5647 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5648 struct dc *dc = adev->dm.dc;
5649 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5650 int ret = -EINVAL;
5651
5652 dm_update_crtc_active_planes(crtc, state);
5653
5654 if (unlikely(!dm_crtc_state->stream &&
5655 modeset_required(state, NULL, dm_crtc_state->stream))) {
5656 WARN_ON(1);
5657 return ret;
5658 }
5659
5660 /*
5661 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5662 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5663 * planes are disabled, which is not supported by the hardware. And there is legacy
5664 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5665 */
5666 if (state->enable &&
5667 !(state->plane_mask & drm_plane_mask(crtc->primary)))
5668 return -EINVAL;
5669
5670 /* In some use cases, like reset, no stream is attached */
5671 if (!dm_crtc_state->stream)
5672 return 0;
5673
5674 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5675 return 0;
5676
5677 return ret;
5678 }
5679
5680 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5681 const struct drm_display_mode *mode,
5682 struct drm_display_mode *adjusted_mode)
5683 {
5684 return true;
5685 }
5686
5687 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5688 .disable = dm_crtc_helper_disable,
5689 .atomic_check = dm_crtc_helper_atomic_check,
5690 .mode_fixup = dm_crtc_helper_mode_fixup,
5691 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5692 };
5693
5694 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5695 {
5696
5697 }
5698
5699 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5700 {
5701 switch (display_color_depth) {
5702 case COLOR_DEPTH_666:
5703 return 6;
5704 case COLOR_DEPTH_888:
5705 return 8;
5706 case COLOR_DEPTH_101010:
5707 return 10;
5708 case COLOR_DEPTH_121212:
5709 return 12;
5710 case COLOR_DEPTH_141414:
5711 return 14;
5712 case COLOR_DEPTH_161616:
5713 return 16;
5714 default:
5715 break;
5716 }
5717 return 0;
5718 }
5719
5720 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5721 struct drm_crtc_state *crtc_state,
5722 struct drm_connector_state *conn_state)
5723 {
5724 struct drm_atomic_state *state = crtc_state->state;
5725 struct drm_connector *connector = conn_state->connector;
5726 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5727 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5728 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5729 struct drm_dp_mst_topology_mgr *mst_mgr;
5730 struct drm_dp_mst_port *mst_port;
5731 enum dc_color_depth color_depth;
5732 int clock, bpp = 0;
5733 bool is_y420 = false;
5734
5735 if (!aconnector->port || !aconnector->dc_sink)
5736 return 0;
5737
5738 mst_port = aconnector->port;
5739 mst_mgr = &aconnector->mst_port->mst_mgr;
5740
5741 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5742 return 0;
5743
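	/*
	 * Worked example with assumed figures: a 1920x1080@60 timing
	 * (148500 kHz) at 8 bpc RGB gives bpp = 24, i.e. ~445.5 MB/s of
	 * pixel data, which drm_dp_calc_pbn_mode() turns into roughly
	 * 532 PBN (units of 54/64 MB/s, including its 0.6% margin).
	 */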
5744 if (!state->duplicated) {
5745 int max_bpc = conn_state->max_requested_bpc;
5746 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5747 aconnector->force_yuv420_output;
5748 color_depth = convert_color_depth_from_display_info(connector,
5749 is_y420,
5750 max_bpc);
5751 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5752 clock = adjusted_mode->clock;
5753 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5754 }
5755 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5756 mst_mgr,
5757 mst_port,
5758 dm_new_connector_state->pbn,
5759 dm_mst_get_pbn_divider(aconnector->dc_link));
5760 if (dm_new_connector_state->vcpi_slots < 0) {
5761 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5762 return dm_new_connector_state->vcpi_slots;
5763 }
5764 return 0;
5765 }
5766
5767 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5768 .disable = dm_encoder_helper_disable,
5769 .atomic_check = dm_encoder_helper_atomic_check
5770 };
5771
5772 #if defined(CONFIG_DRM_AMD_DC_DCN)
5773 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5774 struct dc_state *dc_state)
5775 {
5776 struct dc_stream_state *stream = NULL;
5777 struct drm_connector *connector;
5778 struct drm_connector_state *new_con_state, *old_con_state;
5779 struct amdgpu_dm_connector *aconnector;
5780 struct dm_connector_state *dm_conn_state;
5781 int i, j, clock, bpp;
5782 int vcpi, pbn_div, pbn = 0;
5783
5784 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5785
5786 aconnector = to_amdgpu_dm_connector(connector);
5787
5788 if (!aconnector->port)
5789 continue;
5790
5791 if (!new_con_state || !new_con_state->crtc)
5792 continue;
5793
5794 dm_conn_state = to_dm_connector_state(new_con_state);
5795
5796 for (j = 0; j < dc_state->stream_count; j++) {
5797 stream = dc_state->streams[j];
5798 if (!stream)
5799 continue;
5800
5801 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5802 break;
5803
5804 stream = NULL;
5805 }
5806
5807 if (!stream)
5808 continue;
5809
5810 if (stream->timing.flags.DSC != 1) {
5811 drm_dp_mst_atomic_enable_dsc(state,
5812 aconnector->port,
5813 dm_conn_state->pbn,
5814 0,
5815 false);
5816 continue;
5817 }
5818
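		/*
		 * DSC path: dsc_cfg.bits_per_pixel is expressed in units
		 * of 1/16 bpp, which drm_dp_calc_pbn_mode() is told about
		 * via its third argument (assumption based on the DRM
		 * helper's kerneldoc).
		 */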
5819 pbn_div = dm_mst_get_pbn_divider(stream->link);
5820 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5821 clock = stream->timing.pix_clk_100hz / 10;
5822 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5823 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5824 aconnector->port,
5825 pbn, pbn_div,
5826 true);
5827 if (vcpi < 0)
5828 return vcpi;
5829
5830 dm_conn_state->pbn = pbn;
5831 dm_conn_state->vcpi_slots = vcpi;
5832 }
5833 return 0;
5834 }
5835 #endif
5836
5837 static void dm_drm_plane_reset(struct drm_plane *plane)
5838 {
5839 struct dm_plane_state *amdgpu_state = NULL;
5840
5841 if (plane->state)
5842 plane->funcs->atomic_destroy_state(plane, plane->state);
5843
5844 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5845 WARN_ON(amdgpu_state == NULL);
5846
5847 if (amdgpu_state)
5848 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5849 }
5850
5851 static struct drm_plane_state *
5852 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5853 {
5854 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5855
5856 old_dm_plane_state = to_dm_plane_state(plane->state);
5857 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5858 if (!dm_plane_state)
5859 return NULL;
5860
5861 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5862
5863 if (old_dm_plane_state->dc_state) {
5864 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5865 dc_plane_state_retain(dm_plane_state->dc_state);
5866 }
5867
5868 /* Framebuffer hasn't been updated yet, so retain old flags. */
5869 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5870 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5871
5872 return &dm_plane_state->base;
5873 }
5874
5875 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5876 struct drm_plane_state *state)
5877 {
5878 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5879
5880 if (dm_plane_state->dc_state)
5881 dc_plane_state_release(dm_plane_state->dc_state);
5882
5883 drm_atomic_helper_plane_destroy_state(plane, state);
5884 }
5885
5886 static const struct drm_plane_funcs dm_plane_funcs = {
5887 .update_plane = drm_atomic_helper_update_plane,
5888 .disable_plane = drm_atomic_helper_disable_plane,
5889 .destroy = drm_primary_helper_destroy,
5890 .reset = dm_drm_plane_reset,
5891 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5892 .atomic_destroy_state = dm_drm_plane_destroy_state,
5893 };
5894
5895 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5896 struct drm_plane_state *new_state)
5897 {
5898 struct amdgpu_framebuffer *afb;
5899 struct drm_gem_object *obj;
5900 struct amdgpu_device *adev;
5901 struct amdgpu_bo *rbo;
5902 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5903 struct list_head list;
5904 struct ttm_validate_buffer tv;
5905 struct ww_acquire_ctx ticket;
5906 uint32_t domain;
5907 int r;
5908
5909 if (!new_state->fb) {
5910 DRM_DEBUG_DRIVER("No FB bound\n");
5911 return 0;
5912 }
5913
5914 afb = to_amdgpu_framebuffer(new_state->fb);
5915 obj = new_state->fb->obj[0];
5916 rbo = gem_to_amdgpu_bo(obj);
5917 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5918 INIT_LIST_HEAD(&list);
5919
5920 tv.bo = &rbo->tbo;
5921 tv.num_shared = 1;
5922 list_add(&tv.head, &list);
5923
5924 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5925 if (r) {
5926 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5927 return r;
5928 }
5929
5930 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5931 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5932 else
5933 domain = AMDGPU_GEM_DOMAIN_VRAM;
5934
5935 r = amdgpu_bo_pin(rbo, domain);
5936 if (unlikely(r != 0)) {
5937 if (r != -ERESTARTSYS)
5938 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5939 ttm_eu_backoff_reservation(&ticket, &list);
5940 return r;
5941 }
5942
5943 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5944 if (unlikely(r != 0)) {
5945 amdgpu_bo_unpin(rbo);
5946 ttm_eu_backoff_reservation(&ticket, &list);
5947 DRM_ERROR("%p bind failed\n", rbo);
5948 return r;
5949 }
5950
5951 ttm_eu_backoff_reservation(&ticket, &list);
5952
5953 afb->address = amdgpu_bo_gpu_offset(rbo);
5954
5955 amdgpu_bo_ref(rbo);
5956
5957 /*
5958 * We don't do surface updates on planes that have been newly created,
5959 * but we also don't have the afb->address during atomic check.
5960 *
5961 * Fill in buffer attributes depending on the address here, but only on
5962 * newly created planes since they're not being used by DC yet and this
5963 * won't modify global state.
5964 */
5965 dm_plane_state_old = to_dm_plane_state(plane->state);
5966 dm_plane_state_new = to_dm_plane_state(new_state);
5967
5968 if (dm_plane_state_new->dc_state &&
5969 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5970 struct dc_plane_state *plane_state =
5971 dm_plane_state_new->dc_state;
5972 bool force_disable_dcc = !plane_state->dcc.enable;
5973
5974 fill_plane_buffer_attributes(
5975 adev, afb, plane_state->format, plane_state->rotation,
5976 dm_plane_state_new->tiling_flags,
5977 &plane_state->tiling_info, &plane_state->plane_size,
5978 &plane_state->dcc, &plane_state->address,
5979 dm_plane_state_new->tmz_surface, force_disable_dcc);
5980 }
5981
5982 return 0;
5983 }
5984
5985 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5986 struct drm_plane_state *old_state)
5987 {
5988 struct amdgpu_bo *rbo;
5989 int r;
5990
5991 if (!old_state->fb)
5992 return;
5993
5994 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5995 r = amdgpu_bo_reserve(rbo, false);
5996 if (unlikely(r)) {
5997 DRM_ERROR("failed to reserve rbo before unpin\n");
5998 return;
5999 }
6000
6001 amdgpu_bo_unpin(rbo);
6002 amdgpu_bo_unreserve(rbo);
6003 amdgpu_bo_unref(&rbo);
6004 }
6005
6006 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6007 struct drm_crtc_state *new_crtc_state)
6008 {
6009 int max_downscale = 0;
6010 int max_upscale = INT_MAX;
6011
6012 /* TODO: These should be checked against DC plane caps */
6013 return drm_atomic_helper_check_plane_state(
6014 state, new_crtc_state, max_downscale, max_upscale, true, true);
6015 }
6016
6017 static int dm_plane_atomic_check(struct drm_plane *plane,
6018 struct drm_plane_state *state)
6019 {
6020 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6021 struct dc *dc = adev->dm.dc;
6022 struct dm_plane_state *dm_plane_state;
6023 struct dc_scaling_info scaling_info;
6024 struct drm_crtc_state *new_crtc_state;
6025 int ret;
6026
6027 dm_plane_state = to_dm_plane_state(state);
6028
6029 if (!dm_plane_state->dc_state)
6030 return 0;
6031
6032 new_crtc_state =
6033 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6034 if (!new_crtc_state)
6035 return -EINVAL;
6036
6037 ret = dm_plane_helper_check_state(state, new_crtc_state);
6038 if (ret)
6039 return ret;
6040
6041 ret = fill_dc_scaling_info(state, &scaling_info);
6042 if (ret)
6043 return ret;
6044
6045 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6046 return 0;
6047
6048 return -EINVAL;
6049 }
6050
6051 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6052 struct drm_plane_state *new_plane_state)
6053 {
6054 /* Only support async updates on cursor planes. */
6055 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6056 return -EINVAL;
6057
6058 return 0;
6059 }
6060
6061 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6062 struct drm_plane_state *new_state)
6063 {
6064 struct drm_plane_state *old_state =
6065 drm_atomic_get_old_plane_state(new_state->state, plane);
6066
6067 swap(plane->state->fb, new_state->fb);
6068
6069 plane->state->src_x = new_state->src_x;
6070 plane->state->src_y = new_state->src_y;
6071 plane->state->src_w = new_state->src_w;
6072 plane->state->src_h = new_state->src_h;
6073 plane->state->crtc_x = new_state->crtc_x;
6074 plane->state->crtc_y = new_state->crtc_y;
6075 plane->state->crtc_w = new_state->crtc_w;
6076 plane->state->crtc_h = new_state->crtc_h;
6077
6078 handle_cursor_update(plane, old_state);
6079 }
6080
6081 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6082 .prepare_fb = dm_plane_helper_prepare_fb,
6083 .cleanup_fb = dm_plane_helper_cleanup_fb,
6084 .atomic_check = dm_plane_atomic_check,
6085 .atomic_async_check = dm_plane_atomic_async_check,
6086 .atomic_async_update = dm_plane_atomic_async_update
6087 };
6088
6089 /*
6090 * TODO: these are currently initialized to rgb formats only.
6091 * For future use cases we should either initialize them dynamically based on
6092 * plane capabilities, or initialize this array to all formats, so internal drm
6093 * check will succeed, and let DC implement proper check
6094 */
6095 static const uint32_t rgb_formats[] = {
6096 DRM_FORMAT_XRGB8888,
6097 DRM_FORMAT_ARGB8888,
6098 DRM_FORMAT_RGBA8888,
6099 DRM_FORMAT_XRGB2101010,
6100 DRM_FORMAT_XBGR2101010,
6101 DRM_FORMAT_ARGB2101010,
6102 DRM_FORMAT_ABGR2101010,
6103 DRM_FORMAT_XBGR8888,
6104 DRM_FORMAT_ABGR8888,
6105 DRM_FORMAT_RGB565,
6106 };
6107
6108 static const uint32_t overlay_formats[] = {
6109 DRM_FORMAT_XRGB8888,
6110 DRM_FORMAT_ARGB8888,
6111 DRM_FORMAT_RGBA8888,
6112 DRM_FORMAT_XBGR8888,
6113 DRM_FORMAT_ABGR8888,
6114 DRM_FORMAT_RGB565
6115 };
6116
6117 static const u32 cursor_formats[] = {
6118 DRM_FORMAT_ARGB8888
6119 };
6120
6121 static int get_plane_formats(const struct drm_plane *plane,
6122 const struct dc_plane_cap *plane_cap,
6123 uint32_t *formats, int max_formats)
6124 {
6125 int i, num_formats = 0;
6126
6127 /*
6128 * TODO: Query support for each group of formats directly from
6129 * DC plane caps. This will require adding more formats to the
6130 * caps list.
6131 */
6132
6133 switch (plane->type) {
6134 case DRM_PLANE_TYPE_PRIMARY:
6135 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6136 if (num_formats >= max_formats)
6137 break;
6138
6139 formats[num_formats++] = rgb_formats[i];
6140 }
6141
6142 if (plane_cap && plane_cap->pixel_format_support.nv12)
6143 formats[num_formats++] = DRM_FORMAT_NV12;
6144 if (plane_cap && plane_cap->pixel_format_support.p010)
6145 formats[num_formats++] = DRM_FORMAT_P010;
6146 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6147 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6148 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6149 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6150 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6151 }
6152 break;
6153
6154 case DRM_PLANE_TYPE_OVERLAY:
6155 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6156 if (num_formats >= max_formats)
6157 break;
6158
6159 formats[num_formats++] = overlay_formats[i];
6160 }
6161 break;
6162
6163 case DRM_PLANE_TYPE_CURSOR:
6164 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6165 if (num_formats >= max_formats)
6166 break;
6167
6168 formats[num_formats++] = cursor_formats[i];
6169 }
6170 break;
6171 }
6172
6173 return num_formats;
6174 }
6175
6176 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6177 struct drm_plane *plane,
6178 unsigned long possible_crtcs,
6179 const struct dc_plane_cap *plane_cap)
6180 {
6181 uint32_t formats[32];
6182 int num_formats;
6183 int res = -EPERM;
6184 unsigned int supported_rotations;
6185
6186 num_formats = get_plane_formats(plane, plane_cap, formats,
6187 ARRAY_SIZE(formats));
6188
6189 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6190 &dm_plane_funcs, formats, num_formats,
6191 NULL, plane->type, NULL);
6192 if (res)
6193 return res;
6194
6195 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6196 plane_cap && plane_cap->per_pixel_alpha) {
6197 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6198 BIT(DRM_MODE_BLEND_PREMULTI);
6199
6200 drm_plane_create_alpha_property(plane);
6201 drm_plane_create_blend_mode_property(plane, blend_caps);
6202 }
6203
6204 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6205 plane_cap &&
6206 (plane_cap->pixel_format_support.nv12 ||
6207 plane_cap->pixel_format_support.p010)) {
6208 /* This only affects YUV formats. */
6209 drm_plane_create_color_properties(
6210 plane,
6211 BIT(DRM_COLOR_YCBCR_BT601) |
6212 BIT(DRM_COLOR_YCBCR_BT709) |
6213 BIT(DRM_COLOR_YCBCR_BT2020),
6214 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6215 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6216 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6217 }
6218
6219 supported_rotations =
6220 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6221 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6222
6223 if (dm->adev->asic_type >= CHIP_BONAIRE)
6224 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6225 supported_rotations);
6226
6227 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6228
6229 /* Create (reset) the plane state */
6230 if (plane->funcs->reset)
6231 plane->funcs->reset(plane);
6232
6233 return 0;
6234 }
6235
6236 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6237 struct drm_plane *plane,
6238 uint32_t crtc_index)
6239 {
6240 struct amdgpu_crtc *acrtc = NULL;
6241 struct drm_plane *cursor_plane;
6242
6243 int res = -ENOMEM;
6244
6245 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6246 if (!cursor_plane)
6247 goto fail;
6248
6249 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6250 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6251
6252 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6253 if (!acrtc)
6254 goto fail;
6255
6256 res = drm_crtc_init_with_planes(
6257 dm->ddev,
6258 &acrtc->base,
6259 plane,
6260 cursor_plane,
6261 &amdgpu_dm_crtc_funcs, NULL);
6262
6263 if (res)
6264 goto fail;
6265
6266 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6267
6268 /* Create (reset) the plane state */
6269 if (acrtc->base.funcs->reset)
6270 acrtc->base.funcs->reset(&acrtc->base);
6271
6272 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6273 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6274
6275 acrtc->crtc_id = crtc_index;
6276 acrtc->base.enabled = false;
6277 acrtc->otg_inst = -1;
6278
6279 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6280 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6281 true, MAX_COLOR_LUT_ENTRIES);
6282 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6283
6284 return 0;
6285
6286 fail:
6287 kfree(acrtc);
6288 kfree(cursor_plane);
6289 return res;
6290 }
6291
6292
6293 static int to_drm_connector_type(enum signal_type st)
6294 {
6295 switch (st) {
6296 case SIGNAL_TYPE_HDMI_TYPE_A:
6297 return DRM_MODE_CONNECTOR_HDMIA;
6298 case SIGNAL_TYPE_EDP:
6299 return DRM_MODE_CONNECTOR_eDP;
6300 case SIGNAL_TYPE_LVDS:
6301 return DRM_MODE_CONNECTOR_LVDS;
6302 case SIGNAL_TYPE_RGB:
6303 return DRM_MODE_CONNECTOR_VGA;
6304 case SIGNAL_TYPE_DISPLAY_PORT:
6305 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6306 return DRM_MODE_CONNECTOR_DisplayPort;
6307 case SIGNAL_TYPE_DVI_DUAL_LINK:
6308 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6309 return DRM_MODE_CONNECTOR_DVID;
6310 case SIGNAL_TYPE_VIRTUAL:
6311 return DRM_MODE_CONNECTOR_VIRTUAL;
6312
6313 default:
6314 return DRM_MODE_CONNECTOR_Unknown;
6315 }
6316 }
6317
6318 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6319 {
6320 struct drm_encoder *encoder;
6321
6322 /* There is only one encoder per connector */
6323 drm_connector_for_each_possible_encoder(connector, encoder)
6324 return encoder;
6325
6326 return NULL;
6327 }
6328
6329 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6330 {
6331 struct drm_encoder *encoder;
6332 struct amdgpu_encoder *amdgpu_encoder;
6333
6334 encoder = amdgpu_dm_connector_to_encoder(connector);
6335
6336 if (encoder == NULL)
6337 return;
6338
6339 amdgpu_encoder = to_amdgpu_encoder(encoder);
6340
6341 amdgpu_encoder->native_mode.clock = 0;
6342
6343 if (!list_empty(&connector->probed_modes)) {
6344 struct drm_display_mode *preferred_mode = NULL;
6345
6346 list_for_each_entry(preferred_mode,
6347 &connector->probed_modes,
6348 head) {
6349 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6350 amdgpu_encoder->native_mode = *preferred_mode;
6351
6352 break;
6353 }
6354
6355 }
6356 }
6357
6358 static struct drm_display_mode *
6359 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6360 char *name,
6361 int hdisplay, int vdisplay)
6362 {
6363 struct drm_device *dev = encoder->dev;
6364 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6365 struct drm_display_mode *mode = NULL;
6366 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6367
6368 mode = drm_mode_duplicate(dev, native_mode);
6369
6370 if (mode == NULL)
6371 return NULL;
6372
6373 mode->hdisplay = hdisplay;
6374 mode->vdisplay = vdisplay;
6375 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6376 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6377
6378 return mode;
6379
6380 }
6381
6382 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6383 struct drm_connector *connector)
6384 {
6385 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6386 struct drm_display_mode *mode = NULL;
6387 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6388 struct amdgpu_dm_connector *amdgpu_dm_connector =
6389 to_amdgpu_dm_connector(connector);
6390 int i;
6391 int n;
6392 struct mode_size {
6393 char name[DRM_DISPLAY_MODE_LEN];
6394 int w;
6395 int h;
6396 } common_modes[] = {
6397 { "640x480", 640, 480},
6398 { "800x600", 800, 600},
6399 { "1024x768", 1024, 768},
6400 { "1280x720", 1280, 720},
6401 { "1280x800", 1280, 800},
6402 {"1280x1024", 1280, 1024},
6403 { "1440x900", 1440, 900},
6404 {"1680x1050", 1680, 1050},
6405 {"1600x1200", 1600, 1200},
6406 {"1920x1080", 1920, 1080},
6407 {"1920x1200", 1920, 1200}
6408 };
6409
6410 n = ARRAY_SIZE(common_modes);
6411
6412 for (i = 0; i < n; i++) {
6413 struct drm_display_mode *curmode = NULL;
6414 bool mode_existed = false;
6415
6416 if (common_modes[i].w > native_mode->hdisplay ||
6417 common_modes[i].h > native_mode->vdisplay ||
6418 (common_modes[i].w == native_mode->hdisplay &&
6419 common_modes[i].h == native_mode->vdisplay))
6420 continue;
6421
6422 list_for_each_entry(curmode, &connector->probed_modes, head) {
6423 if (common_modes[i].w == curmode->hdisplay &&
6424 common_modes[i].h == curmode->vdisplay) {
6425 mode_existed = true;
6426 break;
6427 }
6428 }
6429
6430 if (mode_existed)
6431 continue;
6432
6433 mode = amdgpu_dm_create_common_mode(encoder,
6434 common_modes[i].name, common_modes[i].w,
6435 common_modes[i].h);
6436 if (!mode)
6437 continue;
6438
6439 drm_mode_probed_add(connector, mode);
6440 amdgpu_dm_connector->num_modes++;
6441 }
6442 }
6443
6444 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6445 struct edid *edid)
6446 {
6447 struct amdgpu_dm_connector *amdgpu_dm_connector =
6448 to_amdgpu_dm_connector(connector);
6449
6450 if (edid) {
6451 /* empty probed_modes */
6452 INIT_LIST_HEAD(&connector->probed_modes);
6453 amdgpu_dm_connector->num_modes =
6454 drm_add_edid_modes(connector, edid);
6455
6456 /* Sort the probed modes before calling
6457 * amdgpu_dm_get_native_mode(), since an EDID can have
6458 * more than one preferred mode, and modes later in the
6459 * probed list may be of a higher, preferred resolution:
6460 * for example, 3840x2160 as the base EDID preferred
6461 * timing and 4096x2160 as the preferred resolution in a
6462 * DID extension block later.
6463 */
6464 drm_mode_sort(&connector->probed_modes);
6465 amdgpu_dm_get_native_mode(connector);
6466 } else {
6467 amdgpu_dm_connector->num_modes = 0;
6468 }
6469 }
6470
6471 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6472 {
6473 struct amdgpu_dm_connector *amdgpu_dm_connector =
6474 to_amdgpu_dm_connector(connector);
6475 struct drm_encoder *encoder;
6476 struct edid *edid = amdgpu_dm_connector->edid;
6477
6478 encoder = amdgpu_dm_connector_to_encoder(connector);
6479
6480 if (!edid || !drm_edid_is_valid(edid)) {
6481 amdgpu_dm_connector->num_modes =
6482 drm_add_modes_noedid(connector, 640, 480);
6483 } else {
6484 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6485 amdgpu_dm_connector_add_common_modes(encoder, connector);
6486 }
6487 amdgpu_dm_fbc_init(connector);
6488
6489 return amdgpu_dm_connector->num_modes;
6490 }
6491
6492 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6493 struct amdgpu_dm_connector *aconnector,
6494 int connector_type,
6495 struct dc_link *link,
6496 int link_index)
6497 {
6498 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6499
6500 /*
6501 * Some of the properties below require access to state, like bpc.
6502 * Allocate some default initial connector state with our reset helper.
6503 */
6504 if (aconnector->base.funcs->reset)
6505 aconnector->base.funcs->reset(&aconnector->base);
6506
6507 aconnector->connector_id = link_index;
6508 aconnector->dc_link = link;
6509 aconnector->base.interlace_allowed = false;
6510 aconnector->base.doublescan_allowed = false;
6511 aconnector->base.stereo_allowed = false;
6512 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6513 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6514 aconnector->audio_inst = -1;
6515 mutex_init(&aconnector->hpd_lock);
6516
6517 /*
6518 * Configure HPD hot plug support: connector->polled defaults to 0,
6519 * which means HPD hot plug is not supported.
6520 */
6521 switch (connector_type) {
6522 case DRM_MODE_CONNECTOR_HDMIA:
6523 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6524 aconnector->base.ycbcr_420_allowed =
6525 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6526 break;
6527 case DRM_MODE_CONNECTOR_DisplayPort:
6528 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6529 aconnector->base.ycbcr_420_allowed =
6530 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6531 break;
6532 case DRM_MODE_CONNECTOR_DVID:
6533 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6534 break;
6535 default:
6536 break;
6537 }
6538
6539 drm_object_attach_property(&aconnector->base.base,
6540 dm->ddev->mode_config.scaling_mode_property,
6541 DRM_MODE_SCALE_NONE);
6542
6543 drm_object_attach_property(&aconnector->base.base,
6544 adev->mode_info.underscan_property,
6545 UNDERSCAN_OFF);
6546 drm_object_attach_property(&aconnector->base.base,
6547 adev->mode_info.underscan_hborder_property,
6548 0);
6549 drm_object_attach_property(&aconnector->base.base,
6550 adev->mode_info.underscan_vborder_property,
6551 0);
6552
6553 if (!aconnector->mst_port)
6554 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6555
6556 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6557 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6558 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6559
6560 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6561 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6562 drm_object_attach_property(&aconnector->base.base,
6563 adev->mode_info.abm_level_property, 0);
6564 }
6565
6566 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6567 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6568 connector_type == DRM_MODE_CONNECTOR_eDP) {
6569 drm_object_attach_property(
6570 &aconnector->base.base,
6571 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6572
6573 if (!aconnector->mst_port)
6574 drm_connector_attach_vrr_capable_property(&aconnector->base);
6575
6576 #ifdef CONFIG_DRM_AMD_DC_HDCP
6577 if (adev->dm.hdcp_workqueue)
6578 drm_connector_attach_content_protection_property(&aconnector->base, true);
6579 #endif
6580 }
6581 }
6582
6583 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6584 struct i2c_msg *msgs, int num)
6585 {
6586 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6587 struct ddc_service *ddc_service = i2c->ddc_service;
6588 struct i2c_command cmd;
6589 int i;
6590 int result = -EIO;
6591
6592 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6593
6594 if (!cmd.payloads)
6595 return result;
6596
6597 cmd.number_of_payloads = num;
6598 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
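	/* Bus speed, presumably in kHz; 100 kHz is standard-mode I2C. */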
6599 cmd.speed = 100;
6600
6601 for (i = 0; i < num; i++) {
6602 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6603 cmd.payloads[i].address = msgs[i].addr;
6604 cmd.payloads[i].length = msgs[i].len;
6605 cmd.payloads[i].data = msgs[i].buf;
6606 }
6607
6608 if (dc_submit_i2c(
6609 ddc_service->ctx->dc,
6610 ddc_service->ddc_pin->hw_info.ddc_channel,
6611 &cmd))
6612 result = num;
6613
6614 kfree(cmd.payloads);
6615 return result;
6616 }
6617
6618 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6619 {
6620 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6621 }
6622
6623 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6624 .master_xfer = amdgpu_dm_i2c_xfer,
6625 .functionality = amdgpu_dm_i2c_func,
6626 };
6627
6628 static struct amdgpu_i2c_adapter *
6629 create_i2c(struct ddc_service *ddc_service,
6630 int link_index,
6631 int *res)
6632 {
6633 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6634 struct amdgpu_i2c_adapter *i2c;
6635
6636 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6637 if (!i2c)
6638 return NULL;
6639 i2c->base.owner = THIS_MODULE;
6640 i2c->base.class = I2C_CLASS_DDC;
6641 i2c->base.dev.parent = &adev->pdev->dev;
6642 i2c->base.algo = &amdgpu_dm_i2c_algo;
6643 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6644 i2c_set_adapdata(&i2c->base, i2c);
6645 i2c->ddc_service = ddc_service;
6646 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6647
6648 return i2c;
6649 }
6650
6651
6652 /*
6653 * Note: this function assumes that dc_link_detect() was called for the
6654 * dc_link which will be represented by this aconnector.
6655 */
6656 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6657 struct amdgpu_dm_connector *aconnector,
6658 uint32_t link_index,
6659 struct amdgpu_encoder *aencoder)
6660 {
6661 int res = 0;
6662 int connector_type;
6663 struct dc *dc = dm->dc;
6664 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6665 struct amdgpu_i2c_adapter *i2c;
6666
6667 link->priv = aconnector;
6668
6669 DRM_DEBUG_DRIVER("%s()\n", __func__);
6670
6671 i2c = create_i2c(link->ddc, link->link_index, &res);
6672 if (!i2c) {
6673 DRM_ERROR("Failed to create i2c adapter data\n");
6674 return -ENOMEM;
6675 }
6676
6677 aconnector->i2c = i2c;
6678 res = i2c_add_adapter(&i2c->base);
6679
6680 if (res) {
6681 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6682 goto out_free;
6683 }
6684
6685 connector_type = to_drm_connector_type(link->connector_signal);
6686
6687 res = drm_connector_init_with_ddc(
6688 dm->ddev,
6689 &aconnector->base,
6690 &amdgpu_dm_connector_funcs,
6691 connector_type,
6692 &i2c->base);
6693
6694 if (res) {
6695 DRM_ERROR("connector_init failed\n");
6696 aconnector->connector_id = -1;
6697 goto out_free;
6698 }
6699
6700 drm_connector_helper_add(
6701 &aconnector->base,
6702 &amdgpu_dm_connector_helper_funcs);
6703
6704 amdgpu_dm_connector_init_helper(
6705 dm,
6706 aconnector,
6707 connector_type,
6708 link,
6709 link_index);
6710
6711 drm_connector_attach_encoder(
6712 &aconnector->base, &aencoder->base);
6713
6714 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6715 || connector_type == DRM_MODE_CONNECTOR_eDP)
6716 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6717
6718 out_free:
6719 if (res) {
6720 kfree(i2c);
6721 aconnector->i2c = NULL;
6722 }
6723 return res;
6724 }
6725
6726 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6727 {
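	/* Effectively (1U << num_crtc) - 1, capped at 6 CRTCs. */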
6728 switch (adev->mode_info.num_crtc) {
6729 case 1:
6730 return 0x1;
6731 case 2:
6732 return 0x3;
6733 case 3:
6734 return 0x7;
6735 case 4:
6736 return 0xf;
6737 case 5:
6738 return 0x1f;
6739 case 6:
6740 default:
6741 return 0x3f;
6742 }
6743 }
6744
6745 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6746 struct amdgpu_encoder *aencoder,
6747 uint32_t link_index)
6748 {
6749 struct amdgpu_device *adev = drm_to_adev(dev);
6750
6751 int res = drm_encoder_init(dev,
6752 &aencoder->base,
6753 &amdgpu_dm_encoder_funcs,
6754 DRM_MODE_ENCODER_TMDS,
6755 NULL);
6756
6757 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6758
6759 if (!res)
6760 aencoder->encoder_id = link_index;
6761 else
6762 aencoder->encoder_id = -1;
6763
6764 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6765
6766 return res;
6767 }
6768
6769 static void manage_dm_interrupts(struct amdgpu_device *adev,
6770 struct amdgpu_crtc *acrtc,
6771 bool enable)
6772 {
6773 /*
6774 * We have no guarantee that the frontend index maps to the same
6775 * backend index - some even map to more than one.
6776 *
6777 * TODO: Use a different interrupt or check DC itself for the mapping.
6778 */
6779 int irq_type =
6780 amdgpu_display_crtc_idx_to_irq_type(
6781 adev,
6782 acrtc->crtc_id);
6783
6784 if (enable) {
6785 drm_crtc_vblank_on(&acrtc->base);
6786 amdgpu_irq_get(
6787 adev,
6788 &adev->pageflip_irq,
6789 irq_type);
6790 } else {
6791
6792 amdgpu_irq_put(
6793 adev,
6794 &adev->pageflip_irq,
6795 irq_type);
6796 drm_crtc_vblank_off(&acrtc->base);
6797 }
6798 }
6799
6800 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6801 struct amdgpu_crtc *acrtc)
6802 {
6803 int irq_type =
6804 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6805
6806 /*
6807 * This reads the current state for the IRQ and forces a reapply of
6808 * the setting to hardware.
6809 */
6810 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6811 }
6812
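/*
 * Summary of the checks below: the scaling state differs when the
 * scaling mode itself changed, when underscan was toggled while the
 * enabled side had non-zero borders, or when the border sizes changed.
 */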
6813 static bool
6814 is_scaling_state_different(const struct dm_connector_state *dm_state,
6815 const struct dm_connector_state *old_dm_state)
6816 {
6817 if (dm_state->scaling != old_dm_state->scaling)
6818 return true;
6819 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6820 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6821 return true;
6822 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6823 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6824 return true;
6825 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6826 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6827 return true;
6828 return false;
6829 }
6830
6831 #ifdef CONFIG_DRM_AMD_DC_HDCP
6832 static bool is_content_protection_different(struct drm_connector_state *state,
6833 const struct drm_connector_state *old_state,
6834 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6835 {
6836 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6837
6838 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6839 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6840 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6841 return true;
6842 }
6843
6844 /* CP is being re-enabled, ignore this */
6845 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6846 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6847 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6848 return false;
6849 }
6850
6851 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6852 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6853 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6854 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6855
6856 /* Check if something is connected/enabled; otherwise we would start HDCP
6857 * with nothing connected/enabled (hot-plug, headless S3, DPMS)
6858 */
6859 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6860 aconnector->dc_sink != NULL)
6861 return true;
6862
6863 if (old_state->content_protection == state->content_protection)
6864 return false;
6865
6866 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6867 return true;
6868
6869 return false;
6870 }
6871
6872 #endif
6873 static void remove_stream(struct amdgpu_device *adev,
6874 struct amdgpu_crtc *acrtc,
6875 struct dc_stream_state *stream)
6876 {
6877 /* this is the update mode case */
6878
6879 acrtc->otg_inst = -1;
6880 acrtc->enabled = false;
6881 }
6882
6883 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6884 struct dc_cursor_position *position)
6885 {
6886 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6887 int x, y;
6888 int xorigin = 0, yorigin = 0;
6889
6890 if (!crtc || !plane->state->fb)
6891 return 0;
6892
6893 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6894 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6895 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6896 __func__,
6897 plane->state->crtc_w,
6898 plane->state->crtc_h);
6899 return -EINVAL;
6900 }
6901
6902 x = plane->state->crtc_x;
6903 y = plane->state->crtc_y;
6904
6905 if (x <= -amdgpu_crtc->max_cursor_width ||
6906 y <= -amdgpu_crtc->max_cursor_height)
6907 return 0;
6908
6909 if (x < 0) {
6910 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6911 x = 0;
6912 }
6913 if (y < 0) {
6914 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6915 y = 0;
6916 }
6917 position->enable = true;
6918 position->translate_by_source = true;
6919 position->x = x;
6920 position->y = y;
6921 position->x_hotspot = xorigin;
6922 position->y_hotspot = yorigin;
6923
6924 return 0;
6925 }
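/*
 * Worked example: with max_cursor_width == 64 and crtc_x == -10, the code
 * above yields x == 0 and xorigin == 10, i.e. the hardware plane is pinned
 * to the left edge while the hotspot shifts 10px into the cursor image, so
 * the visible cursor position stays correct as it crosses the edge.
 */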
6926
6927 static void handle_cursor_update(struct drm_plane *plane,
6928 struct drm_plane_state *old_plane_state)
6929 {
6930 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6931 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6932 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6933 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6934 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6935 uint64_t address = afb ? afb->address : 0;
6936 struct dc_cursor_position position = {0};
6937 struct dc_cursor_attributes attributes;
6938 int ret;
6939
6940 if (!plane->state->fb && !old_plane_state->fb)
6941 return;
6942
6943 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6944 __func__,
6945 amdgpu_crtc->crtc_id,
6946 plane->state->crtc_w,
6947 plane->state->crtc_h);
6948
6949 ret = get_cursor_position(plane, crtc, &position);
6950 if (ret)
6951 return;
6952
6953 if (!position.enable) {
6954 /* turn off cursor */
6955 if (crtc_state && crtc_state->stream) {
6956 mutex_lock(&adev->dm.dc_lock);
6957 dc_stream_set_cursor_position(crtc_state->stream,
6958 &position);
6959 mutex_unlock(&adev->dm.dc_lock);
6960 }
6961 return;
6962 }
6963
6964 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6965 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6966
6967 memset(&attributes, 0, sizeof(attributes));
6968 attributes.address.high_part = upper_32_bits(address);
6969 attributes.address.low_part = lower_32_bits(address);
6970 attributes.width = plane->state->crtc_w;
6971 attributes.height = plane->state->crtc_h;
6972 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6973 attributes.rotation_angle = 0;
6974 attributes.attribute_flags.value = 0;
6975
6976 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
6977 * legacy gamma setup.
6978 */
6979 if (crtc_state->cm_is_degamma_srgb &&
6980 adev->dm.dc->caps.color.dpp.gamma_corr)
6981 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
6982
6983 attributes.pitch = attributes.width;
6984
6985 if (crtc_state->stream) {
6986 mutex_lock(&adev->dm.dc_lock);
6987 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6988 &attributes))
6989 DRM_ERROR("DC failed to set cursor attributes\n");
6990
6991 if (!dc_stream_set_cursor_position(crtc_state->stream,
6992 &position))
6993 DRM_ERROR("DC failed to set cursor position\n");
6994 mutex_unlock(&adev->dm.dc_lock);
6995 }
6996 }
6997
6998 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6999 {
7000
7001 assert_spin_locked(&acrtc->base.dev->event_lock);
7002 WARN_ON(acrtc->event);
7003
7004 acrtc->event = acrtc->base.state->event;
7005
7006 /* Set the flip status */
7007 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7008
7009 /* Mark this event as consumed */
7010 acrtc->base.state->event = NULL;
7011
7012 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7013 acrtc->crtc_id);
7014 }
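/*
 * From here the pageflip high-irq handler is expected to pick up
 * acrtc->event, deliver it to userspace and move pflip_status back to
 * AMDGPU_FLIP_NONE once the flip actually completes in hardware.
 */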
7015
7016 static void update_freesync_state_on_stream(
7017 struct amdgpu_display_manager *dm,
7018 struct dm_crtc_state *new_crtc_state,
7019 struct dc_stream_state *new_stream,
7020 struct dc_plane_state *surface,
7021 u32 flip_timestamp_in_us)
7022 {
7023 struct mod_vrr_params vrr_params;
7024 struct dc_info_packet vrr_infopacket = {0};
7025 struct amdgpu_device *adev = dm->adev;
7026 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7027 unsigned long flags;
7028
7029 if (!new_stream)
7030 return;
7031
7032 /*
7033 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7034 * For now it's sufficient to just guard against these conditions.
7035 */
7036
7037 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7038 return;
7039
7040 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7041 vrr_params = acrtc->dm_irq_params.vrr_params;
7042
7043 if (surface) {
7044 mod_freesync_handle_preflip(
7045 dm->freesync_module,
7046 surface,
7047 new_stream,
7048 flip_timestamp_in_us,
7049 &vrr_params);
7050
7051 if (adev->family < AMDGPU_FAMILY_AI &&
7052 amdgpu_dm_vrr_active(new_crtc_state)) {
7053 mod_freesync_handle_v_update(dm->freesync_module,
7054 new_stream, &vrr_params);
7055
7056 /* Need to call this before the frame ends. */
7057 dc_stream_adjust_vmin_vmax(dm->dc,
7058 new_crtc_state->stream,
7059 &vrr_params.adjust);
7060 }
7061 }
7062
7063 mod_freesync_build_vrr_infopacket(
7064 dm->freesync_module,
7065 new_stream,
7066 &vrr_params,
7067 PACKET_TYPE_VRR,
7068 TRANSFER_FUNC_UNKNOWN,
7069 &vrr_infopacket);
7070
7071 new_crtc_state->freesync_timing_changed |=
7072 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7073 &vrr_params.adjust,
7074 sizeof(vrr_params.adjust)) != 0);
7075
7076 new_crtc_state->freesync_vrr_info_changed |=
7077 (memcmp(&new_crtc_state->vrr_infopacket,
7078 &vrr_infopacket,
7079 sizeof(vrr_infopacket)) != 0);
7080
7081 acrtc->dm_irq_params.vrr_params = vrr_params;
7082 new_crtc_state->vrr_infopacket = vrr_infopacket;
7083
7084 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7085 new_stream->vrr_infopacket = vrr_infopacket;
7086
7087 if (new_crtc_state->freesync_vrr_info_changed)
7088 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7089 new_crtc_state->base.crtc->base.id,
7090 (int)new_crtc_state->base.vrr_enabled,
7091 (int)vrr_params.state);
7092
7093 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7094 }
7095
7096 static void update_stream_irq_parameters(
7097 struct amdgpu_display_manager *dm,
7098 struct dm_crtc_state *new_crtc_state)
7099 {
7100 struct dc_stream_state *new_stream = new_crtc_state->stream;
7101 struct mod_vrr_params vrr_params;
7102 struct mod_freesync_config config = new_crtc_state->freesync_config;
7103 struct amdgpu_device *adev = dm->adev;
7104 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7105 unsigned long flags;
7106
7107 if (!new_stream)
7108 return;
7109
7110 /*
7111 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7112 * For now it's sufficient to just guard against these conditions.
7113 */
7114 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7115 return;
7116
7117 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7118 vrr_params = acrtc->dm_irq_params.vrr_params;
7119
7120 if (new_crtc_state->vrr_supported &&
7121 config.min_refresh_in_uhz &&
7122 config.max_refresh_in_uhz) {
7123 config.state = new_crtc_state->base.vrr_enabled ?
7124 VRR_STATE_ACTIVE_VARIABLE :
7125 VRR_STATE_INACTIVE;
7126 } else {
7127 config.state = VRR_STATE_UNSUPPORTED;
7128 }
7129
7130 mod_freesync_build_vrr_params(dm->freesync_module,
7131 new_stream,
7132 &config, &vrr_params);
7133
7134 new_crtc_state->freesync_timing_changed |=
7135 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7136 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7137
7138 new_crtc_state->freesync_config = config;
7139 /* Copy state for access from DM IRQ handler */
7140 acrtc->dm_irq_params.freesync_config = config;
7141 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7142 acrtc->dm_irq_params.vrr_params = vrr_params;
7143 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7144 }
7145
7146 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7147 struct dm_crtc_state *new_state)
7148 {
7149 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7150 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7151
7152 if (!old_vrr_active && new_vrr_active) {
7153 /* Transition VRR inactive -> active:
7154 * While VRR is active, we must not disable the vblank irq, as a
7155 * re-enable after a disable would compute bogus vblank/pflip
7156 * timestamps if the transition happened inside the display front porch.
7157 *
7158 * We also need vupdate irq for the actual core vblank handling
7159 * at end of vblank.
7160 */
7161 dm_set_vupdate_irq(new_state->base.crtc, true);
7162 drm_crtc_vblank_get(new_state->base.crtc);
7163 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7164 __func__, new_state->base.crtc->base.id);
7165 } else if (old_vrr_active && !new_vrr_active) {
7166 /* Transition VRR active -> inactive:
7167 * Allow vblank irq disable again for fixed refresh rate.
7168 */
7169 dm_set_vupdate_irq(new_state->base.crtc, false);
7170 drm_crtc_vblank_put(new_state->base.crtc);
7171 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7172 __func__, new_state->base.crtc->base.id);
7173 }
7174 }
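/*
 * Reference pairing sketch: each inactive->active transition takes exactly
 * one vblank reference (drm_crtc_vblank_get) and one vupdate irq enable,
 * and the matching active->inactive transition releases both, so vblank
 * irqs stay enabled for precisely the lifetime of VRR on the crtc.
 */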
7175
7176 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7177 {
7178 struct drm_plane *plane;
7179 struct drm_plane_state *old_plane_state, *new_plane_state;
7180 int i;
7181
7182 /*
7183 * TODO: Make this per-stream so we don't issue redundant updates for
7184 * commits with multiple streams.
7185 */
7186 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7187 new_plane_state, i)
7188 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7189 handle_cursor_update(plane, old_plane_state);
7190 }
7191
7192 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7193 struct dc_state *dc_state,
7194 struct drm_device *dev,
7195 struct amdgpu_display_manager *dm,
7196 struct drm_crtc *pcrtc,
7197 bool wait_for_vblank)
7198 {
7199 uint32_t i;
7200 uint64_t timestamp_ns;
7201 struct drm_plane *plane;
7202 struct drm_plane_state *old_plane_state, *new_plane_state;
7203 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7204 struct drm_crtc_state *new_pcrtc_state =
7205 drm_atomic_get_new_crtc_state(state, pcrtc);
7206 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7207 struct dm_crtc_state *dm_old_crtc_state =
7208 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7209 int planes_count = 0, vpos, hpos;
7210 long r;
7211 unsigned long flags;
7212 struct amdgpu_bo *abo;
7213 uint32_t target_vblank, last_flip_vblank;
7214 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7215 bool pflip_present = false;
7216 struct {
7217 struct dc_surface_update surface_updates[MAX_SURFACES];
7218 struct dc_plane_info plane_infos[MAX_SURFACES];
7219 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7220 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7221 struct dc_stream_update stream_update;
7222 } *bundle;
7223
7224 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7225
7226 if (!bundle) {
7227 dm_error("Failed to allocate update bundle\n");
7228 goto cleanup;
7229 }
7230
7231 /*
7232 * Disable the cursor first if we're disabling all the planes.
7233 * It'll remain on the screen after the planes are re-enabled
7234 * if we don't.
7235 */
7236 if (acrtc_state->active_planes == 0)
7237 amdgpu_dm_commit_cursors(state);
7238
7239 /* update planes when needed */
7240 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7241 struct drm_crtc *crtc = new_plane_state->crtc;
7242 struct drm_crtc_state *new_crtc_state;
7243 struct drm_framebuffer *fb = new_plane_state->fb;
7244 bool plane_needs_flip;
7245 struct dc_plane_state *dc_plane;
7246 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7247
7248 /* Cursor plane is handled after stream updates */
7249 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7250 continue;
7251
7252 if (!fb || !crtc || pcrtc != crtc)
7253 continue;
7254
7255 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7256 if (!new_crtc_state->active)
7257 continue;
7258
7259 dc_plane = dm_new_plane_state->dc_state;
7260 if (!dc_plane)
7261 continue;
7262
7263 bundle->surface_updates[planes_count].surface = dc_plane;
7264 if (new_pcrtc_state->color_mgmt_changed) {
7265 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7266 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7267 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7268 }
7269
7270 fill_dc_scaling_info(new_plane_state,
7271 &bundle->scaling_infos[planes_count]);
7272
7273 bundle->surface_updates[planes_count].scaling_info =
7274 &bundle->scaling_infos[planes_count];
7275
7276 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7277
7278 pflip_present = pflip_present || plane_needs_flip;
7279
7280 if (!plane_needs_flip) {
7281 planes_count += 1;
7282 continue;
7283 }
7284
7285 abo = gem_to_amdgpu_bo(fb->obj[0]);
7286
7287 /*
7288 * Wait for all fences on this FB. Do a limited wait to avoid
7289 * deadlock during GPU reset, when this fence will not signal
7290 * but we hold the reservation lock for the BO.
7291 */
7292 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7293 false,
7294 msecs_to_jiffies(5000));
7295 if (unlikely(r <= 0))
7296 DRM_ERROR("Waiting for fences timed out!");
7297
7298 fill_dc_plane_info_and_addr(
7299 dm->adev, new_plane_state,
7300 dm_new_plane_state->tiling_flags,
7301 &bundle->plane_infos[planes_count],
7302 &bundle->flip_addrs[planes_count].address,
7303 dm_new_plane_state->tmz_surface, false);
7304
7305 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7306 new_plane_state->plane->index,
7307 bundle->plane_infos[planes_count].dcc.enable);
7308
7309 bundle->surface_updates[planes_count].plane_info =
7310 &bundle->plane_infos[planes_count];
7311
7312 /*
7313 * Only allow immediate flips for fast updates that don't
7314 * change FB pitch, DCC state, rotation or mirroring.
7315 */
7316 bundle->flip_addrs[planes_count].flip_immediate =
7317 crtc->state->async_flip &&
7318 acrtc_state->update_type == UPDATE_TYPE_FAST;
7319
7320 timestamp_ns = ktime_get_ns();
7321 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
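		/* ns -> us, e.g. 16,666,667 ns (one 60 Hz frame) -> 16,666 us */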
7322 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7323 bundle->surface_updates[planes_count].surface = dc_plane;
7324
7325 if (!bundle->surface_updates[planes_count].surface) {
7326 DRM_ERROR("No surface for CRTC: id=%d\n",
7327 acrtc_attach->crtc_id);
7328 continue;
7329 }
7330
7331 if (plane == pcrtc->primary)
7332 update_freesync_state_on_stream(
7333 dm,
7334 acrtc_state,
7335 acrtc_state->stream,
7336 dc_plane,
7337 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7338
7339 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7340 __func__,
7341 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7342 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7343
7344 planes_count += 1;
7345
7346 }
7347
7348 if (pflip_present) {
7349 if (!vrr_active) {
7350 /* Use old throttling in non-vrr fixed refresh rate mode
7351 * to keep flip scheduling based on target vblank counts
7352 * working in a backwards compatible way, e.g., for
7353 * clients using the GLX_OML_sync_control extension or
7354 * DRI3/Present extension with defined target_msc.
7355 */
7356 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7357 }
7358 else {
7359 /* For variable refresh rate mode only:
7360 * Get vblank of last completed flip to avoid > 1 vrr
7361 * flips per video frame by use of throttling, but allow
7362 * flip programming anywhere in the possibly large
7363 * variable vrr vblank interval for fine-grained flip
7364 * timing control and more opportunity to avoid stutter
7365 * on late submission of flips.
7366 */
7367 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7368 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7369 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7370 }
7371
7372 target_vblank = last_flip_vblank + wait_for_vblank;
7373
7374 /*
7375 * Wait until we're out of the vertical blank period before the one
7376 * targeted by the flip
7377 */
7378 while ((acrtc_attach->enabled &&
7379 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7380 0, &vpos, &hpos, NULL,
7381 NULL, &pcrtc->hwmode)
7382 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7383 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7384 (int)(target_vblank -
7385 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7386 usleep_range(1000, 1100);
7387 }
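		/*
		 * Throttle arithmetic for the wait above: with wait_for_vblank
		 * == true, target_vblank == last_flip_vblank + 1, and the signed
		 * difference (int)(target_vblank - counter) stays positive until
		 * the counter passes the target, so the ~1 ms sleeps repeat
		 * until we are at least one vblank past the last flip.
		 */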
7388
7389 /**
7390 * Prepare the flip event for the pageflip interrupt to handle.
7391 *
7392 * This only works in the case where we've already turned on the
7393 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7394 * from 0 -> n planes we have to skip a hardware generated event
7395 * and rely on sending it from software.
7396 */
7397 if (acrtc_attach->base.state->event &&
7398 acrtc_state->active_planes > 0) {
7399 drm_crtc_vblank_get(pcrtc);
7400
7401 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7402
7403 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7404 prepare_flip_isr(acrtc_attach);
7405
7406 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7407 }
7408
7409 if (acrtc_state->stream) {
7410 if (acrtc_state->freesync_vrr_info_changed)
7411 bundle->stream_update.vrr_infopacket =
7412 &acrtc_state->stream->vrr_infopacket;
7413 }
7414 }
7415
7416 /* Update the planes if changed or disable if we don't have any. */
7417 if ((planes_count || acrtc_state->active_planes == 0) &&
7418 acrtc_state->stream) {
7419 bundle->stream_update.stream = acrtc_state->stream;
7420 if (new_pcrtc_state->mode_changed) {
7421 bundle->stream_update.src = acrtc_state->stream->src;
7422 bundle->stream_update.dst = acrtc_state->stream->dst;
7423 }
7424
7425 if (new_pcrtc_state->color_mgmt_changed) {
7426 /*
7427 * TODO: This isn't fully correct since we've actually
7428 * already modified the stream in place.
7429 */
7430 bundle->stream_update.gamut_remap =
7431 &acrtc_state->stream->gamut_remap_matrix;
7432 bundle->stream_update.output_csc_transform =
7433 &acrtc_state->stream->csc_color_matrix;
7434 bundle->stream_update.out_transfer_func =
7435 acrtc_state->stream->out_transfer_func;
7436 }
7437
7438 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7439 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7440 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7441
7442 mutex_lock(&dm->dc_lock);
7443 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7444 acrtc_state->stream->link->psr_settings.psr_allow_active)
7445 amdgpu_dm_psr_disable(acrtc_state->stream);
7446 mutex_unlock(&dm->dc_lock);
7447
7448 /*
7449 * If FreeSync state on the stream has changed then we need to
7450 * re-adjust the min/max bounds now that DC doesn't handle this
7451 * as part of commit.
7452 */
7453 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7454 amdgpu_dm_vrr_active(acrtc_state)) {
7455 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7456 dc_stream_adjust_vmin_vmax(
7457 dm->dc, acrtc_state->stream,
7458 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7459 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7460 }
7461 mutex_lock(&dm->dc_lock);
7462
7463 dc_commit_updates_for_stream(dm->dc,
7464 bundle->surface_updates,
7465 planes_count,
7466 acrtc_state->stream,
7467 &bundle->stream_update,
7468 dc_state);
7469
7470 /**
7471 * Enable or disable the interrupts on the backend.
7472 *
7473 * Most pipes are put into power gating when unused.
7474 *
7475 * When a pipe is power gated we lose its interrupt enablement
7476 * state, so it must be reprogrammed once power gating is disabled.
7477 *
7478 * So we need to update the IRQ control state in hardware
7479 * whenever the pipe turns on (since it could be previously
7480 * power gated) or off (since some pipes can't be power gated
7481 * on some ASICs).
7482 */
7483 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7484 dm_update_pflip_irq_state(drm_to_adev(dev),
7485 acrtc_attach);
7486
7487 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7488 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7489 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7490 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7491 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7492 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7493 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7494 amdgpu_dm_psr_enable(acrtc_state->stream);
7495 }
7496
7497 mutex_unlock(&dm->dc_lock);
7498 }
7499
7500 /*
7501 * Update cursor state *after* programming all the planes.
7502 * This avoids redundant programming in the case where we're going
7503 * to be disabling a single plane, since those pipes are being disabled.
7504 */
7505 if (acrtc_state->active_planes)
7506 amdgpu_dm_commit_cursors(state);
7507
7508 cleanup:
7509 kfree(bundle);
7510 }
7511
7512 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7513 struct drm_atomic_state *state)
7514 {
7515 struct amdgpu_device *adev = drm_to_adev(dev);
7516 struct amdgpu_dm_connector *aconnector;
7517 struct drm_connector *connector;
7518 struct drm_connector_state *old_con_state, *new_con_state;
7519 struct drm_crtc_state *new_crtc_state;
7520 struct dm_crtc_state *new_dm_crtc_state;
7521 const struct dc_stream_status *status;
7522 int i, inst;
7523
7524 /* Notify device removals. */
7525 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7526 if (old_con_state->crtc != new_con_state->crtc) {
7527 /* CRTC changes require notification. */
7528 goto notify;
7529 }
7530
7531 if (!new_con_state->crtc)
7532 continue;
7533
7534 new_crtc_state = drm_atomic_get_new_crtc_state(
7535 state, new_con_state->crtc);
7536
7537 if (!new_crtc_state)
7538 continue;
7539
7540 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7541 continue;
7542
7543 notify:
7544 aconnector = to_amdgpu_dm_connector(connector);
7545
7546 mutex_lock(&adev->dm.audio_lock);
7547 inst = aconnector->audio_inst;
7548 aconnector->audio_inst = -1;
7549 mutex_unlock(&adev->dm.audio_lock);
7550
7551 amdgpu_dm_audio_eld_notify(adev, inst);
7552 }
7553
7554 /* Notify audio device additions. */
7555 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7556 if (!new_con_state->crtc)
7557 continue;
7558
7559 new_crtc_state = drm_atomic_get_new_crtc_state(
7560 state, new_con_state->crtc);
7561
7562 if (!new_crtc_state)
7563 continue;
7564
7565 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7566 continue;
7567
7568 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7569 if (!new_dm_crtc_state->stream)
7570 continue;
7571
7572 status = dc_stream_get_status(new_dm_crtc_state->stream);
7573 if (!status)
7574 continue;
7575
7576 aconnector = to_amdgpu_dm_connector(connector);
7577
7578 mutex_lock(&adev->dm.audio_lock);
7579 inst = status->audio_inst;
7580 aconnector->audio_inst = inst;
7581 mutex_unlock(&adev->dm.audio_lock);
7582
7583 amdgpu_dm_audio_eld_notify(adev, inst);
7584 }
7585 }
7586
7587 /**
7588 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7589 * @crtc_state: the DRM CRTC state
7590 * @stream_state: the DC stream state.
7591 *
7592 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7593 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7594 */
7595 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7596 struct dc_stream_state *stream_state)
7597 {
7598 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7599 }
7600
7601 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7602 struct drm_atomic_state *state,
7603 bool nonblock)
7604 {
7605 /*
7606 * Add a check here for SoCs that support the hardware cursor plane, to
7607 * unset legacy_cursor_update
7608 */
7609
7610 return drm_atomic_helper_commit(dev, state, nonblock);
7611
7612 /* TODO: Handle EINTR, re-enable IRQ */
7613 }
7614
7615 /**
7616 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7617 * @state: The atomic state to commit
7618 *
7619 * This will tell DC to commit the constructed DC state from atomic_check,
7620 * programming the hardware. Any failure here implies a hardware failure, since
7621 * atomic check should have filtered anything non-kosher.
7622 */
7623 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7624 {
7625 struct drm_device *dev = state->dev;
7626 struct amdgpu_device *adev = drm_to_adev(dev);
7627 struct amdgpu_display_manager *dm = &adev->dm;
7628 struct dm_atomic_state *dm_state;
7629 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7630 uint32_t i, j;
7631 struct drm_crtc *crtc;
7632 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7633 unsigned long flags;
7634 bool wait_for_vblank = true;
7635 struct drm_connector *connector;
7636 struct drm_connector_state *old_con_state, *new_con_state;
7637 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7638 int crtc_disable_count = 0;
7639 bool mode_set_reset_required = false;
7640
7641 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7642
7643 dm_state = dm_atomic_get_new_state(state);
7644 if (dm_state && dm_state->context) {
7645 dc_state = dm_state->context;
7646 } else {
7647 /* No state changes, retain current state. */
7648 dc_state_temp = dc_create_state(dm->dc);
7649 ASSERT(dc_state_temp);
7650 dc_state = dc_state_temp;
7651 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7652 }
7653
7654 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7655 new_crtc_state, i) {
7656 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7657
7658 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7659
7660 if (old_crtc_state->active &&
7661 (!new_crtc_state->active ||
7662 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7663 manage_dm_interrupts(adev, acrtc, false);
7664 dc_stream_release(dm_old_crtc_state->stream);
7665 }
7666 }
7667
7668 drm_atomic_helper_calc_timestamping_constants(state);
7669
7670 /* update changed items */
7671 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7672 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7673
7674 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7675 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7676
7677 DRM_DEBUG_DRIVER(
7678 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7679 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7680 "connectors_changed:%d\n",
7681 acrtc->crtc_id,
7682 new_crtc_state->enable,
7683 new_crtc_state->active,
7684 new_crtc_state->planes_changed,
7685 new_crtc_state->mode_changed,
7686 new_crtc_state->active_changed,
7687 new_crtc_state->connectors_changed);
7688
7689 /* Copy all transient state flags into dc state */
7690 if (dm_new_crtc_state->stream) {
7691 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7692 dm_new_crtc_state->stream);
7693 }
7694
7695 /* handles headless hotplug case, updating new_state and
7696 * aconnector as needed
7697 */
7698
7699 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7700
7701 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7702
7703 if (!dm_new_crtc_state->stream) {
7704 /*
7705 * This could happen because of issues with the
7706 * delivery of userspace notifications.
7707 * In this case userspace tries to set a mode on
7708 * a display which is in fact disconnected;
7709 * dc_sink is NULL on the aconnector and we
7710 * expect a mode reset to come soon.
7711 *
7712 * This can also happen when an unplug is done
7713 * during the resume sequence.
7714 *
7715 * In this case, we want to pretend we still
7716 * have a sink to keep the pipe running, so that
7717 * the hw state stays consistent with the sw state.
7718 */
7719 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7720 __func__, acrtc->base.base.id);
7721 continue;
7722 }
7723
7724 if (dm_old_crtc_state->stream)
7725 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7726
7727 pm_runtime_get_noresume(dev->dev);
7728
7729 acrtc->enabled = true;
7730 acrtc->hw_mode = new_crtc_state->mode;
7731 crtc->hwmode = new_crtc_state->mode;
7732 mode_set_reset_required = true;
7733 } else if (modereset_required(new_crtc_state)) {
7734 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7735 /* i.e. reset mode */
7736 if (dm_old_crtc_state->stream)
7737 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7738 mode_set_reset_required = true;
7739 }
7740 } /* for_each_crtc_in_state() */
7741
7742 if (dc_state) {
7743 /* if there is a mode set or reset, disable eDP PSR */
7744 if (mode_set_reset_required)
7745 amdgpu_dm_psr_disable_all(dm);
7746
7747 dm_enable_per_frame_crtc_master_sync(dc_state);
7748 mutex_lock(&dm->dc_lock);
7749 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7750 mutex_unlock(&dm->dc_lock);
7751 }
7752
7753 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7754 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7755
7756 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7757
7758 if (dm_new_crtc_state->stream != NULL) {
7759 const struct dc_stream_status *status =
7760 dc_stream_get_status(dm_new_crtc_state->stream);
7761
7762 if (!status)
7763 status = dc_stream_get_status_from_state(dc_state,
7764 dm_new_crtc_state->stream);
7765 if (!status)
7766 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7767 else
7768 acrtc->otg_inst = status->primary_otg_inst;
7769 }
7770 }
7771 #ifdef CONFIG_DRM_AMD_DC_HDCP
7772 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7773 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7774 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7775 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7776
7777 new_crtc_state = NULL;
7778
7779 if (acrtc)
7780 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7781
7782 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7783
7784 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7785 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7786 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7787 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7788 continue;
7789 }
7790
7791 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7792 hdcp_update_display(
7793 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7794 new_con_state->hdcp_content_type,
7795 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7796 : false);
7797 }
7798 #endif
7799
7800 /* Handle connector state changes */
7801 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7802 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7803 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7804 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7805 struct dc_surface_update dummy_updates[MAX_SURFACES];
7806 struct dc_stream_update stream_update;
7807 struct dc_info_packet hdr_packet;
7808 struct dc_stream_status *status = NULL;
7809 bool abm_changed, hdr_changed, scaling_changed;
7810
7811 memset(&dummy_updates, 0, sizeof(dummy_updates));
7812 memset(&stream_update, 0, sizeof(stream_update));
7813
7814 if (acrtc) {
7815 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7816 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7817 }
7818
7819 /* Skip any modesets/resets */
7820 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7821 continue;
7822
7823 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7824 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7825
7826 scaling_changed = is_scaling_state_different(dm_new_con_state,
7827 dm_old_con_state);
7828
7829 abm_changed = dm_new_crtc_state->abm_level !=
7830 dm_old_crtc_state->abm_level;
7831
7832 hdr_changed =
7833 is_hdr_metadata_different(old_con_state, new_con_state);
7834
7835 if (!scaling_changed && !abm_changed && !hdr_changed)
7836 continue;
7837
7838 stream_update.stream = dm_new_crtc_state->stream;
7839 if (scaling_changed) {
7840 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7841 dm_new_con_state, dm_new_crtc_state->stream);
7842
7843 stream_update.src = dm_new_crtc_state->stream->src;
7844 stream_update.dst = dm_new_crtc_state->stream->dst;
7845 }
7846
7847 if (abm_changed) {
7848 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7849
7850 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7851 }
7852
7853 if (hdr_changed) {
7854 fill_hdr_info_packet(new_con_state, &hdr_packet);
7855 stream_update.hdr_static_metadata = &hdr_packet;
7856 }
7857
7858 status = dc_stream_get_status(dm_new_crtc_state->stream);
7859 WARN_ON(!status);
7860 WARN_ON(!status->plane_count);
7861
7862 /*
7863 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7864 * Here we create an empty update on each plane.
7865 * To fix this, DC should permit updating only stream properties.
7866 */
7867 for (j = 0; j < status->plane_count; j++)
7868 dummy_updates[j].surface = status->plane_states[0];
7869
7870
7871 mutex_lock(&dm->dc_lock);
7872 dc_commit_updates_for_stream(dm->dc,
7873 dummy_updates,
7874 status->plane_count,
7875 dm_new_crtc_state->stream,
7876 &stream_update,
7877 dc_state);
7878 mutex_unlock(&dm->dc_lock);
7879 }
7880
7881 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7882 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7883 new_crtc_state, i) {
7884 if (old_crtc_state->active && !new_crtc_state->active)
7885 crtc_disable_count++;
7886
7887 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7888 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7889
7890 /* For freesync config update on crtc state and params for irq */
7891 update_stream_irq_parameters(dm, dm_new_crtc_state);
7892
7893 /* Handle vrr on->off / off->on transitions */
7894 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7895 dm_new_crtc_state);
7896 }
7897
7898 /**
7899 * Enable interrupts for CRTCs that are newly enabled or went through
7900 * a modeset. This is intentionally deferred until after the front-end
7901 * state has been modified, so that the OTG is on before the IRQ
7902 * handlers run and they never access stale or invalid state.
7903 */
7904 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7905 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7906
7907 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7908
7909 if (new_crtc_state->active &&
7910 (!old_crtc_state->active ||
7911 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7912 dc_stream_retain(dm_new_crtc_state->stream);
7913 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7914 manage_dm_interrupts(adev, acrtc, true);
7915
7916 #ifdef CONFIG_DEBUG_FS
7917 /**
7918 * Frontend may have changed so reapply the CRC capture
7919 * settings for the stream.
7920 */
7921 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7922
7923 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7924 amdgpu_dm_crtc_configure_crc_source(
7925 crtc, dm_new_crtc_state,
7926 dm_new_crtc_state->crc_src);
7927 }
7928 #endif
7929 }
7930 }
7931
7932 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7933 if (new_crtc_state->async_flip)
7934 wait_for_vblank = false;
7935
7936 /* update planes when needed per crtc*/
7937 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7938 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7939
7940 if (dm_new_crtc_state->stream)
7941 amdgpu_dm_commit_planes(state, dc_state, dev,
7942 dm, crtc, wait_for_vblank);
7943 }
7944
7945 /* Update audio instances for each connector. */
7946 amdgpu_dm_commit_audio(dev, state);
7947
7948 /*
7949 * Send a vblank event for every event not handled in the flip path, and
7950 * mark the events as consumed for drm_atomic_helper_commit_hw_done.
7951 */
7952 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7953 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7954
7955 if (new_crtc_state->event)
7956 drm_send_event_locked(dev, &new_crtc_state->event->base);
7957
7958 new_crtc_state->event = NULL;
7959 }
7960 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7961
7962 /* Signal HW programming completion */
7963 drm_atomic_helper_commit_hw_done(state);
7964
7965 if (wait_for_vblank)
7966 drm_atomic_helper_wait_for_flip_done(dev, state);
7967
7968 drm_atomic_helper_cleanup_planes(dev, state);
7969
7970 /*
7971 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7972 * so we can put the GPU into runtime suspend if we're not driving any
7973 * displays anymore
7974 */
7975 for (i = 0; i < crtc_disable_count; i++)
7976 pm_runtime_put_autosuspend(dev->dev);
7977 pm_runtime_mark_last_busy(dev->dev);
7978
7979 if (dc_state_temp)
7980 dc_release_state(dc_state_temp);
7981 }
7982
7983
7984 static int dm_force_atomic_commit(struct drm_connector *connector)
7985 {
7986 int ret = 0;
7987 struct drm_device *ddev = connector->dev;
7988 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7989 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7990 struct drm_plane *plane = disconnected_acrtc->base.primary;
7991 struct drm_connector_state *conn_state;
7992 struct drm_crtc_state *crtc_state;
7993 struct drm_plane_state *plane_state;
7994
7995 if (!state)
7996 return -ENOMEM;
7997
7998 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7999
8000 /* Construct an atomic state to restore previous display setting */
8001
8002 /*
8003 * Attach connectors to drm_atomic_state
8004 */
8005 conn_state = drm_atomic_get_connector_state(state, connector);
8006
8007 ret = PTR_ERR_OR_ZERO(conn_state);
8008 if (ret)
8009 goto out;
8010
8011 /* Attach crtc to drm_atomic_state*/
8012 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8013
8014 ret = PTR_ERR_OR_ZERO(crtc_state);
8015 if (ret)
8016 goto out;
8017
8018 /* force a restore */
8019 crtc_state->mode_changed = true;
8020
8021 /* Attach plane to drm_atomic_state */
8022 plane_state = drm_atomic_get_plane_state(state, plane);
8023
8024 ret = PTR_ERR_OR_ZERO(plane_state);
8025 if (ret)
8026 goto out;
8027
8028 /* Call commit internally with the state we just constructed */
8029 ret = drm_atomic_commit(state);
8030
8031 out:
8032 drm_atomic_state_put(state);
8033 if (ret)
8034 DRM_ERROR("Restoring old state failed with %i\n", ret);
8035
8036 return ret;
8037 }
8038
8039 /*
8040 * This function handles all cases when set mode does not come upon hotplug.
8041 * This includes when a display is unplugged then plugged back into the
8042 * same port, and when running without usermode desktop manager support.
8043 */
8044 void dm_restore_drm_connector_state(struct drm_device *dev,
8045 struct drm_connector *connector)
8046 {
8047 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8048 struct amdgpu_crtc *disconnected_acrtc;
8049 struct dm_crtc_state *acrtc_state;
8050
8051 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8052 return;
8053
8054 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8055 if (!disconnected_acrtc)
8056 return;
8057
8058 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8059 if (!acrtc_state->stream)
8060 return;
8061
8062 /*
8063 * If the previous sink is not released and is different from the current
8064 * one, we deduce we are in a state where we cannot rely on a usermode
8065 * call to turn on the display, so we do it here.
8066 */
8067 if (acrtc_state->stream->sink != aconnector->dc_sink)
8068 dm_force_atomic_commit(&aconnector->base);
8069 }
8070
8071 /*
8072 * Grabs all modesetting locks to serialize against any blocking commits,
8073 * then waits for completion of all non-blocking commits.
8074 */
8075 static int do_aquire_global_lock(struct drm_device *dev,
8076 struct drm_atomic_state *state)
8077 {
8078 struct drm_crtc *crtc;
8079 struct drm_crtc_commit *commit;
8080 long ret;
8081
8082 /*
8083 * Adding all modeset locks to acquire_ctx will
8084 * ensure that when the framework releases it, the
8085 * extra locks we are taking here will get released too.
8086 */
8087 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8088 if (ret)
8089 return ret;
8090
8091 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8092 spin_lock(&crtc->commit_lock);
8093 commit = list_first_entry_or_null(&crtc->commit_list,
8094 struct drm_crtc_commit, commit_entry);
8095 if (commit)
8096 drm_crtc_commit_get(commit);
8097 spin_unlock(&crtc->commit_lock);
8098
8099 if (!commit)
8100 continue;
8101
8102 /*
8103 * Make sure all pending HW programming has completed and
8104 * all page flips are done
8105 */
8106 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8107
8108 if (ret > 0)
8109 ret = wait_for_completion_interruptible_timeout(
8110 &commit->flip_done, 10*HZ);
8111
8112 if (ret == 0)
8113 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8114 "timed out\n", crtc->base.id, crtc->name);
8115
8116 drm_crtc_commit_put(commit);
8117 }
8118
8119 return ret < 0 ? ret : 0;
8120 }
8121
8122 static void get_freesync_config_for_crtc(
8123 struct dm_crtc_state *new_crtc_state,
8124 struct dm_connector_state *new_con_state)
8125 {
8126 struct mod_freesync_config config = {0};
8127 struct amdgpu_dm_connector *aconnector =
8128 to_amdgpu_dm_connector(new_con_state->base.connector);
8129 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8130 int vrefresh = drm_mode_vrefresh(mode);
8131
8132 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8133 vrefresh >= aconnector->min_vfreq &&
8134 vrefresh <= aconnector->max_vfreq;
8135
8136 if (new_crtc_state->vrr_supported) {
8137 new_crtc_state->stream->ignore_msa_timing_param = true;
8138 config.state = new_crtc_state->base.vrr_enabled ?
8139 VRR_STATE_ACTIVE_VARIABLE :
8140 VRR_STATE_INACTIVE;
8141 config.min_refresh_in_uhz =
8142 aconnector->min_vfreq * 1000000;
8143 config.max_refresh_in_uhz =
8144 aconnector->max_vfreq * 1000000;
8145 config.vsif_supported = true;
8146 config.btr = true;
8147 }
8148
8149 new_crtc_state->freesync_config = config;
8150 }
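/*
 * Worked example: a FreeSync panel reporting 48-144 Hz gives
 * min_refresh_in_uhz == 48,000,000 and max_refresh_in_uhz == 144,000,000;
 * the *_vfreq values are in Hz, hence the x1,000,000 scaling to uHz.
 */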
8151
8152 static void reset_freesync_config_for_crtc(
8153 struct dm_crtc_state *new_crtc_state)
8154 {
8155 new_crtc_state->vrr_supported = false;
8156
8157 memset(&new_crtc_state->vrr_infopacket, 0,
8158 sizeof(new_crtc_state->vrr_infopacket));
8159 }
8160
8161 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8162 struct drm_atomic_state *state,
8163 struct drm_crtc *crtc,
8164 struct drm_crtc_state *old_crtc_state,
8165 struct drm_crtc_state *new_crtc_state,
8166 bool enable,
8167 bool *lock_and_validation_needed)
8168 {
8169 struct dm_atomic_state *dm_state = NULL;
8170 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8171 struct dc_stream_state *new_stream;
8172 int ret = 0;
8173
8174 /*
8175 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8176 * update changed items
8177 */
8178 struct amdgpu_crtc *acrtc = NULL;
8179 struct amdgpu_dm_connector *aconnector = NULL;
8180 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8181 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8182
8183 new_stream = NULL;
8184
8185 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8186 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8187 acrtc = to_amdgpu_crtc(crtc);
8188 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8189
8190 /* TODO This hack should go away */
8191 if (aconnector && enable) {
8192 /* Make sure fake sink is created in plug-in scenario */
8193 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8194 &aconnector->base);
8195 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8196 &aconnector->base);
8197
8198 if (IS_ERR(drm_new_conn_state)) {
8199 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8200 goto fail;
8201 }
8202
8203 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8204 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8205
8206 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8207 goto skip_modeset;
8208
8209 new_stream = create_validate_stream_for_sink(aconnector,
8210 &new_crtc_state->mode,
8211 dm_new_conn_state,
8212 dm_old_crtc_state->stream);
8213
8214 /*
8215 * We can have no stream on ACTION_SET if a display
8216 * was disconnected during S3; in this case it is not an
8217 * error, the OS will be updated after detection, and
8218 * will do the right thing on the next atomic commit.
8219 */
8220
8221 if (!new_stream) {
8222 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8223 __func__, acrtc->base.base.id);
8224 ret = -ENOMEM;
8225 goto fail;
8226 }
8227
8228 /*
8229 * TODO: Check VSDB bits to decide whether this should
8230 * be enabled or not.
8231 */
8232 new_stream->triggered_crtc_reset.enabled =
8233 dm->force_timing_sync;
8234
8235 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8236
8237 ret = fill_hdr_info_packet(drm_new_conn_state,
8238 &new_stream->hdr_static_metadata);
8239 if (ret)
8240 goto fail;
8241
8242 /*
8243 * If we already removed the old stream from the context
8244 * (and set the new stream to NULL) then we can't reuse
8245 * the old stream even if the stream and scaling are unchanged.
8246 * We'll hit the BUG_ON and get a black screen.
8247 *
8248 * TODO: Refactor this function to allow this check to work
8249 * in all conditions.
8250 */
8251 if (dm_new_crtc_state->stream &&
8252 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8253 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8254 new_crtc_state->mode_changed = false;
8255 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8256 new_crtc_state->mode_changed);
8257 }
8258 }
8259
8260 /* mode_changed flag may get updated above, need to check again */
8261 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8262 goto skip_modeset;
8263
8264 DRM_DEBUG_DRIVER(
8265 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8266 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8267 "connectors_changed:%d\n",
8268 acrtc->crtc_id,
8269 new_crtc_state->enable,
8270 new_crtc_state->active,
8271 new_crtc_state->planes_changed,
8272 new_crtc_state->mode_changed,
8273 new_crtc_state->active_changed,
8274 new_crtc_state->connectors_changed);
8275
8276 /* Remove stream for any changed/disabled CRTC */
8277 if (!enable) {
8278
8279 if (!dm_old_crtc_state->stream)
8280 goto skip_modeset;
8281
8282 ret = dm_atomic_get_state(state, &dm_state);
8283 if (ret)
8284 goto fail;
8285
8286 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8287 crtc->base.id);
8288
8289 /* i.e. reset mode */
8290 if (dc_remove_stream_from_ctx(
8291 dm->dc,
8292 dm_state->context,
8293 dm_old_crtc_state->stream) != DC_OK) {
8294 ret = -EINVAL;
8295 goto fail;
8296 }
8297
8298 dc_stream_release(dm_old_crtc_state->stream);
8299 dm_new_crtc_state->stream = NULL;
8300
8301 reset_freesync_config_for_crtc(dm_new_crtc_state);
8302
8303 *lock_and_validation_needed = true;
8304
8305 } else {/* Add stream for any updated/enabled CRTC */
8306 /*
8307 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
8308 * added MST connectors are not found in the existing crtc_state in chained mode.
8309 * TODO: need to dig out the root cause of that.
8310 */
8311 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8312 goto skip_modeset;
8313
8314 if (modereset_required(new_crtc_state))
8315 goto skip_modeset;
8316
8317 if (modeset_required(new_crtc_state, new_stream,
8318 dm_old_crtc_state->stream)) {
8319
8320 WARN_ON(dm_new_crtc_state->stream);
8321
8322 ret = dm_atomic_get_state(state, &dm_state);
8323 if (ret)
8324 goto fail;
8325
8326 dm_new_crtc_state->stream = new_stream;
8327
8328 dc_stream_retain(new_stream);
8329
8330 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8331 crtc->base.id);
8332
8333 if (dc_add_stream_to_ctx(
8334 dm->dc,
8335 dm_state->context,
8336 dm_new_crtc_state->stream) != DC_OK) {
8337 ret = -EINVAL;
8338 goto fail;
8339 }
8340
8341 *lock_and_validation_needed = true;
8342 }
8343 }
8344
8345 skip_modeset:
8346 /* Release extra reference */
8347 if (new_stream)
8348 dc_stream_release(new_stream);
8349
8350 /*
8351 * We want to do dc stream updates that do not require a
8352 * full modeset below.
8353 */
8354 if (!(enable && aconnector && new_crtc_state->active))
8355 return 0;
8356 /*
8357 * Given above conditions, the dc state cannot be NULL because:
8358 * 1. We're in the process of enabling CRTCs (the stream has just
8359 * been added to the dc context, or is already in the context)
8360 * 2. Has a valid connector attached, and
8361 * 3. Is currently active and enabled.
8362 * => The dc stream state currently exists.
8363 */
8364 BUG_ON(dm_new_crtc_state->stream == NULL);
8365
8366 /* Scaling or underscan settings */
8367 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8368 drm_atomic_crtc_needs_modeset(new_crtc_state))
8369 update_stream_scaling_settings(
8370 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8371
8372 /* ABM settings */
8373 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8374
8375 /*
8376 * Color management settings. We also update color properties
8377 * when a modeset is needed, to ensure it gets reprogrammed.
8378 */
8379 if (dm_new_crtc_state->base.color_mgmt_changed ||
8380 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8381 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8382 if (ret)
8383 goto fail;
8384 }
8385
8386 /* Update Freesync settings. */
8387 get_freesync_config_for_crtc(dm_new_crtc_state,
8388 dm_new_conn_state);
8389
8390 return ret;
8391
8392 fail:
8393 if (new_stream)
8394 dc_stream_release(new_stream);
8395 return ret;
8396 }
8397
8398 static bool should_reset_plane(struct drm_atomic_state *state,
8399 struct drm_plane *plane,
8400 struct drm_plane_state *old_plane_state,
8401 struct drm_plane_state *new_plane_state)
8402 {
8403 struct drm_plane *other;
8404 struct drm_plane_state *old_other_state, *new_other_state;
8405 struct drm_crtc_state *new_crtc_state;
8406 int i;
8407
8408 /*
8409 * TODO: Remove this hack once the checks below are sufficient
8410 * enough to determine when we need to reset all the planes on
8411 * the stream.
8412 */
8413 if (state->allow_modeset)
8414 return true;
8415
8416 /* Exit early if we know that we're adding or removing the plane. */
8417 if (old_plane_state->crtc != new_plane_state->crtc)
8418 return true;
8419
8420 /* old crtc == new_crtc == NULL, plane not in context. */
8421 if (!new_plane_state->crtc)
8422 return false;
8423
8424 new_crtc_state =
8425 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8426
8427 if (!new_crtc_state)
8428 return true;
8429
8430 /* CRTC Degamma changes currently require us to recreate planes. */
8431 if (new_crtc_state->color_mgmt_changed)
8432 return true;
8433
8434 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8435 return true;
8436
8437 /*
8438 * If there are any new primary or overlay planes being added or
8439 * removed then the z-order can potentially change. To ensure
8440 * correct z-order and pipe acquisition the current DC architecture
8441 * requires us to remove and recreate all existing planes.
8442 *
8443 * TODO: Come up with a more elegant solution for this.
8444 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_dm_plane_state = to_dm_plane_state(old_other_state);
		new_dm_plane_state = to_dm_plane_state(new_other_state);

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_dm_plane_state->tiling_flags !=
		    new_dm_plane_state->tiling_flags)
			return true;
	}

	return false;
}

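/*
 * Add or remove a plane in the DC context for this atomic state. Called
 * twice from atomic check: once with enable == false to strip changed or
 * removed planes from the context, and once with enable == true to add
 * them back in their new configuration. Sets *lock_and_validation_needed
 * whenever the DC context was modified and global validation is required.
 */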
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement a better atomic check for the cursor plane. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
		    (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		if (dm_old_plane_state->dc_state)
			dc_plane_state_release(dm_old_plane_state->dc_state);

		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
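/*
 * On an MST link that uses DSC, changing the timing on one stream can
 * change the compression parameters and bandwidth required by every other
 * stream sharing the link. Pull all CRTCs driven through the same MST
 * manager into the atomic state so they are revalidated together.
 */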
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state, *old_conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
		if (!conn_state->crtc)
			conn_state = old_conn_state;

		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

static int validate_overlay(struct drm_atomic_state *state)
{
	int i;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane_state *primary_state, *overlay_state = NULL;

	/* Check if primary plane is contained inside overlay */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
				return 0;

			overlay_state = new_plane_state;
			continue;
		}
	}

	/* Check if we're making changes to the overlay plane */
	if (!overlay_state)
		return 0;

	/* Check if the overlay plane is enabled */
	if (!overlay_state->crtc)
		return 0;

	/* Find the primary plane for the CRTC that the overlay is enabled on */
	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	/* Check if the primary plane is enabled */
	if (!primary_state->crtc)
		return 0;

	/* Perform the bounds check to ensure the overlay plane covers the primary */
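	/*
	 * For example, a 1920x1080 primary at (0, 0) passes only if the
	 * overlay also spans at least (0, 0) through (1920, 1080); an
	 * overlay offset to (10, 10) would leave part of the primary
	 * uncovered and the commit is rejected.
	 */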
	if (primary_state->crtc_x < overlay_state->crtc_x ||
	    primary_state->crtc_y < overlay_state->crtc_y ||
	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state, since otherwise atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes, adds or updates streams on one
 * CRTC while flipping on another, acquiring the global lock guarantees that
 * any such full-update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;

	amdgpu_check_debugfs_connector_property_change(adev, state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
		    dm_old_con_state->scaling != dm_new_con_state->scaling)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->enable)
			continue;
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Prepass for updating tiling flags on new planes. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);

		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
				  &new_dm_plane_state->tmz_surface);
		if (ret)
			goto fail;
	}

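	/*
	 * The DC context is rebuilt in a fixed order: first strip the planes
	 * and streams that are changing, then re-add them in their new
	 * configuration. Plane removal therefore runs with enable == false
	 * before the CRTC disable pass, and the CRTC enable pass runs before
	 * planes are re-added with enable == true.
	 */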
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	ret = validate_overlay(state);
	if (ret)
		goto fail;

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/*
	 * Check scaling and underscan changes.
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			drm_dbg_atomic(dev,
				       "DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

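/*
 * Query the sink's DPCD to find out whether it can ignore the MSA timing
 * parameters (the DP_MSA_TIMING_PAR_IGNORED bit of the down stream port
 * count register). FreeSync over DP/eDP is only reported for sinks that
 * set this bit, since variable refresh asks the sink to tolerate timings
 * that deviate from the advertised MSA values.
 */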
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data)))
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);

	return capable;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink is NULL, could not add the free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	/*
	 * The EDID is non-NULL here, so restrict FreeSync to DP and eDP
	 * sinks that can ignore the MSA timing parameters.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP)
		edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);

	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/* Check if the monitor has a continuous frequency mode. */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for range-limits-only descriptors. If flags == 1
			 * then no additional timing information is provided;
			 * default GTF, GTF secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

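		/*
		 * A display advertising a 40-60 Hz range descriptor, for
		 * instance, yields a 20 Hz window, which clears the 10 Hz
		 * minimum required below to report FreeSync capability.
		 */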
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;

	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a fail-safe of two
	 * static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);
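
	/*
	 * Illustrative arithmetic: for a standard 1080p timing with a
	 * 148.5 MHz pixel clock and v_total/h_total of 1125/2200, this
	 * gives 148500000 / 1125 / 2200 = 60 Hz.
	 */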

	/*
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed, rounding up.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
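
	/*
	 * At the 60 Hz example rate, frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames of static
	 * screen before PSR entry is requested.
	 */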

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}

/*
 * amdgpu_dm_psr_disable_all() - disable the PSR firmware
 * if PSR is enabled on any stream
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
