1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105
106 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
107 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
108
109 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
111
112 /* Number of bytes in PSP header for firmware. */
113 #define PSP_HEADER_BYTES 0x100
114
115 /* Number of bytes in PSP footer for firmware. */
116 #define PSP_FOOTER_BYTES 0x100
117
118 /**
119 * DOC: overview
120 *
121 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
123 * requests into DC requests, and DC responses into DRM responses.
124 *
125 * The root control structure is &struct amdgpu_display_manager.
126 */
127
128 /* basic init/fini API */
129 static int amdgpu_dm_init(struct amdgpu_device *adev);
130 static void amdgpu_dm_fini(struct amdgpu_device *adev);
131
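/* Map the DC dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed to userspace.
 */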
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
133 {
134 switch (link->dpcd_caps.dongle_type) {
135 case DISPLAY_DONGLE_NONE:
136 return DRM_MODE_SUBCONNECTOR_Native;
137 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
138 return DRM_MODE_SUBCONNECTOR_VGA;
139 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
140 case DISPLAY_DONGLE_DP_DVI_DONGLE:
141 return DRM_MODE_SUBCONNECTOR_DVID;
142 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
143 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_HDMIA;
145 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
146 default:
147 return DRM_MODE_SUBCONNECTOR_Unknown;
148 }
149 }
150
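/* Refresh the DP subconnector property on the DRM connector based on the
 * dongle type of the currently attached sink.
 */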
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
152 {
153 struct dc_link *link = aconnector->dc_link;
154 struct drm_connector *connector = &aconnector->base;
155 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
156
157 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
158 return;
159
160 if (aconnector->dc_sink)
161 subconnector = get_subconnector_type(link);
162
163 drm_object_property_set_value(&connector->base,
164 connector->dev->mode_config.dp_subconnector_property,
165 subconnector);
166 }
167
168 /*
169 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
171 * drm_encoder, drm_mode_config
172 *
173 * Returns 0 on success
174 */
175 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
176 /* removes and deallocates the drm structures, created by the above function */
177 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
178
179 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
180 struct drm_plane *plane,
181 unsigned long possible_crtcs,
182 const struct dc_plane_cap *plane_cap);
183 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
184 struct drm_plane *plane,
185 uint32_t link_index);
186 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
187 struct amdgpu_dm_connector *amdgpu_dm_connector,
188 uint32_t link_index,
189 struct amdgpu_encoder *amdgpu_encoder);
190 static int amdgpu_dm_encoder_init(struct drm_device *dev,
191 struct amdgpu_encoder *aencoder,
192 uint32_t link_index);
193
194 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
195
196 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
197 struct drm_atomic_state *state,
198 bool nonblock);
199
200 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201
202 static int amdgpu_dm_atomic_check(struct drm_device *dev,
203 struct drm_atomic_state *state);
204
205 static void handle_cursor_update(struct drm_plane *plane,
206 struct drm_plane_state *old_plane_state);
207
208 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
209 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
212 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: amdgpu device to query
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vblank counter of the CRTC's stream, or 0 if the index is out
 * of range or the CRTC has no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
228 {
229 if (crtc >= adev->mode_info.num_crtc)
230 return 0;
231 else {
232 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
233
234 if (acrtc->dm_irq_params.stream == NULL) {
235 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
236 crtc);
237 return 0;
238 }
239
240 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
241 }
242 }
243
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
246 {
247 uint32_t v_blank_start, v_blank_end, h_position, v_position;
248
249 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
250 return -EINVAL;
251 else {
252 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
253
254 if (acrtc->dm_irq_params.stream == NULL) {
255 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
256 crtc);
257 return 0;
258 }
259
260 /*
261 * TODO rework base driver to use values directly.
262 * for now parse it back into reg-format
263 */
264 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
265 &v_blank_start,
266 &v_blank_end,
267 &h_position,
268 &v_position);
269
270 *position = v_position | (h_position << 16);
271 *vbl = v_blank_start | (v_blank_end << 16);
272 }
273
274 return 0;
275 }
276
static bool dm_is_idle(void *handle)
278 {
279 /* XXX todo */
280 return true;
281 }
282
static int dm_wait_for_idle(void *handle)
284 {
285 /* XXX todo */
286 return 0;
287 }
288
static bool dm_check_soft_reset(void *handle)
290 {
291 return false;
292 }
293
static int dm_soft_reset(void *handle)
295 {
296 /* XXX todo */
297 return 0;
298 }
299
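/* Look up the amdgpu_crtc driven by the given OTG instance. Falls back to
 * the first CRTC (with a warning) if the instance is -1.
 */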
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
303 {
304 struct drm_device *dev = adev_to_drm(adev);
305 struct drm_crtc *crtc;
306 struct amdgpu_crtc *amdgpu_crtc;
307
308 if (otg_inst == -1) {
309 WARN_ON(1);
310 return adev->mode_info.crtcs[0];
311 }
312
313 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
314 amdgpu_crtc = to_amdgpu_crtc(crtc);
315
316 if (amdgpu_crtc->otg_inst == otg_inst)
317 return amdgpu_crtc;
318 }
319
320 return NULL;
321 }
322
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
324 {
325 return acrtc->dm_irq_params.freesync_config.state ==
326 VRR_STATE_ACTIVE_VARIABLE ||
327 acrtc->dm_irq_params.freesync_config.state ==
328 VRR_STATE_ACTIVE_FIXED;
329 }
330
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
332 {
333 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
334 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
335 }
336
337 /**
338 * dm_pflip_high_irq() - Handle pageflip interrupt
339 * @interrupt_params: ignored
340 *
341 * Handles the pageflip interrupt by notifying all interested parties
342 * that the pageflip has been completed.
343 */
static void dm_pflip_high_irq(void *interrupt_params)
345 {
346 struct amdgpu_crtc *amdgpu_crtc;
347 struct common_irq_params *irq_params = interrupt_params;
348 struct amdgpu_device *adev = irq_params->adev;
349 unsigned long flags;
350 struct drm_pending_vblank_event *e;
351 uint32_t vpos, hpos, v_blank_start, v_blank_end;
352 bool vrr_active;
353
354 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
355
356 /* IRQ could occur when in initial stage */
357 /* TODO work and BO cleanup */
358 if (amdgpu_crtc == NULL) {
359 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
360 return;
361 }
362
363 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
364
365 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
366 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
367 amdgpu_crtc->pflip_status,
368 AMDGPU_FLIP_SUBMITTED,
369 amdgpu_crtc->crtc_id,
370 amdgpu_crtc);
371 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
372 return;
373 }
374
375 /* page flip completed. */
376 e = amdgpu_crtc->event;
377 amdgpu_crtc->event = NULL;
378
379 if (!e)
380 WARN_ON(1);
381
382 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
383
384 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
385 if (!vrr_active ||
386 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
387 &v_blank_end, &hpos, &vpos) ||
388 (vpos < v_blank_start)) {
389 /* Update to correct count and vblank timestamp if racing with
390 * vblank irq. This also updates to the correct vblank timestamp
391 * even in VRR mode, as scanout is past the front-porch atm.
392 */
393 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
394
395 /* Wake up userspace by sending the pageflip event with proper
396 * count and timestamp of vblank of flip completion.
397 */
398 if (e) {
399 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
400
401 /* Event sent, so done with vblank for this flip */
402 drm_crtc_vblank_put(&amdgpu_crtc->base);
403 }
404 } else if (e) {
405 /* VRR active and inside front-porch: vblank count and
406 * timestamp for pageflip event will only be up to date after
407 * drm_crtc_handle_vblank() has been executed from late vblank
408 * irq handler after start of back-porch (vline 0). We queue the
409 * pageflip event for send-out by drm_crtc_handle_vblank() with
410 * updated timestamp and count, once it runs after us.
411 *
412 * We need to open-code this instead of using the helper
413 * drm_crtc_arm_vblank_event(), as that helper would
414 * call drm_crtc_accurate_vblank_count(), which we must
415 * not call in VRR mode while we are in front-porch!
416 */
417
418 /* sequence will be replaced by real count during send-out. */
419 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
420 e->pipe = amdgpu_crtc->crtc_id;
421
422 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
423 e = NULL;
424 }
425
426 /* Keep track of vblank of this flip for flip throttling. We use the
427 * cooked hw counter, as that one incremented at start of this vblank
428 * of pageflip completion, so last_flip_vblank is the forbidden count
429 * for queueing new pageflips if vsync + VRR is enabled.
430 */
431 amdgpu_crtc->dm_irq_params.last_flip_vblank =
432 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
433
434 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
435 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
436
437 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
438 amdgpu_crtc->crtc_id, amdgpu_crtc,
439 vrr_active, (int) !e);
440 }
441
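/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode, core vblank handling is deferred to this point, after the end
 * of front-porch, so that vblank timestamps are valid; BTR processing for
 * pre-DCE12 ASICs is also performed here.
 */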
static void dm_vupdate_high_irq(void *interrupt_params)
443 {
444 struct common_irq_params *irq_params = interrupt_params;
445 struct amdgpu_device *adev = irq_params->adev;
446 struct amdgpu_crtc *acrtc;
447 unsigned long flags;
448 int vrr_active;
449
450 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
451
452 if (acrtc) {
453 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
454
455 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
456 acrtc->crtc_id,
457 vrr_active);
458
459 /* Core vblank handling is done here after end of front-porch in
460 * vrr mode, as vblank timestamping will give valid results
461 * while now done after front-porch. This will also deliver
462 * page-flip completion events that have been queued to us
463 * if a pageflip happened inside front-porch.
464 */
465 if (vrr_active) {
466 drm_crtc_handle_vblank(&acrtc->base);
467
468 /* BTR processing for pre-DCE12 ASICs */
469 if (acrtc->dm_irq_params.stream &&
470 adev->family < AMDGPU_FAMILY_AI) {
471 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
472 mod_freesync_handle_v_update(
473 adev->dm.freesync_module,
474 acrtc->dm_irq_params.stream,
475 &acrtc->dm_irq_params.vrr_params);
476
477 dc_stream_adjust_vmin_vmax(
478 adev->dm.dc,
479 acrtc->dm_irq_params.stream,
480 &acrtc->dm_irq_params.vrr_params.adjust);
481 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
482 }
483 }
484 }
485 }
486
487 /**
488 * dm_crtc_high_irq() - Handles CRTC interrupt
489 * @interrupt_params: used for determining the CRTC instance
490 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
492 * event handler.
493 */
static void dm_crtc_high_irq(void *interrupt_params)
495 {
496 struct common_irq_params *irq_params = interrupt_params;
497 struct amdgpu_device *adev = irq_params->adev;
498 struct amdgpu_crtc *acrtc;
499 unsigned long flags;
500 int vrr_active;
501
502 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
503 if (!acrtc)
504 return;
505
506 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
507
508 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
509 vrr_active, acrtc->dm_irq_params.active_planes);
510
511 /**
512 * Core vblank handling at start of front-porch is only possible
513 * in non-vrr mode, as only there vblank timestamping will give
514 * valid results while done in front-porch. Otherwise defer it
515 * to dm_vupdate_high_irq after end of front-porch.
516 */
517 if (!vrr_active)
518 drm_crtc_handle_vblank(&acrtc->base);
519
520 /**
521 * Following stuff must happen at start of vblank, for crc
522 * computation and below-the-range btr support in vrr mode.
523 */
524 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
525
526 /* BTR updates need to happen before VUPDATE on Vega and above. */
527 if (adev->family < AMDGPU_FAMILY_AI)
528 return;
529
530 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
531
532 if (acrtc->dm_irq_params.stream &&
533 acrtc->dm_irq_params.vrr_params.supported &&
534 acrtc->dm_irq_params.freesync_config.state ==
535 VRR_STATE_ACTIVE_VARIABLE) {
536 mod_freesync_handle_v_update(adev->dm.freesync_module,
537 acrtc->dm_irq_params.stream,
538 &acrtc->dm_irq_params.vrr_params);
539
540 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
541 &acrtc->dm_irq_params.vrr_params.adjust);
542 }
543
544 /*
545 * If there aren't any active_planes then DCH HUBP may be clock-gated.
546 * In that case, pageflip completion interrupts won't fire and pageflip
547 * completion events won't get delivered. Prevent this by sending
548 * pending pageflip events from here if a flip is still pending.
549 *
550 * If any planes are enabled, use dm_pflip_high_irq() instead, to
551 * avoid race conditions between flip programming and completion,
552 * which could cause too early flip completion events.
553 */
554 if (adev->family >= AMDGPU_FAMILY_RV &&
555 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
556 acrtc->dm_irq_params.active_planes == 0) {
557 if (acrtc->event) {
558 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
559 acrtc->event = NULL;
560 drm_crtc_vblank_put(&acrtc->base);
561 }
562 acrtc->pflip_status = AMDGPU_FLIP_NONE;
563 }
564
565 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
566 }
567
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
570 {
571 return 0;
572 }
573
static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
576 {
577 return 0;
578 }
579
580 /* Prototypes of private functions */
static int dm_early_init(void *handle);
582
583 /* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
585 {
586 struct drm_device *dev = connector->dev;
587 struct amdgpu_device *adev = drm_to_adev(dev);
588 struct dm_compressor_info *compressor = &adev->dm.compressor;
589 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
590 struct drm_display_mode *mode;
591 unsigned long max_size = 0;
592
593 if (adev->dm.dc->fbc_compressor == NULL)
594 return;
595
596 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
597 return;
598
599 if (compressor->bo_ptr)
600 return;
601
602
603 list_for_each_entry(mode, &connector->modes, head) {
604 if (max_size < mode->htotal * mode->vtotal)
605 max_size = mode->htotal * mode->vtotal;
606 }
607
608 if (max_size) {
609 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
610 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
611 &compressor->gpu_addr, &compressor->cpu_addr);
612
613 if (r)
614 DRM_ERROR("DM: Failed to initialize FBC\n");
615 else {
616 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
617 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
618 }
619
620 }
621
622 }
623
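/* DRM audio component callback: copy the ELD of the connector whose audio
 * instance matches @port into @buf and report whether audio is enabled.
 */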
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
627 {
628 struct drm_device *dev = dev_get_drvdata(kdev);
629 struct amdgpu_device *adev = drm_to_adev(dev);
630 struct drm_connector *connector;
631 struct drm_connector_list_iter conn_iter;
632 struct amdgpu_dm_connector *aconnector;
633 int ret = 0;
634
635 *enabled = false;
636
637 mutex_lock(&adev->dm.audio_lock);
638
639 drm_connector_list_iter_begin(dev, &conn_iter);
640 drm_for_each_connector_iter(connector, &conn_iter) {
641 aconnector = to_amdgpu_dm_connector(connector);
642 if (aconnector->audio_inst != port)
643 continue;
644
645 *enabled = true;
646 ret = drm_eld_size(connector->eld);
647 memcpy(buf, connector->eld, min(max_bytes, ret));
648
649 break;
650 }
651 drm_connector_list_iter_end(&conn_iter);
652
653 mutex_unlock(&adev->dm.audio_lock);
654
655 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
656
657 return ret;
658 }
659
660 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
661 .get_eld = amdgpu_dm_audio_component_get_eld,
662 };
663
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
666 {
667 struct drm_device *dev = dev_get_drvdata(kdev);
668 struct amdgpu_device *adev = drm_to_adev(dev);
669 struct drm_audio_component *acomp = data;
670
671 acomp->ops = &amdgpu_dm_audio_component_ops;
672 acomp->dev = kdev;
673 adev->dm.audio_component = acomp;
674
675 return 0;
676 }
677
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
680 {
681 struct drm_device *dev = dev_get_drvdata(kdev);
682 struct amdgpu_device *adev = drm_to_adev(dev);
683 struct drm_audio_component *acomp = data;
684
685 acomp->ops = NULL;
686 acomp->dev = NULL;
687 adev->dm.audio_component = NULL;
688 }
689
690 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
691 .bind = amdgpu_dm_audio_component_bind,
692 .unbind = amdgpu_dm_audio_component_unbind,
693 };
694
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
696 {
697 int i, ret;
698
699 if (!amdgpu_audio)
700 return 0;
701
702 adev->mode_info.audio.enabled = true;
703
704 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
705
706 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
707 adev->mode_info.audio.pin[i].channels = -1;
708 adev->mode_info.audio.pin[i].rate = -1;
709 adev->mode_info.audio.pin[i].bits_per_sample = -1;
710 adev->mode_info.audio.pin[i].status_bits = 0;
711 adev->mode_info.audio.pin[i].category_code = 0;
712 adev->mode_info.audio.pin[i].connected = false;
713 adev->mode_info.audio.pin[i].id =
714 adev->dm.dc->res_pool->audios[i]->inst;
715 adev->mode_info.audio.pin[i].offset = 0;
716 }
717
718 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
719 if (ret < 0)
720 return ret;
721
722 adev->dm.audio_registered = true;
723
724 return 0;
725 }
726
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
728 {
729 if (!amdgpu_audio)
730 return;
731
732 if (!adev->mode_info.audio.enabled)
733 return;
734
735 if (adev->dm.audio_registered) {
736 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
737 adev->dm.audio_registered = false;
738 }
739
740 /* TODO: Disable audio? */
741
742 adev->mode_info.audio.enabled = false;
743 }
744
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
746 {
747 struct drm_audio_component *acomp = adev->dm.audio_component;
748
749 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
750 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
751
752 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
753 pin, -1);
754 }
755 }
756
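/* Copy the DMUB firmware and VBIOS into their framebuffer regions (unless
 * the PSP front-door load already loaded the instruction constants),
 * program the DMUB hardware and wait for the firmware auto-load to finish.
 */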
static int dm_dmub_hw_init(struct amdgpu_device *adev)
758 {
759 const struct dmcub_firmware_header_v1_0 *hdr;
760 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
761 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
762 const struct firmware *dmub_fw = adev->dm.dmub_fw;
763 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
764 struct abm *abm = adev->dm.dc->res_pool->abm;
765 struct dmub_srv_hw_params hw_params;
766 enum dmub_status status;
767 const unsigned char *fw_inst_const, *fw_bss_data;
768 uint32_t i, fw_inst_const_size, fw_bss_data_size;
769 bool has_hw_support;
770
771 if (!dmub_srv)
772 /* DMUB isn't supported on the ASIC. */
773 return 0;
774
775 if (!fb_info) {
776 DRM_ERROR("No framebuffer info for DMUB service.\n");
777 return -EINVAL;
778 }
779
780 if (!dmub_fw) {
781 /* Firmware required for DMUB support. */
782 DRM_ERROR("No firmware provided for DMUB.\n");
783 return -EINVAL;
784 }
785
786 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
787 if (status != DMUB_STATUS_OK) {
788 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
789 return -EINVAL;
790 }
791
792 if (!has_hw_support) {
793 DRM_INFO("DMUB unsupported on ASIC\n");
794 return 0;
795 }
796
797 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
798
799 fw_inst_const = dmub_fw->data +
800 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
801 PSP_HEADER_BYTES;
802
803 fw_bss_data = dmub_fw->data +
804 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
805 le32_to_cpu(hdr->inst_const_bytes);
806
807 /* Copy firmware and bios info into FB memory. */
808 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
809 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
810
811 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
812
813 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
814 * amdgpu_ucode_init_single_fw will load dmub firmware
815 * fw_inst_const part to cw0; otherwise, the firmware back door load
816 * will be done by dm_dmub_hw_init
817 */
818 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
819 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
820 fw_inst_const_size);
821 }
822
823 if (fw_bss_data_size)
824 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
825 fw_bss_data, fw_bss_data_size);
826
827 /* Copy firmware bios info into FB memory. */
828 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
829 adev->bios_size);
830
831 /* Reset regions that need to be reset. */
832 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
833 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
834
835 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
836 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
837
838 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
839 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
840
841 /* Initialize hardware. */
842 memset(&hw_params, 0, sizeof(hw_params));
843 hw_params.fb_base = adev->gmc.fb_start;
844 hw_params.fb_offset = adev->gmc.aper_base;
845
846 /* backdoor load firmware and trigger dmub running */
847 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
848 hw_params.load_inst_const = true;
849
850 if (dmcu)
851 hw_params.psp_version = dmcu->psp_version;
852
853 for (i = 0; i < fb_info->num_fb; ++i)
854 hw_params.fb[i] = &fb_info->fb[i];
855
856 status = dmub_srv_hw_init(dmub_srv, &hw_params);
857 if (status != DMUB_STATUS_OK) {
858 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
859 return -EINVAL;
860 }
861
862 /* Wait for firmware load to finish. */
863 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
864 if (status != DMUB_STATUS_OK)
865 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
866
867 /* Init DMCU and ABM if available. */
868 if (dmcu && abm) {
869 dmcu->funcs->dmcu_init(dmcu);
870 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
871 }
872
873 if (!adev->dm.dc->ctx->dmub_srv)
874 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 if (!adev->dm.dc->ctx->dmub_srv) {
876 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 return -ENOMEM;
878 }
879
880 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 adev->dm.dmcub_fw_version);
882
883 return 0;
884 }
885
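/* If debugfs overrode any DSC settings on a connector, force a modeset on
 * its CRTC so the new settings take effect.
 */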
static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							    struct drm_atomic_state *state)
888 {
889 struct drm_connector *connector;
890 struct drm_crtc *crtc;
891 struct amdgpu_dm_connector *amdgpu_dm_connector;
892 struct drm_connector_state *conn_state;
893 struct dm_crtc_state *acrtc_state;
894 struct drm_crtc_state *crtc_state;
895 struct dc_stream_state *stream;
896 struct drm_device *dev = adev_to_drm(adev);
897
898 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
899
900 amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
901 conn_state = connector->state;
902
903 if (!(conn_state && conn_state->crtc))
904 continue;
905
906 crtc = conn_state->crtc;
907 acrtc_state = to_dm_crtc_state(crtc->state);
908
909 if (!(acrtc_state && acrtc_state->stream))
910 continue;
911
912 stream = acrtc_state->stream;
913
914 if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
915 amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
916 amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
917 amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
918 conn_state = drm_atomic_get_connector_state(state, connector);
919 crtc_state = drm_atomic_get_crtc_state(state, crtc);
920 crtc_state->mode_changed = true;
921 }
922 }
923 }
924
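/* Create the display manager: bring up DC, DMUB, the freesync and color
 * management modules, optional HDCP support, and the DRM-facing state
 * (connectors, encoders, CRTCs, vblank).
 */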
static int amdgpu_dm_init(struct amdgpu_device *adev)
926 {
927 struct dc_init_data init_data;
928 #ifdef CONFIG_DRM_AMD_DC_HDCP
929 struct dc_callback_init init_params;
930 #endif
931 int r;
932
933 adev->dm.ddev = adev_to_drm(adev);
934 adev->dm.adev = adev;
935
936 /* Zero all the fields */
937 memset(&init_data, 0, sizeof(init_data));
938 #ifdef CONFIG_DRM_AMD_DC_HDCP
939 memset(&init_params, 0, sizeof(init_params));
940 #endif
941
942 mutex_init(&adev->dm.dc_lock);
943 mutex_init(&adev->dm.audio_lock);
944
if (amdgpu_dm_irq_init(adev)) {
946 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
947 goto error;
948 }
949
950 init_data.asic_id.chip_family = adev->family;
951
952 init_data.asic_id.pci_revision_id = adev->pdev->revision;
953 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
954 init_data.asic_id.chip_id = adev->pdev->device;
955
956 init_data.asic_id.vram_width = adev->gmc.vram_width;
957 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
958 init_data.asic_id.atombios_base_address =
959 adev->mode_info.atom_context->bios;
960
961 init_data.driver = adev;
962
963 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
964
965 if (!adev->dm.cgs_device) {
966 DRM_ERROR("amdgpu: failed to create cgs device.\n");
967 goto error;
968 }
969
970 init_data.cgs_device = adev->dm.cgs_device;
971
972 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
973
974 switch (adev->asic_type) {
975 case CHIP_CARRIZO:
976 case CHIP_STONEY:
977 case CHIP_RAVEN:
978 case CHIP_RENOIR:
979 init_data.flags.gpu_vm_support = true;
980 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
981 init_data.flags.disable_dmcu = true;
982 break;
983 default:
984 break;
985 }
986
987 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
988 init_data.flags.fbc_support = true;
989
990 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
991 init_data.flags.multi_mon_pp_mclk_switch = true;
992
993 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
994 init_data.flags.disable_fractional_pwm = true;
995
996 init_data.flags.power_down_display_on_boot = true;
997
998 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
999
1000 /* Display Core create. */
1001 adev->dm.dc = dc_create(&init_data);
1002
1003 if (adev->dm.dc) {
1004 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1005 } else {
1006 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1007 goto error;
1008 }
1009
1010 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1011 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1012 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1013 }
1014
1015 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1016 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1017
1018 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1019 adev->dm.dc->debug.disable_stutter = true;
1020
1021 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1022 adev->dm.dc->debug.disable_dsc = true;
1023
1024 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1025 adev->dm.dc->debug.disable_clock_gate = true;
1026
1027 r = dm_dmub_hw_init(adev);
1028 if (r) {
1029 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1030 goto error;
1031 }
1032
1033 dc_hardware_init(adev->dm.dc);
1034
1035 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1036 if (!adev->dm.freesync_module) {
1037 DRM_ERROR(
1038 "amdgpu: failed to initialize freesync_module.\n");
1039 } else
1040 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1041 adev->dm.freesync_module);
1042
1043 amdgpu_dm_init_color_mod();
1044
1045 #ifdef CONFIG_DRM_AMD_DC_HDCP
1046 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1047 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1048
1049 if (!adev->dm.hdcp_workqueue)
1050 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1051 else
1052 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1053
1054 dc_init_callbacks(adev->dm.dc, &init_params);
1055 }
1056 #endif
1057 if (amdgpu_dm_initialize_drm_device(adev)) {
1058 DRM_ERROR(
1059 "amdgpu: failed to initialize sw for display support.\n");
1060 goto error;
1061 }
1062
1063 /* create fake encoders for MST */
1064 dm_dp_create_fake_mst_encoders(adev);
1065
1066 /* TODO: Add_display_info? */
1067
1068 /* TODO use dynamic cursor width */
1069 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1070 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1071
1072 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1073 DRM_ERROR(
1074 "amdgpu: failed to initialize sw for display support.\n");
1075 goto error;
1076 }
1077
1078 DRM_DEBUG_DRIVER("KMS initialized.\n");
1079
1080 return 0;
1081 error:
1082 amdgpu_dm_fini(adev);
1083
1084 return -EINVAL;
1085 }
1086
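/* Tear down everything amdgpu_dm_init() created, in roughly reverse order. */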
static void amdgpu_dm_fini(struct amdgpu_device *adev)
1088 {
1089 int i;
1090
1091 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1092 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1093 }
1094
1095 amdgpu_dm_audio_fini(adev);
1096
1097 amdgpu_dm_destroy_drm_device(&adev->dm);
1098
1099 #ifdef CONFIG_DRM_AMD_DC_HDCP
1100 if (adev->dm.hdcp_workqueue) {
1101 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1102 adev->dm.hdcp_workqueue = NULL;
1103 }
1104
1105 if (adev->dm.dc)
1106 dc_deinit_callbacks(adev->dm.dc);
1107 #endif
1108 if (adev->dm.dc->ctx->dmub_srv) {
1109 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1110 adev->dm.dc->ctx->dmub_srv = NULL;
1111 }
1112
1113 if (adev->dm.dmub_bo)
1114 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1115 &adev->dm.dmub_bo_gpu_addr,
1116 &adev->dm.dmub_bo_cpu_addr);
1117
1118 /* DC Destroy TODO: Replace destroy DAL */
1119 if (adev->dm.dc)
1120 dc_destroy(&adev->dm.dc);
1121 /*
 * TODO: pageflip, vblank interrupt
1123 *
1124 * amdgpu_dm_irq_fini(adev);
1125 */
1126
1127 if (adev->dm.cgs_device) {
1128 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1129 adev->dm.cgs_device = NULL;
1130 }
1131 if (adev->dm.freesync_module) {
1132 mod_freesync_destroy(adev->dm.freesync_module);
1133 adev->dm.freesync_module = NULL;
1134 }
1135
1136 mutex_destroy(&adev->dm.audio_lock);
1137 mutex_destroy(&adev->dm.dc_lock);
1138
1139 return;
1140 }
1141
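/* Request the DMCU firmware for ASICs that need it and register it with the
 * PSP loader; ASICs without a separate DMCU image return early.
 */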
static int load_dmcu_fw(struct amdgpu_device *adev)
1143 {
1144 const char *fw_name_dmcu = NULL;
1145 int r;
1146 const struct dmcu_firmware_header_v1_0 *hdr;
1147
switch (adev->asic_type) {
1149 #if defined(CONFIG_DRM_AMD_DC_SI)
1150 case CHIP_TAHITI:
1151 case CHIP_PITCAIRN:
1152 case CHIP_VERDE:
1153 case CHIP_OLAND:
1154 #endif
1155 case CHIP_BONAIRE:
1156 case CHIP_HAWAII:
1157 case CHIP_KAVERI:
1158 case CHIP_KABINI:
1159 case CHIP_MULLINS:
1160 case CHIP_TONGA:
1161 case CHIP_FIJI:
1162 case CHIP_CARRIZO:
1163 case CHIP_STONEY:
1164 case CHIP_POLARIS11:
1165 case CHIP_POLARIS10:
1166 case CHIP_POLARIS12:
1167 case CHIP_VEGAM:
1168 case CHIP_VEGA10:
1169 case CHIP_VEGA12:
1170 case CHIP_VEGA20:
1171 case CHIP_NAVI10:
1172 case CHIP_NAVI14:
1173 case CHIP_RENOIR:
1174 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1175 case CHIP_SIENNA_CICHLID:
1176 case CHIP_NAVY_FLOUNDER:
1177 #endif
1178 return 0;
1179 case CHIP_NAVI12:
1180 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1181 break;
1182 case CHIP_RAVEN:
1183 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1184 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1185 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1186 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1187 else
1188 return 0;
1189 break;
1190 default:
1191 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1192 return -EINVAL;
1193 }
1194
1195 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1196 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1197 return 0;
1198 }
1199
1200 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1201 if (r == -ENOENT) {
1202 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1203 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1204 adev->dm.fw_dmcu = NULL;
1205 return 0;
1206 }
1207 if (r) {
1208 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1209 fw_name_dmcu);
1210 return r;
1211 }
1212
1213 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1214 if (r) {
1215 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1216 fw_name_dmcu);
1217 release_firmware(adev->dm.fw_dmcu);
1218 adev->dm.fw_dmcu = NULL;
1219 return r;
1220 }
1221
1222 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1223 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1224 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1225 adev->firmware.fw_size +=
1226 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1227
1228 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1229 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1230 adev->firmware.fw_size +=
1231 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1232
1233 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1234
1235 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1236
1237 return 0;
1238 }
1239
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1241 {
1242 struct amdgpu_device *adev = ctx;
1243
1244 return dm_read_reg(adev->dm.dc->ctx, address);
1245 }
1246
static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
1249 {
1250 struct amdgpu_device *adev = ctx;
1251
1252 return dm_write_reg(adev->dm.dc->ctx, address, value);
1253 }
1254
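/* Software-side DMUB setup: request and validate the DMUB firmware, create
 * the DMUB service, and carve its regions out of a VRAM buffer.
 */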
static int dm_dmub_sw_init(struct amdgpu_device *adev)
1256 {
1257 struct dmub_srv_create_params create_params;
1258 struct dmub_srv_region_params region_params;
1259 struct dmub_srv_region_info region_info;
1260 struct dmub_srv_fb_params fb_params;
1261 struct dmub_srv_fb_info *fb_info;
1262 struct dmub_srv *dmub_srv;
1263 const struct dmcub_firmware_header_v1_0 *hdr;
1264 const char *fw_name_dmub;
1265 enum dmub_asic dmub_asic;
1266 enum dmub_status status;
1267 int r;
1268
1269 switch (adev->asic_type) {
1270 case CHIP_RENOIR:
1271 dmub_asic = DMUB_ASIC_DCN21;
1272 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1273 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1274 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1275 break;
1276 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1277 case CHIP_SIENNA_CICHLID:
1278 dmub_asic = DMUB_ASIC_DCN30;
1279 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1280 break;
1281 case CHIP_NAVY_FLOUNDER:
1282 dmub_asic = DMUB_ASIC_DCN30;
1283 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1284 break;
1285 #endif
1286
1287 default:
1288 /* ASIC doesn't support DMUB. */
1289 return 0;
1290 }
1291
1292 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1293 if (r) {
1294 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1295 return 0;
1296 }
1297
1298 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1299 if (r) {
1300 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1301 return 0;
1302 }
1303
1304 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1305 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1306
1307 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1308 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1309 AMDGPU_UCODE_ID_DMCUB;
1310 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1311 adev->dm.dmub_fw;
1312 adev->firmware.fw_size +=
1313 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1314
1315 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1316 adev->dm.dmcub_fw_version);
1317 }
1318
1319
1320 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1321 dmub_srv = adev->dm.dmub_srv;
1322
1323 if (!dmub_srv) {
1324 DRM_ERROR("Failed to allocate DMUB service!\n");
1325 return -ENOMEM;
1326 }
1327
1328 memset(&create_params, 0, sizeof(create_params));
1329 create_params.user_ctx = adev;
1330 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1331 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1332 create_params.asic = dmub_asic;
1333
1334 /* Create the DMUB service. */
1335 status = dmub_srv_create(dmub_srv, &create_params);
1336 if (status != DMUB_STATUS_OK) {
1337 DRM_ERROR("Error creating DMUB service: %d\n", status);
1338 return -EINVAL;
1339 }
1340
1341 /* Calculate the size of all the regions for the DMUB service. */
memset(&region_params, 0, sizeof(region_params));
1343
1344 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1345 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1346 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1347 region_params.vbios_size = adev->bios_size;
1348 region_params.fw_bss_data = region_params.bss_data_size ?
1349 adev->dm.dmub_fw->data +
1350 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1351 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1352 region_params.fw_inst_const =
1353 adev->dm.dmub_fw->data +
1354 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1355 PSP_HEADER_BYTES;
1356
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
				   &region_info);
1359
1360 if (status != DMUB_STATUS_OK) {
1361 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1362 return -EINVAL;
1363 }
1364
1365 /*
1366 * Allocate a framebuffer based on the total size of all the regions.
1367 * TODO: Move this into GART.
1368 */
1369 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1370 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1371 &adev->dm.dmub_bo_gpu_addr,
1372 &adev->dm.dmub_bo_cpu_addr);
1373 if (r)
1374 return r;
1375
1376 /* Rebase the regions on the framebuffer address. */
1377 memset(&fb_params, 0, sizeof(fb_params));
1378 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1379 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
fb_params.region_info = &region_info;
1381
1382 adev->dm.dmub_fb_info =
1383 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1384 fb_info = adev->dm.dmub_fb_info;
1385
1386 if (!fb_info) {
1387 DRM_ERROR(
1388 "Failed to allocate framebuffer info for DMUB service!\n");
1389 return -ENOMEM;
1390 }
1391
1392 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1393 if (status != DMUB_STATUS_OK) {
1394 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1395 return -EINVAL;
1396 }
1397
1398 return 0;
1399 }
1400
static int dm_sw_init(void *handle)
1402 {
1403 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1404 int r;
1405
1406 r = dm_dmub_sw_init(adev);
1407 if (r)
1408 return r;
1409
1410 return load_dmcu_fw(adev);
1411 }
1412
static int dm_sw_fini(void *handle)
1414 {
1415 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1416
1417 kfree(adev->dm.dmub_fb_info);
1418 adev->dm.dmub_fb_info = NULL;
1419
1420 if (adev->dm.dmub_srv) {
1421 dmub_srv_destroy(adev->dm.dmub_srv);
1422 adev->dm.dmub_srv = NULL;
1423 }
1424
1425 release_firmware(adev->dm.dmub_fw);
1426 adev->dm.dmub_fw = NULL;
1427
1428 release_firmware(adev->dm.fw_dmcu);
1429 adev->dm.fw_dmcu = NULL;
1430
1431 return 0;
1432 }
1433
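/* Start MST topology management on every connector that detected an MST
 * branch device.
 */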
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1435 {
1436 struct amdgpu_dm_connector *aconnector;
1437 struct drm_connector *connector;
1438 struct drm_connector_list_iter iter;
1439 int ret = 0;
1440
1441 drm_connector_list_iter_begin(dev, &iter);
1442 drm_for_each_connector_iter(connector, &iter) {
1443 aconnector = to_amdgpu_dm_connector(connector);
1444 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1445 aconnector->mst_mgr.aux) {
1446 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1447 aconnector,
1448 aconnector->base.base.id);
1449
1450 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1451 if (ret < 0) {
1452 DRM_ERROR("DM_MST: Failed to start MST\n");
1453 aconnector->dc_link->type =
1454 dc_connection_single;
1455 break;
1456 }
1457 }
1458 }
1459 drm_connector_list_iter_end(&iter);
1460
1461 return ret;
1462 }
1463
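/* Late init: program the ABM backlight ramping/IRAM parameters (via DMCU
 * or DMUB, whichever is present) and detect MST links on all connectors.
 */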
static int dm_late_init(void *handle)
1465 {
1466 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1467
1468 struct dmcu_iram_parameters params;
1469 unsigned int linear_lut[16];
1470 int i;
1471 struct dmcu *dmcu = NULL;
1472 bool ret = true;
1473
1474 dmcu = adev->dm.dc->res_pool->dmcu;
1475
1476 for (i = 0; i < 16; i++)
1477 linear_lut[i] = 0xFFFF * i / 15;
1478
1479 params.set = 0;
1480 params.backlight_ramping_start = 0xCCCC;
1481 params.backlight_ramping_reduction = 0xCCCCCCCC;
1482 params.backlight_lut_array_size = 16;
1483 params.backlight_lut_array = linear_lut;
1484
/* Min backlight level after ABM reduction; don't allow below 1%
1486 * 0xFFFF x 0.01 = 0x28F
1487 */
1488 params.min_abm_backlight = 0x28F;
1489
1490 /* In the case where abm is implemented on dmcub,
1491 * dmcu object will be null.
1492 * ABM 2.4 and up are implemented on dmcub.
1493 */
1494 if (dmcu)
1495 ret = dmcu_load_iram(dmcu, params);
1496 else if (adev->dm.dc->ctx->dmub_srv)
1497 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1498
1499 if (!ret)
1500 return -EINVAL;
1501
1502 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1503 }
1504
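/* Suspend or resume the MST topology managers around S3. If a manager fails
 * to resume, tear down MST on it and send a hotplug event so userspace
 * re-probes the link.
 */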
static void s3_handle_mst(struct drm_device *dev, bool suspend)
1506 {
1507 struct amdgpu_dm_connector *aconnector;
1508 struct drm_connector *connector;
1509 struct drm_connector_list_iter iter;
1510 struct drm_dp_mst_topology_mgr *mgr;
1511 int ret;
1512 bool need_hotplug = false;
1513
1514 drm_connector_list_iter_begin(dev, &iter);
1515 drm_for_each_connector_iter(connector, &iter) {
1516 aconnector = to_amdgpu_dm_connector(connector);
1517 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1518 aconnector->mst_port)
1519 continue;
1520
1521 mgr = &aconnector->mst_mgr;
1522
1523 if (suspend) {
1524 drm_dp_mst_topology_mgr_suspend(mgr);
1525 } else {
1526 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1527 if (ret < 0) {
1528 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1529 need_hotplug = true;
1530 }
1531 }
1532 }
1533 drm_connector_list_iter_end(&iter);
1534
1535 if (need_hotplug)
1536 drm_kms_helper_hotplug_event(dev);
1537 }
1538
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1540 {
1541 struct smu_context *smu = &adev->smu;
1542 int ret = 0;
1543
1544 if (!is_support_sw_smu(adev))
1545 return 0;
1546
/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
 * on the Windows driver dc implementation.
1549 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1550 * should be passed to smu during boot up and resume from s3.
1551 * boot up: dc calculate dcn watermark clock settings within dc_create,
1552 * dcn20_resource_construct
1553 * then call pplib functions below to pass the settings to smu:
1554 * smu_set_watermarks_for_clock_ranges
1555 * smu_set_watermarks_table
1556 * navi10_set_watermarks_table
1557 * smu_write_watermarks_table
1558 *
1559 * For Renoir, clock settings of dcn watermark are also fixed values.
1560 * dc has implemented different flow for window driver:
1561 * dc_hardware_init / dc_set_power_state
1562 * dcn10_init_hw
1563 * notify_wm_ranges
1564 * set_wm_ranges
1565 * -- Linux
1566 * smu_set_watermarks_for_clock_ranges
1567 * renoir_set_watermarks_table
1568 * smu_write_watermarks_table
1569 *
1570 * For Linux,
1571 * dc_hardware_init -> amdgpu_dm_init
1572 * dc_set_power_state --> dm_resume
1573 *
1574 * therefore, this function apply to navi10/12/14 but not Renoir
1575 * *
1576 */
switch (adev->asic_type) {
1578 case CHIP_NAVI10:
1579 case CHIP_NAVI14:
1580 case CHIP_NAVI12:
1581 break;
1582 default:
1583 return 0;
1584 }
1585
1586 ret = smu_write_watermarks_table(smu);
1587 if (ret) {
1588 DRM_ERROR("Failed to update WMTABLE!\n");
1589 return ret;
1590 }
1591
1592 return 0;
1593 }
1594
1595 /**
1596 * dm_hw_init() - Initialize DC device
1597 * @handle: The base driver device containing the amdgpu_dm device.
1598 *
1599 * Initialize the &struct amdgpu_display_manager device. This involves calling
1600 * the initializers of each DM component, then populating the struct with them.
1601 *
1602 * Although the function implies hardware initialization, both hardware and
1603 * software are initialized here. Splitting them out to their relevant init
1604 * hooks is a future TODO item.
1605 *
1606 * Some notable things that are initialized here:
1607 *
1608 * - Display Core, both software and hardware
1609 * - DC modules that we need (freesync and color management)
1610 * - DRM software states
1611 * - Interrupt sources and handlers
1612 * - Vblank support
1613 * - Debug FS entries, if enabled
1614 */
static int dm_hw_init(void *handle)
1616 {
1617 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1618 /* Create DAL display manager */
1619 amdgpu_dm_init(adev);
1620 amdgpu_dm_hpd_init(adev);
1621
1622 return 0;
1623 }
1624
1625 /**
1626 * dm_hw_fini() - Teardown DC device
1627 * @handle: The base driver device containing the amdgpu_dm device.
1628 *
1629 * Teardown components within &struct amdgpu_display_manager that require
1630 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1631 * were loaded. Also flush IRQ workqueues and disable them.
1632 */
static int dm_hw_fini(void *handle)
1634 {
1635 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1636
1637 amdgpu_dm_hpd_fini(adev);
1638
1639 amdgpu_dm_irq_fini(adev);
1640 amdgpu_dm_fini(adev);
1641 return 0;
1642 }
1643
1644
1645 static int dm_enable_vblank(struct drm_crtc *crtc);
1646 static void dm_disable_vblank(struct drm_crtc *crtc);
1647
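/* Enable or disable pageflip and vblank interrupts for every stream in
 * @state that still has planes; used while entering and leaving GPU reset.
 */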
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
1650 {
1651 enum dc_irq_source irq_source;
1652 struct amdgpu_crtc *acrtc;
1653 int rc = -EBUSY;
1654 int i = 0;
1655
1656 for (i = 0; i < state->stream_count; i++) {
1657 acrtc = get_crtc_by_otg_inst(
1658 adev, state->stream_status[i].primary_otg_inst);
1659
1660 if (acrtc && state->stream_status[i].plane_count != 0) {
1661 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1662 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1663 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1664 acrtc->crtc_id, enable ? "en" : "dis", rc);
1665 if (rc)
1666 DRM_WARN("Failed to %s pflip interrupts\n",
1667 enable ? "enable" : "disable");
1668
1669 if (enable) {
1670 rc = dm_enable_vblank(&acrtc->base);
1671 if (rc)
1672 DRM_WARN("Failed to enable vblank interrupts\n");
1673 } else {
1674 dm_disable_vblank(&acrtc->base);
1675 }
1676
1677 }
1678 }
1679
1680 }
1681
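/* Commit an empty DC state: strip all planes and streams from a copy of the
 * current context and commit it, leaving the hardware with no active
 * streams (used around GPU reset and suspend).
 */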
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1683 {
1684 struct dc_state *context = NULL;
1685 enum dc_status res = DC_ERROR_UNEXPECTED;
1686 int i;
1687 struct dc_stream_state *del_streams[MAX_PIPES];
1688 int del_streams_count = 0;
1689
1690 memset(del_streams, 0, sizeof(del_streams));
1691
1692 context = dc_create_state(dc);
1693 if (context == NULL)
1694 goto context_alloc_fail;
1695
1696 dc_resource_state_copy_construct_current(dc, context);
1697
1698 /* First remove from context all streams */
1699 for (i = 0; i < context->stream_count; i++) {
1700 struct dc_stream_state *stream = context->streams[i];
1701
1702 del_streams[del_streams_count++] = stream;
1703 }
1704
1705 /* Remove all planes for removed streams and then remove the streams */
1706 for (i = 0; i < del_streams_count; i++) {
1707 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1708 res = DC_FAIL_DETACH_SURFACES;
1709 goto fail;
1710 }
1711
1712 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1713 if (res != DC_OK)
1714 goto fail;
1715 }
1716
1717
1718 res = dc_validate_global_state(dc, context, false);
1719
1720 if (res != DC_OK) {
1721 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1722 goto fail;
1723 }
1724
1725 res = dc_commit_state(dc, context);
1726
1727 fail:
1728 dc_release_state(context);
1729
1730 context_alloc_fail:
1731 return res;
1732 }
1733
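/* Suspend display: during GPU reset, cache the current DC state and commit
 * zero streams; otherwise suspend the DRM atomic state, MST and IRQs and
 * put DC into D3.
 */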
static int dm_suspend(void *handle)
1735 {
1736 struct amdgpu_device *adev = handle;
1737 struct amdgpu_display_manager *dm = &adev->dm;
1738 int ret = 0;
1739
1740 if (amdgpu_in_reset(adev)) {
1741 mutex_lock(&dm->dc_lock);
1742 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1743
1744 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1745
1746 amdgpu_dm_commit_zero_streams(dm->dc);
1747
1748 amdgpu_dm_irq_suspend(adev);
1749
1750 return ret;
1751 }
1752
1753 WARN_ON(adev->dm.cached_state);
1754 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1755
1756 s3_handle_mst(adev_to_drm(adev), true);
1757
1758 amdgpu_dm_irq_suspend(adev);
1759
1760 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1761
1762 return 0;
1763 }
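/*
 * Note (informational): in the amdgpu_in_reset() branch above, dm->dc_lock is
 * intentionally left held across the reset; the matching mutex_unlock() is
 * issued at the end of the corresponding branch in dm_resume(), once the
 * cached DC state has been re-committed.
 */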
1764
1765 static struct amdgpu_dm_connector *
1766 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1767 struct drm_crtc *crtc)
1768 {
1769 uint32_t i;
1770 struct drm_connector_state *new_con_state;
1771 struct drm_connector *connector;
1772 struct drm_crtc *crtc_from_state;
1773
1774 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1775 crtc_from_state = new_con_state->crtc;
1776
1777 if (crtc_from_state == crtc)
1778 return to_amdgpu_dm_connector(connector);
1779 }
1780
1781 return NULL;
1782 }
1783
1784 static void emulated_link_detect(struct dc_link *link)
1785 {
1786 struct dc_sink_init_data sink_init_data = { 0 };
1787 struct display_sink_capability sink_caps = { 0 };
1788 enum dc_edid_status edid_status;
1789 struct dc_context *dc_ctx = link->ctx;
1790 struct dc_sink *sink = NULL;
1791 struct dc_sink *prev_sink = NULL;
1792
1793 link->type = dc_connection_none;
1794 prev_sink = link->local_sink;
1795
1796 if (prev_sink)
1797 dc_sink_release(prev_sink);
1798
1799 switch (link->connector_signal) {
1800 case SIGNAL_TYPE_HDMI_TYPE_A: {
1801 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1802 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1803 break;
1804 }
1805
1806 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1807 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1808 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1809 break;
1810 }
1811
1812 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1813 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1814 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1815 break;
1816 }
1817
1818 case SIGNAL_TYPE_LVDS: {
1819 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1820 sink_caps.signal = SIGNAL_TYPE_LVDS;
1821 break;
1822 }
1823
1824 case SIGNAL_TYPE_EDP: {
1825 sink_caps.transaction_type =
1826 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1827 sink_caps.signal = SIGNAL_TYPE_EDP;
1828 break;
1829 }
1830
1831 case SIGNAL_TYPE_DISPLAY_PORT: {
1832 sink_caps.transaction_type =
1833 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1834 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1835 break;
1836 }
1837
1838 default:
1839 DC_ERROR("Invalid connector type! signal:%d\n",
1840 link->connector_signal);
1841 return;
1842 }
1843
1844 sink_init_data.link = link;
1845 sink_init_data.sink_signal = sink_caps.signal;
1846
1847 sink = dc_sink_create(&sink_init_data);
1848 if (!sink) {
1849 DC_ERROR("Failed to create sink!\n");
1850 return;
1851 }
1852
1853 /* dc_sink_create returns a new reference */
1854 link->local_sink = sink;
1855
1856 edid_status = dm_helpers_read_local_edid(
1857 link->ctx,
1858 link,
1859 sink);
1860
1861 if (edid_status != EDID_OK)
1862 DC_ERROR("Failed to read EDID");
1863
1864 }
1865
1866 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1867 struct amdgpu_display_manager *dm)
1868 {
1869 struct {
1870 struct dc_surface_update surface_updates[MAX_SURFACES];
1871 struct dc_plane_info plane_infos[MAX_SURFACES];
1872 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1873 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1874 struct dc_stream_update stream_update;
1875 } *bundle;
1876 int k, m;
1877
1878 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1879
1880 if (!bundle) {
1881 dm_error("Failed to allocate update bundle\n");
1882 goto cleanup;
1883 }
1884
1885 for (k = 0; k < dc_state->stream_count; k++) {
1886 bundle->stream_update.stream = dc_state->streams[k];
1887
1888 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1889 bundle->surface_updates[m].surface =
1890 dc_state->stream_status->plane_states[m];
1891 bundle->surface_updates[m].surface->force_full_update =
1892 true;
1893 }
1894 dc_commit_updates_for_stream(
1895 dm->dc, bundle->surface_updates,
1896 dc_state->stream_status->plane_count,
1897 dc_state->streams[k], &bundle->stream_update, dc_state);
1898 }
1899
1900 cleanup:
1901 kfree(bundle);
1902
1903 return;
1904 }
1905
1906 static void dm_set_dpms_off(struct dc_link *link)
1907 {
1908 struct dc_stream_state *stream_state;
1909 struct amdgpu_dm_connector *aconnector = link->priv;
1910 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1911 struct dc_stream_update stream_update;
1912 bool dpms_off = true;
1913
1914 memset(&stream_update, 0, sizeof(stream_update));
1915 stream_update.dpms_off = &dpms_off;
1916
1917 mutex_lock(&adev->dm.dc_lock);
1918 stream_state = dc_stream_find_from_link(link);
1919
1920 if (stream_state == NULL) {
1921 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1922 mutex_unlock(&adev->dm.dc_lock);
1923 return;
1924 }
1925
1926 stream_update.stream = stream_state;
1927 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1928 stream_state, &stream_update,
1929 stream_state->ctx->dc->current_state);
1930 mutex_unlock(&adev->dm.dc_lock);
1931 }
1932
1933 static int dm_resume(void *handle)
1934 {
1935 struct amdgpu_device *adev = handle;
1936 struct drm_device *ddev = adev_to_drm(adev);
1937 struct amdgpu_display_manager *dm = &adev->dm;
1938 struct amdgpu_dm_connector *aconnector;
1939 struct drm_connector *connector;
1940 struct drm_connector_list_iter iter;
1941 struct drm_crtc *crtc;
1942 struct drm_crtc_state *new_crtc_state;
1943 struct dm_crtc_state *dm_new_crtc_state;
1944 struct drm_plane *plane;
1945 struct drm_plane_state *new_plane_state;
1946 struct dm_plane_state *dm_new_plane_state;
1947 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1948 enum dc_connection_type new_connection_type = dc_connection_none;
1949 struct dc_state *dc_state;
1950 int i, r, j;
1951
1952 if (amdgpu_in_reset(adev)) {
1953 dc_state = dm->cached_dc_state;
1954
1955 r = dm_dmub_hw_init(adev);
1956 if (r)
1957 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1958
1959 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1960 dc_resume(dm->dc);
1961
1962 amdgpu_dm_irq_resume_early(adev);
1963
1964 for (i = 0; i < dc_state->stream_count; i++) {
1965 dc_state->streams[i]->mode_changed = true;
1966 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1967 dc_state->stream_status->plane_states[j]->update_flags.raw
1968 = 0xffffffff;
1969 }
1970 }
1971
1972 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1973
1974 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1975
1976 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1977
1978 dc_release_state(dm->cached_dc_state);
1979 dm->cached_dc_state = NULL;
1980
1981 amdgpu_dm_irq_resume_late(adev);
1982
1983 mutex_unlock(&dm->dc_lock);
1984
1985 return 0;
1986 }
1987 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1988 dc_release_state(dm_state->context);
1989 dm_state->context = dc_create_state(dm->dc);
1990 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1991 dc_resource_state_construct(dm->dc, dm_state->context);
1992
1993 /* Before powering on DC we need to re-initialize DMUB. */
1994 r = dm_dmub_hw_init(adev);
1995 if (r)
1996 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1997
1998 /* power on hardware */
1999 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2000
2001 /* program HPD filter */
2002 dc_resume(dm->dc);
2003
2004 /*
2005 * Enable HPD Rx IRQ early; this should be done before setting the mode, as
2006 * short pulse interrupts are used for MST
2007 */
2008 amdgpu_dm_irq_resume_early(adev);
2009
2010 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2011 s3_handle_mst(ddev, false);
2012
2013 /* Do detection*/
2014 drm_connector_list_iter_begin(ddev, &iter);
2015 drm_for_each_connector_iter(connector, &iter) {
2016 aconnector = to_amdgpu_dm_connector(connector);
2017
2018 /*
2019 * this is the case when traversing through already created MST
2020 * connectors; they should be skipped
2021 */
2022 if (aconnector->mst_port)
2023 continue;
2024
2025 mutex_lock(&aconnector->hpd_lock);
2026 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2027 DRM_ERROR("KMS: Failed to detect connector\n");
2028
2029 if (aconnector->base.force && new_connection_type == dc_connection_none)
2030 emulated_link_detect(aconnector->dc_link);
2031 else
2032 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2033
2034 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2035 aconnector->fake_enable = false;
2036
2037 if (aconnector->dc_sink)
2038 dc_sink_release(aconnector->dc_sink);
2039 aconnector->dc_sink = NULL;
2040 amdgpu_dm_update_connector_after_detect(aconnector);
2041 mutex_unlock(&aconnector->hpd_lock);
2042 }
2043 drm_connector_list_iter_end(&iter);
2044
2045 /* Force mode set in atomic commit */
2046 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2047 new_crtc_state->active_changed = true;
2048
2049 /*
2050 * atomic_check is expected to create the dc states. We need to release
2051 * them here, since they were duplicated as part of the suspend
2052 * procedure.
2053 */
2054 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2055 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2056 if (dm_new_crtc_state->stream) {
2057 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2058 dc_stream_release(dm_new_crtc_state->stream);
2059 dm_new_crtc_state->stream = NULL;
2060 }
2061 }
2062
2063 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2064 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2065 if (dm_new_plane_state->dc_state) {
2066 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2067 dc_plane_state_release(dm_new_plane_state->dc_state);
2068 dm_new_plane_state->dc_state = NULL;
2069 }
2070 }
2071
2072 drm_atomic_helper_resume(ddev, dm->cached_state);
2073
2074 dm->cached_state = NULL;
2075
2076 amdgpu_dm_irq_resume_late(adev);
2077
2078 amdgpu_dm_smu_write_watermarks_table(adev);
2079
2080 return 0;
2081 }
2082
2083 /**
2084 * DOC: DM Lifecycle
2085 *
2086 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2087 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2088 * the base driver's device list to be initialized and torn down accordingly.
2089 *
2090 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2091 */
2092
2093 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2094 .name = "dm",
2095 .early_init = dm_early_init,
2096 .late_init = dm_late_init,
2097 .sw_init = dm_sw_init,
2098 .sw_fini = dm_sw_fini,
2099 .hw_init = dm_hw_init,
2100 .hw_fini = dm_hw_fini,
2101 .suspend = dm_suspend,
2102 .resume = dm_resume,
2103 .is_idle = dm_is_idle,
2104 .wait_for_idle = dm_wait_for_idle,
2105 .check_soft_reset = dm_check_soft_reset,
2106 .soft_reset = dm_soft_reset,
2107 .set_clockgating_state = dm_set_clockgating_state,
2108 .set_powergating_state = dm_set_powergating_state,
2109 };
2110
2111 const struct amdgpu_ip_block_version dm_ip_block =
2112 {
2113 .type = AMD_IP_BLOCK_TYPE_DCE,
2114 .major = 1,
2115 .minor = 0,
2116 .rev = 0,
2117 .funcs = &amdgpu_dm_funcs,
2118 };
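/*
 * For context (illustrative sketch, not part of this file): the base driver
 * picks up &dm_ip_block from its per-ASIC IP block list, roughly like
 *
 *	r = amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *	if (r)
 *		return r;
 *
 * after which the amd_ip_funcs hooks above are invoked at the corresponding
 * points of the device lifecycle.
 */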
2119
2120
2121 /**
2122 * DOC: atomic
2123 *
2124 * *WIP*
2125 */
2126
2127 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2128 .fb_create = amdgpu_display_user_framebuffer_create,
2129 .output_poll_changed = drm_fb_helper_output_poll_changed,
2130 .atomic_check = amdgpu_dm_atomic_check,
2131 .atomic_commit = amdgpu_dm_atomic_commit,
2132 };
2133
2134 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2135 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2136 };
2137
2138 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2139 {
2140 u32 max_cll, min_cll, max, min, q, r;
2141 struct amdgpu_dm_backlight_caps *caps;
2142 struct amdgpu_display_manager *dm;
2143 struct drm_connector *conn_base;
2144 struct amdgpu_device *adev;
2145 struct dc_link *link = NULL;
2146 static const u8 pre_computed_values[] = {
2147 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2148 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2149
2150 if (!aconnector || !aconnector->dc_link)
2151 return;
2152
2153 link = aconnector->dc_link;
2154 if (link->connector_signal != SIGNAL_TYPE_EDP)
2155 return;
2156
2157 conn_base = &aconnector->base;
2158 adev = drm_to_adev(conn_base->dev);
2159 dm = &adev->dm;
2160 caps = &dm->backlight_caps;
2161 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2162 caps->aux_support = false;
2163 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2164 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2165
2166 if (caps->ext_caps->bits.oled == 1 /*||
2167 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2168 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2169 caps->aux_support = true;
2170
2171 if (amdgpu_backlight == 0)
2172 caps->aux_support = false;
2173 else if (amdgpu_backlight == 1)
2174 caps->aux_support = true;
2175
2176 /* From the specification (CTA-861-G), for calculating the maximum
2177 * luminance we need to use:
2178 * Luminance = 50*2**(CV/32)
2179 * Where CV is a one-byte value.
2180 * For calculating this expression we may need floating point precision;
2181 * to avoid this complexity level, we take advantage that CV is divided
2182 * by a constant. From Euclid's division algorithm, we know that CV
2183 * can be written as: CV = 32*q + r. Next, we replace CV in the
2184 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2185 * need to pre-compute the value of r/32. For pre-computing the values
2186 * we just used the following Ruby line:
2187 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2188 * The results of the above expressions can be verified at
2189 * pre_computed_values.
2190 */
2191 q = max_cll >> 5;
2192 r = max_cll % 32;
2193 max = (1 << q) * pre_computed_values[r];
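	/*
	 * Illustrative check of the formula above (example value only):
	 * max_cll = 65 gives q = 2 and r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
	 * which is close to the exact 50 * 2**(65/32) ~= 204.4.
	 */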
2194
2195 // min luminance: maxLum * (CV/255)^2 / 100
2196 q = DIV_ROUND_CLOSEST(min_cll, 255);
2197 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2198
2199 caps->aux_max_input_signal = max;
2200 caps->aux_min_input_signal = min;
2201 }
2202
2203 void amdgpu_dm_update_connector_after_detect(
2204 struct amdgpu_dm_connector *aconnector)
2205 {
2206 struct drm_connector *connector = &aconnector->base;
2207 struct drm_device *dev = connector->dev;
2208 struct dc_sink *sink;
2209
2210 /* MST handled by drm_mst framework */
2211 if (aconnector->mst_mgr.mst_state == true)
2212 return;
2213
2214 sink = aconnector->dc_link->local_sink;
2215 if (sink)
2216 dc_sink_retain(sink);
2217
2218 /*
2219 * Edid mgmt connector gets first update only in mode_valid hook and then
2220 * the connector sink is set to either fake or physical sink depending on link status.
2221 * Skip if already done during boot.
2222 */
2223 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2224 && aconnector->dc_em_sink) {
2225
2226 /*
2227 * For headless S3 resume, use em_sink to fake a stream
2228 * because on resume connector->sink is set to NULL
2229 */
2230 mutex_lock(&dev->mode_config.mutex);
2231
2232 if (sink) {
2233 if (aconnector->dc_sink) {
2234 amdgpu_dm_update_freesync_caps(connector, NULL);
2235 /*
2236 * retain and release below are used to
2237 * bump up refcount for sink because the link doesn't point
2238 * to it anymore after disconnect, so on next crtc to connector
2239 * reshuffle by UMD we will get into unwanted dc_sink release
2240 */
2241 dc_sink_release(aconnector->dc_sink);
2242 }
2243 aconnector->dc_sink = sink;
2244 dc_sink_retain(aconnector->dc_sink);
2245 amdgpu_dm_update_freesync_caps(connector,
2246 aconnector->edid);
2247 } else {
2248 amdgpu_dm_update_freesync_caps(connector, NULL);
2249 if (!aconnector->dc_sink) {
2250 aconnector->dc_sink = aconnector->dc_em_sink;
2251 dc_sink_retain(aconnector->dc_sink);
2252 }
2253 }
2254
2255 mutex_unlock(&dev->mode_config.mutex);
2256
2257 if (sink)
2258 dc_sink_release(sink);
2259 return;
2260 }
2261
2262 /*
2263 * TODO: temporary guard to look for proper fix
2264 * if this sink is MST sink, we should not do anything
2265 */
2266 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2267 dc_sink_release(sink);
2268 return;
2269 }
2270
2271 if (aconnector->dc_sink == sink) {
2272 /*
2273 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2274 * Do nothing!!
2275 */
2276 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2277 aconnector->connector_id);
2278 if (sink)
2279 dc_sink_release(sink);
2280 return;
2281 }
2282
2283 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2284 aconnector->connector_id, aconnector->dc_sink, sink);
2285
2286 mutex_lock(&dev->mode_config.mutex);
2287
2288 /*
2289 * 1. Update status of the drm connector
2290 * 2. Send an event and let userspace tell us what to do
2291 */
2292 if (sink) {
2293 /*
2294 * TODO: check if we still need the S3 mode update workaround.
2295 * If yes, put it here.
2296 */
2297 if (aconnector->dc_sink) {
2298 amdgpu_dm_update_freesync_caps(connector, NULL);
2299 dc_sink_release(aconnector->dc_sink);
2300 }
2301
2302 aconnector->dc_sink = sink;
2303 dc_sink_retain(aconnector->dc_sink);
2304 if (sink->dc_edid.length == 0) {
2305 aconnector->edid = NULL;
2306 if (aconnector->dc_link->aux_mode) {
2307 drm_dp_cec_unset_edid(
2308 &aconnector->dm_dp_aux.aux);
2309 }
2310 } else {
2311 aconnector->edid =
2312 (struct edid *)sink->dc_edid.raw_edid;
2313
2314 drm_connector_update_edid_property(connector,
2315 aconnector->edid);
2316 if (aconnector->dc_link->aux_mode)
2317 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2318 aconnector->edid);
2319 }
2320
2321 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2322 update_connector_ext_caps(aconnector);
2323 } else {
2324 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2325 amdgpu_dm_update_freesync_caps(connector, NULL);
2326 drm_connector_update_edid_property(connector, NULL);
2327 aconnector->num_modes = 0;
2328 dc_sink_release(aconnector->dc_sink);
2329 aconnector->dc_sink = NULL;
2330 aconnector->edid = NULL;
2331 #ifdef CONFIG_DRM_AMD_DC_HDCP
2332 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2333 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2334 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2335 #endif
2336 }
2337
2338 mutex_unlock(&dev->mode_config.mutex);
2339
2340 update_subconnector_property(aconnector);
2341
2342 if (sink)
2343 dc_sink_release(sink);
2344 }
2345
2346 static void handle_hpd_irq(void *param)
2347 {
2348 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2349 struct drm_connector *connector = &aconnector->base;
2350 struct drm_device *dev = connector->dev;
2351 enum dc_connection_type new_connection_type = dc_connection_none;
2352 #ifdef CONFIG_DRM_AMD_DC_HDCP
2353 struct amdgpu_device *adev = drm_to_adev(dev);
2354 #endif
2355
2356 /*
2357 * In case of failure or MST, there is no need to update the connector status or notify the OS,
2358 * since (in the MST case) MST does this in its own context.
2359 */
2360 mutex_lock(&aconnector->hpd_lock);
2361
2362 #ifdef CONFIG_DRM_AMD_DC_HDCP
2363 if (adev->dm.hdcp_workqueue)
2364 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2365 #endif
2366 if (aconnector->fake_enable)
2367 aconnector->fake_enable = false;
2368
2369 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2370 DRM_ERROR("KMS: Failed to detect connector\n");
2371
2372 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2373 emulated_link_detect(aconnector->dc_link);
2374
2375
2376 drm_modeset_lock_all(dev);
2377 dm_restore_drm_connector_state(dev, connector);
2378 drm_modeset_unlock_all(dev);
2379
2380 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2381 drm_kms_helper_hotplug_event(dev);
2382
2383 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2384 if (new_connection_type == dc_connection_none &&
2385 aconnector->dc_link->type == dc_connection_none)
2386 dm_set_dpms_off(aconnector->dc_link);
2387
2388 amdgpu_dm_update_connector_after_detect(aconnector);
2389
2390 drm_modeset_lock_all(dev);
2391 dm_restore_drm_connector_state(dev, connector);
2392 drm_modeset_unlock_all(dev);
2393
2394 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2395 drm_kms_helper_hotplug_event(dev);
2396 }
2397 mutex_unlock(&aconnector->hpd_lock);
2398
2399 }
2400
2401 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2402 {
2403 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2404 uint8_t dret;
2405 bool new_irq_handled = false;
2406 int dpcd_addr;
2407 int dpcd_bytes_to_read;
2408
2409 const int max_process_count = 30;
2410 int process_count = 0;
2411
2412 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2413
2414 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2415 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2416 /* DPCD 0x200 - 0x201 for downstream IRQ */
2417 dpcd_addr = DP_SINK_COUNT;
2418 } else {
2419 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2420 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2421 dpcd_addr = DP_SINK_COUNT_ESI;
2422 }
2423
2424 dret = drm_dp_dpcd_read(
2425 &aconnector->dm_dp_aux.aux,
2426 dpcd_addr,
2427 esi,
2428 dpcd_bytes_to_read);
2429
2430 while (dret == dpcd_bytes_to_read &&
2431 process_count < max_process_count) {
2432 uint8_t retry;
2433 dret = 0;
2434
2435 process_count++;
2436
2437 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2438 /* handle HPD short pulse irq */
2439 if (aconnector->mst_mgr.mst_state)
2440 drm_dp_mst_hpd_irq(
2441 &aconnector->mst_mgr,
2442 esi,
2443 &new_irq_handled);
2444
2445 if (new_irq_handled) {
2446 /* ACK at DPCD to notify downstream */
2447 const int ack_dpcd_bytes_to_write =
2448 dpcd_bytes_to_read - 1;
2449
2450 for (retry = 0; retry < 3; retry++) {
2451 uint8_t wret;
2452
2453 wret = drm_dp_dpcd_write(
2454 &aconnector->dm_dp_aux.aux,
2455 dpcd_addr + 1,
2456 &esi[1],
2457 ack_dpcd_bytes_to_write);
2458 if (wret == ack_dpcd_bytes_to_write)
2459 break;
2460 }
2461
2462 /* check if there is new irq to be handled */
2463 dret = drm_dp_dpcd_read(
2464 &aconnector->dm_dp_aux.aux,
2465 dpcd_addr,
2466 esi,
2467 dpcd_bytes_to_read);
2468
2469 new_irq_handled = false;
2470 } else {
2471 break;
2472 }
2473 }
2474
2475 if (process_count == max_process_count)
2476 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2477 }
2478
2479 static void handle_hpd_rx_irq(void *param)
2480 {
2481 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2482 struct drm_connector *connector = &aconnector->base;
2483 struct drm_device *dev = connector->dev;
2484 struct dc_link *dc_link = aconnector->dc_link;
2485 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2486 enum dc_connection_type new_connection_type = dc_connection_none;
2487 #ifdef CONFIG_DRM_AMD_DC_HDCP
2488 union hpd_irq_data hpd_irq_data;
2489 struct amdgpu_device *adev = drm_to_adev(dev);
2490
2491 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2492 #endif
2493
2494 /*
2495 * TODO: Temporarily add a mutex to protect the HPD interrupt from a GPIO
2496 * conflict; after the i2c helper is implemented, this mutex should be
2497 * retired.
2498 */
2499 if (dc_link->type != dc_connection_mst_branch)
2500 mutex_lock(&aconnector->hpd_lock);
2501
2502
2503 #ifdef CONFIG_DRM_AMD_DC_HDCP
2504 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2505 #else
2506 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2507 #endif
2508 !is_mst_root_connector) {
2509 /* Downstream Port status changed. */
2510 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2511 DRM_ERROR("KMS: Failed to detect connector\n");
2512
2513 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2514 emulated_link_detect(dc_link);
2515
2516 if (aconnector->fake_enable)
2517 aconnector->fake_enable = false;
2518
2519 amdgpu_dm_update_connector_after_detect(aconnector);
2520
2521
2522 drm_modeset_lock_all(dev);
2523 dm_restore_drm_connector_state(dev, connector);
2524 drm_modeset_unlock_all(dev);
2525
2526 drm_kms_helper_hotplug_event(dev);
2527 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2528
2529 if (aconnector->fake_enable)
2530 aconnector->fake_enable = false;
2531
2532 amdgpu_dm_update_connector_after_detect(aconnector);
2533
2534
2535 drm_modeset_lock_all(dev);
2536 dm_restore_drm_connector_state(dev, connector);
2537 drm_modeset_unlock_all(dev);
2538
2539 drm_kms_helper_hotplug_event(dev);
2540 }
2541 }
2542 #ifdef CONFIG_DRM_AMD_DC_HDCP
2543 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2544 if (adev->dm.hdcp_workqueue)
2545 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2546 }
2547 #endif
2548 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2549 (dc_link->type == dc_connection_mst_branch))
2550 dm_handle_hpd_rx_irq(aconnector);
2551
2552 if (dc_link->type != dc_connection_mst_branch) {
2553 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2554 mutex_unlock(&aconnector->hpd_lock);
2555 }
2556 }
2557
2558 static void register_hpd_handlers(struct amdgpu_device *adev)
2559 {
2560 struct drm_device *dev = adev_to_drm(adev);
2561 struct drm_connector *connector;
2562 struct amdgpu_dm_connector *aconnector;
2563 const struct dc_link *dc_link;
2564 struct dc_interrupt_params int_params = {0};
2565
2566 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2567 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2568
2569 list_for_each_entry(connector,
2570 &dev->mode_config.connector_list, head) {
2571
2572 aconnector = to_amdgpu_dm_connector(connector);
2573 dc_link = aconnector->dc_link;
2574
2575 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2576 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2577 int_params.irq_source = dc_link->irq_source_hpd;
2578
2579 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2580 handle_hpd_irq,
2581 (void *) aconnector);
2582 }
2583
2584 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2585
2586 /* Also register for DP short pulse (hpd_rx). */
2587 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2588 int_params.irq_source = dc_link->irq_source_hpd_rx;
2589
2590 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2591 handle_hpd_rx_irq,
2592 (void *) aconnector);
2593 }
2594 }
2595 }
2596
2597 #if defined(CONFIG_DRM_AMD_DC_SI)
2598 /* Register IRQ sources and initialize IRQ callbacks */
2599 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2600 {
2601 struct dc *dc = adev->dm.dc;
2602 struct common_irq_params *c_irq_params;
2603 struct dc_interrupt_params int_params = {0};
2604 int r;
2605 int i;
2606 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2607
2608 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2609 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2610
2611 /*
2612 * Actions of amdgpu_irq_add_id():
2613 * 1. Register a set() function with base driver.
2614 * Base driver will call set() function to enable/disable an
2615 * interrupt in DC hardware.
2616 * 2. Register amdgpu_dm_irq_handler().
2617 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2618 * coming from DC hardware.
2619 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2620 * for acknowledging and handling. */
2621
2622 /* Use VBLANK interrupt */
2623 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2624 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2625 if (r) {
2626 DRM_ERROR("Failed to add crtc irq id!\n");
2627 return r;
2628 }
2629
2630 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2631 int_params.irq_source =
2632 dc_interrupt_to_irq_source(dc, i + 1, 0);
2633
2634 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2635
2636 c_irq_params->adev = adev;
2637 c_irq_params->irq_src = int_params.irq_source;
2638
2639 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2640 dm_crtc_high_irq, c_irq_params);
2641 }
2642
2643 /* Use GRPH_PFLIP interrupt */
2644 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2645 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2646 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2647 if (r) {
2648 DRM_ERROR("Failed to add page flip irq id!\n");
2649 return r;
2650 }
2651
2652 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2653 int_params.irq_source =
2654 dc_interrupt_to_irq_source(dc, i, 0);
2655
2656 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2657
2658 c_irq_params->adev = adev;
2659 c_irq_params->irq_src = int_params.irq_source;
2660
2661 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2662 dm_pflip_high_irq, c_irq_params);
2663
2664 }
2665
2666 /* HPD */
2667 r = amdgpu_irq_add_id(adev, client_id,
2668 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2669 if (r) {
2670 DRM_ERROR("Failed to add hpd irq id!\n");
2671 return r;
2672 }
2673
2674 register_hpd_handlers(adev);
2675
2676 return 0;
2677 }
2678 #endif
2679
2680 /* Register IRQ sources and initialize IRQ callbacks */
2681 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2682 {
2683 struct dc *dc = adev->dm.dc;
2684 struct common_irq_params *c_irq_params;
2685 struct dc_interrupt_params int_params = {0};
2686 int r;
2687 int i;
2688 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2689
2690 if (adev->asic_type >= CHIP_VEGA10)
2691 client_id = SOC15_IH_CLIENTID_DCE;
2692
2693 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2694 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2695
2696 /*
2697 * Actions of amdgpu_irq_add_id():
2698 * 1. Register a set() function with base driver.
2699 * Base driver will call set() function to enable/disable an
2700 * interrupt in DC hardware.
2701 * 2. Register amdgpu_dm_irq_handler().
2702 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2703 * coming from DC hardware.
2704 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2705 * for acknowledging and handling. */
2706
2707 /* Use VBLANK interrupt */
2708 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2709 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2710 if (r) {
2711 DRM_ERROR("Failed to add crtc irq id!\n");
2712 return r;
2713 }
2714
2715 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2716 int_params.irq_source =
2717 dc_interrupt_to_irq_source(dc, i, 0);
2718
2719 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2720
2721 c_irq_params->adev = adev;
2722 c_irq_params->irq_src = int_params.irq_source;
2723
2724 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2725 dm_crtc_high_irq, c_irq_params);
2726 }
2727
2728 /* Use VUPDATE interrupt */
2729 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2730 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2731 if (r) {
2732 DRM_ERROR("Failed to add vupdate irq id!\n");
2733 return r;
2734 }
2735
2736 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2737 int_params.irq_source =
2738 dc_interrupt_to_irq_source(dc, i, 0);
2739
2740 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2741
2742 c_irq_params->adev = adev;
2743 c_irq_params->irq_src = int_params.irq_source;
2744
2745 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2746 dm_vupdate_high_irq, c_irq_params);
2747 }
2748
2749 /* Use GRPH_PFLIP interrupt */
2750 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2751 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2752 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2753 if (r) {
2754 DRM_ERROR("Failed to add page flip irq id!\n");
2755 return r;
2756 }
2757
2758 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2759 int_params.irq_source =
2760 dc_interrupt_to_irq_source(dc, i, 0);
2761
2762 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2763
2764 c_irq_params->adev = adev;
2765 c_irq_params->irq_src = int_params.irq_source;
2766
2767 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2768 dm_pflip_high_irq, c_irq_params);
2769
2770 }
2771
2772 /* HPD */
2773 r = amdgpu_irq_add_id(adev, client_id,
2774 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2775 if (r) {
2776 DRM_ERROR("Failed to add hpd irq id!\n");
2777 return r;
2778 }
2779
2780 register_hpd_handlers(adev);
2781
2782 return 0;
2783 }
2784
2785 #if defined(CONFIG_DRM_AMD_DC_DCN)
2786 /* Register IRQ sources and initialize IRQ callbacks */
2787 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2788 {
2789 struct dc *dc = adev->dm.dc;
2790 struct common_irq_params *c_irq_params;
2791 struct dc_interrupt_params int_params = {0};
2792 int r;
2793 int i;
2794
2795 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2796 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2797
2798 /*
2799 * Actions of amdgpu_irq_add_id():
2800 * 1. Register a set() function with base driver.
2801 * Base driver will call set() function to enable/disable an
2802 * interrupt in DC hardware.
2803 * 2. Register amdgpu_dm_irq_handler().
2804 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2805 * coming from DC hardware.
2806 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2807 * for acknowledging and handling.
2808 */
2809
2810 /* Use VSTARTUP interrupt */
2811 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2812 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2813 i++) {
2814 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2815
2816 if (r) {
2817 DRM_ERROR("Failed to add crtc irq id!\n");
2818 return r;
2819 }
2820
2821 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2822 int_params.irq_source =
2823 dc_interrupt_to_irq_source(dc, i, 0);
2824
2825 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2826
2827 c_irq_params->adev = adev;
2828 c_irq_params->irq_src = int_params.irq_source;
2829
2830 amdgpu_dm_irq_register_interrupt(
2831 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2832 }
2833
2834 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2835 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2836 * to trigger at end of each vblank, regardless of state of the lock,
2837 * matching DCE behaviour.
2838 */
2839 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2840 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2841 i++) {
2842 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2843
2844 if (r) {
2845 DRM_ERROR("Failed to add vupdate irq id!\n");
2846 return r;
2847 }
2848
2849 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2850 int_params.irq_source =
2851 dc_interrupt_to_irq_source(dc, i, 0);
2852
2853 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2854
2855 c_irq_params->adev = adev;
2856 c_irq_params->irq_src = int_params.irq_source;
2857
2858 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2859 dm_vupdate_high_irq, c_irq_params);
2860 }
2861
2862 /* Use GRPH_PFLIP interrupt */
2863 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2864 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2865 i++) {
2866 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2867 if (r) {
2868 DRM_ERROR("Failed to add page flip irq id!\n");
2869 return r;
2870 }
2871
2872 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873 int_params.irq_source =
2874 dc_interrupt_to_irq_source(dc, i, 0);
2875
2876 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2877
2878 c_irq_params->adev = adev;
2879 c_irq_params->irq_src = int_params.irq_source;
2880
2881 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882 dm_pflip_high_irq, c_irq_params);
2883
2884 }
2885
2886 /* HPD */
2887 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2888 &adev->hpd_irq);
2889 if (r) {
2890 DRM_ERROR("Failed to add hpd irq id!\n");
2891 return r;
2892 }
2893
2894 register_hpd_handlers(adev);
2895
2896 return 0;
2897 }
2898 #endif
2899
2900 /*
2901 * Acquires the lock for the atomic state object and returns
2902 * the new atomic state.
2903 *
2904 * This should only be called during atomic check.
2905 */
2906 static int dm_atomic_get_state(struct drm_atomic_state *state,
2907 struct dm_atomic_state **dm_state)
2908 {
2909 struct drm_device *dev = state->dev;
2910 struct amdgpu_device *adev = drm_to_adev(dev);
2911 struct amdgpu_display_manager *dm = &adev->dm;
2912 struct drm_private_state *priv_state;
2913
2914 if (*dm_state)
2915 return 0;
2916
2917 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2918 if (IS_ERR(priv_state))
2919 return PTR_ERR(priv_state);
2920
2921 *dm_state = to_dm_atomic_state(priv_state);
2922
2923 return 0;
2924 }
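/*
 * Illustrative usage from atomic check (sketch; the local variable names are
 * assumptions for the example):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	(dm_state->context can then be used for DC validation.)
 */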
2925
2926 static struct dm_atomic_state *
2927 dm_atomic_get_new_state(struct drm_atomic_state *state)
2928 {
2929 struct drm_device *dev = state->dev;
2930 struct amdgpu_device *adev = drm_to_adev(dev);
2931 struct amdgpu_display_manager *dm = &adev->dm;
2932 struct drm_private_obj *obj;
2933 struct drm_private_state *new_obj_state;
2934 int i;
2935
2936 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2937 if (obj->funcs == dm->atomic_obj.funcs)
2938 return to_dm_atomic_state(new_obj_state);
2939 }
2940
2941 return NULL;
2942 }
2943
2944 static struct drm_private_state *
2945 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2946 {
2947 struct dm_atomic_state *old_state, *new_state;
2948
2949 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2950 if (!new_state)
2951 return NULL;
2952
2953 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2954
2955 old_state = to_dm_atomic_state(obj->state);
2956
2957 if (old_state && old_state->context)
2958 new_state->context = dc_copy_state(old_state->context);
2959
2960 if (!new_state->context) {
2961 kfree(new_state);
2962 return NULL;
2963 }
2964
2965 return &new_state->base;
2966 }
2967
2968 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2969 struct drm_private_state *state)
2970 {
2971 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2972
2973 if (dm_state && dm_state->context)
2974 dc_release_state(dm_state->context);
2975
2976 kfree(dm_state);
2977 }
2978
2979 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2980 .atomic_duplicate_state = dm_atomic_duplicate_state,
2981 .atomic_destroy_state = dm_atomic_destroy_state,
2982 };
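/*
 * Note (informational): the DRM core exercises these hooks via
 * drm_atomic_get_private_obj_state(). The first lookup of dm->atomic_obj in a
 * given drm_atomic_state duplicates the current state through
 * .atomic_duplicate_state, and .atomic_destroy_state releases that copy when
 * the atomic state is cleared.
 */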
2983
2984 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2985 {
2986 struct dm_atomic_state *state;
2987 int r;
2988
2989 adev->mode_info.mode_config_initialized = true;
2990
2991 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2992 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2993
2994 adev_to_drm(adev)->mode_config.max_width = 16384;
2995 adev_to_drm(adev)->mode_config.max_height = 16384;
2996
2997 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2998 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2999 /* indicates support for immediate flip */
3000 adev_to_drm(adev)->mode_config.async_page_flip = true;
3001
3002 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3003
3004 state = kzalloc(sizeof(*state), GFP_KERNEL);
3005 if (!state)
3006 return -ENOMEM;
3007
3008 state->context = dc_create_state(adev->dm.dc);
3009 if (!state->context) {
3010 kfree(state);
3011 return -ENOMEM;
3012 }
3013
3014 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3015
3016 drm_atomic_private_obj_init(adev_to_drm(adev),
3017 &adev->dm.atomic_obj,
3018 &state->base,
3019 &dm_atomic_state_funcs);
3020
3021 r = amdgpu_display_modeset_create_props(adev);
3022 if (r) {
3023 dc_release_state(state->context);
3024 kfree(state);
3025 return r;
3026 }
3027
3028 r = amdgpu_dm_audio_init(adev);
3029 if (r) {
3030 dc_release_state(state->context);
3031 kfree(state);
3032 return r;
3033 }
3034
3035 return 0;
3036 }
3037
3038 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3039 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3040 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3041
3042 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3043 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3044
3045 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3046 {
3047 #if defined(CONFIG_ACPI)
3048 struct amdgpu_dm_backlight_caps caps;
3049
3050 memset(&caps, 0, sizeof(caps));
3051
3052 if (dm->backlight_caps.caps_valid)
3053 return;
3054
3055 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3056 if (caps.caps_valid) {
3057 dm->backlight_caps.caps_valid = true;
3058 if (caps.aux_support)
3059 return;
3060 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3061 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3062 } else {
3063 dm->backlight_caps.min_input_signal =
3064 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3065 dm->backlight_caps.max_input_signal =
3066 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3067 }
3068 #else
3069 if (dm->backlight_caps.aux_support)
3070 return;
3071
3072 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3073 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3074 #endif
3075 }
3076
3077 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3078 unsigned *min, unsigned *max)
3079 {
3080 if (!caps)
3081 return 0;
3082
3083 if (caps->aux_support) {
3084 // Firmware limits are in nits, DC API wants millinits.
3085 *max = 1000 * caps->aux_max_input_signal;
3086 *min = 1000 * caps->aux_min_input_signal;
3087 } else {
3088 // Firmware limits are 8-bit, PWM control is 16-bit.
3089 *max = 0x101 * caps->max_input_signal;
3090 *min = 0x101 * caps->min_input_signal;
3091 }
3092 return 1;
3093 }
3094
3095 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3096 uint32_t brightness)
3097 {
3098 unsigned min, max;
3099
3100 if (!get_brightness_range(caps, &min, &max))
3101 return brightness;
3102
3103 // Rescale 0..255 to min..max
3104 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3105 AMDGPU_MAX_BL_LEVEL);
3106 }
3107
3108 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3109 uint32_t brightness)
3110 {
3111 unsigned min, max;
3112
3113 if (!get_brightness_range(caps, &min, &max))
3114 return brightness;
3115
3116 if (brightness < min)
3117 return 0;
3118 // Rescale min..max to 0..255
3119 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3120 max - min);
3121 }
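/*
 * Worked example (PWM path, assuming the default caps defined above, i.e.
 * min_input_signal = 12 and max_input_signal = 255): the firmware range
 * scales to min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 0 maps to 3084, 255 maps to 65535, and
 * convert_brightness_to_user() maps 3084 back to 0.
 */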
3122
3123 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3124 {
3125 struct amdgpu_display_manager *dm = bl_get_data(bd);
3126 struct amdgpu_dm_backlight_caps caps;
3127 struct dc_link *link = NULL;
3128 u32 brightness;
3129 bool rc;
3130
3131 amdgpu_dm_update_backlight_caps(dm);
3132 caps = dm->backlight_caps;
3133
3134 link = (struct dc_link *)dm->backlight_link;
3135
3136 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3137 // Change brightness based on AUX property
3138 if (caps.aux_support)
3139 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3140 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3141 else
3142 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3143
3144 return rc ? 0 : 1;
3145 }
3146
3147 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3148 {
3149 struct amdgpu_display_manager *dm = bl_get_data(bd);
3150 struct amdgpu_dm_backlight_caps caps;
3151
3152 amdgpu_dm_update_backlight_caps(dm);
3153 caps = dm->backlight_caps;
3154
3155 if (caps.aux_support) {
3156 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3157 u32 avg, peak;
3158 bool rc;
3159
3160 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3161 if (!rc)
3162 return bd->props.brightness;
3163 return convert_brightness_to_user(&caps, avg);
3164 } else {
3165 int ret = dc_link_get_backlight_level(dm->backlight_link);
3166
3167 if (ret == DC_ERROR_UNEXPECTED)
3168 return bd->props.brightness;
3169 return convert_brightness_to_user(&caps, ret);
3170 }
3171 }
3172
3173 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3174 .options = BL_CORE_SUSPENDRESUME,
3175 .get_brightness = amdgpu_dm_backlight_get_brightness,
3176 .update_status = amdgpu_dm_backlight_update_status,
3177 };
3178
3179 static void
3180 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3181 {
3182 char bl_name[16];
3183 struct backlight_properties props = { 0 };
3184
3185 amdgpu_dm_update_backlight_caps(dm);
3186
3187 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3188 props.brightness = AMDGPU_MAX_BL_LEVEL;
3189 props.type = BACKLIGHT_RAW;
3190
3191 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3192 adev_to_drm(dm->adev)->primary->index);
3193
3194 dm->backlight_dev = backlight_device_register(bl_name,
3195 adev_to_drm(dm->adev)->dev,
3196 dm,
3197 &amdgpu_dm_backlight_ops,
3198 &props);
3199
3200 if (IS_ERR(dm->backlight_dev))
3201 DRM_ERROR("DM: Backlight registration failed!\n");
3202 else
3203 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3204 }
3205
3206 #endif
3207
3208 static int initialize_plane(struct amdgpu_display_manager *dm,
3209 struct amdgpu_mode_info *mode_info, int plane_id,
3210 enum drm_plane_type plane_type,
3211 const struct dc_plane_cap *plane_cap)
3212 {
3213 struct drm_plane *plane;
3214 unsigned long possible_crtcs;
3215 int ret = 0;
3216
3217 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3218 if (!plane) {
3219 DRM_ERROR("KMS: Failed to allocate plane\n");
3220 return -ENOMEM;
3221 }
3222 plane->type = plane_type;
3223
3224 /*
3225 * HACK: IGT tests expect that the primary plane for a CRTC
3226 * can only have one possible CRTC. Only expose support for
3227 * any CRTC for planes that are not going to be used as a primary
3228 * plane for a CRTC - like overlay or underlay planes.
3229 */
3230 possible_crtcs = 1 << plane_id;
3231 if (plane_id >= dm->dc->caps.max_streams)
3232 possible_crtcs = 0xff;
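	/*
	 * Example (illustrative values): with dm->dc->caps.max_streams == 4,
	 * plane_id 1 (a primary plane) yields possible_crtcs == 0x2, i.e.
	 * CRTC 1 only, while an overlay plane with plane_id >= 4 falls back
	 * to 0xff and may be placed on any CRTC.
	 */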
3233
3234 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3235
3236 if (ret) {
3237 DRM_ERROR("KMS: Failed to initialize plane\n");
3238 kfree(plane);
3239 return ret;
3240 }
3241
3242 if (mode_info)
3243 mode_info->planes[plane_id] = plane;
3244
3245 return ret;
3246 }
3247
3248
3249 static void register_backlight_device(struct amdgpu_display_manager *dm,
3250 struct dc_link *link)
3251 {
3252 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3253 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3254
3255 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3256 link->type != dc_connection_none) {
3257 /*
3258 * Even if registration fails, we should continue with
3259 * DM initialization because not having backlight control
3260 * is better than a black screen.
3261 */
3262 amdgpu_dm_register_backlight_device(dm);
3263
3264 if (dm->backlight_dev)
3265 dm->backlight_link = link;
3266 }
3267 #endif
3268 }
3269
3270
3271 /*
3272 * In this architecture, the association
3273 * connector -> encoder -> crtc
3274 * is not really required. The crtc and connector will hold the
3275 * display_index as an abstraction to use with the DAL component
3276 *
3277 * Returns 0 on success
3278 */
3279 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3280 {
3281 struct amdgpu_display_manager *dm = &adev->dm;
3282 int32_t i;
3283 struct amdgpu_dm_connector *aconnector = NULL;
3284 struct amdgpu_encoder *aencoder = NULL;
3285 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3286 uint32_t link_cnt;
3287 int32_t primary_planes;
3288 enum dc_connection_type new_connection_type = dc_connection_none;
3289 const struct dc_plane_cap *plane;
3290
3291 dm->display_indexes_num = dm->dc->caps.max_streams;
3292 /* Update the actual used number of crtc */
3293 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3294
3295 link_cnt = dm->dc->caps.max_links;
3296 if (amdgpu_dm_mode_config_init(dm->adev)) {
3297 DRM_ERROR("DM: Failed to initialize mode config\n");
3298 return -EINVAL;
3299 }
3300
3301 /* There is one primary plane per CRTC */
3302 primary_planes = dm->dc->caps.max_streams;
3303 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3304
3305 /*
3306 * Initialize primary planes, implicit planes for legacy IOCTLS.
3307 * Order is reversed to match iteration order in atomic check.
3308 */
3309 for (i = (primary_planes - 1); i >= 0; i--) {
3310 plane = &dm->dc->caps.planes[i];
3311
3312 if (initialize_plane(dm, mode_info, i,
3313 DRM_PLANE_TYPE_PRIMARY, plane)) {
3314 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3315 goto fail;
3316 }
3317 }
3318
3319 /*
3320 * Initialize overlay planes, index starting after primary planes.
3321 * These planes have a higher DRM index than the primary planes since
3322 * they should be considered as having a higher z-order.
3323 * Order is reversed to match iteration order in atomic check.
3324 *
3325 * Only support DCN for now, and only expose one so we don't encourage
3326 * userspace to use up all the pipes.
3327 */
3328 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3329 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3330
3331 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3332 continue;
3333
3334 if (!plane->blends_with_above || !plane->blends_with_below)
3335 continue;
3336
3337 if (!plane->pixel_format_support.argb8888)
3338 continue;
3339
3340 if (initialize_plane(dm, NULL, primary_planes + i,
3341 DRM_PLANE_TYPE_OVERLAY, plane)) {
3342 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3343 goto fail;
3344 }
3345
3346 /* Only create one overlay plane. */
3347 break;
3348 }
3349
3350 for (i = 0; i < dm->dc->caps.max_streams; i++)
3351 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3352 DRM_ERROR("KMS: Failed to initialize crtc\n");
3353 goto fail;
3354 }
3355
3356 /* loops over all connectors on the board */
3357 for (i = 0; i < link_cnt; i++) {
3358 struct dc_link *link = NULL;
3359
3360 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3361 DRM_ERROR(
3362 "KMS: Cannot support more than %d display indexes\n",
3363 AMDGPU_DM_MAX_DISPLAY_INDEX);
3364 continue;
3365 }
3366
3367 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3368 if (!aconnector)
3369 goto fail;
3370
3371 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3372 if (!aencoder)
3373 goto fail;
3374
3375 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3376 DRM_ERROR("KMS: Failed to initialize encoder\n");
3377 goto fail;
3378 }
3379
3380 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3381 DRM_ERROR("KMS: Failed to initialize connector\n");
3382 goto fail;
3383 }
3384
3385 link = dc_get_link_at_index(dm->dc, i);
3386
3387 if (!dc_link_detect_sink(link, &new_connection_type))
3388 DRM_ERROR("KMS: Failed to detect connector\n");
3389
3390 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3391 emulated_link_detect(link);
3392 amdgpu_dm_update_connector_after_detect(aconnector);
3393
3394 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3395 amdgpu_dm_update_connector_after_detect(aconnector);
3396 register_backlight_device(dm, link);
3397 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3398 amdgpu_dm_set_psr_caps(link);
3399 }
3400
3401
3402 }
3403
3404 /* Software is initialized. Now we can register interrupt handlers. */
3405 switch (adev->asic_type) {
3406 #if defined(CONFIG_DRM_AMD_DC_SI)
3407 case CHIP_TAHITI:
3408 case CHIP_PITCAIRN:
3409 case CHIP_VERDE:
3410 case CHIP_OLAND:
3411 if (dce60_register_irq_handlers(dm->adev)) {
3412 DRM_ERROR("DM: Failed to initialize IRQ\n");
3413 goto fail;
3414 }
3415 break;
3416 #endif
3417 case CHIP_BONAIRE:
3418 case CHIP_HAWAII:
3419 case CHIP_KAVERI:
3420 case CHIP_KABINI:
3421 case CHIP_MULLINS:
3422 case CHIP_TONGA:
3423 case CHIP_FIJI:
3424 case CHIP_CARRIZO:
3425 case CHIP_STONEY:
3426 case CHIP_POLARIS11:
3427 case CHIP_POLARIS10:
3428 case CHIP_POLARIS12:
3429 case CHIP_VEGAM:
3430 case CHIP_VEGA10:
3431 case CHIP_VEGA12:
3432 case CHIP_VEGA20:
3433 if (dce110_register_irq_handlers(dm->adev)) {
3434 DRM_ERROR("DM: Failed to initialize IRQ\n");
3435 goto fail;
3436 }
3437 break;
3438 #if defined(CONFIG_DRM_AMD_DC_DCN)
3439 case CHIP_RAVEN:
3440 case CHIP_NAVI12:
3441 case CHIP_NAVI10:
3442 case CHIP_NAVI14:
3443 case CHIP_RENOIR:
3444 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3445 case CHIP_SIENNA_CICHLID:
3446 case CHIP_NAVY_FLOUNDER:
3447 #endif
3448 if (dcn10_register_irq_handlers(dm->adev)) {
3449 DRM_ERROR("DM: Failed to initialize IRQ\n");
3450 goto fail;
3451 }
3452 break;
3453 #endif
3454 default:
3455 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3456 goto fail;
3457 }
3458
3459 return 0;
3460 fail:
3461 kfree(aencoder);
3462 kfree(aconnector);
3463
3464 return -EINVAL;
3465 }
3466
3467 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3468 {
3469 drm_mode_config_cleanup(dm->ddev);
3470 drm_atomic_private_obj_fini(&dm->atomic_obj);
3471 return;
3472 }
3473
3474 /******************************************************************************
3475 * amdgpu_display_funcs functions
3476 *****************************************************************************/
3477
3478 /*
3479 * dm_bandwidth_update - program display watermarks
3480 *
3481 * @adev: amdgpu_device pointer
3482 *
3483 * Calculate and program the display watermarks and line buffer allocation.
3484 */
3485 static void dm_bandwidth_update(struct amdgpu_device *adev)
3486 {
3487 /* TODO: implement later */
3488 }
3489
3490 static const struct amdgpu_display_funcs dm_display_funcs = {
3491 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3492 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3493 .backlight_set_level = NULL, /* never called for DC */
3494 .backlight_get_level = NULL, /* never called for DC */
3495 .hpd_sense = NULL,/* called unconditionally */
3496 .hpd_set_polarity = NULL, /* called unconditionally */
3497 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3498 .page_flip_get_scanoutpos =
3499 dm_crtc_get_scanoutpos,/* called unconditionally */
3500 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3501 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3502 };
3503
3504 #if defined(CONFIG_DEBUG_KERNEL_DC)
3505
3506 static ssize_t s3_debug_store(struct device *device,
3507 struct device_attribute *attr,
3508 const char *buf,
3509 size_t count)
3510 {
3511 int ret;
3512 int s3_state;
3513 struct drm_device *drm_dev = dev_get_drvdata(device);
3514 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3515
3516 ret = kstrtoint(buf, 0, &s3_state);
3517
3518 if (ret == 0) {
3519 if (s3_state) {
3520 dm_resume(adev);
3521 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3522 } else
3523 dm_suspend(adev);
3524 }
3525
3526 return ret == 0 ? count : 0;
3527 }
3528
3529 DEVICE_ATTR_WO(s3_debug);
3530
3531 #endif
3532
3533 static int dm_early_init(void *handle)
3534 {
3535 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3536
3537 switch (adev->asic_type) {
3538 #if defined(CONFIG_DRM_AMD_DC_SI)
3539 case CHIP_TAHITI:
3540 case CHIP_PITCAIRN:
3541 case CHIP_VERDE:
3542 adev->mode_info.num_crtc = 6;
3543 adev->mode_info.num_hpd = 6;
3544 adev->mode_info.num_dig = 6;
3545 break;
3546 case CHIP_OLAND:
3547 adev->mode_info.num_crtc = 2;
3548 adev->mode_info.num_hpd = 2;
3549 adev->mode_info.num_dig = 2;
3550 break;
3551 #endif
3552 case CHIP_BONAIRE:
3553 case CHIP_HAWAII:
3554 adev->mode_info.num_crtc = 6;
3555 adev->mode_info.num_hpd = 6;
3556 adev->mode_info.num_dig = 6;
3557 break;
3558 case CHIP_KAVERI:
3559 adev->mode_info.num_crtc = 4;
3560 adev->mode_info.num_hpd = 6;
3561 adev->mode_info.num_dig = 7;
3562 break;
3563 case CHIP_KABINI:
3564 case CHIP_MULLINS:
3565 adev->mode_info.num_crtc = 2;
3566 adev->mode_info.num_hpd = 6;
3567 adev->mode_info.num_dig = 6;
3568 break;
3569 case CHIP_FIJI:
3570 case CHIP_TONGA:
3571 adev->mode_info.num_crtc = 6;
3572 adev->mode_info.num_hpd = 6;
3573 adev->mode_info.num_dig = 7;
3574 break;
3575 case CHIP_CARRIZO:
3576 adev->mode_info.num_crtc = 3;
3577 adev->mode_info.num_hpd = 6;
3578 adev->mode_info.num_dig = 9;
3579 break;
3580 case CHIP_STONEY:
3581 adev->mode_info.num_crtc = 2;
3582 adev->mode_info.num_hpd = 6;
3583 adev->mode_info.num_dig = 9;
3584 break;
3585 case CHIP_POLARIS11:
3586 case CHIP_POLARIS12:
3587 adev->mode_info.num_crtc = 5;
3588 adev->mode_info.num_hpd = 5;
3589 adev->mode_info.num_dig = 5;
3590 break;
3591 case CHIP_POLARIS10:
3592 case CHIP_VEGAM:
3593 adev->mode_info.num_crtc = 6;
3594 adev->mode_info.num_hpd = 6;
3595 adev->mode_info.num_dig = 6;
3596 break;
3597 case CHIP_VEGA10:
3598 case CHIP_VEGA12:
3599 case CHIP_VEGA20:
3600 adev->mode_info.num_crtc = 6;
3601 adev->mode_info.num_hpd = 6;
3602 adev->mode_info.num_dig = 6;
3603 break;
3604 #if defined(CONFIG_DRM_AMD_DC_DCN)
3605 case CHIP_RAVEN:
3606 adev->mode_info.num_crtc = 4;
3607 adev->mode_info.num_hpd = 4;
3608 adev->mode_info.num_dig = 4;
3609 break;
3610 #endif
3611 case CHIP_NAVI10:
3612 case CHIP_NAVI12:
3613 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3614 case CHIP_SIENNA_CICHLID:
3615 case CHIP_NAVY_FLOUNDER:
3616 #endif
3617 adev->mode_info.num_crtc = 6;
3618 adev->mode_info.num_hpd = 6;
3619 adev->mode_info.num_dig = 6;
3620 break;
3621 case CHIP_NAVI14:
3622 adev->mode_info.num_crtc = 5;
3623 adev->mode_info.num_hpd = 5;
3624 adev->mode_info.num_dig = 5;
3625 break;
3626 case CHIP_RENOIR:
3627 adev->mode_info.num_crtc = 4;
3628 adev->mode_info.num_hpd = 4;
3629 adev->mode_info.num_dig = 4;
3630 break;
3631 default:
3632 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3633 return -EINVAL;
3634 }
3635
3636 amdgpu_dm_set_irq_funcs(adev);
3637
3638 if (adev->mode_info.funcs == NULL)
3639 adev->mode_info.funcs = &dm_display_funcs;
3640
3641 /*
3642 * Note: Do NOT change adev->audio_endpt_rreg and
3643 * adev->audio_endpt_wreg because they are initialised in
3644 * amdgpu_device_init()
3645 */
3646 #if defined(CONFIG_DEBUG_KERNEL_DC)
3647 device_create_file(
3648 adev_to_drm(adev)->dev,
3649 &dev_attr_s3_debug);
3650 #endif
3651
3652 return 0;
3653 }
3654
3655 static bool modeset_required(struct drm_crtc_state *crtc_state,
3656 struct dc_stream_state *new_stream,
3657 struct dc_stream_state *old_stream)
3658 {
3659 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3660 }
3661
3662 static bool modereset_required(struct drm_crtc_state *crtc_state)
3663 {
3664 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3665 }
3666
3667 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3668 {
3669 drm_encoder_cleanup(encoder);
3670 kfree(encoder);
3671 }
3672
3673 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3674 .destroy = amdgpu_dm_encoder_destroy,
3675 };
3676
3677
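/*
 * fill_dc_scaling_info - convert DRM plane src/dst rectangles into DC scaling info
 *
 * @state: DRM plane state to read from
 * @scaling_info: DC scaling info to fill
 *
 * Converts the 16.16 fixed-point source rectangle and the integer CRTC
 * destination rectangle into DC viewport and clip rectangles, rejecting
 * zero-sized rectangles, non-zero NV12 source offsets, and scaling ratios
 * outside roughly the 0.25x-16x range.
 */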
3678 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3679 struct dc_scaling_info *scaling_info)
3680 {
3681 int scale_w, scale_h;
3682
3683 memset(scaling_info, 0, sizeof(*scaling_info));
3684
3685 /* Source is fixed 16.16 but we ignore mantissa for now... */
3686 scaling_info->src_rect.x = state->src_x >> 16;
3687 scaling_info->src_rect.y = state->src_y >> 16;
3688
3689 /*
3690 	 * For reasons we don't (yet) fully understand, a non-zero
3691 * src_y coordinate into an NV12 buffer can cause a
3692 * system hang. To avoid hangs (and maybe be overly cautious)
3693 * let's reject both non-zero src_x and src_y.
3694 *
3695 * We currently know of only one use-case to reproduce a
3696 * scenario with non-zero src_x and src_y for NV12, which
3697 * is to gesture the YouTube Android app into full screen
3698 * on ChromeOS.
3699 */
3700 if (state->fb &&
3701 state->fb->format->format == DRM_FORMAT_NV12 &&
3702 (scaling_info->src_rect.x != 0 ||
3703 scaling_info->src_rect.y != 0))
3704 return -EINVAL;
3705
3723 scaling_info->src_rect.width = state->src_w >> 16;
3724 if (scaling_info->src_rect.width == 0)
3725 return -EINVAL;
3726
3727 scaling_info->src_rect.height = state->src_h >> 16;
3728 if (scaling_info->src_rect.height == 0)
3729 return -EINVAL;
3730
3731 scaling_info->dst_rect.x = state->crtc_x;
3732 scaling_info->dst_rect.y = state->crtc_y;
3733
3734 if (state->crtc_w == 0)
3735 return -EINVAL;
3736
3737 scaling_info->dst_rect.width = state->crtc_w;
3738
3739 if (state->crtc_h == 0)
3740 return -EINVAL;
3741
3742 scaling_info->dst_rect.height = state->crtc_h;
3743
3744 /* DRM doesn't specify clipping on destination output. */
3745 scaling_info->clip_rect = scaling_info->dst_rect;
3746
3747 /* TODO: Validate scaling per-format with DC plane caps */
3748 scale_w = scaling_info->dst_rect.width * 1000 /
3749 scaling_info->src_rect.width;
3750
3751 if (scale_w < 250 || scale_w > 16000)
3752 return -EINVAL;
3753
3754 scale_h = scaling_info->dst_rect.height * 1000 /
3755 scaling_info->src_rect.height;
3756
3757 if (scale_h < 250 || scale_h > 16000)
3758 return -EINVAL;
3759
3760 /*
3761 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3762 * assume reasonable defaults based on the format.
3763 */
3764
3765 return 0;
3766 }
3767
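/*
 * get_fb_info - read tiling flags and TMZ state from a framebuffer's BO
 *
 * @amdgpu_fb: framebuffer to query, may be NULL
 * @tiling_flags: returned tiling flags
 * @tmz_surface: returned TMZ (encrypted surface) state
 *
 * Reserves the backing buffer object and queries its tiling flags and
 * encryption state. Returns 0 on success or a negative error code if the
 * BO could not be reserved.
 */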
3768 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3769 uint64_t *tiling_flags, bool *tmz_surface)
3770 {
3771 struct amdgpu_bo *rbo;
3772 int r;
3773
3774 if (!amdgpu_fb) {
3775 *tiling_flags = 0;
3776 *tmz_surface = false;
3777 return 0;
3778 }
3779
3780 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3781 r = amdgpu_bo_reserve(rbo, false);
3782
3783 if (unlikely(r)) {
3784 /* Don't show error message when returning -ERESTARTSYS */
3785 if (r != -ERESTARTSYS)
3786 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3787 return r;
3788 }
3789
3790 if (tiling_flags)
3791 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3792
3793 if (tmz_surface)
3794 *tmz_surface = amdgpu_bo_encrypted(rbo);
3795
3796 amdgpu_bo_unreserve(rbo);
3797
3798 return r;
3799 }
3800
3801 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3802 {
3803 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3804
3805 return offset ? (address + offset * 256) : 0;
3806 }
3807
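/*
 * fill_plane_dcc_attributes - fill DCC parameters for a GFX9+ surface
 *
 * Queries DC for the DCC compression capability of the surface and, when DCC
 * is enabled in the tiling flags and supported for the format, fills the DCC
 * meta pitch, independent 64B block setting and the DCC meta surface address.
 * Returns 0 when DCC is simply left disabled and -EINVAL when the requested
 * DCC configuration cannot be supported.
 */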
3808 static int
3809 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3810 const struct amdgpu_framebuffer *afb,
3811 const enum surface_pixel_format format,
3812 const enum dc_rotation_angle rotation,
3813 const struct plane_size *plane_size,
3814 const union dc_tiling_info *tiling_info,
3815 const uint64_t info,
3816 struct dc_plane_dcc_param *dcc,
3817 struct dc_plane_address *address,
3818 bool force_disable_dcc)
3819 {
3820 struct dc *dc = adev->dm.dc;
3821 struct dc_dcc_surface_param input;
3822 struct dc_surface_dcc_cap output;
3823 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3824 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3825 uint64_t dcc_address;
3826
3827 memset(&input, 0, sizeof(input));
3828 memset(&output, 0, sizeof(output));
3829
3830 if (force_disable_dcc)
3831 return 0;
3832
3833 if (!offset)
3834 return 0;
3835
3836 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3837 return 0;
3838
3839 if (!dc->cap_funcs.get_dcc_compression_cap)
3840 return -EINVAL;
3841
3842 input.format = format;
3843 input.surface_size.width = plane_size->surface_size.width;
3844 input.surface_size.height = plane_size->surface_size.height;
3845 input.swizzle_mode = tiling_info->gfx9.swizzle;
3846
3847 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3848 input.scan = SCAN_DIRECTION_HORIZONTAL;
3849 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3850 input.scan = SCAN_DIRECTION_VERTICAL;
3851
3852 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3853 return -EINVAL;
3854
3855 if (!output.capable)
3856 return -EINVAL;
3857
3858 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3859 return -EINVAL;
3860
3861 dcc->enable = 1;
3862 dcc->meta_pitch =
3863 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3864 dcc->independent_64b_blks = i64b;
3865
3866 dcc_address = get_dcc_address(afb->address, info);
3867 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3868 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3869
3870 return 0;
3871 }
3872
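/*
 * fill_plane_buffer_attributes - translate framebuffer layout into DC plane data
 *
 * Fills the DC tiling info, plane size, DCC parameters and surface addresses
 * (separate luma/chroma addresses for video formats) from the amdgpu
 * framebuffer and its tiling flags, handling both the GFX8 macro-tiling
 * fields and the GFX9+ swizzle-based parameters.
 */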
3873 static int
3874 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3875 const struct amdgpu_framebuffer *afb,
3876 const enum surface_pixel_format format,
3877 const enum dc_rotation_angle rotation,
3878 const uint64_t tiling_flags,
3879 union dc_tiling_info *tiling_info,
3880 struct plane_size *plane_size,
3881 struct dc_plane_dcc_param *dcc,
3882 struct dc_plane_address *address,
3883 bool tmz_surface,
3884 bool force_disable_dcc)
3885 {
3886 const struct drm_framebuffer *fb = &afb->base;
3887 int ret;
3888
3889 memset(tiling_info, 0, sizeof(*tiling_info));
3890 memset(plane_size, 0, sizeof(*plane_size));
3891 memset(dcc, 0, sizeof(*dcc));
3892 memset(address, 0, sizeof(*address));
3893
3894 address->tmz_surface = tmz_surface;
3895
3896 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3897 plane_size->surface_size.x = 0;
3898 plane_size->surface_size.y = 0;
3899 plane_size->surface_size.width = fb->width;
3900 plane_size->surface_size.height = fb->height;
3901 plane_size->surface_pitch =
3902 fb->pitches[0] / fb->format->cpp[0];
3903
3904 address->type = PLN_ADDR_TYPE_GRAPHICS;
3905 address->grph.addr.low_part = lower_32_bits(afb->address);
3906 address->grph.addr.high_part = upper_32_bits(afb->address);
3907 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3908 uint64_t chroma_addr = afb->address + fb->offsets[1];
3909
3910 plane_size->surface_size.x = 0;
3911 plane_size->surface_size.y = 0;
3912 plane_size->surface_size.width = fb->width;
3913 plane_size->surface_size.height = fb->height;
3914 plane_size->surface_pitch =
3915 fb->pitches[0] / fb->format->cpp[0];
3916
3917 plane_size->chroma_size.x = 0;
3918 plane_size->chroma_size.y = 0;
3919 /* TODO: set these based on surface format */
3920 plane_size->chroma_size.width = fb->width / 2;
3921 plane_size->chroma_size.height = fb->height / 2;
3922
3923 plane_size->chroma_pitch =
3924 fb->pitches[1] / fb->format->cpp[1];
3925
3926 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3927 address->video_progressive.luma_addr.low_part =
3928 lower_32_bits(afb->address);
3929 address->video_progressive.luma_addr.high_part =
3930 upper_32_bits(afb->address);
3931 address->video_progressive.chroma_addr.low_part =
3932 lower_32_bits(chroma_addr);
3933 address->video_progressive.chroma_addr.high_part =
3934 upper_32_bits(chroma_addr);
3935 }
3936
3937 /* Fill GFX8 params */
3938 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3939 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3940
3941 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3942 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3943 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3944 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3945 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3946
3947 /* XXX fix me for VI */
3948 tiling_info->gfx8.num_banks = num_banks;
3949 tiling_info->gfx8.array_mode =
3950 DC_ARRAY_2D_TILED_THIN1;
3951 tiling_info->gfx8.tile_split = tile_split;
3952 tiling_info->gfx8.bank_width = bankw;
3953 tiling_info->gfx8.bank_height = bankh;
3954 tiling_info->gfx8.tile_aspect = mtaspect;
3955 tiling_info->gfx8.tile_mode =
3956 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3957 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3958 == DC_ARRAY_1D_TILED_THIN1) {
3959 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3960 }
3961
3962 tiling_info->gfx8.pipe_config =
3963 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3964
3965 if (adev->asic_type == CHIP_VEGA10 ||
3966 adev->asic_type == CHIP_VEGA12 ||
3967 adev->asic_type == CHIP_VEGA20 ||
3968 adev->asic_type == CHIP_NAVI10 ||
3969 adev->asic_type == CHIP_NAVI14 ||
3970 adev->asic_type == CHIP_NAVI12 ||
3971 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3972 adev->asic_type == CHIP_SIENNA_CICHLID ||
3973 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3974 #endif
3975 adev->asic_type == CHIP_RENOIR ||
3976 adev->asic_type == CHIP_RAVEN) {
3977 /* Fill GFX9 params */
3978 tiling_info->gfx9.num_pipes =
3979 adev->gfx.config.gb_addr_config_fields.num_pipes;
3980 tiling_info->gfx9.num_banks =
3981 adev->gfx.config.gb_addr_config_fields.num_banks;
3982 tiling_info->gfx9.pipe_interleave =
3983 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3984 tiling_info->gfx9.num_shader_engines =
3985 adev->gfx.config.gb_addr_config_fields.num_se;
3986 tiling_info->gfx9.max_compressed_frags =
3987 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3988 tiling_info->gfx9.num_rb_per_se =
3989 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3990 tiling_info->gfx9.swizzle =
3991 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3992 tiling_info->gfx9.shaderEnable = 1;
3993
3994 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3995 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3996 adev->asic_type == CHIP_NAVY_FLOUNDER)
3997 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3998 #endif
3999 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4000 plane_size, tiling_info,
4001 tiling_flags, dcc, address,
4002 force_disable_dcc);
4003 if (ret)
4004 return ret;
4005 }
4006
4007 return 0;
4008 }
4009
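/*
 * fill_blending_from_plane_state - derive alpha blending settings for a plane
 *
 * Only overlay planes are considered. Per-pixel alpha is enabled for
 * ARGB-style formats when the DRM blend mode is pre-multiplied, and global
 * (plane) alpha is enabled whenever the plane alpha property is below its
 * maximum value.
 */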
4010 static void
4011 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4012 bool *per_pixel_alpha, bool *global_alpha,
4013 int *global_alpha_value)
4014 {
4015 *per_pixel_alpha = false;
4016 *global_alpha = false;
4017 *global_alpha_value = 0xff;
4018
4019 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4020 return;
4021
4022 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4023 static const uint32_t alpha_formats[] = {
4024 DRM_FORMAT_ARGB8888,
4025 DRM_FORMAT_RGBA8888,
4026 DRM_FORMAT_ABGR8888,
4027 };
4028 uint32_t format = plane_state->fb->format->format;
4029 unsigned int i;
4030
4031 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4032 if (format == alpha_formats[i]) {
4033 *per_pixel_alpha = true;
4034 break;
4035 }
4036 }
4037 }
4038
4039 if (plane_state->alpha < 0xffff) {
4040 *global_alpha = true;
4041 *global_alpha_value = plane_state->alpha >> 8;
4042 }
4043 }
4044
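/*
 * fill_plane_color_attributes - pick the DC color space for a plane
 *
 * RGB formats always use sRGB. For YCbCr formats, the DRM color encoding and
 * color range properties select between the BT.601/BT.709/BT.2020 full and
 * limited range color spaces; unsupported combinations return -EINVAL.
 */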
4045 static int
4046 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4047 const enum surface_pixel_format format,
4048 enum dc_color_space *color_space)
4049 {
4050 bool full_range;
4051
4052 *color_space = COLOR_SPACE_SRGB;
4053
4054 /* DRM color properties only affect non-RGB formats. */
4055 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4056 return 0;
4057
4058 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4059
4060 switch (plane_state->color_encoding) {
4061 case DRM_COLOR_YCBCR_BT601:
4062 if (full_range)
4063 *color_space = COLOR_SPACE_YCBCR601;
4064 else
4065 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4066 break;
4067
4068 case DRM_COLOR_YCBCR_BT709:
4069 if (full_range)
4070 *color_space = COLOR_SPACE_YCBCR709;
4071 else
4072 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4073 break;
4074
4075 case DRM_COLOR_YCBCR_BT2020:
4076 if (full_range)
4077 *color_space = COLOR_SPACE_2020_YCBCR;
4078 else
4079 return -EINVAL;
4080 break;
4081
4082 default:
4083 return -EINVAL;
4084 }
4085
4086 return 0;
4087 }
4088
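/*
 * fill_dc_plane_info_and_addr - build DC plane info and addresses from DRM state
 *
 * Maps the DRM framebuffer format and plane rotation to their DC equivalents,
 * then fills the color space, buffer/tiling attributes, DCC parameters and
 * blending settings for the plane.
 */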
4089 static int
4090 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4091 const struct drm_plane_state *plane_state,
4092 const uint64_t tiling_flags,
4093 struct dc_plane_info *plane_info,
4094 struct dc_plane_address *address,
4095 bool tmz_surface,
4096 bool force_disable_dcc)
4097 {
4098 const struct drm_framebuffer *fb = plane_state->fb;
4099 const struct amdgpu_framebuffer *afb =
4100 to_amdgpu_framebuffer(plane_state->fb);
4101 struct drm_format_name_buf format_name;
4102 int ret;
4103
4104 memset(plane_info, 0, sizeof(*plane_info));
4105
4106 switch (fb->format->format) {
4107 case DRM_FORMAT_C8:
4108 plane_info->format =
4109 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4110 break;
4111 case DRM_FORMAT_RGB565:
4112 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4113 break;
4114 case DRM_FORMAT_XRGB8888:
4115 case DRM_FORMAT_ARGB8888:
4116 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4117 break;
4118 case DRM_FORMAT_XRGB2101010:
4119 case DRM_FORMAT_ARGB2101010:
4120 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4121 break;
4122 case DRM_FORMAT_XBGR2101010:
4123 case DRM_FORMAT_ABGR2101010:
4124 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4125 break;
4126 case DRM_FORMAT_XBGR8888:
4127 case DRM_FORMAT_ABGR8888:
4128 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4129 break;
4130 case DRM_FORMAT_NV21:
4131 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4132 break;
4133 case DRM_FORMAT_NV12:
4134 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4135 break;
4136 case DRM_FORMAT_P010:
4137 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4138 break;
4139 case DRM_FORMAT_XRGB16161616F:
4140 case DRM_FORMAT_ARGB16161616F:
4141 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4142 break;
4143 case DRM_FORMAT_XBGR16161616F:
4144 case DRM_FORMAT_ABGR16161616F:
4145 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4146 break;
4147 default:
4148 DRM_ERROR(
4149 "Unsupported screen format %s\n",
4150 drm_get_format_name(fb->format->format, &format_name));
4151 return -EINVAL;
4152 }
4153
4154 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4155 case DRM_MODE_ROTATE_0:
4156 plane_info->rotation = ROTATION_ANGLE_0;
4157 break;
4158 case DRM_MODE_ROTATE_90:
4159 plane_info->rotation = ROTATION_ANGLE_90;
4160 break;
4161 case DRM_MODE_ROTATE_180:
4162 plane_info->rotation = ROTATION_ANGLE_180;
4163 break;
4164 case DRM_MODE_ROTATE_270:
4165 plane_info->rotation = ROTATION_ANGLE_270;
4166 break;
4167 default:
4168 plane_info->rotation = ROTATION_ANGLE_0;
4169 break;
4170 }
4171
4172 plane_info->visible = true;
4173 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4174
4175 plane_info->layer_index = 0;
4176
4177 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4178 &plane_info->color_space);
4179 if (ret)
4180 return ret;
4181
4182 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4183 plane_info->rotation, tiling_flags,
4184 &plane_info->tiling_info,
4185 &plane_info->plane_size,
4186 &plane_info->dcc, address, tmz_surface,
4187 force_disable_dcc);
4188 if (ret)
4189 return ret;
4190
4191 fill_blending_from_plane_state(
4192 plane_state, &plane_info->per_pixel_alpha,
4193 &plane_info->global_alpha, &plane_info->global_alpha_value);
4194
4195 return 0;
4196 }
4197
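/*
 * fill_dc_plane_attributes - populate a dc_plane_state from DRM plane state
 *
 * Fills the scaling, buffer, format and blending attributes, forcing DCC off
 * on Raven while suspended, and refreshes the input transfer function via the
 * color management helpers since the plane state is rebuilt every time.
 */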
4198 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4199 struct dc_plane_state *dc_plane_state,
4200 struct drm_plane_state *plane_state,
4201 struct drm_crtc_state *crtc_state)
4202 {
4203 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4204 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4205 struct dc_scaling_info scaling_info;
4206 struct dc_plane_info plane_info;
4207 int ret;
4208 bool force_disable_dcc = false;
4209
4210 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4211 if (ret)
4212 return ret;
4213
4214 dc_plane_state->src_rect = scaling_info.src_rect;
4215 dc_plane_state->dst_rect = scaling_info.dst_rect;
4216 dc_plane_state->clip_rect = scaling_info.clip_rect;
4217 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4218
4219 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4220 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4221 dm_plane_state->tiling_flags,
4222 &plane_info,
4223 &dc_plane_state->address,
4224 dm_plane_state->tmz_surface,
4225 force_disable_dcc);
4226 if (ret)
4227 return ret;
4228
4229 dc_plane_state->format = plane_info.format;
4230 dc_plane_state->color_space = plane_info.color_space;
4232 dc_plane_state->plane_size = plane_info.plane_size;
4233 dc_plane_state->rotation = plane_info.rotation;
4234 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4235 dc_plane_state->stereo_format = plane_info.stereo_format;
4236 dc_plane_state->tiling_info = plane_info.tiling_info;
4237 dc_plane_state->visible = plane_info.visible;
4238 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4239 dc_plane_state->global_alpha = plane_info.global_alpha;
4240 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4241 dc_plane_state->dcc = plane_info.dcc;
4242 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4243
4244 /*
4245 * Always set input transfer function, since plane state is refreshed
4246 * every time.
4247 */
4248 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4249 if (ret)
4250 return ret;
4251
4252 return 0;
4253 }
4254
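/*
 * update_stream_scaling_settings - compute stream src/dst rects for scaling
 *
 * Derives the composition-space viewport and the addressable destination
 * rectangle from the mode and the connector's scaling (RMX) and underscan
 * state, centering the image and shrinking it by the underscan borders.
 */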
4255 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4256 const struct dm_connector_state *dm_state,
4257 struct dc_stream_state *stream)
4258 {
4259 enum amdgpu_rmx_type rmx_type;
4260
4261 struct rect src = { 0 }; /* viewport in composition space*/
4262 struct rect dst = { 0 }; /* stream addressable area */
4263
4264 /* no mode. nothing to be done */
4265 if (!mode)
4266 return;
4267
4268 /* Full screen scaling by default */
4269 src.width = mode->hdisplay;
4270 src.height = mode->vdisplay;
4271 dst.width = stream->timing.h_addressable;
4272 dst.height = stream->timing.v_addressable;
4273
4274 if (dm_state) {
4275 rmx_type = dm_state->scaling;
4276 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4277 if (src.width * dst.height <
4278 src.height * dst.width) {
4279 /* height needs less upscaling/more downscaling */
4280 dst.width = src.width *
4281 dst.height / src.height;
4282 } else {
4283 /* width needs less upscaling/more downscaling */
4284 dst.height = src.height *
4285 dst.width / src.width;
4286 }
4287 } else if (rmx_type == RMX_CENTER) {
4288 dst = src;
4289 }
4290
4291 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4292 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4293
4294 if (dm_state->underscan_enable) {
4295 dst.x += dm_state->underscan_hborder / 2;
4296 dst.y += dm_state->underscan_vborder / 2;
4297 dst.width -= dm_state->underscan_hborder;
4298 dst.height -= dm_state->underscan_vborder;
4299 }
4300 }
4301
4302 stream->src = src;
4303 stream->dst = dst;
4304
4305 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4306 dst.x, dst.y, dst.width, dst.height);
4307
4308 }
4309
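/*
 * convert_color_depth_from_display_info - pick a DC color depth for the sink
 *
 * Starts from the EDID-reported bpc (or the YCbCr 4:2:0 deep color caps when
 * is_y420 is set), caps it at the user-requested maximum rounded down to an
 * even value, and converts the result to the corresponding DC color depth.
 */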
4310 static enum dc_color_depth
4311 convert_color_depth_from_display_info(const struct drm_connector *connector,
4312 bool is_y420, int requested_bpc)
4313 {
4314 uint8_t bpc;
4315
4316 if (is_y420) {
4317 bpc = 8;
4318
4319 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4320 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4321 bpc = 16;
4322 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4323 bpc = 12;
4324 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4325 bpc = 10;
4326 } else {
4327 bpc = (uint8_t)connector->display_info.bpc;
4328 /* Assume 8 bpc by default if no bpc is specified. */
4329 bpc = bpc ? bpc : 8;
4330 }
4331
4332 if (requested_bpc > 0) {
4333 /*
4334 * Cap display bpc based on the user requested value.
4335 *
4336 	 * The value for state->max_bpc may not be correctly updated
4337 * depending on when the connector gets added to the state
4338 * or if this was called outside of atomic check, so it
4339 * can't be used directly.
4340 */
4341 bpc = min_t(u8, bpc, requested_bpc);
4342
4343 /* Round down to the nearest even number. */
4344 bpc = bpc - (bpc & 1);
4345 }
4346
4347 switch (bpc) {
4348 case 0:
4349 /*
4350 		 * Temporary workaround: DRM doesn't parse color depth for
4351 		 * EDID revisions before 1.4.
4352 * TODO: Fix edid parsing
4353 */
4354 return COLOR_DEPTH_888;
4355 case 6:
4356 return COLOR_DEPTH_666;
4357 case 8:
4358 return COLOR_DEPTH_888;
4359 case 10:
4360 return COLOR_DEPTH_101010;
4361 case 12:
4362 return COLOR_DEPTH_121212;
4363 case 14:
4364 return COLOR_DEPTH_141414;
4365 case 16:
4366 return COLOR_DEPTH_161616;
4367 default:
4368 return COLOR_DEPTH_UNDEFINED;
4369 }
4370 }
4371
4372 static enum dc_aspect_ratio
4373 get_aspect_ratio(const struct drm_display_mode *mode_in)
4374 {
4375 /* 1-1 mapping, since both enums follow the HDMI spec. */
4376 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4377 }
4378
4379 static enum dc_color_space
4380 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4381 {
4382 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4383
4384 switch (dc_crtc_timing->pixel_encoding) {
4385 case PIXEL_ENCODING_YCBCR422:
4386 case PIXEL_ENCODING_YCBCR444:
4387 case PIXEL_ENCODING_YCBCR420:
4388 {
4389 /*
4390 		 * 27.03 MHz (27030 kHz) is the separation point between HDTV and
4391 		 * SDTV according to the HDMI spec; we use YCbCr709 and YCbCr601
4392 		 * respectively.
4393 */
4394 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4395 if (dc_crtc_timing->flags.Y_ONLY)
4396 color_space =
4397 COLOR_SPACE_YCBCR709_LIMITED;
4398 else
4399 color_space = COLOR_SPACE_YCBCR709;
4400 } else {
4401 if (dc_crtc_timing->flags.Y_ONLY)
4402 color_space =
4403 COLOR_SPACE_YCBCR601_LIMITED;
4404 else
4405 color_space = COLOR_SPACE_YCBCR601;
4406 }
4407
4408 }
4409 break;
4410 case PIXEL_ENCODING_RGB:
4411 color_space = COLOR_SPACE_SRGB;
4412 break;
4413
4414 default:
4415 WARN_ON(1);
4416 break;
4417 }
4418
4419 return color_space;
4420 }
4421
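/*
 * adjust_colour_depth_from_display_info - fit the colour depth to the TMDS limit
 *
 * Walks down from the current colour depth until the pixel clock, scaled up
 * for deep colour (and halved for YCbCr 4:2:0), fits within the sink's
 * maximum TMDS clock. Returns true and updates the timing when a usable
 * depth is found, false otherwise.
 */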
4422 static bool adjust_colour_depth_from_display_info(
4423 struct dc_crtc_timing *timing_out,
4424 const struct drm_display_info *info)
4425 {
4426 enum dc_color_depth depth = timing_out->display_color_depth;
4427 int normalized_clk;
4428 do {
4429 normalized_clk = timing_out->pix_clk_100hz / 10;
4430 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4431 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4432 normalized_clk /= 2;
4433 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
4434 switch (depth) {
4435 case COLOR_DEPTH_888:
4436 break;
4437 case COLOR_DEPTH_101010:
4438 normalized_clk = (normalized_clk * 30) / 24;
4439 break;
4440 case COLOR_DEPTH_121212:
4441 normalized_clk = (normalized_clk * 36) / 24;
4442 break;
4443 case COLOR_DEPTH_161616:
4444 normalized_clk = (normalized_clk * 48) / 24;
4445 break;
4446 default:
4447 /* The above depths are the only ones valid for HDMI. */
4448 return false;
4449 }
4450 if (normalized_clk <= info->max_tmds_clock) {
4451 timing_out->display_color_depth = depth;
4452 return true;
4453 }
4454 } while (--depth > COLOR_DEPTH_666);
4455 return false;
4456 }
4457
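/*
 * fill_stream_properties_from_drm_display_mode - build DC stream timing from a DRM mode
 *
 * Selects the pixel encoding and colour depth for the sink, copies the CRTC
 * timing (borders, porches, sync widths, pixel clock) from the DRM mode,
 * keeps the VIC and sync polarities of an old stream when one is provided,
 * and derives the output colour space and transfer function.
 */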
4458 static void fill_stream_properties_from_drm_display_mode(
4459 struct dc_stream_state *stream,
4460 const struct drm_display_mode *mode_in,
4461 const struct drm_connector *connector,
4462 const struct drm_connector_state *connector_state,
4463 const struct dc_stream_state *old_stream,
4464 int requested_bpc)
4465 {
4466 struct dc_crtc_timing *timing_out = &stream->timing;
4467 const struct drm_display_info *info = &connector->display_info;
4468 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4469 struct hdmi_vendor_infoframe hv_frame;
4470 struct hdmi_avi_infoframe avi_frame;
4471
4472 memset(&hv_frame, 0, sizeof(hv_frame));
4473 memset(&avi_frame, 0, sizeof(avi_frame));
4474
4475 timing_out->h_border_left = 0;
4476 timing_out->h_border_right = 0;
4477 timing_out->v_border_top = 0;
4478 timing_out->v_border_bottom = 0;
4479 /* TODO: un-hardcode */
4480 if (drm_mode_is_420_only(info, mode_in)
4481 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4482 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4483 else if (drm_mode_is_420_also(info, mode_in)
4484 && aconnector->force_yuv420_output)
4485 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4486 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4487 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4488 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4489 else
4490 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4491
4492 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4493 timing_out->display_color_depth = convert_color_depth_from_display_info(
4494 connector,
4495 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4496 requested_bpc);
4497 timing_out->scan_type = SCANNING_TYPE_NODATA;
4498 timing_out->hdmi_vic = 0;
4499
4500 	if (old_stream) {
4501 timing_out->vic = old_stream->timing.vic;
4502 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4503 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4504 } else {
4505 timing_out->vic = drm_match_cea_mode(mode_in);
4506 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4507 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4508 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4509 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4510 }
4511
4512 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4513 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4514 timing_out->vic = avi_frame.video_code;
4515 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4516 timing_out->hdmi_vic = hv_frame.vic;
4517 }
4518
4519 timing_out->h_addressable = mode_in->crtc_hdisplay;
4520 timing_out->h_total = mode_in->crtc_htotal;
4521 timing_out->h_sync_width =
4522 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4523 timing_out->h_front_porch =
4524 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4525 timing_out->v_total = mode_in->crtc_vtotal;
4526 timing_out->v_addressable = mode_in->crtc_vdisplay;
4527 timing_out->v_front_porch =
4528 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4529 timing_out->v_sync_width =
4530 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4531 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4532 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4533
4534 stream->output_color_space = get_output_color_space(timing_out);
4535
4536 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4537 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4538 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4539 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4540 drm_mode_is_420_also(info, mode_in) &&
4541 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4542 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4543 adjust_colour_depth_from_display_info(timing_out, info);
4544 }
4545 }
4546 }
4547
4548 static void fill_audio_info(struct audio_info *audio_info,
4549 const struct drm_connector *drm_connector,
4550 const struct dc_sink *dc_sink)
4551 {
4552 int i = 0;
4553 int cea_revision = 0;
4554 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4555
4556 audio_info->manufacture_id = edid_caps->manufacturer_id;
4557 audio_info->product_id = edid_caps->product_id;
4558
4559 cea_revision = drm_connector->display_info.cea_rev;
4560
4561 strscpy(audio_info->display_name,
4562 edid_caps->display_name,
4563 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4564
4565 if (cea_revision >= 3) {
4566 audio_info->mode_count = edid_caps->audio_mode_count;
4567
4568 for (i = 0; i < audio_info->mode_count; ++i) {
4569 audio_info->modes[i].format_code =
4570 (enum audio_format_code)
4571 (edid_caps->audio_modes[i].format_code);
4572 audio_info->modes[i].channel_count =
4573 edid_caps->audio_modes[i].channel_count;
4574 audio_info->modes[i].sample_rates.all =
4575 edid_caps->audio_modes[i].sample_rate;
4576 audio_info->modes[i].sample_size =
4577 edid_caps->audio_modes[i].sample_size;
4578 }
4579 }
4580
4581 audio_info->flags.all = edid_caps->speaker_flags;
4582
4583 	/* TODO: We only check for the progressive mode, check for interlaced mode too */
4584 if (drm_connector->latency_present[0]) {
4585 audio_info->video_latency = drm_connector->video_latency[0];
4586 audio_info->audio_latency = drm_connector->audio_latency[0];
4587 }
4588
4589 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4590
4591 }
4592
4593 static void
4594 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4595 struct drm_display_mode *dst_mode)
4596 {
4597 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4598 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4599 dst_mode->crtc_clock = src_mode->crtc_clock;
4600 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4601 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4602 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4603 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4604 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4605 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4606 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4607 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4608 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4609 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4610 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4611 }
4612
4613 static void
4614 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4615 const struct drm_display_mode *native_mode,
4616 bool scale_enabled)
4617 {
4618 if (scale_enabled) {
4619 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4620 } else if (native_mode->clock == drm_mode->clock &&
4621 native_mode->htotal == drm_mode->htotal &&
4622 native_mode->vtotal == drm_mode->vtotal) {
4623 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4624 } else {
4625 /* no scaling nor amdgpu inserted, no need to patch */
4626 }
4627 }
4628
4629 static struct dc_sink *
4630 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4631 {
4632 struct dc_sink_init_data sink_init_data = { 0 };
4633 struct dc_sink *sink = NULL;
4634 sink_init_data.link = aconnector->dc_link;
4635 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4636
4637 sink = dc_sink_create(&sink_init_data);
4638 if (!sink) {
4639 DRM_ERROR("Failed to create sink!\n");
4640 return NULL;
4641 }
4642 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4643
4644 return sink;
4645 }
4646
4647 static void set_multisync_trigger_params(
4648 struct dc_stream_state *stream)
4649 {
4650 if (stream->triggered_crtc_reset.enabled) {
4651 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4652 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4653 }
4654 }
4655
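/*
 * set_master_stream - pick the multisync master stream
 *
 * Chooses the trigger-enabled stream with the highest refresh rate as the
 * master and points every stream's triggered CRTC reset event source at it.
 */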
4656 static void set_master_stream(struct dc_stream_state *stream_set[],
4657 int stream_count)
4658 {
4659 int j, highest_rfr = 0, master_stream = 0;
4660
4661 for (j = 0; j < stream_count; j++) {
4662 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4663 int refresh_rate = 0;
4664
4665 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4666 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4667 if (refresh_rate > highest_rfr) {
4668 highest_rfr = refresh_rate;
4669 master_stream = j;
4670 }
4671 }
4672 }
4673 for (j = 0; j < stream_count; j++) {
4674 if (stream_set[j])
4675 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4676 }
4677 }
4678
4679 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4680 {
4681 int i = 0;
4682
4683 if (context->stream_count < 2)
4684 return;
4685 for (i = 0; i < context->stream_count ; i++) {
4686 if (!context->streams[i])
4687 continue;
4688 /*
4689 * TODO: add a function to read AMD VSDB bits and set
4690 * crtc_sync_master.multi_sync_enabled flag
4691 * For now it's set to false
4692 */
4693 set_multisync_trigger_params(context->streams[i]);
4694 }
4695 set_master_stream(context->streams, context->stream_count);
4696 }
4697
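/*
 * create_stream_for_sink - create and configure a dc_stream_state for a connector
 *
 * Creates a stream against the connector's sink (or a fake sink when none is
 * attached), fills the timing from the requested or preferred mode, applies
 * scaling and audio settings, and enables DSC on DP sinks that support it,
 * honouring the debugfs force/disable overrides.
 */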
4698 static struct dc_stream_state *
4699 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4700 const struct drm_display_mode *drm_mode,
4701 const struct dm_connector_state *dm_state,
4702 const struct dc_stream_state *old_stream,
4703 int requested_bpc)
4704 {
4705 struct drm_display_mode *preferred_mode = NULL;
4706 struct drm_connector *drm_connector;
4707 const struct drm_connector_state *con_state =
4708 dm_state ? &dm_state->base : NULL;
4709 struct dc_stream_state *stream = NULL;
4710 struct drm_display_mode mode = *drm_mode;
4711 bool native_mode_found = false;
4712 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4713 int mode_refresh;
4714 int preferred_refresh = 0;
4715 #if defined(CONFIG_DRM_AMD_DC_DCN)
4716 struct dsc_dec_dpcd_caps dsc_caps;
4717 #endif
4718 uint32_t link_bandwidth_kbps;
4719
4720 struct dc_sink *sink = NULL;
4721 if (aconnector == NULL) {
4722 DRM_ERROR("aconnector is NULL!\n");
4723 return stream;
4724 }
4725
4726 drm_connector = &aconnector->base;
4727
4728 if (!aconnector->dc_sink) {
4729 sink = create_fake_sink(aconnector);
4730 if (!sink)
4731 return stream;
4732 } else {
4733 sink = aconnector->dc_sink;
4734 dc_sink_retain(sink);
4735 }
4736
4737 stream = dc_create_stream_for_sink(sink);
4738
4739 if (stream == NULL) {
4740 DRM_ERROR("Failed to create stream for sink!\n");
4741 goto finish;
4742 }
4743
4744 stream->dm_stream_context = aconnector;
4745
4746 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4747 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4748
4749 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4750 /* Search for preferred mode */
4751 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4752 native_mode_found = true;
4753 break;
4754 }
4755 }
4756 if (!native_mode_found)
4757 preferred_mode = list_first_entry_or_null(
4758 &aconnector->base.modes,
4759 struct drm_display_mode,
4760 head);
4761
4762 mode_refresh = drm_mode_vrefresh(&mode);
4763
4764 if (preferred_mode == NULL) {
4765 /*
4766 		 * This may not be an error: the use case is when we have no
4767 		 * usermode calls to reset and set the mode upon hotplug. In this
4768 		 * case, we call set mode ourselves to restore the previous mode,
4769 		 * and the mode list may not be filled in yet.
4770 */
4771 DRM_DEBUG_DRIVER("No preferred mode found\n");
4772 } else {
4773 decide_crtc_timing_for_drm_display_mode(
4774 &mode, preferred_mode,
4775 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4776 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4777 }
4778
4779 if (!dm_state)
4780 drm_mode_set_crtcinfo(&mode, 0);
4781
4782 /*
4783 * If scaling is enabled and refresh rate didn't change
4784 * we copy the vic and polarities of the old timings
4785 */
4786 if (!scale || mode_refresh != preferred_refresh)
4787 fill_stream_properties_from_drm_display_mode(stream,
4788 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4789 else
4790 fill_stream_properties_from_drm_display_mode(stream,
4791 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4792
4793 stream->timing.flags.DSC = 0;
4794
4795 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4796 #if defined(CONFIG_DRM_AMD_DC_DCN)
4797 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4798 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4799 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4800 &dsc_caps);
4801 #endif
4802 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4803 dc_link_get_link_cap(aconnector->dc_link));
4804
4805 #if defined(CONFIG_DRM_AMD_DC_DCN)
4806 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4807 /* Set DSC policy according to dsc_clock_en */
4808 dc_dsc_policy_set_enable_dsc_when_not_needed(
4809 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4810
4811 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4812 &dsc_caps,
4813 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4814 link_bandwidth_kbps,
4815 &stream->timing,
4816 &stream->timing.dsc_cfg))
4817 stream->timing.flags.DSC = 1;
4818 /* Overwrite the stream flag if DSC is enabled through debugfs */
4819 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4820 stream->timing.flags.DSC = 1;
4821
4822 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4823 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4824
4825 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4826 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4827
4828 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4829 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4830 }
4831 #endif
4832 }
4833
4834 update_stream_scaling_settings(&mode, dm_state, stream);
4835
4836 fill_audio_info(
4837 &stream->audio_info,
4838 drm_connector,
4839 sink);
4840
4841 update_stream_signal(stream, sink);
4842
4843 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4844 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4845
4846 if (stream->link->psr_settings.psr_feature_enabled) {
4847 //
4848 		// Decide whether the stream supports VSC SDP colorimetry
4849 		// before building the VSC info packet.
4850 //
4851 stream->use_vsc_sdp_for_colorimetry = false;
4852 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4853 stream->use_vsc_sdp_for_colorimetry =
4854 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4855 } else {
4856 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4857 stream->use_vsc_sdp_for_colorimetry = true;
4858 }
4859 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4860 }
4861 finish:
4862 dc_sink_release(sink);
4863
4864 return stream;
4865 }
4866
4867 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4868 {
4869 drm_crtc_cleanup(crtc);
4870 kfree(crtc);
4871 }
4872
4873 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4874 struct drm_crtc_state *state)
4875 {
4876 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4877
4878 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4879 if (cur->stream)
4880 dc_stream_release(cur->stream);
4881
4882
4883 __drm_atomic_helper_crtc_destroy_state(state);
4884
4885
4886 kfree(state);
4887 }
4888
4889 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4890 {
4891 struct dm_crtc_state *state;
4892
4893 if (crtc->state)
4894 dm_crtc_destroy_state(crtc, crtc->state);
4895
4896 state = kzalloc(sizeof(*state), GFP_KERNEL);
4897 if (WARN_ON(!state))
4898 return;
4899
4900 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4901 }
4902
4903 static struct drm_crtc_state *
4904 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4905 {
4906 struct dm_crtc_state *state, *cur;
4907
4908 cur = to_dm_crtc_state(crtc->state);
4909
4910 if (WARN_ON(!crtc->state))
4911 return NULL;
4912
4913 state = kzalloc(sizeof(*state), GFP_KERNEL);
4914 if (!state)
4915 return NULL;
4916
4917 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4918
4919 if (cur->stream) {
4920 state->stream = cur->stream;
4921 dc_stream_retain(state->stream);
4922 }
4923
4924 state->active_planes = cur->active_planes;
4925 state->vrr_infopacket = cur->vrr_infopacket;
4926 state->abm_level = cur->abm_level;
4927 state->vrr_supported = cur->vrr_supported;
4928 state->freesync_config = cur->freesync_config;
4929 state->crc_src = cur->crc_src;
4930 state->cm_has_degamma = cur->cm_has_degamma;
4931 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4932
4933 	/* TODO: Duplicate dc_stream once the stream object is flattened */
4934
4935 return &state->base;
4936 }
4937
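/*
 * dm_set_vupdate_irq - enable or disable the VUPDATE interrupt for a CRTC
 *
 * Enables or disables the VUPDATE interrupt source for the CRTC's OTG
 * instance. dm_set_vblank() below only keeps VUPDATE enabled while VBLANK is
 * enabled and the CRTC is in VRR mode.
 */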
4938 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4939 {
4940 enum dc_irq_source irq_source;
4941 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4942 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4943 int rc;
4944
4945 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4946
4947 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4948
4949 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4950 acrtc->crtc_id, enable ? "en" : "dis", rc);
4951 return rc;
4952 }
4953
4954 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4955 {
4956 enum dc_irq_source irq_source;
4957 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4958 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4959 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4960 int rc = 0;
4961
4962 if (enable) {
4963 /* vblank irq on -> Only need vupdate irq in vrr mode */
4964 if (amdgpu_dm_vrr_active(acrtc_state))
4965 rc = dm_set_vupdate_irq(crtc, true);
4966 } else {
4967 /* vblank irq off -> vupdate irq off */
4968 rc = dm_set_vupdate_irq(crtc, false);
4969 }
4970
4971 if (rc)
4972 return rc;
4973
4974 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4975 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4976 }
4977
4978 static int dm_enable_vblank(struct drm_crtc *crtc)
4979 {
4980 return dm_set_vblank(crtc, true);
4981 }
4982
4983 static void dm_disable_vblank(struct drm_crtc *crtc)
4984 {
4985 dm_set_vblank(crtc, false);
4986 }
4987
4988 /* Implemented only the options currently available for the driver */
4989 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4990 .reset = dm_crtc_reset_state,
4991 .destroy = amdgpu_dm_crtc_destroy,
4992 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4993 .set_config = drm_atomic_helper_set_config,
4994 .page_flip = drm_atomic_helper_page_flip,
4995 .atomic_duplicate_state = dm_crtc_duplicate_state,
4996 .atomic_destroy_state = dm_crtc_destroy_state,
4997 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4998 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4999 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5000 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5001 .enable_vblank = dm_enable_vblank,
5002 .disable_vblank = dm_disable_vblank,
5003 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5004 };
5005
5006 static enum drm_connector_status
5007 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5008 {
5009 bool connected;
5010 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5011
5012 /*
5013 * Notes:
5014 * 1. This interface is NOT called in context of HPD irq.
5015 	 * 2. This interface *is called* in the context of a user-mode ioctl,
5016 	 *    which makes it a bad place for *any* MST-related activity.
5017 */
5018
5019 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5020 !aconnector->fake_enable)
5021 connected = (aconnector->dc_sink != NULL);
5022 else
5023 connected = (aconnector->base.force == DRM_FORCE_ON);
5024
5025 update_subconnector_property(aconnector);
5026
5027 return (connected ? connector_status_connected :
5028 connector_status_disconnected);
5029 }
5030
5031 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5032 struct drm_connector_state *connector_state,
5033 struct drm_property *property,
5034 uint64_t val)
5035 {
5036 struct drm_device *dev = connector->dev;
5037 struct amdgpu_device *adev = drm_to_adev(dev);
5038 struct dm_connector_state *dm_old_state =
5039 to_dm_connector_state(connector->state);
5040 struct dm_connector_state *dm_new_state =
5041 to_dm_connector_state(connector_state);
5042
5043 int ret = -EINVAL;
5044
5045 if (property == dev->mode_config.scaling_mode_property) {
5046 enum amdgpu_rmx_type rmx_type;
5047
5048 switch (val) {
5049 case DRM_MODE_SCALE_CENTER:
5050 rmx_type = RMX_CENTER;
5051 break;
5052 case DRM_MODE_SCALE_ASPECT:
5053 rmx_type = RMX_ASPECT;
5054 break;
5055 case DRM_MODE_SCALE_FULLSCREEN:
5056 rmx_type = RMX_FULL;
5057 break;
5058 case DRM_MODE_SCALE_NONE:
5059 default:
5060 rmx_type = RMX_OFF;
5061 break;
5062 }
5063
5064 if (dm_old_state->scaling == rmx_type)
5065 return 0;
5066
5067 dm_new_state->scaling = rmx_type;
5068 ret = 0;
5069 } else if (property == adev->mode_info.underscan_hborder_property) {
5070 dm_new_state->underscan_hborder = val;
5071 ret = 0;
5072 } else if (property == adev->mode_info.underscan_vborder_property) {
5073 dm_new_state->underscan_vborder = val;
5074 ret = 0;
5075 } else if (property == adev->mode_info.underscan_property) {
5076 dm_new_state->underscan_enable = val;
5077 ret = 0;
5078 } else if (property == adev->mode_info.abm_level_property) {
5079 dm_new_state->abm_level = val;
5080 ret = 0;
5081 }
5082
5083 return ret;
5084 }
5085
5086 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5087 const struct drm_connector_state *state,
5088 struct drm_property *property,
5089 uint64_t *val)
5090 {
5091 struct drm_device *dev = connector->dev;
5092 struct amdgpu_device *adev = drm_to_adev(dev);
5093 struct dm_connector_state *dm_state =
5094 to_dm_connector_state(state);
5095 int ret = -EINVAL;
5096
5097 if (property == dev->mode_config.scaling_mode_property) {
5098 switch (dm_state->scaling) {
5099 case RMX_CENTER:
5100 *val = DRM_MODE_SCALE_CENTER;
5101 break;
5102 case RMX_ASPECT:
5103 *val = DRM_MODE_SCALE_ASPECT;
5104 break;
5105 case RMX_FULL:
5106 *val = DRM_MODE_SCALE_FULLSCREEN;
5107 break;
5108 case RMX_OFF:
5109 default:
5110 *val = DRM_MODE_SCALE_NONE;
5111 break;
5112 }
5113 ret = 0;
5114 } else if (property == adev->mode_info.underscan_hborder_property) {
5115 *val = dm_state->underscan_hborder;
5116 ret = 0;
5117 } else if (property == adev->mode_info.underscan_vborder_property) {
5118 *val = dm_state->underscan_vborder;
5119 ret = 0;
5120 } else if (property == adev->mode_info.underscan_property) {
5121 *val = dm_state->underscan_enable;
5122 ret = 0;
5123 } else if (property == adev->mode_info.abm_level_property) {
5124 *val = dm_state->abm_level;
5125 ret = 0;
5126 }
5127
5128 return ret;
5129 }
5130
5131 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5132 {
5133 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5134
5135 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5136 }
5137
5138 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5139 {
5140 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5141 const struct dc_link *link = aconnector->dc_link;
5142 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5143 struct amdgpu_display_manager *dm = &adev->dm;
5144
5145 /*
5146 	 * Call only if mst_mgr was initialized before, since it's not done
5147 * for all connector types.
5148 */
5149 if (aconnector->mst_mgr.dev)
5150 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5151
5152 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5153 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5154
5155 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5156 link->type != dc_connection_none &&
5157 dm->backlight_dev) {
5158 backlight_device_unregister(dm->backlight_dev);
5159 dm->backlight_dev = NULL;
5160 }
5161 #endif
5162
5163 if (aconnector->dc_em_sink)
5164 dc_sink_release(aconnector->dc_em_sink);
5165 aconnector->dc_em_sink = NULL;
5166 if (aconnector->dc_sink)
5167 dc_sink_release(aconnector->dc_sink);
5168 aconnector->dc_sink = NULL;
5169
5170 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5171 drm_connector_unregister(connector);
5172 drm_connector_cleanup(connector);
5173 if (aconnector->i2c) {
5174 i2c_del_adapter(&aconnector->i2c->base);
5175 kfree(aconnector->i2c);
5176 }
5177 kfree(aconnector->dm_dp_aux.aux.name);
5178
5179 kfree(connector);
5180 }
5181
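/*
 * Reset the connector to driver defaults: free the current DM connector state
 * and install a fresh one with RMX_OFF scaling, underscan disabled,
 * max_requested_bpc of 8 and, for eDP connectors, the ABM level taken from
 * the amdgpu_dm_abm_level module parameter.
 */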
5182 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5183 {
5184 struct dm_connector_state *state =
5185 to_dm_connector_state(connector->state);
5186
5187 if (connector->state)
5188 __drm_atomic_helper_connector_destroy_state(connector->state);
5189
5190 kfree(state);
5191
5192 state = kzalloc(sizeof(*state), GFP_KERNEL);
5193
5194 if (state) {
5195 state->scaling = RMX_OFF;
5196 state->underscan_enable = false;
5197 state->underscan_hborder = 0;
5198 state->underscan_vborder = 0;
5199 state->base.max_requested_bpc = 8;
5200 state->vcpi_slots = 0;
5201 state->pbn = 0;
5202 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5203 state->abm_level = amdgpu_dm_abm_level;
5204
5205 __drm_atomic_helper_connector_reset(connector, &state->base);
5206 }
5207 }
5208
5209 struct drm_connector_state *
5210 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5211 {
5212 struct dm_connector_state *state =
5213 to_dm_connector_state(connector->state);
5214
5215 struct dm_connector_state *new_state =
5216 kmemdup(state, sizeof(*state), GFP_KERNEL);
5217
5218 if (!new_state)
5219 return NULL;
5220
5221 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5222
5223 new_state->freesync_capable = state->freesync_capable;
5224 new_state->abm_level = state->abm_level;
5225 new_state->scaling = state->scaling;
5226 new_state->underscan_enable = state->underscan_enable;
5227 new_state->underscan_hborder = state->underscan_hborder;
5228 new_state->underscan_vborder = state->underscan_vborder;
5229 new_state->vcpi_slots = state->vcpi_slots;
5230 new_state->pbn = state->pbn;
5231 return &new_state->base;
5232 }
5233
5234 static int
5235 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5236 {
5237 struct amdgpu_dm_connector *amdgpu_dm_connector =
5238 to_amdgpu_dm_connector(connector);
5239 int r;
5240
5241 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5242 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5243 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5244 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5245 if (r)
5246 return r;
5247 }
5248
5249 #if defined(CONFIG_DEBUG_FS)
5250 connector_debugfs_init(amdgpu_dm_connector);
5251 #endif
5252
5253 return 0;
5254 }
5255
5256 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5257 .reset = amdgpu_dm_connector_funcs_reset,
5258 .detect = amdgpu_dm_connector_detect,
5259 .fill_modes = drm_helper_probe_single_connector_modes,
5260 .destroy = amdgpu_dm_connector_destroy,
5261 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5262 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5263 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5264 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5265 .late_register = amdgpu_dm_connector_late_register,
5266 .early_unregister = amdgpu_dm_connector_unregister
5267 };
5268
5269 static int get_modes(struct drm_connector *connector)
5270 {
5271 return amdgpu_dm_connector_get_modes(connector);
5272 }
5273
5274 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5275 {
5276 struct dc_sink_init_data init_params = {
5277 .link = aconnector->dc_link,
5278 .sink_signal = SIGNAL_TYPE_VIRTUAL
5279 };
5280 struct edid *edid;
5281
5282 if (!aconnector->base.edid_blob_ptr) {
5283 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5284 aconnector->base.name);
5285
5286 aconnector->base.force = DRM_FORCE_OFF;
5287 aconnector->base.override_edid = false;
5288 return;
5289 }
5290
5291 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5292
5293 aconnector->edid = edid;
5294
5295 aconnector->dc_em_sink = dc_link_add_remote_sink(
5296 aconnector->dc_link,
5297 (uint8_t *)edid,
5298 (edid->extensions + 1) * EDID_LENGTH,
5299 &init_params);
5300
5301 if (aconnector->base.force == DRM_FORCE_ON) {
5302 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5303 aconnector->dc_link->local_sink :
5304 aconnector->dc_em_sink;
5305 dc_sink_retain(aconnector->dc_sink);
5306 }
5307 }
5308
5309 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5310 {
5311 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5312
5313 /*
5314 	 * In case of a headless boot with force on for a DP managed connector,
5315 	 * those settings have to be != 0 to get an initial modeset.
5316 */
5317 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5318 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5319 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5320 }
5321
5322
5323 aconnector->base.override_edid = true;
5324 create_eml_sink(aconnector);
5325 }
5326
5327 static struct dc_stream_state *
5328 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5329 const struct drm_display_mode *drm_mode,
5330 const struct dm_connector_state *dm_state,
5331 const struct dc_stream_state *old_stream)
5332 {
5333 struct drm_connector *connector = &aconnector->base;
5334 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5335 struct dc_stream_state *stream;
5336 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5337 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5338 enum dc_status dc_result = DC_OK;
5339
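	/*
	 * Validation strategy: build a stream at the requested bpc and ask DC
	 * to validate it; on failure, lower the depth by 2 bpc at a time down
	 * to a floor of 6 bpc. If the encoder still rejects the stream, the
	 * caller path below retries once with YCbCr 4:2:0 forced, which
	 * roughly halves the required link bandwidth.
	 */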
5340 do {
5341 stream = create_stream_for_sink(aconnector, drm_mode,
5342 dm_state, old_stream,
5343 requested_bpc);
5344 if (stream == NULL) {
5345 DRM_ERROR("Failed to create stream for sink!\n");
5346 break;
5347 }
5348
5349 dc_result = dc_validate_stream(adev->dm.dc, stream);
5350
5351 if (dc_result != DC_OK) {
5352 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5353 drm_mode->hdisplay,
5354 drm_mode->vdisplay,
5355 drm_mode->clock,
5356 dc_result,
5357 dc_status_to_str(dc_result));
5358
5359 dc_stream_release(stream);
5360 stream = NULL;
5361 requested_bpc -= 2; /* lower bpc to retry validation */
5362 }
5363
5364 } while (stream == NULL && requested_bpc >= 6);
5365
5366 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5367 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5368
5369 aconnector->force_yuv420_output = true;
5370 stream = create_validate_stream_for_sink(aconnector, drm_mode,
5371 dm_state, old_stream);
5372 aconnector->force_yuv420_output = false;
5373 }
5374
5375 return stream;
5376 }
5377
5378 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5379 struct drm_display_mode *mode)
5380 {
5381 int result = MODE_ERROR;
5382 struct dc_sink *dc_sink;
5383 /* TODO: Unhardcode stream count */
5384 struct dc_stream_state *stream;
5385 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5386
5387 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5388 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5389 return result;
5390
5391 /*
5392 	 * Only run this the first time mode_valid is called to initialize
5393 * EDID mgmt
5394 */
5395 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5396 !aconnector->dc_em_sink)
5397 handle_edid_mgmt(aconnector);
5398
5399 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5400
5401 if (dc_sink == NULL) {
5402 DRM_ERROR("dc_sink is NULL!\n");
5403 goto fail;
5404 }
5405
5406 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5407 if (stream) {
5408 dc_stream_release(stream);
5409 result = MODE_OK;
5410 }
5411
5412 fail:
5413 	/* TODO: error handling */
5414 return result;
5415 }
5416
5417 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5418 struct dc_info_packet *out)
5419 {
5420 struct hdmi_drm_infoframe frame;
5421 unsigned char buf[30]; /* 26 + 4 */
5422 ssize_t len;
5423 int ret, i;
5424
5425 memset(out, 0, sizeof(*out));
5426
5427 if (!state->hdr_output_metadata)
5428 return 0;
5429
5430 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5431 if (ret)
5432 return ret;
5433
5434 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5435 if (len < 0)
5436 return (int)len;
5437
5438 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5439 if (len != 30)
5440 return -EINVAL;
5441
5442 /* Prepare the infopacket for DC. */
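	/*
	 * buf[] now holds the packed Dynamic Range and Mastering InfoFrame:
	 * buf[0..2] are the InfoFrame header (type 0x87, version, length),
	 * buf[3] is the checksum and buf[4..29] are the 26 payload bytes.
	 * For HDMI the InfoFrame header values are reused directly; for DP the
	 * same payload is wrapped in an SDP header (HB0..HB3), with the
	 * InfoFrame version and length moved into the first two data bytes.
	 */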
5443 switch (state->connector->connector_type) {
5444 case DRM_MODE_CONNECTOR_HDMIA:
5445 out->hb0 = 0x87; /* type */
5446 out->hb1 = 0x01; /* version */
5447 out->hb2 = 0x1A; /* length */
5448 out->sb[0] = buf[3]; /* checksum */
5449 i = 1;
5450 break;
5451
5452 case DRM_MODE_CONNECTOR_DisplayPort:
5453 case DRM_MODE_CONNECTOR_eDP:
5454 out->hb0 = 0x00; /* sdp id, zero */
5455 out->hb1 = 0x87; /* type */
5456 out->hb2 = 0x1D; /* payload len - 1 */
5457 out->hb3 = (0x13 << 2); /* sdp version */
5458 out->sb[0] = 0x01; /* version */
5459 out->sb[1] = 0x1A; /* length */
5460 i = 2;
5461 break;
5462
5463 default:
5464 return -EINVAL;
5465 }
5466
5467 memcpy(&out->sb[i], &buf[4], 26);
5468 out->valid = true;
5469
5470 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5471 sizeof(out->sb), false);
5472
5473 return 0;
5474 }
5475
5476 static bool
5477 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5478 const struct drm_connector_state *new_state)
5479 {
5480 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5481 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5482
5483 if (old_blob != new_blob) {
5484 if (old_blob && new_blob &&
5485 old_blob->length == new_blob->length)
5486 return memcmp(old_blob->data, new_blob->data,
5487 old_blob->length);
5488
5489 return true;
5490 }
5491
5492 return false;
5493 }
5494
5495 static int
5496 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5497 struct drm_atomic_state *state)
5498 {
5499 struct drm_connector_state *new_con_state =
5500 drm_atomic_get_new_connector_state(state, conn);
5501 struct drm_connector_state *old_con_state =
5502 drm_atomic_get_old_connector_state(state, conn);
5503 struct drm_crtc *crtc = new_con_state->crtc;
5504 struct drm_crtc_state *new_crtc_state;
5505 int ret;
5506
5507 if (!crtc)
5508 return 0;
5509
5510 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5511 struct dc_info_packet hdr_infopacket;
5512
5513 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5514 if (ret)
5515 return ret;
5516
5517 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5518 if (IS_ERR(new_crtc_state))
5519 return PTR_ERR(new_crtc_state);
5520
5521 /*
5522 * DC considers the stream backends changed if the
5523 * static metadata changes. Forcing the modeset also
5524 * gives a simple way for userspace to switch from
5525 * 8bpc to 10bpc when setting the metadata to enter
5526 * or exit HDR.
5527 *
5528 * Changing the static metadata after it's been
5529 * set is permissible, however. So only force a
5530 * modeset if we're entering or exiting HDR.
5531 */
5532 new_crtc_state->mode_changed =
5533 !old_con_state->hdr_output_metadata ||
5534 !new_con_state->hdr_output_metadata;
5535 }
5536
5537 return 0;
5538 }
5539
5540 static const struct drm_connector_helper_funcs
5541 amdgpu_dm_connector_helper_funcs = {
5542 /*
5543 	 * If hotplugging a second, bigger display in FB Con mode, bigger resolution
5544 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5545 	 * are missing after the user starts lightdm. So we need to renew the modes
5546 	 * list in the get_modes callback, not just return the modes count
5547 */
5548 .get_modes = get_modes,
5549 .mode_valid = amdgpu_dm_connector_mode_valid,
5550 .atomic_check = amdgpu_dm_connector_atomic_check,
5551 };
5552
5553 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5554 {
5555 }
5556
5557 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5558 {
5559 struct drm_atomic_state *state = new_crtc_state->state;
5560 struct drm_plane *plane;
5561 int num_active = 0;
5562
5563 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5564 struct drm_plane_state *new_plane_state;
5565
5566 /* Cursor planes are "fake". */
5567 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5568 continue;
5569
5570 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5571
5572 if (!new_plane_state) {
5573 /*
5574 			 * The plane is enabled on the CRTC and hasn't changed
5575 * state. This means that it previously passed
5576 * validation and is therefore enabled.
5577 */
5578 num_active += 1;
5579 continue;
5580 }
5581
5582 /* We need a framebuffer to be considered enabled. */
5583 num_active += (new_plane_state->fb != NULL);
5584 }
5585
5586 return num_active;
5587 }
5588
5589 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5590 struct drm_crtc_state *new_crtc_state)
5591 {
5592 struct dm_crtc_state *dm_new_crtc_state =
5593 to_dm_crtc_state(new_crtc_state);
5594
5595 dm_new_crtc_state->active_planes = 0;
5596
5597 if (!dm_new_crtc_state->stream)
5598 return;
5599
5600 dm_new_crtc_state->active_planes =
5601 count_crtc_active_planes(new_crtc_state);
5602 }
5603
5604 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5605 struct drm_crtc_state *state)
5606 {
5607 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5608 struct dc *dc = adev->dm.dc;
5609 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5610 int ret = -EINVAL;
5611
5612 dm_update_crtc_active_planes(crtc, state);
5613
5614 if (unlikely(!dm_crtc_state->stream &&
5615 modeset_required(state, NULL, dm_crtc_state->stream))) {
5616 WARN_ON(1);
5617 return ret;
5618 }
5619
5620 /*
5621 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5622 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5623 * planes are disabled, which is not supported by the hardware. And there is legacy
5624 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5625 */
5626 if (state->enable &&
5627 !(state->plane_mask & drm_plane_mask(crtc->primary)))
5628 return -EINVAL;
5629
5630 /* In some use cases, like reset, no stream is attached */
5631 if (!dm_crtc_state->stream)
5632 return 0;
5633
5634 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5635 return 0;
5636
5637 return ret;
5638 }
5639
5640 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5641 const struct drm_display_mode *mode,
5642 struct drm_display_mode *adjusted_mode)
5643 {
5644 return true;
5645 }
5646
5647 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5648 .disable = dm_crtc_helper_disable,
5649 .atomic_check = dm_crtc_helper_atomic_check,
5650 .mode_fixup = dm_crtc_helper_mode_fixup,
5651 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5652 };
5653
5654 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5655 {
5656
5657 }
5658
5659 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5660 {
5661 switch (display_color_depth) {
5662 case COLOR_DEPTH_666:
5663 return 6;
5664 case COLOR_DEPTH_888:
5665 return 8;
5666 case COLOR_DEPTH_101010:
5667 return 10;
5668 case COLOR_DEPTH_121212:
5669 return 12;
5670 case COLOR_DEPTH_141414:
5671 return 14;
5672 case COLOR_DEPTH_161616:
5673 return 16;
5674 default:
5675 break;
5676 }
5677 return 0;
5678 }
5679
5680 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5681 struct drm_crtc_state *crtc_state,
5682 struct drm_connector_state *conn_state)
5683 {
5684 struct drm_atomic_state *state = crtc_state->state;
5685 struct drm_connector *connector = conn_state->connector;
5686 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5687 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5688 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5689 struct drm_dp_mst_topology_mgr *mst_mgr;
5690 struct drm_dp_mst_port *mst_port;
5691 enum dc_color_depth color_depth;
5692 int clock, bpp = 0;
5693 bool is_y420 = false;
5694
5695 if (!aconnector->port || !aconnector->dc_sink)
5696 return 0;
5697
5698 mst_port = aconnector->port;
5699 mst_mgr = &aconnector->mst_port->mst_mgr;
5700
5701 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5702 return 0;
5703
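	/*
	 * Compute the payload bandwidth number (PBN) for this stream from the
	 * adjusted mode clock and the effective bits per pixel, then ask the
	 * MST atomic helpers to find enough VCPI time slots on the topology.
	 * When the state is a duplicated one (e.g. on suspend/resume), the
	 * previously computed PBN is reused.
	 */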
5704 if (!state->duplicated) {
5705 int max_bpc = conn_state->max_requested_bpc;
5706 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5707 aconnector->force_yuv420_output;
5708 color_depth = convert_color_depth_from_display_info(connector,
5709 is_y420,
5710 max_bpc);
5711 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5712 clock = adjusted_mode->clock;
5713 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5714 }
5715 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5716 mst_mgr,
5717 mst_port,
5718 dm_new_connector_state->pbn,
5719 dm_mst_get_pbn_divider(aconnector->dc_link));
5720 if (dm_new_connector_state->vcpi_slots < 0) {
5721 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5722 return dm_new_connector_state->vcpi_slots;
5723 }
5724 return 0;
5725 }
5726
5727 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5728 .disable = dm_encoder_helper_disable,
5729 .atomic_check = dm_encoder_helper_atomic_check
5730 };
5731
5732 #if defined(CONFIG_DRM_AMD_DC_DCN)
5733 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5734 struct dc_state *dc_state)
5735 {
5736 struct dc_stream_state *stream = NULL;
5737 struct drm_connector *connector;
5738 struct drm_connector_state *new_con_state, *old_con_state;
5739 struct amdgpu_dm_connector *aconnector;
5740 struct dm_connector_state *dm_conn_state;
5741 int i, j, clock, bpp;
5742 int vcpi, pbn_div, pbn = 0;
5743
5744 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5745
5746 aconnector = to_amdgpu_dm_connector(connector);
5747
5748 if (!aconnector->port)
5749 continue;
5750
5751 if (!new_con_state || !new_con_state->crtc)
5752 continue;
5753
5754 dm_conn_state = to_dm_connector_state(new_con_state);
5755
5756 for (j = 0; j < dc_state->stream_count; j++) {
5757 stream = dc_state->streams[j];
5758 if (!stream)
5759 continue;
5760
5761 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5762 break;
5763
5764 stream = NULL;
5765 }
5766
5767 if (!stream)
5768 continue;
5769
5770 if (stream->timing.flags.DSC != 1) {
5771 drm_dp_mst_atomic_enable_dsc(state,
5772 aconnector->port,
5773 dm_conn_state->pbn,
5774 0,
5775 false);
5776 continue;
5777 }
5778
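		/*
		 * DSC is enabled for this stream: recompute the PBN from the
		 * DSC target rate. dsc_cfg.bits_per_pixel is expressed in
		 * 1/16 bpp units, which is why drm_dp_calc_pbn_mode() is
		 * called with the dsc flag set, and the VCPI slot count is
		 * recalculated using the link's PBN-per-slot divider.
		 */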
5779 pbn_div = dm_mst_get_pbn_divider(stream->link);
5780 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5781 clock = stream->timing.pix_clk_100hz / 10;
5782 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5783 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5784 aconnector->port,
5785 pbn, pbn_div,
5786 true);
5787 if (vcpi < 0)
5788 return vcpi;
5789
5790 dm_conn_state->pbn = pbn;
5791 dm_conn_state->vcpi_slots = vcpi;
5792 }
5793 return 0;
5794 }
5795 #endif
5796
5797 static void dm_drm_plane_reset(struct drm_plane *plane)
5798 {
5799 struct dm_plane_state *amdgpu_state = NULL;
5800
5801 if (plane->state)
5802 plane->funcs->atomic_destroy_state(plane, plane->state);
5803
5804 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5805 WARN_ON(amdgpu_state == NULL);
5806
5807 if (amdgpu_state)
5808 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5809 }
5810
5811 static struct drm_plane_state *
5812 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5813 {
5814 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5815
5816 old_dm_plane_state = to_dm_plane_state(plane->state);
5817 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5818 if (!dm_plane_state)
5819 return NULL;
5820
5821 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5822
5823 if (old_dm_plane_state->dc_state) {
5824 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5825 dc_plane_state_retain(dm_plane_state->dc_state);
5826 }
5827
5828 /* Framebuffer hasn't been updated yet, so retain old flags. */
5829 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5830 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5831
5832 return &dm_plane_state->base;
5833 }
5834
5835 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5836 struct drm_plane_state *state)
5837 {
5838 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5839
5840 if (dm_plane_state->dc_state)
5841 dc_plane_state_release(dm_plane_state->dc_state);
5842
5843 drm_atomic_helper_plane_destroy_state(plane, state);
5844 }
5845
5846 static const struct drm_plane_funcs dm_plane_funcs = {
5847 .update_plane = drm_atomic_helper_update_plane,
5848 .disable_plane = drm_atomic_helper_disable_plane,
5849 .destroy = drm_primary_helper_destroy,
5850 .reset = dm_drm_plane_reset,
5851 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5852 .atomic_destroy_state = dm_drm_plane_destroy_state,
5853 };
5854
5855 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5856 struct drm_plane_state *new_state)
5857 {
5858 struct amdgpu_framebuffer *afb;
5859 struct drm_gem_object *obj;
5860 struct amdgpu_device *adev;
5861 struct amdgpu_bo *rbo;
5862 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5863 struct list_head list;
5864 struct ttm_validate_buffer tv;
5865 struct ww_acquire_ctx ticket;
5866 uint32_t domain;
5867 int r;
5868
5869 if (!new_state->fb) {
5870 DRM_DEBUG_DRIVER("No FB bound\n");
5871 return 0;
5872 }
5873
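	/*
	 * Pin the framebuffer BO for scanout: reserve it through the TTM
	 * execbuf utils, pin it in a domain the display engine can scan out
	 * from (VRAM for cursors, otherwise whatever
	 * amdgpu_display_supported_domains() allows), make sure it has a GART
	 * mapping, then drop the reservation. The pin is released again in
	 * dm_plane_helper_cleanup_fb().
	 */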
5874 afb = to_amdgpu_framebuffer(new_state->fb);
5875 obj = new_state->fb->obj[0];
5876 rbo = gem_to_amdgpu_bo(obj);
5877 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5878 INIT_LIST_HEAD(&list);
5879
5880 tv.bo = &rbo->tbo;
5881 tv.num_shared = 1;
5882 list_add(&tv.head, &list);
5883
5884 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5885 if (r) {
5886 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5887 return r;
5888 }
5889
5890 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5891 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5892 else
5893 domain = AMDGPU_GEM_DOMAIN_VRAM;
5894
5895 r = amdgpu_bo_pin(rbo, domain);
5896 if (unlikely(r != 0)) {
5897 if (r != -ERESTARTSYS)
5898 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5899 ttm_eu_backoff_reservation(&ticket, &list);
5900 return r;
5901 }
5902
5903 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5904 if (unlikely(r != 0)) {
5905 amdgpu_bo_unpin(rbo);
5906 ttm_eu_backoff_reservation(&ticket, &list);
5907 DRM_ERROR("%p bind failed\n", rbo);
5908 return r;
5909 }
5910
5911 ttm_eu_backoff_reservation(&ticket, &list);
5912
5913 afb->address = amdgpu_bo_gpu_offset(rbo);
5914
5915 amdgpu_bo_ref(rbo);
5916
5917 	/*
5918 * We don't do surface updates on planes that have been newly created,
5919 * but we also don't have the afb->address during atomic check.
5920 *
5921 * Fill in buffer attributes depending on the address here, but only on
5922 * newly created planes since they're not being used by DC yet and this
5923 * won't modify global state.
5924 */
5925 dm_plane_state_old = to_dm_plane_state(plane->state);
5926 dm_plane_state_new = to_dm_plane_state(new_state);
5927
5928 if (dm_plane_state_new->dc_state &&
5929 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5930 struct dc_plane_state *plane_state =
5931 dm_plane_state_new->dc_state;
5932 bool force_disable_dcc = !plane_state->dcc.enable;
5933
5934 fill_plane_buffer_attributes(
5935 adev, afb, plane_state->format, plane_state->rotation,
5936 dm_plane_state_new->tiling_flags,
5937 &plane_state->tiling_info, &plane_state->plane_size,
5938 &plane_state->dcc, &plane_state->address,
5939 dm_plane_state_new->tmz_surface, force_disable_dcc);
5940 }
5941
5942 return 0;
5943 }
5944
5945 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5946 struct drm_plane_state *old_state)
5947 {
5948 struct amdgpu_bo *rbo;
5949 int r;
5950
5951 if (!old_state->fb)
5952 return;
5953
5954 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5955 r = amdgpu_bo_reserve(rbo, false);
5956 if (unlikely(r)) {
5957 DRM_ERROR("failed to reserve rbo before unpin\n");
5958 return;
5959 }
5960
5961 amdgpu_bo_unpin(rbo);
5962 amdgpu_bo_unreserve(rbo);
5963 amdgpu_bo_unref(&rbo);
5964 }
5965
5966 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5967 struct drm_crtc_state *new_crtc_state)
5968 {
5969 int max_downscale = 0;
5970 int max_upscale = INT_MAX;
5971
5972 /* TODO: These should be checked against DC plane caps */
5973 return drm_atomic_helper_check_plane_state(
5974 state, new_crtc_state, max_downscale, max_upscale, true, true);
5975 }
5976
5977 static int dm_plane_atomic_check(struct drm_plane *plane,
5978 struct drm_plane_state *state)
5979 {
5980 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5981 struct dc *dc = adev->dm.dc;
5982 struct dm_plane_state *dm_plane_state;
5983 struct dc_scaling_info scaling_info;
5984 struct drm_crtc_state *new_crtc_state;
5985 int ret;
5986
5987 dm_plane_state = to_dm_plane_state(state);
5988
5989 if (!dm_plane_state->dc_state)
5990 return 0;
5991
5992 new_crtc_state =
5993 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5994 if (!new_crtc_state)
5995 return -EINVAL;
5996
5997 ret = dm_plane_helper_check_state(state, new_crtc_state);
5998 if (ret)
5999 return ret;
6000
6001 ret = fill_dc_scaling_info(state, &scaling_info);
6002 if (ret)
6003 return ret;
6004
6005 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6006 return 0;
6007
6008 return -EINVAL;
6009 }
6010
6011 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6012 struct drm_plane_state *new_plane_state)
6013 {
6014 /* Only support async updates on cursor planes. */
6015 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6016 return -EINVAL;
6017
6018 return 0;
6019 }
6020
6021 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6022 struct drm_plane_state *new_state)
6023 {
6024 struct drm_plane_state *old_state =
6025 drm_atomic_get_old_plane_state(new_state->state, plane);
6026
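	/*
	 * Async (cursor) updates bypass the normal commit machinery: copy the
	 * new framebuffer and the src/crtc rectangles straight into the
	 * current plane state and program the cursor immediately, without
	 * waiting for a full atomic commit.
	 */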
6027 swap(plane->state->fb, new_state->fb);
6028
6029 plane->state->src_x = new_state->src_x;
6030 plane->state->src_y = new_state->src_y;
6031 plane->state->src_w = new_state->src_w;
6032 plane->state->src_h = new_state->src_h;
6033 plane->state->crtc_x = new_state->crtc_x;
6034 plane->state->crtc_y = new_state->crtc_y;
6035 plane->state->crtc_w = new_state->crtc_w;
6036 plane->state->crtc_h = new_state->crtc_h;
6037
6038 handle_cursor_update(plane, old_state);
6039 }
6040
6041 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6042 .prepare_fb = dm_plane_helper_prepare_fb,
6043 .cleanup_fb = dm_plane_helper_cleanup_fb,
6044 .atomic_check = dm_plane_atomic_check,
6045 .atomic_async_check = dm_plane_atomic_async_check,
6046 .atomic_async_update = dm_plane_atomic_async_update
6047 };
6048
6049 /*
6050  * TODO: these are currently initialized to RGB formats only.
6051  * For future use cases we should either initialize them dynamically based on
6052  * plane capabilities, or initialize this array to all formats, so the internal
6053  * drm check will succeed, and let DC implement the proper check.
6054 */
6055 static const uint32_t rgb_formats[] = {
6056 DRM_FORMAT_XRGB8888,
6057 DRM_FORMAT_ARGB8888,
6058 DRM_FORMAT_RGBA8888,
6059 DRM_FORMAT_XRGB2101010,
6060 DRM_FORMAT_XBGR2101010,
6061 DRM_FORMAT_ARGB2101010,
6062 DRM_FORMAT_ABGR2101010,
6063 DRM_FORMAT_XBGR8888,
6064 DRM_FORMAT_ABGR8888,
6065 DRM_FORMAT_RGB565,
6066 };
6067
6068 static const uint32_t overlay_formats[] = {
6069 DRM_FORMAT_XRGB8888,
6070 DRM_FORMAT_ARGB8888,
6071 DRM_FORMAT_RGBA8888,
6072 DRM_FORMAT_XBGR8888,
6073 DRM_FORMAT_ABGR8888,
6074 DRM_FORMAT_RGB565
6075 };
6076
6077 static const u32 cursor_formats[] = {
6078 DRM_FORMAT_ARGB8888
6079 };
6080
6081 static int get_plane_formats(const struct drm_plane *plane,
6082 const struct dc_plane_cap *plane_cap,
6083 uint32_t *formats, int max_formats)
6084 {
6085 int i, num_formats = 0;
6086
6087 /*
6088 * TODO: Query support for each group of formats directly from
6089 * DC plane caps. This will require adding more formats to the
6090 * caps list.
6091 */
6092
6093 switch (plane->type) {
6094 case DRM_PLANE_TYPE_PRIMARY:
6095 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6096 if (num_formats >= max_formats)
6097 break;
6098
6099 formats[num_formats++] = rgb_formats[i];
6100 }
6101
6102 if (plane_cap && plane_cap->pixel_format_support.nv12)
6103 formats[num_formats++] = DRM_FORMAT_NV12;
6104 if (plane_cap && plane_cap->pixel_format_support.p010)
6105 formats[num_formats++] = DRM_FORMAT_P010;
6106 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6107 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6108 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6109 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6110 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6111 }
6112 break;
6113
6114 case DRM_PLANE_TYPE_OVERLAY:
6115 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6116 if (num_formats >= max_formats)
6117 break;
6118
6119 formats[num_formats++] = overlay_formats[i];
6120 }
6121 break;
6122
6123 case DRM_PLANE_TYPE_CURSOR:
6124 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6125 if (num_formats >= max_formats)
6126 break;
6127
6128 formats[num_formats++] = cursor_formats[i];
6129 }
6130 break;
6131 }
6132
6133 return num_formats;
6134 }
6135
6136 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6137 struct drm_plane *plane,
6138 unsigned long possible_crtcs,
6139 const struct dc_plane_cap *plane_cap)
6140 {
6141 uint32_t formats[32];
6142 int num_formats;
6143 int res = -EPERM;
6144 unsigned int supported_rotations;
6145
6146 num_formats = get_plane_formats(plane, plane_cap, formats,
6147 ARRAY_SIZE(formats));
6148
6149 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6150 &dm_plane_funcs, formats, num_formats,
6151 NULL, plane->type, NULL);
6152 if (res)
6153 return res;
6154
6155 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6156 plane_cap && plane_cap->per_pixel_alpha) {
6157 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6158 BIT(DRM_MODE_BLEND_PREMULTI);
6159
6160 drm_plane_create_alpha_property(plane);
6161 drm_plane_create_blend_mode_property(plane, blend_caps);
6162 }
6163
6164 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6165 plane_cap &&
6166 (plane_cap->pixel_format_support.nv12 ||
6167 plane_cap->pixel_format_support.p010)) {
6168 /* This only affects YUV formats. */
6169 drm_plane_create_color_properties(
6170 plane,
6171 BIT(DRM_COLOR_YCBCR_BT601) |
6172 BIT(DRM_COLOR_YCBCR_BT709) |
6173 BIT(DRM_COLOR_YCBCR_BT2020),
6174 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6175 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6176 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6177 }
6178
6179 supported_rotations =
6180 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6181 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6182
6183 if (dm->adev->asic_type >= CHIP_BONAIRE)
6184 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6185 supported_rotations);
6186
6187 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6188
6189 /* Create (reset) the plane state */
6190 if (plane->funcs->reset)
6191 plane->funcs->reset(plane);
6192
6193 return 0;
6194 }
6195
6196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6197 struct drm_plane *plane,
6198 uint32_t crtc_index)
6199 {
6200 struct amdgpu_crtc *acrtc = NULL;
6201 struct drm_plane *cursor_plane;
6202
6203 int res = -ENOMEM;
6204
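	/*
	 * Each CRTC gets its own dedicated cursor plane, created here and
	 * handed to drm_crtc_init_with_planes() together with the primary
	 * plane supplied by the caller.
	 */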
6205 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6206 if (!cursor_plane)
6207 goto fail;
6208
6209 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6210 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6211
6212 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6213 if (!acrtc)
6214 goto fail;
6215
6216 res = drm_crtc_init_with_planes(
6217 dm->ddev,
6218 &acrtc->base,
6219 plane,
6220 cursor_plane,
6221 &amdgpu_dm_crtc_funcs, NULL);
6222
6223 if (res)
6224 goto fail;
6225
6226 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6227
6228 /* Create (reset) the plane state */
6229 if (acrtc->base.funcs->reset)
6230 acrtc->base.funcs->reset(&acrtc->base);
6231
6232 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6233 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6234
6235 acrtc->crtc_id = crtc_index;
6236 acrtc->base.enabled = false;
6237 acrtc->otg_inst = -1;
6238
6239 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6240 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6241 true, MAX_COLOR_LUT_ENTRIES);
6242 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6243
6244 return 0;
6245
6246 fail:
6247 kfree(acrtc);
6248 kfree(cursor_plane);
6249 return res;
6250 }
6251
6252
6253 static int to_drm_connector_type(enum signal_type st)
6254 {
6255 switch (st) {
6256 case SIGNAL_TYPE_HDMI_TYPE_A:
6257 return DRM_MODE_CONNECTOR_HDMIA;
6258 case SIGNAL_TYPE_EDP:
6259 return DRM_MODE_CONNECTOR_eDP;
6260 case SIGNAL_TYPE_LVDS:
6261 return DRM_MODE_CONNECTOR_LVDS;
6262 case SIGNAL_TYPE_RGB:
6263 return DRM_MODE_CONNECTOR_VGA;
6264 case SIGNAL_TYPE_DISPLAY_PORT:
6265 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6266 return DRM_MODE_CONNECTOR_DisplayPort;
6267 case SIGNAL_TYPE_DVI_DUAL_LINK:
6268 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6269 return DRM_MODE_CONNECTOR_DVID;
6270 case SIGNAL_TYPE_VIRTUAL:
6271 return DRM_MODE_CONNECTOR_VIRTUAL;
6272
6273 default:
6274 return DRM_MODE_CONNECTOR_Unknown;
6275 }
6276 }
6277
6278 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6279 {
6280 struct drm_encoder *encoder;
6281
6282 /* There is only one encoder per connector */
6283 drm_connector_for_each_possible_encoder(connector, encoder)
6284 return encoder;
6285
6286 return NULL;
6287 }
6288
6289 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6290 {
6291 struct drm_encoder *encoder;
6292 struct amdgpu_encoder *amdgpu_encoder;
6293
6294 encoder = amdgpu_dm_connector_to_encoder(connector);
6295
6296 if (encoder == NULL)
6297 return;
6298
6299 amdgpu_encoder = to_amdgpu_encoder(encoder);
6300
6301 amdgpu_encoder->native_mode.clock = 0;
6302
6303 if (!list_empty(&connector->probed_modes)) {
6304 struct drm_display_mode *preferred_mode = NULL;
6305
6306 list_for_each_entry(preferred_mode,
6307 &connector->probed_modes,
6308 head) {
6309 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6310 amdgpu_encoder->native_mode = *preferred_mode;
6311
6312 break;
6313 }
6314
6315 }
6316 }
6317
6318 static struct drm_display_mode *
6319 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6320 char *name,
6321 int hdisplay, int vdisplay)
6322 {
6323 struct drm_device *dev = encoder->dev;
6324 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6325 struct drm_display_mode *mode = NULL;
6326 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6327
6328 mode = drm_mode_duplicate(dev, native_mode);
6329
6330 if (mode == NULL)
6331 return NULL;
6332
6333 mode->hdisplay = hdisplay;
6334 mode->vdisplay = vdisplay;
6335 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6336 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6337
6338 return mode;
6339
6340 }
6341
6342 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6343 struct drm_connector *connector)
6344 {
6345 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6346 struct drm_display_mode *mode = NULL;
6347 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6348 struct amdgpu_dm_connector *amdgpu_dm_connector =
6349 to_amdgpu_dm_connector(connector);
6350 int i;
6351 int n;
6352 struct mode_size {
6353 char name[DRM_DISPLAY_MODE_LEN];
6354 int w;
6355 int h;
6356 } common_modes[] = {
6357 { "640x480", 640, 480},
6358 { "800x600", 800, 600},
6359 { "1024x768", 1024, 768},
6360 { "1280x720", 1280, 720},
6361 { "1280x800", 1280, 800},
6362 {"1280x1024", 1280, 1024},
6363 { "1440x900", 1440, 900},
6364 {"1680x1050", 1680, 1050},
6365 {"1600x1200", 1600, 1200},
6366 {"1920x1080", 1920, 1080},
6367 {"1920x1200", 1920, 1200}
6368 };
6369
6370 n = ARRAY_SIZE(common_modes);
6371
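	/*
	 * Add a common mode only if it is no larger than the native mode, is
	 * not identical to it, and is not already present in the probed mode
	 * list; such modes can then be scaled up to the native timing.
	 */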
6372 for (i = 0; i < n; i++) {
6373 struct drm_display_mode *curmode = NULL;
6374 bool mode_existed = false;
6375
6376 if (common_modes[i].w > native_mode->hdisplay ||
6377 common_modes[i].h > native_mode->vdisplay ||
6378 (common_modes[i].w == native_mode->hdisplay &&
6379 common_modes[i].h == native_mode->vdisplay))
6380 continue;
6381
6382 list_for_each_entry(curmode, &connector->probed_modes, head) {
6383 if (common_modes[i].w == curmode->hdisplay &&
6384 common_modes[i].h == curmode->vdisplay) {
6385 mode_existed = true;
6386 break;
6387 }
6388 }
6389
6390 if (mode_existed)
6391 continue;
6392
6393 mode = amdgpu_dm_create_common_mode(encoder,
6394 common_modes[i].name, common_modes[i].w,
6395 common_modes[i].h);
6396 drm_mode_probed_add(connector, mode);
6397 amdgpu_dm_connector->num_modes++;
6398 }
6399 }
6400
6401 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6402 struct edid *edid)
6403 {
6404 struct amdgpu_dm_connector *amdgpu_dm_connector =
6405 to_amdgpu_dm_connector(connector);
6406
6407 if (edid) {
6408 /* empty probed_modes */
6409 INIT_LIST_HEAD(&connector->probed_modes);
6410 amdgpu_dm_connector->num_modes =
6411 drm_add_edid_modes(connector, edid);
6412
6413 		/* Sort the probed modes before calling
6414 		 * amdgpu_dm_get_native_mode(), since the EDID can have
6415 		 * more than one preferred mode. Modes that appear later
6416 		 * in the probed mode list could be of higher, preferred
6417 		 * resolution. For example, a 3840x2160 preferred timing
6418 		 * in the base EDID and a 4096x2160 preferred resolution
6419 		 * in a DID extension block later.
6420 */
6421 drm_mode_sort(&connector->probed_modes);
6422 amdgpu_dm_get_native_mode(connector);
6423 } else {
6424 amdgpu_dm_connector->num_modes = 0;
6425 }
6426 }
6427
6428 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6429 {
6430 struct amdgpu_dm_connector *amdgpu_dm_connector =
6431 to_amdgpu_dm_connector(connector);
6432 struct drm_encoder *encoder;
6433 struct edid *edid = amdgpu_dm_connector->edid;
6434
6435 encoder = amdgpu_dm_connector_to_encoder(connector);
6436
6437 if (!edid || !drm_edid_is_valid(edid)) {
6438 amdgpu_dm_connector->num_modes =
6439 drm_add_modes_noedid(connector, 640, 480);
6440 } else {
6441 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6442 amdgpu_dm_connector_add_common_modes(encoder, connector);
6443 }
6444 amdgpu_dm_fbc_init(connector);
6445
6446 return amdgpu_dm_connector->num_modes;
6447 }
6448
6449 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6450 struct amdgpu_dm_connector *aconnector,
6451 int connector_type,
6452 struct dc_link *link,
6453 int link_index)
6454 {
6455 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6456
6457 /*
6458 * Some of the properties below require access to state, like bpc.
6459 * Allocate some default initial connector state with our reset helper.
6460 */
6461 if (aconnector->base.funcs->reset)
6462 aconnector->base.funcs->reset(&aconnector->base);
6463
6464 aconnector->connector_id = link_index;
6465 aconnector->dc_link = link;
6466 aconnector->base.interlace_allowed = false;
6467 aconnector->base.doublescan_allowed = false;
6468 aconnector->base.stereo_allowed = false;
6469 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6470 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6471 aconnector->audio_inst = -1;
6472 mutex_init(&aconnector->hpd_lock);
6473
6474 /*
6475 	 * Configure HPD hot plug support. The connector->polled default value is 0,
6476 	 * which means HPD hot plug is not supported.
6477 */
6478 switch (connector_type) {
6479 case DRM_MODE_CONNECTOR_HDMIA:
6480 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6481 aconnector->base.ycbcr_420_allowed =
6482 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6483 break;
6484 case DRM_MODE_CONNECTOR_DisplayPort:
6485 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6486 aconnector->base.ycbcr_420_allowed =
6487 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6488 break;
6489 case DRM_MODE_CONNECTOR_DVID:
6490 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6491 break;
6492 default:
6493 break;
6494 }
6495
6496 drm_object_attach_property(&aconnector->base.base,
6497 dm->ddev->mode_config.scaling_mode_property,
6498 DRM_MODE_SCALE_NONE);
6499
6500 drm_object_attach_property(&aconnector->base.base,
6501 adev->mode_info.underscan_property,
6502 UNDERSCAN_OFF);
6503 drm_object_attach_property(&aconnector->base.base,
6504 adev->mode_info.underscan_hborder_property,
6505 0);
6506 drm_object_attach_property(&aconnector->base.base,
6507 adev->mode_info.underscan_vborder_property,
6508 0);
6509
6510 if (!aconnector->mst_port)
6511 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6512
6513 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6514 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6515 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6516
6517 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6518 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6519 drm_object_attach_property(&aconnector->base.base,
6520 adev->mode_info.abm_level_property, 0);
6521 }
6522
6523 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6524 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6525 connector_type == DRM_MODE_CONNECTOR_eDP) {
6526 drm_object_attach_property(
6527 &aconnector->base.base,
6528 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6529
6530 if (!aconnector->mst_port)
6531 drm_connector_attach_vrr_capable_property(&aconnector->base);
6532
6533 #ifdef CONFIG_DRM_AMD_DC_HDCP
6534 if (adev->dm.hdcp_workqueue)
6535 drm_connector_attach_content_protection_property(&aconnector->base, true);
6536 #endif
6537 }
6538 }
6539
6540 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6541 struct i2c_msg *msgs, int num)
6542 {
6543 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6544 struct ddc_service *ddc_service = i2c->ddc_service;
6545 struct i2c_command cmd;
6546 int i;
6547 int result = -EIO;
6548
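	/*
	 * Translate the i2c_msg array into a single DC i2c_command: one
	 * i2c_payload per message, submitted over the link's DDC channel at
	 * the default speed of 100 (kHz). The I2C core expects the number of
	 * transferred messages on success, hence result is set to num.
	 */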
6549 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6550
6551 if (!cmd.payloads)
6552 return result;
6553
6554 cmd.number_of_payloads = num;
6555 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6556 cmd.speed = 100;
6557
6558 for (i = 0; i < num; i++) {
6559 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6560 cmd.payloads[i].address = msgs[i].addr;
6561 cmd.payloads[i].length = msgs[i].len;
6562 cmd.payloads[i].data = msgs[i].buf;
6563 }
6564
6565 if (dc_submit_i2c(
6566 ddc_service->ctx->dc,
6567 ddc_service->ddc_pin->hw_info.ddc_channel,
6568 &cmd))
6569 result = num;
6570
6571 kfree(cmd.payloads);
6572 return result;
6573 }
6574
6575 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6576 {
6577 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6578 }
6579
6580 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6581 .master_xfer = amdgpu_dm_i2c_xfer,
6582 .functionality = amdgpu_dm_i2c_func,
6583 };
6584
6585 static struct amdgpu_i2c_adapter *
6586 create_i2c(struct ddc_service *ddc_service,
6587 int link_index,
6588 int *res)
6589 {
6590 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6591 struct amdgpu_i2c_adapter *i2c;
6592
6593 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6594 if (!i2c)
6595 return NULL;
6596 i2c->base.owner = THIS_MODULE;
6597 i2c->base.class = I2C_CLASS_DDC;
6598 i2c->base.dev.parent = &adev->pdev->dev;
6599 i2c->base.algo = &amdgpu_dm_i2c_algo;
6600 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6601 i2c_set_adapdata(&i2c->base, i2c);
6602 i2c->ddc_service = ddc_service;
6603 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6604
6605 return i2c;
6606 }
6607
6608
6609 /*
6610 * Note: this function assumes that dc_link_detect() was called for the
6611 * dc_link which will be represented by this aconnector.
6612 */
6613 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6614 struct amdgpu_dm_connector *aconnector,
6615 uint32_t link_index,
6616 struct amdgpu_encoder *aencoder)
6617 {
6618 int res = 0;
6619 int connector_type;
6620 struct dc *dc = dm->dc;
6621 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6622 struct amdgpu_i2c_adapter *i2c;
6623
6624 link->priv = aconnector;
6625
6626 DRM_DEBUG_DRIVER("%s()\n", __func__);
6627
6628 i2c = create_i2c(link->ddc, link->link_index, &res);
6629 if (!i2c) {
6630 DRM_ERROR("Failed to create i2c adapter data\n");
6631 return -ENOMEM;
6632 }
6633
6634 aconnector->i2c = i2c;
6635 res = i2c_add_adapter(&i2c->base);
6636
6637 if (res) {
6638 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6639 goto out_free;
6640 }
6641
6642 connector_type = to_drm_connector_type(link->connector_signal);
6643
6644 res = drm_connector_init_with_ddc(
6645 dm->ddev,
6646 &aconnector->base,
6647 &amdgpu_dm_connector_funcs,
6648 connector_type,
6649 &i2c->base);
6650
6651 if (res) {
6652 DRM_ERROR("connector_init failed\n");
6653 aconnector->connector_id = -1;
6654 goto out_free;
6655 }
6656
6657 drm_connector_helper_add(
6658 &aconnector->base,
6659 &amdgpu_dm_connector_helper_funcs);
6660
6661 amdgpu_dm_connector_init_helper(
6662 dm,
6663 aconnector,
6664 connector_type,
6665 link,
6666 link_index);
6667
6668 drm_connector_attach_encoder(
6669 &aconnector->base, &aencoder->base);
6670
6671 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6672 || connector_type == DRM_MODE_CONNECTOR_eDP)
6673 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6674
6675 out_free:
6676 if (res) {
6677 kfree(i2c);
6678 aconnector->i2c = NULL;
6679 }
6680 return res;
6681 }
6682
6683 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6684 {
6685 switch (adev->mode_info.num_crtc) {
6686 case 1:
6687 return 0x1;
6688 case 2:
6689 return 0x3;
6690 case 3:
6691 return 0x7;
6692 case 4:
6693 return 0xf;
6694 case 5:
6695 return 0x1f;
6696 case 6:
6697 default:
6698 return 0x3f;
6699 }
6700 }
6701
6702 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6703 struct amdgpu_encoder *aencoder,
6704 uint32_t link_index)
6705 {
6706 struct amdgpu_device *adev = drm_to_adev(dev);
6707
6708 int res = drm_encoder_init(dev,
6709 &aencoder->base,
6710 &amdgpu_dm_encoder_funcs,
6711 DRM_MODE_ENCODER_TMDS,
6712 NULL);
6713
6714 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6715
6716 if (!res)
6717 aencoder->encoder_id = link_index;
6718 else
6719 aencoder->encoder_id = -1;
6720
6721 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6722
6723 return res;
6724 }
6725
6726 static void manage_dm_interrupts(struct amdgpu_device *adev,
6727 struct amdgpu_crtc *acrtc,
6728 bool enable)
6729 {
6730 /*
6731 * We have no guarantee that the frontend index maps to the same
6732 * backend index - some even map to more than one.
6733 *
6734 * TODO: Use a different interrupt or check DC itself for the mapping.
6735 */
6736 int irq_type =
6737 amdgpu_display_crtc_idx_to_irq_type(
6738 adev,
6739 acrtc->crtc_id);
6740
6741 if (enable) {
6742 drm_crtc_vblank_on(&acrtc->base);
6743 amdgpu_irq_get(
6744 adev,
6745 &adev->pageflip_irq,
6746 irq_type);
6747 } else {
6748
6749 amdgpu_irq_put(
6750 adev,
6751 &adev->pageflip_irq,
6752 irq_type);
6753 drm_crtc_vblank_off(&acrtc->base);
6754 }
6755 }
6756
6757 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6758 struct amdgpu_crtc *acrtc)
6759 {
6760 int irq_type =
6761 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6762
6763 	/*
6764 	 * This reads the current state for the IRQ and force-reapplies
6765 * the setting to hardware.
6766 */
6767 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6768 }
6769
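/*
 * Returns true when the connector's scaling mode changed, when underscan was
 * toggled with non-zero borders, or when the border sizes themselves changed;
 * in other words, whenever the active scaling/underscan configuration needs
 * to be reprogrammed.
 */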
6770 static bool
6771 is_scaling_state_different(const struct dm_connector_state *dm_state,
6772 const struct dm_connector_state *old_dm_state)
6773 {
6774 if (dm_state->scaling != old_dm_state->scaling)
6775 return true;
6776 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6777 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6778 return true;
6779 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6780 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6781 return true;
6782 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6783 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6784 return true;
6785 return false;
6786 }
6787
6788 #ifdef CONFIG_DRM_AMD_DC_HDCP
6789 static bool is_content_protection_different(struct drm_connector_state *state,
6790 const struct drm_connector_state *old_state,
6791 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6792 {
6793 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6794
6795 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6796 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6797 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6798 return true;
6799 }
6800
6801 	/* CP is being re-enabled, ignore this */
6802 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6803 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6804 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6805 return false;
6806 }
6807
6808 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6809 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6810 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6811 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6812
6813 	/* Check if something is connected/enabled; otherwise we would start HDCP
6814 	 * while nothing is connected/enabled (hot-plug, headless S3, DPMS).
6815 */
6816 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6817 aconnector->dc_sink != NULL)
6818 return true;
6819
6820 if (old_state->content_protection == state->content_protection)
6821 return false;
6822
6823 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6824 return true;
6825
6826 return false;
6827 }
6828
6829 #endif
6830 static void remove_stream(struct amdgpu_device *adev,
6831 struct amdgpu_crtc *acrtc,
6832 struct dc_stream_state *stream)
6833 {
6834 /* this is the update mode case */
6835
6836 acrtc->otg_inst = -1;
6837 acrtc->enabled = false;
6838 }
6839
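/*
 * Translate the cursor plane state into a dc_cursor_position. Coordinates
 * hanging off the top/left edge are clamped to 0 and the overhang is moved
 * into the hotspot instead; e.g. (illustrative only) crtc_x = -16 becomes
 * position->x = 0 with x_hotspot = 16, so DC still shows the visible part
 * of the cursor.
 */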
6840 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6841 struct dc_cursor_position *position)
6842 {
6843 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6844 int x, y;
6845 int xorigin = 0, yorigin = 0;
6846
6847 if (!crtc || !plane->state->fb)
6848 return 0;
6849
6850 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6851 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6852 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6853 __func__,
6854 plane->state->crtc_w,
6855 plane->state->crtc_h);
6856 return -EINVAL;
6857 }
6858
6859 x = plane->state->crtc_x;
6860 y = plane->state->crtc_y;
6861
6862 if (x <= -amdgpu_crtc->max_cursor_width ||
6863 y <= -amdgpu_crtc->max_cursor_height)
6864 return 0;
6865
6866 if (x < 0) {
6867 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6868 x = 0;
6869 }
6870 if (y < 0) {
6871 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6872 y = 0;
6873 }
6874 position->enable = true;
6875 position->translate_by_source = true;
6876 position->x = x;
6877 position->y = y;
6878 position->x_hotspot = xorigin;
6879 position->y_hotspot = yorigin;
6880
6881 return 0;
6882 }
6883
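/*
 * Program the hardware cursor for a cursor plane update: compute the
 * clamped position via get_cursor_position() and push the attributes
 * (address, size, pixel format) and position to DC under dm.dc_lock.
 * A disabled position simply turns the cursor off on the stream.
 */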
6884 static void handle_cursor_update(struct drm_plane *plane,
6885 struct drm_plane_state *old_plane_state)
6886 {
6887 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6888 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6889 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6890 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6891 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6892 uint64_t address = afb ? afb->address : 0;
6893 struct dc_cursor_position position = {0};
6894 struct dc_cursor_attributes attributes;
6895 int ret;
6896
6897 if (!plane->state->fb && !old_plane_state->fb)
6898 return;
6899
6900 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6901 __func__,
6902 amdgpu_crtc->crtc_id,
6903 plane->state->crtc_w,
6904 plane->state->crtc_h);
6905
6906 ret = get_cursor_position(plane, crtc, &position);
6907 if (ret)
6908 return;
6909
6910 if (!position.enable) {
6911 /* turn off cursor */
6912 if (crtc_state && crtc_state->stream) {
6913 mutex_lock(&adev->dm.dc_lock);
6914 dc_stream_set_cursor_position(crtc_state->stream,
6915 &position);
6916 mutex_unlock(&adev->dm.dc_lock);
6917 }
6918 return;
6919 }
6920
6921 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6922 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6923
6924 memset(&attributes, 0, sizeof(attributes));
6925 attributes.address.high_part = upper_32_bits(address);
6926 attributes.address.low_part = lower_32_bits(address);
6927 attributes.width = plane->state->crtc_w;
6928 attributes.height = plane->state->crtc_h;
6929 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6930 attributes.rotation_angle = 0;
6931 attributes.attribute_flags.value = 0;
6932
6933 attributes.pitch = attributes.width;
6934
6935 if (crtc_state->stream) {
6936 mutex_lock(&adev->dm.dc_lock);
6937 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6938 &attributes))
6939 DRM_ERROR("DC failed to set cursor attributes\n");
6940
6941 if (!dc_stream_set_cursor_position(crtc_state->stream,
6942 &position))
6943 DRM_ERROR("DC failed to set cursor position\n");
6944 mutex_unlock(&adev->dm.dc_lock);
6945 }
6946 }
6947
6948 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6949 {
6950
6951 assert_spin_locked(&acrtc->base.dev->event_lock);
6952 WARN_ON(acrtc->event);
6953
6954 acrtc->event = acrtc->base.state->event;
6955
6956 /* Set the flip status */
6957 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6958
6959 /* Mark this event as consumed */
6960 acrtc->base.state->event = NULL;
6961
6962 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6963 acrtc->crtc_id);
6964 }
6965
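/*
 * Per-flip FreeSync handling: feed the flip into the freesync module,
 * rebuild the VRR infopacket, and for older ASICs (family < AMDGPU_FAMILY_AI)
 * with VRR active re-adjust vmin/vmax before the frame ends. Runs under the
 * event_lock because the IRQ handlers read the same dm_irq_params.
 */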
6966 static void update_freesync_state_on_stream(
6967 struct amdgpu_display_manager *dm,
6968 struct dm_crtc_state *new_crtc_state,
6969 struct dc_stream_state *new_stream,
6970 struct dc_plane_state *surface,
6971 u32 flip_timestamp_in_us)
6972 {
6973 struct mod_vrr_params vrr_params;
6974 struct dc_info_packet vrr_infopacket = {0};
6975 struct amdgpu_device *adev = dm->adev;
6976 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6977 unsigned long flags;
6978
6979 if (!new_stream)
6980 return;
6981
6982 /*
6983 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6984 * For now it's sufficient to just guard against these conditions.
6985 */
6986
6987 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6988 return;
6989
6990 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6991 vrr_params = acrtc->dm_irq_params.vrr_params;
6992
6993 if (surface) {
6994 mod_freesync_handle_preflip(
6995 dm->freesync_module,
6996 surface,
6997 new_stream,
6998 flip_timestamp_in_us,
6999 &vrr_params);
7000
7001 if (adev->family < AMDGPU_FAMILY_AI &&
7002 amdgpu_dm_vrr_active(new_crtc_state)) {
7003 mod_freesync_handle_v_update(dm->freesync_module,
7004 new_stream, &vrr_params);
7005
7006 /* Need to call this before the frame ends. */
7007 dc_stream_adjust_vmin_vmax(dm->dc,
7008 new_crtc_state->stream,
7009 &vrr_params.adjust);
7010 }
7011 }
7012
7013 mod_freesync_build_vrr_infopacket(
7014 dm->freesync_module,
7015 new_stream,
7016 &vrr_params,
7017 PACKET_TYPE_VRR,
7018 TRANSFER_FUNC_UNKNOWN,
7019 &vrr_infopacket);
7020
7021 new_crtc_state->freesync_timing_changed |=
7022 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7023 &vrr_params.adjust,
7024 sizeof(vrr_params.adjust)) != 0);
7025
7026 new_crtc_state->freesync_vrr_info_changed |=
7027 (memcmp(&new_crtc_state->vrr_infopacket,
7028 &vrr_infopacket,
7029 sizeof(vrr_infopacket)) != 0);
7030
7031 acrtc->dm_irq_params.vrr_params = vrr_params;
7032 new_crtc_state->vrr_infopacket = vrr_infopacket;
7033
7034 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7035 new_stream->vrr_infopacket = vrr_infopacket;
7036
7037 if (new_crtc_state->freesync_vrr_info_changed)
7038 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7039 new_crtc_state->base.crtc->base.id,
7040 (int)new_crtc_state->base.vrr_enabled,
7041 (int)vrr_params.state);
7042
7043 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7044 }
7045
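/*
 * Recompute the VRR parameters for the new CRTC state and mirror them,
 * together with the freesync config and active plane count, into
 * acrtc->dm_irq_params so the vblank/vupdate handlers see a consistent copy.
 */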
7046 static void update_stream_irq_parameters(
7047 struct amdgpu_display_manager *dm,
7048 struct dm_crtc_state *new_crtc_state)
7049 {
7050 struct dc_stream_state *new_stream = new_crtc_state->stream;
7051 struct mod_vrr_params vrr_params;
7052 struct mod_freesync_config config = new_crtc_state->freesync_config;
7053 struct amdgpu_device *adev = dm->adev;
7054 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7055 unsigned long flags;
7056
7057 if (!new_stream)
7058 return;
7059
7060 /*
7061 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7062 * For now it's sufficient to just guard against these conditions.
7063 */
7064 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7065 return;
7066
7067 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7068 vrr_params = acrtc->dm_irq_params.vrr_params;
7069
7070 if (new_crtc_state->vrr_supported &&
7071 config.min_refresh_in_uhz &&
7072 config.max_refresh_in_uhz) {
7073 config.state = new_crtc_state->base.vrr_enabled ?
7074 VRR_STATE_ACTIVE_VARIABLE :
7075 VRR_STATE_INACTIVE;
7076 } else {
7077 config.state = VRR_STATE_UNSUPPORTED;
7078 }
7079
7080 mod_freesync_build_vrr_params(dm->freesync_module,
7081 new_stream,
7082 &config, &vrr_params);
7083
7084 new_crtc_state->freesync_timing_changed |=
7085 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7086 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7087
7088 new_crtc_state->freesync_config = config;
7089 /* Copy state for access from DM IRQ handler */
7090 acrtc->dm_irq_params.freesync_config = config;
7091 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7092 acrtc->dm_irq_params.vrr_params = vrr_params;
7093 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7094 }
7095
7096 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7097 struct dm_crtc_state *new_state)
7098 {
7099 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7100 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7101
7102 if (!old_vrr_active && new_vrr_active) {
7103 /* Transition VRR inactive -> active:
7104 * While VRR is active, we must not disable the vblank irq, as a
7105 * reenable after a disable would compute bogus vblank/pflip
7106 * timestamps if it happens inside the display front-porch.
7107 *
7108 * We also need vupdate irq for the actual core vblank handling
7109 * at end of vblank.
7110 */
7111 dm_set_vupdate_irq(new_state->base.crtc, true);
7112 drm_crtc_vblank_get(new_state->base.crtc);
7113 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7114 __func__, new_state->base.crtc->base.id);
7115 } else if (old_vrr_active && !new_vrr_active) {
7116 /* Transition VRR active -> inactive:
7117 * Allow vblank irq disable again for fixed refresh rate.
7118 */
7119 dm_set_vupdate_irq(new_state->base.crtc, false);
7120 drm_crtc_vblank_put(new_state->base.crtc);
7121 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7122 __func__, new_state->base.crtc->base.id);
7123 }
7124 }
7125
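/* Issue a cursor update for every cursor plane in the atomic state. */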
7126 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7127 {
7128 struct drm_plane *plane;
7129 struct drm_plane_state *old_plane_state, *new_plane_state;
7130 int i;
7131
7132 /*
7133 * TODO: Make this per-stream so we don't issue redundant updates for
7134 * commits with multiple streams.
7135 */
7136 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7137 new_plane_state, i)
7138 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7139 handle_cursor_update(plane, old_plane_state);
7140 }
7141
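/*
 * Program all non-cursor planes of one CRTC as a single DC update bundle:
 * fill scaling and plane info for each plane, wait for framebuffer fences,
 * throttle page flips against the target vblank, arm the pageflip interrupt
 * and hand the bundle to dc_commit_updates_for_stream(). Cursor updates are
 * issued separately, before the bundle when all planes are being disabled
 * and after it otherwise.
 */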
7142 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7143 struct dc_state *dc_state,
7144 struct drm_device *dev,
7145 struct amdgpu_display_manager *dm,
7146 struct drm_crtc *pcrtc,
7147 bool wait_for_vblank)
7148 {
7149 uint32_t i;
7150 uint64_t timestamp_ns;
7151 struct drm_plane *plane;
7152 struct drm_plane_state *old_plane_state, *new_plane_state;
7153 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7154 struct drm_crtc_state *new_pcrtc_state =
7155 drm_atomic_get_new_crtc_state(state, pcrtc);
7156 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7157 struct dm_crtc_state *dm_old_crtc_state =
7158 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7159 int planes_count = 0, vpos, hpos;
7160 long r;
7161 unsigned long flags;
7162 struct amdgpu_bo *abo;
7163 uint32_t target_vblank, last_flip_vblank;
7164 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7165 bool pflip_present = false;
7166 struct {
7167 struct dc_surface_update surface_updates[MAX_SURFACES];
7168 struct dc_plane_info plane_infos[MAX_SURFACES];
7169 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7170 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7171 struct dc_stream_update stream_update;
7172 } *bundle;
7173
7174 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7175
7176 if (!bundle) {
7177 dm_error("Failed to allocate update bundle\n");
7178 goto cleanup;
7179 }
7180
7181 /*
7182 * Disable the cursor first if we're disabling all the planes.
7183 * It'll remain on the screen after the planes are re-enabled
7184 * if we don't.
7185 */
7186 if (acrtc_state->active_planes == 0)
7187 amdgpu_dm_commit_cursors(state);
7188
7189 /* update planes when needed */
7190 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7191 struct drm_crtc *crtc = new_plane_state->crtc;
7192 struct drm_crtc_state *new_crtc_state;
7193 struct drm_framebuffer *fb = new_plane_state->fb;
7194 bool plane_needs_flip;
7195 struct dc_plane_state *dc_plane;
7196 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7197
7198 /* Cursor plane is handled after stream updates */
7199 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7200 continue;
7201
7202 if (!fb || !crtc || pcrtc != crtc)
7203 continue;
7204
7205 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7206 if (!new_crtc_state->active)
7207 continue;
7208
7209 dc_plane = dm_new_plane_state->dc_state;
7210
7211 bundle->surface_updates[planes_count].surface = dc_plane;
7212 if (new_pcrtc_state->color_mgmt_changed) {
7213 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7214 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7215 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7216 }
7217
7218 fill_dc_scaling_info(new_plane_state,
7219 &bundle->scaling_infos[planes_count]);
7220
7221 bundle->surface_updates[planes_count].scaling_info =
7222 &bundle->scaling_infos[planes_count];
7223
7224 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7225
7226 pflip_present = pflip_present || plane_needs_flip;
7227
7228 if (!plane_needs_flip) {
7229 planes_count += 1;
7230 continue;
7231 }
7232
7233 abo = gem_to_amdgpu_bo(fb->obj[0]);
7234
7235 /*
7236 * Wait for all fences on this FB. Do limited wait to avoid
7237 * deadlock during GPU reset when this fence will not signal
7238 * but we hold reservation lock for the BO.
7239 */
7240 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7241 false,
7242 msecs_to_jiffies(5000));
7243 if (unlikely(r <= 0))
7244 DRM_ERROR("Waiting for fences timed out!");
7245
7246 fill_dc_plane_info_and_addr(
7247 dm->adev, new_plane_state,
7248 dm_new_plane_state->tiling_flags,
7249 &bundle->plane_infos[planes_count],
7250 &bundle->flip_addrs[planes_count].address,
7251 dm_new_plane_state->tmz_surface, false);
7252
7253 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7254 new_plane_state->plane->index,
7255 bundle->plane_infos[planes_count].dcc.enable);
7256
7257 bundle->surface_updates[planes_count].plane_info =
7258 &bundle->plane_infos[planes_count];
7259
7260 /*
7261 * Only allow immediate flips for fast updates that don't
7262 * change FB pitch, DCC state, rotation or mirroring.
7263 */
7264 bundle->flip_addrs[planes_count].flip_immediate =
7265 crtc->state->async_flip &&
7266 acrtc_state->update_type == UPDATE_TYPE_FAST;
7267
7268 timestamp_ns = ktime_get_ns();
7269 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7270 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7271 bundle->surface_updates[planes_count].surface = dc_plane;
7272
7273 if (!bundle->surface_updates[planes_count].surface) {
7274 DRM_ERROR("No surface for CRTC: id=%d\n",
7275 acrtc_attach->crtc_id);
7276 continue;
7277 }
7278
7279 if (plane == pcrtc->primary)
7280 update_freesync_state_on_stream(
7281 dm,
7282 acrtc_state,
7283 acrtc_state->stream,
7284 dc_plane,
7285 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7286
7287 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7288 __func__,
7289 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7290 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7291
7292 planes_count += 1;
7293
7294 }
7295
7296 if (pflip_present) {
7297 if (!vrr_active) {
7298 /* Use old throttling in non-vrr fixed refresh rate mode
7299 * to keep flip scheduling based on target vblank counts
7300 * working in a backwards compatible way, e.g., for
7301 * clients using the GLX_OML_sync_control extension or
7302 * DRI3/Present extension with defined target_msc.
7303 */
7304 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7305 } else {
7307 /* For variable refresh rate mode only:
7308 * Get vblank of last completed flip to avoid > 1 vrr
7309 * flips per video frame by use of throttling, but allow
7310 * flip programming anywhere in the possibly large
7311 * variable vrr vblank interval for fine-grained flip
7312 * timing control and more opportunity to avoid stutter
7313 * on late submission of flips.
7314 */
7315 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7316 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7317 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7318 }
7319
7320 target_vblank = last_flip_vblank + wait_for_vblank;
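		/*
		 * wait_for_vblank is used as 0 or 1 here, so the flip targets
		 * either the vblank of the last completed flip or the one
		 * right after it.
		 */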
7321
7322 /*
7323 * Wait until we're out of the vertical blank period before the one
7324 * targeted by the flip
7325 */
7326 while ((acrtc_attach->enabled &&
7327 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7328 0, &vpos, &hpos, NULL,
7329 NULL, &pcrtc->hwmode)
7330 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7331 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7332 (int)(target_vblank -
7333 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7334 usleep_range(1000, 1100);
7335 }
7336
7337 /**
7338 * Prepare the flip event for the pageflip interrupt to handle.
7339 *
7340 * This only works in the case where we've already turned on the
7341 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7342 * from 0 -> n planes we have to skip a hardware generated event
7343 * and rely on sending it from software.
7344 */
7345 if (acrtc_attach->base.state->event &&
7346 acrtc_state->active_planes > 0) {
7347 drm_crtc_vblank_get(pcrtc);
7348
7349 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7350
7351 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7352 prepare_flip_isr(acrtc_attach);
7353
7354 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7355 }
7356
7357 if (acrtc_state->stream) {
7358 if (acrtc_state->freesync_vrr_info_changed)
7359 bundle->stream_update.vrr_infopacket =
7360 &acrtc_state->stream->vrr_infopacket;
7361 }
7362 }
7363
7364 /* Update the planes if changed or disable if we don't have any. */
7365 if ((planes_count || acrtc_state->active_planes == 0) &&
7366 acrtc_state->stream) {
7367 bundle->stream_update.stream = acrtc_state->stream;
7368 if (new_pcrtc_state->mode_changed) {
7369 bundle->stream_update.src = acrtc_state->stream->src;
7370 bundle->stream_update.dst = acrtc_state->stream->dst;
7371 }
7372
7373 if (new_pcrtc_state->color_mgmt_changed) {
7374 /*
7375 * TODO: This isn't fully correct since we've actually
7376 * already modified the stream in place.
7377 */
7378 bundle->stream_update.gamut_remap =
7379 &acrtc_state->stream->gamut_remap_matrix;
7380 bundle->stream_update.output_csc_transform =
7381 &acrtc_state->stream->csc_color_matrix;
7382 bundle->stream_update.out_transfer_func =
7383 acrtc_state->stream->out_transfer_func;
7384 }
7385
7386 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7387 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7388 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7389
7390 /*
7391 * If FreeSync state on the stream has changed then we need to
7392 * re-adjust the min/max bounds now that DC doesn't handle this
7393 * as part of commit.
7394 */
7395 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7396 amdgpu_dm_vrr_active(acrtc_state)) {
7397 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7398 dc_stream_adjust_vmin_vmax(
7399 dm->dc, acrtc_state->stream,
7400 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7401 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7402 }
7403 mutex_lock(&dm->dc_lock);
7404 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7405 acrtc_state->stream->link->psr_settings.psr_allow_active)
7406 amdgpu_dm_psr_disable(acrtc_state->stream);
7407
7408 dc_commit_updates_for_stream(dm->dc,
7409 bundle->surface_updates,
7410 planes_count,
7411 acrtc_state->stream,
7412 &bundle->stream_update,
7413 dc_state);
7414
7415 /**
7416 * Enable or disable the interrupts on the backend.
7417 *
7418 * Most pipes are put into power gating when unused.
7419 *
7420 * When a pipe is power gated we lose its interrupt
7421 * enablement state by the time it is ungated again.
7422 *
7423 * So we need to update the IRQ control state in hardware
7424 * whenever the pipe turns on (since it could be previously
7425 * power gated) or off (since some pipes can't be power gated
7426 * on some ASICs).
7427 */
7428 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7429 dm_update_pflip_irq_state(drm_to_adev(dev),
7430 acrtc_attach);
7431
7432 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7433 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7434 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7435 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7436 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7437 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7438 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7439 amdgpu_dm_psr_enable(acrtc_state->stream);
7440 }
7441
7442 mutex_unlock(&dm->dc_lock);
7443 }
7444
7445 /*
7446 * Update cursor state *after* programming all the planes.
7447 * This avoids redundant programming in the case where we're going
7448 * to be disabling a single plane - those pipes are being disabled.
7449 */
7450 if (acrtc_state->active_planes)
7451 amdgpu_dm_commit_cursors(state);
7452
7453 cleanup:
7454 kfree(bundle);
7455 }
7456
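/*
 * Walk the connector states and notify the audio component of ELD changes:
 * removals first (CRTC changed or went through a modeset), then additions
 * for streams that have a valid status, keeping aconnector->audio_inst in
 * sync under dm.audio_lock.
 */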
7457 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7458 struct drm_atomic_state *state)
7459 {
7460 struct amdgpu_device *adev = drm_to_adev(dev);
7461 struct amdgpu_dm_connector *aconnector;
7462 struct drm_connector *connector;
7463 struct drm_connector_state *old_con_state, *new_con_state;
7464 struct drm_crtc_state *new_crtc_state;
7465 struct dm_crtc_state *new_dm_crtc_state;
7466 const struct dc_stream_status *status;
7467 int i, inst;
7468
7469 /* Notify device removals. */
7470 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7471 if (old_con_state->crtc != new_con_state->crtc) {
7472 /* CRTC changes require notification. */
7473 goto notify;
7474 }
7475
7476 if (!new_con_state->crtc)
7477 continue;
7478
7479 new_crtc_state = drm_atomic_get_new_crtc_state(
7480 state, new_con_state->crtc);
7481
7482 if (!new_crtc_state)
7483 continue;
7484
7485 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7486 continue;
7487
7488 notify:
7489 aconnector = to_amdgpu_dm_connector(connector);
7490
7491 mutex_lock(&adev->dm.audio_lock);
7492 inst = aconnector->audio_inst;
7493 aconnector->audio_inst = -1;
7494 mutex_unlock(&adev->dm.audio_lock);
7495
7496 amdgpu_dm_audio_eld_notify(adev, inst);
7497 }
7498
7499 /* Notify audio device additions. */
7500 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7501 if (!new_con_state->crtc)
7502 continue;
7503
7504 new_crtc_state = drm_atomic_get_new_crtc_state(
7505 state, new_con_state->crtc);
7506
7507 if (!new_crtc_state)
7508 continue;
7509
7510 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7511 continue;
7512
7513 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7514 if (!new_dm_crtc_state->stream)
7515 continue;
7516
7517 status = dc_stream_get_status(new_dm_crtc_state->stream);
7518 if (!status)
7519 continue;
7520
7521 aconnector = to_amdgpu_dm_connector(connector);
7522
7523 mutex_lock(&adev->dm.audio_lock);
7524 inst = status->audio_inst;
7525 aconnector->audio_inst = inst;
7526 mutex_unlock(&adev->dm.audio_lock);
7527
7528 amdgpu_dm_audio_eld_notify(adev, inst);
7529 }
7530 }
7531
7532 /*
7533 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7534 * @crtc_state: the DRM CRTC state
7535 * @stream_state: the DC stream state.
7536 *
7537 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7538 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7539 */
7540 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7541 struct dc_stream_state *stream_state)
7542 {
7543 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7544 }
7545
7546 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7547 struct drm_atomic_state *state,
7548 bool nonblock)
7549 {
7550 /*
7551 * Add check here for SoCs that support hardware cursor plane, to
7552 * unset legacy_cursor_update.
7553 */
7554
7555 return drm_atomic_helper_commit(dev, state, nonblock);
7556
7557 /*TODO Handle EINTR, reenable IRQ*/
7558 }
7559
7560 /**
7561 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7562 * @state: The atomic state to commit
7563 *
7564 * This will tell DC to commit the constructed DC state from atomic_check,
7565 * programming the hardware. Any failure here implies a hardware failure, since
7566 * atomic check should have filtered out anything non-kosher.
7567 */
7568 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7569 {
7570 struct drm_device *dev = state->dev;
7571 struct amdgpu_device *adev = drm_to_adev(dev);
7572 struct amdgpu_display_manager *dm = &adev->dm;
7573 struct dm_atomic_state *dm_state;
7574 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7575 uint32_t i, j;
7576 struct drm_crtc *crtc;
7577 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7578 unsigned long flags;
7579 bool wait_for_vblank = true;
7580 struct drm_connector *connector;
7581 struct drm_connector_state *old_con_state, *new_con_state;
7582 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7583 int crtc_disable_count = 0;
7584 bool mode_set_reset_required = false;
7585
7586 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7587
7588 dm_state = dm_atomic_get_new_state(state);
7589 if (dm_state && dm_state->context) {
7590 dc_state = dm_state->context;
7591 } else {
7592 /* No state changes, retain current state. */
7593 dc_state_temp = dc_create_state(dm->dc);
7594 ASSERT(dc_state_temp);
7595 dc_state = dc_state_temp;
7596 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7597 }
7598
7599 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7600 new_crtc_state, i) {
7601 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7602
7603 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7604
7605 if (old_crtc_state->active &&
7606 (!new_crtc_state->active ||
7607 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7608 manage_dm_interrupts(adev, acrtc, false);
7609 dc_stream_release(dm_old_crtc_state->stream);
7610 }
7611 }
7612
7613 drm_atomic_helper_calc_timestamping_constants(state);
7614
7615 /* update changed items */
7616 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7617 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7618
7619 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7620 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7621
7622 DRM_DEBUG_DRIVER(
7623 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7624 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7625 "connectors_changed:%d\n",
7626 acrtc->crtc_id,
7627 new_crtc_state->enable,
7628 new_crtc_state->active,
7629 new_crtc_state->planes_changed,
7630 new_crtc_state->mode_changed,
7631 new_crtc_state->active_changed,
7632 new_crtc_state->connectors_changed);
7633
7634 /* Copy all transient state flags into dc state */
7635 if (dm_new_crtc_state->stream) {
7636 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7637 dm_new_crtc_state->stream);
7638 }
7639
7640 /* handles headless hotplug case, updating new_state and
7641 * aconnector as needed
7642 */
7643
7644 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7645
7646 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7647
7648 if (!dm_new_crtc_state->stream) {
7649 /*
7650 * This could happen because of issues with the
7651 * delivery of userspace notifications. In this case
7652 * userspace tries to set a mode on a display which
7653 * is in fact disconnected, so dc_sink is NULL on
7654 * the aconnector. We expect a mode reset to come
7655 * soon.
7656 *
7657 * This can also happen when an unplug occurs while
7658 * the resume sequence is still in progress.
7659 *
7660 * In either case, we want to pretend we still
7661 * have a sink to keep the pipe running so that
7662 * hw state stays consistent with the sw state.
7663 */
7664 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7665 __func__, acrtc->base.base.id);
7666 continue;
7667 }
7668
7669 if (dm_old_crtc_state->stream)
7670 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7671
7672 pm_runtime_get_noresume(dev->dev);
7673
7674 acrtc->enabled = true;
7675 acrtc->hw_mode = new_crtc_state->mode;
7676 crtc->hwmode = new_crtc_state->mode;
7677 mode_set_reset_required = true;
7678 } else if (modereset_required(new_crtc_state)) {
7679 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7680 /* i.e. reset mode */
7681 if (dm_old_crtc_state->stream)
7682 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7683 mode_set_reset_required = true;
7684 }
7685 } /* for_each_crtc_in_state() */
7686
7687 if (dc_state) {
7688 /* if there is a mode set or reset, disable eDP PSR */
7689 if (mode_set_reset_required)
7690 amdgpu_dm_psr_disable_all(dm);
7691
7692 dm_enable_per_frame_crtc_master_sync(dc_state);
7693 mutex_lock(&dm->dc_lock);
7694 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7695 mutex_unlock(&dm->dc_lock);
7696 }
7697
7698 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7699 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7700
7701 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7702
7703 if (dm_new_crtc_state->stream != NULL) {
7704 const struct dc_stream_status *status =
7705 dc_stream_get_status(dm_new_crtc_state->stream);
7706
7707 if (!status)
7708 status = dc_stream_get_status_from_state(dc_state,
7709 dm_new_crtc_state->stream);
7710 if (!status)
7711 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7712 else
7713 acrtc->otg_inst = status->primary_otg_inst;
7714 }
7715 }
7716 #ifdef CONFIG_DRM_AMD_DC_HDCP
7717 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7718 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7719 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7720 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7721
7722 new_crtc_state = NULL;
7723
7724 if (acrtc)
7725 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7726
7727 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7728
7729 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7730 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7731 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7732 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7733 continue;
7734 }
7735
7736 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7737 hdcp_update_display(
7738 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7739 new_con_state->hdcp_content_type,
7740 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7741 : false);
7742 }
7743 #endif
7744
7745 /* Handle connector state changes */
7746 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7747 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7748 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7749 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7750 struct dc_surface_update dummy_updates[MAX_SURFACES];
7751 struct dc_stream_update stream_update;
7752 struct dc_info_packet hdr_packet;
7753 struct dc_stream_status *status = NULL;
7754 bool abm_changed, hdr_changed, scaling_changed;
7755
7756 memset(&dummy_updates, 0, sizeof(dummy_updates));
7757 memset(&stream_update, 0, sizeof(stream_update));
7758
7759 if (acrtc) {
7760 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7761 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7762 }
7763
7764 /* Skip any modesets/resets */
7765 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7766 continue;
7767
7768 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7769 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7770
7771 scaling_changed = is_scaling_state_different(dm_new_con_state,
7772 dm_old_con_state);
7773
7774 abm_changed = dm_new_crtc_state->abm_level !=
7775 dm_old_crtc_state->abm_level;
7776
7777 hdr_changed =
7778 is_hdr_metadata_different(old_con_state, new_con_state);
7779
7780 if (!scaling_changed && !abm_changed && !hdr_changed)
7781 continue;
7782
7783 stream_update.stream = dm_new_crtc_state->stream;
7784 if (scaling_changed) {
7785 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7786 dm_new_con_state, dm_new_crtc_state->stream);
7787
7788 stream_update.src = dm_new_crtc_state->stream->src;
7789 stream_update.dst = dm_new_crtc_state->stream->dst;
7790 }
7791
7792 if (abm_changed) {
7793 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7794
7795 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7796 }
7797
7798 if (hdr_changed) {
7799 fill_hdr_info_packet(new_con_state, &hdr_packet);
7800 stream_update.hdr_static_metadata = &hdr_packet;
7801 }
7802
7803 status = dc_stream_get_status(dm_new_crtc_state->stream);
7804 WARN_ON(!status);
7805 WARN_ON(!status->plane_count);
7806
7807 /*
7808 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7809 * Here we create an empty update on each plane.
7810 * To fix this, DC should permit updating only stream properties.
7811 */
7812 for (j = 0; j < status->plane_count; j++)
7813 dummy_updates[j].surface = status->plane_states[0];
7814
7815
7816 mutex_lock(&dm->dc_lock);
7817 dc_commit_updates_for_stream(dm->dc,
7818 dummy_updates,
7819 status->plane_count,
7820 dm_new_crtc_state->stream,
7821 &stream_update,
7822 dc_state);
7823 mutex_unlock(&dm->dc_lock);
7824 }
7825
7826 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7827 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7828 new_crtc_state, i) {
7829 if (old_crtc_state->active && !new_crtc_state->active)
7830 crtc_disable_count++;
7831
7832 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7833 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7834
7835 /* For freesync config update on crtc state and params for irq */
7836 update_stream_irq_parameters(dm, dm_new_crtc_state);
7837
7838 /* Handle vrr on->off / off->on transitions */
7839 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7840 dm_new_crtc_state);
7841 }
7842
7843 /**
7844 * Enable interrupts for CRTCs that are newly enabled or went through
7845 * a modeset. Enabling is intentionally deferred until after the front end
7846 * state has been modified, so that the OTG is on and the IRQ
7847 * handlers do not access stale or invalid state.
7848 */
7849 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7850 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7851
7852 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7853
7854 if (new_crtc_state->active &&
7855 (!old_crtc_state->active ||
7856 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7857 dc_stream_retain(dm_new_crtc_state->stream);
7858 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7859 manage_dm_interrupts(adev, acrtc, true);
7860
7861 #ifdef CONFIG_DEBUG_FS
7862 /**
7863 * Frontend may have changed so reapply the CRC capture
7864 * settings for the stream.
7865 */
7866 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7867
7868 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7869 amdgpu_dm_crtc_configure_crc_source(
7870 crtc, dm_new_crtc_state,
7871 dm_new_crtc_state->crc_src);
7872 }
7873 #endif
7874 }
7875 }
7876
7877 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7878 if (new_crtc_state->async_flip)
7879 wait_for_vblank = false;
7880
7881 /* update planes when needed per crtc*/
7882 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7883 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7884
7885 if (dm_new_crtc_state->stream)
7886 amdgpu_dm_commit_planes(state, dc_state, dev,
7887 dm, crtc, wait_for_vblank);
7888 }
7889
7890 /* Update audio instances for each connector. */
7891 amdgpu_dm_commit_audio(dev, state);
7892
7893 /*
7894 * Send a vblank event for all events not handled in the flip and
7895 * mark the events as consumed for drm_atomic_helper_commit_hw_done().
7896 */
7897 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7898 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7899
7900 if (new_crtc_state->event)
7901 drm_send_event_locked(dev, &new_crtc_state->event->base);
7902
7903 new_crtc_state->event = NULL;
7904 }
7905 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7906
7907 /* Signal HW programming completion */
7908 drm_atomic_helper_commit_hw_done(state);
7909
7910 if (wait_for_vblank)
7911 drm_atomic_helper_wait_for_flip_done(dev, state);
7912
7913 drm_atomic_helper_cleanup_planes(dev, state);
7914
7915 /*
7916 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7917 * so we can put the GPU into runtime suspend if we're not driving any
7918 * displays anymore
7919 */
7920 for (i = 0; i < crtc_disable_count; i++)
7921 pm_runtime_put_autosuspend(dev->dev);
7922 pm_runtime_mark_last_busy(dev->dev);
7923
7924 if (dc_state_temp)
7925 dc_release_state(dc_state_temp);
7926 }
7927
7928
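/*
 * Build and commit a minimal atomic state (the connector, its CRTC and the
 * primary plane, with mode_changed forced) to restore the previous display
 * configuration without waiting for a userspace commit.
 */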
7929 static int dm_force_atomic_commit(struct drm_connector *connector)
7930 {
7931 int ret = 0;
7932 struct drm_device *ddev = connector->dev;
7933 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7934 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7935 struct drm_plane *plane = disconnected_acrtc->base.primary;
7936 struct drm_connector_state *conn_state;
7937 struct drm_crtc_state *crtc_state;
7938 struct drm_plane_state *plane_state;
7939
7940 if (!state)
7941 return -ENOMEM;
7942
7943 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7944
7945 /* Construct an atomic state to restore previous display setting */
7946
7947 /*
7948 * Attach connectors to drm_atomic_state
7949 */
7950 conn_state = drm_atomic_get_connector_state(state, connector);
7951
7952 ret = PTR_ERR_OR_ZERO(conn_state);
7953 if (ret)
7954 goto out;
7955
7956 /* Attach crtc to drm_atomic_state*/
7957 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7958
7959 ret = PTR_ERR_OR_ZERO(crtc_state);
7960 if (ret)
7961 goto out;
7962
7963 /* force a restore */
7964 crtc_state->mode_changed = true;
7965
7966 /* Attach plane to drm_atomic_state */
7967 plane_state = drm_atomic_get_plane_state(state, plane);
7968
7969 ret = PTR_ERR_OR_ZERO(plane_state);
7970 if (ret)
7971 goto out;
7972
7973 /* Call commit internally with the state we just constructed */
7974 ret = drm_atomic_commit(state);
7975
7976 out:
7977 drm_atomic_state_put(state);
7978 if (ret)
7979 DRM_ERROR("Restoring old state failed with %i\n", ret);
7980
7981 return ret;
7982 }
7983
7984 /*
7985 * This function handles all cases when a set mode does not come upon hotplug.
7986 * This includes when a display is unplugged then plugged back into the
7987 * same port and when running without usermode desktop manager support.
7988 */
7989 void dm_restore_drm_connector_state(struct drm_device *dev,
7990 struct drm_connector *connector)
7991 {
7992 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7993 struct amdgpu_crtc *disconnected_acrtc;
7994 struct dm_crtc_state *acrtc_state;
7995
7996 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7997 return;
7998
7999 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8000 if (!disconnected_acrtc)
8001 return;
8002
8003 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8004 if (!acrtc_state->stream)
8005 return;
8006
8007 /*
8008 * If the previous sink is not released and is different from the current one,
8009 * we deduce we are in a state where we cannot rely on a usermode call
8010 * to turn on the display, so we do it here.
8011 */
8012 if (acrtc_state->stream->sink != aconnector->dc_sink)
8013 dm_force_atomic_commit(&aconnector->base);
8014 }
8015
8016 /*
8017 * Grabs all modesetting locks to serialize against any blocking commits
8018 * and waits for completion of all non-blocking commits.
8019 */
8020 static int do_aquire_global_lock(struct drm_device *dev,
8021 struct drm_atomic_state *state)
8022 {
8023 struct drm_crtc *crtc;
8024 struct drm_crtc_commit *commit;
8025 long ret;
8026
8027 /*
8028 * Adding all modeset locks to the acquire_ctx will
8029 * ensure that when the framework releases it, the
8030 * extra locks we take here will get released too.
8031 */
8032 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8033 if (ret)
8034 return ret;
8035
8036 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8037 spin_lock(&crtc->commit_lock);
8038 commit = list_first_entry_or_null(&crtc->commit_list,
8039 struct drm_crtc_commit, commit_entry);
8040 if (commit)
8041 drm_crtc_commit_get(commit);
8042 spin_unlock(&crtc->commit_lock);
8043
8044 if (!commit)
8045 continue;
8046
8047 /*
8048 * Make sure all pending HW programming completed and
8049 * page flips done
8050 */
8051 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8052
8053 if (ret > 0)
8054 ret = wait_for_completion_interruptible_timeout(
8055 &commit->flip_done, 10*HZ);
8056
8057 if (ret == 0)
8058 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8059 "timed out\n", crtc->base.id, crtc->name);
8060
8061 drm_crtc_commit_put(commit);
8062 }
8063
8064 return ret < 0 ? ret : 0;
8065 }
8066
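/*
 * Derive the mod_freesync_config for a CRTC from the connector's refresh
 * range: VRR is only considered supported when the sink is freesync capable
 * and the mode's vrefresh lies within [min_vfreq, max_vfreq]; the limits
 * are converted to uHz for the freesync module.
 */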
8067 static void get_freesync_config_for_crtc(
8068 struct dm_crtc_state *new_crtc_state,
8069 struct dm_connector_state *new_con_state)
8070 {
8071 struct mod_freesync_config config = {0};
8072 struct amdgpu_dm_connector *aconnector =
8073 to_amdgpu_dm_connector(new_con_state->base.connector);
8074 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8075 int vrefresh = drm_mode_vrefresh(mode);
8076
8077 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8078 vrefresh >= aconnector->min_vfreq &&
8079 vrefresh <= aconnector->max_vfreq;
8080
8081 if (new_crtc_state->vrr_supported) {
8082 new_crtc_state->stream->ignore_msa_timing_param = true;
8083 config.state = new_crtc_state->base.vrr_enabled ?
8084 VRR_STATE_ACTIVE_VARIABLE :
8085 VRR_STATE_INACTIVE;
8086 config.min_refresh_in_uhz =
8087 aconnector->min_vfreq * 1000000;
8088 config.max_refresh_in_uhz =
8089 aconnector->max_vfreq * 1000000;
8090 config.vsif_supported = true;
8091 config.btr = true;
8092 }
8093
8094 new_crtc_state->freesync_config = config;
8095 }
8096
8097 static void reset_freesync_config_for_crtc(
8098 struct dm_crtc_state *new_crtc_state)
8099 {
8100 new_crtc_state->vrr_supported = false;
8101
8102 memset(&new_crtc_state->vrr_infopacket, 0,
8103 sizeof(new_crtc_state->vrr_infopacket));
8104 }
8105
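/*
 * Validate one CRTC transition during atomic check: create and validate a
 * new stream for an enabled CRTC, remove the old stream from the DC context
 * for a disabled one, and apply the stream updates (scaling, ABM, color
 * management, FreeSync config) that do not require a full modeset.
 */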
8106 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8107 struct drm_atomic_state *state,
8108 struct drm_crtc *crtc,
8109 struct drm_crtc_state *old_crtc_state,
8110 struct drm_crtc_state *new_crtc_state,
8111 bool enable,
8112 bool *lock_and_validation_needed)
8113 {
8114 struct dm_atomic_state *dm_state = NULL;
8115 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8116 struct dc_stream_state *new_stream;
8117 int ret = 0;
8118
8119 /*
8120 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8121 * update changed items
8122 */
8123 struct amdgpu_crtc *acrtc = NULL;
8124 struct amdgpu_dm_connector *aconnector = NULL;
8125 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8126 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8127
8128 new_stream = NULL;
8129
8130 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8131 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8132 acrtc = to_amdgpu_crtc(crtc);
8133 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8134
8135 /* TODO This hack should go away */
8136 if (aconnector && enable) {
8137 /* Make sure fake sink is created in plug-in scenario */
8138 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8139 &aconnector->base);
8140 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8141 &aconnector->base);
8142
8143 if (IS_ERR(drm_new_conn_state)) {
8144 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8145 goto fail;
8146 }
8147
8148 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8149 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8150
8151 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8152 goto skip_modeset;
8153
8154 new_stream = create_validate_stream_for_sink(aconnector,
8155 &new_crtc_state->mode,
8156 dm_new_conn_state,
8157 dm_old_crtc_state->stream);
8158
8159 /*
8160 * We can have no stream on ACTION_SET if a display
8161 * was disconnected during S3. In this case it is not an
8162 * error: the OS will be updated after detection and
8163 * will do the right thing on the next atomic commit.
8164 */
8165
8166 if (!new_stream) {
8167 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8168 __func__, acrtc->base.base.id);
8169 ret = -ENOMEM;
8170 goto fail;
8171 }
8172
8173 /*
8174 * TODO: Check VSDB bits to decide whether this should
8175 * be enabled or not.
8176 */
8177 new_stream->triggered_crtc_reset.enabled =
8178 dm->force_timing_sync;
8179
8180 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8181
8182 ret = fill_hdr_info_packet(drm_new_conn_state,
8183 &new_stream->hdr_static_metadata);
8184 if (ret)
8185 goto fail;
8186
8187 /*
8188 * If we already removed the old stream from the context
8189 * (and set the new stream to NULL) then we can't reuse
8190 * the old stream even if the stream and scaling are unchanged.
8191 * We'll hit the BUG_ON and get a black screen.
8192 *
8193 * TODO: Refactor this function to allow this check to work
8194 * in all conditions.
8195 */
8196 if (dm_new_crtc_state->stream &&
8197 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8198 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8199 new_crtc_state->mode_changed = false;
8200 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8201 new_crtc_state->mode_changed);
8202 }
8203 }
8204
8205 /* mode_changed flag may get updated above, need to check again */
8206 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8207 goto skip_modeset;
8208
8209 DRM_DEBUG_DRIVER(
8210 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8211 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8212 "connectors_changed:%d\n",
8213 acrtc->crtc_id,
8214 new_crtc_state->enable,
8215 new_crtc_state->active,
8216 new_crtc_state->planes_changed,
8217 new_crtc_state->mode_changed,
8218 new_crtc_state->active_changed,
8219 new_crtc_state->connectors_changed);
8220
8221 /* Remove stream for any changed/disabled CRTC */
8222 if (!enable) {
8223
8224 if (!dm_old_crtc_state->stream)
8225 goto skip_modeset;
8226
8227 ret = dm_atomic_get_state(state, &dm_state);
8228 if (ret)
8229 goto fail;
8230
8231 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8232 crtc->base.id);
8233
8234 /* i.e. reset mode */
8235 if (dc_remove_stream_from_ctx(
8236 dm->dc,
8237 dm_state->context,
8238 dm_old_crtc_state->stream) != DC_OK) {
8239 ret = -EINVAL;
8240 goto fail;
8241 }
8242
8243 dc_stream_release(dm_old_crtc_state->stream);
8244 dm_new_crtc_state->stream = NULL;
8245
8246 reset_freesync_config_for_crtc(dm_new_crtc_state);
8247
8248 *lock_and_validation_needed = true;
8249
8250 } else {/* Add stream for any updated/enabled CRTC */
8251 /*
8252 * Quick fix to prevent a NULL pointer on new_stream when newly
8253 * added MST connectors are not found in the existing crtc_state in chained mode.
8254 * TODO: need to dig out the root cause of that
8255 */
8256 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8257 goto skip_modeset;
8258
8259 if (modereset_required(new_crtc_state))
8260 goto skip_modeset;
8261
8262 if (modeset_required(new_crtc_state, new_stream,
8263 dm_old_crtc_state->stream)) {
8264
8265 WARN_ON(dm_new_crtc_state->stream);
8266
8267 ret = dm_atomic_get_state(state, &dm_state);
8268 if (ret)
8269 goto fail;
8270
8271 dm_new_crtc_state->stream = new_stream;
8272
8273 dc_stream_retain(new_stream);
8274
8275 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8276 crtc->base.id);
8277
8278 if (dc_add_stream_to_ctx(
8279 dm->dc,
8280 dm_state->context,
8281 dm_new_crtc_state->stream) != DC_OK) {
8282 ret = -EINVAL;
8283 goto fail;
8284 }
8285
8286 *lock_and_validation_needed = true;
8287 }
8288 }
8289
8290 skip_modeset:
8291 /* Release extra reference */
8292 if (new_stream)
8293 dc_stream_release(new_stream);
8294
8295 /*
8296 * We want to do dc stream updates that do not require a
8297 * full modeset below.
8298 */
8299 if (!(enable && aconnector && new_crtc_state->active))
8300 return 0;
8301 /*
8302 * Given the above conditions, the dc state cannot be NULL, because:
8303 * 1. We're in the process of enabling CRTCs (the stream has just been
8304 * added to the dc context, or is already in the context),
8305 * 2. The CRTC has a valid connector attached, and
8306 * 3. The CRTC is currently active and enabled.
8307 * => The dc stream state currently exists.
8308 */
8309 BUG_ON(dm_new_crtc_state->stream == NULL);
8310
8311 /* Scaling or underscan settings */
8312 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8313 drm_atomic_crtc_needs_modeset(new_crtc_state))
8314 update_stream_scaling_settings(
8315 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8316
8317 /* ABM settings */
8318 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8319
8320 /*
8321 * Color management settings. We also update color properties
8322 * when a modeset is needed, to ensure it gets reprogrammed.
8323 */
8324 if (dm_new_crtc_state->base.color_mgmt_changed ||
8325 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8326 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8327 if (ret)
8328 goto fail;
8329 }
8330
8331 /* Update Freesync settings. */
8332 get_freesync_config_for_crtc(dm_new_crtc_state,
8333 dm_new_conn_state);
8334
8335 return ret;
8336
8337 fail:
8338 if (new_stream)
8339 dc_stream_release(new_stream);
8340 return ret;
8341 }
8342
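/*
 * Decide whether a plane update requires DC to remove and recreate the
 * planes on the stream. Any change that can affect pipe allocation or
 * bandwidth (CRTC move, modeset, color management, size/scaling, rotation,
 * blending, alpha, colorspace, pixel format, tiling/DCC flags) triggers a
 * reset; allow_modeset commits conservatively always reset.
 */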
8343 static bool should_reset_plane(struct drm_atomic_state *state,
8344 struct drm_plane *plane,
8345 struct drm_plane_state *old_plane_state,
8346 struct drm_plane_state *new_plane_state)
8347 {
8348 struct drm_plane *other;
8349 struct drm_plane_state *old_other_state, *new_other_state;
8350 struct drm_crtc_state *new_crtc_state;
8351 int i;
8352
8353 /*
8354 * TODO: Remove this hack once the checks below are sufficient
8355 * to determine when we need to reset all the planes on
8356 * the stream.
8357 */
8358 if (state->allow_modeset)
8359 return true;
8360
8361 /* Exit early if we know that we're adding or removing the plane. */
8362 if (old_plane_state->crtc != new_plane_state->crtc)
8363 return true;
8364
8365 /* old crtc == new_crtc == NULL, plane not in context. */
8366 if (!new_plane_state->crtc)
8367 return false;
8368
8369 new_crtc_state =
8370 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8371
8372 if (!new_crtc_state)
8373 return true;
8374
8375 /* CRTC Degamma changes currently require us to recreate planes. */
8376 if (new_crtc_state->color_mgmt_changed)
8377 return true;
8378
8379 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8380 return true;
8381
8382 /*
8383 * If there are any new primary or overlay planes being added or
8384 * removed then the z-order can potentially change. To ensure
8385 * correct z-order and pipe acquisition the current DC architecture
8386 * requires us to remove and recreate all existing planes.
8387 *
8388 * TODO: Come up with a more elegant solution for this.
8389 */
8390 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8391 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8392
8393 if (other->type == DRM_PLANE_TYPE_CURSOR)
8394 continue;
8395
8396 if (old_other_state->crtc != new_plane_state->crtc &&
8397 new_other_state->crtc != new_plane_state->crtc)
8398 continue;
8399
8400 if (old_other_state->crtc != new_other_state->crtc)
8401 return true;
8402
8403 /* Src/dst size and scaling updates. */
8404 if (old_other_state->src_w != new_other_state->src_w ||
8405 old_other_state->src_h != new_other_state->src_h ||
8406 old_other_state->crtc_w != new_other_state->crtc_w ||
8407 old_other_state->crtc_h != new_other_state->crtc_h)
8408 return true;
8409
8410 /* Rotation / mirroring updates. */
8411 if (old_other_state->rotation != new_other_state->rotation)
8412 return true;
8413
8414 /* Blending updates. */
8415 if (old_other_state->pixel_blend_mode !=
8416 new_other_state->pixel_blend_mode)
8417 return true;
8418
8419 /* Alpha updates. */
8420 if (old_other_state->alpha != new_other_state->alpha)
8421 return true;
8422
8423 /* Colorspace changes. */
8424 if (old_other_state->color_range != new_other_state->color_range ||
8425 old_other_state->color_encoding != new_other_state->color_encoding)
8426 return true;
8427
8428 /* Framebuffer checks fall at the end. */
8429 if (!old_other_state->fb || !new_other_state->fb)
8430 continue;
8431
8432 /* Pixel format changes can require bandwidth updates. */
8433 if (old_other_state->fb->format != new_other_state->fb->format)
8434 return true;
8435
8436 old_dm_plane_state = to_dm_plane_state(old_other_state);
8437 new_dm_plane_state = to_dm_plane_state(new_other_state);
8438
8439 /* Tiling and DCC changes also require bandwidth updates. */
8440 if (old_dm_plane_state->tiling_flags !=
8441 new_dm_plane_state->tiling_flags)
8442 return true;
8443 }
8444
8445 return false;
8446 }
8447
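/*
 * Validate one plane transition during atomic check: cursor planes only get
 * a size check, removed or reset planes are dropped from the DC context,
 * and newly enabled planes get a dc_plane_state created, checked against
 * the CRTC state and filled from the DRM plane attributes.
 */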
8448 static int dm_update_plane_state(struct dc *dc,
8449 struct drm_atomic_state *state,
8450 struct drm_plane *plane,
8451 struct drm_plane_state *old_plane_state,
8452 struct drm_plane_state *new_plane_state,
8453 bool enable,
8454 bool *lock_and_validation_needed)
8455 {
8456
8457 struct dm_atomic_state *dm_state = NULL;
8458 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8459 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8460 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8461 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8462 struct amdgpu_crtc *new_acrtc;
8463 bool needs_reset;
8464 int ret = 0;
8465
8466
8467 new_plane_crtc = new_plane_state->crtc;
8468 old_plane_crtc = old_plane_state->crtc;
8469 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8470 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8471
8472 /*TODO Implement better atomic check for cursor plane */
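	/*
	 * The cursor is not handled as a regular DC plane, so only a basic
	 * size check against the hardware cursor limits is performed here.
	 */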
8473 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8474 if (!enable || !new_plane_crtc ||
8475 drm_atomic_plane_disabling(plane->state, new_plane_state))
8476 return 0;
8477
8478 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8479
8480 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8481 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8482 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8483 new_plane_state->crtc_w, new_plane_state->crtc_h);
8484 return -EINVAL;
8485 }
8486
8487 return 0;
8488 }
8489
8490 needs_reset = should_reset_plane(state, plane, old_plane_state,
8491 new_plane_state);
8492
8493 /* Remove any changed/removed planes */
8494 if (!enable) {
8495 if (!needs_reset)
8496 return 0;
8497
8498 if (!old_plane_crtc)
8499 return 0;
8500
8501 old_crtc_state = drm_atomic_get_old_crtc_state(
8502 state, old_plane_crtc);
8503 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8504
8505 if (!dm_old_crtc_state->stream)
8506 return 0;
8507
8508 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8509 plane->base.id, old_plane_crtc->base.id);
8510
8511 ret = dm_atomic_get_state(state, &dm_state);
8512 if (ret)
8513 return ret;
8514
8515 if (!dc_remove_plane_from_context(
8516 dc,
8517 dm_old_crtc_state->stream,
8518 dm_old_plane_state->dc_state,
8519 dm_state->context)) {
8520
8521 return -EINVAL;
8522 }
8523
8524
8525 dc_plane_state_release(dm_old_plane_state->dc_state);
8526 dm_new_plane_state->dc_state = NULL;
8527
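		/*
		 * Removing a plane changes the bandwidth requirements of the
		 * stream, so full DC validation under the global lock is
		 * required before this state can be committed.
		 */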
8528 *lock_and_validation_needed = true;
8529
8530 } else { /* Add new planes */
8531 struct dc_plane_state *dc_new_plane_state;
8532
8533 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8534 return 0;
8535
8536 if (!new_plane_crtc)
8537 return 0;
8538
8539 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8540 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8541
8542 if (!dm_new_crtc_state->stream)
8543 return 0;
8544
8545 if (!needs_reset)
8546 return 0;
8547
8548 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8549 if (ret)
8550 return ret;
8551
8552 WARN_ON(dm_new_plane_state->dc_state);
8553
8554 dc_new_plane_state = dc_create_plane_state(dc);
8555 if (!dc_new_plane_state)
8556 return -ENOMEM;
8557
8558 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8559 plane->base.id, new_plane_crtc->base.id);
8560
8561 ret = fill_dc_plane_attributes(
8562 drm_to_adev(new_plane_crtc->dev),
8563 dc_new_plane_state,
8564 new_plane_state,
8565 new_crtc_state);
8566 if (ret) {
8567 dc_plane_state_release(dc_new_plane_state);
8568 return ret;
8569 }
8570
8571 ret = dm_atomic_get_state(state, &dm_state);
8572 if (ret) {
8573 dc_plane_state_release(dc_new_plane_state);
8574 return ret;
8575 }
8576
8577 /*
8578 * Any atomic check errors that occur after this will
8579 * not need a release. The plane state will be attached
8580 * to the stream, and therefore part of the atomic
8581 * state. It'll be released when the atomic state is
8582 * cleaned.
8583 */
8584 if (!dc_add_plane_to_context(
8585 dc,
8586 dm_new_crtc_state->stream,
8587 dc_new_plane_state,
8588 dm_state->context)) {
8589
8590 dc_plane_state_release(dc_new_plane_state);
8591 return -EINVAL;
8592 }
8593
8594 dm_new_plane_state->dc_state = dc_new_plane_state;
8595
8596 /* Tell DC to do a full surface update every time there
8597 * is a plane change. Inefficient, but works for now.
8598 */
8599 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8600
8601 *lock_and_validation_needed = true;
8602 }
8603
8604
8605 return ret;
8606 }
8607
8608 #if defined(CONFIG_DRM_AMD_DC_DCN)
8609 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8610 {
8611 struct drm_connector *connector;
8612 struct drm_connector_state *conn_state;
8613 struct amdgpu_dm_connector *aconnector = NULL;
8614 int i;
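	/* Find an MST connector assigned to this CRTC in the new state, if any. */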
8615 for_each_new_connector_in_state(state, connector, conn_state, i) {
8616 if (conn_state->crtc != crtc)
8617 continue;
8618
8619 aconnector = to_amdgpu_dm_connector(connector);
8620 if (!aconnector->port || !aconnector->mst_port)
8621 aconnector = NULL;
8622 else
8623 break;
8624 }
8625
8626 if (!aconnector)
8627 return 0;
8628
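	/*
	 * DSC recomputation can shift bandwidth between all streams sharing
	 * this MST topology, so add every CRTC on the topology to the state.
	 */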
8629 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8630 }
8631 #endif
8632
8633 static int validate_overlay(struct drm_atomic_state *state)
8634 {
8635 int i;
8636 struct drm_plane *plane;
8637 struct drm_plane_state *old_plane_state, *new_plane_state;
8638 struct drm_plane_state *primary_state, *overlay_state = NULL;
8639
8640 /* Check if primary plane is contained inside overlay */
8641 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8642 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8643 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8644 return 0;
8645
8646 overlay_state = new_plane_state;
8647 continue;
8648 }
8649 }
8650
8651 /* check if we're making changes to the overlay plane */
8652 if (!overlay_state)
8653 return 0;
8654
8655 /* check if overlay plane is enabled */
8656 if (!overlay_state->crtc)
8657 return 0;
8658
8659 /* find the primary plane for the CRTC that the overlay is enabled on */
8660 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8661 if (IS_ERR(primary_state))
8662 return PTR_ERR(primary_state);
8663
8664 /* check if primary plane is enabled */
8665 if (!primary_state->crtc)
8666 return 0;
8667
8668 /* Perform the bounds check to ensure the overlay plane covers the primary */
8669 if (primary_state->crtc_x < overlay_state->crtc_x ||
8670 primary_state->crtc_y < overlay_state->crtc_y ||
8671 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8672 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8673 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
8674 return -EINVAL;
8675 }
8676
8677 return 0;
8678 }
8679
8680 /**
8681 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8682 * @dev: The DRM device
8683 * @state: The atomic state to commit
8684 *
8685 * Validate that the given atomic state is programmable by DC into hardware.
8686 * This involves constructing a &struct dc_state reflecting the new hardware
8687 * state we wish to commit, then querying DC to see if it is programmable. It's
8688 * important not to modify the existing DC state. Otherwise, atomic_check
8689 * may unexpectedly commit hardware changes.
8690 *
8691 * When validating the DC state, it's important that the right locks are
8692 * acquired. For full updates case which removes/adds/updates streams on one
8693 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8694 * that any such full update commit will wait for completion of any outstanding
8695 * flip using DRMs synchronization events.
8696 *
8697 * Note that DM adds the affected connectors for all CRTCs in state, when that
8698 * might not seem necessary. This is because DC stream creation requires the
8699 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8700 * be possible but non-trivial - a possible TODO item.
8701 *
8702  * Return: 0 on success, negative error code if validation failed.
8703 */
8704 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8705 struct drm_atomic_state *state)
8706 {
8707 struct amdgpu_device *adev = drm_to_adev(dev);
8708 struct dm_atomic_state *dm_state = NULL;
8709 struct dc *dc = adev->dm.dc;
8710 struct drm_connector *connector;
8711 struct drm_connector_state *old_con_state, *new_con_state;
8712 struct drm_crtc *crtc;
8713 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8714 struct drm_plane *plane;
8715 struct drm_plane_state *old_plane_state, *new_plane_state;
8716 enum dc_status status;
8717 int ret, i;
8718 bool lock_and_validation_needed = false;
8719
8720 amdgpu_check_debugfs_connector_property_change(adev, state);
8721
8722 ret = drm_atomic_helper_check_modeset(dev, state);
8723 if (ret)
8724 goto fail;
8725
8726 /* Check connector changes */
8727 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8728 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8729 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8730
8731 /* Skip connectors that are disabled or part of modeset already. */
8732 if (!old_con_state->crtc && !new_con_state->crtc)
8733 continue;
8734
8735 if (!new_con_state->crtc)
8736 continue;
8737
8738 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8739 if (IS_ERR(new_crtc_state)) {
8740 ret = PTR_ERR(new_crtc_state);
8741 goto fail;
8742 }
8743
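		/*
		 * The ABM (Adaptive Backlight Modulation) level is applied when
		 * the stream is (re)created, so flag a connector change to send
		 * the CRTC through the modeset path and pick up the new level.
		 */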
8744 if (dm_old_con_state->abm_level !=
8745 dm_new_con_state->abm_level)
8746 new_crtc_state->connectors_changed = true;
8747 }
8748
8749 #if defined(CONFIG_DRM_AMD_DC_DCN)
8750 if (dc_resource_is_dsc_encoding_supported(dc)) {
8751 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8752 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8753 ret = add_affected_mst_dsc_crtcs(state, crtc);
8754 if (ret)
8755 goto fail;
8756 }
8757 }
8758 }
8759 #endif
8760 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8761 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8762 !new_crtc_state->color_mgmt_changed &&
8763 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8764 continue;
8765
8766 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8767 if (ret)
8768 goto fail;
8769
8770 if (!new_crtc_state->enable)
8771 continue;
8772
8773 ret = drm_atomic_add_affected_connectors(state, crtc);
8774 if (ret)
8775 			goto fail;
8776
8777 ret = drm_atomic_add_affected_planes(state, crtc);
8778 if (ret)
8779 goto fail;
8780 }
8781
8782 /*
8783 * Add all primary and overlay planes on the CRTC to the state
8784 * whenever a plane is enabled to maintain correct z-ordering
8785 * and to enable fast surface updates.
8786 */
8787 drm_for_each_crtc(crtc, dev) {
8788 bool modified = false;
8789
8790 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8791 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8792 continue;
8793
8794 if (new_plane_state->crtc == crtc ||
8795 old_plane_state->crtc == crtc) {
8796 modified = true;
8797 break;
8798 }
8799 }
8800
8801 if (!modified)
8802 continue;
8803
8804 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8805 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8806 continue;
8807
8808 new_plane_state =
8809 drm_atomic_get_plane_state(state, plane);
8810
8811 if (IS_ERR(new_plane_state)) {
8812 ret = PTR_ERR(new_plane_state);
8813 goto fail;
8814 }
8815 }
8816 }
8817
8818 /* Prepass for updating tiling flags on new planes. */
8819 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8820 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8821 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8822
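		/*
		 * Cache the tiling flags and TMZ (trusted memory zone) status
		 * of the buffer backing the framebuffer so that later plane
		 * checks can detect changes that require revalidation.
		 */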
8823 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8824 &new_dm_plane_state->tmz_surface);
8825 if (ret)
8826 goto fail;
8827 }
8828
8829 	/* Remove existing planes if they are modified */
8830 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8831 ret = dm_update_plane_state(dc, state, plane,
8832 old_plane_state,
8833 new_plane_state,
8834 false,
8835 &lock_and_validation_needed);
8836 if (ret)
8837 goto fail;
8838 }
8839
8840 /* Disable all crtcs which require disable */
8841 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8842 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8843 old_crtc_state,
8844 new_crtc_state,
8845 false,
8846 &lock_and_validation_needed);
8847 if (ret)
8848 goto fail;
8849 }
8850
8851 /* Enable all crtcs which require enable */
8852 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8853 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8854 old_crtc_state,
8855 new_crtc_state,
8856 true,
8857 &lock_and_validation_needed);
8858 if (ret)
8859 goto fail;
8860 }
8861
8862 ret = validate_overlay(state);
8863 if (ret)
8864 goto fail;
8865
8866 /* Add new/modified planes */
8867 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8868 ret = dm_update_plane_state(dc, state, plane,
8869 old_plane_state,
8870 new_plane_state,
8871 true,
8872 &lock_and_validation_needed);
8873 if (ret)
8874 goto fail;
8875 }
8876
8877 /* Run this here since we want to validate the streams we created */
8878 ret = drm_atomic_helper_check_planes(dev, state);
8879 if (ret)
8880 goto fail;
8881
8882 if (state->legacy_cursor_update) {
8883 /*
8884 * This is a fast cursor update coming from the plane update
8885 * helper, check if it can be done asynchronously for better
8886 * performance.
8887 */
8888 state->async_update =
8889 !drm_atomic_helper_async_check(dev, state);
8890
8891 /*
8892 * Skip the remaining global validation if this is an async
8893 * update. Cursor updates can be done without affecting
8894 * state or bandwidth calcs and this avoids the performance
8895 * penalty of locking the private state object and
8896 * allocating a new dc_state.
8897 */
8898 if (state->async_update)
8899 return 0;
8900 }
8901
8902 	/* Check scaling and underscan changes */
8903 	/* TODO Removed scaling changes validation due to inability to commit
8904 	 * new stream into context without causing full reset. Need to
8905 * decide how to handle.
8906 */
8907 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8908 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8909 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8910 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8911
8912 /* Skip any modesets/resets */
8913 if (!acrtc || drm_atomic_crtc_needs_modeset(
8914 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8915 continue;
8916
8917 		/* Skip anything that is not a scaling or underscan change */
8918 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8919 continue;
8920
8921 lock_and_validation_needed = true;
8922 }
8923
8924 /**
8925 * Streams and planes are reset when there are changes that affect
8926 * bandwidth. Anything that affects bandwidth needs to go through
8927 * DC global validation to ensure that the configuration can be applied
8928 * to hardware.
8929 *
8930 * We have to currently stall out here in atomic_check for outstanding
8931 * commits to finish in this case because our IRQ handlers reference
8932 * DRM state directly - we can end up disabling interrupts too early
8933 * if we don't.
8934 *
8935 * TODO: Remove this stall and drop DM state private objects.
8936 */
8937 if (lock_and_validation_needed) {
8938 ret = dm_atomic_get_state(state, &dm_state);
8939 if (ret)
8940 goto fail;
8941
8942 ret = do_aquire_global_lock(dev, state);
8943 if (ret)
8944 goto fail;
8945
8946 #if defined(CONFIG_DRM_AMD_DC_DCN)
8947 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8948 goto fail;
8949
8950 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8951 if (ret)
8952 goto fail;
8953 #endif
8954
8955 /*
8956 * Perform validation of MST topology in the state:
8957 * We need to perform MST atomic check before calling
8958 * dc_validate_global_state(), or there is a chance
8959 * to get stuck in an infinite loop and hang eventually.
8960 */
8961 ret = drm_dp_mst_atomic_check(state);
8962 if (ret)
8963 goto fail;
8964 status = dc_validate_global_state(dc, dm_state->context, false);
8965 if (status != DC_OK) {
8966 drm_dbg_atomic(dev,
8967 "DC global validation failure: %s (%d)",
8968 dc_status_to_str(status), status);
8969 ret = -EINVAL;
8970 goto fail;
8971 }
8972 } else {
8973 /*
8974 * The commit is a fast update. Fast updates shouldn't change
8975 * the DC context, affect global validation, and can have their
8976 * commit work done in parallel with other commits not touching
8977 * the same resource. If we have a new DC context as part of
8978 * the DM atomic state from validation we need to free it and
8979 * retain the existing one instead.
8980 *
8981 * Furthermore, since the DM atomic state only contains the DC
8982 * context and can safely be annulled, we can free the state
8983 * and clear the associated private object now to free
8984 * some memory and avoid a possible use-after-free later.
8985 */
8986
8987 for (i = 0; i < state->num_private_objs; i++) {
8988 struct drm_private_obj *obj = state->private_objs[i].ptr;
8989
8990 if (obj->funcs == adev->dm.atomic_obj.funcs) {
8991 int j = state->num_private_objs-1;
8992
8993 dm_atomic_destroy_state(obj,
8994 state->private_objs[i].state);
8995
8996 /* If i is not at the end of the array then the
8997 * last element needs to be moved to where i was
8998 * before the array can safely be truncated.
8999 */
9000 if (i != j)
9001 state->private_objs[i] =
9002 state->private_objs[j];
9003
9004 state->private_objs[j].ptr = NULL;
9005 state->private_objs[j].state = NULL;
9006 state->private_objs[j].old_state = NULL;
9007 state->private_objs[j].new_state = NULL;
9008
9009 state->num_private_objs = j;
9010 break;
9011 }
9012 }
9013 }
9014
9015 /* Store the overall update type for use later in atomic check. */
9016 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9017 struct dm_crtc_state *dm_new_crtc_state =
9018 to_dm_crtc_state(new_crtc_state);
9019
9020 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9021 UPDATE_TYPE_FULL :
9022 UPDATE_TYPE_FAST;
9023 }
9024
9025 /* Must be success */
9026 WARN_ON(ret);
9027 return ret;
9028
9029 fail:
9030 if (ret == -EDEADLK)
9031 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9032 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9033 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9034 else
9035 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9036
9037 return ret;
9038 }
9039
9040 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9041 struct amdgpu_dm_connector *amdgpu_dm_connector)
9042 {
9043 uint8_t dpcd_data;
9044 bool capable = false;
9045
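	/*
	 * The DP_MSA_TIMING_PAR_IGNORED bit in the DP_DOWN_STREAM_PORT_COUNT
	 * DPCD register indicates that the sink can ignore the MSA video
	 * timing parameters, a prerequisite for variable refresh rate
	 * (FreeSync) operation over DP.
	 */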
9046 if (amdgpu_dm_connector->dc_link &&
9047 dm_helpers_dp_read_dpcd(
9048 NULL,
9049 amdgpu_dm_connector->dc_link,
9050 DP_DOWN_STREAM_PORT_COUNT,
9051 &dpcd_data,
9052 sizeof(dpcd_data))) {
9053 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9054 }
9055
9056 return capable;
9057 }
9058 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9059 struct edid *edid)
9060 {
9061 int i;
9062 bool edid_check_required;
9063 struct detailed_timing *timing;
9064 struct detailed_non_pixel *data;
9065 struct detailed_data_monitor_range *range;
9066 struct amdgpu_dm_connector *amdgpu_dm_connector =
9067 to_amdgpu_dm_connector(connector);
9068 struct dm_connector_state *dm_con_state = NULL;
9069
9070 struct drm_device *dev = connector->dev;
9071 struct amdgpu_device *adev = drm_to_adev(dev);
9072 bool freesync_capable = false;
9073
9074 if (!connector->state) {
9075 DRM_ERROR("%s - Connector has no state", __func__);
9076 goto update;
9077 }
9078
9079 if (!edid) {
9080 dm_con_state = to_dm_connector_state(connector->state);
9081
9082 amdgpu_dm_connector->min_vfreq = 0;
9083 amdgpu_dm_connector->max_vfreq = 0;
9084 amdgpu_dm_connector->pixel_clock_mhz = 0;
9085
9086 goto update;
9087 }
9088
9089 dm_con_state = to_dm_connector_state(connector->state);
9090
9091 edid_check_required = false;
9092 if (!amdgpu_dm_connector->dc_sink) {
9093 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9094 goto update;
9095 }
9096 if (!adev->dm.freesync_module)
9097 goto update;
9098 /*
9099 	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
9100 */
9101 if (edid) {
9102 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9103 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9104 edid_check_required = is_dp_capable_without_timing_msa(
9105 adev->dm.dc,
9106 amdgpu_dm_connector);
9107 }
9108 }
9109 	if (edid_check_required && (edid->version > 1 ||
9110 (edid->version == 1 && edid->revision > 1))) {
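		/*
		 * A base EDID block carries four 18-byte detailed descriptors;
		 * scan them for a monitor range limits descriptor to obtain
		 * the supported vertical refresh range.
		 */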
9111 for (i = 0; i < 4; i++) {
9112
9113 timing = &edid->detailed_timings[i];
9114 data = &timing->data.other_data;
9115 range = &data->data.range;
9116 /*
9117 * Check if monitor has continuous frequency mode
9118 */
9119 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9120 continue;
9121 /*
9122 * Check for flag range limits only. If flag == 1 then
9123 * no additional timing information provided.
9124 * Default GTF, GTF Secondary curve and CVT are not
9125 * supported
9126 */
9127 if (range->flags != 1)
9128 continue;
9129
9130 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9131 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9132 amdgpu_dm_connector->pixel_clock_mhz =
9133 range->pixel_clock_mhz * 10;
9134 break;
9135 }
9136
9137 if (amdgpu_dm_connector->max_vfreq -
9138 amdgpu_dm_connector->min_vfreq > 10) {
9139
9140 freesync_capable = true;
9141 }
9142 }
9143
9144 update:
9145 if (dm_con_state)
9146 dm_con_state->freesync_capable = freesync_capable;
9147
9148 if (connector->vrr_capable_property)
9149 drm_connector_set_vrr_capable_property(connector,
9150 freesync_capable);
9151 }
9152
9153 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9154 {
9155 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9156
9157 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9158 return;
9159 if (link->type == dc_connection_none)
9160 return;
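	/*
	 * DP_PSR_SUPPORT reports the highest PSR version supported by the
	 * eDP sink; a value of 0 means the panel has no PSR support.
	 */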
9161 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9162 dpcd_data, sizeof(dpcd_data))) {
9163 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9164
9165 if (dpcd_data[0] == 0) {
9166 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9167 link->psr_settings.psr_feature_enabled = false;
9168 } else {
9169 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9170 link->psr_settings.psr_feature_enabled = true;
9171 }
9172
9173 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9174 }
9175 }
9176
9177 /*
9178 * amdgpu_dm_link_setup_psr() - configure psr link
9179 * @stream: stream state
9180 *
9181 * Return: true if success
9182 */
9183 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9184 {
9185 struct dc_link *link = NULL;
9186 struct psr_config psr_config = {0};
9187 struct psr_context psr_context = {0};
9188 bool ret = false;
9189
9190 if (stream == NULL)
9191 return false;
9192
9193 link = stream->link;
9194
9195 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9196
9197 if (psr_config.psr_version > 0) {
9198 psr_config.psr_exit_link_training_required = 0x1;
9199 psr_config.psr_frame_capture_indication_req = 0;
9200 psr_config.psr_rfb_setup_time = 0x37;
9201 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9202 psr_config.allow_smu_optimizations = 0x0;
9203
9204 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9205
9206 }
9207 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9208
9209 return ret;
9210 }
9211
9212 /*
9213 * amdgpu_dm_psr_enable() - enable psr f/w
9214 * @stream: stream state
9215 *
9216 * Return: true if success
9217 */
9218 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9219 {
9220 struct dc_link *link = stream->link;
9221 unsigned int vsync_rate_hz = 0;
9222 struct dc_static_screen_params params = {0};
9223 /* Calculate number of static frames before generating interrupt to
9224 * enter PSR.
9225 */
9226 	/* Initialize with a fail-safe default of 2 static frames. */
9227 unsigned int num_frames_static = 2;
9228
9229 DRM_DEBUG_DRIVER("Enabling psr...\n");
9230
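	/* Nominal refresh rate in Hz = pixel clock / (h_total * v_total). */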
9231 vsync_rate_hz = div64_u64(div64_u64((
9232 stream->timing.pix_clk_100hz * 100),
9233 stream->timing.v_total),
9234 stream->timing.h_total);
9235
9236 	/*
9237 	 * Round up: calculate the number of frames such that at least
9238 	 * 30 ms of time has passed.
9239 */
9240 if (vsync_rate_hz != 0) {
9241 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9242 num_frames_static = (30000 / frame_time_microsec) + 1;
9243 }
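	/*
	 * Worked example: at 60 Hz the frame time is ~16666 us, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames; at 144 Hz it is
	 * ~6944 us, giving 30000 / 6944 + 1 = 5 frames.
	 */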
9244
9245 params.triggers.cursor_update = true;
9246 params.triggers.overlay_update = true;
9247 params.triggers.surface_update = true;
9248 params.num_frames = num_frames_static;
9249
9250 dc_stream_set_static_screen_params(link->ctx->dc,
9251 &stream, 1,
9252 					 &params);
9253
9254 return dc_link_set_psr_allow_active(link, true, false);
9255 }
9256
9257 /*
9258 * amdgpu_dm_psr_disable() - disable psr f/w
9259 * @stream: stream state
9260 *
9261 * Return: true if success
9262 */
9263 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9264 {
9265
9266 DRM_DEBUG_DRIVER("Disabling psr...\n");
9267
9268 return dc_link_set_psr_allow_active(stream->link, false, true);
9269 }
9270
9271 /*
9272  * amdgpu_dm_psr_disable_all() - disable psr f/w
9273  * if psr is enabled on any stream
9274 *
9275 * Return: true if success
9276 */
9277 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9278 {
9279 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9280 return dc_set_psr_allow_active(dm->dc, false);
9281 }
9282
9283 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9284 {
9285 struct amdgpu_device *adev = drm_to_adev(dev);
9286 struct dc *dc = adev->dm.dc;
9287 int i;
9288
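	/*
	 * Apply the force_timing_sync setting to every stream in the current
	 * DC state and re-run the CRTC synchronization sequence under the DC
	 * lock.
	 */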
9289 mutex_lock(&adev->dm.dc_lock);
9290 if (dc->current_state) {
9291 for (i = 0; i < dc->current_state->stream_count; ++i)
9292 dc->current_state->streams[i]
9293 ->triggered_crtc_reset.enabled =
9294 adev->dm.force_timing_sync;
9295
9296 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9297 dc_trigger_sync(dc, dc->current_state);
9298 }
9299 mutex_unlock(&adev->dm.dc_lock);
9300 }
9301