/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_vblank.h>

static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

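/*
 * Consumes the fence in @f. Returns true if the fence had not yet
 * signaled and amdgpu_display_flip_callback() was installed to reschedule
 * the flip work once it does; returns false if the fence was NULL or
 * already signaled, in which case the caller may proceed immediately.
 */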
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

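	/* Defer the flip until the exclusive fence and all shared fences
	 * attached to the new buffer have signaled; each still-pending
	 * fence re-queues this work from amdgpu_display_flip_callback().
	 */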
	if (amdgpu_display_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_abo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 struct drm_pending_vblank_event *event,
					 uint32_t page_flip_flags, uint32_t target,
					 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
				    &work->shared_count,
				    &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
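
	/* Translate the absolute target from DRM vblank counter space into
	 * the hardware counter space polled by the flip worker above.
	 */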
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
			DRM_ERROR("failed to unpin new abo in error path\n");

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * amdgpu_display_ddc_probe - probe for a monitor over DDC
 * @amdgpu_connector: connector to probe
 * @use_aux: use the DP AUX channel i2c bus instead of the regular DDC bus
 *
 * Reads the first bytes of the EDID over DDC and checks for a valid
 * EDID header to decide whether a monitor is attached.
 *
 * Returns true if a monitor appears to be present, false otherwise.
 */
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux) {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	} else {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
	}

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector
		 */
		return false;
	}
	return true;
}
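
/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * a connector ->detect() implementation could turn the probe result
 * into a connection status:
 *
 *	if (amdgpu_display_ddc_probe(amdgpu_connector, false))
 *		return connector_status_connected;
 *	return connector_status_disconnected;
 */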

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * If amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid a hang caused by placement of a scanout BO in GTT on
	 * certain APUs. So force the BO placement to VRAM in case this
	 * architecture will not allow USWC mappings.
	 * Also, don't allow the GTT domain if the BO doesn't have the USWC
	 * flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type)) {
		switch (adev->asic_type) {
		case CHIP_CARRIZO:
		case CHIP_STONEY:
			domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;
		case CHIP_RAVEN:
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;
		default:
			break;
		}
	}
#endif

	return domain;
}
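
/*
 * A minimal usage sketch, mirroring the pin in
 * amdgpu_display_crtc_page_flip_target() above: scanout buffers should
 * only be pinned to domains the display hardware can scan out from.
 *
 *	r = amdgpu_bo_pin(bo,
 *			  amdgpu_display_supported_domains(adev, bo->flags));
 */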

int amdgpu_display_framebuffer_init(struct drm_device *dev,
				    struct amdgpu_framebuffer *rfb,
				    const struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->base.obj[0] = NULL;
		return ret;
	}
	return 0;
}
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	if (obj->import_attach) {
		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
		/* drop the reference taken by the lookup above */
		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev_to_drm(adev));

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	if (amdgpu_device_has_dc_support(adev)) {
		adev->mode_info.abm_level_property =
			drm_property_create_range(adev_to_drm(adev), 0,
						  "abm level", 0, 4);
		if (!adev->mode_info.abm_level_property)
			return -ENOMEM;
	}

	return 0;
}
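
/*
 * A minimal sketch (illustration only, not part of this file) of how a
 * connector would typically attach one of the properties created above,
 * here with UNDERSCAN_OFF as the initial value:
 *
 *	drm_object_attach_property(&connector->base,
 *				   adev->mode_info.underscan_property,
 *				   UNDERSCAN_OFF);
 */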

void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					    const struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
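	/* Express the scale factors as 20.12 fixed-point src/dst ratios,
	 * e.g. a 1280-wide mode on a 1920-wide native panel gives
	 * hsc = 1280/1920 ~= 0.667.
	 */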
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
				       unsigned int pipe, unsigned int flags, int *vpos,
				       int *hpos, ktime_t *stime, ktime_t *etime,
				       const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}

bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
				      bool in_vblank_irq, int *vpos,
				      int *hpos, ktime_t *stime, ktime_t *etime,
				      const struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;

	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
						  stime, etime, mode);
}