// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>

#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);

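/*
 * One-time MDP4 block setup: burst/fetch configuration for the DMA and
 * VG/RGB pipes, layer-mixer defaults, and CSC/YUV disabled, all done with
 * the device held active via runtime PM.
 */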
static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	u32 dmap_cfg, vg_cfg;
	unsigned long clk;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

	pm_runtime_put_sync(dev->dev);

	return ret;
}

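/*
 * enable_commit()/disable_commit() bracket an atomic commit; they simply
 * keep the MDP4 clocks enabled for its duration.
 */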
static void mdp4_enable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_enable(mdp4_kms);
}

static void mdp4_disable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_disable(mdp4_kms);
}

static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
}

static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		mdp4_crtc_wait_for_commit_done(crtc);
}

static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* if we had >1 encoder, we'd need something more clever: */
	switch (encoder->encoder_type) {
	case DRM_MODE_ENCODER_TMDS:
		return mdp4_dtv_round_pixclk(encoder, rate);
	case DRM_MODE_ENCODER_LVDS:
	case DRM_MODE_ENCODER_DSI:
	default:
		return rate;
	}
}

static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct device *dev = mdp4_kms->dev->dev;
	struct msm_gem_address_space *aspace = kms->aspace;

	if (mdp4_kms->blank_cursor_iova)
		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
	drm_gem_object_put(mdp4_kms->blank_cursor_bo);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	if (mdp4_kms->rpm_enabled)
		pm_runtime_disable(dev);

	kfree(mdp4_kms);
}

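/* KMS function table wiring the MDP4 hooks into the common MSM/MDP KMS framework */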
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp4_hw_init,
		.irq_preinstall  = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall   = mdp4_irq_uninstall,
		.irq             = mdp4_irq,
		.enable_vblank   = mdp4_enable_vblank,
		.disable_vblank  = mdp4_disable_vblank,
		.enable_commit   = mdp4_enable_commit,
		.disable_commit  = mdp4_disable_commit,
		.prepare_commit  = mdp4_prepare_commit,
		.flush_commit    = mdp4_flush_commit,
		.wait_flush      = mdp4_wait_flush,
		.complete_commit = mdp4_complete_commit,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp4_round_pixclk,
		.destroy         = mdp4_destroy,
	},
	.set_irqmask         = mdp4_set_irqmask,
};

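/* Clock gating helpers; pclk, lut_clk and axi_clk are optional and may be NULL */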
int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_disable_unprepare(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_disable_unprepare(mdp4_kms->pclk);
	if (mdp4_kms->lut_clk)
		clk_disable_unprepare(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_disable_unprepare(mdp4_kms->axi_clk);

	return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_prepare_enable(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_prepare_enable(mdp4_kms->pclk);
	if (mdp4_kms->lut_clk)
		clk_prepare_enable(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_prepare_enable(mdp4_kms->axi_clk);

	return 0;
}

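/*
 * Construct the encoder (and, for LVDS, the connector) for one interface
 * type: LCDC/LVDS, DTV/HDMI or DSI.
 */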
static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
				  int intf_type)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct device_node *panel_node;
	int dsi_id;
	int ret;

	switch (intf_type) {
	case DRM_MODE_ENCODER_LVDS:
		/*
		 * bail out early if there is no panel node (no need to
		 * initialize LCDC encoder and LVDS connector)
		 */
		panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
		if (!panel_node)
			return 0;

		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
			of_node_put(panel_node);
			return PTR_ERR(encoder);
		}

		/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
		encoder->possible_crtcs = 1 << DMA_P;

		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
		if (IS_ERR(connector)) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
			of_node_put(panel_node);
			return PTR_ERR(connector);
		}

		priv->encoders[priv->num_encoders++] = encoder;
		priv->connectors[priv->num_connectors++] = connector;

		break;
	case DRM_MODE_ENCODER_TMDS:
		encoder = mdp4_dtv_encoder_init(dev);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
			return PTR_ERR(encoder);
		}

		/* DTV can be hooked to DMA_E: */
		encoder->possible_crtcs = 1 << 1;

		if (priv->hdmi) {
			/* Construct bridge/connector for HDMI: */
			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
			if (ret) {
				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
				return ret;
			}
		}

		priv->encoders[priv->num_encoders++] = encoder;

		break;
	case DRM_MODE_ENCODER_DSI:
		/* only DSI1 supported for now */
		dsi_id = 0;

		if (!priv->dsi[dsi_id])
			break;

		encoder = mdp4_dsi_encoder_init(dev);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			DRM_DEV_ERROR(dev->dev,
				"failed to construct DSI encoder: %d\n", ret);
			return ret;
		}

		/* TODO: Add DMA_S later? */
		encoder->possible_crtcs = 1 << DMA_P;
		priv->encoders[priv->num_encoders++] = encoder;

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
				ret);
			return ret;
		}

		break;
	default:
		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
		return -EINVAL;
	}

	return 0;
}

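/*
 * Create the VG and RGB planes and the DMA_P/DMA_E CRTCs, then bring up
 * the interfaces on top of them.
 */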
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;
	static const enum mdp4_pipe rgb_planes[] = {
		RGB1, RGB2,
	};
	static const enum mdp4_pipe vg_planes[] = {
		VG1, VG2,
	};
	static const enum mdp4_dma mdp4_crtcs[] = {
		DMA_P, DMA_E,
	};
	static const char * const mdp4_crtc_names[] = {
		"DMA_P", "DMA_E",
	};
	static const int mdp4_intfs[] = {
		DRM_MODE_ENCODER_LVDS,
		DRM_MODE_ENCODER_DSI,
		DRM_MODE_ENCODER_TMDS,
	};

	/* construct non-private planes: */
	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
		plane = mdp4_plane_init(dev, vg_planes[i], false);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for VG%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
		plane = mdp4_plane_init(dev, rgb_planes[i], true);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for RGB%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}

		crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
				mdp4_crtcs[i]);
		if (IS_ERR(crtc)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
				mdp4_crtc_names[i]);
			ret = PTR_ERR(crtc);
			goto fail;
		}

		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * we currently set up two relatively fixed paths:
	 *
	 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
	 *			or
	 * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
	 *
	 * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
	 */

	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
				i, ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

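/* Read REG_MDP4_VERSION to determine the hardware major/minor revision */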
static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
				 u32 *major, u32 *minor)
{
	struct drm_device *dev = mdp4_kms->dev;
	u32 version;

	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	*major = FIELD(version, MDP4_VERSION_MAJOR);
	*minor = FIELD(version, MDP4_VERSION_MINOR);

	DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
}

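/*
 * Probe-time initialization: map registers, grab the IRQ, regulator and
 * clocks, check the hardware revision, attach the IOMMU address space,
 * and run modeset_init().
 */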
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct mdp4_platform_config *config = mdp4_get_config(pdev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct msm_gem_address_space *aspace;
	int irq, ret;
	u32 major, minor;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	mdp_kms_init(&mdp4_kms->base, &kms_funcs);

	priv->kms = &mdp4_kms->base.base;
	kms = priv->kms;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(mdp4_kms->axi_clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
		ret = PTR_ERR(mdp4_kms->axi_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, config->max_clk);

	read_mdp_hw_revision(mdp4_kms, &major, &minor);

	if (major != 4) {
		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
			      major, minor);
		ret = -ENXIO;
		goto fail;
	}

	mdp4_kms->rev = minor;

	if (mdp4_kms->rev >= 2) {
		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
		if (IS_ERR(mdp4_kms->lut_clk)) {
			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
			ret = PTR_ERR(mdp4_kms->lut_clk);
			goto fail;
		}
		clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
	}

	pm_runtime_enable(dev->dev);
	mdp4_kms->rpm_enabled = true;

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
	mdelay(16);

	if (config->iommu) {
		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
			config->iommu);

		aspace  = msm_gem_address_space_create(mmu,
			"mdp4", 0x1000, 0x100000000 - 0x1000);

		if (IS_ERR(aspace)) {
			if (!IS_ERR(mmu))
				mmu->funcs->destroy(mmu);
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;
	} else {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	return kms;

fail:
	if (kms)
		mdp4_destroy(kms);
	return ERR_PTR(ret);
}

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);

	return &config;
}