1 /*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19 #include "msm_drv.h"
20 #include "msm_mmu.h"
21 #include "mdp5_kms.h"
22
/* IOMMU port (context bank) names passed to mmu->funcs->attach()/detach(). */
static const char *iommu_ports[] = {
		"mdp_0",
};
26
/* Forward declaration; defined at the bottom of this file. */
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);

/* Global pointer to the active hw config; set by mdp5_select_hw_cfg(). */
const struct mdp5_config *mdp5_cfg;
30
/*
 * Per-SoC hardware configuration for MSM8x74 (MDP5 v1.0/v1.2): instance
 * counts and register base offsets for each hardware sub-block.
 */
static const struct mdp5_config msm8x74_config = {
	.name = "msm8x74",
	.ctl = {
		.count = 5,
		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
	},
	.pipe_vig = {
		.count = 3,
		.base = { 0x01200, 0x01600, 0x01a00 },
	},
	.pipe_rgb = {
		.count = 3,
		.base = { 0x01e00, 0x02200, 0x02600 },
	},
	.pipe_dma = {
		.count = 2,
		.base = { 0x02a00, 0x02e00 },
	},
	.lm = {
		.count = 5,
		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
	},
	.dspp = {
		.count = 3,
		.base = { 0x04600, 0x04a00, 0x04e00 },
	},
	.ad = {
		.count = 2,
		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
	},
	.intf = {
		.count = 4,
		.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
	},
};
66
/*
 * Per-SoC hardware configuration for APQ8084 (MDP5 v1.3): more pipes,
 * layer mixers, and interfaces than MSM8x74, at shifted base offsets.
 */
static const struct mdp5_config apq8084_config = {
	.name = "apq8084",
	.ctl = {
		.count = 5,
		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
	},
	.pipe_vig = {
		.count = 4,
		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
	},
	.pipe_rgb = {
		.count = 4,
		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
	},
	.pipe_dma = {
		.count = 2,
		.base = { 0x03200, 0x03600 },
	},
	.lm = {
		.count = 6,
		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
	},
	.dspp = {
		.count = 4,
		.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },

	},
	.ad = {
		.count = 3,
		.base = { 0x13500, 0x13700, 0x13900 },
	},
	.intf = {
		.count = 5,
		.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
	},
};
103
/* Maps an MDP5 minor revision number to its hardware configuration. */
struct mdp5_config_entry {
	int revision;				/* MDP5 minor revision (major is always 1) */
	const struct mdp5_config *config;	/* matching per-SoC config table */
};

/* Known revisions; scanned linearly by mdp5_select_hw_cfg(). */
static const struct mdp5_config_entry mdp5_configs[] = {
	{ .revision = 0, .config = &msm8x74_config },
	{ .revision = 2, .config = &msm8x74_config },
	{ .revision = 3, .config = &apq8084_config },
};
114
mdp5_select_hw_cfg(struct msm_kms * kms)115 static int mdp5_select_hw_cfg(struct msm_kms *kms)
116 {
117 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
118 struct drm_device *dev = mdp5_kms->dev;
119 uint32_t version, major, minor;
120 int i, ret = 0;
121
122 mdp5_enable(mdp5_kms);
123 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
124 mdp5_disable(mdp5_kms);
125
126 major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
127 minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
128
129 DBG("found MDP5 version v%d.%d", major, minor);
130
131 if (major != 1) {
132 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
133 major, minor);
134 ret = -ENXIO;
135 goto out;
136 }
137
138 mdp5_kms->rev = minor;
139
140 /* only after mdp5_cfg global pointer's init can we access the hw */
141 for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
142 if (mdp5_configs[i].revision != minor)
143 continue;
144 mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
145 break;
146 }
147 if (unlikely(!mdp5_kms->hw_cfg)) {
148 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
149 major, minor);
150 ret = -ENXIO;
151 goto out;
152 }
153
154 DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
155
156 return 0;
157 out:
158 return ret;
159 }
160
mdp5_hw_init(struct msm_kms * kms)161 static int mdp5_hw_init(struct msm_kms *kms)
162 {
163 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
164 struct drm_device *dev = mdp5_kms->dev;
165 int i;
166
167 pm_runtime_get_sync(dev->dev);
168
169 /* Magic unknown register writes:
170 *
171 * W VBIF:0x004 00000001 (mdss_mdp.c:839)
172 * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
173 * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
174 * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
175 * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
176 * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
177 * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
178 * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
179 * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
180 *
181 * Downstream fbdev driver gets these register offsets/values
182 * from DT.. not really sure what these registers are or if
183 * different values for different boards/SoC's, etc. I guess
184 * they are the golden registers.
185 *
186 * Not setting these does not seem to cause any problem. But
187 * we may be getting lucky with the bootloader initializing
188 * them for us. OTOH, if we can always count on the bootloader
189 * setting the golden registers, then perhaps we don't need to
190 * care.
191 */
192
193 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
194
195 for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
196 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
197
198 pm_runtime_put_sync(dev->dev);
199
200 return 0;
201 }
202
/*
 * KMS .round_pixclk hook: MDP5 accepts any requested pixel clock rate
 * unchanged, so the requested rate is simply passed back.
 */
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
208
mdp5_preclose(struct msm_kms * kms,struct drm_file * file)209 static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
210 {
211 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
212 struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
213 unsigned i;
214
215 for (i = 0; i < priv->num_crtcs; i++)
216 mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
217 }
218
mdp5_destroy(struct msm_kms * kms)219 static void mdp5_destroy(struct msm_kms *kms)
220 {
221 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
222 struct msm_mmu *mmu = mdp5_kms->mmu;
223
224 if (mmu) {
225 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
226 mmu->funcs->destroy(mmu);
227 }
228 kfree(mdp5_kms);
229 }
230
/* KMS function table: generic msm_kms hooks plus the MDP irqmask op. */
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.preclose        = mdp5_preclose,
		.destroy         = mdp5_destroy,
	},
	.set_irqmask = mdp5_set_irqmask,
};
247
/*
 * Gate the MDP5 core clocks.  Counterpart to mdp5_enable(); clocks are
 * released in the same order they were enabled.  Always returns 0.
 */
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}
259
/*
 * Ungate the MDP5 core clocks (bus, interface, core, LUT).  Always
 * returns 0.
 *
 * NOTE(review): the clk_prepare_enable() return values are ignored here;
 * a failing clock will go unnoticed — worth confirming this is intended.
 */
int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);

	return 0;
}
271
/*
 * Construct the mode-setting objects: one plane+CRTC pair per RGB pipe
 * in the hw config, one HDMI encoder on interface 3, and the HDMI
 * bridge/connector.
 *
 * Returns 0 on success or a negative errno.  On failure, objects already
 * created are not torn down here; the caller (mdp5_kms_init) unwinds via
 * mdp5_destroy().
 */
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	/* RGB pipes backing each CRTC, indexed by CRTC id. */
	static const enum mdp5_pipe crtcs[] = {
			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	int i, ret;

	/* construct CRTCs: */
	for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
		struct drm_plane *plane;
		struct drm_crtc *crtc;

		plane = mdp5_plane_init(dev, crtcs[i], true);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}

		crtc  = mdp5_crtc_init(dev, plane, i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* Construct encoder for HDMI: */
	/* NOTE(review): interface index 3 is hard-coded as the HDMI intf
	 * here and below — presumably true for the supported SoCs, but
	 * worth confirming against the hw config's intf table.
	 */
	encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct encoder\n");
		ret = PTR_ERR(encoder);
		goto fail;
	}

	/* NOTE: the vsync and error irq's are actually associated with
	 * the INTF/encoder.. the easiest way to deal with this (ie. what
	 * we do now) is assume a fixed relationship between crtc's and
	 * encoders.  I'm not sure if there is ever a need to more freely
	 * assign crtcs to encoders, but if there is then we need to take
	 * care of error and vblank irq's that the crtc has registered,
	 * and also update user-requested vblank_mask.
	 */
	encoder->possible_crtcs = BIT(0);
	mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);

	priv->encoders[priv->num_encoders++] = encoder;

	/* Construct bridge/connector for HDMI: */
	mdp5_kms->hdmi = hdmi_init(dev, encoder);
	if (IS_ERR(mdp5_kms->hdmi)) {
		ret = PTR_ERR(mdp5_kms->hdmi);
		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	return ret;
}
339
get_clk(struct platform_device * pdev,struct clk ** clkp,const char * name)340 static int get_clk(struct platform_device *pdev, struct clk **clkp,
341 const char *name)
342 {
343 struct device *dev = &pdev->dev;
344 struct clk *clk = devm_clk_get(dev, name);
345 if (IS_ERR(clk)) {
346 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
347 return PTR_ERR(clk);
348 }
349 *clkp = clk;
350 return 0;
351 }
352
mdp5_kms_init(struct drm_device * dev)353 struct msm_kms *mdp5_kms_init(struct drm_device *dev)
354 {
355 struct platform_device *pdev = dev->platformdev;
356 struct mdp5_platform_config *config = mdp5_get_config(pdev);
357 struct mdp5_kms *mdp5_kms;
358 struct msm_kms *kms = NULL;
359 struct msm_mmu *mmu;
360 int i, ret;
361
362 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
363 if (!mdp5_kms) {
364 dev_err(dev->dev, "failed to allocate kms\n");
365 ret = -ENOMEM;
366 goto fail;
367 }
368
369 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
370
371 kms = &mdp5_kms->base.base;
372
373 mdp5_kms->dev = dev;
374 mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
375
376 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
377 if (IS_ERR(mdp5_kms->mmio)) {
378 ret = PTR_ERR(mdp5_kms->mmio);
379 goto fail;
380 }
381
382 mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
383 if (IS_ERR(mdp5_kms->vbif)) {
384 ret = PTR_ERR(mdp5_kms->vbif);
385 goto fail;
386 }
387
388 mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
389 if (IS_ERR(mdp5_kms->vdd)) {
390 ret = PTR_ERR(mdp5_kms->vdd);
391 goto fail;
392 }
393
394 ret = regulator_enable(mdp5_kms->vdd);
395 if (ret) {
396 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
397 goto fail;
398 }
399
400 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk");
401 if (ret)
402 goto fail;
403 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk");
404 if (ret)
405 goto fail;
406 ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src");
407 if (ret)
408 goto fail;
409 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk");
410 if (ret)
411 goto fail;
412 ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
413 if (ret)
414 goto fail;
415 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
416 if (ret)
417 goto fail;
418
419 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
420
421 ret = mdp5_select_hw_cfg(kms);
422 if (ret)
423 goto fail;
424
425 /* make sure things are off before attaching iommu (bootloader could
426 * have left things on, in which case we'll start getting faults if
427 * we don't disable):
428 */
429 mdp5_enable(mdp5_kms);
430 for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
431 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
432 mdp5_disable(mdp5_kms);
433 mdelay(16);
434
435 if (config->iommu) {
436 mmu = msm_iommu_new(&pdev->dev, config->iommu);
437 if (IS_ERR(mmu)) {
438 ret = PTR_ERR(mmu);
439 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
440 goto fail;
441 }
442
443 ret = mmu->funcs->attach(mmu, iommu_ports,
444 ARRAY_SIZE(iommu_ports));
445 if (ret) {
446 dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
447 mmu->funcs->destroy(mmu);
448 goto fail;
449 }
450 } else {
451 dev_info(dev->dev, "no iommu, fallback to phys "
452 "contig buffers for scanout\n");
453 mmu = NULL;
454 }
455 mdp5_kms->mmu = mmu;
456
457 mdp5_kms->id = msm_register_mmu(dev, mmu);
458 if (mdp5_kms->id < 0) {
459 ret = mdp5_kms->id;
460 dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
461 goto fail;
462 }
463
464 ret = modeset_init(mdp5_kms);
465 if (ret) {
466 dev_err(dev->dev, "modeset_init failed: %d\n", ret);
467 goto fail;
468 }
469
470 return kms;
471
472 fail:
473 if (kms)
474 mdp5_destroy(kms);
475 return ERR_PTR(ret);
476 }
477
/*
 * Build the platform configuration for this device.
 *
 * Returns a pointer to a static singleton, so the result must not be
 * freed and this function is not re-entrant.  config.iommu may be NULL
 * if iommu_domain_alloc() fails; mdp5_kms_init() then falls back to
 * physically contiguous scanout buffers.
 */
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
{
	static struct mdp5_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
#endif
	config.iommu = iommu_domain_alloc(&platform_bus_type);
	/* TODO hard-coded in downstream mdss, but should it be? */
	config.max_clk = 200000000;
	/* TODO get from DT: */
	config.smp_blk_cnt = 22;

	return &config;
}
492